Commit be1ae552 authored by Richard Torenvliet's avatar Richard Torenvliet

Add a way of altering the amount of shape components needed for the reconstruction.

parent 8ef44a7e
......@@ -82,6 +82,30 @@ class AAMPoints():
return self.points_list
def draw_triangles(self, image=None, show_points=True):
assert self.points_list is not None, \
'the list points already need to be scaled order to correctly work,\
this requires that get_scaled_points is executed first.'
if image is None:
image = self.image
triangles = Triangulation(self.points_list[:, 0], self.points_list[:, 1])
for t, tri in enumerate(triangles.triangles):
p1, p2, p3 = self.points_list[tri]
cv2.line(image, tuple(p1), tuple(p2), (255, 0, 100), 1)
cv2.line(image, tuple(p2), tuple(p3), (255, 0, 100), 1)
cv2.line(image, tuple(p3), tuple(p1), (255, 0, 100), 1)
if show_points:
point_indices = list(range(0, max(self.actual_shape)))
for i, p in enumerate(self.points_list):
point_index = int(point_indices[i])
cv2.putText(image, str(point_index), tuple((p[0], p[1])),
cv2.FONT_HERSHEY_SIMPLEX, .5, (100, 0, 255))
cv2.circle(image, tuple(p), 3, color=(0, 255, 100))
def calculate_bounding_box(self):
"""
Calculate bounding box in the **scaled** points list
......
......@@ -92,27 +92,6 @@ class IMMPoints(aam.AAMPoints):
return np.asarray(points_list, dtype='f')
def draw_triangles(self, image, points, multiply=True):
    """Draw the Delaunay triangulation of ``points`` on ``image``.

    Args:
        image(ndarray): OpenCV BGR image to draw on.
        points(ndarray): n x 2 array of landmark locations. NOTE: when
            ``multiply`` is True the coordinates are rescaled *in place*
            from relative [0, 1] values to absolute pixel values.
        multiply(bool): scale the (relative) points by the image width
            and height before drawing.
    """
    if multiply:
        h, w, c = image.shape
        points[:, 0] = points[:, 0] * w
        points[:, 1] = points[:, 1] * h

    # Delaunay triangulation over the 2d landmark locations.
    triangles = Triangulation(points[:, 0], points[:, 1])

    for tri in triangles.triangles:
        p1, p2, p3 = points[tri]
        cv2.line(image, tuple(p1), tuple(p2), (255, 0, 100), 1)
        cv2.line(image, tuple(p2), tuple(p3), (255, 0, 100), 1)
        cv2.line(image, tuple(p3), tuple(p1), (255, 0, 100), 1)

    # Annotate each landmark with its index. The enumerate index replaces
    # the previous hard-coded `list(range(0, 58))` lookup table (58 is the
    # IMM landmark count), which produced the same value but broke for any
    # other number of points.
    for i, p in enumerate(points):
        cv2.putText(image, str(i), tuple((p[0], p[1])),
                    cv2.FONT_HERSHEY_SIMPLEX, .5, (100, 0, 255))
        cv2.circle(image, tuple(p), 3, color=(0, 255, 100))
def show_on_image(self, image, window_name='image', multiply=True):
self.draw_triangles(image, self.points_list, multiply=multiply)
......
......@@ -2,6 +2,7 @@
# python std
import argparse
import importlib
import copy
# installed packages
import cv2
......@@ -207,21 +208,35 @@ def show_reconstruction(args):
mean_points = dataset_module.IMMPoints(points_list=shape_model.mean_values)
mean_points.get_scaled_points(input_image.shape)
n_components = 58
count = 0
while True:
input_image_copy = input_image.copy()
input_points_copy = copy.deepcopy(input_points)
reconstruction.reconstruct_shape(
input_image_copy, input_points_copy, shape_model,
n_components=n_components - count
)
reconstruction.reconstruct_texture(
input_image, # src image
input_image, # dst image
input_image_copy, # src image
input_image_copy, # dst image
texture_model,
input_points, # shape points input
mean_points, # shape points mean
input_points_copy, # shape points input
mean_points # shape points mean
)
input_points_copy.get_scaled_points(input_image.shape)
input_points_copy.draw_triangles(image=input_image_copy, show_points=False)
dst = reconstruction.get_texture(
mean_points, texture_model.mean_values
)
mean_points, texture_model.mean_values
)
cv2.imshow('original', input_points.get_image())
cv2.imshow('reconstructed', input_image)
cv2.imshow('original', input_image)
cv2.imshow('reconstructed', input_image_copy)
cv2.imshow('main face', dst)
k = cv2.waitKey(0) & 0xFF
......@@ -229,6 +244,8 @@ def show_reconstruction(args):
if k == 27:
break
count += 2
cv2.destroyAllWindows()
......
......@@ -153,12 +153,19 @@ def reconstruct(feature_vector, Vt, mean_values, n_components=None):
"""
# if n_components is None, use all components.
if n_components is None:
n_components = Vt.shape[1]
# create the zero mean, is always needed for PCA reconstruction.
# we can only alter the data in zero_mean.
zm = feature_vector - mean_values
# project the zero mean onto the Vt basis (with potentially fewer
# dimensions than the original).
yk = np.dot(Vt[:n_components], zm.T)
# project back to the original and add the mean_values again.
return np.dot(Vt[:n_components].T, yk) + mean_values
......
......@@ -78,45 +78,79 @@ def get_texture(Points, flattened_texture):
return np.asarray(flattened_texture, np.uint8).reshape((h_slice, w_slice, 3))
def reconstruct_shape(image, points, shape_model, n_components=None):
    """Reconstruct the shape landmarks with the (truncated) PCA shape model.

    Args:
        image: not used in this body; kept for interface compatibility
            with existing callers.
        points(aam.AAMPoints): landmarks to reconstruct; the result is
            stored on ``points.normalized_flattened_points_list``.
        shape_model(PCAModel): provides the ``Vt`` basis and the mean
            shape values, see the pca module.
        n_components(int): number of shape components used for the
            reconstruction; None means all components.
    """
    points.normalized_flattened_points_list = pca.reconstruct(
        points.get_points(),
        shape_model.Vt,
        shape_model.mean_values,
        n_components=n_components
    )
def reconstruct_texture(src_image, dst_image, texture_model, src_points, dst_points):
"""
Reconstruct the texture given the src and dst images
Args:
src_points(aam.AAMPoints)
dst_points(aam.AAMPoints)
src_image(ndarray): numpy / OpenCV image in BGR
dst_image(ndarray): numpy image / OpenCV image, may be None; if None
then we create an image just as big as the input image, but with a
black background.
texture_model(PCAModel): The PCAModel that holds the information that
we need to reconstruct the image, see pca module.
Make one by doing this: (see PCAModel on how it is stored in numpy
file).
texture_model = pca.PCAModel(model_texture_file)
src_points(aam.AAMPoints): The AAMPoints object contains the location
of the landmarks on the face that we need to perform piece wise affine
warping.
dst_points(aam.AAMPoints): The AAMPoints object contains the location
of the landmarks on the face that we need to perform piece wise affine
warping.
"""
Vt = texture_model.Vt
triangles = texture_model.triangles
mean_texture = texture_model.mean_values
# n_components = texture_model.n_components
# S_mean format
h, w, c = src_image.shape
# empty input_texture
input_texture = np.full((h, w, 3), fill_value=0, dtype=np.uint8)
points2d_src = src_points.get_scaled_points(src_image.shape)
points2d_dst = dst_points.get_scaled_points(dst_image.shape)
# get the texture from the rectangles.
aam.sample_from_triangles(
src_image,
points2d_src,
points2d_dst,
triangles,
input_texture
input_texture # this will be filled with the texture afterwards.
)
# define the rectangle around the dst_points.
offset_x, offset_y, w_slice, h_slice = dst_points.get_bounding_box()
# cut out this region from the input_texture.
input_texture = input_texture[offset_y: offset_y + h_slice,
offset_x: offset_x + w_slice].flatten()
# Still in S_mean format
# perform the PCA reconstruction using the input_texture.
r_texture = pca.reconstruct(input_texture, Vt, mean_texture)
# Make an image from the float data
# Make an image from the data, the texture is still of type `float`.
r_texture = np.asarray(r_texture, np.uint8).reshape((h_slice, w_slice, 3))
# subtract the offset
# subtract the offset, this is needed because the image is now a
# small rectangle around the face which starts at [0, 0], whereas it
# was first located at offset_x, offset_y. We need both rectangles to
# start at [0, 0]. Please note that this should be improved to avoid
# confusion.
points2d_dst[:, 0] -= offset_x
points2d_dst[:, 1] -= offset_y
......
......@@ -4,11 +4,13 @@ import base64
from glob import glob
import cv2
import numpy as np
from tornado import websocket, web, ioloop, autoreload
import pca
from datasets import imm
from reconstruction import reconstruction
from settings import logger
BASE = '../viewer/app'
FILES_DIR = '../data/'
......@@ -19,7 +21,7 @@ FACE_DB = '{}{}'.format(FILES_DIR, FACE_DB_NAME)
class ImageWebSocketHandler(websocket.WebSocketHandler):
handlers = {
'filename': 'handle_return_image',
'reconstruction_index': 'handle_return_reconstruction'
'reconstruction': 'handle_return_reconstruction'
}
def __init__(self, *args, **kwargs):
......@@ -57,6 +59,12 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
def handle_return_reconstruction(self, message):
""" Return the reconstruction of the given image """
image_index = message['reconstruction_index']
image_as_background = message.get('background_image', True)
shape_components = message.get('shape_components', 58)
print message
logger.info('using %s shape_components', shape_components)
asf_filename = self.asf[image_index]
input_points = imm.IMMPoints(filename=asf_filename)
......@@ -65,16 +73,29 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
mean_points = imm.IMMPoints(points_list=self.shape_model.mean_values)
mean_points.get_scaled_points(input_image.shape)
#TODO This one is VERY SLOW, try to optimize
# set dst image to an empty image if value is None
if image_as_background is False:
h, w, _ = input_image.shape
dst_image = np.full((h, w, 3), fill_value=0, dtype=np.uint8)
else:
dst_image = input_image
reconstruction.reconstruct_shape(
input_image, input_points, self.shape_model,
n_components=shape_components
)
reconstruction.reconstruct_texture(
input_image, # src image
input_image, # dst image
dst_image, # dst image
self.texture_model,
input_points, # shape points input
mean_points, # shape points mean
mean_points # shape points mean
)
_, reconstructed = cv2.imencode('.jpg', input_image)
input_points.draw_triangles(show_points=False)
_, reconstructed = cv2.imencode('.jpg', dst_image)
reconstructed = base64.b64encode(reconstructed)
self.write_message(json.dumps({'reconstructed': reconstructed}))
......@@ -96,7 +117,7 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
print(msg, e)
self.__return_error(msg)
handler(message)
handler(message[m])
def on_close(self):
print("WebSocket closed")
......
......@@ -8,13 +8,10 @@ const ThreeComponent = Ember.Component.extend({
scene: null,
willRender() {
if (this.scene) {
return;
}
if (this.scene) { return; }
var scene = new THREE.Scene();
var gui = new dat.GUI();
var camera = new THREE.PerspectiveCamera(
75, window.innerWidth / window.innerHeight, 0.1, 1000
);
......@@ -23,10 +20,25 @@ const ThreeComponent = Ember.Component.extend({
var renderer = new THREE.WebGLRenderer();
// the sidebar 'dat-gui' controls
var reconstructionControls = {
index: 0,
shape_components: 58,
background_image: true,
};
for(var i = 0; i < 15; i++) {
reconstructionControls['shape_eigen_value_' + i] = 0.0;
}
var shapeEigenValueSliders = {};
this.set('scene', scene);
this.set('camera', camera);
this.set('renderer', renderer);
this.set('gui', gui);
this.set('controls', reconstructionControls);
this.set('shapeEigenValueSliders', shapeEigenValueSliders);
this.get('store').findAll('face').then((faces) => {
this.set('faces', faces);
......@@ -45,29 +57,47 @@ const ThreeComponent = Ember.Component.extend({
}),
/**
* Adds the 'dat-gui' sliders
*
* See:
* http://learningthreejs.com/blog/2011/08/14/dat-gui-simple-ui-for-demos/
*/
addSliders() {
var self = this;
var gui = this.get('gui');
var obj = {
name: "Image filename",
index: 0
};
var components = {
name: "Components",
components: 0
};
var reconstructionControls = this.get('controls');
var shapeEigenValueSliders = this.get('shapeEigenValueSliders');
var length = this.get('faces').get('length');
var imagesSlider = gui.add(obj, "index").min(0).max(
length - 1).step(1);
var index = gui.add(reconstructionControls, 'index', 0, length - 1);
var shape_components = gui.add(reconstructionControls, 'shape_components', 0, 58);
var background = gui.add(reconstructionControls, 'background_image');
var shapeEigenValueControls = gui.addFolder('shape_eigen_values');
for(var i = 0; i < 15; i++) {
shapeEigenValueControls.add(reconstructionControls, 'shape_eigen_value_' + i, 0.0, 10.0);
}
// on index change
index.onChange(function(newValue) {
// update the image_index, which is on the controller
self.set('image_index', parseInt(newValue));
self.sendAction('updateIndex', parseInt(newValue));
});
gui.add(components, "components").min(0).max(length - 1).step(1);
background.onChange(function(newValue) {
self.sendAction('updateBackground', newValue);
});
shape_components.onChange(function(newValue) {
self.sendAction('updateShapeComponents', newValue);
});
imagesSlider.onChange(function(newValue) {
self.set('image_index', newValue);
self.sendAction('update', newValue);
reconstructionControls.onChange(function(newValue) {
console.log(newValue);
});
}
});
......
......@@ -10,7 +10,8 @@ export default Ember.Controller.extend({
image: null,
image_index: 0,
n_components: null,
background_image: true,
shape_components: null,
n_images: null,
reconstructed: null,
......@@ -64,7 +65,8 @@ export default Ember.Controller.extend({
this.set('loading', false);
},
getReconstruction: Ember.observer('image_index', function() {
getReconstruction: Ember.observer(
'image_index', 'background_image', 'shape_components', function() {
this.send('getReconstruction');
}),
......@@ -75,7 +77,6 @@ export default Ember.Controller.extend({
actions: {
getImage(faceModel) {
this.set('loading', true);
var filename = faceModel.get('filename');
const socket = this.get('socketRef');
......@@ -86,18 +87,33 @@ export default Ember.Controller.extend({
getReconstruction() {
this.set('loading', true);
const socket = this.get('socketRef');
socket.send(
JSON.stringify({reconstruction_index: this.get('image_index')}
JSON.stringify({
reconstruction: {
reconstruction_index: this.get('image_index'),
background_image: this.get('background_image'),
shape_components: this.get('shape_components')
}
}
));
},
// connects components together
// handles the upate action passed to a component
updateComponentConnector(index) {
updateIndexComponentConnector(index) {
this.set('image_index', index);
},
updateBackgroundComponentConnector(showBackground) {
this.set('background_image', showBackground);
},
updateShapeComponents(components) {
console.log('shape_components', components);
this.set('shape_components', components);
}
}
});
{{# if current_face_filename }}
<img src='{{current_face_filename}}' alt='missing original'>
{{else}}
<div class="table">
<div class="table-cell align-middle">
{{fa-icon "spinner" spin=true size='lg'}}
Loading..
</div>
</div>
{{/if}}
{{yield}}
......@@ -3,25 +3,34 @@
<div class="container">
<div class="clearfix">
<div class="col col-6">
{{images-loader image_index=image_index}}
{{images-loader image_index=image_index
background_image=background_image
shape_components=shape_components
loading=loading
}}
</div>
<div class="col col-6">
{{#if loading }}
<div class="table">
<div class="table-cell align-middle">
{{fa-icon "spinner" spin=true size='lg'}}
Loading..
</div>
<div class="table">
<div class="table-cell align-middle">
{{#if reconstructed }}
<img src='data:image/jpg;base64,{{reconstructed}}'
alt='missing image'>
{{/if}}
{{#if loading }}
{{fa-icon "spinner" spin=true size='lg'}} Loading..
{{/if}}
</div>
{{else if reconstructed }}
<img src='data:image/jpg;base64,{{reconstructed}}'
alt='missing image'>
{{/if}}
</div>
</div>
</div>
<div class="col col-12 px2">
{{three-js-reconstruction update=(action 'updateComponentConnector')}}
{{three-js-reconstruction
updateIndex=(action 'updateIndexComponentConnector')
updateBackground=(action 'updateBackgroundComponentConnector')
updateShapeComponents=(action 'updateShapeComponents')
}}
</div>
</div>
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment