Commit 292bf39b authored by Richard Torenvliet

Made changes to support eos; added Python bindings to call it from Python

parent 579f7adc
FROM smvanveen/computer-vision:20161109143812
RUN git clone https://github.com/davisking/dlib.git
RUN (cd dlib; python setup.py install --yes USE_AVX_INSTRUCTIONS)
RUN apt-get install graphviz -y
# install python requirements
COPY requirements.txt /tmp
RUN pip install -r /tmp/requirements.txt
# extra packages:
# graphviz: for cProfiling using pycallgraph.
# libeigen3-dev: for eos: 3D morphable face model fitting library.
RUN apt-get install -y \
graphviz \
libeigen3-dev
WORKDIR /libs
# install dlib
RUN git clone https://github.com/davisking/dlib.git
RUN (cd dlib; python setup.py install --yes USE_AVX_INSTRUCTIONS)
# install eos (face reconstruction: a 3D Morphable Face Model fitting library)
RUN git clone --recursive https://github.com/patrikhuber/eos.git
# remove dependency on opencv 2.4.3, opencv 3.0 works fine
WORKDIR /libs/eos
RUN sed -i 's/2.4.3//g' CMakeLists.txt
RUN mkdir build
WORKDIR /libs/eos/build
RUN cmake ../ \
-DCMAKE_INSTALL_PREFIX=/usr/local/eos \
-DGENERATE_PYTHON_BINDINGS=on \
-DBUILD_UTILS=on \
-DPYTHON_EXECUTABLE=/usr/bin/python
RUN make && make install
ENV PYTHONPATH=/usr/local/eos/bin/:$PYTHONPATH
WORKDIR /src
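The eos build above is configured with -DGENERATE_PYTHON_BINDINGS=on and installed under /usr/local/eos, so the PYTHONPATH line exposes eos's own Python module inside the container. A minimal sanity check, only a sketch and not part of this commit, assuming the generated module is importable as eos:

# Run inside the container: verify that the eos Python bindings built above
# are importable via the PYTHONPATH set in the Dockerfile.
import eos
print(eos.__file__)  # expected to resolve somewhere under /usr/local/eos/bin/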
@@ -17,9 +17,6 @@ runnit:
$(BASE_DOCKER_CMD) python main.py
src/reconstruction/texture.so: src/reconstruction/texture.pyx
$(BASE_DOCKER_CMD) /bin/bash -c '(cd reconstruction; python setup.py build_ext --inplace)'
## IMM Dataset
data/pca_imm_shape_model.npy:
$(BASE_DOCKER_CMD) python main.py \
...
@@ -15,6 +15,7 @@ $(info $(TARGETS))
DEPENDENCIES:= data/imm_face_db
TARGETS:= data/shape_predictor_68_face_landmarks.dat\
src/reconstruction/texture.so \
src/reconstruction/fit.so \
data/pca_ibug_shape_model.npy \
data/pca_ibug_texture_model.npy
@@ -43,3 +44,5 @@ $(SITE_PACKAGES)/cv%:
@ls $@
src/reconstruction/fit.so: src/reconstruction/fit-model.cpp
$(BASE_DOCKER_CMD) /bin/bash -c '(cd reconstruction; python setup.py build_ext --inplace)'
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: examples/fit-model.cpp
*
* Copyright 2015 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <Python.h>
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/fitting/linear_shape_fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "boost/program_options.hpp"
#include "boost/filesystem.hpp"
#include <vector>
#include <iostream>
#include <fstream>
using namespace eos;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using eos::core::Landmark;
using eos::core::LandmarkCollection;
using eos::core::LandmarkMapper;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
using cv::Vec4f;
using std::cout;
using std::endl;
using std::vector;
using std::string;
/**
* Reads an ibug .pts landmark file and returns an ordered vector with
* the 68 2D landmark coordinates.
*
* @param[in] filename Path to a .pts file.
* @return An ordered vector with the 68 ibug landmarks.
*/
LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
{
using std::getline;
using cv::Vec2f;
using std::string;
LandmarkCollection<Vec2f> landmarks;
landmarks.reserve(68);
std::ifstream file(filename);
if (!file.is_open()) {
throw std::runtime_error(string("Could not open landmark file: " + filename));
}
string line;
// Skip the first 3 lines, they're header lines:
getline(file, line); // 'version: 1'
getline(file, line); // 'n_points : 68'
getline(file, line); // '{'
int ibugId = 1;
while (getline(file, line))
{
if (line == "}") { // end of the file
break;
}
std::stringstream lineStream(line);
Landmark<Vec2f> landmark;
landmark.name = std::to_string(ibugId);
if (!(lineStream >> landmark.coordinates[0] >> landmark.coordinates[1])) {
throw std::runtime_error(string("Landmark format error while parsing the line: " + line));
}
// From the iBug website:
// "Please note that the re-annotated data for this challenge are saved in the Matlab convention of 1 being
// the first index, i.e. the coordinates of the top left pixel in an image are x=1, y=1."
// ==> So we shift every point by 1:
landmark.coordinates[0] -= 1.0f;
landmark.coordinates[1] -= 1.0f;
landmarks.emplace_back(landmark);
++ibugId;
}
return landmarks;
};
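// For reference, the ibug .pts files consumed above look roughly like this
// (the coordinates below are made-up values, one "x y" pair per landmark):
//
//   version: 1
//   n_points: 68
//   {
//   336.8 240.5
//   334.6 261.2
//   ...
//   }
//
// The coordinates are 1-based (Matlab convention), which is why every point
// is shifted by -1 in the loop above.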
/**
* This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from an ibug LFPW image with its landmarks.
*
* First, the 68 ibug landmarks are loaded from the .pts file and converted
* to vertex indices using the LandmarkMapper. Then, an affine camera matrix
* is estimated, and then, using this camera matrix, the shape is fitted
* to the landmarks.
*/
int main(int argc, char *argv[])
{
fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, outputfile;
try {
po::options_description desc("Allowed options");
desc.add_options()
("help,h",
"display the help message")
("model,m", po::value<fs::path>(&modelfile)->required()->default_value("../share/sfm_shape_3448.bin"),
"a Morphable Model stored as cereal BinaryArchive")
("image,i", po::value<fs::path>(&imagefile)->required()->default_value("data/image_0010.png"),
"an input image")
("landmarks,l", po::value<fs::path>(&landmarksfile)->required()->default_value("data/image_0010.pts"),
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"),
"landmark identifier to model vertex number mapping")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("out"),
"basename for the output rendering and obj files")
;
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
if (vm.count("help")) {
cout << "Usage: fit-model [options]" << endl;
cout << desc;
return EXIT_SUCCESS;
}
po::notify(vm);
}
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_SUCCESS;
}
// Load the image, landmarks, LandmarkMapper and the Morphable Model:
Mat image = cv::imread(imagefile.string());
LandmarkCollection<cv::Vec2f> landmarks;
try {
landmarks = read_pts_landmarks(landmarksfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error reading the landmarks: " << e.what() << endl;
return EXIT_FAILURE;
}
morphablemodel::MorphableModel morphable_model;
try {
morphable_model = morphablemodel::load_model(modelfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error loading the Morphable Model: " << e.what() << endl;
return EXIT_FAILURE;
}
core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile);
// Draw the loaded landmarks:
Mat outimg = image.clone();
for (auto&& lm : landmarks) {
cv::rectangle(
outimg,
cv::Point2f(lm.coordinates[0] - 2.0f,
lm.coordinates[1] - 2.0f),
cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f),
{ 255, 0, 0 }
);
}
// These will be the final 2D and 3D points used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
// Estimate the camera (pose) from the 2D - 3D point correspondences
fitting::ScaledOrthoProjectionParameters pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image.rows);
fitting::RenderingParameters rendering_params(pose, image.cols, image.rows);
// The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
// and similarly for pitch and roll.
// Estimate the shape coefficients by fitting the shape to the landmarks:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(
morphable_model, affine_from_ortho, image_points, vertex_indices
);
// Obtain the full mesh with the estimated coefficients:
render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
// Extract the texture from the image using given mesh and camera parameters:
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Save the mesh as textured obj:
outputfile += fs::path(".obj");
render::write_textured_obj(mesh, outputfile.string());
// And save the isomap:
outputfile.replace_extension(".isomap.png");
cv::imwrite(outputfile.string(), isomap);
cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile.stem().stem() << "." << endl;
return EXIT_SUCCESS;
}
static PyObject * fit_model(PyObject *self, PyObject *args) {
LandmarkCollection<cv::Vec2f> landmarks;
morphablemodel::MorphableModel morphable_model;
LandmarkMapper("/usr/local/eos/share/ibug2did.txt");
morphable_model = morphablemodel::load_model("/usr/local/eos/share/sfm_shape_3448.bin");
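// NOTE: this binding is still a work in progress. The PyObject *args tuple is
// not parsed yet, the LandmarkMapper and MorphableModel paths are hard-coded,
// and the fitting pipeline below stays commented out until the input image and
// its landmarks are passed in from Python.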
// Draw the loaded landmarks:
//Mat outimg = image.clone();
//for (auto&& lm : landmarks) {
// cv::rectangle(
// outimg,
// cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
// cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f),
// { 255, 0, 0 }
// );
//}
//// These will be the final 2D and 3D points used for the fitting:
//vector<Vec4f> model_points; // the points in the 3D shape model
//vector<int> vertex_indices; // their vertex indices
//vector<Vec2f> image_points; // the corresponding 2D landmark points
//// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
//for (int i = 0; i < landmarks.size(); ++i) {
// auto converted_name = landmark_mapper.convert(landmarks[i].name);
// if (!converted_name) { // no mapping defined for the current landmark
// continue;
// }
// int vertex_idx = std::stoi(converted_name.get());
// Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
// model_points.emplace_back(vertex);
// vertex_indices.emplace_back(vertex_idx);
// image_points.emplace_back(landmarks[i].coordinates);
//}
//// Estimate the camera (pose) from the 2D - 3D point correspondences
//fitting::ScaledOrthoProjectionParameters pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image.rows);
//fitting::RenderingParameters rendering_params(pose, image.cols, image.rows);
//// The 3D head pose can be recovered as follows:
//float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
//// and similarly for pitch and roll.
//// Estimate the shape coefficients by fitting the shape to the landmarks:
//Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
//vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(
// morphable_model, affine_from_ortho, image_points, vertex_indices
//);
//// Obtain the full mesh with the estimated coefficients:
//render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
//// Extract the texture from the image using given mesh and camera parameters:
//Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
//// Save the mesh as textured obj:
//outputfile += fs::path(".obj");
//render::write_textured_obj(mesh, outputfile.string());
//// And save the isomap:
//outputfile.replace_extension(".isomap.png");
//cv::imwrite(outputfile.string(), isomap);
//cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile.stem().stem() << "." << endl;
//return EXIT_SUCCESS;
Py_RETURN_NONE;
}
static PyMethodDef module_methods[] = {
// {name exposed to Python, C function, calling convention, docstring}
{"fit_model", fit_model, METH_VARARGS, "Fits an image with given landmarks to a 3DMM"},
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC initfit(void) {
(void) Py_InitModule("fit", module_methods);
}
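Once the new Makefile rule has built this file into fit.so via src/reconstruction/setup.py, the module can be called from Python. A minimal usage sketch (run from src/, like main.py); note that fit_model currently accepts an argument but does not parse it and simply returns None:

# Usage sketch for the new binding; assumes fit.so was built in-place in
# src/reconstruction (python setup.py build_ext --inplace).
import cv2
from reconstruction import fit

image = cv2.imread('/data/imm_face_db/01-1m.jpg')
fit.fit_model(image)  # the argument is currently ignored; returns None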
@@ -3,7 +3,6 @@ import numpy as np
import pca
import aam
from utility import import_dataset_module
def draw_shape(image, points, triangles, multiply=True):
...
@@ -16,6 +16,29 @@ extensions = [
'texture',
['texture.pyx'],
include_dirs=[np.get_include()], ),
Extension(
'fit',
['fit-model.cpp'],
language="c++",
include_dirs=[
'/usr/local/eos/include/', # these paths may need to change in the future
'/usr/local/eos/3rdparty/glm/',
'/usr/local/eos/3rdparty/cereal-1.1.1/include/',
'/usr/local/include/opencv2/',
'/usr/include/boost/'
],
library_dirs=[
'/usr/local/eos/bin',
'/usr/lib/x86_64-linux-gnu/',
'/usr/local/lib/'
],
libraries=[
'boost_program_options',
'boost_filesystem',
'opencv_world'
],
extra_compile_args=['-std=c++14'], )
#include_dirs=[np.get_include()], ),
#Extension(
# 'halide',
# ['texture_halide.cpp'],
...
@@ -3,24 +3,28 @@ import cv2
import pca as pca
from settings import logger
from reconstruction import reconstruction
from utility import import_dataset_module
-#model_texture_file = '/data/pca_ibug_texture_model.npy'
+model_texture_file = '/data/pca_ibug_texture_model.npy'
-#model_shape_file = '/data/pca_ibug_shape_model.npy'
+model_shape_file = '/data/pca_ibug_shape_model.npy'
-model_texture_file = '/data/pca_imm_texture_model.npy'
+#model_texture_file = '/data/pca_imm_texture_model.npy'
-model_shape_file = '/data/pca_imm_shape_model.npy'
+#model_shape_file = '/data/pca_imm_shape_model.npy'
def main():
def shape():
shape_components = 58
shape_model = pca.PCAModel(model_shape_file)
texture_model = pca.PCAModel(model_texture_file)
logger.info('using %s shape_components', shape_components)
-image_filename = '/data/imm_face_db/01-1m.asf'
+image_filename = '/data/imm_face_db/01-1m.jpg'
dataset_module = import_dataset_module('ibug')
dst_image = reconstruction.reconstruct_shape_texture(
-'imm',
+dataset_module,
shape_model,
texture_model,
image_filename,
@@ -30,5 +34,23 @@ def main():
cv2.imwrite('/data/reconstructed.png', dst_image)
def fit_model():
from reconstruction import fit
shape_components = 58
shape_model = pca.PCAModel(model_shape_file)
texture_model = pca.PCAModel(model_texture_file)
logger.info('using %s shape_components', shape_components)
image_filename = '/data/imm_face_db/01-1m.jpg'
dataset_module = import_dataset_module('ibug')
input_points = dataset_module.factory(filename=image_filename)
input_image = input_points.get_image()
fit.fit_model(input_image)
if __name__ == '__main__':
-main()
+#fit_model()
shape()