Commit 5fe5a737 authored by Patrik Huber

Merge branch 'devel'

# Resolved conflicts:
#	include/eos/fitting/RenderingParameters.hpp
#	include/eos/fitting/contour_correspondence.hpp
parents e00cfa0d 5d745ff8
[submodule "3rdparty/pybind11"]
path = 3rdparty/pybind11
url = https://github.com/pybind/pybind11.git
[submodule "3rdparty/nanoflann"]
path = 3rdparty/nanoflann
url = https://github.com/jlblancoc/nanoflann.git
[submodule "3rdparty/glm"]
path = 3rdparty/glm
url = https://github.com/g-truc/glm.git
[submodule "3rdparty/eigen3-nnls"]
path = 3rdparty/eigen3-nnls
url = https://github.com/hmatuschek/eigen3-nnls.git
Subproject commit d20add35bcfc9932671cab9ad786b24fd320a592
Subproject commit 206d65a8498a8dfb1910ac8d3de893286fa89e9a
@@ -96,6 +96,8 @@ message(STATUS "Eigen3 version: ${EIGEN3_VERSION}")
set(CEREAL_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/cereal-1.1.1/include")
set(glm_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/glm")
set(nanoflann_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/nanoflann/include")
set(eigen3_nnls_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/eigen3-nnls/src")
# Header files:
set(HEADERS
@@ -105,6 +107,7 @@ set(HEADERS
include/eos/morphablemodel/MorphableModel.hpp
include/eos/morphablemodel/Blendshape.hpp
include/eos/morphablemodel/coefficients.hpp
include/eos/morphablemodel/EdgeTopology.hpp
include/eos/morphablemodel/io/cvssp.hpp
include/eos/morphablemodel/io/mat_cerealisation.hpp
include/eos/fitting/affine_camera_estimation.hpp
@@ -116,6 +119,7 @@ set(HEADERS
include/eos/fitting/linear_shape_fitting.hpp
include/eos/fitting/contour_correspondence.hpp
include/eos/fitting/blendshape_fitting.hpp
include/eos/fitting/closest_edge_fitting.hpp
include/eos/fitting/fitting.hpp
include/eos/fitting/ceres_nonlinear.hpp
include/eos/fitting/RenderingParameters.hpp
@@ -136,6 +140,8 @@ include_directories(${Boost_INCLUDE_DIRS})
include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${EIGEN3_INCLUDE_DIR})
include_directories(${glm_INCLUDE_DIR})
include_directories(${nanoflann_INCLUDE_DIR})
include_directories(${eigen3_nnls_INCLUDE_DIR})
# Custom target for the library, to make the headers show up in IDEs:
add_custom_target(eos SOURCES ${HEADERS})
@@ -33,7 +33,11 @@ if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
endif()
# Model fitting (affine cam & shape to landmarks) example:
# Simple model fitting (orthographic camera & shape to landmarks) example:
add_executable(fit-model-simple fit-model-simple.cpp)
target_link_libraries(fit-model-simple ${OpenCV_LIBS} ${Boost_LIBRARIES})
# Model fitting example that fits orthographic camera, shape, blendshapes, and contours:
add_executable(fit-model fit-model.cpp)
target_link_libraries(fit-model ${OpenCV_LIBS} ${Boost_LIBRARIES})
@@ -55,6 +59,7 @@ target_link_libraries(generate-obj ${OpenCV_LIBS} ${Boost_LIBRARIES})
# install target:
install(TARGETS fit-model-simple DESTINATION bin)
install(TARGETS fit-model DESTINATION bin)
install(TARGETS generate-obj DESTINATION bin)
install(DIRECTORY ${CMAKE_SOURCE_DIR}/examples/data DESTINATION bin)
@@ -212,10 +212,9 @@ int main(int argc, char *argv[])
constexpr bool use_perspective = false;
// These will be the final 2D and 3D points used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// These will be the 2D image points and their corresponding 3D vertex id's used for the fitting:
vector<Vec2f> image_points; // the 2D landmark points
vector<int> vertex_indices; // their corresponding vertex indices
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
@@ -225,7 +224,6 @@ int main(int argc, char *argv[])
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
@@ -259,7 +257,7 @@ int main(int argc, char *argv[])
blendshape_coefficients.resize(6);
Problem camera_costfunction;
for (int i = 0; i < model_points.size(); ++i)
for (int i = 0; i < image_points.size(); ++i)
{
CostFunction* cost_function = new AutoDiffCostFunction<fitting::LandmarkCost, 2 /* num residuals */, 4 /* camera rotation (quaternion) */, num_cam_trans_intr_params /* camera translation & fov/frustum_scale */, 10 /* shape-coeffs */, 6 /* bs-coeffs */>(new fitting::LandmarkCost(morphable_model.get_shape_model(), blendshapes, image_points[i], vertex_indices[i], image.cols, image.rows, use_perspective));
camera_costfunction.AddResidualBlock(cost_function, NULL, &camera_rotation[0], &camera_translation_and_intrinsics[0], &shape_coefficients[0], &blendshape_coefficients[0]);
@@ -333,13 +331,11 @@ int main(int argc, char *argv[])
// Contour fitting:
// These are the additional contour-correspondences we're going to find and then use:
vector<Vec4f> model_points_contour; // the points in the 3D shape model
vector<int> vertex_indices_contour; // their vertex indices
vector<Vec2f> image_points_contour; // the corresponding 2D landmark points
vector<Vec2f> image_points_contour; // the 2D landmark points
vector<int> vertex_indices_contour; // their corresponding 3D vertex indices
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
std::tie(image_points_contour, model_points_contour, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks, ibug_contour, model_contour, glm::degrees(euler_angles[1]), morphable_model, t_mtx * rot_mtx, projection_mtx, viewport);
std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks, ibug_contour, model_contour, glm::degrees(euler_angles[1]), morphable_model.get_mean(), t_mtx * rot_mtx, projection_mtx, viewport);
using eos::fitting::concat;
model_points = concat(model_points, model_points_contour);
vertex_indices = concat(vertex_indices, vertex_indices_contour);
image_points = concat(image_points, image_points_contour);
@@ -347,7 +343,7 @@ int main(int argc, char *argv[])
start = std::chrono::steady_clock::now();
Problem fitting_costfunction;
// Landmark constraint:
for (int i = 0; i < model_points.size(); ++i)
for (int i = 0; i < image_points.size(); ++i)
{
CostFunction* cost_function = new AutoDiffCostFunction<fitting::LandmarkCost, 2 /* num residuals */, 4 /* camera rotation (quaternion) */, num_cam_trans_intr_params /* camera translation & focal length */, 10 /* shape-coeffs */, 6 /* bs-coeffs */>(new fitting::LandmarkCost(morphable_model.get_shape_model(), blendshapes, image_points[i], vertex_indices[i], image.cols, image.rows, use_perspective));
fitting_costfunction.AddResidualBlock(cost_function, NULL, &camera_rotation[0], &camera_translation_and_intrinsics[0], &shape_coefficients[0], &blendshape_coefficients[0]);
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: examples/fit-model-simple.cpp
*
* Copyright 2015 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/fitting/linear_shape_fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "boost/program_options.hpp"
#include "boost/filesystem.hpp"
#include <vector>
#include <iostream>
#include <fstream>
using namespace eos;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using eos::core::Landmark;
using eos::core::LandmarkCollection;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
using cv::Vec4f;
using std::cout;
using std::endl;
using std::vector;
using std::string;
/**
* Reads an ibug .pts landmark file and returns an ordered vector with
* the 68 2D landmark coordinates.
*
* @param[in] filename Path to a .pts file.
* @return An ordered vector with the 68 ibug landmarks.
*/
LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
{
using std::getline;
using cv::Vec2f;
using std::string;
LandmarkCollection<Vec2f> landmarks;
landmarks.reserve(68);
std::ifstream file(filename);
if (!file.is_open()) {
throw std::runtime_error(string("Could not open landmark file: " + filename));
}
string line;
// Skip the first 3 lines, they're header lines:
getline(file, line); // 'version: 1'
getline(file, line); // 'n_points : 68'
getline(file, line); // '{'
int ibugId = 1;
while (getline(file, line))
{
if (line == "}") { // end of the file
break;
}
std::stringstream lineStream(line);
Landmark<Vec2f> landmark;
landmark.name = std::to_string(ibugId);
if (!(lineStream >> landmark.coordinates[0] >> landmark.coordinates[1])) {
throw std::runtime_error(string("Landmark format error while parsing the line: " + line));
}
// From the iBug website:
// "Please note that the re-annotated data for this challenge are saved in the Matlab convention of 1 being
// the first index, i.e. the coordinates of the top left pixel in an image are x=1, y=1."
// ==> So we shift every point by 1:
landmark.coordinates[0] -= 1.0f;
landmark.coordinates[1] -= 1.0f;
landmarks.emplace_back(landmark);
++ibugId;
}
return landmarks;
};
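// For reference, a minimal sketch of the .pts layout this parser expects (the
// coordinate values below are hypothetical):
//
//   version: 1
//   n_points : 68
//   {
//   55.8 305.7
//   59.6 351.9
//   ... (66 more "x y" lines) ...
//   }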
/**
* This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from an ibug LFPW image with its landmarks.
*
* First, the 68 ibug landmarks are loaded from the .pts file and converted
* to vertex indices using the LandmarkMapper. Then, an orthographic camera
* is estimated, and then, using this camera matrix, the shape is fitted
* to the landmarks.
*/
int main(int argc, char *argv[])
{
fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, outputfile;
try {
po::options_description desc("Allowed options");
desc.add_options()
("help,h",
"display the help message")
("model,m", po::value<fs::path>(&modelfile)->required()->default_value("../share/sfm_shape_3448.bin"),
"a Morphable Model stored as cereal BinaryArchive")
("image,i", po::value<fs::path>(&imagefile)->required()->default_value("data/image_0010.png"),
"an input image")
("landmarks,l", po::value<fs::path>(&landmarksfile)->required()->default_value("data/image_0010.pts"),
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"),
"landmark identifier to model vertex number mapping")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("out"),
"basename for the output rendering and obj files")
;
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
if (vm.count("help")) {
cout << "Usage: fit-model-simple [options]" << endl;
cout << desc;
return EXIT_SUCCESS;
}
po::notify(vm);
}
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_SUCCESS;
}
// Load the image, landmarks, LandmarkMapper and the Morphable Model:
Mat image = cv::imread(imagefile.string());
LandmarkCollection<cv::Vec2f> landmarks;
try {
landmarks = read_pts_landmarks(landmarksfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error reading the landmarks: " << e.what() << endl;
return EXIT_FAILURE;
}
morphablemodel::MorphableModel morphable_model;
try {
morphable_model = morphablemodel::load_model(modelfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error loading the Morphable Model: " << e.what() << endl;
return EXIT_FAILURE;
}
core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile);
// Draw the loaded landmarks:
Mat outimg = image.clone();
for (auto&& lm : landmarks) {
cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 });
}
// These will be the final 2D and 3D points used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
// Estimate the camera (pose) from the 2D - 3D point correspondences
fitting::ScaledOrthoProjectionParameters pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image.rows);
fitting::RenderingParameters rendering_params(pose, image.cols, image.rows);
// The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
// and similarly for pitch and roll.
// Estimate the shape coefficients by fitting the shape to the landmarks:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices);
// Obtain the full mesh with the estimated coefficients:
render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
// Extract the texture from the image using given mesh and camera parameters:
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Save the mesh as textured obj:
outputfile += fs::path(".obj");
render::write_textured_obj(mesh, outputfile.string());
// And save the isomap:
outputfile.replace_extension(".isomap.png");
cv::imwrite(outputfile.string(), isomap);
cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile.stem().stem() << "." << endl;
return EXIT_SUCCESS;
}
@@ -3,7 +3,7 @@
*
* File: examples/fit-model.cpp
*
* Copyright 2015 Patrik Huber
* Copyright 2016 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,9 +19,9 @@
*/
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/fitting/linear_shape_fitting.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"
#include "eos/fitting/fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
@@ -100,18 +100,46 @@ LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
return landmarks;
};
/**
* Draws the given mesh as wireframe into the image.
*
* It does backface culling, i.e. draws only triangles whose vertices are in CCW order in screen space.
*
* @param[in] image An image to draw into.
* @param[in] mesh The mesh to draw.
* @param[in] modelview Model-view matrix to draw the mesh.
* @param[in] projection Projection matrix to draw the mesh.
* @param[in] viewport Viewport to draw the mesh.
* @param[in] colour Colour of the mesh to be drawn.
*/
void draw_wireframe(cv::Mat image, const eos::render::Mesh& mesh, glm::mat4x4 modelview, glm::mat4x4 projection, glm::vec4 viewport, cv::Scalar colour = cv::Scalar(0, 255, 0, 255))
{
for (const auto& triangle : mesh.tvi)
{
const auto p1 = glm::project({ mesh.vertices[triangle[0]][0], mesh.vertices[triangle[0]][1], mesh.vertices[triangle[0]][2] }, modelview, projection, viewport);
const auto p2 = glm::project({ mesh.vertices[triangle[1]][0], mesh.vertices[triangle[1]][1], mesh.vertices[triangle[1]][2] }, modelview, projection, viewport);
const auto p3 = glm::project({ mesh.vertices[triangle[2]][0], mesh.vertices[triangle[2]][1], mesh.vertices[triangle[2]][2] }, modelview, projection, viewport);
if (eos::render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3)))
{
cv::line(image, cv::Point(p1.x, p1.y), cv::Point(p2.x, p2.y), colour);
cv::line(image, cv::Point(p2.x, p2.y), cv::Point(p3.x, p3.y), colour);
cv::line(image, cv::Point(p3.x, p3.y), cv::Point(p1.x, p1.y), colour);
}
}
};
/**
* This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from an ibug LFPW image with its landmarks.
* In addition to fit-model-simple, this example uses blendshapes, contour-
* fitting, and can iterate the fitting.
*
* First, the 68 ibug landmarks are loaded from the .pts file and converted
* to vertex indices using the LandmarkMapper. Then, an affine camera matrix
* is estimated, and then, using this camera matrix, the shape is fitted
* to the landmarks.
* 68 ibug landmarks are loaded from the .pts file and converted
* to vertex indices using the LandmarkMapper.
*/
int main(int argc, char *argv[])
{
fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, outputfile;
fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, contourfile, edgetopologyfile, blendshapesfile, outputfile;
try {
po::options_description desc("Allowed options");
desc.add_options()
@@ -125,6 +153,12 @@ int main(int argc, char *argv[])
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"),
"landmark identifier to model vertex number mapping")
("model-contour,c", po::value<fs::path>(&contourfile)->required()->default_value("../share/model_contours.json"),
"file with model contour indices")
("edge-topology,e", po::value<fs::path>(&edgetopologyfile)->required()->default_value("../share/sfm_3448_edge_topology.json"),
"file with model's precomputed edge topology")
("blendshapes,b", po::value<fs::path>(&blendshapesfile)->required()->default_value("../share/expression_blendshapes_3448.bin"),
"file with blendshapes")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("out"),
"basename for the output rendering and obj files")
;
@@ -161,52 +195,45 @@ int main(int argc, char *argv[])
cout << "Error loading the Morphable Model: " << e.what() << endl;
return EXIT_FAILURE;
}
// The landmark mapper is used to map ibug landmark identifiers to vertex ids:
core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile);
// The expression blendshapes:
vector<morphablemodel::Blendshape> blendshapes = morphablemodel::load_blendshapes(blendshapesfile.string());
// These two are used to fit the front-facing contour to the ibug contour landmarks:
fitting::ModelContour model_contour = contourfile.empty() ? fitting::ModelContour() : fitting::ModelContour::load(contourfile.string());
fitting::ContourLandmarks ibug_contour = fitting::ContourLandmarks::load(mappingsfile.string());
// The edge topology is used to speed up computation of the occluding face contour fitting:
morphablemodel::EdgeTopology edge_topology = morphablemodel::load_edge_topology(edgetopologyfile.string());
// Draw the loaded landmarks:
Mat outimg = image.clone();
for (auto&& lm : landmarks) {
cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 });
}
// These will be the final 2D and 3D points used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
// Estimate the camera (pose) from the 2D - 3D point correspondences
fitting::ScaledOrthoProjectionParameters pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image.rows);
fitting::RenderingParameters rendering_params(pose, image.cols, image.rows);
// Fit the model, get back a mesh and the pose:
render::Mesh mesh;
fitting::RenderingParameters rendering_params;
std::tie(mesh, rendering_params) = fitting::fit_shape_and_pose(morphable_model, blendshapes, landmarks, landmark_mapper, image.cols, image.rows, edge_topology, ibug_contour, model_contour, 50, boost::none, 30.0f);
// The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
// and similarly for pitch and roll.
// Estimate the shape coefficients by fitting the shape to the landmarks:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices);
// Obtain the full mesh with the estimated coefficients:
render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
// Extract the texture from the image using given mesh and camera parameters:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Draw the fitted mesh as wireframe, and save the image:
draw_wireframe(outimg, mesh, rendering_params.get_modelview(), rendering_params.get_projection(), fitting::get_opencv_viewport(image.cols, image.rows));
outputfile += fs::path(".png");
cv::imwrite(outputfile.string(), outimg);
// Save the mesh as textured obj:
outputfile += fs::path(".obj");
outputfile.replace_extension(".obj");
render::write_textured_obj(mesh, outputfile.string());
// And save the isomap:
@@ -125,18 +125,19 @@ public:
};
// This assumes estimate_sop was run on points with OpenCV viewport! I.e. y flipped.
RenderingParameters(ScaledOrthoProjectionParameters ortho_params, int image_width, int image_height) {
camera_type = CameraType::Orthographic;
RenderingParameters(ScaledOrthoProjectionParameters ortho_params, int screen_width, int screen_height) : camera_type(CameraType::Orthographic), t_x(ortho_params.tx), t_y(ortho_params.ty), screen_width(screen_width), screen_height(screen_height) {
rotation = glm::quat(ortho_params.R);
t_x = ortho_params.tx;
t_y = ortho_params.ty;
const auto l = 0.0;
const auto r = image_width / ortho_params.s;
const auto r = screen_width / ortho_params.s;
const auto b = 0.0; // The b and t values are not tested for what happens if the SOP parameters
const auto t = image_height / ortho_params.s; // were estimated on a non-flipped viewport.
const auto t = screen_height / ortho_params.s; // were estimated on a non-flipped viewport.
frustum = Frustum(l, r, b, t);
};
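// (Worked example with hypothetical values: for a 1280 x 720 image and an estimated
// scale of s = 0.93, this yields a frustum of [0, 1376.3] x [0, 774.2].)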
auto get_camera_type() const {
return camera_type;
};
glm::quat get_rotation() const {
return rotation;
};
@@ -158,6 +159,18 @@ public:
}
};
Frustum get_frustum() const {
return frustum;
};
int get_screen_width() const {
return screen_width;
};
int get_screen_height() const {
return screen_height;
};
private:
CameraType camera_type = CameraType::Orthographic;
Frustum frustum; // Can construct a glm::ortho or glm::perspective matrix from this.
@@ -24,6 +24,9 @@
#include "eos/morphablemodel/Blendshape.hpp"
#include "Eigen/Core" // for nnls.h
#include "nnls.h"
#include "opencv2/core/core.hpp"
#include <vector>
@@ -34,10 +37,15 @@ namespace eos {
/**
* Fits blendshape coefficients to given 2D landmarks, given a current face shape instance.
* It's a linear, closed-form solution fitting algorithm, with regularisation (constraining the L2-norm of the coefficients).
* It's a linear, closed-form solution fitting algorithm, with regularisation (constraining
* the L2-norm of the coefficients). However, there is no constraint on the coefficients,
* so negative coefficients are allowed, which, with linear blendshapes (offsets), will most
* likely not be desired. Thus, prefer the function
* fit_blendshapes_to_landmarks_nnls(std::vector<eos::morphablemodel::Blendshape>, cv::Mat, cv::Mat, std::vector<cv::Vec2f>, std::vector<int>).
*
* This algorithm is very similar to the shape fitting in fit_shape_to_landmarks_linear. Instead of the PCA basis, the blendshapes are used, and instead of
* the mean, a current face instance is used to do the fitting from.
* This algorithm is very similar to the shape fitting in fit_shape_to_landmarks_linear.
* Instead of the PCA basis, the blendshapes are used, and instead of the mean, a current
* face instance is used to do the fitting from.
*
* @param[in] blendshapes A vector with blendshapes to estimate the coefficients for.
* @param[in] face_instance A shape instance from which the blendshape coefficients should be estimated (i.e. the current mesh without expressions, e.g. estimated from a previous PCA-model fitting). A 3m x 1 matrix.
@@ -108,7 +116,88 @@ inline std::vector<float> fit_blendshapes_to_landmarks_linear(std::vector<eos::m
Mat c_s;
bool non_singular = cv::solve(AtAReg, -A.t() * b, c_s, cv::DECOMP_SVD); // DECOMP_SVD calculates the pseudo-inverse if the matrix is not invertible.
// Because we're using SVD, non_singular will always be true. If we were to use e.g. Cholesky, we could return an expected<T>.
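// (For reference, a sketch of the underlying maths: assuming AtAReg = A^t*A + lambda*I
// from the regularisation step above, this solve implements the regularised normal
// equations (A^t*A + lambda*I) * c_s = -A^t*b, i.e. the minimiser of ||A*c + b||^2 + lambda*||c||^2.)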
return std::vector<float>(c_s);
};
/**
* Fits blendshape coefficients to given 2D landmarks, given a current face shape instance.
* Uses non-negative least-squares (NNLS) to solve for the coefficients. The NNLS algorithm
* used doesn't support any regularisation.
*
* This algorithm is very similar to the shape fitting in fit_shape_to_landmarks_linear.
* Instead of the PCA basis, the blendshapes are used, and instead of the mean, a current
* face instance is used to do the fitting from.
*
* @param[in] blendshapes A vector with blendshapes to estimate the coefficients for.
* @param[in] face_instance A shape instance from which the blendshape coefficients should be estimated (i.e. the current mesh without expressions, e.g. estimated from a previous PCA-model fitting). A 3m x 1 matrix.
* @param[in] affine_camera_matrix A 3x4 affine camera matrix from model to screen-space (should probably be of type CV_32FC1 as all our calculations are done with float).
* @param[in] landmarks 2D landmarks from an image to fit the blendshapes to.
* @param[in] vertex_ids The vertex ids in the model that correspond to the 2D points.
* @return The estimated blendshape-coefficients.
*/
inline std::vector<float> fit_blendshapes_to_landmarks_nnls(std::vector<eos::morphablemodel::Blendshape> blendshapes, cv::Mat face_instance, cv::Mat affine_camera_matrix, std::vector<cv::Vec2f> landmarks, std::vector<int> vertex_ids)
{
using cv::Mat;
assert(landmarks.size() == vertex_ids.size());
int num_coeffs_to_fit = blendshapes.size();
int num_landmarks = static_cast<int>(landmarks.size());
// Copy all blendshapes into a "basis" matrix with each blendshape being a column:
cv::Mat blendshapes_as_basis(blendshapes[0].deformation.rows, blendshapes.size(), CV_32FC1); // assert blendshapes.size() > 0 and all of them have same number of rows, and 1 col
for (int i = 0; i < blendshapes.size(); ++i)
{
blendshapes[i].deformation.copyTo(blendshapes_as_basis.col(i));
}
// $\hat{V} \in R^{3N\times m}$ (with m the number of blendshapes), subselect the rows of the blendshape basis matrix associated with the $N$ feature points
// And we insert a row of zeros after every third row, resulting in matrix $\hat{V}_h \in R^{4N\times m-1}$:
Mat V_hat_h = Mat::zeros(4 * num_landmarks, num_coeffs_to_fit, CV_32FC1);
int row_index = 0;
for (int i = 0; i < num_landmarks; ++i) {
Mat basis_rows = blendshapes_as_basis.rowRange(vertex_ids[i] * 3, (vertex_ids[i] * 3) + 3);
basis_rows.colRange(0, num_coeffs_to_fit).copyTo(V_hat_h.rowRange(row_index, row_index + 3)); // Todo: I think we can remove colRange here, as we always want to use all given blendshapes
row_index += 4; // replace 3 rows and skip the 4th one, it has all zeros
}
// Form a block diagonal matrix $P \in R^{3N\times 4N}$ in which the camera matrix C (P_Affine, affine_camera_matrix) is placed on the diagonal:
Mat P = Mat::zeros(3 * num_landmarks, 4 * num_landmarks, CV_32FC1);
for (int i = 0; i < num_landmarks; ++i) {
Mat submatrix_to_replace = P.colRange(4 * i, (4 * i) + 4).rowRange(3 * i, (3 * i) + 3);
affine_camera_matrix.copyTo(submatrix_to_replace);
}
// The landmarks in matrix notation (in homogeneous coordinates), $3N\times 1$
Mat y = Mat::ones(3 * num_landmarks, 1, CV_32FC1);
for (int i = 0; i < num_landmarks; ++i) {
y.at<float>(3 * i, 0) = landmarks[i][0];
y.at<float>((3 * i) + 1, 0) = landmarks[i][1];
//y.at<float>((3 * i) + 2, 0) = 1; // already 1, stays (homogeneous coordinate)
}
// The current face instance, with an added homogeneous coordinate (x_1, y_1, z_1, 1, x_2, ...)^t
Mat v_bar = Mat::ones(4 * num_landmarks, 1, CV_32FC1);
for (int i = 0; i < num_landmarks; ++i) {
cv::Vec4f model_mean(face_instance.at<float>(vertex_ids[i]*3), face_instance.at<float>(vertex_ids[i]*3 + 1), face_instance.at<float>(vertex_ids[i]*3 + 2), 1.0f);
v_bar.at<float>(4 * i, 0) = model_mean[0];
v_bar.at<float>((4 * i) + 1, 0) = model_mean[1];
v_bar.at<float>((4 * i) + 2, 0) = model_mean[2];
//v_bar.at<float>((4 * i) + 3, 0) = 1; // already 1, stays (homogeneous coordinate)
// note: now that a Vec4f is returned, we could use copyTo?
}
// Bring into standard regularised quadratic form:
Mat A = P * V_hat_h; // camera matrix times the basis
Mat b = P * v_bar - y; // camera matrix times the mean, minus the landmarks.
// Solve using NNLS:
using RowMajorMatrixXf = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
Eigen::Map<RowMajorMatrixXf> A_Eigen(A.ptr<float>(), A.rows, A.cols);
Eigen::Map<RowMajorMatrixXf> b_Eigen(b.ptr<float>(), b.rows, b.cols);
Eigen::VectorXf x;
bool non_singular = Eigen::NNLS<Eigen::MatrixXf>::solve(A_Eigen, -b_Eigen, x);
Mat c_s(x.rows(), x.cols(), CV_32FC1, x.data()); // create an OpenCV Mat header for the Eigen data
return std::vector<float>(c_s);
};
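// A minimal usage sketch (assuming 'blendshapes', 'pca_model_shape', 'affine_camera_matrix',
// 'image_points' and 'vertex_indices' have been set up as in fit_shape() in fitting.hpp):
//
//   std::vector<float> bs_coeffs = fit_blendshapes_to_landmarks_nnls(
//       blendshapes, pca_model_shape, affine_camera_matrix, image_points, vertex_indices);
//   // All entries of bs_coeffs are >= 0, in contrast to the linear, regularised variant above.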
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: include/eos/fitting/closest_edge_fitting.hpp
*
* Copyright 2016 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef CLOSESTEDGEFITTING_HPP_
#define CLOSESTEDGEFITTING_HPP_
#include "eos/morphablemodel/EdgeTopology.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/render/Mesh.hpp"
#include "eos/render/utils.hpp"
#include "nanoflann.hpp"
#include "glm/common.hpp"
#include "glm/vec3.hpp"
#include "glm/vec4.hpp"
#include "glm/mat4x4.hpp"
#include "Eigen/Dense" // Need only Vector2f actually.
#include "opencv2/core/core.hpp"
#include "boost/optional.hpp"
#include <vector>
#include <algorithm>
#include <utility>
#include <cstddef>
namespace eos {
namespace fitting {
/**
* @brief Computes the intersection of the given ray with the given triangle.
*
* Uses the Möller-Trumbore algorithm from the paper "Fast, Minimum Storage
* Ray/Triangle Intersection". Independent implementation, inspired by:
* http://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/moller-trumbore-ray-triangle-intersection
* The default eps (1e-6f) is from the paper.
* When culling is on, rays intersecting triangles from the back will be discarded -
* otherwise, the triangle's normal direction w.r.t. the ray direction is just ignored.
*
* Note: The use of optional might turn out to be a performance problem, as this
* function is called a large number of times - how costly is it to construct a boost::none optional?
*
* @param[in] ray_origin Ray origin.
* @param[in] ray_direction Ray direction.
* @param[in] v0 First vertex of a triangle.
* @param[in] v1 Second vertex of a triangle.
* @param[in] v2 Third vertex of a triangle.
* @param[in] enable_backculling When culling is on, rays intersecting triangles from the back will be discarded.
* @return Whether the ray intersects the triangle, and, if it does, the distance t along the ray.
*/
inline std::pair<bool, boost::optional<float>> ray_triangle_intersect(const glm::vec3& ray_origin, const glm::vec3& ray_direction, const glm::vec3& v0, const glm::vec3& v1, const glm::vec3& v2, bool enable_backculling)
{
using glm::vec3;
const float epsilon = 1e-6f;
vec3 v0v1 = v1 - v0;
vec3 v0v2 = v2 - v0;
vec3 pvec = glm::cross(ray_direction, v0v2);
float det = glm::dot(v0v1, pvec);
if (enable_backculling)
{
// If det is negative, the triangle is back-facing.
// If det is close to 0, the ray misses the triangle.
if (det < epsilon)
return { false, boost::none };
}
else {
// If det is close to 0, the ray and triangle are parallel.
if (std::abs(det) < epsilon)
return { false, boost::none };
}
float inv_det = 1 / det;
vec3 tvec = ray_origin - v0;
auto u = glm::dot(tvec, pvec) * inv_det;
if (u < 0 || u > 1)
return { false, boost::none };
vec3 qvec = glm::cross(tvec, v0v1);
auto v = glm::dot(ray_direction, qvec) * inv_det;
if (v < 0 || u + v > 1)
return { false, boost::none };
auto t = glm::dot(v0v2, qvec) * inv_det;
return { true, t };
};
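// A small worked example with hypothetical values: a ray from the origin along +z,
// cast against a triangle lying in the z = 1 plane, intersects at distance t = 1:
//
//   const auto hit = ray_triangle_intersect(
//       glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f), // origin, direction
//       glm::vec3(-1.0f, -1.0f, 1.0f), glm::vec3(1.0f, -1.0f, 1.0f), glm::vec3(0.0f, 1.0f, 1.0f),
//       false); // no backculling
//   // hit.first == true, hit.second.get() == 1.0f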
/**
* @brief Computes the vertices that lie on occluding boundaries, given a particular pose.
*
* This algorithm computes the edges that lie on occluding boundaries of the mesh.
* It performs a visibility test of each vertex, and returns a list of the (unique)
* vertices that make up the boundary edges.
* An edge is defined as occluding if the normals of its two adjacent faces differ in the sign of their z-component.
*
* @param[in] mesh The mesh to use.
* @param[in] edge_topology The edge topology of the given mesh.
* @param[in] R The rotation (pose) under which the occluding boundaries should be computed.
* @return A vector with unique vertex id's making up the edges.
*/
inline std::vector<int> occluding_boundary_vertices(const render::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, glm::mat4x4 R)
{
// Rotate the mesh:
std::vector<glm::vec4> rotated_vertices;
std::for_each(begin(mesh.vertices), end(mesh.vertices), [&rotated_vertices, &R](auto&& v) { rotated_vertices.push_back(R * v); });
// Compute the face normals of the rotated mesh:
std::vector<glm::vec3> facenormals;
for (auto&& f : mesh.tvi) { // for each face (triangle):
auto n = render::compute_face_normal(rotated_vertices[f[0]], rotated_vertices[f[1]], rotated_vertices[f[2]]);
facenormals.push_back(n);
}
// Find occluding edges:
std::vector<int> occluding_edges_indices;
for (int edge_idx = 0; edge_idx < edge_topology.adjacent_faces.size(); ++edge_idx) // For each edge... Ef contains the indices of the two adjacent faces
{
const auto& edge = edge_topology.adjacent_faces[edge_idx];
if (edge[0] == 0) // ==> NOTE/Todo Need to change this if we use 0-based indexing!
{
// Edges with a zero index lie on the mesh boundary, i.e. they are only
// adjacent to one face.
continue;
}
// Compute the occluding edges as those where the two adjacent face normals
// differ in the sign of their z-component:
// Changing from 1-based indexing to 0-based!
if (glm::sign(facenormals[edge[0] - 1].z) != glm::sign(facenormals[edge[1] - 1].z))
{
// It's an occluding edge, store the index:
occluding_edges_indices.push_back(edge_idx);
}
}
// Select the vertices lying at the two ends of the occluding edges and remove duplicates:
// (This is what EdgeTopology::adjacent_vertices is needed for).
std::vector<int> occluding_vertices; // The model's contour vertices
for (auto&& edge_idx : occluding_edges_indices)
{
// Changing from 1-based indexing to 0-based!
occluding_vertices.push_back(edge_topology.adjacent_vertices[edge_idx][0] - 1);
occluding_vertices.push_back(edge_topology.adjacent_vertices[edge_idx][1] - 1);
}
// Remove duplicate vertex id's (std::unique works only on sorted sequences):
std::sort(begin(occluding_vertices), end(occluding_vertices));
occluding_vertices.erase(std::unique(begin(occluding_vertices), end(occluding_vertices)), end(occluding_vertices));
// Perform ray-casting to find out which vertices are not visible (i.e. self-occluded):
std::vector<bool> visibility;
for (const auto& vertex_idx : occluding_vertices)
{
bool visible = true;
// For every tri of the rotated mesh:
for (auto&& tri : mesh.tvi)
{
auto& v0 = rotated_vertices[tri[0]];
auto& v1 = rotated_vertices[tri[1]];
auto& v2 = rotated_vertices[tri[2]];
glm::vec3 ray_origin = rotated_vertices[vertex_idx];
glm::vec3 ray_direction(0.0f, 0.0f, 1.0f); // we shoot the ray from the vertex towards the camera
auto intersect = ray_triangle_intersect(ray_origin, ray_direction, glm::vec3(v0), glm::vec3(v1), glm::vec3(v2), false);
// first is bool intersect, second is the distance t
if (intersect.first == true)
{
// We've hit a triangle. If the intersection is at (or extremely close to) the ray
// origin, the ray has just hit its own triangle - ignore that intersection:
if (intersect.second.get() <= 1e-4)
{
continue; // the intersection is behind the vertex, we don't care about it
}
// Otherwise, we've hit a genuine triangle, and the vertex is not visible:
visible = false;
break;
}
}
visibility.push_back(visible);
}
// Remove vertices from occluding boundary list that are not visible:
std::vector<int> final_vertex_ids;
for (int i = 0; i < occluding_vertices.size(); ++i)
{
if (visibility[i] == true)
{
final_vertex_ids.push_back(occluding_vertices[i]);
}
}
return final_vertex_ids;
};
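// Usage sketch (assuming 'mesh', 'edge_topology' and 'rendering_parameters' as in
// find_occluding_edge_correspondences() below; a rotation-only modelview suffices):
//
//   const auto boundary_vertex_ids = occluding_boundary_vertices(
//       mesh, edge_topology, glm::mat4x4(rendering_parameters.get_rotation()));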
/** A simple vector-of-vectors adaptor for nanoflann, without duplicating the storage.
* The i'th vector represents a point in the state space.
*
* This adaptor is from the nanoflann examples and shows how to use it with these types of containers:
* typedef std::vector<std::vector<double> > my_vector_of_vectors_t;
* typedef std::vector<Eigen::VectorXd> my_vector_of_vectors_t;
*
* It works with any type inside the vector that has operator[] defined to access
* its elements, as well as a ::size() operator to return its number of dimensions.
* Eigen::VectorX is one of them. cv::Point is not (no [] and no size()), glm is also
* not (no size()).
*
* \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations.
* \tparam num_t The type of the point coordinates (typically, double or float).
* \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
* \tparam IndexType The type for indices in the KD-tree index (typically, size_t or int)
*/
template <class VectorOfVectorsType, typename num_t = double, int DIM = -1, class Distance = nanoflann::metric_L2, typename IndexType = size_t>
struct KDTreeVectorOfVectorsAdaptor
{
typedef KDTreeVectorOfVectorsAdaptor<VectorOfVectorsType, num_t, DIM, Distance> self_t;
typedef typename Distance::template traits<num_t, self_t>::distance_t metric_t;
typedef nanoflann::KDTreeSingleIndexAdaptor<metric_t, self_t, DIM, IndexType> index_t;
index_t* index; //! The kd-tree index for the user to call its methods as usual with any other FLANN index.
/// Constructor: takes a const ref to the vector of vectors object with the data points
// Make sure the data is kept alive while the kd-tree is in use.
KDTreeVectorOfVectorsAdaptor(const int dimensionality, const VectorOfVectorsType &mat, const int leaf_max_size = 10) : m_data(mat)
{
assert(mat.size() != 0 && mat[0].size() != 0);
const size_t dims = mat[0].size();
if (DIM>0 && static_cast<int>(dims) != DIM)
throw std::runtime_error("Data set dimensionality does not match the 'DIM' template argument");
index = new index_t(dims, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size));
index->buildIndex();
}
~KDTreeVectorOfVectorsAdaptor() {
delete index;
}
const VectorOfVectorsType &m_data;
/** Query for the \a num_closest closest points to a given point (entered as query_point[0:dim-1]).
* Note that this is a short-cut method for index->findNeighbors().
* The user can also call index->... methods as desired.
* \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface.
*/
inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int nChecks_IGNORED = 10) const
{
nanoflann::KNNResultSet<num_t, IndexType> resultSet(num_closest);
resultSet.init(out_indices, out_distances_sq);
index->findNeighbors(resultSet, query_point, nanoflann::SearchParams());
}
/** @name Interface expected by KDTreeSingleIndexAdaptor
* @{ */
const self_t & derived() const {
return *this;
}
self_t & derived() {
return *this;
}
// Must return the number of data points
inline size_t kdtree_get_point_count() const {
return m_data.size();
}
// Returns the distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class:
inline num_t kdtree_distance(const num_t *p1, const size_t idx_p2, size_t size) const
{
num_t s = 0;
for (size_t i = 0; i<size; i++) {
const num_t d = p1[i] - m_data[idx_p2][i];
s += d*d;
}
return s;
}
// Returns the dim'th component of the idx'th point in the class:
inline num_t kdtree_get_pt(const size_t idx, int dim) const {
return m_data[idx][dim];
}
// Optional bounding-box computation: return false to default to a standard bbox computation loop.
// Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again.
// Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)
template <class BBOX>
bool kdtree_get_bbox(BBOX & /*bb*/) const {
return false;
}
/** @} */
};
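// A minimal sketch of using the adaptor with 2D Eigen points (hypothetical data):
//
//   std::vector<Eigen::Vector2f> points;
//   points.emplace_back(0.0f, 0.0f);
//   points.emplace_back(1.0f, 0.0f);
//   points.emplace_back(0.0f, 2.0f);
//   using kd_tree_t = KDTreeVectorOfVectorsAdaptor<std::vector<Eigen::Vector2f>, float, 2>;
//   kd_tree_t tree(2, points); // dimensionality, data
//   const float query_point[2] = { 0.9f, 0.1f };
//   std::size_t nearest_idx;
//   float dist_sq;
//   tree.query(query_point, 1, &nearest_idx, &dist_sq); // nearest_idx == 1, dist_sq == 0.02f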
/**
* @brief For a given list of 2D edge points, find corresponding 3D vertex IDs.
*
* This algorithm first computes the 3D mesh's occluding boundary vertices under
* the given pose. Then, for each 2D image edge point given, it searches for the
* closest 3D edge vertex (projected to 2D). Correspondences lying further away
* than \c distance_threshold (times a scale-factor) are discarded.
* It returns a list of the remaining image edge points and their corresponding
* 3D vertex ID.
*
* The given \c rendering_parameters camera_type must be CameraType::Orthographic.
*
* The units of \c distance_threshold are somewhat complicated. The function
* uses squared distances, and the \c distance_threshold is further multiplied
* with a face-size and image resolution dependent scale factor.
* It's reasonable to use correspondences that are 10 to 15 pixels away on a
* 1280x720 image with s=0.93. This would be a distance_threshold of around 200.
* 64 might be a conservative default.
*
* @param[in] mesh The 3D mesh.
* @param[in] edge_topology The mesh's edge topology (used for fast computation).
* @param[in] rendering_parameters Rendering (pose) parameters of the mesh.
* @param[in] image_edges A list of points that are edges.
* @param[in] distance_threshold Threshold for the (scaled) squared distance; correspondences further away are discarded (see above).
* @return A pair consisting of the used image edge points and their associated 3D vertex index.
*/
inline std::pair<std::vector<cv::Vec2f>, std::vector<int>> find_occluding_edge_correspondences(const render::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, const fitting::RenderingParameters& rendering_parameters, const std::vector<Eigen::Vector2f>& image_edges, float distance_threshold = 64.0f)
{
assert(rendering_parameters.get_camera_type() == fitting::CameraType::Orthographic);
using std::vector;
using Eigen::Vector2f;
// Compute vertices that lie on occluding boundaries:
auto occluding_vertices = occluding_boundary_vertices(mesh, edge_topology, glm::mat4x4(rendering_parameters.get_rotation()));
// Project these occluding boundary vertices from 3D to 2D:
vector<Vector2f> model_edges_projected;
for (const auto& v : occluding_vertices)
{
auto p = glm::project({ mesh.vertices[v][0], mesh.vertices[v][1], mesh.vertices[v][2] }, rendering_parameters.get_modelview(), rendering_parameters.get_projection(), fitting::get_opencv_viewport(rendering_parameters.get_screen_width(), rendering_parameters.get_screen_height()));
model_edges_projected.push_back({ p.x, p.y });
}
// Find edge correspondences:
// Build a kd-tree and use nearest neighbour search:
using kd_tree_t = KDTreeVectorOfVectorsAdaptor<vector<Vector2f>, float, 2>;
kd_tree_t tree(2, image_edges); // dimensionality, data points (leaf_max_size left at its default)
tree.index->buildIndex();
vector<std::pair<std::size_t, double>> idx_d; // will contain [ index to the 2D edge in 'image_edges', distance (L2^2) ]
for (const auto& e : model_edges_projected)
{
std::size_t idx; // contains the indices into the original 'image_edges' vector
double dist_sq; // squared distances
nanoflann::KNNResultSet<double> resultSet(1);
resultSet.init(&idx, &dist_sq);
tree.index->findNeighbors(resultSet, e.data(), nanoflann::SearchParams(10));
idx_d.push_back({ idx, dist_sq });
}
// Filter edge matches:
// We filter below by discarding all correspondences that are more than a certain distance apart.
// We could also (or in addition to) discard the worst 5% of the distances or something like that.
// Filter and store the image (edge) points with their corresponding vertex id:
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
assert(occluding_vertices.size() == idx_d.size());
for (int i = 0; i < occluding_vertices.size(); ++i)
{
auto ortho_scale = rendering_parameters.get_screen_width() / rendering_parameters.get_frustum().r; // This might be a bit of a hack - we recover the "real" scaling from the SOP estimate
if (idx_d[i].second <= distance_threshold * ortho_scale) // I think multiplying by the scale is good here and gives us invariance w.r.t. the image resolution and face size.
{
auto edge_point = image_edges[idx_d[i].first];
// Store the found 2D edge point, and the associated vertex id:
vertex_indices.push_back(occluding_vertices[i]);
image_points.push_back(cv::Vec2f(edge_point[0], edge_point[1]));
}
}
return { image_points, vertex_indices };
};
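// Usage sketch - the image edges can come from any edge detector; a hypothetical
// preprocessing with cv::Canny (requires opencv2/imgproc) could look like:
//
//   cv::Mat edge_image;
//   cv::Canny(image_grey, edge_image, 100.0, 200.0); // image_grey: assumed 8-bit greyscale input
//   std::vector<Eigen::Vector2f> image_edges;
//   for (int r = 0; r < edge_image.rows; ++r)
//       for (int c = 0; c < edge_image.cols; ++c)
//           if (edge_image.at<uchar>(r, c) > 0)
//               image_edges.push_back(Eigen::Vector2f(c, r)); // (x, y)
//   std::vector<cv::Vec2f> edge_points;
//   std::vector<int> edge_vertex_ids;
//   std::tie(edge_points, edge_vertex_ids) = find_occluding_edge_correspondences(
//       mesh, edge_topology, rendering_parameters, image_edges);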
} /* namespace fitting */
} /* namespace eos */
#endif /* CLOSESTEDGEFITTING_HPP_ */
@@ -46,7 +46,8 @@ namespace eos {
struct ModelContour;
struct ContourLandmarks;
std::pair<std::vector<std::string>, std::vector<int>> select_contour(float yaw_angle, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour);
std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const morphablemodel::MorphableModel& morphable_model, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport);
std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const render::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport);
/**
* @brief Definition of the vertex indices that define the right and left model contour.
@@ -119,6 +120,7 @@ struct ModelContour
*
* Note: Better names could be ContourDefinition or ImageContourLandmarks, to
* disambiguate 3D and 2D landmarks?
* Todo: I think this should go into the LandmarkMapper. Isn't it part of ibug2did.txt already?
*/
struct ContourLandmarks
{
@@ -191,19 +193,19 @@ struct ContourLandmarks
* @param[in] landmarks All image landmarks.
* @param[in] contour_landmarks 2D image contour ids of left or right side (for example for ibug landmarks).
* @param[in] model_contour The model contour indices that should be considered to find the closest corresponding 3D vertex.
* @param[in] yaw_angle Yaw angle of the current fitting. The front-facing contour will be chosen depending on this yaw angle.
* @param[in] morphable_model A Morphable Model whose mean is used.
* @param[in] yaw_angle Yaw angle of the current fitting, in degrees. The front-facing contour will be chosen depending on this yaw angle.
* @param[in] mesh The mesh that's used to find the nearest contour points.
* @param[in] view_model Model-view matrix of the current fitting to project the 3D model vertices to 2D.
* @param[in] ortho_projection Projection matrix to project the 3D model vertices to 2D.
* @param[in] viewport Current viewport to use.
* @return A tuple with the 2D contour landmark points, the corresponding points in the 3D shape model and their vertex indices.
*/
inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour, float yaw_angle, const morphablemodel::MorphableModel& morphable_model, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour, float yaw_angle, const render::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
{
// Select which side of the contour we'll use:
std::vector<int> model_contour_indices;
std::vector<std::string> landmark_contour_identifiers;
std::tie(landmark_contour_identifiers, model_contour_indices) = select_contour(glm::degrees(yaw_angle), contour_landmarks, model_contour);
std::tie(landmark_contour_identifiers, model_contour_indices) = select_contour(yaw_angle, contour_landmarks, model_contour);
// These are the additional contour-correspondences we're going to find and then use:
std::vector<cv::Vec4f> model_points_contour; // the points in the 3D shape model
@@ -212,7 +214,7 @@ inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<in
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
// Note/Todo: Loop here instead of calling this function where we have no idea what it's doing? What does its documentation say?
return get_nearest_contour_correspondences(landmarks, landmark_contour_identifiers, model_contour_indices, morphable_model, view_model, ortho_projection, viewport);
return get_nearest_contour_correspondences(landmarks, landmark_contour_identifiers, model_contour_indices, mesh, view_model, ortho_projection, viewport);
};
/**
@@ -223,25 +225,32 @@ inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<in
* have different size.
* Correspondence can be established using get_nearest_contour_correspondences().
*
* If the yaw angle is between +-7.5°, both contours will be selected.
*
* Note: Maybe rename to find_nearest_contour_points, to highlight that there is (potentially a lot of) computational cost involved?
*
* @param[in] yaw_angle yaw angle in degrees.
* @param[in] yaw_angle Yaw angle in degrees.
* @param[in] contour_landmarks 2D image contour ids of left or right side (for example for ibug landmarks).
* @param[in] model_contour The model contour indices that should be used/considered to find the closest corresponding 3D vertex.
* @return A pair with two vectors containing the selected 2D image contour landmark ids and the 3D model contour indices.
*/
inline std::pair<std::vector<std::string>, std::vector<int>> select_contour(float yaw_angle, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour)
std::pair<std::vector<std::string>, std::vector<int>> select_contour(float yaw_angle, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour)
{
using std::begin;
using std::end;
std::vector<int> model_contour_indices;
std::vector<std::string> contour_landmark_identifiers;
if (yaw_angle >= 0.0f) { // positive yaw = subject looking to the left
model_contour_indices = model_contour.right_contour; // ==> we use the right cnt-lms
contour_landmark_identifiers = contour_landmarks.right_contour;
if (yaw_angle >= -7.5f) { // positive yaw = subject looking to the left
// ==> we use the right cnt-lms
model_contour_indices.insert(end(model_contour_indices), begin(model_contour.right_contour), end(model_contour.right_contour));
contour_landmark_identifiers.insert(end(contour_landmark_identifiers), begin(contour_landmarks.right_contour), end(contour_landmarks.right_contour));
}
else {
model_contour_indices = model_contour.left_contour;
contour_landmark_identifiers = contour_landmarks.left_contour;
if (yaw_angle <= 7.5f) {
// ==> we use the left cnt-lms
model_contour_indices.insert(end(model_contour_indices), begin(model_contour.left_contour), end(model_contour.left_contour));
contour_landmark_identifiers.insert(end(contour_landmark_identifiers), begin(contour_landmarks.left_contour), end(contour_landmarks.left_contour));
}
// Note there's an overlap between the angles - if a subject is between +- 7.5°, both contours get added.
return std::make_pair(contour_landmark_identifiers, model_contour_indices);
};
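// Example: a yaw of +10° selects only the right contour, -10° only the left,
// and 0° (or anything in [-7.5°, +7.5°]) selects both, concatenated.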
@@ -252,24 +261,18 @@ inline std::pair<std::vector<std::string>, std::vector<int>> select_contour(floa
*
* Note: Maybe rename to find_nearest_contour_points, to highlight that there is (potentially a lot of) computational cost involved?
* Note: Does ortho_projection have to be specifically orthographic? Otherwise, if it works with perspective too, rename to just "projection".
* More notes:
* Actually, only return the vertex id, not the point? Same with get_corresponding_pointset? Because
* then it's much easier to use the current shape estimate instead of the mean! But this function needs to project.
* So... it should take a Mesh actually? But creating a Mesh is a lot of computation?
* When we want to use the non-mean, then we need to use draw_sample() anyway? So overhead of Mesh is only if we use the mean?
* Maybe two overloads?
* Note: Uses the mean to calculate.
* Note: Actually, only return the vertex id, not the model point as well? Same with get_corresponding_pointset?
*
* @param[in] landmarks All image landmarks.
* @param[in] landmark_contour_identifiers 2D image contour ids of left or right side (for example for ibug landmarks).
* @param[in] model_contour_indices The model contour indices that should be considered to find the closest corresponding 3D vertex.
* @param[in] morphable_model The Morphable Model whose shape (coefficients) are estimated.
* @param[in] mesh The mesh that's projected to find the nearest contour vertex.
* @param[in] view_model Model-view matrix of the current fitting to project the 3D model vertices to 2D.
* @param[in] ortho_projection Projection matrix to project the 3D model vertices to 2D.
* @param[in] viewport Current viewport to use.
* @return A tuple with the 2D contour landmark points, the corresponding points in the 3D shape model and their vertex indices.
*/
inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const morphablemodel::MorphableModel& morphable_model, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const render::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
{
// These are the additional contour-correspondences we're going to find and then use!
std::vector<cv::Vec4f> model_points_cnt; // the points in the 3D shape model
......@@ -289,8 +292,8 @@ inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<in
std::vector<float> distances_2d;
for (auto&& model_contour_vertex_idx : model_contour_indices) // we could actually pre-project them, i.e. only project them once, not for each landmark newly...
{
auto vertex = mesh.vertices[model_contour_vertex_idx];
glm::vec3 proj = glm::project(glm::vec3(vertex), view_model, ortho_projection, viewport);
cv::Vec2f screen_point_model_contour(proj.x, proj.y);
double dist = cv::norm(screen_point_model_contour, screen_point_2d_contour_landmark, cv::NORM_L2);
......@@ -301,7 +304,7 @@ inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<in
auto min_ele_idx = std::distance(begin(distances_2d), min_ele);
auto the_3dmm_vertex_id_that_is_closest = model_contour_indices[min_ele_idx];
const auto& closest_vertex = mesh.vertices[the_3dmm_vertex_id_that_is_closest];
cv::Vec4f vertex(closest_vertex.x, closest_vertex.y, closest_vertex.z, closest_vertex.w);
model_points_cnt.emplace_back(vertex);
vertex_indices_cnt.emplace_back(the_3dmm_vertex_id_that_is_closest);
image_points_cnt.emplace_back(screen_point_2d_contour_landmark);
......
......@@ -22,14 +22,23 @@
#ifndef FITTING_HPP_
#define FITTING_HPP_
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"
#include "eos/morphablemodel/EdgeTopology.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp"
#include "eos/fitting/linear_shape_fitting.hpp"
#include "eos/fitting/blendshape_fitting.hpp"
#include "eos/fitting/contour_correspondence.hpp"
#include "eos/fitting/closest_edge_fitting.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "opencv2/core/core.hpp"
#include <cassert>
#include <vector>
#include <algorithm>
namespace eos {
namespace fitting {
......@@ -81,7 +90,7 @@ inline cv::Mat fit_shape(cv::Mat affine_camera_matrix, morphablemodel::Morphable
// Estimate the blendshape coefficients with the current PCA model estimate:
Mat pca_model_shape = morphable_model.get_shape_model().draw_sample(current_pca_coeffs);
current_blendshape_coeffs = fitting::fit_blendshapes_to_landmarks_nnls(blendshapes, pca_model_shape, affine_camera_matrix, image_points, vertex_indices);
combined_shape = pca_model_shape + blendshapes_as_basis * Mat(current_blendshape_coeffs);
} while (std::abs(cv::norm(current_pca_coeffs) - cv::norm(last_pca_coeffs)) >= 0.01 || std::abs(cv::norm(current_blendshape_coeffs) - cv::norm(last_blendshape_coeffs)) >= 0.01);
......@@ -111,7 +120,6 @@ inline cv::Mat fit_shape(cv::Mat affine_camera_matrix, morphablemodel::Morphable
return fit_shape(affine_camera_matrix, morphable_model, blendshapes, image_points, vertex_indices, lambda, num_coefficients_to_fit, unused, unused);
};
/**
* @brief Takes a LandmarkCollection of 2D landmarks and, using the landmark_mapper, finds the
* corresponding 3D vertex indices and returns them, along with the coordinates of the 3D points.
......@@ -134,7 +142,7 @@ inline cv::Mat fit_shape(cv::Mat affine_camera_matrix, morphablemodel::Morphable
* @return A tuple of [image_points, model_points, vertex_indices].
*/
template<class T>
inline auto get_corresponding_pointset(const T& landmarks, const core::LandmarkMapper& landmark_mapper, const morphablemodel::MorphableModel& morphable_model)
{
using cv::Mat;
using std::vector;
......@@ -172,7 +180,7 @@ auto get_corresponding_pointset(const T& landmarks, const core::LandmarkMapper&
* @return The concatenated vector.
*/
template <class T>
inline auto concat(const std::vector<T>& vec_a, const std::vector<T>& vec_b)
{
std::vector<T> concatenated_vec;
concatenated_vec.reserve(vec_a.size() + vec_b.size());
......@@ -180,6 +188,222 @@ auto concat(const std::vector<T>& vec_a, const std::vector<T>& vec_b)
concatenated_vec.insert(std::end(concatenated_vec), std::begin(vec_b), std::end(vec_b));
return concatenated_vec;
};
/**
* @brief Fit the pose (camera), shape model, and expression blendshapes to landmarks,
* in an iterative way.
*
* Convenience function that fits pose (camera), the shape model, and expression blendshapes
* to landmarks, in an iterative (alternating) way. It fits both sides of the face contour as well.
*
* If \p pca_shape_coefficients and/or \p blendshape_coefficients are given, they are used as
* starting values in the fitting. When the function returns, they contain the coefficients from
* the last iteration.
*
 * See std::pair<render::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel&, const std::vector<morphablemodel::Blendshape>&, const core::LandmarkCollection<cv::Vec2f>&, const core::LandmarkMapper&, int, int, const morphablemodel::EdgeTopology&, const fitting::ContourLandmarks&, const fitting::ModelContour&, int, boost::optional<int>, float)
 * for a simpler overload with reasonable defaults and no optional output.
*
 * \p num_iterations: Results are good even after a single iteration. For single-image fitting,
 * full convergence of all parameters can take up to 300 iterations. In tracking,
 * particularly when initialising with the previous frame, as few as 1 to 5 iterations
 * work well.
* \p edge_topology is used for the occluding-edge face contour fitting.
* \p contour_landmarks and \p model_contour are used to fit the front-facing contour.
*
* Todo: Add a convergence criterion.
*
* @param[in] morphable_model The 3D Morphable Model used for the shape fitting.
* @param[in] blendshapes A vector of blendshapes that are being fit to the landmarks in addition to the PCA model.
* @param[in] landmarks 2D landmarks from an image to fit the model to.
* @param[in] landmark_mapper Mapping info from the 2D landmark points to 3D vertex indices.
* @param[in] image_width Width of the input image (needed for the camera model).
* @param[in] image_height Height of the input image (needed for the camera model).
* @param[in] edge_topology Precomputed edge topology of the 3D model, needed for fast edge-lookup.
* @param[in] contour_landmarks 2D image contour ids of left or right side (for example for ibug landmarks).
* @param[in] model_contour The model contour indices that should be considered to find the closest corresponding 3D vertex.
* @param[in] num_iterations Number of iterations that the different fitting parts will be alternated for.
* @param[in] num_shape_coefficients_to_fit How many shape-coefficients to fit (all others will stay 0). Should be bigger than zero, or boost::none to fit all coefficients.
* @param[in] lambda Regularisation parameter of the PCA shape fitting.
* @param[in] initial_rendering_params Currently ignored (not used).
* @param[in,out] pca_shape_coefficients If given, will be used as initial PCA shape coefficients to start the fitting. Will contain the final estimated coefficients.
* @param[in,out] blendshape_coefficients If given, will be used as initial expression blendshape coefficients to start the fitting. Will contain the final estimated coefficients.
* @param[out] fitted_image_points Debug parameter: Returns all the 2D points that have been used for the fitting.
* @return The fitted model shape instance and the final pose.
*/
inline std::pair<render::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const core::LandmarkCollection<cv::Vec2f>& landmarks, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations, boost::optional<int> num_shape_coefficients_to_fit, float lambda, boost::optional<fitting::RenderingParameters> initial_rendering_params, std::vector<float>& pca_shape_coefficients, std::vector<float>& blendshape_coefficients, std::vector<cv::Vec2f>& fitted_image_points)
{
assert(blendshapes.size() > 0);
assert(landmarks.size() >= 4);
assert(image_width > 0 && image_height > 0);
assert(num_iterations > 0); // Can we allow 0, for only the initial pose-fit?
assert(pca_shape_coefficients.size() <= morphable_model.get_shape_model().get_num_principal_components());
// More asserts I forgot?
using std::vector;
using cv::Vec2f;
using cv::Vec4f;
using cv::Mat;
if (!num_shape_coefficients_to_fit)
{
num_shape_coefficients_to_fit = morphable_model.get_shape_model().get_num_principal_components();
}
if (pca_shape_coefficients.empty())
{
pca_shape_coefficients.resize(num_shape_coefficients_to_fit.get());
}
// Todo: This leaves the following case open: num_coeffs given is empty or defined, but the
// pca_shape_coefficients given is != num_coeffs or the model's max-coeffs. What to do then? Handle & document!
if (blendshape_coefficients.empty())
{
blendshape_coefficients.resize(blendshapes.size());
}
Mat blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
// Current mesh - either from the given coefficients, or the mean:
Mat current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
Mat current_combined_shape = current_pca_shape + blendshapes_as_basis * Mat(blendshape_coefficients);
auto current_mesh = morphablemodel::detail::sample_to_mesh(current_combined_shape, morphable_model.get_color_model().get_mean(), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
// The 2D and 3D point correspondences used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM),
// and get the corresponding model points (mean if given no initial coeffs, from the computed shape otherwise):
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex(current_mesh.vertices[vertex_idx].x, current_mesh.vertices[vertex_idx].y, current_mesh.vertices[vertex_idx].z, current_mesh.vertices[vertex_idx].w);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
// Need to do an initial pose fit to do the contour fitting inside the loop.
// We'll do an expression fit too, since face shapes vary quite a lot, depending on expressions.
fitting::ScaledOrthoProjectionParameters current_pose;
current_pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image_height);
fitting::RenderingParameters rendering_params(current_pose, image_width, image_height);
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image_width, image_height);
blendshape_coefficients = eos::fitting::fit_blendshapes_to_landmarks_nnls(blendshapes, current_pca_shape, affine_from_ortho, image_points, vertex_indices);
// Mesh with same PCA coeffs as before, but new expression fit (this is relevant if no initial blendshape coeffs have been given):
current_combined_shape = current_pca_shape + morphablemodel::to_matrix(blendshapes) * Mat(blendshape_coefficients);
current_mesh = morphablemodel::detail::sample_to_mesh(current_combined_shape, morphable_model.get_color_model().get_mean(), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
// The static (fixed) landmark correspondences which will stay the same throughout
// the fitting (the inner face landmarks):
auto fixed_image_points = image_points;
auto fixed_vertex_indices = vertex_indices;
for (int i = 0; i < num_iterations; ++i)
{
image_points = fixed_image_points;
vertex_indices = fixed_vertex_indices;
// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
vector<Vec2f> image_points_contour;
vector<int> vertex_indices_contour;
auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params.get_rotation())[1]);
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks, contour_landmarks, model_contour, yaw_angle, current_mesh, rendering_params.get_modelview(), rendering_params.get_projection(), fitting::get_opencv_viewport(image_width, image_height));
// Add the contour correspondences to the set of landmarks that we use for the fitting:
vertex_indices = concat(vertex_indices, vertex_indices_contour);
image_points = concat(image_points, image_points_contour);
// Fit the occluding (away-facing) contour using the detected contour LMs:
vector<Eigen::Vector2f> occluding_contour_landmarks;
if (yaw_angle >= 0.0f) // positive yaw = subject looking to the left
{ // the left contour is the occluding one we want to use ("away-facing")
auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
}
else {
auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
}
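// Note: the last argument to find_occluding_edge_correspondences is presumably a
// distance threshold (in pixels) for accepting nearest-edge matches; 180.0f is
// quite permissive, so most detected contour landmarks will find a correspondence.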
auto edge_correspondences = fitting::find_occluding_edge_correspondences(current_mesh, edge_topology, rendering_params, occluding_contour_landmarks, 180.0f);
image_points = fitting::concat(image_points, edge_correspondences.first);
vertex_indices = fitting::concat(vertex_indices, edge_correspondences.second);
// Get the model points of the current mesh, for all correspondences that we've got:
model_points.clear();
for (const auto& v : vertex_indices)
{
model_points.push_back({ current_mesh.vertices[v][0], current_mesh.vertices[v][1], current_mesh.vertices[v][2], current_mesh.vertices[v][3] });
}
// Re-estimate the pose, using all correspondences:
current_pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image_height);
rendering_params = fitting::RenderingParameters(current_pose, image_width, image_height);
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image_width, image_height);
// Estimate the PCA shape coefficients with the current blendshape coefficients:
Mat mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Mat(blendshape_coefficients);
pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices, mean_plus_blendshapes, lambda);
// Estimate the blendshape coefficients with the current PCA model estimate:
current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
blendshape_coefficients = fitting::fit_blendshapes_to_landmarks_nnls(blendshapes, current_pca_shape, affine_from_ortho, image_points, vertex_indices);
current_combined_shape = current_pca_shape + blendshapes_as_basis * Mat(blendshape_coefficients);
current_mesh = morphablemodel::detail::sample_to_mesh(current_combined_shape, morphable_model.get_color_model().get_mean(), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
}
fitted_image_points = image_points;
return { current_mesh, rendering_params }; // We could also work with a Mat face_instance in this function instead of a Mesh, but it would make the code more convoluted (i.e. accessing vertices would be more complicated).
};
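// A tracking-style usage sketch of this overload (all variables are placeholders):
// reusing the previous frame's coefficients as starting values lets a low
// iteration count suffice:
//
//   std::vector<float> pca_coeffs, blendshape_coeffs; // persisted across frames
//   std::vector<cv::Vec2f> fitted_points;
//   render::Mesh mesh;
//   fitting::RenderingParameters params;
//   std::tie(mesh, params) = fit_shape_and_pose(model, blendshapes, landmarks,
//       landmark_mapper, frame.cols, frame.rows, edge_topology, ibug_contour,
//       model_contour, 3, boost::none, 3.0f, boost::none, pca_coeffs,
//       blendshape_coeffs, fitted_points);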
/**
* @brief Fit the pose (camera), shape model, and expression blendshapes to landmarks,
* in an iterative way.
*
* Convenience function that fits pose (camera), the shape model, and expression blendshapes
* to landmarks, in an iterative (alternating) way. It fits both sides of the face contour as well.
*
* If you want to access the values of shape or blendshape coefficients, or want to set starting
* values for them, use the following overload to this function:
* std::pair<render::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel&, const std::vector<morphablemodel::Blendshape>&, const core::LandmarkCollection<cv::Vec2f>&, const core::LandmarkMapper&, int, int, const morphablemodel::EdgeTopology&, const fitting::ContourLandmarks&, const fitting::ModelContour&, int, boost::optional<int>, float, boost::optional<fitting::RenderingParameters>, std::vector<float>&, std::vector<float>&, std::vector<cv::Vec2f>&)
*
* Todo: Add a convergence criterion.
*
 * \p num_iterations: Results are good even after a single iteration. For single-image fitting,
 * full convergence of all parameters can take up to 300 iterations. In tracking,
 * particularly when initialising with the previous frame, as few as 1 to 5 iterations
 * work well.
* \p edge_topology is used for the occluding-edge face contour fitting.
* \p contour_landmarks and \p model_contour are used to fit the front-facing contour.
*
* @param[in] morphable_model The 3D Morphable Model used for the shape fitting.
* @param[in] blendshapes A vector of blendshapes that are being fit to the landmarks in addition to the PCA model.
* @param[in] landmarks 2D landmarks from an image to fit the model to.
* @param[in] landmark_mapper Mapping info from the 2D landmark points to 3D vertex indices.
* @param[in] image_width Width of the input image (needed for the camera model).
* @param[in] image_height Height of the input image (needed for the camera model).
* @param[in] edge_topology Precomputed edge topology of the 3D model, needed for fast edge-lookup.
* @param[in] contour_landmarks 2D image contour ids of left or right side (for example for ibug landmarks).
* @param[in] model_contour The model contour indices that should be considered to find the closest corresponding 3D vertex.
* @param[in] num_iterations Number of iterations that the different fitting parts will be alternated for.
* @param[in] num_shape_coefficients_to_fit How many shape-coefficients to fit (all others will stay 0). Should be bigger than zero, or boost::none to fit all coefficients.
* @param[in] lambda Regularisation parameter of the PCA shape fitting.
* @return The fitted model shape instance and the final pose.
*/
inline std::pair<render::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const core::LandmarkCollection<cv::Vec2f>& landmarks, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations = 5, boost::optional<int> num_shape_coefficients_to_fit = boost::none, float lambda = 3.0f)
{
std::vector<float> pca_coeffs;
std::vector<float> blendshape_coeffs;
std::vector<cv::Vec2f> fitted_image_points;
return fit_shape_and_pose(morphable_model, blendshapes, landmarks, landmark_mapper, image_width, image_height, edge_topology, contour_landmarks, model_contour, num_iterations, num_shape_coefficients_to_fit, lambda, boost::none, pca_coeffs, blendshape_coeffs, fitted_image_points);
};
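// A minimal single-image sketch using this overload's defaults (file names are
// placeholders, and the loader calls are assumptions about the surrounding API):
//
//   auto model = morphablemodel::load_model("sfm_shape_3448.bin");
//   auto blendshapes = morphablemodel::load_blendshapes("expression_blendshapes_3448.bin");
//   auto edge_topology = morphablemodel::load_edge_topology("sfm_3448_edge_topology.json");
//   auto ibug_contour = fitting::ContourLandmarks::load("ibug2did.txt");
//   auto model_contour = fitting::ModelContour::load("model_contours.json");
//   auto result = fit_shape_and_pose(model, blendshapes, landmarks, landmark_mapper,
//       image.cols, image.rows, edge_topology, ibug_contour, model_contour);
//   render::Mesh fitted_mesh = result.first; // result.second holds the final pose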
} /* namespace fitting */
} /* namespace eos */
......
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: include/eos/morphablemodel/EdgeTopology.hpp
*
* Copyright 2016 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef EDGETOPOLOGY_HPP_
#define EDGETOPOLOGY_HPP_
#include "cereal/cereal.hpp"
#include "cereal/types/array.hpp"
#include "cereal/types/vector.hpp"
#include "cereal/archives/json.hpp"
#include <array>
#include <vector>
#include <fstream>
namespace eos {
namespace morphablemodel {
/**
* @brief A struct containing a 3D shape model's edge topology.
*
* This struct contains all edges of a 3D mesh, and for each edge, it
* contains the two faces and the two vertices that are adjacent to that
* edge. This is used in the iterated closest edge fitting (ICEF).
*
 * Note: The indices are 1-based, so 1 needs to be subtracted before using
 * them as mesh indices. An index of 0 as the first array element means that
 * the edge lies on the mesh boundary, i.e. it is adjacent to only one face.
 * We should explore a less error-prone way to store this data, but this is
 * how the original Matlab code does it.
*
* adjacent_faces.size() is equal to adjacent_vertices.size().
*/
struct EdgeTopology {
std::vector<std::array<int, 2>> adjacent_faces; // num_edges x 2 matrix storing faces adjacent to each edge
std::vector<std::array<int, 2>> adjacent_vertices; // num_edges x 2 matrix storing vertices adjacent to each edge
friend class cereal::access;
/**
* Serialises this class using cereal.
*
* @param[in] ar The archive to serialise to (or to serialise from).
*/
template<class Archive>
void serialize(Archive& archive)
{
archive(CEREAL_NVP(adjacent_faces), CEREAL_NVP(adjacent_vertices));
};
};
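// Because the indices are stored 1-based (with 0 as a boundary marker), a tiny
// helper like this one (an illustrative addition, not part of the original file)
// can make the conversion explicit at call sites:
inline int edge_index_to_zero_based(int one_based_index)
{
// The boundary marker 0 maps to -1 and must be checked by the caller.
return one_based_index - 1;
};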
/**
* Saves a 3DMM edge topology file to a json file.
*
* @param[in] edge_topology A model's edge topology.
* @param[in] filename The file to write.
* @throws std::runtime_error if unable to open the given file for writing.
*/
inline void save_edge_topology(EdgeTopology edge_topology, std::string filename)
{
std::ofstream file(filename);
if (file.fail()) {
throw std::runtime_error("Error opening file for writing: " + filename);
}
cereal::JSONOutputArchive output_archive(file);
output_archive(cereal::make_nvp("edge_topology", edge_topology));
};
/**
* Load a 3DMM edge topology file from a json file.
*
* @param[in] filename The file to load the edge topology from.
* @return A struct containing the edge topology.
 * @throws std::runtime_error if unable to open the given file for reading.
*/
inline EdgeTopology load_edge_topology(std::string filename)
{
EdgeTopology edge_topology;
std::ifstream file(filename);
if (file.fail()) {
throw std::runtime_error("Error opening file for reading: " + filename);
}
cereal::JSONInputArchive input_archive(file);
input_archive(cereal::make_nvp("edge_topology", edge_topology));
return edge_topology;
};
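// Round-trip usage sketch (the file name is a placeholder):
//
//   EdgeTopology topology = load_edge_topology("sfm_3448_edge_topology.json");
//   save_edge_topology(topology, "edge_topology_copy.json");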
} /* namespace morphablemodel */
} /* namespace eos */
#endif /* EDGETOPOLOGY_HPP_ */
......@@ -24,6 +24,9 @@
#include "eos/render/Mesh.hpp"
#include "glm/vec3.hpp"
#include "glm/geometric.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
......@@ -99,6 +102,24 @@ inline cv::Vec3f calculate_face_normal(const cv::Vec3f& v0, const cv::Vec3f& v1,
return n;
};
/**
* Computes the normal of a face (or triangle), i.e. the
 * per-face normal. The returned normal will be normalised.
* Assumes the triangle is given in CCW order, i.e. vertices
* in counterclockwise order on the screen are front-facing.
*
* @param[in] v0 First vertex.
* @param[in] v1 Second vertex.
* @param[in] v2 Third vertex.
* @return The unit-length normal of the given triangle.
*/
inline glm::vec3 compute_face_normal(const glm::vec3& v0, const glm::vec3& v1, const glm::vec3& v2)
{
glm::vec3 n = glm::cross(v1 - v0, v2 - v0); // v0-to-v1 x v0-to-v2
n = glm::normalize(n);
return n;
};
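// Example: for the CCW triangle (0,0,0), (1,0,0), (0,1,0), the cross product
// (1,0,0) x (0,1,0) = (0,0,1), so the normal points towards the viewer:
//
//   auto n = compute_face_normal({ 0.0f, 0.0f, 0.0f }, { 1.0f, 0.0f, 0.0f }, { 0.0f, 1.0f, 0.0f }); // n == (0, 0, 1)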
/**
* Draws the texture coordinates (uv-coords) of the given mesh
* into an image by looping over the triangles and drawing each
......
......@@ -20,6 +20,10 @@ Files in this directory:
6 expression blendshapes for the sfm_shape_3448 model. Contains the expressions angry,
disgust, fear, happy, sad and surprised.
- sfm_3448_edge_topology.json:
Contains a precomputed list of the model's edges, and the two faces and vertices that are
adjacent to each edge. Used in the edge-fitting.
- model_contours.json:
Definition of the model's contour vertices of the right and left side of the face.
......
......@@ -41,6 +41,10 @@ target_link_libraries(scm-to-cereal ${OpenCV_LIBS} ${Boost_LIBRARIES})
add_executable(bfm-binary-to-cereal bfm-binary-to-cereal.cpp)
target_link_libraries(bfm-binary-to-cereal ${OpenCV_LIBS} ${Boost_LIBRARIES})
# Reads an edgestruct CSV file created from Matlab, and converts it to json:
add_executable(edgestruct-csv-to-json edgestruct-csv-to-json.cpp)
target_link_libraries(edgestruct-csv-to-json ${Boost_LIBRARIES})
# Store a json file as cereal .bin:
add_executable(json-to-cereal-binary json-to-cereal-binary.cpp)
target_link_libraries(json-to-cereal-binary ${OpenCV_LIBS} ${Boost_LIBRARIES})
......@@ -57,3 +61,4 @@ endif()
install(TARGETS scm-to-cereal DESTINATION bin)
install(TARGETS bfm-binary-to-cereal DESTINATION bin)
install(TARGETS json-to-cereal-binary DESTINATION bin)
install(TARGETS edgestruct-csv-to-json DESTINATION bin)
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: utils/edgestruct-csv-to-json.cpp
*
* Copyright 2016 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "eos/morphablemodel/EdgeTopology.hpp"
#include "boost/program_options.hpp"
#include "boost/filesystem.hpp"
#include "boost/algorithm/string.hpp"
#include <array>
#include <vector>
#include <string>
#include <iostream>
#include <fstream>
using namespace eos;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using std::cout;
using std::endl;
// Careful: we're loading these from a 1-based indexing file, and we store them as 1-based too.
// Some values will contain zeros in the first element, which is a special value (= edge at the boundary of the mesh).
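// Expected line format: two comma-separated 1-based indices per edge, for example
// (illustrative values):
//
//   1,2
//   0,3  <-- a 0 in the first column marks an edge on the mesh boundary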
std::vector<std::array<int, 2>> read_edgestruct_csv(std::string filename)
{
using std::getline;
using std::string;
std::vector<std::array<int, 2>> vec;
std::ifstream file(filename);
if (!file.is_open()) {
throw std::runtime_error(string("Could not open file: " + filename));
}
string line;
while (getline(file, line))
{
std::vector<string> vals;
boost::split(vals, line, boost::is_any_of(","), boost::token_compress_on);
vec.push_back({ std::stoi(vals[0]), std::stoi(vals[1]) });
}
return vec;
};
/**
* Reads two edgestruct CSV files created from Matlab (one with adjacent faces,
* one with adjacent vertices), converts them to an EdgeTopology struct, and
* stores that as json file.
*/
int main(int argc, char *argv[])
{
fs::path input_adj_faces, input_adj_vertices, outputfile;
try {
po::options_description desc("Allowed options");
desc.add_options()
("help,h",
"display the help message")
("faces,f", po::value<fs::path>(&input_adj_faces)->required(),
"input edgestruct csv file from Matlab with a list of adjacent faces")
("vertices,v", po::value<fs::path>(&input_adj_vertices)->required(),
"input edgestruct csv file from Matlab with a list of adjacent vertices")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("converted_edge_topology.json"),
"output filename for the converted .json file")
;
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
if (vm.count("help")) {
cout << "Usage: edgestruct-csv-to-json [options]" << endl;
cout << desc;
return EXIT_SUCCESS;
}
po::notify(vm);
}
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_FAILURE;
}
morphablemodel::EdgeTopology edge_info;
edge_info.adjacent_faces = read_edgestruct_csv(input_adj_faces.string());
edge_info.adjacent_vertices = read_edgestruct_csv(input_adj_vertices.string());
morphablemodel::save_edge_topology(edge_info, outputfile.string());
cout << "Saved EdgeTopology in json file: " << outputfile.string() << "." << endl;
return EXIT_SUCCESS;
}
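// Example invocation (CSV file names are placeholders for the Matlab-exported files):
//
//   edgestruct-csv-to-json -f Ef.csv -v Ev.csv -o sfm_3448_edge_topology.json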