Commit 9a3ff0d2 authored by Philipp Kopp

Added a new multi-image fitting app and debugging changes

parent 1ddf3476
@@ -30,6 +30,11 @@ add_executable(fit-model fit-model.cpp)
target_link_libraries(fit-model eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
target_link_libraries(fit-model "$<$<CXX_COMPILER_ID:GNU>:-pthread>$<$<CXX_COMPILER_ID:Clang>:-pthreads>")
# Model fitting example that fits orthographic camera, shape, blendshapes, and contours to multiple images:
add_executable(fit-model-multi fit-model-multi.cpp)
target_link_libraries(fit-model-multi eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
target_link_libraries(fit-model-multi "$<$<CXX_COMPILER_ID:GNU>:-pthread>$<$<CXX_COMPILER_ID:Clang>:-pthreads>")
# Generate random samples from the model:
add_executable(generate-obj generate-obj.cpp)
target_link_libraries(generate-obj eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
@@ -37,6 +42,7 @@ target_link_libraries(generate-obj eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
# Install these targets:
install(TARGETS fit-model-simple DESTINATION bin)
install(TARGETS fit-model DESTINATION bin)
install(TARGETS fit-model-multi DESTINATION bin)
install(TARGETS generate-obj DESTINATION bin)
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/data DESTINATION bin)
......
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: examples/fit-model-multi.cpp
*
* Copyright 2016 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"
#include "eos/fitting/fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "boost/program_options.hpp"
#include "boost/filesystem.hpp"
#include <vector>
#include <iostream>
#include <fstream>
using namespace eos;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using eos::core::Landmark;
using eos::core::LandmarkCollection;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
using cv::Vec4f;
using std::cout;
using std::endl;
using std::vector;
using std::string;
/**
* Reads an ibug .pts landmark file and returns an ordered vector with
* the 68 2D landmark coordinates.
*
* @param[in] filename Path to a .pts file.
* @return An ordered vector with the 68 ibug landmarks.
*/
LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
{
using std::getline;
using cv::Vec2f;
using std::string;
LandmarkCollection<Vec2f> landmarks;
landmarks.reserve(68);
std::ifstream file(filename);
if (!file.is_open()) {
throw std::runtime_error(string("Could not open landmark file: " + filename));
}
string line;
// Skip the first 3 lines, they're header lines:
getline(file, line); // 'version: 1'
getline(file, line); // 'n_points : 68'
getline(file, line); // '{'
int ibugId = 1;
while (getline(file, line))
{
if (line == "}") { // end of the file
break;
}
std::stringstream lineStream(line);
Landmark<Vec2f> landmark;
landmark.name = std::to_string(ibugId);
if (!(lineStream >> landmark.coordinates[0] >> landmark.coordinates[1])) {
throw std::runtime_error(string("Landmark format error while parsing the line: " + line));
}
// From the iBug website:
// "Please note that the re-annotated data for this challenge are saved in the Matlab convention of 1 being
// the first index, i.e. the coordinates of the top left pixel in an image are x=1, y=1."
// ==> So we shift every point by 1:
landmark.coordinates[0] -= 1.0f;
landmark.coordinates[1] -= 1.0f;
landmarks.emplace_back(landmark);
++ibugId;
}
return landmarks;
};
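// For reference, a minimal sketch of the ibug .pts layout that read_pts_landmarks expects
// (the coordinate values below are made up; real files contain 68 "x y" lines):
//
//   version: 1
//   n_points: 68
//   {
//   55.27 204.89
//   56.03 230.85
//   ...
//   }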
/**
* Draws the given mesh as wireframe into the image.
*
* It performs backface culling, i.e. it draws only triangles whose vertices appear in counter-clockwise (CCW) order in screen space.
*
* @param[in] image An image to draw into.
* @param[in] mesh The mesh to draw.
* @param[in] modelview Model-view matrix to draw the mesh.
* @param[in] projection Projection matrix to draw the mesh.
* @param[in] viewport Viewport to draw the mesh.
* @param[in] colour Colour of the mesh to be drawn.
*/
void draw_wireframe(cv::Mat image, const core::Mesh& mesh, glm::mat4x4 modelview, glm::mat4x4 projection, glm::vec4 viewport, cv::Scalar colour = cv::Scalar(0, 255, 0, 255))
{
for (const auto& triangle : mesh.tvi)
{
const auto p1 = glm::project({ mesh.vertices[triangle[0]][0], mesh.vertices[triangle[0]][1], mesh.vertices[triangle[0]][2] }, modelview, projection, viewport);
const auto p2 = glm::project({ mesh.vertices[triangle[1]][0], mesh.vertices[triangle[1]][1], mesh.vertices[triangle[1]][2] }, modelview, projection, viewport);
const auto p3 = glm::project({ mesh.vertices[triangle[2]][0], mesh.vertices[triangle[2]][1], mesh.vertices[triangle[2]][2] }, modelview, projection, viewport);
if (render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3)))
{
cv::line(image, cv::Point(p1.x, p1.y), cv::Point(p2.x, p2.y), colour);
cv::line(image, cv::Point(p2.x, p2.y), cv::Point(p3.x, p3.y), colour);
cv::line(image, cv::Point(p3.x, p3.y), cv::Point(p1.x, p1.y), colour);
}
}
};
/**
* This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from one or more ibug LFPW images with their
* landmarks. In addition to fit-model-simple, this example uses blendshapes and
* contour fitting, and it can iterate the fitting. All given images are fitted
* jointly, sharing a single set of PCA shape coefficients.
*
* The 68 ibug landmarks are loaded from each .pts file and converted
* to vertex indices using the LandmarkMapper.
*/
int main(int argc, char *argv[])
{
fs::path modelfile, isomapfile, mappingsfile, contourfile, edgetopologyfile, blendshapesfile, outputfilebase;
vector<fs::path> imagefiles, landmarksfiles;
try {
po::options_description desc("Allowed options");
desc.add_options()
("help,h",
"display the help message")
("model,m", po::value<fs::path>(&modelfile)->required()->default_value("../share/sfm_shape_3448.bin"),
"a Morphable Model stored as cereal BinaryArchive")
//("image,i", po::value<vector<fs::path>>(&imagefiles)->required()->default_value("data/image_0010.png"),
("image,i", po::value<vector<fs::path>>(&imagefiles)->multitoken(),
"an input image")
("landmarks,l", po::value<vector<fs::path>>(&landmarksfiles)->multitoken(),
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug_to_sfm.txt"),
"landmark identifier to model vertex number mapping")
("model-contour,c", po::value<fs::path>(&contourfile)->required()->default_value("../share/model_contours.json"),
"file with model contour indices")
("edge-topology,e", po::value<fs::path>(&edgetopologyfile)->required()->default_value("../share/sfm_3448_edge_topology.json"),
"file with model's precomputed edge topology")
("blendshapes,b", po::value<fs::path>(&blendshapesfile)->required()->default_value("../share/expression_blendshapes_3448.bin"),
"file with blendshapes")
("output,o", po::value<fs::path>(&outputfilebase)->required()->default_value("out"),
"basename for the output rendering and obj files")
;
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
if (vm.count("help")) {
cout << "Usage: fit-model [options]" << endl;
cout << desc;
return EXIT_SUCCESS;
}
po::notify(vm);
}
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_FAILURE;
}
if (landmarksfiles.size() != imagefiles.size()) {
cout << "The number of landmark files (" << landmarksfiles.size() << ") does not match the number of images (" << imagefiles.size() << ")." << endl;
return EXIT_FAILURE;
}
if (landmarksfiles.empty()) {
cout << "Please give at least one image and one landmark file." << endl;
return EXIT_FAILURE;
}
// Load the image, landmarks, LandmarkMapper and the Morphable Model:
vector<Mat> images;
for (auto& imagefile : imagefiles){
images.push_back(cv::imread(imagefile.string()));
}
vector<LandmarkCollection<cv::Vec2f>> landmarkss;
try {
for (auto& landmarksfile : landmarksfiles){
landmarkss.push_back(read_pts_landmarks(landmarksfile.string()));
}
}
catch (const std::runtime_error& e) {
cout << "Error reading the landmarks: " << e.what() << endl;
return EXIT_FAILURE;
}
morphablemodel::MorphableModel morphable_model;
try {
morphable_model = morphablemodel::load_model(modelfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error loading the Morphable Model: " << e.what() << endl;
return EXIT_FAILURE;
}
// The landmark mapper is used to map ibug landmark identifiers to vertex ids:
core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile);
// The expression blendshapes:
vector<morphablemodel::Blendshape> blendshapes = morphablemodel::load_blendshapes(blendshapesfile.string());
// These two are used to fit the front-facing contour to the ibug contour landmarks:
fitting::ModelContour model_contour = contourfile.empty() ? fitting::ModelContour() : fitting::ModelContour::load(contourfile.string());
fitting::ContourLandmarks ibug_contour = fitting::ContourLandmarks::load(mappingsfile.string());
// The edge topology is used to speed up computation of the occluding face contour fitting:
morphablemodel::EdgeTopology edge_topology = morphablemodel::load_edge_topology(edgetopologyfile.string());
// Draw the loaded landmarks:
vector<Mat> outimgs;
for (unsigned i = 0; i < images.size(); ++i) {
Mat outimg = images[i].clone();
for (auto&& lm : landmarkss[i]) {
cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 });
}
outimgs.push_back(outimg);
}
// Fit the model, get back a mesh and the pose:
vector<core::Mesh> meshs;
vector<fitting::RenderingParameters> rendering_paramss;
vector<int> image_widths;
vector<int> image_heights;
for (auto& image : images) {
image_widths.push_back(image.cols);
image_heights.push_back(image.rows);
}
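// Fit all images jointly. The trailing arguments correspond to the parameters of
// fit_shape_and_pose_multi (see fitting.hpp below): num_iterations = 50,
// num_shape_coefficients_to_fit = boost::none (fit all), lambda = 30.0f.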
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi(morphable_model, blendshapes, landmarkss, landmark_mapper, image_widths, image_heights, edge_topology, ibug_contour, model_contour, 50, boost::none, 30.0f);
for (unsigned i = 0; i < images.size(); ++i) {
// The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
// and similarly for pitch and roll.
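// For example (sketch; glm provides pitch() and roll() alongside yaw() for quaternions):
// const float pitch_angle = glm::degrees(glm::pitch(rendering_paramss[i].get_rotation()));
// const float roll_angle = glm::degrees(glm::roll(rendering_paramss[i].get_rotation()));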
// Extract the texture from the image using given mesh and camera parameters:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_paramss[i], images[i].cols, images[i].rows);
Mat isomap = render::extract_texture(meshs[i], affine_from_ortho, images[i]);
// Draw the fitted mesh as wireframe, and save the image:
draw_wireframe(outimgs[i], meshs[i], rendering_paramss[i].get_modelview(), rendering_paramss[i].get_projection(), fitting::get_opencv_viewport(images[i].cols, images[i].rows));
fs::path outputfile = outputfilebase;
outputfile += fs::path(imagefiles[i].stem());
outputfile += fs::path(".png");
cv::imwrite(outputfile.string(), outimgs[i]);
// Save the mesh as textured obj:
outputfile.replace_extension(".obj");
core::write_textured_obj(meshs[i], outputfile.string());
// And save the isomap:
outputfile.replace_extension(".isomap.png");
cv::imwrite(outputfile.string(), isomap);
}
cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfilebase << "." << endl;
return EXIT_SUCCESS;
}
@@ -151,7 +151,7 @@ int main(int argc, char *argv[])
"an input image")
("landmarks,l", po::value<fs::path>(&landmarksfile)->required()->default_value("data/image_0010.pts"),
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"),
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug_to_sfm.txt"),
"landmark identifier to model vertex number mapping")
("model-contour,c", po::value<fs::path>(&contourfile)->required()->default_value("../share/model_contours.json"),
"file with model contour indices")
@@ -174,7 +174,7 @@ int main(int argc, char *argv[])
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_SUCCESS;
return EXIT_FAILURE;
}
// Load the image, landmarks, LandmarkMapper and the Morphable Model:
......
@@ -192,7 +192,7 @@ inline auto concat(const std::vector<T>& vec_a, const std::vector<T>& vec_b)
/**
* @brief Fit the pose (camera), shape model, and expression blendshapes to landmarks,
* in an iterative way.
* in an iterative way. It can fit to more than one set of landmarks, i.e. to multiple images.
*
* Convenience function that fits pose (camera), the shape model, and expression blendshapes
* to landmarks, in an iterative (alternating) way. It fits both sides of the face contour as well.
@@ -264,7 +264,7 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
// Todo: This leaves the following case open: num_coeffs given is empty or defined, but the
// pca_shape_coefficients given is != num_coeffs or the model's max-coeffs. What to do then? Handle & document!
if (blendshape_coefficients.size() == 0)
if (blendshape_coefficients.empty())
{
for (int j = 0; j < num_images; ++j) {
std::vector<float> current_blendshape_coefficients;
@@ -338,19 +338,14 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
vector<vector<int>> fixed_vertex_indices (vertex_indices);
vector<vector<Vec2f>> fixed_image_points (image_points);
for (int i = 0; i < num_iterations; ++i)
{
for (int i = 0; i < num_iterations; ++i) {
std::vector<cv::Mat> affine_from_orthos;
std::vector<cv::Mat> mean_plus_blendshapess;
//std::vector<std::vector<Vec2f>> image_points_w_contour;
//std::vector<std::vector<int>> vertex_indices_w_contour;
std::vector<cv::Mat> mean_plus_blendshapes;
image_points = fixed_image_points;
vertex_indices = fixed_vertex_indices;
for (int j = 0; j < num_images; ++j) {
//vertex_indices_w_contour.push_back(vector<int>(fixed_vertex_indices[j]));
//image_points_w_contour.push_back(vector<Vec2f>(fixed_image_points[j]));
// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
vector<Vec2f> image_points_contour;
@@ -392,10 +387,10 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
affine_from_orthos.push_back(affine_from_ortho);
// Estimate the PCA shape coefficients with the current blendshape coefficients:
Mat mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Mat(blendshape_coefficients[j]);
mean_plus_blendshapess.push_back(mean_plus_blendshapes);
Mat current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Mat(blendshape_coefficients[j]);
mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
}
pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi(morphable_model, affine_from_orthos, image_points, vertex_indices, mean_plus_blendshapess, lambda, num_shape_coefficients_to_fit);
pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi(morphable_model, affine_from_orthos, image_points, vertex_indices, mean_plus_blendshapes, lambda, num_shape_coefficients_to_fit);
// Estimate the blendshape coefficients with the current PCA model estimate:
current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
@@ -408,18 +403,106 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
}
}
//fitted_image_points = image_points;
fitted_image_points = image_points;
return { current_meshs, rendering_params }; // I think we could also work with a Mat face_instance in this function instead of a Mesh, but it would complicate the code (i.e. make accessing vertices more cumbersome).
};
// Helper overload added by Philipp. Todo: verify that the blendshape coefficients are really handled correctly here.
/**
* @brief Fit the pose (camera), shape model, and expression blendshapes to landmarks,
* in an iterative way. It can fit to more than one set of landmarks, i.e. to multiple images.
*
* Convenience function that fits pose (camera), the shape model, and expression blendshapes
* to landmarks, in an iterative (alternating) way. It fits both sides of the face contour as well.
*
* If you want to access the values of shape or blendshape coefficients, or want to set starting
* values for them, use the following overload of this function:
* std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi(const morphablemodel::MorphableModel&, const std::vector<morphablemodel::Blendshape>&, const std::vector<core::LandmarkCollection<cv::Vec2f>>&, const core::LandmarkMapper&, std::vector<int>, std::vector<int>, const morphablemodel::EdgeTopology&, const fitting::ContourLandmarks&, const fitting::ModelContour&, int, boost::optional<int>, float, boost::optional<fitting::RenderingParameters>, std::vector<float>&, std::vector<std::vector<float>>&, std::vector<std::vector<cv::Vec2f>>&)
*
* Todo: Add a convergence criterion.
*
* \p num_iterations: Results are good for even a single iteration. For single-image fitting and
* for full convergence of all parameters, it can take up to 300 iterations. In tracking,
* particularly if initialising with the previous frame, it works well with as low as 1 to 5
* iterations.
* \p edge_topology is used for the occluding-edge face contour fitting.
* \p contour_landmarks and \p model_contour are used to fit the front-facing contour.
*
* @param[in] morphable_model The 3D Morphable Model used for the shape fitting.
* @param[in] blendshapes A vector of blendshapes that are being fit to the landmarks in addition to the PCA model.
* @param[in] landmarks 2D landmarks for each image to fit the model to (one LandmarkCollection per image).
* @param[in] landmark_mapper Mapping info from the 2D landmark points to 3D vertex indices.
* @param[in] image_width Widths of the input images (needed for the camera model), one entry per image.
* @param[in] image_height Heights of the input images (needed for the camera model), one entry per image.
* @param[in] edge_topology Precomputed edge topology of the 3D model, needed for fast edge-lookup.
* @param[in] contour_landmarks 2D image contour ids of left or right side (for example for ibug landmarks).
* @param[in] model_contour The model contour indices that should be considered to find the closest corresponding 3D vertex.
* @param[in] num_iterations Number of iterations that the different fitting parts will be alternated for.
* @param[in] num_shape_coefficients_to_fit How many shape-coefficients to fit (all others will stay 0). Should be bigger than zero, or boost::none to fit all coefficients.
* @param[in] lambda Regularisation parameter of the PCA shape fitting.
* @return The fitted model shape instances and the final poses, one per image.
*/
inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const std::vector<core::LandmarkCollection<cv::Vec2f>>& landmarks, const core::LandmarkMapper& landmark_mapper, std::vector<int> image_width, std::vector<int> image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations = 5, boost::optional<int> num_shape_coefficients_to_fit = boost::none, float lambda = 30.0f)
{
std::vector<float> pca_shape_coefficients;
std::vector<std::vector<float>> blendshape_coefficients;
std::vector<std::vector<cv::Vec2f>> fitted_image_points;
return fit_shape_and_pose_multi(morphable_model, blendshapes, landmarks, landmark_mapper, image_width, image_height, edge_topology, contour_landmarks, model_contour, num_iterations, num_shape_coefficients_to_fit, lambda, boost::none, pca_shape_coefficients, blendshape_coefficients, fitted_image_points);
};
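// Usage sketch for this overload (mirrors examples/fit-model-multi.cpp above; the variables
// are assumed to be loaded already and are not defined here):
//
//   std::vector<core::Mesh> meshes;
//   std::vector<fitting::RenderingParameters> rendering_params;
//   std::tie(meshes, rendering_params) = fitting::fit_shape_and_pose_multi(
//       morphable_model, blendshapes, landmark_collections, landmark_mapper,
//       image_widths, image_heights, edge_topology, ibug_contour, model_contour);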
/**
* @brief Fit the pose (camera), shape model, and expression blendshapes to landmarks,
* in an iterative way.
*
* Convenience function that fits pose (camera), the shape model, and expression blendshapes
* to landmarks, in an iterative (alternating) way. It fits both sides of the face contour as well.
*
* If \p pca_shape_coefficients and/or \p blendshape_coefficients are given, they are used as
* starting values in the fitting. When the function returns, they contain the coefficients from
* the last iteration.
*
* Use fit_shape_and_pose(const morphablemodel::MorphableModel&, const std::vector<morphablemodel::Blendshape>&, const core::LandmarkCollection<cv::Vec2f>&, const core::LandmarkMapper&, int, int, const morphablemodel::EdgeTopology&, const fitting::ContourLandmarks&, const fitting::ModelContour&, int, boost::optional<int>, float)
* for a simpler overload with reasonable defaults and no optional output.
*
* \p num_iterations: Results are good for even a single iteration. For single-image fitting and
* for full convergence of all parameters, it can take up to 300 iterations. In tracking,
* particularly if initialising with the previous frame, it works well with as low as 1 to 5
* iterations.
* \p edge_topology is used for the occluding-edge face contour fitting.
* \p contour_landmarks and \p model_contour are used to fit the front-facing contour.
*
* Todo: Add a convergence criterion.
*
* @param[in] morphable_model The 3D Morphable Model used for the shape fitting.
* @param[in] blendshapes A vector of blendshapes that are being fit to the landmarks in addition to the PCA model.
* @param[in] landmarks 2D landmarks from an image to fit the model to.
* @param[in] landmark_mapper Mapping info from the 2D landmark points to 3D vertex indices.
* @param[in] image_width Width of the input image (needed for the camera model).
* @param[in] image_height Height of the input image (needed for the camera model).
* @param[in] edge_topology Precomputed edge topology of the 3D model, needed for fast edge-lookup.
* @param[in] contour_landmarks 2D image contour ids of left or right side (for example for ibug landmarks).
* @param[in] model_contour The model contour indices that should be considered to find the closest corresponding 3D vertex.
* @param[in] num_iterations Number of iterations that the different fitting parts will be alternated for.
* @param[in] num_shape_coefficients_to_fit How many shape-coefficients to fit (all others will stay 0). Should be bigger than zero, or boost::none to fit all coefficients.
* @param[in] lambda Regularisation parameter of the PCA shape fitting.
* @param[in] initial_rendering_params Currently ignored (not used).
* @param[in,out] pca_shape_coefficients If given, will be used as initial PCA shape coefficients to start the fitting. Will contain the final estimated coefficients.
* @param[in,out] blendshape_coefficients If given, will be used as initial expression blendshape coefficients to start the fitting. Will contain the final estimated coefficients.
* @param[out] fitted_image_points Debug parameter: Returns all the 2D points that have been used for the fitting.
* @return The fitted model shape instance and the final pose.
*/
inline std::pair<core::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const core::LandmarkCollection<cv::Vec2f>& landmarks, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations, boost::optional<int> num_shape_coefficients_to_fit, float lambda, boost::optional<fitting::RenderingParameters> initial_rendering_params, std::vector<float>& pca_shape_coefficients, std::vector<float>& blendshape_coefficients, std::vector<cv::Vec2f>& fitted_image_points)
{
std::vector<std::vector<float>> all_blendshape_coefficients = {blendshape_coefficients};
// Only wrap blendshape_coefficients if it is non-empty: brace-initialising from an empty vector would yield a vector that contains one (empty) element rather than an empty vector (see the short illustration after this function).
std::vector<std::vector<float>> all_blendshape_coefficients;
if (!blendshape_coefficients.empty())
{
all_blendshape_coefficients = {blendshape_coefficients};
}
std::vector<std::vector<cv::Vec2f>> all_fitted_image_points = {fitted_image_points};
//all_blendshape_coefficients.push_back(blendshape_coefficients);
std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> all_meshs_and_params = fit_shape_and_pose_multi( morphable_model, blendshapes, { landmarks }, landmark_mapper, { image_width }, { image_height }, edge_topology, contour_landmarks, model_contour, num_iterations, num_shape_coefficients_to_fit, lambda, initial_rendering_params, pca_shape_coefficients, all_blendshape_coefficients, all_fitted_image_points);
//std::pair<core::Mesh, fitting::RenderingParameters> mesh_and_param = std::make_pair(all_meshs_and_params.first[0], all_meshs_and_params.second[0]);
return {all_meshs_and_params.first[0], all_meshs_and_params.second[0] };
}
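// A minimal illustration of the pitfall the comment above guards against (hypothetical
// snippet, not part of the library):
//
//   std::vector<float> empty_coeffs;                            // size() == 0
//   std::vector<std::vector<float>> wrapped = { empty_coeffs }; // size() == 1: one empty element!
//
// fit_shape_and_pose_multi checks blendshape_coefficients.empty() to decide whether to
// initialise per-image coefficients, so passing the one-element wrapper would wrongly be
// treated as user-supplied starting values.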
@@ -467,7 +550,6 @@ inline std::pair<core::Mesh, fitting::RenderingParameters> fit_shape_and_pose(co
} /* namespace fitting */
} /* namespace eos */
......
@@ -51,7 +51,7 @@ namespace eos {
* @param[in] landmarks 2D landmarks from an image to fit the model to.
* @param[in] vertex_ids The vertex ids in the model that correspond to the 2D points.
* @param[in] base_face The base or reference face from where the fitting is started. Usually this would be the model's mean face, which is what will be used if the parameter is not explicitly specified.
* @param[in] lambda The regularisation parameter (weight of the prior towards the mean).
* @param[in] lambda The regularisation parameter (weight of the prior towards the mean). It is scaled (multiplied) by the number of images given.
* @param[in] num_coefficients_to_fit How many shape-coefficients to fit (all others will stay 0). Should be bigger than zero, or boost::none to fit all coefficients.
* @param[in] detector_standard_deviation The standard deviation of the 2D landmarks given (e.g. of the detector used), in pixels.
* @param[in] model_standard_deviation The standard deviation of the 3D vertex points in the 3D model, projected to 2D (so the value is in pixels).
@@ -65,6 +65,9 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi(morphablemodel::Mo
int num_coeffs_to_fit = num_coefficients_to_fit.get_value_or(morphable_model.get_shape_model().get_num_principal_components());
int num_images = affine_camera_matrix.size();
// the regularisation has to be adjusted when more than one image is given
lambda *= num_images;
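// Rationale (sketch): the data term below sums squared landmark residuals over all images and
// therefore grows roughly linearly with num_images, while the single shared prior term
// lambda * ||shape_coefficients||^2 does not. Scaling lambda by num_images keeps the relative
// weight of prior vs. data comparable to the single-image case.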
int total_num_landmarks_dimension = 0;
for (auto&& l : landmarks) {
total_num_landmarks_dimension += l.size();
@@ -77,9 +80,8 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi(morphablemodel::Mo
// Form a block diagonal matrix $P \in R^{3N\times 4N}$ in which the camera matrix C (P_Affine, affine_camera_matrix) is placed on the diagonal:
Mat P = Mat::zeros(3 * total_num_landmarks_dimension, 4 * total_num_landmarks_dimension, CV_32FC1);
int P_index = 0;
Mat Sigma = Mat::zeros(3 * total_num_landmarks_dimension, 3 * total_num_landmarks_dimension, CV_32FC1);
int Sigma_index = 0; // this runs the same as P_index
Mat Omega;
Mat Omega = Mat::zeros(3 * total_num_landmarks_dimension, 3 * total_num_landmarks_dimension, CV_32FC1);
int Omega_index = 0; // this runs the same as P_index
// The landmarks in matrix notation (in homogeneous coordinates), $3N\times 1$
Mat y = Mat::ones(3 * total_num_landmarks_dimension, 1, CV_32FC1);
int y_index = 0; // also runs the same as P_index. Should rename to "running_index"?
@@ -124,11 +126,10 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi(morphablemodel::Mo
float sigma_squared_2D = std::pow(detector_standard_deviation.get_value_or(std::sqrt(3.0f)), 2) + std::pow(model_standard_deviation.get_value_or(0.0f), 2);
//Mat Sigma = Mat::zeros(3 * num_landmarks, 3 * num_landmarks, CV_32FC1);
for (int i = 0; i < 3 * num_landmarks; ++i) {
Sigma.at<float>(Sigma_index, Sigma_index) = 1.0f / std::sqrt(sigma_squared_2D); // the higher the sigma_squared_2D, the smaller the diagonal entries of Sigma will be
++Sigma_index;
// Sigma(i, i) would be 1/sqrt(sigma_squared_2D), and Omega = Sigma.t() * Sigma squares the diagonal - so we directly assign 1/sigma_squared_2D to Omega here:
Omega.at<float>(Omega_index, Omega_index) = 1.0f / sigma_squared_2D; // the higher sigma_squared_2D, the smaller the diagonal entries of Omega will be
++Omega_index;
}
//Mat Omega = Sigma.t() * Sigma; // just squares the diagonal
// => moved outside the loop
// The landmarks in matrix notation (in homogeneous coordinates), $3N\times 1$
//Mat y = Mat::ones(3 * num_landmarks, 1, CV_32FC1);
@@ -151,7 +152,6 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi(morphablemodel::Mo
// note: now that a Vec4f is returned, we could use copyTo?
}
}
Omega = Sigma.t() * Sigma; // moved outside the loop. But can do even more efficiently anyway.
// Bring into standard regularised quadratic form with diagonal distance matrix Omega
Mat A = P * V_hat_h; // camera matrix times the basis
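// Sketch of the maths (with b denoting the residual of the base face, e.g. b = P * v_bar - y,
// where v_bar stacks the base-face vertices in homogeneous coordinates; names are illustrative):
// the shape coefficients c minimise ||A*c + b||^2_Omega + lambda * ||c||^2, which leads to the
// regularised normal equations (A^t * Omega * A + lambda * I) * c = -A^t * Omega * b.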
@@ -216,7 +216,6 @@ inline std::vector<float> fit_shape_to_landmarks_linear(const morphablemodel::Mo
}
} /* namespace fitting */
} /* namespace eos */
......