Commit 5fe5a737 authored by Patrik Huber's avatar Patrik Huber

Merge branch 'devel'

# Resolved conflicts:
#	include/eos/fitting/RenderingParameters.hpp
#	include/eos/fitting/contour_correspondence.hpp
parents e00cfa0d 5d745ff8
[submodule "3rdparty/pybind11"] [submodule "3rdparty/pybind11"]
path = 3rdparty/pybind11 path = 3rdparty/pybind11
url = https://github.com/pybind/pybind11.git url = https://github.com/pybind/pybind11.git
[submodule "3rdparty/nanoflann"]
path = 3rdparty/nanoflann
url = https://github.com/jlblancoc/nanoflann.git
[submodule "3rdparty/glm"] [submodule "3rdparty/glm"]
path = 3rdparty/glm path = 3rdparty/glm
url = https://github.com/g-truc/glm.git url = https://github.com/g-truc/glm.git
[submodule "3rdparty/eigen3-nnls"]
path = 3rdparty/eigen3-nnls
url = https://github.com/hmatuschek/eigen3-nnls.git
Subproject commit d20add35bcfc9932671cab9ad786b24fd320a592
Subproject commit 206d65a8498a8dfb1910ac8d3de893286fa89e9a
...@@ -96,6 +96,8 @@ message(STATUS "Eigen3 version: ${EIGEN3_VERSION}") ...@@ -96,6 +96,8 @@ message(STATUS "Eigen3 version: ${EIGEN3_VERSION}")
set(CEREAL_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/cereal-1.1.1/include") set(CEREAL_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/cereal-1.1.1/include")
set(glm_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/glm") set(glm_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/glm")
set(nanoflann_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/nanoflann/include")
set(eigen3_nnls_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/eigen3-nnls/src")
# Header files: # Header files:
set(HEADERS set(HEADERS
...@@ -105,6 +107,7 @@ set(HEADERS ...@@ -105,6 +107,7 @@ set(HEADERS
include/eos/morphablemodel/MorphableModel.hpp include/eos/morphablemodel/MorphableModel.hpp
include/eos/morphablemodel/Blendshape.hpp include/eos/morphablemodel/Blendshape.hpp
include/eos/morphablemodel/coefficients.hpp include/eos/morphablemodel/coefficients.hpp
include/eos/morphablemodel/EdgeTopology.hpp
include/eos/morphablemodel/io/cvssp.hpp include/eos/morphablemodel/io/cvssp.hpp
include/eos/morphablemodel/io/mat_cerealisation.hpp include/eos/morphablemodel/io/mat_cerealisation.hpp
include/eos/fitting/affine_camera_estimation.hpp include/eos/fitting/affine_camera_estimation.hpp
...@@ -116,6 +119,7 @@ set(HEADERS ...@@ -116,6 +119,7 @@ set(HEADERS
include/eos/fitting/linear_shape_fitting.hpp include/eos/fitting/linear_shape_fitting.hpp
include/eos/fitting/contour_correspondence.hpp include/eos/fitting/contour_correspondence.hpp
include/eos/fitting/blendshape_fitting.hpp include/eos/fitting/blendshape_fitting.hpp
include/eos/fitting/closest_edge_fitting.hpp
include/eos/fitting/fitting.hpp include/eos/fitting/fitting.hpp
include/eos/fitting/ceres_nonlinear.hpp include/eos/fitting/ceres_nonlinear.hpp
include/eos/fitting/RenderingParameters.hpp include/eos/fitting/RenderingParameters.hpp
...@@ -136,6 +140,8 @@ include_directories(${Boost_INCLUDE_DIRS}) ...@@ -136,6 +140,8 @@ include_directories(${Boost_INCLUDE_DIRS})
include_directories(${OpenCV_INCLUDE_DIRS}) include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${EIGEN3_INCLUDE_DIR}) include_directories(${EIGEN3_INCLUDE_DIR})
include_directories(${glm_INCLUDE_DIR}) include_directories(${glm_INCLUDE_DIR})
include_directories(${nanoflann_INCLUDE_DIR})
include_directories(${eigen3_nnls_INCLUDE_DIR})
# Custom target for the library, to make the headers show up in IDEs: # Custom target for the library, to make the headers show up in IDEs:
add_custom_target(eos SOURCES ${HEADERS}) add_custom_target(eos SOURCES ${HEADERS})
......
...@@ -33,7 +33,11 @@ if(MSVC) ...@@ -33,7 +33,11 @@ if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
endif() endif()
# Model fitting (affine cam & shape to landmarks) example: # Simple model fitting (orthographic camera & shape to landmarks) example:
add_executable(fit-model-simple fit-model-simple.cpp)
target_link_libraries(fit-model-simple ${OpenCV_LIBS} ${Boost_LIBRARIES})
# Model fitting example that fits orthographic camera, shape, blendshapes, and contours:
add_executable(fit-model fit-model.cpp) add_executable(fit-model fit-model.cpp)
target_link_libraries(fit-model ${OpenCV_LIBS} ${Boost_LIBRARIES}) target_link_libraries(fit-model ${OpenCV_LIBS} ${Boost_LIBRARIES})
...@@ -55,6 +59,7 @@ target_link_libraries(generate-obj ${OpenCV_LIBS} ${Boost_LIBRARIES}) ...@@ -55,6 +59,7 @@ target_link_libraries(generate-obj ${OpenCV_LIBS} ${Boost_LIBRARIES})
# install target: # install target:
install(TARGETS fit-model-simple DESTINATION bin)
install(TARGETS fit-model DESTINATION bin) install(TARGETS fit-model DESTINATION bin)
install(TARGETS generate-obj DESTINATION bin) install(TARGETS generate-obj DESTINATION bin)
install(DIRECTORY ${CMAKE_SOURCE_DIR}/examples/data DESTINATION bin) install(DIRECTORY ${CMAKE_SOURCE_DIR}/examples/data DESTINATION bin)
...@@ -212,10 +212,9 @@ int main(int argc, char *argv[]) ...@@ -212,10 +212,9 @@ int main(int argc, char *argv[])
constexpr bool use_perspective = false; constexpr bool use_perspective = false;
// These will be the final 2D and 3D points used for the fitting: // These will be the 2D image points and their corresponding 3D vertex id's used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model vector<Vec2f> image_points; // the 2D landmark points
vector<int> vertex_indices; // their vertex indices vector<int> vertex_indices; // their corresponding vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM): // Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) { for (int i = 0; i < landmarks.size(); ++i) {
...@@ -225,7 +224,6 @@ int main(int argc, char *argv[]) ...@@ -225,7 +224,6 @@ int main(int argc, char *argv[])
} }
int vertex_idx = std::stoi(converted_name.get()); int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx); Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx); vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates); image_points.emplace_back(landmarks[i].coordinates);
} }
...@@ -259,7 +257,7 @@ int main(int argc, char *argv[]) ...@@ -259,7 +257,7 @@ int main(int argc, char *argv[])
blendshape_coefficients.resize(6); blendshape_coefficients.resize(6);
Problem camera_costfunction; Problem camera_costfunction;
for (int i = 0; i < model_points.size(); ++i) for (int i = 0; i < image_points.size(); ++i)
{ {
CostFunction* cost_function = new AutoDiffCostFunction<fitting::LandmarkCost, 2 /* num residuals */, 4 /* camera rotation (quaternion) */, num_cam_trans_intr_params /* camera translation & fov/frustum_scale */, 10 /* shape-coeffs */, 6 /* bs-coeffs */>(new fitting::LandmarkCost(morphable_model.get_shape_model(), blendshapes, image_points[i], vertex_indices[i], image.cols, image.rows, use_perspective)); CostFunction* cost_function = new AutoDiffCostFunction<fitting::LandmarkCost, 2 /* num residuals */, 4 /* camera rotation (quaternion) */, num_cam_trans_intr_params /* camera translation & fov/frustum_scale */, 10 /* shape-coeffs */, 6 /* bs-coeffs */>(new fitting::LandmarkCost(morphable_model.get_shape_model(), blendshapes, image_points[i], vertex_indices[i], image.cols, image.rows, use_perspective));
camera_costfunction.AddResidualBlock(cost_function, NULL, &camera_rotation[0], &camera_translation_and_intrinsics[0], &shape_coefficients[0], &blendshape_coefficients[0]); camera_costfunction.AddResidualBlock(cost_function, NULL, &camera_rotation[0], &camera_translation_and_intrinsics[0], &shape_coefficients[0], &blendshape_coefficients[0]);
...@@ -333,13 +331,11 @@ int main(int argc, char *argv[]) ...@@ -333,13 +331,11 @@ int main(int argc, char *argv[])
// Contour fitting: // Contour fitting:
// These are the additional contour-correspondences we're going to find and then use: // These are the additional contour-correspondences we're going to find and then use:
vector<Vec4f> model_points_contour; // the points in the 3D shape model vector<Vec2f> image_points_contour; // the 2D landmark points
vector<int> vertex_indices_contour; // their vertex indices vector<int> vertex_indices_contour; // their corresponding 3D vertex indices
vector<Vec2f> image_points_contour; // the corresponding 2D landmark points
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id: // For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
std::tie(image_points_contour, model_points_contour, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks, ibug_contour, model_contour, glm::degrees(euler_angles[1]), morphable_model, t_mtx * rot_mtx, projection_mtx, viewport); std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks, ibug_contour, model_contour, glm::degrees(euler_angles[1]), morphable_model.get_mean(), t_mtx * rot_mtx, projection_mtx, viewport);
using eos::fitting::concat; using eos::fitting::concat;
model_points = concat(model_points, model_points_contour);
vertex_indices = concat(vertex_indices, vertex_indices_contour); vertex_indices = concat(vertex_indices, vertex_indices_contour);
image_points = concat(image_points, image_points_contour); image_points = concat(image_points, image_points_contour);
...@@ -347,7 +343,7 @@ int main(int argc, char *argv[]) ...@@ -347,7 +343,7 @@ int main(int argc, char *argv[])
start = std::chrono::steady_clock::now(); start = std::chrono::steady_clock::now();
Problem fitting_costfunction; Problem fitting_costfunction;
// Landmark constraint: // Landmark constraint:
for (int i = 0; i < model_points.size(); ++i) for (int i = 0; i < image_points.size(); ++i)
{ {
CostFunction* cost_function = new AutoDiffCostFunction<fitting::LandmarkCost, 2 /* num residuals */, 4 /* camera rotation (quaternion) */, num_cam_trans_intr_params /* camera translation & focal length */, 10 /* shape-coeffs */, 6 /* bs-coeffs */>(new fitting::LandmarkCost(morphable_model.get_shape_model(), blendshapes, image_points[i], vertex_indices[i], image.cols, image.rows, use_perspective)); CostFunction* cost_function = new AutoDiffCostFunction<fitting::LandmarkCost, 2 /* num residuals */, 4 /* camera rotation (quaternion) */, num_cam_trans_intr_params /* camera translation & focal length */, 10 /* shape-coeffs */, 6 /* bs-coeffs */>(new fitting::LandmarkCost(morphable_model.get_shape_model(), blendshapes, image_points[i], vertex_indices[i], image.cols, image.rows, use_perspective));
fitting_costfunction.AddResidualBlock(cost_function, NULL, &camera_rotation[0], &camera_translation_and_intrinsics[0], &shape_coefficients[0], &blendshape_coefficients[0]); fitting_costfunction.AddResidualBlock(cost_function, NULL, &camera_rotation[0], &camera_translation_and_intrinsics[0], &shape_coefficients[0], &blendshape_coefficients[0]);
......
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: examples/fit-model-simple.cpp
*
* Copyright 2015 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/fitting/linear_shape_fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"

#include "boost/program_options.hpp"
#include "boost/filesystem.hpp"

#include <vector>
#include <iostream>
#include <fstream>
#include <sstream>
using namespace eos;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using eos::core::Landmark;
using eos::core::LandmarkCollection;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
using cv::Vec4f;
using std::cout;
using std::endl;
using std::vector;
using std::string;
/**
 * Reads an ibug .pts landmark file and returns an ordered vector with
 * the 68 2D landmark coordinates.
 *
 * The .pts format has a three-line header ('version', 'n_points', '{'),
 * then one "x y" pair per line, terminated by a line containing '}'.
 *
 * @param[in] filename Path to a .pts file.
 * @return An ordered vector with the 68 ibug landmarks, named "1".."68".
 * @throws std::runtime_error If the file cannot be opened or a line cannot be parsed.
 */
LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
{
	using std::getline;
	using cv::Vec2f;
	using std::string;
	LandmarkCollection<Vec2f> landmarks;
	landmarks.reserve(68);

	std::ifstream file(filename);
	if (!file.is_open()) {
		throw std::runtime_error(string("Could not open landmark file: " + filename));
	}

	string line;
	// Skip the first 3 lines, they're header lines:
	getline(file, line); // 'version: 1'
	getline(file, line); // 'n_points : 68'
	getline(file, line); // '{'

	int ibugId = 1;
	while (getline(file, line))
	{
		if (line == "}") { // end of the file
			break;
		}
		// istringstream (input-only) instead of stringstream: we only parse here.
		// Requires <sstream>, which the original file relied on transitively.
		std::istringstream lineStream(line);

		Landmark<Vec2f> landmark;
		landmark.name = std::to_string(ibugId);
		if (!(lineStream >> landmark.coordinates[0] >> landmark.coordinates[1])) {
			throw std::runtime_error(string("Landmark format error while parsing the line: " + line));
		}
		// From the iBug website:
		// "Please note that the re-annotated data for this challenge are saved in the Matlab convention of 1 being
		// the first index, i.e. the coordinates of the top left pixel in an image are x=1, y=1."
		// ==> So we shift every point by 1:
		landmark.coordinates[0] -= 1.0f;
		landmark.coordinates[1] -= 1.0f;
		landmarks.emplace_back(landmark);
		++ibugId;
	}
	return landmarks;
}
/**
* This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from an ibug LFPW image with its landmarks.
*
* First, the 68 ibug landmarks are loaded from the .pts file and converted
* to vertex indices using the LandmarkMapper. Then, an orthographic camera
* is estimated, and then, using this camera matrix, the shape is fitted
* to the landmarks.
*/
int main(int argc, char *argv[])
{
fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, outputfile;
try {
po::options_description desc("Allowed options");
desc.add_options()
("help,h",
"display the help message")
("model,m", po::value<fs::path>(&modelfile)->required()->default_value("../share/sfm_shape_3448.bin"),
"a Morphable Model stored as cereal BinaryArchive")
("image,i", po::value<fs::path>(&imagefile)->required()->default_value("data/image_0010.png"),
"an input image")
("landmarks,l", po::value<fs::path>(&landmarksfile)->required()->default_value("data/image_0010.pts"),
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"),
"landmark identifier to model vertex number mapping")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("out"),
"basename for the output rendering and obj files")
;
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
if (vm.count("help")) {
cout << "Usage: fit-model-simple [options]" << endl;
cout << desc;
return EXIT_SUCCESS;
}
po::notify(vm);
}
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_SUCCESS;
}
// Load the image, landmarks, LandmarkMapper and the Morphable Model:
Mat image = cv::imread(imagefile.string());
LandmarkCollection<cv::Vec2f> landmarks;
try {
landmarks = read_pts_landmarks(landmarksfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error reading the landmarks: " << e.what() << endl;
return EXIT_FAILURE;
}
morphablemodel::MorphableModel morphable_model;
try {
morphable_model = morphablemodel::load_model(modelfile.string());
}
catch (const std::runtime_error& e) {
cout << "Error loading the Morphable Model: " << e.what() << endl;
return EXIT_FAILURE;
}
core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile);
// Draw the loaded landmarks:
Mat outimg = image.clone();
for (auto&& lm : landmarks) {
cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 });
}
// These will be the final 2D and 3D points used for the fitting:
vector<Vec4f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
// Estimate the camera (pose) from the 2D - 3D point correspondences
fitting::ScaledOrthoProjectionParameters pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image.rows);
fitting::RenderingParameters rendering_params(pose, image.cols, image.rows);
// The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
// and similarly for pitch and roll.
// Estimate the shape coefficients by fitting the shape to the landmarks:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices);
// Obtain the full mesh with the estimated coefficients:
render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
// Extract the texture from the image using given mesh and camera parameters:
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Save the mesh as textured obj:
outputfile += fs::path(".obj");
render::write_textured_obj(mesh, outputfile.string());
// And save the isomap:
outputfile.replace_extension(".isomap.png");
cv::imwrite(outputfile.string(), isomap);
cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile.stem().stem() << "." << endl;
return EXIT_SUCCESS;
}
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* *
* File: examples/fit-model.cpp * File: examples/fit-model.cpp
* *
* Copyright 2015 Patrik Huber * Copyright 2016 Patrik Huber
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
...@@ -19,9 +19,9 @@ ...@@ -19,9 +19,9 @@
*/ */
#include "eos/core/Landmark.hpp" #include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp" #include "eos/core/LandmarkMapper.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp" #include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/fitting/RenderingParameters.hpp" #include "eos/morphablemodel/Blendshape.hpp"
#include "eos/fitting/linear_shape_fitting.hpp" #include "eos/fitting/fitting.hpp"
#include "eos/render/utils.hpp" #include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp" #include "eos/render/texture_extraction.hpp"
...@@ -100,18 +100,46 @@ LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename) ...@@ -100,18 +100,46 @@ LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
return landmarks; return landmarks;
}; };
/**
 * Draws the given mesh as wireframe into the image.
 *
 * It does backface culling, i.e. draws only vertices in CCW order.
 *
 * @param[in] image An image to draw into.
 * @param[in] mesh The mesh to draw.
 * @param[in] modelview Model-view matrix to draw the mesh.
 * @param[in] projection Projection matrix to draw the mesh.
 * @param[in] viewport Viewport to draw the mesh.
 * @param[in] colour Colour of the mesh to be drawn.
 */
void draw_wireframe(cv::Mat image, const eos::render::Mesh& mesh, glm::mat4x4 modelview, glm::mat4x4 projection, glm::vec4 viewport, cv::Scalar colour = cv::Scalar(0, 255, 0, 255))
{
	// Projects the mesh vertex with the given id from model space to screen space:
	const auto project_vertex = [&](int vertex_id) {
		const auto& v = mesh.vertices[vertex_id];
		return glm::project(glm::vec3(v[0], v[1], v[2]), modelview, projection, viewport);
	};

	for (const auto& triangle : mesh.tvi)
	{
		const auto p1 = project_vertex(triangle[0]);
		const auto p2 = project_vertex(triangle[1]);
		const auto p3 = project_vertex(triangle[2]);
		// Backface culling: skip triangles that are not counter-clockwise in screen space:
		if (!eos::render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3)))
		{
			continue;
		}
		cv::line(image, cv::Point(p1.x, p1.y), cv::Point(p2.x, p2.y), colour);
		cv::line(image, cv::Point(p2.x, p2.y), cv::Point(p3.x, p3.y), colour);
		cv::line(image, cv::Point(p3.x, p3.y), cv::Point(p1.x, p1.y), colour);
	}
}
/** /**
* This app demonstrates estimation of the camera and fitting of the shape * This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from an ibug LFPW image with its landmarks. * model of a 3D Morphable Model from an ibug LFPW image with its landmarks.
* In addition to fit-model-simple, this example uses blendshapes, contour-
* fitting, and can iterate the fitting.
* *
* First, the 68 ibug landmarks are loaded from the .pts file and converted * 68 ibug landmarks are loaded from the .pts file and converted
* to vertex indices using the LandmarkMapper. Then, an affine camera matrix * to vertex indices using the LandmarkMapper.
* is estimated, and then, using this camera matrix, the shape is fitted
* to the landmarks.
*/ */
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, outputfile; fs::path modelfile, isomapfile, imagefile, landmarksfile, mappingsfile, contourfile, edgetopologyfile, blendshapesfile, outputfile;
try { try {
po::options_description desc("Allowed options"); po::options_description desc("Allowed options");
desc.add_options() desc.add_options()
...@@ -125,6 +153,12 @@ int main(int argc, char *argv[]) ...@@ -125,6 +153,12 @@ int main(int argc, char *argv[])
"2D landmarks for the image, in ibug .pts format") "2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"), ("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"),
"landmark identifier to model vertex number mapping") "landmark identifier to model vertex number mapping")
("model-contour,c", po::value<fs::path>(&contourfile)->required()->default_value("../share/model_contours.json"),
"file with model contour indices")
("edge-topology,e", po::value<fs::path>(&edgetopologyfile)->required()->default_value("../share/sfm_3448_edge_topology.json"),
"file with model's precomputed edge topology")
("blendshapes,b", po::value<fs::path>(&blendshapesfile)->required()->default_value("../share/expression_blendshapes_3448.bin"),
"file with blendshapes")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("out"), ("output,o", po::value<fs::path>(&outputfile)->required()->default_value("out"),
"basename for the output rendering and obj files") "basename for the output rendering and obj files")
; ;
...@@ -161,52 +195,45 @@ int main(int argc, char *argv[]) ...@@ -161,52 +195,45 @@ int main(int argc, char *argv[])
cout << "Error loading the Morphable Model: " << e.what() << endl; cout << "Error loading the Morphable Model: " << e.what() << endl;
return EXIT_FAILURE; return EXIT_FAILURE;
} }
// The landmark mapper is used to map ibug landmark identifiers to vertex ids:
core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile); core::LandmarkMapper landmark_mapper = mappingsfile.empty() ? core::LandmarkMapper() : core::LandmarkMapper(mappingsfile);
// The expression blendshapes:
vector<morphablemodel::Blendshape> blendshapes = morphablemodel::load_blendshapes(blendshapesfile.string());
// These two are used to fit the front-facing contour to the ibug contour landmarks:
fitting::ModelContour model_contour = contourfile.empty() ? fitting::ModelContour() : fitting::ModelContour::load(contourfile.string());
fitting::ContourLandmarks ibug_contour = fitting::ContourLandmarks::load(mappingsfile.string());
// The edge topology is used to speed up computation of the occluding face contour fitting:
morphablemodel::EdgeTopology edge_topology = morphablemodel::load_edge_topology(edgetopologyfile.string());
// Draw the loaded landmarks: // Draw the loaded landmarks:
Mat outimg = image.clone(); Mat outimg = image.clone();
for (auto&& lm : landmarks) { for (auto&& lm : landmarks) {
cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 }); cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 });
} }
// These will be the final 2D and 3D points used for the fitting: // Fit the model, get back a mesh and the pose:
vector<Vec4f> model_points; // the points in the 3D shape model render::Mesh mesh;
vector<int> vertex_indices; // their vertex indices fitting::RenderingParameters rendering_params;
vector<Vec2f> image_points; // the corresponding 2D landmark points std::tie(mesh, rendering_params) = fitting::fit_shape_and_pose(morphable_model, blendshapes, landmarks, landmark_mapper, image.cols, image.rows, edge_topology, ibug_contour, model_contour, 50, boost::none, 30.0f);
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex = morphable_model.get_shape_model().get_mean_at_point(vertex_idx);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(landmarks[i].coordinates);
}
// Estimate the camera (pose) from the 2D - 3D point correspondences
fitting::ScaledOrthoProjectionParameters pose = fitting::estimate_orthographic_projection_linear(image_points, model_points, true, image.rows);
fitting::RenderingParameters rendering_params(pose, image.cols, image.rows);
// The 3D head pose can be recovered as follows: // The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation())); float yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
// and similarly for pitch and roll. // and similarly for pitch and roll.
// Estimate the shape coefficients by fitting the shape to the landmarks:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices);
// Obtain the full mesh with the estimated coefficients:
render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
// Extract the texture from the image using given mesh and camera parameters: // Extract the texture from the image using given mesh and camera parameters:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image); Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Draw the fitted mesh as wireframe, and save the image:
draw_wireframe(outimg, mesh, rendering_params.get_modelview(), rendering_params.get_projection(), fitting::get_opencv_viewport(image.cols, image.rows));
outputfile += fs::path(".png");
cv::imwrite(outputfile.string(), outimg);
// Save the mesh as textured obj: // Save the mesh as textured obj:
outputfile += fs::path(".obj"); outputfile.replace_extension(".obj");
render::write_textured_obj(mesh, outputfile.string()); render::write_textured_obj(mesh, outputfile.string());
// And save the isomap: // And save the isomap:
......
...@@ -125,18 +125,19 @@ public: ...@@ -125,18 +125,19 @@ public:
}; };
// This assumes estimate_sop was run on points with OpenCV viewport! I.e. y flipped. // This assumes estimate_sop was run on points with OpenCV viewport! I.e. y flipped.
RenderingParameters(ScaledOrthoProjectionParameters ortho_params, int image_width, int image_height) { RenderingParameters(ScaledOrthoProjectionParameters ortho_params, int screen_width, int screen_height) : camera_type(CameraType::Orthographic), t_x(ortho_params.tx), t_y(ortho_params.ty), screen_width(screen_width), screen_height(screen_height) {
camera_type = CameraType::Orthographic;
rotation = glm::quat(ortho_params.R); rotation = glm::quat(ortho_params.R);
t_x = ortho_params.tx;
t_y = ortho_params.ty;
const auto l = 0.0; const auto l = 0.0;
const auto r = image_width / ortho_params.s; const auto r = screen_width / ortho_params.s;
const auto b = 0.0; // The b and t values are not tested for what happens if the SOP parameters const auto b = 0.0; // The b and t values are not tested for what happens if the SOP parameters
const auto t = image_height / ortho_params.s; // were estimated on a non-flipped viewport. const auto t = screen_height / ortho_params.s; // were estimated on a non-flipped viewport.
frustum = Frustum(l, r, b, t); frustum = Frustum(l, r, b, t);
}; };
auto get_camera_type() const {
return camera_type;
};
glm::quat get_rotation() const { glm::quat get_rotation() const {
return rotation; return rotation;
}; };
...@@ -158,6 +159,18 @@ public: ...@@ -158,6 +159,18 @@ public:
} }
}; };
Frustum get_frustum() const {
return frustum;
};
int get_screen_width() const {
return screen_width;
};
int get_screen_height() const {
return screen_height;
};
private: private:
CameraType camera_type = CameraType::Orthographic; CameraType camera_type = CameraType::Orthographic;
Frustum frustum; // Can construct a glm::ortho or glm::perspective matrix from this. Frustum frustum; // Can construct a glm::ortho or glm::perspective matrix from this.
......
...@@ -24,6 +24,9 @@ ...@@ -24,6 +24,9 @@
#include "eos/morphablemodel/Blendshape.hpp" #include "eos/morphablemodel/Blendshape.hpp"
#include "Eigen/Core" // for nnls.h
#include "nnls.h"
#include "opencv2/core/core.hpp" #include "opencv2/core/core.hpp"
#include <vector> #include <vector>
...@@ -34,10 +37,15 @@ namespace eos { ...@@ -34,10 +37,15 @@ namespace eos {
/** /**
* Fits blendshape coefficients to given 2D landmarks, given a current face shape instance. * Fits blendshape coefficients to given 2D landmarks, given a current face shape instance.
* It's a linear, closed-form solution fitting algorithm, with regularisation (constraining the L2-norm of the coefficients). * It's a linear, closed-form solution fitting algorithm, with regularisation (constraining
* the L2-norm of the coefficients). However, there is no constraint on the coefficients,
* so negative coefficients are allowed, which, with linear blendshapes (offsets), will most
* likely not be desired. Thus, prefer the function
* fit_blendshapes_to_landmarks_nnls(std::vector<eos::morphablemodel::Blendshape>, cv::Mat, cv::Mat, std::vector<cv::Vec2f>, std::vector<int>).
* *
* This algorithm is very similar to the shape fitting in fit_shape_to_landmarks_linear. Instead of the PCA basis, the blendshapes are used, and instead of * This algorithm is very similar to the shape fitting in fit_shape_to_landmarks_linear.
* the mean, a current face instance is used to do the fitting from. * Instead of the PCA basis, the blendshapes are used, and instead of the mean, a current
* face instance is used to do the fitting from.
* *
* @param[in] blendshapes A vector with blendshapes to estimate the coefficients for. * @param[in] blendshapes A vector with blendshapes to estimate the coefficients for.
* @param[in] face_instance A shape instance from which the blendshape coefficients should be estimated (i.e. the current mesh without expressions, e.g. estimated from a previous PCA-model fitting). A 3m x 1 matrix. * @param[in] face_instance A shape instance from which the blendshape coefficients should be estimated (i.e. the current mesh without expressions, e.g. estimated from a previous PCA-model fitting). A 3m x 1 matrix.
...@@ -112,6 +120,87 @@ inline std::vector<float> fit_blendshapes_to_landmarks_linear(std::vector<eos::m ...@@ -112,6 +120,87 @@ inline std::vector<float> fit_blendshapes_to_landmarks_linear(std::vector<eos::m
return std::vector<float>(c_s); return std::vector<float>(c_s);
}; };
/**
 * Fits blendshape coefficients to given 2D landmarks, given a current face shape instance.
 * Uses non-negative least-squares (NNLS) to solve for the coefficients. The NNLS algorithm
 * used doesn't support any regularisation.
 *
 * This algorithm is very similar to the shape fitting in fit_shape_to_landmarks_linear.
 * Instead of the PCA basis, the blendshapes are used, and instead of the mean, a current
 * face instance is used to do the fitting from.
 *
 * @param[in] blendshapes A vector with blendshapes to estimate the coefficients for. Must not be empty, and all deformations are assumed to have the same number of rows.
 * @param[in] face_instance A shape instance from which the blendshape coefficients should be estimated (i.e. the current mesh without expressions, e.g. estimated from a previous PCA-model fitting). A 3m x 1 matrix.
 * @param[in] affine_camera_matrix A 3x4 affine camera matrix from model to screen-space (should probably be of type CV_32FC1 as all our calculations are done with float).
 * @param[in] landmarks 2D landmarks from an image to fit the blendshapes to.
 * @param[in] vertex_ids The vertex ids in the model that correspond to the 2D points.
 * @return The estimated blendshape-coefficients.
 */
inline std::vector<float> fit_blendshapes_to_landmarks_nnls(std::vector<eos::morphablemodel::Blendshape> blendshapes, cv::Mat face_instance, cv::Mat affine_camera_matrix, std::vector<cv::Vec2f> landmarks, std::vector<int> vertex_ids)
{
	using cv::Mat;
	assert(landmarks.size() == vertex_ids.size());
	assert(blendshapes.size() > 0); // we access blendshapes[0] below

	const int num_coeffs_to_fit = static_cast<int>(blendshapes.size());
	const int num_landmarks = static_cast<int>(landmarks.size());

	// Copy all blendshapes into a "basis" matrix with each blendshape being a column:
	Mat blendshapes_as_basis(blendshapes[0].deformation.rows, num_coeffs_to_fit, CV_32FC1);
	for (int i = 0; i < num_coeffs_to_fit; ++i)
	{
		blendshapes[i].deformation.copyTo(blendshapes_as_basis.col(i));
	}

	// $\hat{V} \in R^{3N\times m-1}$, subselect the rows of the eigenvector matrix $V$ associated with the $N$ feature points
	// And we insert a row of zeros after every third row, resulting in matrix $\hat{V}_h \in R^{4N\times m-1}$:
	Mat V_hat_h = Mat::zeros(4 * num_landmarks, num_coeffs_to_fit, CV_32FC1);
	int row_index = 0;
	for (int i = 0; i < num_landmarks; ++i) {
		Mat basis_rows = blendshapes_as_basis.rowRange(vertex_ids[i] * 3, (vertex_ids[i] * 3) + 3);
		basis_rows.copyTo(V_hat_h.rowRange(row_index, row_index + 3)); // all columns, i.e. all given blendshapes, are used
		row_index += 4; // replace 3 rows and skip the 4th one, it has all zeros
	}
	// Form a block diagonal matrix $P \in R^{3N\times 4N}$ in which the camera matrix C (P_Affine, affine_camera_matrix) is placed on the diagonal:
	Mat P = Mat::zeros(3 * num_landmarks, 4 * num_landmarks, CV_32FC1);
	for (int i = 0; i < num_landmarks; ++i) {
		Mat submatrix_to_replace = P.colRange(4 * i, (4 * i) + 4).rowRange(3 * i, (3 * i) + 3);
		affine_camera_matrix.copyTo(submatrix_to_replace);
	}
	// The landmarks in matrix notation (in homogeneous coordinates), $3N\times 1$
	Mat y = Mat::ones(3 * num_landmarks, 1, CV_32FC1);
	for (int i = 0; i < num_landmarks; ++i) {
		y.at<float>(3 * i, 0) = landmarks[i][0];
		y.at<float>((3 * i) + 1, 0) = landmarks[i][1];
		//y.at<float>((3 * i) + 2, 0) = 1; // already 1, stays (homogeneous coordinate)
	}
	// The mean, with an added homogeneous coordinate (x_1, y_1, z_1, 1, x_2, ...)^t
	Mat v_bar = Mat::ones(4 * num_landmarks, 1, CV_32FC1);
	for (int i = 0; i < num_landmarks; ++i) {
		cv::Vec4f model_mean(face_instance.at<float>(vertex_ids[i]*3), face_instance.at<float>(vertex_ids[i]*3 + 1), face_instance.at<float>(vertex_ids[i]*3 + 2), 1.0f);
		v_bar.at<float>(4 * i, 0) = model_mean[0];
		v_bar.at<float>((4 * i) + 1, 0) = model_mean[1];
		v_bar.at<float>((4 * i) + 2, 0) = model_mean[2];
		//v_bar.at<float>((4 * i) + 3, 0) = 1; // already 1, stays (homogeneous coordinate)
		// note: now that a Vec4f is returned, we could use copyTo?
	}

	// Bring into standard least squares form (no regularisation is used with NNLS):
	Mat A = P * V_hat_h; // camera matrix times the basis
	Mat b = P * v_bar - y; // camera matrix times the mean, minus the landmarks.
	// Solve using NNLS. Eigen maps are used as non-copying views onto the OpenCV data
	// (cv::Mat is row-major, hence the RowMajor map type):
	using RowMajorMatrixXf = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
	Eigen::Map<RowMajorMatrixXf> A_Eigen(A.ptr<float>(), A.rows, A.cols);
	Eigen::Map<RowMajorMatrixXf> b_Eigen(b.ptr<float>(), b.rows, b.cols);

	Eigen::VectorXf x;
	// NOTE(review): the solver's success flag is deliberately discarded here; on failure,
	// the (possibly meaningless) solution is returned without any indication to the caller.
	Eigen::NNLS<Eigen::MatrixXf>::solve(A_Eigen, -b_Eigen, x);

	// Copy the solution into a std::vector<float>, via an OpenCV Mat header over the Eigen data:
	Mat c_s(x.rows(), x.cols(), CV_32FC1, x.data());
	return std::vector<float>(c_s);
};
} /* namespace fitting */ } /* namespace fitting */
} /* namespace eos */ } /* namespace eos */
......
This diff is collapsed.
This diff is collapsed.
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: include/eos/morphablemodel/EdgeTopology.hpp
*
* Copyright 2016 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef EDGETOPOLOGY_HPP_
#define EDGETOPOLOGY_HPP_
#include "cereal/cereal.hpp"
#include "cereal/types/array.hpp"
#include "cereal/types/vector.hpp"
#include "cereal/archives/json.hpp"
#include <array>
#include <vector>
#include <fstream>
namespace eos {
namespace morphablemodel {
/**
 * @brief A struct containing a 3D shape model's edge topology.
 *
 * This struct contains all edges of a 3D mesh, and for each edge, it
 * contains the two faces and the two vertices that are adjacent to that
 * edge. This is used in the iterated closest edge fitting (ICEF).
 *
 * Note: The indices are 1-based, so 1 needs to be subtracted before using
 * them as mesh indices. An index of 0 as first array element means that
 * it's an edge that lies on the mesh boundary, i.e. they are only
 * adjacent to one face.
 * We should explore a less error-prone way to store this data, but that's
 * how it is done in Matlab by the original code.
 *
 * adjacent_faces.size() is equal to adjacent_vertices.size().
 */
struct EdgeTopology {
	std::vector<std::array<int, 2>> adjacent_faces; // num_edges x 2 matrix storing faces adjacent to each edge
	std::vector<std::array<int, 2>> adjacent_vertices; // num_edges x 2 matrix storing vertices adjacent to each edge

	// cereal needs access to the (non-public-API) serialize member:
	friend class cereal::access;
	/**
	 * Serialises this class using cereal.
	 *
	 * @param[in] archive The archive to serialise to (or to serialise from).
	 */
	template<class Archive>
	void serialize(Archive& archive)
	{
		archive(CEREAL_NVP(adjacent_faces), CEREAL_NVP(adjacent_vertices));
	};
};
/**
* Saves a 3DMM edge topology file to a json file.
*
* @param[in] edge_topology A model's edge topology.
* @param[in] filename The file to write.
* @throws std::runtime_error if unable to open the given file for writing.
*/
inline void save_edge_topology(EdgeTopology edge_topology, std::string filename)
{
std::ofstream file(filename);
if (file.fail()) {
throw std::runtime_error("Error opening file for writing: " + filename);
}
cereal::JSONOutputArchive output_archive(file);
output_archive(cereal::make_nvp("edge_topology", edge_topology));
};
/**
 * Load a 3DMM edge topology file from a json file.
 *
 * @param[in] filename The file to load the edge topology from.
 * @return A struct containing the edge topology.
 * @throws std::runtime_error if unable to open the given file for reading.
 */
inline EdgeTopology load_edge_topology(const std::string& filename)
{
	EdgeTopology edge_topology;
	std::ifstream file(filename);
	if (file.fail()) {
		throw std::runtime_error("Error opening file for reading: " + filename);
	}
	// This is an input archive (the previous name 'output_archive' was misleading):
	cereal::JSONInputArchive input_archive(file);
	input_archive(cereal::make_nvp("edge_topology", edge_topology));

	return edge_topology;
};
} /* namespace morphablemodel */
} /* namespace eos */
#endif /* EDGETOPOLOGY_HPP_ */
...@@ -24,6 +24,9 @@ ...@@ -24,6 +24,9 @@
#include "eos/render/Mesh.hpp" #include "eos/render/Mesh.hpp"
#include "glm/vec3.hpp"
#include "glm/geometric.hpp"
#include "opencv2/core/core.hpp" #include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/imgproc/imgproc.hpp"
...@@ -99,6 +102,24 @@ inline cv::Vec3f calculate_face_normal(const cv::Vec3f& v0, const cv::Vec3f& v1, ...@@ -99,6 +102,24 @@ inline cv::Vec3f calculate_face_normal(const cv::Vec3f& v0, const cv::Vec3f& v1,
return n; return n;
}; };
/**
 * Computes the normal of a face (or triangle), i.e. the
 * per-face normal. Return normal will be normalised.
 * Assumes the triangle is given in CCW order, i.e. vertices
 * in counterclockwise order on the screen are front-facing.
 *
 * @param[in] v0 First vertex.
 * @param[in] v1 Second vertex.
 * @param[in] v2 Third vertex.
 * @return The unit-length normal of the given triangle.
 */
inline glm::vec3 compute_face_normal(const glm::vec3& v0, const glm::vec3& v1, const glm::vec3& v2)
{
	// 'inline' is required: this function is defined in a header (like the sibling
	// calculate_face_normal above), and a non-inline definition would violate the ODR
	// as soon as the header is included in more than one translation unit.
	glm::vec3 n = glm::cross(v1 - v0, v2 - v0); // v0-to-v1 x v0-to-v2
	n = glm::normalize(n);
	return n;
};
/** /**
* Draws the texture coordinates (uv-coords) of the given mesh * Draws the texture coordinates (uv-coords) of the given mesh
* into an image by looping over the triangles and drawing each * into an image by looping over the triangles and drawing each
......
...@@ -20,6 +20,10 @@ Files in this directory: ...@@ -20,6 +20,10 @@ Files in this directory:
6 expression blendshapes for the sfm_shape_3448 model. Contains the expressions angry, 6 expression blendshapes for the sfm_shape_3448 model. Contains the expressions angry,
disgust, fear, happy, sad and surprised. disgust, fear, happy, sad and surprised.
- sfm_3448_edge_topology.json:
Contains a precomputed list of the model's edges, and the two faces and vertices that are
adjacent to each edge. Used in the edge-fitting.
- model_contours.json: - model_contours.json:
Definition of the model's contour vertices of the right and left side of the face. Definition of the model's contour vertices of the right and left side of the face.
......
This diff is collapsed.
...@@ -41,6 +41,10 @@ target_link_libraries(scm-to-cereal ${OpenCV_LIBS} ${Boost_LIBRARIES}) ...@@ -41,6 +41,10 @@ target_link_libraries(scm-to-cereal ${OpenCV_LIBS} ${Boost_LIBRARIES})
add_executable(bfm-binary-to-cereal bfm-binary-to-cereal.cpp) add_executable(bfm-binary-to-cereal bfm-binary-to-cereal.cpp)
target_link_libraries(bfm-binary-to-cereal ${OpenCV_LIBS} ${Boost_LIBRARIES}) target_link_libraries(bfm-binary-to-cereal ${OpenCV_LIBS} ${Boost_LIBRARIES})
# Reads an edgestruct CSV file created from Matlab, and converts it to json:
add_executable(edgestruct-csv-to-json edgestruct-csv-to-json.cpp)
target_link_libraries(edgestruct-csv-to-json ${Boost_LIBRARIES})
# Store a json file as cereal .bin: # Store a json file as cereal .bin:
add_executable(json-to-cereal-binary json-to-cereal-binary.cpp) add_executable(json-to-cereal-binary json-to-cereal-binary.cpp)
target_link_libraries(json-to-cereal-binary ${OpenCV_LIBS} ${Boost_LIBRARIES}) target_link_libraries(json-to-cereal-binary ${OpenCV_LIBS} ${Boost_LIBRARIES})
...@@ -57,3 +61,4 @@ endif() ...@@ -57,3 +61,4 @@ endif()
install(TARGETS scm-to-cereal DESTINATION bin) install(TARGETS scm-to-cereal DESTINATION bin)
install(TARGETS bfm-binary-to-cereal DESTINATION bin) install(TARGETS bfm-binary-to-cereal DESTINATION bin)
install(TARGETS json-to-cereal-binary DESTINATION bin) install(TARGETS json-to-cereal-binary DESTINATION bin)
install(TARGETS edgestruct-csv-to-json DESTINATION bin)
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: utils/edgestruct-csv-to-json.cpp
*
* Copyright 2016 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "eos/morphablemodel/EdgeTopology.hpp"
#include "boost/program_options.hpp"
#include "boost/filesystem.hpp"
#include "boost/algorithm/string.hpp"
#include <array>
#include <vector>
#include <string>
#include <iostream>
#include <fstream>
using namespace eos;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using std::cout;
using std::endl;
// Careful, we're loading them from a 1-based indexing file! And store it as 1-based.
// Some values will contain zeros in the first element, which is a special value (= edge at the boundary of the mesh).
//
// Each line must contain two comma-separated integers; empty lines (e.g. a trailing
// newline) are skipped. Throws std::runtime_error if the file can't be opened or a
// non-empty line doesn't contain a comma, and propagates std::invalid_argument /
// std::out_of_range from std::stoi for non-numeric values.
std::vector<std::array<int, 2>> read_edgestruct_csv(std::string filename)
{
	using std::string;

	std::vector<std::array<int, 2>> vec;
	std::ifstream file(filename);
	if (!file.is_open()) {
		throw std::runtime_error(string("Could not open file: " + filename));
	}

	string line;
	while (std::getline(file, line))
	{
		if (line.empty()) {
			continue; // skip blank (e.g. trailing) lines - stoi("") would throw
		}
		const auto comma = line.find(',');
		if (comma == string::npos) {
			// The previous boost::split-based code indexed vals[1] without checking,
			// which was out-of-bounds (UB) for a line without a comma.
			throw std::runtime_error(string("Expected two comma-separated values per line in file: " + filename));
		}
		// std::stoi skips leading whitespace and stops at the first non-digit,
		// so this parses "3,7" as well as "3, 7":
		vec.push_back({ std::stoi(line), std::stoi(line.substr(comma + 1)) });
	}
	return vec;
};
/**
* Reads two edgestruct CSV files created from Matlab (one with adjacent faces,
* one with adjacent vertices), converts them to an EdgeTopology struct, and
* stores that as json file.
*/
int main(int argc, char *argv[])
{
fs::path input_adj_faces, input_adj_vertices, outputfile;
try {
po::options_description desc("Allowed options");
desc.add_options()
("help,h",
"display the help message")
("faces,f", po::value<fs::path>(&input_adj_faces)->required(),
"input edgestruct csv file from Matlab with a list of adjacent faces")
("vertices,v", po::value<fs::path>(&input_adj_vertices)->required(),
"input edgestruct csv file from Matlab with a list of adjacent vertices")
("output,o", po::value<fs::path>(&outputfile)->required()->default_value("converted_edge_topology.json"),
"output filename for the converted .json file")
;
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
if (vm.count("help")) {
cout << "Usage: edgestruct-csv-to-json [options]" << endl;
cout << desc;
return EXIT_SUCCESS;
}
po::notify(vm);
}
catch (const po::error& e) {
cout << "Error while parsing command-line arguments: " << e.what() << endl;
cout << "Use --help to display a list of options." << endl;
return EXIT_SUCCESS;
}
morphablemodel::EdgeTopology edge_info;
edge_info.adjacent_faces = read_edgestruct_csv(input_adj_faces.string());
edge_info.adjacent_vertices = read_edgestruct_csv(input_adj_vertices.string());
morphablemodel::save_edge_topology(edge_info, outputfile.string());
cout << "Saved EdgeTopology in json file: " << outputfile.string() << "." << endl;
return EXIT_SUCCESS;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment