Commit b4d3ea48 authored by Richard Torenvliet's avatar Richard Torenvliet

Add a way of rendering the output on the last frame, duplicated fitting...

Add a way of rendering the output on the last frame; duplicated the fitting function so it can be improved separately and merged back easily later
parent e94b6716
@@ -23,7 +23,7 @@ option(EOS_BUILD_EXAMPLES "Build the example applications." ON)
message(STATUS "EOS_BUILD_EXAMPLES: ${EOS_BUILD_EXAMPLES}")
option(EOS_BUILD_CERES_EXAMPLE "Build the fit-model-ceres example (requires Ceres)." OFF)
message(STATUS "EOS_BUILD_CERES_EXAMPLE: ${EOS_BUILD_CERES_EXAMPLE}")
-option(EOS_BUILD_UTILS "Build utility applications." OFF)
+option(EOS_BUILD_UTILS "Build utility applications." ON)
message(STATUS "EOS_BUILD_UTILS: ${EOS_BUILD_UTILS}")
option(EOS_BUILD_DOCUMENTATION "Build the library documentation." OFF)
message(STATUS "EOS_BUILD_DOCUMENTATION: ${EOS_BUILD_DOCUMENTATION}")
@@ -97,6 +97,7 @@ set(HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/fitting.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/ceres_nonlinear.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/RenderingParameters.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/ReconstructionData.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/utils.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/render.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/render_affine.hpp
@@ -155,9 +156,7 @@ if(EOS_BUILD_EXAMPLES)
add_subdirectory(examples)
endif()
-if(EOS_BUILD_UTILS)
-add_subdirectory(utils)
-endif()
+add_subdirectory(utils)
if(EOS_BUILD_DOCUMENTATION)
add_subdirectory(doc)
@@ -153,9 +153,10 @@ namespace eos {
*/
template<typename vec2f>
std::pair <std::vector<core::LandmarkCollection<vec2f>>, std::vector<std::string>>
-load_annotations(
-std::vector<std::string>annotations,
-core::LandmarkMapper mapper, morphablemodel::MorphableModel morphable_model, bool read_from_file = false) {
+load_annotations(std::vector<std::string> annotations,
+                 core::LandmarkMapper mapper,
+                 morphablemodel::MorphableModel morphable_model,
+                 bool read_from_file = false) {
std::vector<core::LandmarkCollection<vec2f>> landmark_collection_list;
// load file names from one file.
@@ -187,6 +188,106 @@ namespace eos {
return std::make_pair(landmark_collection_list, annotations);
}
/**
 * Sub-selects the landmarks for which the landmark mapper defines a mapping, and
 * appends the corresponding model points, vertex indices and image points as one
 * new entry to each of the output vectors.
 *
 * @tparam vec2f Type of a 2D point (e.g. cv::Vec2f).
 * @tparam vec4f Type of a homogeneous 3D point (e.g. cv::Vec4f).
 * @param[in] landmarks The detected 2D landmarks of one image.
 * @param[in] landmark_mapper Mapping from landmark names to model vertex indices.
 * @param[in] mesh The current mesh of that image.
 * @param[out] model_points 3D model points; one list per image is appended.
 * @param[out] vertex_indices Vertex indices; one list per image is appended.
 * @param[out] image_points 2D landmark points; one list per image is appended.
 */
template <typename vec2f, typename vec4f>
inline void subselect_landmarks(const core::LandmarkCollection<vec2f>& landmarks,
                                const core::LandmarkMapper& landmark_mapper,
                                const eos::core::Mesh& mesh,
                                vector<vector<vec4f>>& model_points,
                                vector<vector<int>>& vertex_indices,
                                vector<vector<vec2f>>& image_points) {
    // Use the template types here, so the function also compiles for non-OpenCV point types:
    vector<vec4f> current_model_points;
    vector<int> current_vertex_indices;
    vector<vec2f> current_image_points;
for (auto &lm: landmarks) {
auto converted_name = landmark_mapper.convert(lm.name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
vec4f vertex(
mesh.vertices[vertex_idx].x,
mesh.vertices[vertex_idx].y,
mesh.vertices[vertex_idx].z,
mesh.vertices[vertex_idx].w
);
current_model_points.emplace_back(vertex);
current_vertex_indices.emplace_back(vertex_idx);
current_image_points.emplace_back(lm.coordinates);
}
model_points.push_back(current_model_points);
vertex_indices.push_back(current_vertex_indices);
image_points.push_back(current_image_points);
}
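// Usage sketch (caller-side names, not a fixed API); called once per frame, e.g.:
//   core::subselect_landmarks<cv::Vec2f, cv::Vec4f>(landmarks[j], landmark_mapper,
//       current_mesh, model_points, vertex_indices, image_points);
// Each call appends exactly one new list to each of the three output vectors.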
/**
 * As the name suggests, in the multi-frame fitting we can subselect landmarks according to the
 * landmark mapper: for every frame, only the landmarks with a defined mapping are kept.
 *
 * @tparam vec2f Type of a 2D point (e.g. cv::Vec2f).
 * @tparam vec4f Type of a homogeneous 3D point (e.g. cv::Vec4f).
 * @param[in] landmarks Landmark collections, one per frame.
 * @param[in] landmark_mapper Mapping from landmark names to model vertex indices.
 * @param[in] meshs The current meshes, one per frame.
 * @param[out] model_points 3D model points, one list per frame.
 * @param[out] vertex_indices Vertex indices, one list per frame.
 * @param[out] image_points 2D landmark points, one list per frame.
 */
template <typename vec2f, typename vec4f>
inline void subselect_landmarks_multi(const std::vector<core::LandmarkCollection<vec2f>>& landmarks,
                                      const core::LandmarkMapper& landmark_mapper,
                                      const vector<eos::core::Mesh>& meshs,
                                      vector<vector<vec4f>>& model_points,
                                      vector<vector<int>>& vertex_indices,
                                      vector<vector<vec2f>>& image_points) {
    // Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM),
    // and get the corresponding model points (mean if given no initial coeffs, from the computed shape otherwise):
    for (std::size_t i = 0; i < landmarks.size(); i++) {
        // These have to be re-created for every frame; declaring them outside the loop
        // would accumulate the previous frames' correspondences into the current frame's lists.
        vector<vec4f> current_model_points;
        vector<int> current_vertex_indices;
        vector<vec2f> current_image_points;

        auto landmark_list = landmarks[i];
        auto mesh = meshs[i];
for (auto &lm: landmark_list) {
auto converted_name = landmark_mapper.convert(lm.name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
vec4f vertex(
mesh.vertices[vertex_idx].x,
mesh.vertices[vertex_idx].y,
mesh.vertices[vertex_idx].z,
mesh.vertices[vertex_idx].w
);
current_model_points.emplace_back(vertex);
current_vertex_indices.emplace_back(vertex_idx);
current_image_points.emplace_back(lm.coordinates);
}
model_points.push_back(current_model_points);
vertex_indices.push_back(current_vertex_indices);
image_points.push_back(current_image_points);
}
}
}
}
//
// Created by RA Torenvliet on 12/04/2017.
//
#ifndef EOS_RECONSTRUCTIONDATA_HPP
#define EOS_RECONSTRUCTIONDATA_HPP

#include "eos/core/Landmark.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"

#include "opencv2/core/core.hpp"

#include <vector>
namespace eos {
namespace fitting {
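/**
 * @brief Bundles the data of a (multi-frame) reconstruction.
 *
 * Intended use, as far as it can be inferred from the example app: filled after
 * fitting and attached to the video iterator / keyframes (see the commented-out
 * setReconstructionResults() call there). One landmark collection and one affine
 * camera matrix are stored per frame.
 */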
struct ReconstructionData {
    morphablemodel::MorphableModel morphable_model;
    std::vector<morphablemodel::Blendshape> blendshapes;
    std::vector<core::LandmarkCollection<cv::Vec2f>> landmarks; // one collection per frame
    std::vector<cv::Mat> affine_camera_matrix; // one 3x4 affine camera matrix per frame
};
} /* namespace fitting */
} /* namespace eos */
#endif //EOS_RECONSTRUCTIONDATA_HPP
@@ -838,26 +838,242 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
);
};
inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
const morphablemodel::MorphableModel& morphable_model,
const std::vector<morphablemodel::Blendshape>& blendshapes,
const std::vector<core::LandmarkCollection<cv::Vec2f>>& landmarks,
const core::LandmarkMapper& landmark_mapper,
int image_width,
int image_height,
int num_images,
const morphablemodel::EdgeTopology& edge_topology,
const fitting::ContourLandmarks& contour_landmarks,
const fitting::ModelContour& model_contour,
int num_iterations,
boost::optional<int> num_shape_coefficients_to_fit,
float lambda,
boost::optional<fitting::RenderingParameters> initial_rendering_params,
std::vector<float>& pca_shape_coefficients,
std::vector<std::vector<float>>& blendshape_coefficients,
std::vector<std::vector<cv::Vec2f>>& fitted_image_points) {
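// Overview of this (work-in-progress, duplicated) variant, as implemented below:
//  1. Per image: build the current combined shape and mesh, sub-select the mapped
//     landmarks, estimate an orthographic pose, fit the blendshapes with NNLS, and
//     rebuild the mesh with the new expression fit.
//  2. Per image: add front-facing contour and occluding-edge correspondences.
//  3. Per image: re-estimate the pose from all correspondences; then fit the PCA
//     shape coefficients jointly over all images.
//  4. Per image: re-fit the blendshapes against the new PCA shape and rebuild the meshes.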
assert(blendshapes.size() > 0);
assert(num_iterations > 0); // Can we allow 0, for only the initial pose-fit?
assert(pca_shape_coefficients.size() <= morphable_model.get_shape_model().get_num_principal_components());
using std::vector;
using cv::Vec2f;
using cv::Vec4f;
using cv::Mat;
using Eigen::VectorXf;
using Eigen::MatrixXf;
if (!num_shape_coefficients_to_fit) {
num_shape_coefficients_to_fit = morphable_model.get_shape_model().get_num_principal_components();
}
if (pca_shape_coefficients.empty()) {
pca_shape_coefficients.resize(num_shape_coefficients_to_fit.get());
}
// TODO: This leaves the following case open: num_coeffs given is empty or defined, but the
// pca_shape_coefficients given is != num_coeffs or the model's max-coeffs. What to do then? Handle & document!
// Make sure there is one (zero-initialised) blendshape coefficient list per image:
while (blendshape_coefficients.size() < static_cast<std::size_t>(num_images)) {
    blendshape_coefficients.push_back(std::vector<float>(blendshapes.size()));
}
MatrixXf blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
// Current mesh - either from the given coefficients, or the mean:
VectorXf current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
vector<VectorXf> current_combined_shapes;
vector<eos::core::Mesh> current_meshs;
// The 2D and 3D point correspondences used for the fitting:
vector<vector<Vec4f>> model_points; // the points in the 3D shape model of all frames
vector<vector<int>> vertex_indices; // their vertex indices of all frames
vector<vector<Vec2f>> image_points; // the corresponding 2D landmark points of all frames
vector<fitting::RenderingParameters> rendering_params;
// Generate meshes from current shapes.
for (int j = 0; j < num_images; ++j) {
VectorXf current_combined_shape = current_pca_shape +
blendshapes_as_basis *
Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size()
);
eos::core::Mesh current_mesh = morphablemodel::sample_to_mesh(
current_combined_shape,
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
// Get the locations of the model locations of the meshes, vertex_indices and image points
// (equal to landmark coordinates), for every image / mesh.
core::subselect_landmarks<Vec2f, Vec4f>(
landmarks[j],
landmark_mapper,
current_mesh,
// output parameters
model_points,
vertex_indices,
image_points
);
// Get the current points from the last added image points and model points
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points.back(), model_points.back(), true, image_height
);
fitting::RenderingParameters current_rendering_params(current_pose, image_width, image_height);
rendering_params.push_back(current_rendering_params);
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_from_ortho, image_points.back(), vertex_indices.back()
);
// Mesh with same PCA coeffs as before, but new expression fit (this is relevant if no initial blendshape coeffs have been given):
current_combined_shape = current_pca_shape +
morphablemodel::to_matrix(blendshapes) *
Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size()
);
current_combined_shapes.push_back(current_combined_shape);
current_mesh = morphablemodel::sample_to_mesh(
current_combined_shape, morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
current_meshs.push_back(current_mesh);
}
// The static (fixed) landmark correspondences which will stay the same throughout
// the fitting (the inner face landmarks):
vector<vector<int>> fixed_vertex_indices (vertex_indices);
vector<vector<Vec2f>> fixed_image_points (image_points);
std::vector<cv::Mat> affine_from_orthos;
std::vector<VectorXf> mean_plus_blendshapes;
image_points = fixed_image_points;
vertex_indices = fixed_vertex_indices;
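// Note: unlike fit_shape_and_pose_multi, this duplicated variant currently runs the
// steps below only once; the fixed_* copies and the resets directly above only become
// meaningful once an outer num_iterations loop is put around this block again.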
for (int j = 0; j < num_images; ++j) {
// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
vector<Vec2f> image_points_contour;
vector<int> vertex_indices_contour;
auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks[j], contour_landmarks, model_contour, yaw_angle, current_meshs[j], rendering_params[j].get_modelview(), rendering_params[j].get_projection(), fitting::get_opencv_viewport(image_width, image_height));
// Add the contour correspondences to the set of landmarks that we use for the fitting:
vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
image_points[j] = fitting::concat(image_points[j], image_points_contour);
// Fit the occluding (away-facing) contour using the detected contour LMs:
vector<Eigen::Vector2f> occluding_contour_landmarks;
if (yaw_angle >= 0.0f) // positive yaw = subject looking to the left
{ // the left contour is the occluding one we want to use ("away-facing")
auto contour_landmarks_ = core::filter(landmarks[j], contour_landmarks.left_contour); // Can do this outside of the loop
std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
}
else {
auto contour_landmarks_ = core::filter(landmarks[j], contour_landmarks.right_contour);
std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
}
auto edge_correspondences = fitting::find_occluding_edge_correspondences(current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f);
image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);
// Get the model points of the current mesh, for all correspondences that we've got:
model_points[j].clear();
for (const auto& v : vertex_indices[j])
{
model_points[j].push_back({ current_meshs[j].vertices[v][0], current_meshs[j].vertices[v][1], current_meshs[j].vertices[v][2], current_meshs[j].vertices[v][3] });
}
// Re-estimate the pose, using all correspondences:
fitting::ScaledOrthoProjectionParameters current_pose = fitting::estimate_orthographic_projection_linear(image_points[j], model_points[j], true, image_height);
rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);
cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
affine_from_orthos.push_back(affine_from_ortho);
// Estimate the PCA shape coefficients with the current blendshape coefficients:
VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),blendshape_coefficients[j].size());
mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
}
pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi(morphable_model, affine_from_orthos, image_points, vertex_indices, mean_plus_blendshapes, lambda, num_shape_coefficients_to_fit);
// Estimate the blendshape coefficients with the current PCA model estimate:
current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
for (int j = 0; j < num_images; ++j) {
blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(blendshapes, current_pca_shape, affine_from_orthos[j], image_points[j], vertex_indices[j]);
current_combined_shapes[j] = current_pca_shape + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),blendshape_coefficients[j].size());
current_meshs[j] = morphablemodel::sample_to_mesh(current_combined_shapes[j], morphable_model.get_color_model().get_mean(), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
}
fitted_image_points = image_points;
return { current_meshs, rendering_params }; // I think we could also work with a Mat face_instance in this function instead of a Mesh, but it would convolute the code more (i.e. more complicated to access vertices).
}
} /* namespace fitting */
} /* namespace eos */
@@ -59,8 +59,16 @@ namespace eos {
* @param[in] model_standard_deviation The standard deviation of the 3D vertex points in the 3D model, projected to 2D (so the value is in pixels).
* @return The estimated shape-coefficients (alphas).
*/
-inline std::vector<float> fit_shape_to_landmarks_linear_multi(const morphablemodel::MorphableModel& morphable_model, std::vector<cv::Mat> affine_camera_matrix, std::vector<std::vector<cv::Vec2f>>& landmarks, std::vector<std::vector<int>>& vertex_ids, std::vector<Eigen::VectorXf> base_face=std::vector<Eigen::VectorXf>(), float lambda=3.0f, boost::optional<int> num_coefficients_to_fit=boost::optional<int>(), boost::optional<float> detector_standard_deviation=boost::optional<float>(), boost::optional<float> model_standard_deviation=boost::optional<float>())
-{
+inline std::vector<float> fit_shape_to_landmarks_linear_multi(
+    const morphablemodel::MorphableModel& morphable_model,
+    std::vector<cv::Mat> affine_camera_matrix,
+    std::vector<std::vector<cv::Vec2f>>& landmarks,
+    std::vector<std::vector<int>>& vertex_ids,
+    std::vector<Eigen::VectorXf> base_face = std::vector<Eigen::VectorXf>(),
+    float lambda = 3.0f,
+    boost::optional<int> num_coefficients_to_fit = boost::optional<int>(),
+    boost::optional<float> detector_standard_deviation = boost::optional<float>(),
+    boost::optional<float> model_standard_deviation = boost::optional<float>()) {
assert(affine_camera_matrix.size() == landmarks.size() && landmarks.size() == vertex_ids.size()); // same number of instances (i.e. images/frames) for each of them
@@ -100,7 +108,7 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi(const morphablemod
int y_index = 0; // also runs the same as P_index. Should rename to "running_index"?
// The mean, with an added homogeneous coordinate (x_1, y_1, z_1, 1, x_2, ...)^t
VectorXf v_bar = VectorXf::Ones(4 * total_num_landmarks_dimension);
-int v_bar_index = 0; // also runs the same as P_index. But be careful, if I change it to be only 1 variable, only increment it once! :-)
+int v_bar_index = 0; // also runs the same as P_index. But be careful: if merged into a single variable, it must only be incremented once! :-)
+// Well, I think that would get a bit messy, since we would need to increment inside the for (landmarks...) loop. Try to refactor some other way.
for (int k = 0; k < num_images; ++k)
@@ -124,8 +124,17 @@ namespace eos {
* @param[in] enable_far_clipping Whether vertices should be clipped against the far plane.
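* @param[in] colourbuffer Optional buffer to render into, e.g. a video frame converted to CV_8UC4; if empty, a blank buffer of viewport size is allocated (description inferred from the code below).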
* @return A pair with the colourbuffer as its first element and the depthbuffer as the second element.
*/
-inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> model_view_matrix, glm::tmat4x4<float> projection_matrix, int viewport_width, int viewport_height, const boost::optional<Texture>& texture = boost::none, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
-{
+inline std::pair<cv::Mat, cv::Mat> render(
+    core::Mesh mesh,
+    glm::tmat4x4<float> model_view_matrix,
+    glm::tmat4x4<float> projection_matrix,
+    int viewport_width,
+    int viewport_height,
+    const boost::optional<Texture>& texture = boost::none,
+    bool enable_backface_culling = false,
+    bool enable_near_clipping = true,
+    bool enable_far_clipping = true,
+    cv::Mat colourbuffer = cv::Mat()) { // an empty default Mat suffices; it is allocated below if not provided
// Some internal documentation / old todos or notes:
// maybe change and pass depthBuffer as an optional arg (&?), because usually we never need it outside the renderer. Or maybe even a getDepthBuffer().
// modelViewMatrix goes to eye-space (camera space), projection does ortho or perspective proj.
@@ -139,7 +148,10 @@ inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> m
using cv::Mat;
using std::vector;
-Mat colourbuffer = Mat::zeros(viewport_height, viewport_width, CV_8UC4); // make sure it's CV_8UC4?
+if (colourbuffer.empty()) {
+    colourbuffer = Mat::zeros(viewport_height, viewport_width, CV_8UC4); // make sure it's CV_8UC4?
+}
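// If the caller passed a non-empty colourbuffer (e.g. a video frame converted to
// CV_8UC4), the mesh is rasterised on top of it instead of onto a blank image;
// the example app's output_render() uses this to draw the fit over the last keyframe.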
Mat depthbuffer = std::numeric_limits<float>::max() * Mat::ones(viewport_height, viewport_width, CV_64FC1);
// Vertex shader:
@@ -147,15 +159,17 @@ inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> m
// Assemble the vertices, project to clip space, and store as detail::Vertex (the internal representation):
vector<detail::Vertex<float>> clipspace_vertices;
clipspace_vertices.reserve(mesh.vertices.size());
for (int i = 0; i < mesh.vertices.size(); ++i) { // "previously": mesh.vertex
glm::tvec4<float> clipspace_coords = projection_matrix * model_view_matrix * mesh.vertices[i];
glm::tvec3<float> vertex_colour;
if (mesh.colors.empty()) {
vertex_colour = glm::tvec3<float>(0.5f, 0.5f, 0.5f);
-}
-else {
+} else {
vertex_colour = mesh.colors[i];
}
clipspace_vertices.push_back(detail::Vertex<float>{clipspace_coords, vertex_colour, mesh.texcoords[i]});
}
@@ -234,6 +248,7 @@ inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> m
for (const auto& tri : triangles_to_raster) {
detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
}
return std::make_pair(colourbuffer, depthbuffer);
};
@@ -78,10 +78,10 @@ public:
this->fitting_result = fitting_result;
}
-cv::Mat frame;
+int frame_number;
float score = 0.0f;
+cv::Mat frame;
fitting::FittingResult fitting_result;
};
@@ -323,7 +322,6 @@ public:
// TODO: build support for setting the amount of max_frames in the buffer.
BufferedVideoIterator(std::string videoFilePath, boost::property_tree::ptree settings) {
std::ifstream file(videoFilePath);
std::cout << "video file path: " << videoFilePath << std::endl;
if (!file.is_open()) {
throw std::runtime_error("Error opening given file: " + videoFilePath);
@@ -359,7 +358,6 @@ public:
cv::Mat frame;
cap >> frame;
// Pop if we exceeded max_frames.
if (n_frames > max_frames) {
frame_buffer.pop_front();
@@ -373,11 +371,11 @@ public:
n_frames++;
std::cout << total_frames_processed << ": laplacian score " << frame_laplacian_score << std::endl;
} else {
-std::cout << total_frames_processed << ": skipping frame " << std::endl;
+std::cout << total_frames_processed << ": skipping frame (";
if (frame_laplacian_score == 0) {
-    std::cout << "total black" << std::endl;
+    std::cout << "total black): " << frame_laplacian_score << std::endl;
} else {
-    std::cout << "too blurry" << std::endl;
+    std::cout << "too blurry): " << frame_laplacian_score << std::endl;
}
}
}
@@ -495,7 +493,39 @@ private:
unsigned char threshold;
};
-} /* namespace video */
/**
* @brief Merges PCA coefficients from a live video with a simple averaging.
*/
class PcaCoefficientMerging
{
public:
/**
* @brief Merges the given new PCA coefficients with all previously processed coefficients.
*
* @param[in] coefficients The new coefficients to add.
* @return The merged coefficients of all images processed so far.
*/
std::vector<float> add_and_merge(const std::vector<float>& coefficients)
{
    if (merged_shape_coefficients.empty()) {
        merged_shape_coefficients = cv::Mat::zeros(coefficients.size(), 1, CV_32FC1);
    }
    assert(static_cast<int>(coefficients.size()) == merged_shape_coefficients.rows);

    cv::Mat new_coefficients(coefficients); // column-vector view of the input
    // Incremental (running) mean over all frames processed so far:
    merged_shape_coefficients = (merged_shape_coefficients * num_processed_frames + new_coefficients) / (num_processed_frames + 1.0f);
    ++num_processed_frames;
    return std::vector<float>(merged_shape_coefficients.begin<float>(), merged_shape_coefficients.end<float>());
}
private:
int num_processed_frames = 0;
cv::Mat merged_shape_coefficients;
};
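// Usage sketch (illustrative only), as in the example app's output_render():
//   eos::video::PcaCoefficientMerging pca_shape_merging;
//   pca_shape_coefficients = pca_shape_merging.add_and_merge(pca_shape_coefficients);
// i.e. the running mean merged_n = (merged_{n-1} * (n-1) + x_n) / n over all passes.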
} /* namespace video */
} /* namespace eos */
#endif /* KEYFRAME_HPP_ */
[video]
max_frames=6
min_frames=5
drop_frames=0
laplacian_threshold=1000
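; Key semantics are inferred from BufferedVideoIterator and may not be exact:
; max_frames caps the keyframe buffer, min_frames is presumably the minimum number of
; keyframes before fitting starts, drop_frames presumably skips frames between reads,
; and laplacian_threshold rejects frames whose Laplacian (blur) score is too low.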
@@ -24,6 +24,7 @@
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
#include "eos/video/Keyframe.hpp"
#include "eos/fitting/ReconstructionData.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/opencv.hpp"
@@ -55,6 +56,8 @@ using std::string;
using namespace cv;
typedef unsigned int uint;
/**
* Draws the given mesh as wireframe into the image.
*
@@ -74,8 +77,7 @@ void draw_wireframe(cv::Mat image, const eos::core::Mesh& mesh, glm::mat4x4 mode
const auto p1 = glm::project({ mesh.vertices[triangle[0]][0], mesh.vertices[triangle[0]][1], mesh.vertices[triangle[0]][2] }, modelview, projection, viewport);
const auto p2 = glm::project({ mesh.vertices[triangle[1]][0], mesh.vertices[triangle[1]][1], mesh.vertices[triangle[1]][2] }, modelview, projection, viewport);
const auto p3 = glm::project({ mesh.vertices[triangle[2]][0], mesh.vertices[triangle[2]][1], mesh.vertices[triangle[2]][2] }, modelview, projection, viewport);
-if (eos::render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3)))
-{
+if (eos::render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3))) {
cv::line(image, cv::Point(p1.x, p1.y), cv::Point(p2.x, p2.y), colour);
cv::line(image, cv::Point(p2.x, p2.y), cv::Point(p3.x, p3.y), colour);
cv::line(image, cv::Point(p3.x, p3.y), cv::Point(p1.x, p1.y), colour);
@@ -113,7 +115,9 @@ void evaluate_accuracy(
core::LandmarkCollection<cv::Vec2f> landmarks,
core::LandmarkMapper landmark_mapper,
const eos::core::Mesh& mesh,
-Mat affine_from_ortho) {
+Mat affine_from_ortho,
+boost::property_tree::ptree settings) {
vector<float> total_error;
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
@@ -147,27 +151,157 @@
float accum = 0.0;
// Use a float initial value (0.0f), otherwise std::accumulate truncates the float errors to int,
// and average over the number of errors actually computed:
float mean_error = std::accumulate(total_error.begin(), total_error.end(), 0.0f) / total_error.size();

// calc. standard deviation
-std::for_each (std::begin(total_error), std::end(total_error), [&](const float d) {
+for (auto &d: total_error) {
    accum += (d - mean_error) * (d - mean_error);
-});
+}
float stddev = std::sqrt(accum / (total_error.size() - 1));
std::cout << "stddev/mean_error: " << stddev << " " << mean_error << std::endl;
}
/**
 * Renders the merged reconstruction on top of the last (newest) keyframe and shows it.
 *
 * @param[in] key_frames The current keyframes sampled from the video.
 * @param[in] rendering_paramss Rendering (pose/camera) parameters, one per keyframe.
 * @param[in] landmark_list Landmark collections, one per keyframe.
 * @param[in] morphable_model The Morphable Model used for the reconstruction.
 * @param[in] blend_shapes The expression blendshapes.
 * @param[in] meshs The fitted meshes, one per keyframe.
 * @param[in] pca_shape_coefficients The current PCA shape coefficients.
 * @param[in] blend_shape_coefficients Blendshape coefficients, one set per keyframe.
 * @param[in] fitted_image_points The 2D image points used in the fitting.
 * @param[in] annotations Annotation file paths; the first is used as the output file base.
 * @param[in] landmark_mapper Mapping from landmark names to model vertex indices.
 * @param[in] settings The reconstruction settings read from the .ini file.
 * @param[in] n_iter The current iteration number.
 */
void output_render(
std::deque<eos::video::Keyframe> key_frames,
std::vector<fitting::RenderingParameters> rendering_paramss,
std::vector<core::LandmarkCollection<cv::Vec2f>> landmark_list,
morphablemodel::MorphableModel morphable_model,
vector<morphablemodel::Blendshape> blend_shapes,
vector<core::Mesh> meshs,
std::vector<float> pca_shape_coefficients,
std::vector<std::vector<float>> blend_shape_coefficients,
std::vector<std::vector<cv::Vec2f>> fitted_image_points,
std::vector<std::string> annotations,
core::LandmarkMapper landmark_mapper,
boost::property_tree::ptree settings,
int n_iter) {
Mat merged_isomap;
WeightedIsomapAveraging isomap_averaging(60.f); // merge all triangles that are facing <60° towards the camera
eos::video::PcaCoefficientMerging pca_shape_merging;
auto outputfilebase = annotations[0];
for (uint i = 0; i < key_frames.size(); ++i) {
Mat frame = key_frames[i].frame;
auto unmodified_frame = frame.clone();
int frame_number = key_frames[i].frame_number;
// float yaw_angle = glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
// Extract the texture using the fitted mesh from this frame:
Mat affine_cam = fitting::get_3x4_affine_camera_matrix(rendering_paramss[i], frame.cols, frame.rows);
Mat isomap = render::extract_texture(
meshs[i],
affine_cam,
unmodified_frame,
true,
render::TextureInterpolation::NearestNeighbour,
512
);
// Merge the isomaps - add the current one to the already merged ones:
merged_isomap = isomap_averaging.add_and_merge(isomap);
}
// Get last frame (that's the newest one).
Mat frame = key_frames.back().frame;
auto last_rendering_params = rendering_paramss.back();
auto last_blend_coefficients = blend_shape_coefficients.back();
int frame_width = frame.cols;
int frame_height = frame.rows;
// Same for the shape:
pca_shape_coefficients = pca_shape_merging.add_and_merge(pca_shape_coefficients);
auto blendshapes_as_basis = morphablemodel::to_matrix(blend_shapes);
auto merged_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients) +
blendshapes_as_basis *
Eigen::Map<const Eigen::VectorXf>(last_blend_coefficients.data(),
last_blend_coefficients.size()
);
auto merged_mesh = morphablemodel::sample_to_mesh(
merged_shape,
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
// Render the model in a separate window using the estimated pose, shape and merged texture:
Mat rendering;
// render() needs a 4-channel, 8-bit image; this needs a two-step conversion:
cvtColor(frame, rendering, CV_BGR2BGRA);

// make sure the image is CV_8UC4, maybe do a check first?
rendering.convertTo(rendering, CV_8UC4);
std::tie(rendering, std::ignore) = render::render(
merged_mesh,
last_rendering_params.get_modelview(),
last_rendering_params.get_projection(),
frame_width,
frame_height,
render::create_mipmapped_texture(merged_isomap),
true,
false,
false,
rendering
);
cv::imshow("render", rendering);
cv::waitKey(10);
}
/**
 * Evaluates the fitting results: extracts and merges the isomaps, writes frontal
 * renderings and the textured mesh to disk, and prints the landmark error.
 *
 * @param[in] key_frames The current keyframes sampled from the video.
 * @param[in] rendering_paramss Rendering (pose/camera) parameters, one per keyframe.
 * @param[in] landmark_list Landmark collections, one per keyframe.
 * @param[in] morphable_model The Morphable Model used for the reconstruction.
 * @param[in] blend_shapes The expression blendshapes.
 * @param[in] meshs The fitted meshes, one per keyframe.
 * @param[in] pca_shape_coefficients The current PCA shape coefficients.
 * @param[in] blend_shape_coefficients Blendshape coefficients, one set per keyframe.
 * @param[in] fitted_image_points The 2D image points used in the fitting.
 * @param[in] annotations Annotation file paths; used to derive the output file names.
 * @param[in] landmark_mapper Mapping from landmark names to model vertex indices.
 * @param[in] settings The reconstruction settings read from the .ini file.
 * @param[in] n_iter The current iteration number.
 */
void evaluate_results(
std::deque<eos::video::Keyframe> key_frames,
std::vector<fitting::RenderingParameters> rendering_paramss,
std::vector<core::LandmarkCollection<cv::Vec2f>> landmark_list,
morphablemodel::MorphableModel morphable_model,
vector<morphablemodel::Blendshape> blend_shapes,
vector<core::Mesh> meshs,
std::vector<float> pca_shape_coefficients,
std::vector<std::vector<float>> blend_shape_coefficients,
std::vector<std::vector<cv::Vec2f>> fitted_image_points,
std::vector<std::string> annotations,
core::LandmarkMapper landmark_mapper,
boost::property_tree::ptree settings,
int n_iter) {
WeightedIsomapAveraging isomap_averaging(60.f); // merge all triangles that are facing <60° towards the camera
@@ -226,21 +360,32 @@ void evaluate_results(
morphable_model.get_texture_coordinates()
);
-std::tie(frontal_rendering, std::ignore) = eos::render::render(neutral_expression, modelview_frontal, glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f), 256, 256, render::create_mipmapped_texture(isomap), true, false, false);
-cv::imwrite(outputfile + iter + ".frontal.png", frontal_rendering);
// And save the isomap:
outimg = frame.clone();
cv::imwrite(outputfile + iter + ".isomap.png", isomap);
// merge the isomaps:
merged_isomap = isomap_averaging.add_and_merge(isomap);
std::tie(frontal_rendering, std::ignore) = eos::render::render(
neutral_expression,
rendering_paramss[i].get_modelview(),
rendering_paramss[i].get_projection(),
frame_width,
frame_height,
render::create_mipmapped_texture(merged_isomap),
true,
false,
false,
outimg
);
cv::imwrite(outputfile + iter + ".frontal.png", frontal_rendering);
evaluate_accuracy(
landmark_list[i],
landmark_mapper,
meshs[i],
-    affine_from_ortho
+    affine_from_ortho,
+    settings
);
}
@@ -257,8 +402,22 @@ void evaluate_results(
core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
morphable_model.get_color_model().get_mean(),
-    morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
-std::tie(frontal_rendering, std::ignore) = render::render(neutral_expression, modelview_frontal, glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f), 512, 512, render::create_mipmapped_texture(merged_isomap), true, false, false);
+    morphable_model.get_shape_model().get_triangle_list(),
+    morphable_model.get_color_model().get_triangle_list(),
+    morphable_model.get_texture_coordinates()
+);

+std::tie(frontal_rendering, std::ignore) = render::render(
+    neutral_expression,
+    modelview_frontal,
+    glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
+    512, 512,
+    render::create_mipmapped_texture(merged_isomap),
+    true,
+    false,
+    false
+);
cv::imwrite(outputfile + iter + ".merged_frontal.png", frontal_rendering);
// Save the mesh as textured obj:
@@ -276,12 +435,6 @@ boost::property_tree::ptree get_reconstruction_config(std::string filename) {
boost::property_tree::ptree pt;
boost::property_tree::ini_parser::read_ini(filename, pt);
std::cout << pt.get<std::string>("video.max_frames") << std::endl;
std::cout << pt.get<std::string>("video.drop_frames") << std::endl;
std::cout << pt.get<std::string>("video.min_frames") << std::endl;
std::cout << pt.get<std::string>("video.skip_frames") << std::endl;
std::cout << pt.get<std::string>("video.blur_threshold") << std::endl;
return pt;
}
@@ -296,7 +449,8 @@ boost::property_tree::ptree get_reconstruction_config(std::string filename) {
* @param[in] landmarks The full list of landmark collections, one per annotated frame.
* @return The landmark collections that belong to the given keyframes.
*/
-vector<core::LandmarkCollection<cv::Vec2f>> sample_landmarks(std::deque<eos::video::Keyframe> key_frames, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
+vector<core::LandmarkCollection<cv::Vec2f>> sample_landmarks(
+    std::deque<eos::video::Keyframe> key_frames, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
vector<core::LandmarkCollection<cv::Vec2f>> sublist;
for (auto& f : key_frames) {
@@ -319,7 +473,7 @@ int main(int argc, char *argv[]) {
fs::path modelfile, isomapfile, videofile, landmarksfile, mappingsfile, contourfile, edgetopologyfile, blendshapesfile, outputfile, reconstruction_config;
std::vector<std::string> annotations;
-// get annotaitions from one file
+// get annotations from one file
bool get_annotations = false;
try {
@@ -331,7 +485,7 @@ int main(int argc, char *argv[]) {
"a Morphable Model stored as cereal BinaryArchive")
("video,v", po::value<fs::path>(&videofile)->required(),
"an input image")
("config,c", po::value<fs::path>(&reconstruction_config)->default_value("../share/default_reconstruction_config.ini"),
("config,f", po::value<fs::path>(&reconstruction_config)->default_value("../share/default_reconstruction_config.ini"),
"configuration file for the reconstruction")
("get_annotations,g", po::bool_switch(&get_annotations)->default_value(false),
"read .pts annotation file locations from one file, put one file path on each line")
@@ -365,9 +519,25 @@ int main(int argc, char *argv[]) {
return EXIT_SUCCESS;
}
-// start loading prerequisites
+// Start loading prerequisites:
morphablemodel::MorphableModel morphable_model;
std::vector<float> pca_shape_coefficients;
// List of blend_shapes coefficients:
std::vector<std::vector<float>> blend_shape_coefficients;
std::vector<std::vector<cv::Vec2f>> fitted_image_points;
// These will be the final 2D and 3D points used for the fitting:
vector<cv::Vec3f> model_points; // the points in the 3D shape model
vector<int> vertex_indices; // their vertex indices
vector<core::Mesh> meshs;
vector<fitting::RenderingParameters> rendering_paramss;
// Load landmarks, LandmarkMapper and the Morphable Model:
auto landmark_mapper = core::LandmarkMapper(mappingsfile);
// Load all annotation files into lists of landmarks:
vector<core::LandmarkCollection<cv::Vec2f>> landmark_list;
try {
morphable_model = morphablemodel::load_model(modelfile.string());
} catch (const std::runtime_error &e) {
@@ -375,36 +545,32 @@ int main(int argc, char *argv[]) {
return EXIT_FAILURE;
}
-// Load landmarks, LandmarkMapper and the Morphable Model:
-core::LandmarkMapper landmark_mapper = core::LandmarkMapper(mappingsfile);
-// load all annotation files into lists of landmarks
-vector<core::LandmarkCollection<cv::Vec2f>> landmark_list;

+// Load all annotations using the input parameters:
try {
-    std::tie(landmark_list, annotations) = eos::core::load_annotations<cv::Vec2f>(annotations, landmark_mapper, morphable_model, get_annotations);
+    std::tie(landmark_list, annotations) = eos::core::load_annotations<cv::Vec2f>(
+        annotations,
+        landmark_mapper,
+        morphable_model,
+        get_annotations);
} catch(const std::runtime_error &e) {
std::cout << e.what() << std::endl;
return EXIT_FAILURE;
}
// The expression blendshapes:
-vector<morphablemodel::Blendshape> blend_shapes = morphablemodel::load_blendshapes(blendshapesfile.string());
+auto blend_shapes = morphablemodel::load_blendshapes(blendshapesfile.string());
// These two are used to fit the front-facing contour to the ibug contour landmarks:
-fitting::ModelContour model_contour = contourfile.empty() ? fitting::ModelContour() : fitting::ModelContour::load(contourfile.string());
-fitting::ContourLandmarks ibug_contour = fitting::ContourLandmarks::load(mappingsfile.string());
+auto model_contour = contourfile.empty() ? fitting::ModelContour() : fitting::ModelContour::load(contourfile.string());
+auto ibug_contour = fitting::ContourLandmarks::load(mappingsfile.string());

// The edge topology is used to speed up computation of the occluding face contour fitting:
-morphablemodel::EdgeTopology edge_topology = morphablemodel::load_edge_topology(edgetopologyfile.string());
+auto edge_topology = morphablemodel::load_edge_topology(edgetopologyfile.string());
-// These will be the final 2D and 3D points used for the fitting:
-vector<cv::Vec3f> model_points; // the points in the 3D shape model
-vector<int> vertex_indices; // their vertex indices
-vector<core::Mesh> meshs;
-vector<fitting::RenderingParameters> rendering_paramss;

+// Read the reconstruction settings from the .ini file:
+auto settings = get_reconstruction_config(reconstruction_config.string());

+// Start with the video play and get video file:
BufferedVideoIterator vid_iterator;
-boost::property_tree::ptree settings = get_reconstruction_config(reconstruction_config.string());
try {
vid_iterator = BufferedVideoIterator(videofile.string(), settings);
@@ -413,29 +579,21 @@ int main(int argc, char *argv[]) {
return EXIT_FAILURE;
}
-// iteration count
int frame_width = vid_iterator.width;
int frame_height = vid_iterator.height;

-// test with loading 10 frames subsequently.
+// Start getting video frames:
std::deque<eos::video::Keyframe> key_frames = vid_iterator.next();

+eos::fitting::ReconstructionData data; // currently unused; see the commented-out setReconstructionResults() call below
-std::vector<float> pca_shape_coefficients;
-std::vector<std::vector<float>> blend_shape_coefficients;
-std::vector<std::vector<cv::Vec2f>> fitted_image_points;

+// Count the number of iterations:
int n_iter = 0;
while(!(key_frames.empty())) {
-    if (n_iter == 10) {
-        break;
-    }
-    vector<core::LandmarkCollection<cv::Vec2f>> landmark_sublist = sample_landmarks(
-        key_frames, landmark_list
-    );
+    // if (n_iter == 100) {
+    //     break;
+    // }

+    // Generate a sublist of the total available landmark list:
+    auto landmark_sublist = sample_landmarks(key_frames, landmark_list);
-    std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi(
+    // Fit shape and pose:
+    std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_2(
morphable_model,
blend_shapes,
landmark_sublist,
@@ -455,31 +613,29 @@ int main(int argc, char *argv[]) {
fitted_image_points
);
-    evaluate_results(
+    // vid_iterator.setReconstructionResults(data);

+    // Render the output:
+    output_render(
key_frames,
rendering_paramss,
landmark_sublist,
morphable_model,
blend_shapes,
meshs,
pca_shape_coefficients,
blend_shape_coefficients,
fitted_image_points,
annotations,
landmark_mapper,
settings,
n_iter
);
// Get new frames:
key_frames = vid_iterator.next();
n_iter++;
}
return EXIT_SUCCESS;
}
// // iterate through all frames; not needed per se.
// for (std::deque<eos::video::Keyframe>::iterator it = frames.begin(); it != frames.end(); ++it) {
//     std::cout << it->score << " ";
//     frame_count++;
// }