Commit 2e906d10 authored by Richard Torenvliet

Incorporated pose binning into the video iterator - made the video iterator...

Incorporated pose binning into the video iterator - made the video iterator threaded so that it searches quickly for good frames.
parent 16715ec7
@@ -16,7 +16,6 @@
 #include <vector>
 #include <string>
-#include <vector>
 #include <algorithm>
 #include <iostream>
 #include <fstream>
@@ -189,27 +188,83 @@ namespace eos {
 		return std::make_pair(landmark_collection_list, annotations);
 	}

+	/**
+	 * Get a bounding box around the landmarks, clamped to the image boundaries.
+	 *
+	 * @param[in] image_points Landmark points in image coordinates.
+	 * @param[in] image_width Width of the source image.
+	 * @param[in] image_height Height of the source image.
+	 * @return The bounding box around the landmarks.
+	 */
+	cv::Rect get_face_roi(vector<Vec2f> image_points, int image_width, int image_height) {
+		cv::Rect bbox = cv::boundingRect(image_points);
+
+		// Clamp the box to the image width and height:
+		bbox.width = bbox.x + bbox.width < image_width ? bbox.width : image_width - bbox.x - 1;
+		bbox.height = bbox.y + bbox.height < image_height ? bbox.height : image_height - bbox.y - 1;
+
+		return bbox;
+	}
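A quick illustration of the clamping (an illustrative sketch, not part of this commit; the frame size and point values are hypothetical):

	// For a 640x480 frame, landmarks spanning x = 600..700 produce a box that
	// would overrun the right edge; get_face_roi cuts it at column 638:
	std::vector<cv::Vec2f> points = { {600.f, 100.f}, {700.f, 200.f} };
	cv::Rect roi = eos::core::get_face_roi(points, 640, 480);
	// roi.x == 600 and roi.width == 39, so frame(roi) stays inside the image
	// and cannot throw when a score is later computed on the face crop.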
 /**
+ * Get the mesh coordinates.
  *
  * @tparam vec2f
  * @tparam vec4f
- * @param landmarks
- * @param landmark_mapper
- * @param mesh
- * @param model_points
- * @param vertex_indices
- * @param image_points
+ * @param[in] landmarks
+ * @param[in] landmark_mapper
+ * @param[in] mesh
+ * @param[in,out] model_points
+ * @param[in,out] vertex_indices
+ * @param[in,out] image_points
+ */
+template <typename vec2f>
+inline void get_mesh_coordinates(core::LandmarkCollection<vec2f> landmarks,
+                                 const core::LandmarkMapper& landmark_mapper,
+                                 eos::core::Mesh& mesh,
+                                 vector<Vec4f>& model_points,
+                                 vector<int>& vertex_indices,
+                                 vector<vec2f>& image_points) {
+	for (auto& lm : landmarks) {
+		auto converted_name = landmark_mapper.convert(lm.name);
+		if (!converted_name) { // no mapping defined for the current landmark
+			continue;
+		}
+
+		int vertex_idx = std::stoi(converted_name.get());
+
+		// todo: see how you can support a template for Vec4f.
+		Vec4f vertex(
+			mesh.vertices[vertex_idx].x,
+			mesh.vertices[vertex_idx].y,
+			mesh.vertices[vertex_idx].z,
+			mesh.vertices[vertex_idx].w
+		);
+
+		model_points.emplace_back(vertex);
+		vertex_indices.emplace_back(vertex_idx);
+		image_points.emplace_back(lm.coordinates);
+	}
+}
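Illustrative usage (not part of this commit; it assumes a landmark collection, mapper and mesh are in scope): the three output vectors form the 2D-3D correspondence pairs that the pose estimation elsewhere in this commit consumes.

	vector<Vec4f> model_points; // 3D model points, in homogeneous coordinates
	vector<int> vertex_indices; // the corresponding vertex ids
	vector<Vec2f> image_points; // the corresponding 2D landmark positions
	core::get_mesh_coordinates(landmarks, landmark_mapper, mesh,
	                           model_points, vertex_indices, image_points);
	// model_points[i] <-> image_points[i] can now be fed to
	// fitting::estimate_orthographic_projection_linear(image_points, model_points, ...).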
+/**
+ * Get the mesh coordinates, given a set of landmarks.
+ *
+ * @tparam vec2f
+ * @tparam vec4f
+ * @param[in] landmarks
+ * @param[in] landmark_mapper
+ * @param[in] mesh
+ * @param[in,out] model_points
+ * @param[in,out] vertex_indices
+ * @param[in,out] image_points
  */
 template <typename vec2f, typename vec4f>
-inline void subselect_landmarks(core::LandmarkCollection<vec2f> landmarks,
+inline void get_landmark_coordinates(core::LandmarkCollection<vec2f> landmarks,
                                 const core::LandmarkMapper& landmark_mapper,
                                 eos::core::Mesh& mesh,
                                 vector<vector<vec4f>>& model_points,
                                 vector<vector<int>>& vertex_indices,
                                 vector<vector<vec2f>>& image_points) {
-	vector<Vec4f> current_model_points;
+	vector<vec4f> current_model_points;
 	vector<int> current_vertex_indices;
-	vector<Vec2f> current_image_points;
+	vector<vec2f> current_image_points;

 	for (auto& lm : landmarks) {
 		auto converted_name = landmark_mapper.convert(lm.name);
@@ -234,60 +289,6 @@ namespace eos {
 		vertex_indices.push_back(current_vertex_indices);
 		image_points.push_back(current_image_points);
 	}
-
-/**
- * As the name suggests, in the multi-frame fitting we can subselect landmarks according to the
- * landmark mapper.
- *
- * @tparam vec2f
- * @param[in] landmarks
- * @param[in] landmark_mapper
- * @param[in] meshs
- * @param[out] model_points
- * @param[out] vertex_indices
- * @param[out] image_points
- */
-template <typename vec2f, typename vec4f>
-inline void subselect_landmarks_multi(const std::vector<core::LandmarkCollection<vec2f>>& landmarks,
-                                      const core::LandmarkMapper& landmark_mapper,
-                                      vector<eos::core::Mesh> meshs,
-                                      vector<vector<vec4f>>& model_points,
-                                      vector<vector<int>>& vertex_indices,
-                                      vector<vector<vec2f>>& image_points) {
-	vector<Vec4f> current_model_points;
-	vector<int> current_vertex_indices;
-	vector<Vec2f> current_image_points;
-
-	// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM),
-	// and get the corresponding model points (mean if given no initial coeffs, from the computed shape otherwise):
-	for (int i = 0; i < landmarks.size(); i++) {
-		auto landmark_list = landmarks[i];
-		auto mesh = meshs[i];
-
-		for (auto& lm : landmark_list) {
-			auto converted_name = landmark_mapper.convert(lm.name);
-			if (!converted_name) { // no mapping defined for the current landmark
-				continue;
-			}
-
-			int vertex_idx = std::stoi(converted_name.get());
-			Vec4f vertex(
-				mesh.vertices[vertex_idx].x,
-				mesh.vertices[vertex_idx].y,
-				mesh.vertices[vertex_idx].z,
-				mesh.vertices[vertex_idx].w
-			);
-
-			current_model_points.emplace_back(vertex);
-			current_vertex_indices.emplace_back(vertex_idx);
-			current_image_points.emplace_back(lm.coordinates);
-		}
-
-		model_points.push_back(current_model_points);
-		vertex_indices.push_back(current_vertex_indices);
-		image_points.push_back(current_image_points);
-	}
-}
 }
 }
...
@@ -42,6 +42,7 @@
 struct FittingResult
 {
 	RenderingParameters rendering_parameters;
+	core::LandmarkCollection<cv::Vec2f> landmarks;
 	std::vector<float> pca_shape_coefficients;
 	std::vector<float> blendshape_coefficients;
 };
...
-//
-// Created by RA Torenvliet on 12/04/2017.
-//
-#ifndef EOS_RECONSTRUCTIONDATA_HPP
-#define EOS_RECONSTRUCTIONDATA_HPP
+#ifndef EOS_RECONSTRUCTIONDATA_HPP_
+#define EOS_RECONSTRUCTIONDATA_HPP_
+
+#include "eos/core/Landmark.hpp"
+#include "eos/core/LandmarkMapper.hpp"
+#include "eos/morphablemodel/MorphableModel.hpp"
+#include "eos/morphablemodel/Blendshape.hpp"
+#include "eos/morphablemodel/EdgeTopology.hpp"
+#include "eos/fitting/contour_correspondence.hpp"

 namespace eos {
 namespace fitting {

 struct ReconstructionData {
 	morphablemodel::MorphableModel morphable_model;
 	std::vector<morphablemodel::Blendshape> blendshapes;
-	std::vector<core::LandmarkCollection<cv::Vec2f>> landmarks;
-	std::vector<cv::Mat> affine_camera_matrix;
+	eos::core::LandmarkMapper landmark_mapper;
+	std::vector<core::LandmarkCollection<cv::Vec2f>> landmark_list;
+	eos::fitting::ModelContour model_contour;
+	eos::fitting::ContourLandmarks contour_landmarks;
+	eos::morphablemodel::EdgeTopology edge_topology;
 };

 }
...
@@ -22,8 +22,10 @@
 #ifndef FITTING_HPP_
 #define FITTING_HPP_

+#include "eos/video/Keyframe.hpp"
 #include "eos/core/Landmark.hpp"
 #include "eos/core/LandmarkMapper.hpp"
+#include "eos/core/landmark_utils.hpp"
 #include "eos/core/Mesh.hpp"
 #include "eos/morphablemodel/MorphableModel.hpp"
 #include "eos/morphablemodel/Blendshape.hpp"
@@ -809,7 +811,7 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi(
  * @return The fitted model shape instance and the final pose.
  */
 inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi(
-	const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blend_shapes, const std::vector<core::LandmarkCollection<cv::Vec2f>>& landmarks,
+	const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const std::vector<core::LandmarkCollection<cv::Vec2f>>& landmarks,
 	const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks,
 	const fitting::ModelContour& model_contour, int num_iterations = 5, boost::optional<int> num_shape_coefficients_to_fit = boost::none, float lambda = 30.0f)
 {
@@ -819,7 +821,7 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi(
 	return fit_shape_and_pose_multi(
 		morphable_model,
-		blend_shapes,
+		blendshapes,
 		landmarks,
 		landmark_mapper,
 		image_width,
@@ -863,10 +865,48 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi(
 //	}
 //}}

+/**
+ * Generate a new mesh, given pca_shape_coefficients and blendshape_coefficients.
+ *
+ * @param[in] morphable_model The Morphable Model to sample from.
+ * @param[in] blendshapes The blendshapes to combine with the PCA sample.
+ * @param[in] pca_shape_coefficients PCA shape coefficients to sample with.
+ * @param[in,out] blendshape_coefficients Blendshape coefficients; resized to the number of blendshapes if empty.
+ * @param[in] num_shape_coefficients_to_fit
+ * @return The generated mesh.
+ */
+inline eos::core::Mesh generate_new_mesh(
+	morphablemodel::MorphableModel& morphable_model,
+	std::vector<morphablemodel::Blendshape>& blendshapes,
+	std::vector<float>& pca_shape_coefficients,
+	std::vector<float>& blendshape_coefficients,
+	unsigned int num_shape_coefficients_to_fit = 50) {
+
+	eos::core::Mesh mesh;
+
+	Eigen::MatrixXf blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
+	Eigen::VectorXf pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
+
+	// create a new set of blendshape coefficients if empty:
+	if (blendshape_coefficients.empty()) {
+		blendshape_coefficients.resize(blendshapes.size());
+	}
+
+	Eigen::VectorXf combined_shape = pca_shape +
+		blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients.data(), blendshape_coefficients.size());
+
+	mesh = morphablemodel::sample_to_mesh(
+		combined_shape,
+		morphable_model.get_color_model().get_mean(),
+		morphable_model.get_shape_model().get_triangle_list(),
+		morphable_model.get_color_model().get_triangle_list(),
+		morphable_model.get_texture_coordinates());
+
+	return mesh;
+}
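In other words, the sampled shape is the PCA sample plus the blendshape basis times the blendshape coefficients. A minimal usage sketch (illustrative, not part of this commit; it assumes, as the calls above suggest, that draw_sample() treats missing coefficients as zero, so empty coefficients yield the mean shape):

	std::vector<float> pca_coeffs;        // hypothetical: empty -> mean shape
	std::vector<float> blendshape_coeffs; // resized to blendshapes.size() inside
	eos::core::Mesh mesh = fitting::generate_new_mesh(
		morphable_model, blendshapes, pca_coeffs, blendshape_coeffs);
	// With both coefficient vectors at zero, this should return the mean face mesh.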
 inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
 	const morphablemodel::MorphableModel& morphable_model,
 	const std::vector<morphablemodel::Blendshape>& blendshapes,
-	const std::vector<core::LandmarkCollection<cv::Vec2f>>& landmarks,
+	std::vector<eos::video::Keyframe>& key_frames,
 	const core::LandmarkMapper& landmark_mapper,
 	int image_width,
 	int image_height,
@@ -875,7 +915,9 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
 	const fitting::ContourLandmarks& contour_landmarks,
 	const fitting::ModelContour& model_contour,
 	int num_iterations,
-	boost::optional<int> num_shape_coefficients_to_fit, float lambda, boost::optional<fitting::RenderingParameters> initial_rendering_params,
+	boost::optional<int> num_shape_coefficients_to_fit,
+	float lambda,
+	boost::optional<fitting::RenderingParameters> initial_rendering_params,
 	std::vector<float>& pca_shape_coefficients,
 	std::vector<std::vector<float>>& blendshape_coefficients,
 	std::vector<std::vector<cv::Vec2f>>& fitted_image_points) {
@@ -901,7 +943,6 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
 	// TODO: This leaves the following case open: num_coeffs given is empty or defined, but the
 	// pca_shape_coefficients given is != num_coeffs or the model's max-coeffs. What to do then? Handle & document!
-
 	if (blendshape_coefficients.size() < num_images) {
 		for (int j = 0; j < num_images; ++j) {
 			std::vector<float> current_blendshape_coefficients;
@@ -928,37 +969,37 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
 	for (int j = 0; j < num_images; ++j) {
 		VectorXf current_combined_shape = current_pca_shape +
 			blendshapes_as_basis *
-			Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size()
-		);
+			Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size());

 		eos::core::Mesh current_mesh = morphablemodel::sample_to_mesh(
 			current_combined_shape,
 			morphable_model.get_color_model().get_mean(),
 			morphable_model.get_shape_model().get_triangle_list(),
 			morphable_model.get_color_model().get_triangle_list(),
-			morphable_model.get_texture_coordinates()
-		);
+			morphable_model.get_texture_coordinates());

 		// Get the model points, vertex indices and image points (equal to the landmark
 		// coordinates) of the mesh, for every image / mesh:
-		core::subselect_landmarks<Vec2f, Vec4f>(
-			landmarks[j],
+		eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
+			key_frames[j].fitting_result.landmarks,
 			landmark_mapper,
 			current_mesh,
 			// output parameters
 			model_points,
 			vertex_indices,
-			image_points
-		);
+			image_points);

 		// Start constructing a list of rendering parameters needed for reconstruction.
 		// Get the current points from the last added image points and model points:
 		auto current_pose = fitting::estimate_orthographic_projection_linear(
-			image_points.back(), model_points.back(), true, image_height
-		);
+			image_points.back(), model_points.back(), true, image_height);

 		fitting::RenderingParameters current_rendering_params(current_pose, image_width, image_height);
 		rendering_params.push_back(current_rendering_params);

+		// update the key frame's rendering params:
+		key_frames[j].fitting_result.rendering_parameters = current_rendering_params;

 		Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
 		blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
@@ -998,60 +1039,107 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
 		// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
 		vector<Vec2f> image_points_contour;
 		vector<int> vertex_indices_contour;

+		auto curr_keyframe = key_frames[j];
+		auto landmarks = curr_keyframe.fitting_result.landmarks;
+
 		auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);

 		// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
-		std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks[j], contour_landmarks, model_contour, yaw_angle, current_meshs[j], rendering_params[j].get_modelview(), rendering_params[j].get_projection(), fitting::get_opencv_viewport(image_width, image_height));
+		std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(
+			landmarks,
+			contour_landmarks,
+			model_contour,
+			yaw_angle,
+			current_meshs[j],
+			rendering_params[j].get_modelview(),
+			rendering_params[j].get_projection(),
+			fitting::get_opencv_viewport(image_width, image_height)
+		);

 		// Add the contour correspondences to the set of landmarks that we use for the fitting:
 		vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
 		image_points[j] = fitting::concat(image_points[j], image_points_contour);

 		// Fit the occluding (away-facing) contour using the detected contour LMs:
 		vector<Eigen::Vector2f> occluding_contour_landmarks;

-		if (yaw_angle >= 0.0f) // positive yaw = subject looking to the left
-		{ // the left contour is the occluding one we want to use ("away-facing")
-			auto contour_landmarks_ = core::filter(landmarks[j], contour_landmarks.left_contour); // Can do this outside of the loop
-			std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
-		}
-		else {
-			auto contour_landmarks_ = core::filter(landmarks[j], contour_landmarks.right_contour);
-			std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
-		}
+		// positive yaw = subject looking to the left
+		if (yaw_angle >= 0.0f) {
+			// the left contour is the occluding one we want to use ("away-facing")
+			auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
+			std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) {
+				occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] });
+			});
+		} else {
+			auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
+			std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) {
+				occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] });
+			});
+		}

-		auto edge_correspondences = fitting::find_occluding_edge_correspondences(current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f);
+		auto edge_correspondences = fitting::find_occluding_edge_correspondences(
+			current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f
+		);

 		image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
 		vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);
 		// Get the model points of the current mesh, for all correspondences that we've got:
 		model_points[j].clear();
-		for (const auto& v : vertex_indices[j])
-		{
-			model_points[j].push_back({ current_meshs[j].vertices[v][0], current_meshs[j].vertices[v][1], current_meshs[j].vertices[v][2], current_meshs[j].vertices[v][3] });
+		for (const auto& v : vertex_indices[j]) {
+			model_points[j].push_back(
+				{
+					current_meshs[j].vertices[v][0],
+					current_meshs[j].vertices[v][1],
+					current_meshs[j].vertices[v][2],
+					current_meshs[j].vertices[v][3]
+				});
 		}

 		// Re-estimate the pose, using all correspondences:
-		fitting::ScaledOrthoProjectionParameters current_pose = fitting::estimate_orthographic_projection_linear(image_points[j], model_points[j], true, image_height);
+		auto current_pose = fitting::estimate_orthographic_projection_linear(image_points[j], model_points[j], true, image_height);
 		rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);

-		Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
+		cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
 		affine_from_orthos.push_back(affine_from_ortho);

 		// Estimate the PCA shape coefficients with the current blendshape coefficients:
-		VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),blendshape_coefficients[j].size());
+		VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
+			blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size());
 		mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
 	}

-	pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi(morphable_model, affine_from_orthos, image_points, vertex_indices, mean_plus_blendshapes, lambda, num_shape_coefficients_to_fit);
+	pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi(
+		morphable_model,
+		affine_from_orthos,
+		image_points,
+		vertex_indices,
+		mean_plus_blendshapes,
+		lambda,
+		num_shape_coefficients_to_fit
+	);

 	// Estimate the blendshape coefficients with the current PCA model estimate:
 	current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);

 	for (int j = 0; j < num_images; ++j) {
-		blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(blendshapes, current_pca_shape, affine_from_orthos[j], image_points[j], vertex_indices[j]);
+		blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
+			blendshapes, current_pca_shape, affine_from_orthos[j], image_points[j], vertex_indices[j]
+		);

-		current_combined_shapes[j] = current_pca_shape + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),blendshape_coefficients[j].size());
-		current_meshs[j] = morphablemodel::sample_to_mesh(current_combined_shapes[j], morphable_model.get_color_model().get_mean(), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
+		current_combined_shapes[j] = current_pca_shape +
+			blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size());
+
+		current_meshs[j] = morphablemodel::sample_to_mesh(
+			current_combined_shapes[j],
+			morphable_model.get_color_model().get_mean(),
+			morphable_model.get_shape_model().get_triangle_list(),
+			morphable_model.get_color_model().get_triangle_list(),
+			morphable_model.get_texture_coordinates()
+		);
 	}

 	fitted_image_points = image_points;
 	return { current_meshs, rendering_params }; // I think we could also work with a Mat face_instance in this function instead of a Mesh, but it would complicate the code (i.e. make it more complicated to access vertices).
 };
...
@@ -25,10 +25,15 @@
 #include "opencv2/core/core.hpp"
 #include "opencv2/opencv.hpp"
 #include "opencv2/highgui/highgui.hpp"

+#include "eos/video/Keyframe.hpp"
 #include <string>
 #include <vector>
 #include <fstream>
+#include <thread>
+#include <mutex>
+#include <atomic>
+#include <unistd.h>
 using cv::Mat;
 using cv::Vec2f;
@@ -44,7 +49,435 @@ using namespace std;
 namespace eos {
 namespace video {

+// TODO: maybe move video iterator here.. or remove fil.
+
+/**
+ * @brief Computes the variance of laplacian of the given image or patch.
+ *
+ * This should compute the variance of the laplacian of a given image or patch, according to the 'LAPV'
+ * algorithm of Pech 2000.
+ * It is used as a focus or blurriness measure, i.e. to assess the quality of the given patch.
+ *
+ * @param[in] image Input image or patch.
+ * @return The computed variance of laplacian score.
+ */
+double variance_of_laplacian(const cv::Mat& image)
+{
+	cv::Mat laplacian;
+	cv::Laplacian(image, laplacian, CV_64F);
+
+	cv::Scalar mu, sigma;
+	cv::meanStdDev(laplacian, mu, sigma);
+
+	double focus_measure = sigma.val[0] * sigma.val[0];
+	return focus_measure;
+};
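A usage sketch (illustrative, not part of this commit; the file name is hypothetical):

	// Score a patch for blurriness: higher variance means more high-frequency
	// detail, i.e. a sharper patch; a score near 0 indicates a flat (e.g.
	// all-black) or very blurry patch.
	cv::Mat patch = cv::imread("face_crop.png", cv::IMREAD_GRAYSCALE);
	double score = variance_of_laplacian(patch);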
+/**
+ * @brief Buffered video iterator that iterates through a video and keeps the
+ * best frames, binned by yaw pose, in a buffer. It is threaded so it can search
+ * quickly for good frames while the reconstruction is running.
+ */
+class BufferedVideoIterator
+{
+public:
+	int width;
+	int height;
+
+	Mat last_frame;
+	int last_frame_number;
+	bool reached_eof;
+
+	std::unique_ptr<std::thread> frame_buffer_worker;
+
+	BufferedVideoIterator() {};
+
+	// TODO: build support for setting the amount of max_frames in the buffer.
+	BufferedVideoIterator(std::string videoFilePath, fitting::ReconstructionData reconstruction_data, boost::property_tree::ptree settings) {
+		std::ifstream file(videoFilePath);
+		std::cout << "video file path: " << videoFilePath << std::endl;
+		if (!file.is_open()) {
+			throw std::runtime_error("Error opening given file: " + videoFilePath);
+		}
+
+		cv::VideoCapture tmp_cap(videoFilePath); // open video file
+		if (!tmp_cap.isOpened()) { // check if we succeeded
+			throw std::runtime_error("Could not play video");
+		}
+
+		this->cap = tmp_cap;
+
+		// copy settings gathered from a .ini file:
+		this->min_frames = settings.get<int>("video.min_frames", 5);
+		this->drop_frames = settings.get<int>("video.drop_frames", 0);
+		this->skip_frames = settings.get<int>("video.skip_frames", 0);
+		this->frames_per_bin = settings.get<unsigned int>("video.frames_per_bin", 2);
+		this->num_shape_coefficients_to_fit = settings.get<unsigned int>("video.num_shape_coefficients_to_fit", 50);
+
+		this->reconstruction_data = reconstruction_data;
+
+		// initialize the bins:
+		this->bins.resize(num_yaw_bins);
+
+		// reset the counters:
+		this->n_frames = 0;
+		this->total_frames = 0;
+
+		std::cout << "Buffered video iterator:" << std::endl <<
+			"min_frames: " << min_frames << std::endl <<
+			"drop_frames: " << drop_frames << std::endl <<
+			"frames_per_bin: " << frames_per_bin << std::endl <<
+			"num_shape_coefficients_to_fit: " << num_shape_coefficients_to_fit << std::endl;
+
+		std::cout << "total frames in video: " << cap.get(CV_CAP_PROP_FRAME_COUNT) << std::endl;
+	}
+	bool is_playing() {
+		return cap.isOpened();
+	}
+
+	/**
+	 * Generate a new keyframe for the given frame: fit the current model to the
+	 * frame's landmarks, estimate the pose, and compute the frame's laplacian score.
+	 *
+	 * @param[in] frame The video frame to wrap in a keyframe.
+	 * @return The new keyframe (an empty one if we ran out of landmark annotations).
+	 */
+	Keyframe generate_new_keyframe(cv::Mat frame) {
+		int frame_height = frame.rows;
+		int frame_width = frame.cols;
+
+		// Reached the end of the landmarks (only applicable for annotated videos):
+		if (reconstruction_data.landmark_list.size() <= total_frames) {
+			std::cout << "frame empty? " << frame.empty() << std::endl;
+			std::cout << "Reached the end of the landmarks (" <<
+				reconstruction_data.landmark_list.size() << "/" << total_frames << ")" << std::endl;
+
+			return Keyframe();
+		}
+
+		auto landmarks = reconstruction_data.landmark_list[total_frames];
+		auto landmark_mapper = reconstruction_data.landmark_mapper;
+		auto blendshapes = reconstruction_data.blendshapes;
+		auto morphable_model = reconstruction_data.morphable_model;
+
+		vector<cv::Vec4f> model_points;
+		vector<int> vertex_indices;
+		vector<cv::Vec2f> image_points;
+
+		if (num_shape_coefficients_to_fit == 0) {
+			num_shape_coefficients_to_fit = morphable_model.get_shape_model().get_num_principal_components();
+		}
+
+		// make a new (empty) set of blendshape coefficients:
+		std::vector<float> blend_shape_coefficients;
+
+		if (pca_shape_coefficients.empty()) {
+			pca_shape_coefficients.resize(num_shape_coefficients_to_fit);
+		}
+
+		auto mesh = fitting::generate_new_mesh(
+			morphable_model,
+			blendshapes,
+			pca_shape_coefficients, // the current pca_shape_coefficients will be the mean for the first iterations.
+			blend_shape_coefficients
+		);
+
+		// Will yield model_points, vertex_indices and image_points:
+		// todo: should this function not come from mesh?
+		core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+
+		auto current_pose = fitting::estimate_orthographic_projection_linear(
+			image_points, model_points, true, frame_height
+		);
+
+		fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
+		fitting::FittingResult fitting_result;
+		fitting_result.rendering_parameters = rendering_params;
+		fitting_result.landmarks = landmarks;
+
+		// std::cout << face_roi << " (" << frame_width << "," << frame_height << ")" << std::endl;
+		// for (auto&& lm : landmarks) {
+		//	cv::rectangle(
+		//		frame, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
+		//		cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
+		//	);
+		// }
+		//
+		// cv::imshow("frame", frame);
+		// cv::waitKey(0);
+
+		cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
+		float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame(face_roi)));
+
+		return Keyframe(frame_laplacian_score, frame, fitting_result, total_frames);
+	}
+	/**
+	 * Try to add a new keyframe, looking at the laplacian score and the yaw angle. The yaw angle
+	 * determines the bin the frame is added to. The score is compared to the other frames in the
+	 * same bin; if a better score is found, the frames are sorted by score and cut off at max frames.
+	 *
+	 * TODO: make sure this cutting-off is done correctly, we don't want faulty frames.
+	 *
+	 * @param[in,out] keyframe The candidate keyframe.
+	 * @return Whether the keyframe was added to its bin.
+	 */
+	bool try_add(Keyframe& keyframe) {
+		// Determine whether to add or not:
+		auto yaw_angle = glm::degrees(glm::yaw(keyframe.fitting_result.rendering_parameters.get_rotation()));
+		auto idx = angle_to_index(yaw_angle);
+		bool add_frame = false;
+
+		keyframe.yaw_angle = yaw_angle;
+
+		// The score is 0 for totally black frames, we don't want those:
+		if (keyframe.score == 0) {
+			return false;
+		}
+
+		// always add when we don't have enough frames yet
+		if (bins[idx].size() < frames_per_bin) {
+			add_frame = true; // definitely adding - we wouldn't have to go through the for-loop below.
+		}
+
+		for (auto&& f : bins[idx]) {
+			if (keyframe.score > f.score) {
+				add_frame = true;
+			}
+		}
+
+		if (!add_frame) {
+			return false;
+		}
+
+		// Add the keyframe:
+		bins[idx].push_back(keyframe);
+		if (bins[idx].size() > frames_per_bin) {
+			n_frames--;
+
+			// need to remove the lowest-scoring one:
+			std::sort(std::begin(bins[idx]), std::end(bins[idx]),
+				[](const auto& lhs, const auto& rhs) { return lhs.score > rhs.score; });
+			bins[idx].resize(frames_per_bin);
+		}
+
+		return true;
+	}
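	// Illustrative example (not part of this commit): with frames_per_bin == 2
	// and a bin currently holding scores {120, 95}, a new keyframe with score 100
	// beats 95, so it is pushed, the bin is sorted descending to {120, 100, 95}
	// and then resized back to two frames, dropping the 95. The n_frames--
	// compensates for the overflow, since the caller increments n_frames again
	// on a successful add.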
+	/**
+	 * Builds a string representation of the current state of the buffer.
+	 * TODO: the string representation can be better, maybe add a max precision for floats.
+	 *
+	 * @return A string listing (score, yaw_angle) per keyframe, grouped by bin.
+	 */
+	std::string to_string() const {
+		std::string output = std::to_string(n_frames) + ": ";
+
+		for (auto&& b : bins) {
+			output.append("[");
+
+			for (auto&& f : b) {
+				output.append("(" + std::to_string(f.score));
+				output.append("," + std::to_string(f.yaw_angle));
+				output.append("), ");
+			}
+
+			output.append("],");
+		}
+
+		return output;
+	}
+	/**
+	 * Get the keyframes from all bins as one flattened list.
+	 *
+	 * @return std::vector<Keyframe>
+	 */
+	std::vector<Keyframe> get_keyframes() const {
+		std::vector<Keyframe> keyframes;
+		for (auto&& b : bins) {
+			for (auto&& f : b) {
+				keyframes.push_back(f);
+			}
+		}
+
+		return keyframes;
+	}
+
+	/**
+	 * Start filling the buffer with frames, and start a worker thread to keep doing so.
+	 *
+	 * @return The keyframes gathered so far.
+	 */
+	std::vector<Keyframe> start() {
+		// blocking call to next() for the first time; stop if the frame is empty:
+		if (!next()) {
+			__stop();
+		}
+
+		// start a thread to keep getting new frames into the buffer from the video:
+		frame_buffer_worker = std::make_unique<std::thread>(&BufferedVideoIterator::video_iterator, this);
+		frame_buffer_worker.get()->detach();
+
+		return get_keyframes();
+	}
+	/**
+	 * Fill the buffer by iterating through the video until the very last frame.
+	 */
+	void video_iterator() {
+		// Go and fill the buffer with more frames while we are reconstructing:
+		while (next()) { }
+
+		// stop the video:
+		__stop();
+
+		std::cout << "Video stopped at " << total_frames << " frames - in buffer: " << n_frames << std::endl;
+	}
+
+	/**
+	 * Read the next frame, wrap it in a keyframe and try to add it to the buffer.
+	 * An empty frame means we reached the end of the video stream (i.e., file or camera).
+	 *
+	 * @return Whether a non-empty frame was read.
+	 */
+	bool next() {
+		Mat frame = __get_new_frame();
+
+		if (frame.empty()) {
+			return false;
+		}
+
+		// keep the last frame here, so we can play a video subsequently:
+		last_frame_number = total_frames;
+		last_frame = frame;
+
+		// TODO: only calculate the laplacian score within the bounding box of the face.
+		auto keyframe = generate_new_keyframe(frame);
+		bool frame_added = try_add(keyframe);
+
+		if (frame_added) {
+			n_frames++;
+
+			// Mark that the buffer has changed:
+			frame_buffer_changed = true;
+
+			std::cout << "frame added (" << n_frames << "): " << keyframe.score << ", " << keyframe.yaw_angle << std::endl;
+		}
+
+		total_frames++;
+
+		// fill up the buffer until we hit the minimum number of frames we want in it:
+		if (n_frames < min_frames && total_frames < 30) {
+			std::cout << "not enough frames yet: " << n_frames << "/" << min_frames << std::endl;
+			return next();
+		}
+
+		return true;
+	}
+	/**
+	 * Update the pca shape coefficients. We probably need a mutex here, for updating and
+	 * reading pca_shape_coefficients in generate_new_keyframe concurrently.
+	 *
+	 * @param[in] pca_shape_coefficients
+	 */
+	void update_reconstruction_coeff(std::vector<float> pca_shape_coefficients) {
+		std::cout << "-- Update pca coeff --" << std::endl;
+		this->pca_shape_coefficients = pca_shape_coefficients;
+	}
+
+	// Converts a given yaw angle to an index in the internal bins vector.
+	// Assumes 9 bins and 20° intervals.
+	static std::size_t angle_to_index(float yaw_angle)
+	{
+		if (yaw_angle <= -70.f)
+			return 0;
+		if (yaw_angle <= -50.f)
+			return 1;
+		if (yaw_angle <= -30.f)
+			return 2;
+		if (yaw_angle <= -10.f)
+			return 3;
+		if (yaw_angle <= 10.f)
+			return 4;
+		if (yaw_angle <= 30.f)
+			return 5;
+		if (yaw_angle <= 50.f)
+			return 6;
+		if (yaw_angle <= 70.f)
+			return 7;
+		return 8;
+	};
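	// Illustrative examples (not part of this commit): the bins are roughly 20°
	// wide around the centres -80°, -60°, ..., 80°, e.g.:
	//   angle_to_index(-75.f) == 0
	//   angle_to_index(0.f)   == 4   // frontal
	//   angle_to_index(35.f)  == 6
	//   angle_to_index(80.f)  == 8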
+	/**
+	 * Returns whether the buffer has changed since the last call to this function,
+	 * and resets the changed-flag.
+	 *
+	 * @return Whether the buffer has changed since the last check.
+	 */
+	bool has_changed() {
+		bool has_changed = frame_buffer_changed;
+
+		// reset the flag now that the change has been observed:
+		frame_buffer_changed = false;
+		return has_changed;
+	}
+
+	/**
+	 * Get a new frame from the video source.
+	 *
+	 * @return cv::Mat The frame (empty at the end of the stream).
+	 */
+	Mat __get_new_frame() {
+		// Get a new frame from the camera:
+		Mat frame;
+		cap >> frame;
+
+		return frame;
+	};
+
+	// todo: move to private if possible.
+	void __stop() {
+		cap.release();
+	};
+private:
+	int num_yaw_bins = 9;
+	unsigned int frames_per_bin;
+	bool frame_buffer_changed = false;
+
+	cv::VideoCapture cap;
+	std::deque<Keyframe> frame_buffer;
+	eos::fitting::ReconstructionData reconstruction_data;
+
+	using BinContent = std::vector<Keyframe>;
+	std::vector<BinContent> bins;
+
+	// the latest pca_shape_coefficients:
+	std::vector<float> pca_shape_coefficients;
+
+	// TODO: make settable
+	// total number of frames processed, not necessarily in the buffer (some frames are skipped):
+	int total_frames;
+
+	// total number of frames in the buffer:
+	int n_frames;
+
+	// number of frames to skip before starting:
+	int skip_frames = 0;
+
+	// minimum number of frames to keep in the buffer:
+	int min_frames = 5;
+
+	// Note: these settings are for future use
+	int drop_frames = 0;
+
+	unsigned int num_shape_coefficients_to_fit = 0;
+
+	// laplacian threshold
+	// double laplacian_threshold = 10000000;
+};
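A minimal consumer sketch for the threaded iterator (illustrative, not part of this commit; "settings.ini", "video.mp4" and the pre-built reconstruction_data are hypothetical, and reading the .ini requires <boost/property_tree/ini_parser.hpp>):

	boost::property_tree::ptree settings;
	boost::property_tree::ini_parser::read_ini("settings.ini", settings);

	eos::video::BufferedVideoIterator vid_iterator("video.mp4", reconstruction_data, settings);
	auto key_frames = vid_iterator.start(); // blocks for the first frame, then detaches a worker thread

	while (vid_iterator.is_playing()) {
		if (vid_iterator.has_changed()) { // polling; this call also resets the changed-flag
			key_frames = vid_iterator.get_keyframes();
			// ... reconstruct from key_frames (e.g. fit_shape_and_pose_multi_2) and feed the result
			// back via vid_iterator.update_reconstruction_coeff(pca_shape_coefficients) ...
		}
	}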
 }
 }

 #endif
...
@@ -24,6 +24,8 @@
#include "eos/fitting/FittingResult.hpp" #include "eos/fitting/FittingResult.hpp"
#include "eos/fitting/RenderingParameters.hpp" #include "eos/fitting/RenderingParameters.hpp"
#include "eos/fitting/ReconstructionData.hpp"
#include "eos/fitting/fitting.hpp"
#include "eos/morphablemodel/Blendshape.hpp" #include "eos/morphablemodel/Blendshape.hpp"
#include "eos/morphablemodel/MorphableModel.hpp" #include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/render/texture_extraction.hpp" #include "eos/render/texture_extraction.hpp"
...@@ -33,6 +35,7 @@ ...@@ -33,6 +35,7 @@
#include "opencv2/core/core.hpp" #include "opencv2/core/core.hpp"
#include <deque> #include <deque>
#include <cstdlib>
 namespace eos {
 namespace video {

@@ -72,108 +75,26 @@ public:
 	 * @param frame
 	 * @param fitting_result
 	 */
-	Keyframe(float score, cv::Mat frame, fitting::FittingResult fitting_result) {
+	Keyframe(float score, cv::Mat frame, fitting::FittingResult fitting_result, int frame_number) {
 		this->frame = frame;
 		this->score = score;
 		this->fitting_result = fitting_result;
+		this->frame_number = frame_number;
 	}
+	// The original frame:
 	cv::Mat frame;
+
+	// Frame number, such that the order between frames can be maintained:
 	int frame_number;
+
+	// the laplacian score, for example:
 	float score = 0.0f;
+
+	// rotation from the fitting results:
+	float yaw_angle = 0.0f;
+
 	fitting::FittingResult fitting_result;
 };
-
-/**
- * @brief A keyframe selection that selects keyframes according to yaw pose and score.
- *
- * Separates the +-90° yaw pose range into 20° intervals (i.e. 90 to 70, ..., -10 to 10, ...), and puts frames
- * into each bin, until full. Replaces keyframes with better frames if the score is higher than that of
- * current keyframes.
- *
- * The yaw pose bins are currently hard-coded (9 bins, 20° intervals).
- */
-struct PoseBinningKeyframeSelector
-{
-public:
-	PoseBinningKeyframeSelector(int frames_per_bin = 2) : frames_per_bin(frames_per_bin)
-	{
-		bins.resize(num_yaw_bins);
-	};
-
-	bool try_add(float frame_score, cv::Mat image, const fitting::FittingResult& fitting_result)
-	{
-		// Determine whether to add or not:
-		auto yaw_angle = glm::degrees(glm::yaw(fitting_result.rendering_parameters.get_rotation()));
-		auto idx = angle_to_index(yaw_angle);
-		bool add_frame = false;
-		if (bins[idx].size() < frames_per_bin) // always add when we don't have enough frames
-			add_frame =
-				true; // definitely adding - we wouldn't have to go through the for-loop on the next line.
-		for (auto&& f : bins[idx])
-		{
-			if (frame_score > f.score)
-				add_frame = true;
-		}
-		if (!add_frame)
-		{
-			return false;
-		}
-		// Add the keyframe:
-		bins[idx].push_back(video::Keyframe{frame_score, image, fitting_result});
-		if (bins[idx].size() > frames_per_bin)
-		{
-			// need to remove the lowest one:
-			std::sort(std::begin(bins[idx]), std::end(bins[idx]),
-					  [](const auto& lhs, const auto& rhs) { return lhs.score > rhs.score; });
-			bins[idx].resize(frames_per_bin);
-		}
-		return true;
-	};
-
-	// Returns the keyframes as a vector.
-	std::vector<Keyframe> get_keyframes() const
-	{
-		std::vector<Keyframe> keyframes;
-		for (auto&& b : bins)
-		{
-			for (auto&& f : b)
-			{
-				keyframes.push_back(f);
-			}
-		}
-		return keyframes;
-	};
-
-private:
-	using BinContent = std::vector<Keyframe>;
-	std::vector<BinContent> bins;
-	const int num_yaw_bins = 9;
-	int frames_per_bin;
-
-	// Converts a given yaw angle to an index in the internal bins vector.
-	// Assumes 9 bins and 20° intervals.
-	static std::size_t angle_to_index(float yaw_angle)
-	{
-		if (yaw_angle <= -70.f)
-			return 0;
-		if (yaw_angle <= -50.f)
-			return 1;
-		if (yaw_angle <= -30.f)
-			return 2;
-		if (yaw_angle <= -10.f)
-			return 3;
-		if (yaw_angle <= 10.f)
-			return 4;
-		if (yaw_angle <= 30.f)
-			return 5;
-		if (yaw_angle <= 50.f)
-			return 6;
-		if (yaw_angle <= 70.f)
-			return 7;
-		return 8;
-	};
-};
 /**
@@ -270,156 +191,6 @@ cv::Mat merge_weighted_mean(const std::vector<Keyframe>& keyframes,
 		return merged_isomap;
 	};
-
-/**
- * @brief Computes the variance of laplacian of the given image or patch.
- *
- * This should compute the variance of the laplacian of a given image or patch, according to the 'LAPV'
- * algorithm of Pech 2000.
- * It is used as a focus or blurriness measure, i.e. to assess the quality of the given patch.
- *
- * @param[in] image Input image or patch.
- * @return The computed variance of laplacian score.
- */
-double variance_of_laplacian(const cv::Mat& image)
-{
-	cv::Mat laplacian;
-	cv::Laplacian(image, laplacian, CV_64F);
-
-	cv::Scalar mu, sigma;
-	cv::meanStdDev(laplacian, mu, sigma);
-
-	double focus_measure = sigma.val[0] * sigma.val[0];
-	return focus_measure;
-};
-
-/**
- * BufferedVideoIterator will keep a buffer of the last seen n_frames. By calling .next() it will load a new
- * frame from the given video and you will get a pointer to the front of the buffer (which has n_frames).
- *
- * Just imagine a sliding window across the video; this is what we aim to implement here.
- *
- * Example:
- *	vid_iterator = bufferedvideoiterator<cv::mat>(videofile.string(), landmark_annotation_list);
- *
- *	std::deque<cv::mat> frames = vid_iterator.next();
- *	while (!(frames.empty())) {
- *		for (std::deque<cv::mat>::iterator it = frames.begin(); it != frames.end(); ++it) {
- *			std::cout << ' ' << *it;
- *		}
- *
- *		frames = vid_iterator.next();
- *	}
- *
- * @tparam T
- */
-// Note for this template: later we can use other templates for easy testing (not using cv::Mat but <int> for example).
-class BufferedVideoIterator {
-public:
-	int width;
-	int height;
-
-	BufferedVideoIterator() {};
-
-	// TODO: build support for setting the amount of max_frames in the buffer.
-	BufferedVideoIterator(std::string videoFilePath, boost::property_tree::ptree settings) {
-		std::ifstream file(videoFilePath);
-		std::cout << "video file path: " << videoFilePath << std::endl;
-
-		if (!file.is_open()) {
-			throw std::runtime_error("Error opening given file: " + videoFilePath);
-		}
-
-		cv::VideoCapture tmp_cap(videoFilePath); // open video file
-		if (!tmp_cap.isOpened()) { // check if we succeeded
-			throw std::runtime_error("Could not play video");
-		}
-
-		this->cap = tmp_cap;
-		this->max_frames = settings.get<int>("video.max_frames", 5);
-		this->min_frames = settings.get<int>("video.min_frames", 5);
-		this->drop_frames = settings.get<int>("video.drop_frames", 0);
-		this->laplacian_threshold = settings.get<int>("video.blur_threshold", 1000);
-
-		// TODO: Implement this.
-		this->skip_frames = settings.get<int>("video.skip_frames", 0);
-	}
-
-	/**
-	 * Set the next frame and return the frame buffer.
-	 *
-	 * @return std::deque<Keyframe> The frame buffer.
-	 *
-	 * TODO: build support for returning landmarks AND frames.
-	 */
-	std::deque<Keyframe> next() {
-		long frame_buffer_length = frame_buffer.size();
-
-		// Get a new frame from the camera:
-		cv::Mat frame;
-		cap >> frame;
-
-		// Pop if we exceeded max_frames:
-		if (n_frames > max_frames) {
-			frame_buffer.pop_front();
-			n_frames--;
-		}
-
-		float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame));
-
-		if (frame_laplacian_score < laplacian_threshold && frame_laplacian_score > 0) {
-			frame_buffer.push_back(Keyframe(frame_laplacian_score, frame, total_frames_processed));
-			n_frames++;
-			std::cout << total_frames_processed << ": laplacian score " << frame_laplacian_score << std::endl;
-		} else {
-			std::cout << total_frames_processed << ": skipping frame (";
-			if (frame_laplacian_score == 0) {
-				std::cout << "total black): " << frame_laplacian_score << std::endl;
-			} else {
-				std::cout << "too blurry): " << frame_laplacian_score << std::endl;
-			}
-		}
-
-		total_frames_processed++;
-
-		// fill up the buffer until we hit the minimum number of frames we want in it:
-		if (n_frames < min_frames) {
-			frame_buffer = next();
-		}
-
-		return frame_buffer;
-	}
-
-	std::deque<Keyframe> get_frame_buffer() {
-		return frame_buffer;
-	}
-
-private:
-	cv::VideoCapture cap;
-	std::deque<Keyframe> frame_buffer;
-
-	// TODO: make settable
-	// total number of frames processed, not necessarily in the buffer (some frames are skipped):
-	int total_frames_processed = 0;
-
-	// total number of frames in the buffer:
-	int n_frames = 0;
-
-	// number of frames to skip before starting:
-	int skip_frames = 0;
-
-	// minimum number of frames to keep in the buffer:
-	int min_frames = 5;
-
-	// keep max_frames in the buffer:
-	int max_frames = 5;
-
-	// Note: these settings are for future use
-	int drop_frames = 0;
-
-	// laplacian threshold
-	double laplacian_threshold = 10000000;
-};
 /**
  * @brief Merges isomaps from a live video with a weighted averaging, based
@@ -487,6 +258,18 @@ public:
 		return merged_isomap_uchar;
 	};

+	cv::Mat sharpen(const cv::Mat& isomap) {
+		cv::Mat output;
+		cv::Mat kernel = (cv::Mat_<float>(5, 5) << -0.125, -0.125, -0.125, -0.125, -0.125,
+												   -0.125,  0.25 ,  0.25 ,  0.25 , -0.125,
+												   -0.125,  0.25 ,  1.   ,  0.25 , -0.125,
+												   -0.125,  0.25 ,  0.25 ,  0.25 , -0.125,
+												   -0.125, -0.125, -0.125, -0.125, -0.125);
+		cv::filter2D(isomap, output, isomap.depth(), kernel);
+
+		return output;
+	}
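	// Note (illustrative, not part of this commit): the kernel entries sum to 1
	// (16 * -0.125 + 8 * 0.25 + 1 = -2 + 2 + 1 = 1), so the overall brightness of
	// the isomap is preserved while high-frequency detail is boosted, e.g.:
	//   merged_isomap = isomap_averaging.sharpen(merged_isomap);
	// (with "isomap_averaging" a hypothetical WeightedIsomapAveraging instance).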
 private:
 	cv::Mat visibility_counter;
 	cv::Mat merged_isomap;
...
@@ -25,6 +25,7 @@
 #include "eos/render/texture_extraction.hpp"
 #include "eos/video/Keyframe.hpp"
 #include "eos/fitting/ReconstructionData.hpp"
+#include "eos/video/BufferedVideoIterator.hpp"

 #include "opencv2/core/core.hpp"
 #include "opencv2/opencv.hpp"
@@ -166,28 +167,28 @@ void evaluate_accuracy(
  * @param rendering_paramss
  * @param landmark_list
  * @param morphable_model
- * @param blend_shapes
+ * @param blendshapes
  * @param meshs
  * @param pca_shape_coefficients
- * @param blend_shape_coefficients
+ * @param blendshape_coefficients
  * @param fitted_image_points
  * @param annotations
  * @param landmark_mapper
 * @param settings
 * @param n_iter
 */
-void output_render(
-	std::deque<eos::video::Keyframe> key_frames,
+void render_output(
+	std::vector<eos::video::Keyframe> key_frames,
 	std::vector<fitting::RenderingParameters> rendering_paramss,
 	std::vector<core::LandmarkCollection<cv::Vec2f>> landmark_list,
-	morphablemodel::MorphableModel morphable_model,
-	vector<morphablemodel::Blendshape> blend_shapes,
 	vector<core::Mesh> meshs,
 	std::vector<float> pca_shape_coefficients,
-	std::vector<std::vector<float>> blend_shape_coefficients,
+	std::vector<std::vector<float>> blendshape_coefficients,
 	std::vector<std::vector<cv::Vec2f>> fitted_image_points,
 	std::vector<std::string> annotations,
-	core::LandmarkMapper landmark_mapper,
+	fitting::ReconstructionData reconstruction_data,
+	Mat last_frame,
+	int last_frame_number,
 	boost::property_tree::ptree settings,
 	int n_iter) {
@@ -195,6 +196,11 @@ void output_render(
 	WeightedIsomapAveraging isomap_averaging(60.f); // merge all triangles that are facing <60° towards the camera
 	eos::video::PcaCoefficientMerging pca_shape_merging;

+	auto landmark_mapper = reconstruction_data.landmark_mapper;
+	auto blendshapes = reconstruction_data.blendshapes;
+	auto morphable_model = reconstruction_data.morphable_model;
+	auto landmarks = reconstruction_data.landmark_list[last_frame_number];
+
 	auto outputfilebase = annotations[0];

 	for (uint i = 0; i < key_frames.size(); ++i) {
@@ -202,8 +208,11 @@ void output_render(
 		auto unmodified_frame = frame.clone();
 		int frame_number = key_frames[i].frame_number;
+		float yaw_angle = glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
+
+		cv::imwrite("/tmp/eos/" +
+			std::to_string(frame_number) + "_" + std::to_string(yaw_angle) + ".png", frame);
+		// float yaw_angle = glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));

 		int frame_width = frame.cols;
 		int frame_height = frame.rows;
@@ -215,29 +224,47 @@ void output_render(
 			unmodified_frame,
 			true,
 			render::TextureInterpolation::NearestNeighbour,
-			512
-		);
+			512);

 		// Merge the isomaps - add the current one to the already merged ones:
 		merged_isomap = isomap_averaging.add_and_merge(isomap);
 	}
 	// Get last frame (that's the newest one).
-	Mat frame = key_frames.back().frame;
+	Mat frame = last_frame.clone();

-	auto last_rendering_params = rendering_paramss.back();
-	auto last_blend_coefficients = blend_shape_coefficients.back();
 	int frame_width = frame.cols;
 	int frame_height = frame.rows;

+	vector<cv::Vec4f> model_points;
+	vector<int> vertex_indices;
+	vector<cv::Vec2f> image_points;
+
+	// make a new (empty) set of blendshape coefficients:
+	std::vector<float> blend_shape_coefficients;
+
+	auto mesh = fitting::generate_new_mesh(
+		morphable_model,
+		blendshapes,
+		pca_shape_coefficients, // the current pca_shape_coefficients will be the mean for the first iterations.
+		blend_shape_coefficients);
+
+	// Will yield model_points, vertex_indices and image_points:
+	// todo: should this function not come from mesh?
+	core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+
+	auto current_pose = fitting::estimate_orthographic_projection_linear(
+		image_points, model_points, true, frame_height);
+
+	fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
 	// Same for the shape:
 	pca_shape_coefficients = pca_shape_merging.add_and_merge(pca_shape_coefficients);
-	auto blendshapes_as_basis = morphablemodel::to_matrix(blend_shapes);
+
+	auto blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
 	auto merged_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients) +
 		blendshapes_as_basis *
-		Eigen::Map<const Eigen::VectorXf>(last_blend_coefficients.data(),
-			last_blend_coefficients.size()
-	);
+		Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
+			blend_shape_coefficients.size());

 	auto merged_mesh = morphablemodel::sample_to_mesh(
 		merged_shape,
@@ -250,24 +277,33 @@ void output_render(
 	// Render the model in a separate window using the estimated pose, shape and merged texture:
 	Mat rendering;

 	// render needs a 4-channel, 8-bit image; this needs a two-step conversion.
 	cvtColor(frame, rendering, CV_BGR2BGRA);

 	// make sure the image is CV_8UC4, maybe do check first?
 	rendering.convertTo(rendering, CV_8UC4);

+	Mat affine_cam = fitting::get_3x4_affine_camera_matrix(rendering_params, frame_width, frame_height);
+	Mat isomap = render::extract_texture(
+		merged_mesh,
+		affine_cam,
+		last_frame,
+		true,
+		render::TextureInterpolation::NearestNeighbour,
+		512);
+
 	std::tie(rendering, std::ignore) = render::render(
 		merged_mesh,
-		last_rendering_params.get_modelview(),
-		last_rendering_params.get_projection(),
+		rendering_params.get_modelview(),
+		rendering_params.get_projection(),
 		frame_width,
 		frame_height,
-		render::create_mipmapped_texture(merged_isomap),
+		render::create_mipmapped_texture(isomap),
 		true,
 		false,
 		false,
-		rendering
-	);
+		rendering);

 	cv::imshow("render", rendering);
 	cv::waitKey(10);
@@ -279,10 +315,10 @@ void output_render(
  * @param rendering_paramss
  * @param landmark_list
  * @param morphable_model
- * @param blend_shapes
+ * @param blendshapes
  * @param meshs
  * @param pca_shape_coefficients
- * @param blend_shape_coefficients
+ * @param blendshape_coefficients
  * @param fitted_image_points
  * @param annotations
  * @param landmark_mapper
@@ -290,111 +326,141 @@ void evaluate_results(
  * @param n_iter
  */
 void evaluate_results(
-	std::deque<eos::video::Keyframe> key_frames,
+	std::vector<eos::video::Keyframe> key_frames,
 	std::vector<fitting::RenderingParameters> rendering_paramss,
-	std::vector<core::LandmarkCollection<cv::Vec2f>> landmark_list,
-	morphablemodel::MorphableModel morphable_model,
-	vector<morphablemodel::Blendshape> blend_shapes,
 	vector<core::Mesh> meshs,
 	std::vector<float> pca_shape_coefficients,
-	std::vector<std::vector<float>> blend_shape_coefficients,
+	std::vector<std::vector<float>> blendshape_coefficients,
 	std::vector<std::vector<cv::Vec2f>> fitted_image_points,
 	std::vector<std::string> annotations,
-	core::LandmarkMapper landmark_mapper,
+	fitting::ReconstructionData reconstruction_data,
 	boost::property_tree::ptree settings,
 	int n_iter) {
WeightedIsomapAveraging isomap_averaging(60.f); // merge all triangles that are facing <60° towards the camera
WeightedIsomapAveraging isomap_averaging(10.f); // merge all triangles that are facing <10° towards the camera
Mat merged_isomap;
fs::path outputfilebase = annotations[0];
auto landmark_mapper = reconstruction_data.landmark_mapper;
auto blendshapes = reconstruction_data.blendshapes;
auto morphable_model = reconstruction_data.morphable_model;
// The 3D head pose can be recovered as follows:
for (uint i = 0; i < key_frames.size(); ++i) {
float yaw_angle = glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
int frame_number = key_frames[i].frame_number;
auto landmarks = key_frames[i].fitting_result.landmarks;
auto rendering_params = key_frames[i].fitting_result.rendering_parameters;
float yaw_angle = key_frames[i].yaw_angle;
Mat frame = key_frames[i].frame;
int frame_width = frame.cols;
int frame_height = frame.rows;
Mat outimg = frame.clone();
// and similarly for pitch and roll.
// Extract the texture from the image using given mesh and camera parameters:
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(
rendering_paramss[i], frame_width, frame_height
);
rendering_params, frame_width, frame_height);
// for (auto &&lm : landmarks) {
// cv::rectangle(
// outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
// cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
// );
// }
// // Draw the fitted mesh as wireframe, and save the image:
// draw_wireframe(
// outimg,
// meshs[i],
// rendering_params.get_modelview(),
// rendering_params.get_projection(),
// fitting::get_opencv_viewport(frame_width, frame_height));
//
// cv::imshow("Img", outimg);
// cv::waitKey(1);
// Draw the loaded landmarks:
Mat isomap = render::extract_texture(meshs[i], affine_from_ortho, frame);
Mat outimg = frame.clone();
for (auto &&lm : landmark_list[i]) {
cv::rectangle(
outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
);
}
// Draw the fitted mesh as wireframe, and save the image:
draw_wireframe(
outimg,
meshs[i],
rendering_paramss[i].get_modelview(),
rendering_paramss[i].get_projection(),
fitting::get_opencv_viewport(frame_width, frame_height)
);
fs::path path = (fs::path(annotations[frame_number]).parent_path() / "eval");
std::string outputfile = (path / fs::path(annotations[frame_number]).replace_extension("").filename()).string();
std::string iter = "_" + std::to_string(n_iter) + "_" + std::to_string(i);
cv::imwrite(outputfile + iter + ".annotated.png", outimg);
// save frontal rendering with texture:
Mat frontal_rendering;
glm::mat4 modelview_frontal = glm::mat4( 1.0 );
core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
outimg = frame.clone();
cv::imwrite(outputfile + iter + ".isomap.png", isomap);
// merge the isomaps:
merged_isomap = isomap_averaging.add_and_merge(isomap);
std::tie(frontal_rendering, std::ignore) = eos::render::render(
neutral_expression,
rendering_paramss[i].get_modelview(),
rendering_paramss[i].get_projection(),
frame_width,
frame_height,
render::create_mipmapped_texture(merged_isomap),
true,
false,
false,
outimg
);
cv::imwrite(outputfile + iter + ".frontal.png", frontal_rendering);
evaluate_accuracy(
landmark_list[i],
landmark_mapper,
meshs[i],
affine_from_ortho,
settings
);
// cv::imshow("isomap", isomap);
// cv::imshow("merged_isomap", merged_isomap);
//
// Mat outimg = frame.clone();
//
// for (auto &&lm : landmarks) {
// cv::rectangle(
// outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
// cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
// );
// }
// // Draw the fitted mesh as wireframe, and save the image:
// draw_wireframe(
// outimg,
// meshs[i],
// rendering_params.get_modelview(),
// rendering_params.get_projection(),
// fitting::get_opencv_viewport(frame_width, frame_height));
//
// cv::imshow("Img", outimg);
// cv::waitKey(0);
// fs::path path = (fs::path(annotations[frame_number]).parent_path() / "eval");
// std::string outputfile = (path / fs::path(annotations[frame_number]).replace_extension("").filename()).string();
// std::string iter = "_" + std::to_string(n_iter) + "_" + std::to_string(i);
// cv::imwrite(outputfile + iter + ".annotated.png", outimg);
//
// // save frontal rendering with texture:
// glm::mat4 modelview_frontal = glm::mat4( 1.0 );
// core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
// morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
// morphable_model.get_color_model().get_mean(),
// morphable_model.get_shape_model().get_triangle_list(),
// morphable_model.get_color_model().get_triangle_list(),
// morphable_model.get_texture_coordinates()
// );
// cv::imwrite(outputfile + iter + ".isomap.png", isomap);
// Mat frontal_rendering;
// std::tie(frontal_rendering, std::ignore) = eos::render::render(
// neutral_expression,
// rendering_params.get_modelview(),
// rendering_params.get_projection(),
// frame_width,
// frame_height,
// render::create_mipmapped_texture(merged_isomap),
// true,
// false,
// false
// );
// cv::imwrite(outputfile + iter + ".frontal.png", frontal_rendering);
// evaluate_accuracy(
// landmarks,
// landmark_mapper,
// meshs[i],
// affine_from_ortho,
// settings
// );
}
// save the merged isomap:
std::string iter = "_" + std::to_string(n_iter);
fs::path path = (fs::path(annotations[n_iter]).parent_path() / "eval");
fs::create_directory(path);
std::string outputfile = (path / fs::path(annotations[n_iter]).replace_extension("").filename()).string();
cv::imwrite(outputfile + iter + ".isomap.png", merged_isomap);
// sharpen isomap
Mat sharpen = isomap_averaging.sharpen(merged_isomap);
cv::imwrite(outputfile + iter + ".isomap.png", sharpen);
// save the frontal rendering with merged isomap:
Mat frontal_rendering;
...@@ -407,23 +473,20 @@ void evaluate_results(
morphable_model.get_texture_coordinates()
);
std::tie(frontal_rendering, std::ignore) = render::render(
neutral_expression,
modelview_frontal,
glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
512, 512,
render::create_mipmapped_texture(merged_isomap),
true,
false,
false
);
cv::imwrite(outputfile + iter + ".merged_frontal.png", frontal_rendering);
// std::tie(frontal_rendering, std::ignore) = render::render(
// neutral_expression,
// modelview_frontal,
// glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
// 512, 512,
// render::create_mipmapped_texture(merged_isomap),
// true,
// false,
// false
// );
// cv::imwrite(outputfile + iter + ".merged_frontal.png", frontal_rendering);
// Save the mesh as textured obj:
core::write_textured_obj(morphable_model.draw_sample(
pca_shape_coefficients, std::vector<float>()), outputfile + iter + ".obj");
core::write_textured_obj(neutral_expression, outputfile + iter + ".obj");
std::cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile + iter + ".obj" << std::endl;
}
...@@ -450,7 +513,7 @@ boost::property_tree::ptree get_reconstruction_config(std::string filename) {
* @return
*/
vector<core::LandmarkCollection<cv::Vec2f>> sample_landmarks(
std::deque<eos::video::Keyframe> key_frames, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
std::vector<eos::video::Keyframe> key_frames, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
vector<core::LandmarkCollection<cv::Vec2f>> sublist;
for (auto& f : key_frames) {
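The hunk is cut off after the loop header; judging from the frame_number field that keyframes carry elsewhere in this commit, the selection presumably looks like the following sketch (the loop body is an assumption, not shown in the diff):

    // Sketch of the full function; only the loop body is guessed.
    vector<core::LandmarkCollection<cv::Vec2f>> sample_landmarks(
        std::vector<eos::video::Keyframe> key_frames, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
        vector<core::LandmarkCollection<cv::Vec2f>> sublist;
        for (auto& f : key_frames) {
            sublist.push_back(landmarks[f.frame_number]); // pick the annotation belonging to this keyframe
        }
        return sublist;
    }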
...@@ -523,8 +586,8 @@ int main(int argc, char *argv[]) {
morphablemodel::MorphableModel morphable_model;
std::vector<float> pca_shape_coefficients;
// List of blend_shapes coefficients:
// List of blendshapes coefficients:
std::vector<std::vector<float>> blend_shape_coefficients;
std::vector<std::vector<float>> blendshape_coefficients;
std::vector<std::vector<cv::Vec2f>> fitted_image_points;
// These will be the final 2D and 3D points used for the fitting:
...@@ -558,7 +621,7 @@ int main(int argc, char *argv[]) {
}
// The expression blendshapes:
auto blend_shapes = morphablemodel::load_blendshapes(blendshapesfile.string());
auto blendshapes = morphablemodel::load_blendshapes(blendshapesfile.string());
// These two are used to fit the front-facing contour to the ibug contour landmarks:
auto model_contour = contourfile.empty() ? fitting::ModelContour() : fitting::ModelContour::load(contourfile.string());
...@@ -568,35 +631,39 @@ int main(int argc, char *argv[]) {
// Read .ini file to accommodate.
auto settings = get_reconstruction_config(reconstruction_config.string());
auto reconstruction_data = eos::fitting::ReconstructionData{
morphable_model, blendshapes, landmark_mapper, landmark_list, model_contour, ibug_contour, edge_topology};
// Start with the video play and get video file:
BufferedVideoIterator vid_iterator;
try {
vid_iterator = BufferedVideoIterator(videofile.string(), settings);
vid_iterator = BufferedVideoIterator(videofile.string(), reconstruction_data, settings);
} catch(std::runtime_error &e) {
std::cout << e.what() << std::endl;
return EXIT_FAILURE;
}
// Start getting video frames:
std::deque<eos::video::Keyframe> key_frames = vid_iterator.next();
vid_iterator.start();
eos::fitting::ReconstructionData data;
// Count the number of iterations:
int n_iter = 0;
while(!(key_frames.empty())) {
// if (n_iter == 100) {
// break;
// }
while(vid_iterator.is_playing()) {
auto key_frames = vid_iterator.get_keyframes();
// Generate a sublist of the total available landmark list:
auto landmark_sublist = sample_landmarks(key_frames, landmark_list);
// std::cout << vid_iterator.to_string() << std::endl;
// Fit shape and pose:
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_2(
// it makes no sense to update pca_coeff if nothing in the buffer has changed:
if (vid_iterator.has_changed()) {
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
// Fit shape and pose:
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_2(
morphable_model,
blend_shapes,
blendshapes,
landmark_sublist,
key_frames,
landmark_mapper,
key_frames[0].frame.cols,
key_frames[0].frame.rows,
...@@ -609,33 +676,68 @@ int main(int argc, char *argv[]) {
30.0f,
boost::none,
pca_shape_coefficients,
blend_shape_coefficients,
blendshape_coefficients,
fitted_image_points
);
// vid_iterator.setReconstructionResults(reconstructionData);
vid_iterator.update_reconstruction_coeff(pca_shape_coefficients);
// Render output:
output_render(
evaluate_results(
key_frames,
rendering_paramss,
landmark_sublist,
morphable_model,
blend_shapes,
meshs,
pca_shape_coefficients,
blend_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
annotations,
landmark_mapper,
reconstruction_data,
settings,
n_iter
);
} else {
std::cout << "No reconstruction - buffer did not change" << std::endl;
}
// // Render output:
// render_output(
// key_frames,
// rendering_paramss,
// landmark_sublist,
// meshs,
// pca_shape_coefficients,
// blendshape_coefficients,
// fitted_image_points,
// annotations,
// reconstruction_data,
// vid_iterator.last_frame,
// vid_iterator.last_frame_number,
// settings,
// n_iter
// );
// Get new frames:
key_frames = vid_iterator.next();
n_iter++;
}
auto key_frames = vid_iterator.get_keyframes();
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
evaluate_results(
key_frames,
rendering_paramss,
meshs,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
annotations,
reconstruction_data,
settings,
n_iter
);
//todo: we could build our final obj here?
return EXIT_SUCCESS;
}
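BufferedVideoIterator's threaded internals are not part of this diff; the start()/is_playing()/has_changed()/get_keyframes() calls in main() suggest a producer thread that scans the video for good frames and fills a keyframe buffer behind a mutex. A self-contained sketch of that pattern, with every name and type assumed:

    // Hypothetical illustration of the iterator's threading pattern; not the
    // commit's actual BufferedVideoIterator.
    #include <atomic>
    #include <mutex>
    #include <thread>
    #include <vector>

    class ThreadedFrameBuffer {
    public:
        void start() {
            playing = true;
            worker = std::thread([this] { this->search_loop(); });
        }
        bool is_playing() const { return playing; }
        bool has_changed() {
            return changed.exchange(false); // consume the "buffer is dirty" flag
        }
        std::vector<int> get_keyframes() { // int stands in for eos::video::Keyframe
            std::lock_guard<std::mutex> lock(mtx);
            return buffer; // copy out a consistent snapshot for the fitter
        }
        ~ThreadedFrameBuffer() {
            playing = false;
            if (worker.joinable()) worker.join();
        }
    private:
        void search_loop() {
            int frame_number = 0;
            while (playing && frame_number < 100) { // stand-in for "until the video ends"
                // A real implementation would grab a frame, estimate its pose, and
                // keep it only if it improves its yaw bin (the pose binning this
                // commit folds into the iterator):
                std::lock_guard<std::mutex> lock(mtx);
                buffer.push_back(frame_number++);
                changed = true;
            }
            playing = false;
        }
        std::vector<int> buffer;
        std::mutex mtx;
        std::thread worker;
        std::atomic<bool> playing{false};
        std::atomic<bool> changed{false};
    };

The has_changed() gate is what lets the main loop above skip refitting when the worker found no better frames since the last pass.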