Commit 574ccbce authored by Richard Torenvliet

Use a smart pointer for the keyframes. This makes sure that a generated mesh belongs to its keyframe.

parent de4c961c
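Note on the pattern this commit introduces: the fitting path now passes the keyframes around as std::vector<std::shared_ptr<eos::video::Keyframe>>, so the mesh and rendering parameters written during fitting land in the same Keyframe objects that the iterator's pose bins hold. A minimal self-contained sketch of that ownership pattern (the trimmed-down Keyframe and fit() below are illustrative stand-ins, not the eos API):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Trimmed-down stand-in for eos::video::Keyframe (assumption, not the real type).
struct Keyframe {
    std::string mesh; // placeholder for the fitted core::Mesh
};

// Mirrors fit_shape_and_pose_multi_parallel: the vector is taken by value,
// but each element still points at the caller's keyframes, so writes persist.
void fit(std::vector<std::shared_ptr<Keyframe>> keyframes) {
    for (auto& kf : keyframes) {
        kf->mesh = "generated mesh"; // visible to every other owner of kf
    }
}

int main() {
    auto kf = std::make_shared<Keyframe>();
    std::vector<std::shared_ptr<Keyframe>> bin{kf}; // stands in for a pose bin
    fit({kf});                         // the fitting sees the same Keyframe object
    std::cout << bin[0]->mesh << '\n'; // prints "generated mesh"
}

Since std::shared_ptr overloads operator->, the diff's keyframes[j].get()->fitting_result could equivalently be written keyframes[j]->fitting_result; .get() is only needed where a raw Keyframe* is wanted, as in generate_new_keyframe further down.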
@@ -880,7 +880,6 @@ inline eos::core::Mesh generate_new_mesh(
     return mesh;
 }
 
 inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
     const morphablemodel::MorphableModel& morphable_model,
     const std::vector<morphablemodel::Blendshape>& blendshapes,
@@ -1128,7 +1127,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>>
 inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_parallel(
     const morphablemodel::MorphableModel& morphable_model,
     const std::vector<morphablemodel::Blendshape>& blendshapes,
-    std::vector<eos::video::Keyframe>& keyframes,
+    std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes,
+    // std::vector<eos::video::Keyframe>& keyframes,
     const core::LandmarkMapper& landmark_mapper,
     int image_width,
     int image_height,
@@ -1224,7 +1224,7 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>>
     // (equal to landmark coordinates), for every image / mesh.
     std::tie(curr_model_points, curr_vertex_indices, curr_image_points) =
         eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
-            keyframes[j].fitting_result.landmarks, landmark_mapper, current_mesh);
+            keyframes[j].get()->fitting_result.landmarks, landmark_mapper, current_mesh);
 
     // Start constructing a list of rendering parameters needed for reconstruction.
     // Get the current points from the last added image points and model points
@@ -1234,9 +1234,6 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>>
     fitting::RenderingParameters current_rendering_params(current_pose, image_width, image_height);
     rendering_params[j] = current_rendering_params;
 
-    // update key frame rendering params
-    keyframes[j].fitting_result.rendering_parameters = current_rendering_params;
-
     Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
 
     // if no contour
@@ -1289,8 +1286,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>>
     vector<Vec2f> image_points_contour;
     vector<int> vertex_indices_contour;
 
-    auto curr_keyframe = keyframes[j];
-    auto landmarks = curr_keyframe.fitting_result.landmarks;
+    auto curr_keyframe = keyframes[j].get();
+    auto landmarks = curr_keyframe->fitting_result.landmarks;
     auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
 
     // For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
@@ -1332,95 +1329,6 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>>
         VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size());
         mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
     }
-    // Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
-    // vector<Vec2f> image_points_contour;
-    // vector<int> vertex_indices_contour;
-    //
-    // auto curr_keyframe = keyframes[j];
-    // auto landmarks = curr_keyframe.fitting_result.landmarks;
-    // auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
-    //
-    // // For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
-    // std::tie(image_points_contour, std::ignore, vertex_indices_contour) =
-    //     fitting::get_contour_correspondences(
-    //         landmarks,
-    //         contour_landmarks,
-    //         model_contour,
-    //         yaw_angle,
-    //         current_meshs[j],
-    //         rendering_params[j].get_modelview(),
-    //         rendering_params[j].get_projection(),
-    //         fitting::get_opencv_viewport(image_width, image_height)
-    //     );
-    //
-    // // Add the contour correspondences to the set of landmarks that we use for the fitting:
-    // vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
-    // image_points[j] = fitting::concat(image_points[j], image_points_contour);
-    //
-    // // Fit the occluding (away-facing) contour using the detected contour LMs:
-    // vector<Eigen::Vector2f> occluding_contour_landmarks;
-    //
-    // // positive yaw = subject looking to the left
-    // if (yaw_angle >= 0.0f)
-    // {
-    //     // the left contour is the occluding one we want to use ("away-facing")
-    //     auto contour_landmarks_ =
-    //         core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
-    //     std::for_each(begin(contour_landmarks_),
-    //                   end(contour_landmarks_),
-    //                   [&occluding_contour_landmarks](auto &&lm)
-    //                   {
-    //                       occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
-    //                   });
-    // }
-    // else
-    // {
-    //     auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
-    //     std::for_each(begin(contour_landmarks_),
-    //                   end(contour_landmarks_),
-    //                   [&occluding_contour_landmarks](auto &&lm)
-    //                   {
-    //                       occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
-    //                   });
-    // }
-    //
-    // auto edge_correspondences = fitting::find_occluding_edge_correspondences_parallel(
-    //     current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f
-    // );
-    //
-    // image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
-    // vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);
-    //
-    // // Get the model points of the current mesh, for all correspondences that we've got:
-    // model_points[j].clear();
-    //
-    // for (const auto &v : vertex_indices[j])
-    // {
-    //     model_points[j].push_back(
-    //         {
-    //             current_meshs[j].vertices[v][0],
-    //             current_meshs[j].vertices[v][1],
-    //             current_meshs[j].vertices[v][2],
-    //             current_meshs[j].vertices[v][3]
-    //         });
-    // }
-    //
-    // // Re-estimate the pose, using all correspondences:
-    // auto current_pose = fitting::estimate_orthographic_projection_linear(image_points[j],
-    //                                                                      model_points[j],
-    //                                                                      true,
-    //                                                                      image_height);
-    // rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);
-    //
-    // Mat affine_from_ortho =
-    //     fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
-    // affine_from_orthos.push_back(affine_from_ortho);
-    //
-    // // Estimate the PCA shape coefficients with the current blendshape coefficients:
-    // VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
-    //     blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
-    //     blendshape_coefficients[j].size());
-    // mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
 
     pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi_parallel(
         morphable_model,
@@ -1457,8 +1365,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>>
             // save it to the keyframe, we might need it for showing the reconstruction.
             // we could make it optional
-            keyframes[j].fitting_result.mesh = current_meshs[j];
-            keyframes[j].fitting_result.rendering_parameters = rendering_params[j];
+            keyframes[j].get()->fitting_result.mesh = current_meshs[j];
+            keyframes[j].get()->fitting_result.rendering_parameters = rendering_params[j];
         }
     }
...
@@ -610,7 +610,7 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer, ...
 #ifdef _OPENMP
 // vector<detail::TriangleToRasterize> triangles_to_raster;
-inline void raster_triangle_parallel(vector<detail::TriangleToRasterize> triangles_to_raster, cv::Mat colourbuffer, cv::Mat depthbuffer, boost::optional<Texture> texture, bool enable_far_clipping)
+inline void raster_triangle_parallel(std::vector<detail::TriangleToRasterize> triangles_to_raster, cv::Mat colourbuffer, cv::Mat depthbuffer, boost::optional<Texture> texture, bool enable_far_clipping)
 {
     using cv::Vec2f;
     using cv::Vec3f;
...
@@ -22,22 +22,26 @@
 #ifndef BUFFERED_VIDEO_ITERATOR_HPP_
 #define BUFFERED_VIDEO_ITERATOR_HPP_
 
+#include "eos/video/Keyframe.hpp"
 #include "opencv2/core/core.hpp"
 #include "opencv2/opencv.hpp"
 #include "opencv2/highgui/highgui.hpp"
-#include "eos/video/Keyframe.hpp"
 
 #include <string>
 #include <vector>
 #include <fstream>
 #include <thread>
 #include <mutex>
 #include <atomic>
 #include <unistd.h>
 
+#include <boost/thread/thread.hpp>
 #include "glm/gtc/matrix_transform.hpp"
 #include "glm/gtc/quaternion.hpp"
 
 using cv::Mat;
 using cv::Vec2f;
 using cv::Vec3f;
@@ -160,7 +164,7 @@
      * @param frame
      * @return Keyframe
      */
-    Keyframe generate_new_keyframe(cv::Mat frame) {
+    Keyframe* generate_new_keyframe(cv::Mat frame) {
         int frame_height = frame.rows;
         int frame_width = frame.cols;
@@ -169,55 +173,69 @@
         auto landmark_mapper = reconstruction_data.landmark_mapper;
         auto blendshapes = reconstruction_data.blendshapes;
         auto morphable_model = reconstruction_data.morphable_model;
+        auto edge_topology = reconstruction_data.edge_topology;
+        auto ibug_contour = reconstruction_data.contour_landmarks;
+        auto model_contour = reconstruction_data.model_contour;
 
-        // Reached the end of the landmarks (only applicable for annotated videos):
-        if (reconstruction_data.landmark_list.size() <= total_frames) {
-            std::cout << "Reached end of landmarks(" <<
-                reconstruction_data.landmark_list.size() <<
-                "/" << total_frames << ")" << std::endl;
-
-            return Keyframe();
-        }
+        vector<cv::Vec4f> model_points;
+        vector<int> vertex_indices;
+        vector<cv::Vec2f> image_points;
+
+        // make a new one
+        std::vector<float> blend_shape_coefficients;
 
         if (pca_shape_coefficients.empty()) {
             pca_shape_coefficients.resize(num_shape_coefficients_to_fit);
         }
 
-        // make a new one
-        std::vector<float> blend_shape_coefficients;
-
-        vector<cv::Vec4f> model_points;
-        vector<int> vertex_indices;
-        vector<cv::Vec2f> image_points;
-
-        // current pca_coeff will be the mean for the first iterations.
-        auto mesh = fitting::generate_new_mesh(morphable_model, blendshapes, pca_shape_coefficients, blend_shape_coefficients);
-
-        // Will yield model_points, vertex_indices and image_points
-        // todo: should this function not come from mesh?
-        core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
-
-        auto current_pose = fitting::estimate_orthographic_projection_linear(
-            image_points, model_points, true, frame_height);
-
-        fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
-        Mat affine_cam = fitting::get_3x4_affine_camera_matrix(rendering_params, frame_width, frame_height);
-
-        auto blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
-        auto current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
-        blend_shape_coefficients = fitting::fit_blendshapes_to_landmarks_nnls(
-            blendshapes, current_pca_shape, affine_cam, image_points, vertex_indices);
-        auto merged_shape = current_pca_shape + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
-            blend_shape_coefficients.size());
-
-        auto merged_mesh = morphablemodel::sample_to_mesh(
-            merged_shape,
-            morphable_model.get_color_model().get_mean(),
-            morphable_model.get_shape_model().get_triangle_list(),
-            morphable_model.get_color_model().get_triangle_list(),
-            morphable_model.get_texture_coordinates()
-        );
+        auto mesh = fitting::generate_new_mesh(
+            morphable_model,
+            blendshapes,
+            pca_shape_coefficients, // current pca_coeff will be the mean for the first iterations.
+            blend_shape_coefficients
+        );
+
+        // Will yield model_points, vertex_indices and frame_points
+        // todo: should this function not come from mesh?
+
+        // set all fitting params we found for this Keyframe
+        fitting::FittingResult fitting_result;
+        fitting_result.landmarks = landmarks;
+
+        auto keyframe = Keyframe(0.0f, frame, fitting_result, total_frames);
+
+        std::vector<std::shared_ptr<Keyframe>> keyframes;
+        keyframes.push_back(std::make_shared<Keyframe>(keyframe));
+
+        vector<core::Mesh> meshs;
+        vector<fitting::RenderingParameters> rendering_paramss;
+
+        // List of blendshapes coefficients:
+        std::vector<std::vector<float>> blendshape_coefficients;
+        std::vector<std::vector<cv::Vec2f>> fitted_image_points;
+
+        std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_parallel(
+            morphable_model,
+            blendshapes,
+            keyframes,
+            landmark_mapper,
+            frame_width,
+            frame_height,
+            static_cast<int>(keyframes.size()),
+            edge_topology,
+            ibug_contour,
+            model_contour,
+            10, // get from settings, needs to be the same.
+            boost::none,
+            30.0f,
+            boost::none,
+            pca_shape_coefficients,
+            blendshape_coefficients,
+            fitted_image_points,
+            settings
+        );
+
+        auto curr_keyframe = keyframes[0].get();
 
         // Render the model in a separate window using the estimated pose, shape and merged texture:
         Mat rendering;
@@ -227,21 +245,23 @@
         // make sure the image is CV_8UC4, maybe do check first?
         rendering.convertTo(rendering, CV_8UC4);
 
         auto t1 = std::chrono::high_resolution_clock::now();
 
-        Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame, true, render::TextureInterpolation::NearestNeighbour, 512);
+        cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(curr_keyframe->fitting_result.rendering_parameters, frame_width, frame_height);
+        Mat isomap = render::extract_texture(curr_keyframe->fitting_result.mesh, affine_from_ortho, curr_keyframe->frame, true, render::TextureInterpolation::NearestNeighbour, 512);
 
         // Merge the isomaps - add the current one to the already merged ones:
         Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
 
         Mat frontal_rendering;
         auto rot_mtx_y = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f));
-        rendering_params.set_rotation(rot_mtx_y);
-        auto modelview_no_translation = rendering_params.get_modelview();
+        curr_keyframe->fitting_result.rendering_parameters.set_rotation(rot_mtx_y);
+        auto modelview_no_translation = curr_keyframe->fitting_result.rendering_parameters.get_modelview();
         modelview_no_translation[3][0] = 0;
         modelview_no_translation[3][1] = 0;
 
         std::tie(frontal_rendering, std::ignore) = render::render(
-            merged_mesh,
+            curr_keyframe->fitting_result.mesh,
             modelview_no_translation,
             glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
             256, 256,
@@ -253,12 +273,10 @@
         cvtColor(frontal_rendering, frontal_rendering, CV_BGRA2BGR);
 
-        fitting::FittingResult fitting_result;
-        fitting_result.rendering_parameters = rendering_params;
-        fitting_result.landmarks = landmarks;
-        fitting_result.mesh = mesh;
-
-        return Keyframe(0.0f, frontal_rendering, fitting_result, total_frames);
+        curr_keyframe->frame = frontal_rendering;
+
+        return curr_keyframe;
     }
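One thing to watch in the version above: the returned raw pointer comes from keyframes[0].get(), where keyframes is a vector local to generate_new_keyframe, so the pointed-to Keyframe is destroyed when that vector goes out of scope. A minimal sketch of the same flow that returns the shared_ptr itself and so keeps the keyframe alive (simplified stand-in types, an assumption rather than the eos API):

#include <memory>
#include <opencv2/core/core.hpp>

struct Keyframe { cv::Mat frame; }; // trimmed stand-in (assumption)

// Returning the shared_ptr transfers ownership to the caller, unlike
// returning keyframes[0].get() from a function-local vector of shared_ptr.
std::shared_ptr<Keyframe> generate_new_keyframe(cv::Mat frame) {
    auto keyframe = std::make_shared<Keyframe>();
    keyframe->frame = frame; // ... fitting, texture extraction, rendering ...
    return keyframe;
}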
 /**
@@ -327,7 +345,7 @@
         }
 
         // makes a copy of the frame
-        Keyframe keyframe = generate_new_keyframe(frame);
+        Keyframe *keyframe = generate_new_keyframe(frame);
 
         // if(wireframe) {
         //     draw_wireframe(keyframe.frame, keyframe);
@@ -337,12 +355,12 @@
         //     draw_landmarks(keyframe.frame, keyframe);
         // }
 
-        writer.write(keyframe.frame);
+        writer.write(keyframe->frame);
         total_frames++;
 
         if (show_video) {
             std::cout << "show video" << std::endl;
-            cv::imshow("video", keyframe.frame);
+            cv::imshow("video", keyframe->frame);
             cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
         }
@@ -442,6 +460,7 @@
     int width;
     int height;
     int last_frame_number;
     Keyframe last_keyframe;
 
     std::unique_ptr<std::thread> frame_buffer_worker;
@@ -549,8 +568,8 @@
         // set all fitting params we found for this Keyframe
         fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
         fitting::FittingResult fitting_result;
-        fitting_result.rendering_parameters = rendering_params;
         fitting_result.landmarks = landmarks;
+        fitting_result.rendering_parameters = rendering_params;
 
         cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
@@ -568,9 +587,9 @@
      * @param keyframe
      * @return
      */
-    bool try_add(Keyframe &keyframe) {
+    bool try_add(Keyframe keyframe) {
         // Determine whether to add or not:
-        auto yaw_angle = glm::degrees(glm::yaw(keyframe.fitting_result.rendering_parameters.get_rotation()));
+        float yaw_angle = glm::degrees(glm::yaw(keyframe.fitting_result.rendering_parameters.get_rotation()));
 
         auto idx = angle_to_index(yaw_angle);
         bool add_frame = false;
@@ -581,13 +600,15 @@
             return false;
         }
 
+        std::cout << "idx: " << idx << std::endl;
+
         // always add when we don't have enough frames
         if (bins[idx].size() < frames_per_bin) {
             add_frame = true; // definitely adding - we wouldn't have to go through the for-loop on the next line.
         }
 
         for (auto&& f : bins[idx]) {
-            if (keyframe.score > f.score) {
+            if (keyframe.score > f.get()->score) {
                 add_frame = true;
             }
         }
@@ -597,12 +618,12 @@
         }
 
         // Add the keyframe:
-        bins[idx].push_back(keyframe);
+        bins[idx].push_back(std::make_shared<Keyframe>(keyframe));
         if (bins[idx].size() > frames_per_bin) {
             n_frames--;
 
             // need to remove the lowest one:
             std::sort(std::begin(bins[idx]), std::end(bins[idx]),
-                      [](const auto& lhs, const auto& rhs) { return lhs.score > rhs.score; });
+                      [](const auto& lhs, const auto& rhs) { return lhs->score > rhs->score; });
             bins[idx].resize(frames_per_bin);
         }
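The bin update above implements a keep-the-best policy: a keyframe is always admitted while the bin holds fewer than frames_per_bin entries, otherwise only if it outscores an existing entry; after insertion the bin is sorted by descending score and truncated. A compact sketch of the same policy over shared_ptr bins (the one-field Keyframe is a stand-in, not the eos type):

#include <algorithm>
#include <cstddef>
#include <memory>
#include <vector>

struct Keyframe { float score = 0.f; }; // trimmed stand-in (assumption)
using BinContent = std::vector<std::shared_ptr<Keyframe>>;

// Keep at most frames_per_bin keyframes, preferring higher scores.
bool try_add(BinContent& bin, Keyframe keyframe, std::size_t frames_per_bin) {
    bool add_frame = bin.size() < frames_per_bin; // always fill an empty slot
    for (auto&& f : bin) {
        if (keyframe.score > f->score) add_frame = true; // beats an existing entry
    }
    if (!add_frame) return false;

    bin.push_back(std::make_shared<Keyframe>(keyframe));
    if (bin.size() > frames_per_bin) {
        std::sort(std::begin(bin), std::end(bin),
                  [](const auto& lhs, const auto& rhs) { return lhs->score > rhs->score; });
        bin.resize(frames_per_bin); // drop the lowest-scoring keyframe
    }
    return true;
}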
@@ -622,9 +643,9 @@
         output.append("[");
 
         for (auto&& f : b) {
-            std::size_t idx = angle_to_index(f.yaw_angle);
-            output.append("(" + std::to_string(f.score));
-            output.append("," + std::to_string(f.yaw_angle));
+            std::size_t idx = angle_to_index(f->yaw_angle);
+            output.append("(" + std::to_string(f->score));
+            output.append("," + std::to_string(f->yaw_angle));
             output.append("), ");
         }
@@ -639,8 +660,8 @@
      *
      * @return std::vector<Keyframe>
      */
-    std::vector<Keyframe> get_keyframes() const {
-        std::vector<Keyframe> keyframes;
+    std::vector<std::shared_ptr<Keyframe>> get_keyframes() const {
+        std::vector<std::shared_ptr<Keyframe>> keyframes;
         for (auto&& b : bins) {
             for (auto&& f : b) {
                 keyframes.push_back(f);
@@ -655,7 +676,7 @@
      *
      * @return
      */
-    std::vector<Keyframe> start() {
+    std::vector<std::shared_ptr<Keyframe>> start() {
         // blocking calling next for the first time, stop if empty frame.
         if (!next()) {
             __stop();
@@ -809,7 +830,7 @@
     cv::VideoCapture cap;
     eos::fitting::ReconstructionData reconstruction_data;
 
-    using BinContent = std::vector<Keyframe>;
+    using BinContent = std::vector<std::shared_ptr<Keyframe>>;
     std::vector<BinContent> bins;
 
     // latest pca_shape_coefficients
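BinContent makes each pose bin share ownership of its keyframes with whoever called get_keyframes(). How yaw angles map to bin indices is not shown in this diff; a hypothetical sketch of such a mapping (the linear [-90, +90] degree layout is an assumption, not the actual angle_to_index):

#include <algorithm>
#include <cstddef>

// Hypothetical mapping: num_bins equal-width bins covering [-90, +90] degrees of yaw.
std::size_t angle_to_index(float yaw_angle, std::size_t num_bins = 9) {
    const float clamped = std::max(-90.f, std::min(90.f, yaw_angle));
    const auto idx = static_cast<std::size_t>((clamped + 90.f) / 180.f * num_bins);
    return std::min(idx, num_bins - 1); // +90 degrees falls into the last bin
}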
...
@@ -49,6 +49,7 @@
 using eos::core::LandmarkCollection;
 using eos::video::BufferedVideoIterator;
 using eos::video::WeightedIsomapAveraging;
 using eos::video::ReconstructionVideoWriter;
+using eos::video::Keyframe;
 using cv::Mat;
 using cv::Vec2f;
 using cv::Vec3f;
@@ -164,7 +165,7 @@
 /**
  *
- * @param key_frames
+ * @param keyframes
  * @param rendering_paramss
  * @param landmark_list
  * @param morphable_model
@@ -179,7 +180,7 @@
  * @param n_iter
  */
 void render_output(
-    std::vector<eos::video::Keyframe> key_frames,
+    std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes,
     std::vector<fitting::RenderingParameters> rendering_paramss,
     std::vector<core::LandmarkCollection<cv::Vec2f>> landmark_list,
     vector<core::Mesh> meshs,
@@ -209,12 +210,12 @@
     auto outputfilebase = annotations[0];
 
-    for (uint i = 0; i < key_frames.size(); ++i) {
-        Mat frame = key_frames[i].frame;
+    for (uint i = 0; i < keyframes.size(); ++i) {
+        Mat frame = keyframes[i].get()->frame;
         auto unmodified_frame = frame.clone();
 
-        int frame_number = key_frames[i].frame_number;
-        float yaw_angle = key_frames[i].yaw_angle; // glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
+        int frame_number = keyframes[i].get()->frame_number;
+        float yaw_angle = keyframes[i].get()->yaw_angle; // glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
 
         cv::imwrite("/tmp/eos/" + std::to_string(frame_number) + "_" + std::to_string(yaw_angle) + ".png", frame);
@@ -325,7 +326,7 @@
 /**
  *
- * @param key_frames
+ * @param keyframes
  * @param rendering_paramss
  * @param landmark_list
  * @param morphable_model
@@ -339,8 +340,7 @@
  * @param settings
  * @param n_iter
  */
-void evaluate_results(
-    std::vector<eos::video::Keyframe> key_frames,
+void evaluate_results(std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes,
     std::vector<fitting::RenderingParameters> rendering_paramss,
     vector<core::Mesh> meshs,
     std::vector<float> pca_shape_coefficients,
@@ -351,6 +351,7 @@
     boost::property_tree::ptree settings,
     int n_iter) {
 
     std::string output_path = settings.get<std::string>("output.output_path", "/tmp");
     float merge_isomap_face_angle = settings.get<float>("output.merge_isomap_face_angle", 60.f);
@@ -362,19 +363,18 @@
     auto blendshapes = reconstruction_data.blendshapes;
     auto morphable_model = reconstruction_data.morphable_model;
 
     // The 3D head pose can be recovered as follows:
-    for (uint i = 0; i < key_frames.size(); ++i) {
-        int frame_number = key_frames[i].frame_number;
-        auto landmarks = key_frames[i].fitting_result.landmarks;
-        auto rendering_params = key_frames[i].fitting_result.rendering_parameters;
-        float yaw_angle = key_frames[i].yaw_angle;
-        Mat frame = key_frames[i].frame;
+    for (uint i = 0; i < keyframes.size(); ++i) {
+        Mat frame = keyframes[i].get()->frame;
 
         int frame_width = frame.cols;
         int frame_height = frame.rows;
 
+        int frame_number = keyframes[i].get()->frame_number;
+        auto landmarks = keyframes[i].get()->fitting_result.landmarks;
+        auto rendering_params = keyframes[i].get()->fitting_result.rendering_parameters;
+        auto yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
+
         Mat outimg = frame.clone();
 
         // and similarly for pitch and roll.
@@ -388,22 +388,18 @@
                 cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
             );
         }
 
         // Draw the fitted mesh as wireframe, and save the image:
         draw_wireframe(
             outimg,
-            meshs[i],
+            keyframes[i].get()->fitting_result.mesh,
             rendering_params.get_modelview(),
             rendering_params.get_projection(),
             fitting::get_opencv_viewport(frame_width, frame_height));
 
-        // bool eyes_open = isomap_averaging.has_eyes_open(frame, landmarks);
-        //
-        // if (!eyes_open) {
-        //     continue;
-        // }
-
-        // cv::imshow("Img", outimg);
-        // cv::waitKey(0);
+        cv::imshow("Img", outimg);
+        cv::waitKey(20);
 
         // Draw the loaded landmarks:
         Mat isomap = render::extract_texture(meshs[i], affine_from_ortho, frame);
@@ -411,34 +407,28 @@
         // merge the isomaps:
         merged_isomap = isomap_averaging.add_and_merge(isomap);
 
-        // cv::imshow("isomap", isomap);
-        // cv::imshow("merged_isomap", merged_isomap);
-        //
-        // Mat outimg = frame.clone();
-        //
-        // for (auto &&lm : landmarks) {
-        //     cv::rectangle(
-        //         outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
-        //         cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
-        //     );
-        // }
-        // // Draw the fitted mesh as wireframe, and save the image:
-        // draw_wireframe(
-        //     outimg,
-        //     meshs[i],
-        //     rendering_params.get_modelview(),
-        //     rendering_params.get_projection(),
-        //     fitting::get_opencv_viewport(frame_width, frame_height));
-        //
-        // cv::imshow("Img", outimg);
-        // cv::waitKey(0);
-        // fs::path path = (fs::path(annotations[frame_number]).parent_path() / "eval");
-        // std::string outputfile = (path / fs::path(annotations[frame_number]).replace_extension("").filename()).string();
-        // std::string iter = "_" + std::to_string(n_iter) + "_" + std::to_string(i);
-        // cv::imwrite(outputfile + iter + ".annotated.png", outimg);
-        //
-        // // save frontal rendering with texture:
+        evaluate_accuracy(
+            landmarks,
+            landmark_mapper,
+            meshs[i],
+            affine_from_ortho,
+            settings
+        );
+    }
+
+    // save the merged isomap:
+    // std::string iter = "_" + std::to_string(n_iter);
+    // fs::path path = (fs::path(annotations[n_iter]).parent_path() / "eval");
+    // fs::create_directory(path);
+    // std::string outputfile = (path / fs::path(annotations[n_iter]).replace_extension("").filename()).string();
+    //
+    // // sharpen isomap
+    // Mat sharpen = isomap_averaging.sharpen(merged_isomap);
+    //
+    // cv::imwrite(outputfile + iter + ".isomap.png", sharpen);
+    //
+    // // save the frontal rendering with merged isomap:
+    // Mat frontal_rendering;
     // glm::mat4 modelview_frontal = glm::mat4( 1.0 );
     // core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
     //     morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
@@ -448,53 +438,6 @@
     //     morphable_model.get_texture_coordinates()
     // );
-    // cv::imwrite(outputfile + iter + ".isomap.png", isomap);
-
-    // Mat frontal_rendering;
-    // std::tie(frontal_rendering, std::ignore) = eos::render::render(
-    //     neutral_expression,
-    //     rendering_params.get_modelview(),
-    //     rendering_params.get_projection(),
-    //     frame_width,
-    //     frame_height,
-    //     render::create_mipmapped_texture(merged_isomap),
-    //     true,
-    //     false,
-    //     false
-    // );
-    // cv::imwrite(outputfile + iter + ".frontal.png", frontal_rendering);
-
-    // evaluate_accuracy(
-    //     landmarks,
-    //     landmark_mapper,
-    //     meshs[i],
-    //     affine_from_ortho,
-    //     settings
-    // );
-    }
-
-    // save the merged isomap:
-    std::string iter = "_" + std::to_string(n_iter);
-    fs::path path = (fs::path(annotations[n_iter]).parent_path() / "eval");
-    fs::create_directory(path);
-    std::string outputfile = (path / fs::path(annotations[n_iter]).replace_extension("").filename()).string();
-
-    // sharpen isomap
-    Mat sharpen = isomap_averaging.sharpen(merged_isomap);
-    cv::imwrite(outputfile + iter + ".isomap.png", sharpen);
-
-    // save the frontal rendering with merged isomap:
-    Mat frontal_rendering;
-    glm::mat4 modelview_frontal = glm::mat4( 1.0 );
-    core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
-        morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
-        morphable_model.get_color_model().get_mean(),
-        morphable_model.get_shape_model().get_triangle_list(),
-        morphable_model.get_color_model().get_triangle_list(),
-        morphable_model.get_texture_coordinates()
-    );
     // std::tie(frontal_rendering, std::ignore) = render::render(
     //     neutral_expression,
     //     modelview_frontal,
@@ -508,8 +451,8 @@
     // cv::imwrite(outputfile + iter + ".merged_frontal.png", frontal_rendering);
 
     // Save the mesh as textured obj:
-    core::write_textured_obj(neutral_expression, outputfile + iter + ".obj");
-    std::cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile + iter + ".obj" << std::endl;
+    // core::write_textured_obj(neutral_expression, outputfile + iter + ".obj");
+    // std::cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile + iter + ".obj" << std::endl;
 }
 /**
@@ -530,16 +473,17 @@ boost::property_tree::ptree get_reconstruction_config(std::string filename) {
  * causes un-alignment of the total landmarks list and the list of frames. This samples the correct landmark
  * annotations based on a given keyframe list.
  *
- * @param key_frames
+ * @param keyframes
  * @param landmarks
  * @return
  */
 vector<core::LandmarkCollection<cv::Vec2f>> sample_landmarks(
-    std::vector<eos::video::Keyframe> key_frames, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
+    std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
 
     vector<core::LandmarkCollection<cv::Vec2f>> sublist;
 
-    for (auto& f : key_frames) {
-        sublist.push_back(landmarks[f.frame_number]);
+    for (auto& f : keyframes) {
+        sublist.push_back(landmarks[f.get()->frame_number]);
     }
 
     return sublist;
@@ -668,7 +612,13 @@ int main(int argc, char *argv[]) {
     }
 
     // Start getting video frames:
+    bool use_pose_binning = settings.get<bool>("frames.use_pose_binning", true);
+
+    if (use_pose_binning) {
         vid_iterator.start();
+    }
 
     ReconstructionVideoWriter vid_writer;
     try {
         vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
@@ -683,26 +633,34 @@ int main(int argc, char *argv[]) {
     // Count the amount of iterations:
     int n_iter = 0;
 
-    while(vid_iterator.is_playing()) {
-        auto key_frames = vid_iterator.get_keyframes();
+    while(vid_iterator.is_playing() || n_iter < 20) {
+        std::vector<std::shared_ptr<Keyframe>> keyframes;
+
+        if (use_pose_binning) {
+            keyframes = vid_iterator.get_keyframes();
+        } else {
+            vid_iterator.next();
+            keyframes.push_back(std::make_shared<Keyframe>(vid_iterator.get_last_keyframe()));
+        }
 
         // Generate a sublist of the total available landmark list:
-        auto landmark_sublist = sample_landmarks(key_frames, landmark_list);
+        auto landmark_sublist = sample_landmarks(keyframes, landmark_list);
 
         // it makes no sense to update pca_coeff if nothing in the buffer has changed:
-        if (vid_iterator.has_changed()) {
-            std::cout << "Going to reconstruct with " << key_frames.size() << " images." << num_iterations << std::endl;
+        if (vid_iterator.has_changed() || use_pose_binning == false) {
+            std::cout << "Going to reconstruct with " << keyframes.size() << " images." << num_iterations << std::endl;
 
             // Fit shape and pose:
             auto t1 = std::chrono::high_resolution_clock::now();
             std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_parallel(
                 morphable_model,
                 blendshapes,
-                key_frames,
+                keyframes,
                 landmark_mapper,
-                key_frames[0].frame.cols,
-                key_frames[0].frame.rows,
-                static_cast<int>(key_frames.size()),
+                keyframes[0].get()->frame.cols,
+                keyframes[0].get()->frame.rows,
+                static_cast<int>(keyframes.size()),
                 edge_topology,
                 ibug_contour,
                 model_contour,
@@ -716,39 +674,45 @@ int main(int argc, char *argv[]) {
                 settings
             );
-            std::cout << vid_iterator.to_string() << std::endl;
-
             auto t2 = std::chrono::high_resolution_clock::now();
             std::cout << "Reconstruction took "
                 << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
-                << "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / (key_frames.size() * num_iterations)
+                << "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / (keyframes.size() * num_iterations)
                 << "ms)" << std::endl;
 
-            if(settings.get<bool>("output.make_video", false)) {
-                vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
-                vid_writer.next();
-            }
-
-            // evaluate_results(
-            //     key_frames,
-            //     rendering_paramss,
-            //     meshs,
-            //     pca_shape_coefficients,
-            //     blendshape_coefficients,
-            //     fitted_image_points,
-            //     annotations,
-            //     reconstruction_data,
-            //     settings,
-            //     n_iter
-            // );
+            //
+            // if(settings.get<bool>("output.make_video", false)) {
+            //     vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
+            //     vid_writer.next();
+            // }
+
+            evaluate_results(
+                keyframes,
+                rendering_paramss,
+                meshs,
+                pca_shape_coefficients,
+                blendshape_coefficients,
+                fitted_image_points,
+                annotations,
+                reconstruction_data,
+                settings,
+                n_iter
+            );
         } else {
-            // std::cout << "No reconstruction - buffer did not change" << std::endl;
+            std::cout << vid_iterator.to_string() << std::endl;
+            std::cout << "No reconstruction - buffer did not change" << std::endl;
         }
 
+        std::cout << n_iter << std::endl;
+
         // Get new frames:
         n_iter++;
     }
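The reworked loop draws its keyframes from one of two sources: the pose-binned buffer (when frames.use_pose_binning is set), or the single most recent frame copied into a fresh shared_ptr. A minimal sketch of that selection (VideoIterator is a hypothetical stand-in for BufferedVideoIterator, not the real class):

#include <memory>
#include <vector>

struct Keyframe {}; // trimmed stand-in (assumption)

struct VideoIterator { // hypothetical stand-in for BufferedVideoIterator
    std::vector<std::shared_ptr<Keyframe>> get_keyframes() const { return bins_flat; }
    bool next() { return true; }
    Keyframe get_last_keyframe() const { return {}; }
    std::vector<std::shared_ptr<Keyframe>> bins_flat;
};

std::vector<std::shared_ptr<Keyframe>> select_keyframes(VideoIterator& it, bool use_pose_binning) {
    if (use_pose_binning)
        return it.get_keyframes(); // pointers shared with the iterator's bins
    it.next();                     // advance to the newest frame
    // Note: make_shared copies the keyframe, so results the fitting writes into
    // this object do not propagate back into the iterator's own copy.
    return {std::make_shared<Keyframe>(it.get_last_keyframe())};
}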
-    // vid_writer.__stop();
+    std::cout << "stopping?" << std::endl;
+    vid_writer.__stop();
 
     // if(settings.get<bool>("output.make_video", false)) {
     //     // Render output:
     //     std::cout << "Waiting for video to be completed..." << std::endl;
@@ -768,20 +732,20 @@ int main(int argc, char *argv[]) {
     //     vid_writer.__stop();
     // }
 
-    auto key_frames = vid_iterator.get_keyframes();
-
-    evaluate_results(
-        key_frames,
-        rendering_paramss,
-        meshs,
-        pca_shape_coefficients,
-        blendshape_coefficients,
-        fitted_image_points,
-        annotations,
-        reconstruction_data,
-        settings,
-        n_iter
-    );
+    // auto keyframes = vid_iterator.get_keyframes();
+    //
+    // evaluate_results(
+    //     keyframes,
+    //     rendering_paramss,
+    //     meshs,
+    //     pca_shape_coefficients,
+    //     blendshape_coefficients,
+    //     fitted_image_points,
+    //     annotations,
+    //     reconstruction_data,
+    //     settings,
+    //     n_iter
+    // );
 
     //todo: we could build our final obj here?
...