Commit 574ccbce authored by Richard Torenvliet

Use a smart pointer for the keyframes. This makes sure that the generated mesh belongs to a keyframe.

parent de4c961c
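
The change this commit makes throughout is to hold keyframes as std::vector<std::shared_ptr<eos::video::Keyframe>> instead of by value, so that results the fitting writes into a keyframe (mesh, rendering parameters) are visible to every holder of that keyframe rather than to a private copy. A minimal sketch of that ownership pattern, with simplified stand-in types (FittingResult/mesh_id here are illustrative placeholders, not the real eos::video members):

// Sketch only: simplified stand-ins for eos::video::Keyframe and
// eos::fitting::FittingResult, to show why shared_ptr is used.
#include <iostream>
#include <memory>
#include <vector>

struct FittingResult {
    int mesh_id = -1; // stand-in for core::Mesh
};

struct Keyframe {
    float score = 0.0f;
    FittingResult fitting_result;
};

// The fitter writes results through the shared_ptr, so the pose bins in
// the video iterator and the caller's keyframe list see the same object.
void fit(std::vector<std::shared_ptr<Keyframe>>& keyframes) {
    for (auto& kf : keyframes) {
        kf->fitting_result.mesh_id = 42; // the generated mesh now belongs to the keyframe
    }
}

int main() {
    std::vector<std::shared_ptr<Keyframe>> keyframes;
    keyframes.push_back(std::make_shared<Keyframe>());

    // Shares ownership; with by-value Keyframes this would be a deep copy
    // and fit() would update only one of the two lists.
    std::vector<std::shared_ptr<Keyframe>> bin = keyframes;
    fit(keyframes);

    std::cout << bin[0]->fitting_result.mesh_id << "\n"; // prints 42: same Keyframe instance
}
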
......@@ -880,7 +880,6 @@ inline eos::core::Mesh generate_new_mesh(
return mesh;
}
inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
const morphablemodel::MorphableModel& morphable_model,
const std::vector<morphablemodel::Blendshape>& blendshapes,
......@@ -1128,7 +1127,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_parallel(
const morphablemodel::MorphableModel& morphable_model,
const std::vector<morphablemodel::Blendshape>& blendshapes,
std::vector<eos::video::Keyframe>& keyframes,
std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes,
// std::vector<eos::video::Keyframe>& keyframes,
const core::LandmarkMapper& landmark_mapper,
int image_width,
int image_height,
......@@ -1224,7 +1224,7 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
// (equal to landmark coordinates), for every image / mesh.
std::tie(curr_model_points, curr_vertex_indices, curr_image_points) =
eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
keyframes[j].fitting_result.landmarks, landmark_mapper, current_mesh);
keyframes[j]->fitting_result.landmarks, landmark_mapper, current_mesh);
// Start constructing a list of rendering parameters needed for reconstruction.
// Get the current points from the last added image points and model points
......@@ -1234,9 +1234,6 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
fitting::RenderingParameters current_rendering_params(current_pose, image_width, image_height);
rendering_params[j] = current_rendering_params;
// update key frame rendering params
keyframes[j].fitting_result.rendering_parameters = current_rendering_params;
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
// if no contour
......@@ -1289,8 +1286,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
vector<Vec2f> image_points_contour;
vector<int> vertex_indices_contour;
auto curr_keyframe = keyframes[j];
auto landmarks = curr_keyframe.fitting_result.landmarks;
auto curr_keyframe = keyframes[j].get();
auto landmarks = curr_keyframe->fitting_result.landmarks;
auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
......@@ -1332,95 +1329,6 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),blendshape_coefficients[j].size());
mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
}
// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
// vector<Vec2f> image_points_contour;
// vector<int> vertex_indices_contour;
//
// auto curr_keyframe = keyframes[j];
// auto landmarks = curr_keyframe.fitting_result.landmarks;
// auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
//
// // For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
// std::tie(image_points_contour, std::ignore, vertex_indices_contour) =
// fitting::get_contour_correspondences(
// landmarks,
// contour_landmarks,
// model_contour,
// yaw_angle,
// current_meshs[j],
// rendering_params[j].get_modelview(),
// rendering_params[j].get_projection(),
// fitting::get_opencv_viewport(image_width, image_height)
// );
//
// // Add the contour correspondences to the set of landmarks that we use for the fitting:
// vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
// image_points[j] = fitting::concat(image_points[j], image_points_contour);
//
// // Fit the occluding (away-facing) contour using the detected contour LMs:
// vector<Eigen::Vector2f> occluding_contour_landmarks;
//
// // positive yaw = subject looking to the left
// if (yaw_angle >= 0.0f)
// {
// // the left contour is the occluding one we want to use ("away-facing")
// auto contour_landmarks_ =
// core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
// std::for_each(begin(contour_landmarks_),
// end(contour_landmarks_),
// [&occluding_contour_landmarks](auto &&lm)
// {
// occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
// });
// }
// else
// {
// auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
// std::for_each(begin(contour_landmarks_),
// end(contour_landmarks_),
// [&occluding_contour_landmarks](auto &&lm)
// {
// occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
// });
// }
//
// auto edge_correspondences = fitting::find_occluding_edge_correspondences_parallel(
// current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f
// );
//
// image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
// vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);
//
// // Get the model points of the current mesh, for all correspondences that we've got:
// model_points[j].clear();
//
// for (const auto &v : vertex_indices[j])
// {
// model_points[j].push_back(
// {
// current_meshs[j].vertices[v][0],
// current_meshs[j].vertices[v][1],
// current_meshs[j].vertices[v][2],
// current_meshs[j].vertices[v][3]
// });
// }
//
// // Re-estimate the pose, using all correspondences:
// auto current_pose = fitting::estimate_orthographic_projection_linear(image_points[j],
// model_points[j],
// true,
// image_height);
// rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);
//
// Mat affine_from_ortho =
// fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
// affine_from_orthos.push_back(affine_from_ortho);
//
// // Estimate the PCA shape coefficients with the current blendshape coefficients:
// VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
// blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
// blendshape_coefficients[j].size());
// mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi_parallel(
morphable_model,
......@@ -1457,8 +1365,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
// Save it to the keyframe; we might need it for showing the reconstruction.
// We could make this optional.
keyframes[j].fitting_result.mesh = current_meshs[j];
keyframes[j].fitting_result.rendering_parameters = rendering_params[j];
keyframes[j]->fitting_result.mesh = current_meshs[j];
keyframes[j]->fitting_result.rendering_parameters = rendering_params[j];
}
}
......
......@@ -610,7 +610,7 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer,
#ifdef _OPENMP
// vector<detail::TriangleToRasterize> triangles_to_raster;
inline void raster_triangle_parallel(vector<detail::TriangleToRasterize> triangles_to_raster, cv::Mat colourbuffer, cv::Mat depthbuffer, boost::optional<Texture> texture, bool enable_far_clipping)
inline void raster_triangle_parallel(std::vector<detail::TriangleToRasterize> triangles_to_raster, cv::Mat colourbuffer, cv::Mat depthbuffer, boost::optional<Texture> texture, bool enable_far_clipping)
{
using cv::Vec2f;
using cv::Vec3f;
......
......@@ -22,22 +22,26 @@
#ifndef BUFFERED_VIDEO_ITERATOR_HPP_
#define BUFFERED_VIDEO_ITERATOR_HPP_
#include "eos/video/Keyframe.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "eos/video/Keyframe.hpp"
#include <string>
#include <vector>
#include <fstream>
#include <thread>
#include <mutex>
#include <atomic>
#include <unistd.h>
#include <boost/thread/thread.hpp>
#include "glm/gtc/matrix_transform.hpp"
#include "glm/gtc/quaternion.hpp"
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
......@@ -160,7 +164,7 @@ public:
* @param frame
* @return Keyframe
*/
Keyframe generate_new_keyframe(cv::Mat frame) {
Keyframe* generate_new_keyframe(cv::Mat frame) {
int frame_height = frame.rows;
int frame_width = frame.cols;
......@@ -169,55 +173,69 @@ public:
auto landmark_mapper = reconstruction_data.landmark_mapper;
auto blendshapes = reconstruction_data.blendshapes;
auto morphable_model = reconstruction_data.morphable_model;
auto edge_topology = reconstruction_data.edge_topology;
auto ibug_contour = reconstruction_data.contour_landmarks;
auto model_contour = reconstruction_data.model_contour;
// Reached the end of the landmarks (only applicable for annotated videos):
if (reconstruction_data.landmark_list.size() <= total_frames) {
std::cout << "Reached end of landmarks(" <<
reconstruction_data.landmark_list.size() <<
"/" << total_frames << ")" << std::endl;
vector<cv::Vec4f> model_points;
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
return Keyframe();
}
// make a new one
std::vector<float> blend_shape_coefficients;
if (pca_shape_coefficients.empty()) {
pca_shape_coefficients.resize(num_shape_coefficients_to_fit);
}
// make a new one
std::vector<float> blend_shape_coefficients;
auto mesh = fitting::generate_new_mesh(
morphable_model,
blendshapes,
pca_shape_coefficients, // current pca_coeff will be the mean for the first iterations.
blend_shape_coefficients
);
vector<cv::Vec4f> model_points;
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
// Will yield model_points, vertex_indices and frame_points
// todo: should this function not come from mesh?
// set all fitting params we found for this Keyframe
fitting::FittingResult fitting_result;
fitting_result.landmarks = landmarks;
// current pca_coeff will be the mean for the first iterations.
auto mesh = fitting::generate_new_mesh(morphable_model, blendshapes, pca_shape_coefficients, blend_shape_coefficients);
auto keyframe = Keyframe(0.0f, frame, fitting_result, total_frames);
// Will yield model_points, vertex_indices and image_points
// todo: should this function not come from mesh?
core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
std::vector<std::shared_ptr<Keyframe>> keyframes;
keyframes.push_back(std::make_shared<Keyframe>(keyframe));
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height);
vector<core::Mesh> meshs;
vector<fitting::RenderingParameters> rendering_paramss;
// List of blendshapes coefficients:
std::vector<std::vector<float>> blendshape_coefficients;
std::vector<std::vector<cv::Vec2f>> fitted_image_points;
fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
Mat affine_cam = fitting::get_3x4_affine_camera_matrix(rendering_params, frame_width, frame_height);
auto blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
auto current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
blend_shape_coefficients = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_cam, image_points, vertex_indices);
auto merged_shape = current_pca_shape + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
blend_shape_coefficients.size());
auto merged_mesh = morphablemodel::sample_to_mesh(
merged_shape,
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_parallel(
morphable_model,
blendshapes,
keyframes,
landmark_mapper,
frame_width,
frame_height,
static_cast<int>(keyframes.size()),
edge_topology,
ibug_contour,
model_contour,
10, // todo: get from settings; needs to match the reconstruction settings.
boost::none,
30.0f,
boost::none,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
settings
);
auto curr_keyframe = keyframes[0].get();
// Render the model in a separate window using the estimated pose, shape and merged texture:
Mat rendering;
......@@ -227,21 +245,23 @@ public:
// make sure the image is CV_8UC4; maybe add a check first?
rendering.convertTo(rendering, CV_8UC4);
auto t1 = std::chrono::high_resolution_clock::now();
Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame, true, render::TextureInterpolation::NearestNeighbour, 512);
cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(curr_keyframe->fitting_result.rendering_parameters, frame_width, frame_height);
Mat isomap = render::extract_texture(curr_keyframe->fitting_result.mesh, affine_from_ortho, curr_keyframe->frame, true, render::TextureInterpolation::NearestNeighbour, 512);
// Merge the isomaps - add the current one to the already merged ones:
Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
Mat frontal_rendering;
auto rot_mtx_y = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f ));
rendering_params.set_rotation(rot_mtx_y);
curr_keyframe->fitting_result.rendering_parameters.set_rotation(rot_mtx_y);
auto modelview_no_translation = rendering_params.get_modelview();
auto modelview_no_translation = curr_keyframe->fitting_result.rendering_parameters.get_modelview();
modelview_no_translation[3][0] = 0;
modelview_no_translation[3][1] = 0;
std::tie(frontal_rendering, std::ignore) = render::render(
merged_mesh,
curr_keyframe->fitting_result.mesh,
modelview_no_translation,
glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
256, 256,
......@@ -253,12 +273,10 @@ public:
cvtColor(frontal_rendering, frontal_rendering, CV_BGRA2BGR);
fitting::FittingResult fitting_result;
fitting_result.rendering_parameters = rendering_params;
fitting_result.landmarks = landmarks;
fitting_result.mesh = mesh;
curr_keyframe->frame = frontal_rendering;
return curr_keyframe;
return Keyframe(0.0f, frontal_rendering, fitting_result, total_frames);
}
/**
......@@ -327,7 +345,7 @@ public:
}
// makes a copy of the frame
Keyframe keyframe = generate_new_keyframe(frame);
Keyframe *keyframe = generate_new_keyframe(frame);
// if(wireframe) {
// draw_wireframe(keyframe.frame, keyframe);
......@@ -337,12 +355,12 @@ public:
// draw_landmarks(keyframe.frame, keyframe);
// }
writer.write(keyframe.frame);
writer.write(keyframe->frame);
total_frames++;
if (show_video) {
std::cout << "show video" << std::endl;
cv::imshow("video", keyframe.frame);
cv::imshow("video", keyframe->frame);
cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
}
......@@ -442,6 +460,7 @@ public:
int width;
int height;
int last_frame_number;
Keyframe last_keyframe;
std::unique_ptr<std::thread> frame_buffer_worker;
......@@ -549,8 +568,8 @@ public:
// set all fitting params we found for this Keyframe
fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
fitting::FittingResult fitting_result;
fitting_result.rendering_parameters = rendering_params;
fitting_result.landmarks = landmarks;
fitting_result.rendering_parameters = rendering_params;
cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
......@@ -568,9 +587,9 @@ public:
* @param keyframe
* @return
*/
bool try_add(Keyframe &keyframe) {
bool try_add(Keyframe keyframe) {
// Determine whether to add or not:
auto yaw_angle = glm::degrees(glm::yaw(keyframe.fitting_result.rendering_parameters.get_rotation()));
float yaw_angle = glm::degrees(glm::yaw(keyframe.fitting_result.rendering_parameters.get_rotation()));
auto idx = angle_to_index(yaw_angle);
bool add_frame = false;
......@@ -581,13 +600,15 @@ public:
return false;
}
std::cout << "idx: " << idx << std::endl;
// always add when we don't have enough frames
if (bins[idx].size() < frames_per_bin) {
add_frame = true; // definitely adding; the score comparison in the for-loop below is then redundant.
}
for (auto&& f : bins[idx]) {
if (keyframe.score > f.score) {
if (keyframe.score > f->score) {
add_frame = true;
}
}
......@@ -597,12 +618,12 @@ public:
}
// Add the keyframe:
bins[idx].push_back(keyframe);
bins[idx].push_back(std::make_shared<Keyframe>(keyframe));
if (bins[idx].size() > frames_per_bin) {
n_frames--;
// need to remove the lowest one:
std::sort(std::begin(bins[idx]), std::end(bins[idx]),
[](const auto& lhs, const auto& rhs) { return lhs.score > rhs.score; });
[](const auto& lhs, const auto& rhs) { return lhs->score > rhs->score; });
bins[idx].resize(frames_per_bin);
}
......@@ -622,9 +643,9 @@ public:
output.append("[");
for (auto&& f : b) {
std::size_t idx = angle_to_index(f.yaw_angle);
output.append("(" + std::to_string(f.score));
output.append("," + std::to_string(f.yaw_angle));
std::size_t idx = angle_to_index(f->yaw_angle);
output.append("(" + std::to_string(f->score));
output.append("," + std::to_string(f->yaw_angle));
output.append("), ");
}
......@@ -639,8 +660,8 @@ public:
*
* @return std::vector<Keyframe>
*/
std::vector<Keyframe> get_keyframes() const {
std::vector<Keyframe> keyframes;
std::vector<std::shared_ptr<Keyframe>> get_keyframes() const {
std::vector<std::shared_ptr<Keyframe>> keyframes;
for (auto&& b : bins) {
for (auto&& f : b) {
keyframes.push_back(f);
......@@ -655,7 +676,7 @@ public:
*
* @return
*/
std::vector<Keyframe> start() {
std::vector<std::shared_ptr<Keyframe>> start() {
// blocking call to next() for the first frame; stop on an empty frame.
if (!next()) {
__stop();
......@@ -809,7 +830,7 @@ private:
cv::VideoCapture cap;
eos::fitting::ReconstructionData reconstruction_data;
using BinContent = std::vector<Keyframe>;
using BinContent = std::vector<std::shared_ptr<Keyframe>>;
std::vector<BinContent> bins;
// latest pca_shape_coefficients
......
......@@ -49,6 +49,7 @@ using eos::core::LandmarkCollection;
using eos::video::BufferedVideoIterator;
using eos::video::WeightedIsomapAveraging;
using eos::video::ReconstructionVideoWriter;
using eos::video::Keyframe;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
......@@ -164,7 +165,7 @@ void evaluate_accuracy(
/**
*
* @param key_frames
* @param keyframes
* @param rendering_paramss
* @param landmark_list
* @param morphable_model
......@@ -179,7 +180,7 @@ void evaluate_accuracy(
* @param n_iter
*/
void render_output(
std::vector<eos::video::Keyframe> key_frames,
std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes,
std::vector<fitting::RenderingParameters> rendering_paramss,
std::vector<core::LandmarkCollection<cv::Vec2f>> landmark_list,
vector<core::Mesh> meshs,
......@@ -209,12 +210,12 @@ void render_output(
auto outputfilebase = annotations[0];
for (uint i = 0; i < key_frames.size(); ++i) {
Mat frame = key_frames[i].frame;
for (uint i = 0; i < keyframes.size(); ++i) {
Mat frame = keyframes[i]->frame;
auto unmodified_frame = frame.clone();
int frame_number = key_frames[i].frame_number;
float yaw_angle = key_frames[i].yaw_angle;// glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
int frame_number = keyframes[i]->frame_number;
float yaw_angle = keyframes[i]->yaw_angle; // glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
cv::imwrite("/tmp/eos/" + std::to_string(frame_number) + "_" + std::to_string(yaw_angle) + ".png", frame);
......@@ -325,7 +326,7 @@ void render_output(
/**
*
* @param key_frames
* @param keyframes
* @param rendering_paramss
* @param landmark_list
* @param morphable_model
......@@ -339,8 +340,7 @@ void render_output(
* @param settings
* @param n_iter
*/
void evaluate_results(
std::vector<eos::video::Keyframe> key_frames,
void evaluate_results(std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes,
std::vector<fitting::RenderingParameters> rendering_paramss,
vector<core::Mesh> meshs,
std::vector<float> pca_shape_coefficients,
......@@ -351,6 +351,7 @@ void evaluate_results(
boost::property_tree::ptree settings,
int n_iter) {
std::string output_path = settings.get<std::string>("output.output_path", "/tmp");
float merge_isomap_face_angle = settings.get<float>("output.merge_isomap_face_angle", 60.f);
......@@ -362,19 +363,18 @@ void evaluate_results(
auto blendshapes = reconstruction_data.blendshapes;
auto morphable_model = reconstruction_data.morphable_model;
// The 3D head pose can be recovered as follows:
for (uint i = 0; i < key_frames.size(); ++i) {
int frame_number = key_frames[i].frame_number;
auto landmarks = key_frames[i].fitting_result.landmarks;
auto rendering_params = key_frames[i].fitting_result.rendering_parameters;
float yaw_angle = key_frames[i].yaw_angle;
Mat frame = key_frames[i].frame;
for (uint i = 0; i < keyframes.size(); ++i) {
Mat frame = keyframes[i]->frame;
int frame_width = frame.cols;
int frame_height = frame.rows;
int frame_number = keyframes[i]->frame_number;
auto landmarks = keyframes[i]->fitting_result.landmarks;
auto rendering_params = keyframes[i]->fitting_result.rendering_parameters;
auto yaw_angle = glm::degrees(glm::yaw(rendering_params.get_rotation()));
Mat outimg = frame.clone();
// and similarly for pitch and roll.
......@@ -388,22 +388,18 @@ void evaluate_results(
cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
);
}
// Draw the fitted mesh as wireframe, and save the image:
draw_wireframe(
outimg,
meshs[i],
keyframes[i]->fitting_result.mesh,
rendering_params.get_modelview(),
rendering_params.get_projection(),
fitting::get_opencv_viewport(frame_width, frame_height));
// bool eyes_open = isomap_averaging.has_eyes_open(frame, landmarks);
//
// if (!eyes_open) {
// continue;
// }
// cv::imshow("Img", outimg);
// cv::waitKey(0);
cv::imshow("Img", outimg);
cv::waitKey(20);
// Extract the texture for this frame:
Mat isomap = render::extract_texture(meshs[i], affine_from_ortho, frame);
......@@ -411,34 +407,28 @@ void evaluate_results(
// merge the isomaps:
merged_isomap = isomap_averaging.add_and_merge(isomap);
// cv::imshow("isomap", isomap);
// cv::imshow("merged_isomap", merged_isomap);
//
// Mat outimg = frame.clone();
evaluate_accuracy(
landmarks,
landmark_mapper,
meshs[i],
affine_from_ortho,
settings
);
}
// save the merged isomap:
// std::string iter = "_" + std::to_string(n_iter);
// fs::path path = (fs::path(annotations[n_iter]).parent_path() / "eval");
// fs::create_directory(path);
// std::string outputfile = (path / fs::path(annotations[n_iter]).replace_extension("").filename()).string();
//
// for (auto &&lm : landmarks) {
// cv::rectangle(
// outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
// cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
// );
// }
// // Draw the fitted mesh as wireframe, and save the image:
// draw_wireframe(
// outimg,
// meshs[i],
// rendering_params.get_modelview(),
// rendering_params.get_projection(),
// fitting::get_opencv_viewport(frame_width, frame_height));
// // sharpen isomap
// Mat sharpen = isomap_averaging.sharpen(merged_isomap);
//
// cv::imshow("Img", outimg);
// cv::waitKey(0);
// fs::path path = (fs::path(annotations[frame_number]).parent_path() / "eval");
// std::string outputfile = (path / fs::path(annotations[frame_number]).replace_extension("").filename()).string();
// std::string iter = "_" + std::to_string(n_iter) + "_" + std::to_string(i);
// cv::imwrite(outputfile + iter + ".annotated.png", outimg);
// cv::imwrite(outputfile + iter + ".isomap.png", sharpen);
//
// // save frontal rendering with texture:
// // save the frontal rendering with merged isomap:
// Mat frontal_rendering;
// glm::mat4 modelview_frontal = glm::mat4( 1.0 );
// core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
// morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
......@@ -448,53 +438,6 @@ void evaluate_results(
// morphable_model.get_texture_coordinates()
// );
// cv::imwrite(outputfile + iter + ".isomap.png", isomap);
// Mat frontal_rendering;
// std::tie(frontal_rendering, std::ignore) = eos::render::render(
// neutral_expression,
// rendering_params.get_modelview(),
// rendering_params.get_projection(),
// frame_width,
// frame_height,
// render::create_mipmapped_texture(merged_isomap),
// true,
// false,
// false
// );
// cv::imwrite(outputfile + iter + ".frontal.png", frontal_rendering);
// evaluate_accuracy(
// landmarks,
// landmark_mapper,
// meshs[i],
// affine_from_ortho,
// settings
// );
}
// save the merged isomap:
std::string iter = "_" + std::to_string(n_iter);
fs::path path = (fs::path(annotations[n_iter]).parent_path() / "eval");
fs::create_directory(path);
std::string outputfile = (path / fs::path(annotations[n_iter]).replace_extension("").filename()).string();
// sharpen isomap
Mat sharpen = isomap_averaging.sharpen(merged_isomap);
cv::imwrite(outputfile + iter + ".isomap.png", sharpen);
// save the frontal rendering with merged isomap:
Mat frontal_rendering;
glm::mat4 modelview_frontal = glm::mat4( 1.0 );
core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
// std::tie(frontal_rendering, std::ignore) = render::render(
// neutral_expression,
// modelview_frontal,
......@@ -508,8 +451,8 @@ void evaluate_results(
// cv::imwrite(outputfile + iter + ".merged_frontal.png", frontal_rendering);
// Save the mesh as textured obj:
core::write_textured_obj(neutral_expression, outputfile + iter + ".obj");
std::cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile + iter + ".obj" << std::endl;
// core::write_textured_obj(neutral_expression, outputfile + iter + ".obj");
// std::cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile + iter + ".obj" << std::endl;
}
/**
......@@ -530,16 +473,17 @@ boost::property_tree::ptree get_reconstruction_config(std::string filename) {
causes misalignment of the total landmark list and the list of frames. This samples the correct landmark
annotations based on a given keyframe list.
*
* @param key_frames
* @param keyframes
* @param landmarks
* @return
*/
vector<core::LandmarkCollection<cv::Vec2f>> sample_landmarks(
std::vector<eos::video::Keyframe> key_frames, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
std::vector<std::shared_ptr<eos::video::Keyframe>> keyframes, vector<core::LandmarkCollection<cv::Vec2f>> landmarks) {
vector<core::LandmarkCollection<cv::Vec2f>> sublist;
for (auto& f : key_frames) {
sublist.push_back(landmarks[f.frame_number]);
for (auto& f : keyframes) {
sublist.push_back(landmarks[f->frame_number]);
}
return sublist;
......@@ -668,7 +612,13 @@ int main(int argc, char *argv[]) {
}
// Start getting video frames:
bool use_pose_binning = settings.get<bool>("frames.use_pose_binning", true);
if (use_pose_binning) {
vid_iterator.start();
}
ReconstructionVideoWriter vid_writer;
try {
vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
......@@ -683,26 +633,34 @@ int main(int argc, char *argv[]) {
// Count the amount of iterations:
int n_iter = 0;
while(vid_iterator.is_playing()) {
auto key_frames = vid_iterator.get_keyframes();
while(vid_iterator.is_playing() || n_iter < 20) {
std::vector<std::shared_ptr<Keyframe>> keyframes;
if (use_pose_binning) {
keyframes = vid_iterator.get_keyframes();
} else {
vid_iterator.next();
keyframes.push_back(std::make_shared<Keyframe>(vid_iterator.get_last_keyframe()));
}
// Generate a sublist of the total available landmark list:
auto landmark_sublist = sample_landmarks(key_frames, landmark_list);
auto landmark_sublist = sample_landmarks(keyframes, landmark_list);
// it makes no sense to update pca_coeff if nothing in the buffer has changed:
if (vid_iterator.has_changed()) {
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< num_iterations << std::endl;
if (vid_iterator.has_changed() || !use_pose_binning) {
std::cout << "Going to reconstruct with " << keyframes.size() << " images, " << num_iterations << " iterations." << std::endl;
// Fit shape and pose:
auto t1 = std::chrono::high_resolution_clock::now();
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_parallel(
morphable_model,
blendshapes,
key_frames,
keyframes,
landmark_mapper,
key_frames[0].frame.cols,
key_frames[0].frame.rows,
static_cast<int>(key_frames.size()),
keyframes[0]->frame.cols,
keyframes[0]->frame.rows,
static_cast<int>(keyframes.size()),
edge_topology,
ibug_contour,
model_contour,
......@@ -716,39 +674,45 @@ int main(int argc, char *argv[]) {
settings
);
std::cout << vid_iterator.to_string() << std::endl;
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "Reconstruction took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / (key_frames.size() * num_iterations)
<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / (keyframes.size() * num_iterations)
<< "ms)" << std::endl;
//
// if(settings.get<bool>("output.make_video", false)) {
// vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
// vid_writer.next();
// }
if(settings.get<bool>("output.make_video", false)) {
vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
vid_writer.next();
}
// evaluate_results(
// key_frames,
// rendering_paramss,
// meshs,
// pca_shape_coefficients,
// blendshape_coefficients,
// fitted_image_points,
// annotations,
// reconstruction_data,
// settings,
// n_iter
// );
evaluate_results(
keyframes,
rendering_paramss,
meshs,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
annotations,
reconstruction_data,
settings,
n_iter
);
} else {
// std::cout << "No reconstruction - buffer did not change" << std::endl;
std::cout << vid_iterator.to_string() << std::endl;
std::cout << "No reconstruction - buffer did not change" << std::endl;
}
std::cout << n_iter << std::endl;
// Get new frames:
n_iter++;
}
// vid_writer.__stop();
std::cout << "stopping?" << std::endl;
vid_writer.__stop();
// if(settings.get<bool>("output.make_video", false)) {
// // Render output:
// std::cout << "Waiting for video to be completed..." << std::endl;
......@@ -768,20 +732,20 @@ int main(int argc, char *argv[]) {
// vid_writer.__stop();
// }
auto key_frames = vid_iterator.get_keyframes();
evaluate_results(
key_frames,
rendering_paramss,
meshs,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
annotations,
reconstruction_data,
settings,
n_iter
);
// auto keyframes = vid_iterator.get_keyframes();
//
// evaluate_results(
// keyframes,
// rendering_paramss,
// meshs,
// pca_shape_coefficients,
// blendshape_coefficients,
// fitted_image_points,
// annotations,
// reconstruction_data,
// settings,
// n_iter
// );
//todo: we could build our final obj here?
......
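
For the try_add changes above: the buffer keeps at most frames_per_bin keyframes per yaw-angle bin and replaces the lowest-scoring one when a better frame arrives. A condensed, self-contained sketch of that keep-best-N pattern over shared_ptr elements; angle_to_index, the bin width, and the constants are simplified assumptions, not the real BufferedVideoIterator code:

// Sketch of the keep-best-N-per-bin bookkeeping, with shared_ptr bins
// as in this commit. Keyframe is a simplified stand-in.
#include <algorithm>
#include <memory>
#include <vector>

struct Keyframe {
    float score;
    float yaw_angle;
};

using BinContent = std::vector<std::shared_ptr<Keyframe>>;

constexpr std::size_t frames_per_bin = 2;

// Hypothetical binning: one bin per 10 degrees of yaw, centered on 0.
std::size_t angle_to_index(float yaw_angle, std::size_t num_bins) {
    long idx = static_cast<long>(yaw_angle / 10.0f) + static_cast<long>(num_bins) / 2;
    return static_cast<std::size_t>(std::clamp(idx, 0L, static_cast<long>(num_bins) - 1));
}

bool try_add(std::vector<BinContent>& bins, Keyframe keyframe) {
    auto idx = angle_to_index(keyframe.yaw_angle, bins.size());
    // Add when the bin is not yet full, or the new frame beats an existing one:
    bool add_frame = bins[idx].size() < frames_per_bin;
    for (auto&& f : bins[idx]) {
        if (keyframe.score > f->score) add_frame = true;
    }
    if (!add_frame) return false;

    bins[idx].push_back(std::make_shared<Keyframe>(keyframe));
    if (bins[idx].size() > frames_per_bin) {
        // Keep the highest-scoring frames, drop the lowest:
        std::sort(std::begin(bins[idx]), std::end(bins[idx]),
                  [](const auto& lhs, const auto& rhs) { return lhs->score > rhs->score; });
        bins[idx].resize(frames_per_bin);
    }
    return true;
}
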