Commit de4c961c authored by Richard Torenvliet

Correct a mistake concerning num-iterations and make it a setting

parent c5628500
@@ -215,7 +215,7 @@ namespace eos {
  * @param[in,out] image_points
  */
 template <typename vec2f>
-inline void get_mesh_coordinates(core::LandmarkCollection<vec2f> landmarks,
+inline void get_landmark_coordinates(core::LandmarkCollection<vec2f> landmarks,
                                  const core::LandmarkMapper& landmark_mapper,
                                  eos::core::Mesh& mesh,
                                  vector<Vec4f>& model_points,
...
@@ -468,6 +468,7 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
     auto t1 = std::chrono::high_resolution_clock::now();
     // #pragma omp target map(to: occluding_vertices, mesh, rotated_vertices)
     // #pragma omp parallel for
+    // TODO: make portable!
     #pragma omp target map(alloc:vertex_id_visible_map) map(from:occluding_vertices, rotated_vertices, mesh)
     {
     #pragma omp parallel for
@@ -510,8 +511,6 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
         }
     }
-    auto t2 = std::chrono::high_resolution_clock::now();
     // copy the results to final vertex ids
     std::vector<int> final_vertex_ids;
     for (int i = 0; i < occluding_vertices.size(); ++i) {
@@ -522,13 +521,6 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
         }
     }
-    auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
-    printf("S %lu %lld ms (mean: %f)\n",
-        final_vertex_ids.size(),
-        final_timing,
-        static_cast<float>(final_timing) / final_vertex_ids.size()
-    );
     return final_vertex_ids;
 };
...
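Note: the hunk above relies on OpenMP target offloading, which the new TODO flags as non-portable. For reference, a minimal standalone sketch of the same `target` + `parallel for` pattern (all names here are illustrative, not from eos); the region offloads when a device is configured and falls back to the host otherwise:

    #include <vector>

    // Minimal sketch of combining "#pragma omp target" with
    // "#pragma omp parallel for": map(to:) copies inputs to the device,
    // map(from:) copies results back. Compiled without offload support,
    // the region simply runs on the host.
    std::vector<int> visible_flags(const std::vector<float>& depths, float threshold)
    {
        const int n = static_cast<int>(depths.size());
        std::vector<int> visible(n, 0);
        const float* d = depths.data();
        int* v = visible.data();
        #pragma omp target map(to: d[0:n]) map(from: v[0:n])
        {
            #pragma omp parallel for
            for (int i = 0; i < n; ++i) {
                v[i] = d[i] < threshold ? 1 : 0; // "visible" if in front of the threshold
            }
        }
        return visible;
    }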
This diff is collapsed.
This diff is collapsed.
@@ -141,7 +141,6 @@ inline std::pair<cv::Mat, cv::Mat> render(
     // bool enable_texturing = false; Maybe re-add later, not sure
     // take a cv::Mat texture instead and convert to Texture internally? no, we don't want to recreate mipmap levels on each render() call.
-    auto t1 = std::chrono::high_resolution_clock::now();
     assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
     assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
     // another assert: If cv::Mat texture != empty, then we need texcoords?
@@ -173,14 +172,10 @@ inline std::pair<cv::Mat, cv::Mat> render(
         clipspace_vertices.push_back(detail::Vertex<float>{clipspace_coords, vertex_colour, mesh.texcoords[i]});
     }
     // All vertices are in clip-space now.
     // Prepare the rasterisation stage.
     // For every vertex/tri:
     vector<detail::TriangleToRasterize> triangles_to_raster;
-    //#pragma omp target
-    // {
-    //#pragma omp parallel for
     for(int i = 0; i < mesh.tvi.size(); i++) {
         const auto tri_indices = mesh.tvi[i];
         // Todo: Split this whole stuff up. Make a "clip" function, ... rename "processProspective..".. what is "process"... get rid of "continue;"-stuff by moving stuff inside process...
@@ -191,6 +186,8 @@ inline std::pair<cv::Mat, cv::Mat> render(
         // However, when comparing against w_c below, we might run into the trouble of the sign again in the affine case.
         // 'w' is always positive, as it is -z_camspace, and all z_camspace are negative.
         unsigned char visibility_bits[3];
+        #pragma omp simd
         for (unsigned char k = 0; k < 3; k++) {
             visibility_bits[k] = 0;
             float x_cc = clipspace_vertices[tri_indices[k]].position[0];
@@ -264,16 +261,8 @@ inline std::pair<cv::Mat, cv::Mat> render(
             }
         }
     }
-    // }
-    // Fragment/pixel shader: Colour the pixel values
-    for (const auto& tri : triangles_to_raster) {
-        detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
-    }
-    auto t2 = std::chrono::high_resolution_clock::now();
-    auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
-    printf("Tri %lu %lld ms\n", triangles_to_raster.size(), final_timing);
+    detail::raster_triangle_parallel(triangles_to_raster, colourbuffer, depthbuffer, texture, enable_far_clipping);
     return std::make_pair(colourbuffer, depthbuffer);
 };
...
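Note: the `#pragma omp simd` added above asks the compiler to vectorise the three-iteration visibility-bit loop. A self-contained sketch of the same idea (hypothetical names, simplified clip test):

    // Sketch: per-vertex clip flags as a SIMD loop. Each vertex gets a
    // bitmask of the frustum planes its clip-space coordinate violates.
    void clip_flags(const float x[3], const float y[3], const float w[3],
                    unsigned char bits[3])
    {
        #pragma omp simd
        for (int k = 0; k < 3; ++k) {
            unsigned char b = 0;
            if (x[k] < -w[k]) b |= 1; // left
            if (x[k] >  w[k]) b |= 2; // right
            if (y[k] < -w[k]) b |= 4; // bottom
            if (y[k] >  w[k]) b |= 8; // top
            bits[k] = b;
        }
    }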
This diff is collapsed.
@@ -35,7 +35,8 @@
 #include <atomic>
 #include <unistd.h>
-#include <glm/gtx/rotate_vector.hpp>
+#include "glm/gtc/matrix_transform.hpp"
+#include "glm/gtc/quaternion.hpp"
 using cv::Mat;
 using cv::Vec2f;
@@ -135,15 +136,11 @@ public:
         total_frames++;
     }
-    std::cout << frame.size() << std::endl;
-    std::cout << frame.cols << std::endl;
-    std::cout << frame.rows << std::endl;
     // Take over the size of the original video or take width / height from the settings.
-    int frame_width = settings.get<int>("frames.width", frame.cols);
-    int frame_height = settings.get<int>("frames.height", frame.rows);
-    Size frame_size = Size(frame_width, frame_height);
+    output_width = settings.get<int>("output.width", frame.cols);
+    output_height = settings.get<int>("output.height", frame.rows);
+    Size frame_size = Size(output_width, output_height);
     // Initialize writer with given output file
     VideoWriter tmp_writer(output_file_path.string(), codec, fps, frame_size);
@@ -193,15 +190,12 @@ public:
     vector<int> vertex_indices;
     vector<cv::Vec2f> image_points;
-    auto mesh = fitting::generate_new_mesh(
-        morphable_model,
-        blendshapes,
-        pca_shape_coefficients, // current pca_coeff will be the mean for the first iterations.
-        blend_shape_coefficients);
+    // current pca_coeff will be the mean for the first iterations.
+    auto mesh = fitting::generate_new_mesh(morphable_model, blendshapes, pca_shape_coefficients, blend_shape_coefficients);
     // Will yield model_points, vertex_indices and image_points
     // todo: should this function not come from mesh?
-    core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+    core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
     auto current_pose = fitting::estimate_orthographic_projection_linear(
         image_points, model_points, true, frame_height);
@@ -213,8 +207,7 @@ public:
     auto current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
     blend_shape_coefficients = fitting::fit_blendshapes_to_landmarks_nnls(
         blendshapes, current_pca_shape, affine_cam, image_points, vertex_indices);
-    auto merged_shape = current_pca_shape +
-        blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
+    auto merged_shape = current_pca_shape + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
         blend_shape_coefficients.size());
     auto merged_mesh = morphablemodel::sample_to_mesh(
@@ -225,8 +218,6 @@ public:
         morphable_model.get_texture_coordinates()
     );
-    auto R = rendering_params.get_rotation();
     // Render the model in a separate window using the estimated pose, shape and merged texture:
     Mat rendering;
@@ -235,45 +226,39 @@ public:
     // make sure the image is CV_8UC4, maybe do check first?
     rendering.convertTo(rendering, CV_8UC4);
-    Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame);
-    Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
+    auto t1 = std::chrono::high_resolution_clock::now();
+    Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame, true, render::TextureInterpolation::NearestNeighbour, 512);
+    // Merge the isomaps - add the current one to the already merged ones:
+    Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
     Mat frontal_rendering;
-    glm::mat4 modelview_frontal = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f));
-    std::cout << angle << std::endl;
-    core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
-        morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
-        morphable_model.get_color_model().get_mean(),
-        morphable_model.get_shape_model().get_triangle_list(),
-        morphable_model.get_color_model().get_triangle_list(),
-        morphable_model.get_texture_coordinates()
-    );
-    // angle -= 10.0;
+    auto rot_mtx_y = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f ));
+    rendering_params.set_rotation(rot_mtx_y);
+    auto modelview_no_translation = rendering_params.get_modelview();
+    modelview_no_translation[3][0] = 0;
+    modelview_no_translation[3][1] = 0;
     std::tie(frontal_rendering, std::ignore) = render::render(
-        neutral_expression,
-        modelview_frontal,
+        merged_mesh,
+        modelview_no_translation,
         glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
-        512, 512,
+        256, 256,
         render::create_mipmapped_texture(merged_isomap),
         true,
         false,
         false
     );
-    // cv::imshow("rendering", frontal_rendering);
-    // cv::waitKey(0);
+    cvtColor(frontal_rendering, frontal_rendering, CV_BGRA2BGR);
     fitting::FittingResult fitting_result;
     fitting_result.rendering_parameters = rendering_params;
     fitting_result.landmarks = landmarks;
     fitting_result.mesh = mesh;
-    // output this?
-    cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
-    float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame(face_roi)));
-    return Keyframe(frame_laplacian_score, rendering, fitting_result, total_frames);
+    return Keyframe(0.0f, frontal_rendering, fitting_result, total_frames);
 }
 /**
@@ -344,22 +329,22 @@ public:
     // makes a copy of the frame
     Keyframe keyframe = generate_new_keyframe(frame);
-    if(wireframe) {
-        draw_wireframe(keyframe.frame, keyframe);
-    }
-    if(landmarks) {
-        draw_landmarks(keyframe.frame, keyframe);
-    }
+    // if(wireframe) {
+    //     draw_wireframe(keyframe.frame, keyframe);
+    // }
+    //
+    // if(landmarks) {
+    //     draw_landmarks(keyframe.frame, keyframe);
+    // }
     writer.write(keyframe.frame);
     total_frames++;
-    // if (show_video) {
-    //     std::cout << "show video" << std::endl;
-    //     cv::imshow("video", frame);
-    //     cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
-    // }
+    if (show_video) {
+        std::cout << "show video" << std::endl;
+        cv::imshow("video", keyframe.frame);
+        cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
+    }
     return true;
 }
@@ -424,7 +409,10 @@ public:
 private:
-    float angle = -45.0;
+    int output_width;
+    int output_height;
+    float angle = -45.0f;
     int total_frames = 0;
     int num_shape_coefficients_to_fit = 0;
     // merge all triangles that are facing <60° towards the camera
@@ -552,12 +540,13 @@ public:
     // Will yield model_points, vertex_indices and frame_points
     // todo: should this function not come from mesh?
-    core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+    core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
     auto current_pose = fitting::estimate_orthographic_projection_linear(
         image_points, model_points, true, frame_height
     );
+    // set all fitting params we found for this Keyframe
     fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
     fitting::FittingResult fitting_result;
     fitting_result.rendering_parameters = rendering_params;
...
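Note: the frontal render above keeps the fitted rotation but drops the in-plane translation by zeroing the x and y entries of the model-view matrix's fourth column. In isolation (a sketch using only glm; glm matrices are column-major, so `m[3]` is the translation column):

    #include <glm/glm.hpp>

    // Sketch: strip the in-plane translation from a fitted model-view
    // matrix, keeping rotation (and depth) intact, as the keyframe
    // rendering code above does.
    glm::mat4 rotation_only_modelview(glm::mat4 modelview)
    {
        modelview[3][0] = 0.0f; // zero x translation
        modelview[3][1] = 0.0f; // zero y translation
        // modelview[3][2] (depth) is left untouched, matching the code above.
        return modelview;
    }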
@@ -263,6 +263,9 @@ public:
     cv::Mat add_and_merge(const cv::Mat& isomap)
     {
         // Merge isomaps, add the current to the already merged, pixel by pixel:
+        #pragma omp target
+        {
+        #pragma omp parallel for
         for (int r = 0; r < isomap.rows; ++r)
         {
             for (int c = 0; c < isomap.cols; ++c)
@@ -273,13 +276,21 @@ public:
                 }
                 // we're sure to have a visible pixel, merge it:
                 // merged_pixel = (old_average * visible_count + new_pixel) / (visible_count + 1)
-                merged_isomap.at<cv::Vec4f>(r, c)[0] = (merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
-                merged_isomap.at<cv::Vec4f>(r, c)[1] = (merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
-                merged_isomap.at<cv::Vec4f>(r, c)[2] = (merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
-                merged_isomap.at<cv::Vec4f>(r, c)[3] = 255; // as soon as we've seen the pixel visible once, we set it to visible.
+                merged_isomap.at<cv::Vec4f>(r, c)[0] =
+                    (merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c)
+                    + isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
+                merged_isomap.at<cv::Vec4f>(r, c)[1] =
+                    (merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c)
+                    + isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
+                merged_isomap.at<cv::Vec4f>(r, c)[2] =
+                    (merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c)
+                    + isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
+                merged_isomap.at<cv::Vec4f>(r, c)[3] =
+                    255; // as soon as we've seen the pixel visible once, we set it to visible.
                 ++visibility_counter.at<int>(r, c);
             }
         }
+        }
         cv::Mat merged_isomap_uchar;
         merged_isomap.convertTo(merged_isomap_uchar, CV_8UC4);
         return merged_isomap_uchar;
...
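Note: the merge rule in the comment, merged_pixel = (old_average * visible_count + new_pixel) / (visible_count + 1), is an incremental mean, so no per-pixel history has to be stored. A minimal standalone sketch (hypothetical names):

    // Running mean: after n samples, avg = (avg_prev * (n - 1) + x_n) / n.
    struct RunningMean {
        float average = 0.0f;
        int count = 0;
        void add(float x)
        {
            average = (average * count + x) / (count + 1);
            ++count;
        }
    };
    // e.g. adding 10.0f then 20.0f leaves average == 15.0f and count == 2.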
@@ -252,7 +252,7 @@ void render_output(
     // Will yield model_points, vertex_indices and image_points
     // todo: should this function not come from mesh?
-    core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+    core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
     auto current_pose = fitting::estimate_orthographic_projection_linear(
         image_points, model_points, true, frame_height);
@@ -656,6 +656,7 @@ int main(int argc, char *argv[]) {
     auto reconstruction_data = eos::fitting::ReconstructionData{
         morphable_model, blendshapes, landmark_mapper, landmark_list, model_contour, ibug_contour, edge_topology};
     // Start with the video play and get video file:
     BufferedVideoIterator vid_iterator;
@@ -668,6 +669,15 @@ int main(int argc, char *argv[]) {
     // Start getting video frames:
     vid_iterator.start();
+    ReconstructionVideoWriter vid_writer;
+    try {
+        vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
+    } catch(std::runtime_error &e) {
+        std::cout << e.what() << std::endl;
+        return EXIT_FAILURE;
+    }
+    int num_iterations = settings.get<int>("reconstruction.num_iterations", 10);
     // vid_writer.start();
     // Count the amount of iterations:
@@ -681,7 +691,7 @@ int main(int argc, char *argv[]) {
     // it makes no sense to update pca_coeff if nothing in the buffer has changed:
     if (vid_iterator.has_changed()) {
-        std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
+        std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< num_iterations << std::endl;
         // Fit shape and pose:
         auto t1 = std::chrono::high_resolution_clock::now();
@@ -696,7 +706,7 @@ int main(int argc, char *argv[]) {
             edge_topology,
             ibug_contour,
             model_contour,
-            50,
+            num_iterations,
             boost::none,
             30.0f,
             boost::none,
@@ -709,9 +719,13 @@ int main(int argc, char *argv[]) {
         auto t2 = std::chrono::high_resolution_clock::now();
         std::cout << "Reconstruction took "
            << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
-           << "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / key_frames.size()
+           << "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / (key_frames.size() * num_iterations)
            << "ms)" << std::endl;
+        if(settings.get<bool>("output.make_video", false)) {
+            vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
+            vid_writer.next();
+        }
        // evaluate_results(
        //     key_frames,
@@ -735,39 +749,39 @@ int main(int argc, char *argv[]) {
     }
     // vid_writer.__stop();
-    if(settings.get<bool>("output.make_video", false)) {
-        ReconstructionVideoWriter vid_writer;
-        try {
-            vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
-        } catch(std::runtime_error &e) {
-            std::cout << e.what() << std::endl;
-            return EXIT_FAILURE;
-        }
-        // Render output:
-        std::cout << "Waiting for video to be completed..." << std::endl;
-        vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
-        while (vid_writer.next()) {
-            printf("%d/%d\r", vid_writer.get_frame_number(), vid_iterator.get_frame_number());
-        }
-    }
-    // auto key_frames = vid_iterator.get_keyframes();
-    // std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
-    //
-    // evaluate_results(
-    //     key_frames,
-    //     rendering_paramss,
-    //     meshs,
-    //     pca_shape_coefficients,
-    //     blendshape_coefficients,
-    //     fitted_image_points,
-    //     annotations,
-    //     reconstruction_data,
-    //     settings,
-    //     n_iter
-    // );
+    // if(settings.get<bool>("output.make_video", false)) {
+    //     // Render output:
+    //     std::cout << "Waiting for video to be completed..." << std::endl;
+    //     vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
+    //
+    //     int count = 0;
+    //     while (count < 40) {
+    //         auto t1 = std::chrono::high_resolution_clock::now();
+    //         vid_writer.next();
+    //         auto t2 = std::chrono::high_resolution_clock::now();
+    //         printf("Frame %d/%d (%d)\n", vid_writer.get_frame_number(), vid_iterator.get_frame_number(), count);
+    //
+    //         std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << "ms" << std::endl;
+    //         count++;
+    //     }
+    //
+    //     // vid_writer.__stop();
+    // }
+    auto key_frames = vid_iterator.get_keyframes();
+    evaluate_results(
+        key_frames,
+        rendering_paramss,
+        meshs,
+        pca_shape_coefficients,
+        blendshape_coefficients,
+        fitted_image_points,
+        annotations,
+        reconstruction_data,
+        settings,
+        n_iter
+    );
    //todo: we could build our final obj here?
...
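Note: the commit's headline change reads num-iterations from the settings with a default of 10: `settings.get<int>("reconstruction.num_iterations", 10)`. The `get<T>(key, default)` call matches a Boost.PropertyTree-style tree; a minimal sketch under that assumption (the INI file name and layout are illustrative):

    #include <iostream>
    #include <boost/property_tree/ptree.hpp>
    #include <boost/property_tree/ini_parser.hpp>

    int main()
    {
        // Assumed INI layout:
        //   [reconstruction]
        //   num_iterations = 10
        boost::property_tree::ptree settings;
        boost::property_tree::read_ini("settings.ini", settings);
        // Falls back to 10 when the key is absent, like the code above.
        int num_iterations = settings.get<int>("reconstruction.num_iterations", 10);
        std::cout << "num_iterations: " << num_iterations << std::endl;
        return 0;
    }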