Commit de4c961c authored by Richard Torenvliet's avatar Richard Torenvliet

Correct a mistake concerning num_iterations and make it a setting

parent c5628500
......@@ -215,7 +215,7 @@ namespace eos {
* @param[in,out] image_points
*/
template <typename vec2f>
inline void get_mesh_coordinates(core::LandmarkCollection<vec2f> landmarks,
inline void get_landmark_coordinates(core::LandmarkCollection<vec2f> landmarks,
const core::LandmarkMapper& landmark_mapper,
eos::core::Mesh& mesh,
vector<Vec4f>& model_points,
......
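For context, the rename from `get_mesh_coordinates` to `get_landmark_coordinates` only changes the name; the helper still collects the 2D/3D landmark correspondences used by the pose and shape fitting below. A minimal sketch of what such a helper presumably does follows — the header paths, the `boost::optional` return of `LandmarkMapper::convert`, and the exact vertex type are assumptions based on the call sites in this diff, not a copy of the eos implementation.

```cpp
#include "eos/core/Landmark.hpp"        // header paths may differ between eos versions
#include "eos/core/LandmarkMapper.hpp"
#include "eos/core/Mesh.hpp"
#include <opencv2/core.hpp>
#include <string>
#include <vector>

// Hypothetical sketch: for every landmark the mapper knows about, record the
// corresponding mesh vertex (as a homogeneous model point), its index, and the
// observed 2D image point. Output types mirror the call sites in this diff.
template <typename vec2f>
inline void get_landmark_coordinates(eos::core::LandmarkCollection<vec2f> landmarks,
                                     const eos::core::LandmarkMapper& landmark_mapper,
                                     eos::core::Mesh& mesh,
                                     std::vector<cv::Vec4f>& model_points,
                                     std::vector<int>& vertex_indices,
                                     std::vector<cv::Vec2f>& image_points)
{
    for (const auto& lm : landmarks)
    {
        const auto converted_name = landmark_mapper.convert(lm.name);
        if (!converted_name)
            continue; // landmark not defined in the mapper, skip it
        const int vertex_idx = std::stoi(converted_name.get());
        const auto& v = mesh.vertices[vertex_idx];
        model_points.emplace_back(v[0], v[1], v[2], 1.0f); // homogeneous model point
        vertex_indices.emplace_back(vertex_idx);
        image_points.emplace_back(lm.coordinates);
    }
}
```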
......@@ -468,6 +468,7 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
auto t1 = std::chrono::high_resolution_clock::now();
// #pragma omp target map(to: occluding_vertices, mesh, rotated_vertices)
// #pragma omp parallel for
// TODO: make portable!
#pragma omp target map(alloc:vertex_id_visible_map) map(from:occluding_vertices, rotated_vertices, mesh)
{
#pragma omp parallel for
......@@ -510,8 +511,6 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
}
}
auto t2 = std::chrono::high_resolution_clock::now();
// copy the results to final vertex ids
std::vector<int> final_vertex_ids;
for (int i = 0; i < occluding_vertices.size(); ++i) {
......@@ -522,13 +521,6 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
}
}
auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
printf("S %lu %lld ms (mean: %f)\n",
final_vertex_ids.size(),
final_timing,
static_cast<float>(final_timing) / final_vertex_ids.size()
);
return final_vertex_ids;
};
......
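Regarding the "make portable" TODO above: `#pragma omp target` needs a compiler with offloading support, whereas a plain `parallel for` runs wherever OpenMP is available. A minimal host-only sketch of the same pattern, assuming a `is_self_occluded` predicate that stands in for the per-vertex ray test in the real code:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical fallback for the visibility pass: test each candidate vertex in
// parallel, then compact the visible ones sequentially (mirroring the
// final_vertex_ids loop in the diff).
std::vector<int> filter_visible(const std::vector<int>& occluding_vertices,
                                bool (*is_self_occluded)(int))
{
    std::vector<unsigned char> visible(occluding_vertices.size(), 0);

    // Plain parallel for: portable to any OpenMP-capable compiler, no offload needed.
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(occluding_vertices.size()); ++i)
    {
        visible[i] = is_self_occluded(occluding_vertices[i]) ? 0 : 1;
    }

    std::vector<int> final_vertex_ids;
    for (std::size_t i = 0; i < occluding_vertices.size(); ++i)
    {
        if (visible[i])
            final_vertex_ids.push_back(occluding_vertices[i]);
    }
    return final_vertex_ids;
}
```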
......@@ -141,7 +141,6 @@ inline std::pair<cv::Mat, cv::Mat> render(
// bool enable_texturing = false; Maybe re-add later, not sure
// take a cv::Mat texture instead and convert to Texture internally? no, we don't want to recreate mipmap levels on each render() call.
auto t1 = std::chrono::high_resolution_clock::now();
assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
// another assert: If cv::Mat texture != empty, then we need texcoords?
......@@ -173,14 +172,10 @@ inline std::pair<cv::Mat, cv::Mat> render(
clipspace_vertices.push_back(detail::Vertex<float>{clipspace_coords, vertex_colour, mesh.texcoords[i]});
}
// All vertices are in clip-space now.
// Prepare the rasterisation stage.
// For every vertex/tri:
vector<detail::TriangleToRasterize> triangles_to_raster;
//#pragma omp target
// {
//#pragma omp parallel for
for(int i = 0; i < mesh.tvi.size(); i++) {
const auto tri_indices = mesh.tvi[i];
// Todo: Split this whole stuff up. Make a "clip" function, ... rename "processProspective..".. what is "process"... get rid of "continue;"-stuff by moving stuff inside process...
......@@ -191,6 +186,8 @@ inline std::pair<cv::Mat, cv::Mat> render(
// However, when comparing against w_c below, we might run into the trouble of the sign again in the affine case.
// 'w' is always positive, as it is -z_camspace, and all z_camspace are negative.
unsigned char visibility_bits[3];
#pragma omp simd
for (unsigned char k = 0; k < 3; k++) {
visibility_bits[k] = 0;
float x_cc = clipspace_vertices[tri_indices[k]].position[0];
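The `visibility_bits` filled in here implement the usual clip-space trivial-reject test: because `w` is positive, comparing each coordinate against ±w tells you which frustum planes a vertex lies outside of. A small sketch of the idea — the bit layout is illustrative and may not match the renderer exactly:

```cpp
// Hypothetical helper: classify one clip-space vertex against the view frustum.
// w_cc is positive (it equals -z_camspace), so the clip planes sit at +/- w_cc.
inline unsigned char clip_bits(float x_cc, float y_cc, float z_cc, float w_cc,
                               bool enable_far_clipping)
{
    unsigned char bits = 0;
    if (x_cc < -w_cc) bits |= 1;                        // outside left plane
    if (x_cc >  w_cc) bits |= 2;                        // outside right plane
    if (y_cc < -w_cc) bits |= 4;                        // outside bottom plane
    if (y_cc >  w_cc) bits |= 8;                        // outside top plane
    if (enable_far_clipping && z_cc > w_cc) bits |= 16; // beyond the far plane
    return bits;
}

// A triangle can be rejected outright when all three vertices share a set bit:
// (clip_bits(v0...) & clip_bits(v1...) & clip_bits(v2...)) != 0
// means the whole triangle lies outside one frustum plane.
```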
......@@ -264,16 +261,8 @@ inline std::pair<cv::Mat, cv::Mat> render(
}
}
}
// }
// Fragment/pixel shader: Colour the pixel values
for (const auto& tri : triangles_to_raster) {
detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
}
auto t2 = std::chrono::high_resolution_clock::now();
auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
printf("Tri %lu %lld ms\n", triangles_to_raster.size(), final_timing);
detail::raster_triangle_parallel(triangles_to_raster, colourbuffer, depthbuffer, texture, enable_far_clipping);
return std::make_pair(colourbuffer, depthbuffer);
};
......
......@@ -35,7 +35,8 @@
#include <atomic>
#include <unistd.h>
#include <glm/gtx/rotate_vector.hpp>
#include "glm/gtc/matrix_transform.hpp"
#include "glm/gtc/quaternion.hpp"
using cv::Mat;
using cv::Vec2f;
......@@ -135,15 +136,11 @@ public:
total_frames++;
}
std::cout << frame.size() << std::endl;
std::cout << frame.cols << std::endl;
std::cout << frame.rows << std::endl;
// Use the size of the original video, or take width/height from the settings.
int frame_width = settings.get<int>("frames.width", frame.cols);
int frame_height = settings.get<int>("frames.height", frame.rows);
output_width = settings.get<int>("output.width", frame.cols);
output_height = settings.get<int>("output.height", frame.rows);
Size frame_size = Size(frame_width, frame_height);
Size frame_size = Size(output_width, output_height);
// Initialize writer with given output file
VideoWriter tmp_writer(output_file_path.string(), codec, fps, frame_size);
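The writer is now sized from `output.width`/`output.height` rather than from the input frame. A minimal sketch of this lookup-with-fallback pattern, assuming `settings` is a `boost::property_tree::ptree` (the dotted key paths used throughout this diff suggest as much) and using OpenCV's `VideoWriter`:

```cpp
#include <boost/property_tree/ptree.hpp>
#include <opencv2/opencv.hpp>
#include <string>

// Sketch: choose the output size from the settings, falling back to the size of
// the first decoded frame. Key names match the ones used in this diff; the
// settings type is an assumption.
cv::VideoWriter make_writer(const std::string& output_file_path,
                            const boost::property_tree::ptree& settings,
                            const cv::Mat& first_frame, int codec, double fps)
{
    const int output_width  = settings.get<int>("output.width", first_frame.cols);
    const int output_height = settings.get<int>("output.height", first_frame.rows);
    const cv::Size frame_size(output_width, output_height);
    return cv::VideoWriter(output_file_path, codec, fps, frame_size);
}
```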
......@@ -193,15 +190,12 @@ public:
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
auto mesh = fitting::generate_new_mesh(
morphable_model,
blendshapes,
pca_shape_coefficients, // current pca_coeff will be the mean for the first iterations.
blend_shape_coefficients);
// current pca_coeff will be the mean for the first iterations.
auto mesh = fitting::generate_new_mesh(morphable_model, blendshapes, pca_shape_coefficients, blend_shape_coefficients);
// Will yield model_points, vertex_indices and image_points
// todo: should this function not come from mesh?
core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height);
......@@ -213,8 +207,7 @@ public:
auto current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
blend_shape_coefficients = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_cam, image_points, vertex_indices);
auto merged_shape = current_pca_shape +
blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
auto merged_shape = current_pca_shape + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
blend_shape_coefficients.size());
auto merged_mesh = morphablemodel::sample_to_mesh(
......@@ -225,8 +218,6 @@ public:
morphable_model.get_texture_coordinates()
);
auto R = rendering_params.get_rotation();
// Render the model in a separate window using the estimated pose, shape and merged texture:
Mat rendering;
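For reference, the `merged_shape` computed a few lines up is simply the PCA shape sample plus a linear combination of the blendshapes. A small self-contained Eigen sketch of that step (the dimensions and coefficient values are illustrative, not taken from the model):

```cpp
#include <Eigen/Core>
#include <vector>

int main()
{
    // Illustrative sizes: a shape vector with N = 3 * num_vertices entries and K blendshapes.
    const int N = 9, K = 2;
    Eigen::VectorXf current_pca_shape = Eigen::VectorXf::Zero(N);          // sample from the PCA model
    Eigen::MatrixXf blendshapes_as_basis = Eigen::MatrixXf::Random(N, K);  // one blendshape per column
    std::vector<float> blend_shape_coefficients = {0.5f, 0.25f};

    // Same expression as in the diff: map the std::vector onto an Eigen vector
    // and add the blendshape displacement to the PCA shape.
    Eigen::VectorXf merged_shape =
        current_pca_shape +
        blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
                                                                 blend_shape_coefficients.size());
    return 0;
}
```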
......@@ -235,45 +226,39 @@ public:
// make sure the image is CV_8UC4, maybe do check first?
rendering.convertTo(rendering, CV_8UC4);
Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame);
Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
auto t1 = std::chrono::high_resolution_clock::now();
Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame, true, render::TextureInterpolation::NearestNeighbour, 512);
// Merge the isomaps - add the current one to the already merged ones:
Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
Mat frontal_rendering;
glm::mat4 modelview_frontal = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f));
std::cout << angle << std::endl;
core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
// angle -= 10.0;
auto rot_mtx_y = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f ));
rendering_params.set_rotation(rot_mtx_y);
auto modelview_no_translation = rendering_params.get_modelview();
modelview_no_translation[3][0] = 0;
modelview_no_translation[3][1] = 0;
std::tie(frontal_rendering, std::ignore) = render::render(
neutral_expression,
modelview_frontal,
merged_mesh,
modelview_no_translation,
glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
512, 512,
256, 256,
render::create_mipmapped_texture(merged_isomap),
true,
false,
false
);
// cv::imshow("rendering", frontal_rendering);
// cv::waitKey(0);
cvtColor(frontal_rendering, frontal_rendering, CV_BGRA2BGR);
fitting::FittingResult fitting_result;
fitting_result.rendering_parameters = rendering_params;
fitting_result.landmarks = landmarks;
fitting_result.mesh = mesh;
// output this?
cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame(face_roi)));
return Keyframe(frame_laplacian_score, rendering, fitting_result, total_frames);
return Keyframe(0.0f, frontal_rendering, fitting_result, total_frames);
}
/**
......@@ -344,22 +329,22 @@ public:
// makes a copy of the frame
Keyframe keyframe = generate_new_keyframe(frame);
if(wireframe) {
draw_wireframe(keyframe.frame, keyframe);
}
if(landmarks) {
draw_landmarks(keyframe.frame, keyframe);
}
// if(wireframe) {
// draw_wireframe(keyframe.frame, keyframe);
// }
//
// if(landmarks) {
// draw_landmarks(keyframe.frame, keyframe);
// }
writer.write(keyframe.frame);
total_frames++;
// if (show_video) {
// std::cout << "show video" << std::endl;
// cv::imshow("video", frame);
// cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
// }
if (show_video) {
std::cout << "show video" << std::endl;
cv::imshow("video", keyframe.frame);
cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
}
return true;
}
......@@ -424,7 +409,10 @@ public:
private:
float angle = -45.0;
int output_width;
int output_height;
float angle = -45.0f;
int total_frames = 0;
int num_shape_coefficients_to_fit = 0;
// merge all triangles that are facing <60° towards the camera
......@@ -552,12 +540,13 @@ public:
// Will yield model_points, vertex_indices and frame_points
// todo: should this function not come from mesh?
core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height
);
// set all fitting params we found for this Keyframe
fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
fitting::FittingResult fitting_result;
fitting_result.rendering_parameters = rendering_params;
......
......@@ -263,21 +263,32 @@ public:
cv::Mat add_and_merge(const cv::Mat& isomap)
{
// Merge isomaps, add the current to the already merged, pixel by pixel:
for (int r = 0; r < isomap.rows; ++r)
#pragma omp target
{
for (int c = 0; c < isomap.cols; ++c)
#pragma omp parallel for
for (int r = 0; r < isomap.rows; ++r)
{
if (isomap.at<cv::Vec4b>(r, c)[3] <= threshold)
for (int c = 0; c < isomap.cols; ++c)
{
continue; // ignore this pixel, not visible in the extracted isomap of this current frame
if (isomap.at<cv::Vec4b>(r, c)[3] <= threshold)
{
continue; // ignore this pixel, not visible in the extracted isomap of this current frame
}
// we're sure to have a visible pixel, merge it:
// merged_pixel = (old_average * visible_count + new_pixel) / (visible_count + 1)
merged_isomap.at<cv::Vec4f>(r, c)[0] =
(merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c)
+ isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[1] =
(merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c)
+ isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[2] =
(merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c)
+ isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[3] =
255; // as soon as we've seen the pixel visible once, we set it to visible.
++visibility_counter.at<int>(r, c);
}
// we're sure to have a visible pixel, merge it:
// merged_pixel = (old_average * visible_count + new_pixel) / (visible_count + 1)
merged_isomap.at<cv::Vec4f>(r, c)[0] = (merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[1] = (merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[2] = (merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[3] = 255; // as soon as we've seen the pixel visible once, we set it to visible.
++visibility_counter.at<int>(r, c);
}
}
cv::Mat merged_isomap_uchar;
......
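The merge in `add_and_merge` is a per-pixel running average over all frames in which the pixel was visible, using the alpha channel of the incoming isomap as a visibility mask. A stand-alone sketch of the update for a single pixel (the actual code applies this per colour channel directly on the `cv::Mat` buffers):

```cpp
#include <opencv2/core.hpp>

// Running-average update for one pixel of the merged isomap:
//   merged = (old_average * visible_count + new_value) / (visible_count + 1)
inline void merge_pixel(cv::Vec4f& merged, int& visibility_count,
                        const cv::Vec4b& incoming, unsigned char threshold)
{
    if (incoming[3] <= threshold)
        return; // pixel not visible in this frame's isomap, keep the old average

    for (int ch = 0; ch < 3; ++ch)
    {
        merged[ch] = (merged[ch] * visibility_count + incoming[ch]) / (visibility_count + 1);
    }
    merged[3] = 255.0f;  // once a pixel has been seen, it stays marked visible
    ++visibility_count;
}
```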
......@@ -252,7 +252,7 @@ void render_output(
// Will yield model_points, vertex_indices and image_points
// todo: should this function not come from mesh?
core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height);
......@@ -656,6 +656,7 @@ int main(int argc, char *argv[]) {
auto reconstruction_data = eos::fitting::ReconstructionData{
morphable_model, blendshapes, landmark_mapper, landmark_list, model_contour, ibug_contour, edge_topology};
// Start the video playback and get the video file:
BufferedVideoIterator vid_iterator;
......@@ -668,6 +669,15 @@ int main(int argc, char *argv[]) {
// Start getting video frames:
vid_iterator.start();
ReconstructionVideoWriter vid_writer;
try {
vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
} catch(std::runtime_error &e) {
std::cout << e.what() << std::endl;
return EXIT_FAILURE;
}
int num_iterations = settings.get<int>("reconstruction.num_iterations", 10);
// vid_writer.start();
// Count the number of iterations:
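This is the change the commit message refers to: the iteration count passed to the fitting (previously hard-coded to 50) is now read from the settings with a default of 10. A minimal sketch of reading it, assuming the settings come from a `boost::property_tree` loaded from an INI-style file — the file name and contents below are hypothetical:

```cpp
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/ini_parser.hpp>
#include <iostream>

int main()
{
    // Hypothetical settings file ("settings.ini"):
    //   [reconstruction]
    //   num_iterations = 25
    //   [output]
    //   make_video = true
    boost::property_tree::ptree settings;
    boost::property_tree::read_ini("settings.ini", settings);

    // Same calls as in the diff: fall back to the defaults when a key is missing.
    const int num_iterations = settings.get<int>("reconstruction.num_iterations", 10);
    const bool make_video = settings.get<bool>("output.make_video", false);

    std::cout << "num_iterations = " << num_iterations
              << ", make_video = " << make_video << std::endl;
    return 0;
}
```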
......@@ -681,7 +691,7 @@ int main(int argc, char *argv[]) {
// it makes no sense to update pca_coeff if nothing in the buffer has changed:
if (vid_iterator.has_changed()) {
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< num_iterations << std::endl;
// Fit shape and pose:
auto t1 = std::chrono::high_resolution_clock::now();
......@@ -696,7 +706,7 @@ int main(int argc, char *argv[]) {
edge_topology,
ibug_contour,
model_contour,
50,
num_iterations,
boost::none,
30.0f,
boost::none,
......@@ -709,9 +719,13 @@ int main(int argc, char *argv[]) {
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "Reconstruction took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / key_frames.size()
<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / (key_frames.size() * num_iterations)
<< "ms)" << std::endl;
if(settings.get<bool>("output.make_video", false)) {
vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
vid_writer.next();
}
// evaluate_results(
// key_frames,
......@@ -735,39 +749,39 @@ int main(int argc, char *argv[]) {
}
// vid_writer.__stop();
if(settings.get<bool>("output.make_video", false)) {
ReconstructionVideoWriter vid_writer;
try {
vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
} catch(std::runtime_error &e) {
std::cout << e.what() << std::endl;
return EXIT_FAILURE;
}
// Render output:
std::cout << "Waiting for video to be completed..." << std::endl;
vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
while (vid_writer.next()) {
printf("%d/%d\r", vid_writer.get_frame_number(), vid_iterator.get_frame_number());
}
}
// auto key_frames = vid_iterator.get_keyframes();
// std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
// if(settings.get<bool>("output.make_video", false)) {
// // Render output:
// std::cout << "Waiting for video to be completed..." << std::endl;
// vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
//
// evaluate_results(
// key_frames,
// rendering_paramss,
// meshs,
// pca_shape_coefficients,
// blendshape_coefficients,
// fitted_image_points,
// annotations,
// reconstruction_data,
// settings,
// n_iter
// );
// int count = 0;
// while (count < 40) {
// auto t1 = std::chrono::high_resolution_clock::now();
// vid_writer.next();
// auto t2 = std::chrono::high_resolution_clock::now();
// printf("Frame %d/%d (%d)\n", vid_writer.get_frame_number(), vid_iterator.get_frame_number(), count);
//
// std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << "ms" << std::endl;
// count++;
// }
//
// vid_writer.__stop();
// }
auto key_frames = vid_iterator.get_keyframes();
evaluate_results(
key_frames,
rendering_paramss,
meshs,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
annotations,
reconstruction_data,
settings,
n_iter
);
//todo: we could build our final obj here?
......