Commit de4c961c authored by Richard Torenvliet's avatar Richard Torenvliet

Correct mistake concerning num-iterations and make it a setting

parent c5628500
@@ -215,7 +215,7 @@ namespace eos {
 * @param[in,out] image_points
 */
template <typename vec2f>
-inline void get_mesh_coordinates(core::LandmarkCollection<vec2f> landmarks,
+inline void get_landmark_coordinates(core::LandmarkCollection<vec2f> landmarks,
                                  const core::LandmarkMapper& landmark_mapper,
                                  eos::core::Mesh& mesh,
                                  vector<Vec4f>& model_points,
...
@@ -468,6 +468,7 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
    auto t1 = std::chrono::high_resolution_clock::now();
    // #pragma omp target map(to: occluding_vertices, mesh, rotated_vertices)
    // #pragma omp parallel for
+   // TODO: make portable!
#pragma omp target map(alloc:vertex_id_visible_map) map(from:occluding_vertices, rotated_vertices, mesh)
    {
#pragma omp parallel for
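A note on the map clauses above (editorial, not part of the commit): in common OpenMP target usage the inputs travel to the device with map(to:) and results come back with map(from:), the opposite of what is written here; moreover, mapping C++ containers such as mesh or rotated_vertices by name copies only their pointer/size members, not the heap storage they own, so this pattern only behaves as intended while the target region falls back to the host. The conventional shape, as a hedged self-contained sketch with invented data:

    #include <cstdio>

    int main()
    {
        const int n = 1024;
        static float depth[1024];
        static int visible[1024];
        for (int i = 0; i < n; ++i) depth[i] = static_cast<float>(i % 7);

        // Inputs go to the device with map(to:), results return with map(from:).
    #pragma omp target teams distribute parallel for map(to : depth[0:n]) map(from : visible[0:n])
        for (int i = 0; i < n; ++i)
        {
            visible[i] = (depth[i] < 3.0f) ? 1 : 0; // stand-in for the real visibility test
        }
        std::printf("visible[0..3] = %d %d %d %d\n", visible[0], visible[1], visible[2], visible[3]);
        return 0;
    }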
@@ -510,8 +511,6 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
        }
    }
-   auto t2 = std::chrono::high_resolution_clock::now();
    // copy the results to final vertex ids
    std::vector<int> final_vertex_ids;
    for (int i = 0; i < occluding_vertices.size(); ++i) {
@@ -522,13 +521,6 @@ std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, co
        }
    }
-   auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
-   printf("S %lu %lld ms (mean: %f)\n",
-          final_vertex_ids.size(),
-          final_timing,
-          static_cast<float>(final_timing) / final_vertex_ids.size()
-   );
    return final_vertex_ids;
};
...
@@ -1143,7 +1143,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
    std::vector<float>& pca_shape_coefficients,
    std::vector<std::vector<float>>& blendshape_coefficients,
    std::vector<std::vector<cv::Vec2f>>& fitted_image_points,
-   boost::property_tree::ptree settings) {
+   boost::property_tree::ptree settings)
+{
    assert(blendshapes.size() > 0);
    assert(num_iterations > 0); // Can we allow 0, for only the initial pose-fit?
@@ -1160,18 +1161,22 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
    int NUM_THREADS = settings.get<int>("reconstruction.num_threads", 1);
    bool use_contours = settings.get<bool>("reconstruction.use_contours", true);

-   if (!num_shape_coefficients_to_fit) {
+   if (!num_shape_coefficients_to_fit)
+   {
        num_shape_coefficients_to_fit = morphable_model.get_shape_model().get_num_principal_components();
    }

-   if (pca_shape_coefficients.empty()) {
+   if (pca_shape_coefficients.empty())
+   {
        pca_shape_coefficients.resize(num_shape_coefficients_to_fit.get());
    }

    // TODO: This leaves the following case open: num_coeffs given is empty or defined, but the
    // pca_shape_coefficients given is != num_coeffs or the model's max-coeffs. What to do then? Handle & document!
-   if (blendshape_coefficients.size() < num_images) {
-       for (int j = 0; j < num_images; ++j) {
+   if (blendshape_coefficients.size() < num_images)
+   {
+       for (int j = 0; j < num_images; ++j)
+       {
            std::vector<float> current_blendshape_coefficients;
            current_blendshape_coefficients.resize(blendshapes.size());
            blendshape_coefficients.push_back(current_blendshape_coefficients);
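For readers unfamiliar with the settings mechanism used above: reconstruction.num_threads and reconstruction.use_contours are looked up in a boost::property_tree::ptree, with the second argument to get<T>() acting as the default when the key is absent. A minimal, self-contained sketch of how such a tree might be filled; the ini file name and contents are invented, only the two keys come from the diff:

    #include <boost/property_tree/ptree.hpp>
    #include <boost/property_tree/ini_parser.hpp>
    #include <iostream>

    int main()
    {
        // reconstruction.ini (hypothetical contents):
        //   [reconstruction]
        //   num_threads = 4
        //   use_contours = true
        boost::property_tree::ptree settings;
        boost::property_tree::read_ini("reconstruction.ini", settings);
        std::cout << settings.get<int>("reconstruction.num_threads", 1) << " "
                  << settings.get<bool>("reconstruction.use_contours", true) << std::endl;
        return 0;
    }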
@@ -1191,16 +1196,18 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
    vector<vector<Vec2f>> image_points(num_images); // the corresponding 2D landmark points of all frames.
    vector<fitting::RenderingParameters> rendering_params(num_images); // list of rendering params for all frames.
-   std::vector<cv::Mat> affine_from_orthos(num_images);
-   std::vector<VectorXf> mean_plus_blendshapes(num_images);
+   // std::vector<cv::Mat> affine_from_orthos(num_images);
+   // std::vector<VectorXf> mean_plus_blendshapes(num_images);

#pragma omp parallel num_threads(NUM_THREADS)
    {
#pragma omp for
-       for (int j = 0; j < num_images; ++j) {
+       for (int j = 0; j < num_images; ++j)
+       {
            VectorXf current_combined_shape = current_pca_shape +
                blendshapes_as_basis *
-               Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size());
+               Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
+                                                 blendshape_coefficients[j].size());

            eos::core::Mesh current_mesh = morphablemodel::sample_to_mesh(
                current_combined_shape,
@@ -1215,7 +1222,8 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
            // Get the locations of the model locations of the meshes, vertex_indices and image points
            // (equal to landmark coordinates), for every image / mesh.
-           std::tie(curr_model_points, curr_vertex_indices, curr_image_points) = eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
+           std::tie(curr_model_points, curr_vertex_indices, curr_image_points) =
+               eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
                keyframes[j].fitting_result.landmarks, landmark_mapper, current_mesh);

            // Start constructing a list of rendering parameters needed for reconstruction.
@@ -1232,50 +1240,51 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
            Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
            // if no contour
-           affine_from_orthos[j] = affine_from_ortho;
+           // affine_from_orthos[j] = affine_from_ortho;

            blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
                blendshapes, current_pca_shape, affine_from_ortho, curr_image_points, curr_vertex_indices);

            // Mesh with same PCA coeffs as before, but new expression fit (this is relevant if no initial blendshape coeffs have been given):
-           current_combined_shape = current_pca_shape +
+           current_combined_shapes[j] = current_pca_shape +
                morphablemodel::to_matrix(blendshapes) *
-               Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size()
+               Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
+                                                 blendshape_coefficients[j].size()
                );
-           current_combined_shapes[j] = current_combined_shape;

-           current_mesh = morphablemodel::sample_to_mesh(
+           current_meshs[j] = morphablemodel::sample_to_mesh(
                current_combined_shape, morphable_model.get_color_model().get_mean(),
                morphable_model.get_shape_model().get_triangle_list(),
                morphable_model.get_color_model().get_triangle_list(),
                morphable_model.get_texture_coordinates()
            );
-           current_meshs[j] = current_mesh;

            model_points[j] = curr_model_points;
            vertex_indices[j] = curr_vertex_indices;
            image_points[j] = curr_image_points;

            // Estimate the PCA shape coefficients with the current blendshape coefficients:
-           VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
-               blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
-                   blendshape_coefficients[j].size());
-           mean_plus_blendshapes[j] = current_mean_plus_blendshapes;
+           // VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
+           //     blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
+           //         blendshape_coefficients[j].size());
+           // mean_plus_blendshapes[j] = current_mean_plus_blendshapes;
        }
    }

    // The static (fixed) landmark correspondences which will stay the same throughout
    // the fitting (the inner face landmarks):
-   vector<vector<int>> fixed_vertex_indices (vertex_indices);
-   vector<vector<Vec2f>> fixed_image_points (image_points);
+   vector<vector<int>> fixed_vertex_indices(vertex_indices);
+   vector<vector<Vec2f>> fixed_image_points(image_points);

+   if (use_contours)
+   {
+       for (int i = 0; i < num_iterations; ++i) {
+           std::vector<cv::Mat> affine_from_orthos;
+           std::vector<VectorXf> mean_plus_blendshapes;
            image_points = fixed_image_points;
            vertex_indices = fixed_vertex_indices;
-   if (use_contours) {
-       for (int j = 0; j < num_images; ++j)
-       {
+           for (int j = 0; j < num_images; ++j) {
                // Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
                vector<Vec2f> image_points_contour;
                vector<int> vertex_indices_contour;
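One thing to double-check in the hunk above (an editorial observation, not part of the commit): the refit combined shape now goes into current_combined_shapes[j], but sample_to_mesh is still called with the local current_combined_shape, which was computed before the blendshape refit. If the refit shape was intended to drive the mesh, the call would presumably read:

    // Hedged suggestion only, assuming the refit shape was meant here:
    current_meshs[j] = morphablemodel::sample_to_mesh(
        current_combined_shapes[j], morphable_model.get_color_model().get_mean(),
        morphable_model.get_shape_model().get_triangle_list(),
        morphable_model.get_color_model().get_triangle_list(),
        morphable_model.get_texture_coordinates());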
@@ -1285,88 +1294,133 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
                auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);

                // For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
-               std::tie(image_points_contour, std::ignore, vertex_indices_contour) =
-                   fitting::get_contour_correspondences(
-                       landmarks,
-                       contour_landmarks,
-                       model_contour,
-                       yaw_angle,
-                       current_meshs[j],
-                       rendering_params[j].get_modelview(),
-                       rendering_params[j].get_projection(),
-                       fitting::get_opencv_viewport(image_width, image_height)
-                   );
+               std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(landmarks, contour_landmarks, model_contour, yaw_angle, current_meshs[j], rendering_params[j].get_modelview(), rendering_params[j].get_projection(), fitting::get_opencv_viewport(image_width, image_height));

                // Add the contour correspondences to the set of landmarks that we use for the fitting:
                vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
                image_points[j] = fitting::concat(image_points[j], image_points_contour);

                // Fit the occluding (away-facing) contour using the detected contour LMs:
                vector<Eigen::Vector2f> occluding_contour_landmarks;
-               // positive yaw = subject looking to the left
-               if (yaw_angle >= 0.0f)
-               {
-                   // the left contour is the occluding one we want to use ("away-facing")
-                   auto contour_landmarks_ =
-                       core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
-                   std::for_each(begin(contour_landmarks_),
-                                 end(contour_landmarks_),
-                                 [&occluding_contour_landmarks](auto &&lm)
-                                 {
-                                     occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
-                                 });
+               if (yaw_angle >= 0.0f) // positive yaw = subject looking to the left
+               { // the left contour is the occluding one we want to use ("away-facing")
+                   auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
+                   std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
                }
-               else
-               {
+               else {
                    auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
-                   std::for_each(begin(contour_landmarks_),
-                                 end(contour_landmarks_),
-                                 [&occluding_contour_landmarks](auto &&lm)
-                                 {
-                                     occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
-                                 });
+                   std::for_each(begin(contour_landmarks_), end(contour_landmarks_), [&occluding_contour_landmarks](auto&& lm) { occluding_contour_landmarks.push_back({ lm.coordinates[0], lm.coordinates[1] }); });
                }

-               auto edge_correspondences = fitting::find_occluding_edge_correspondences_parallel(
-                   current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f
-               );
+               auto edge_correspondences = fitting::find_occluding_edge_correspondences_parallel(current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f);
                image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
                vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);

                // Get the model points of the current mesh, for all correspondences that we've got:
                model_points[j].clear();
-               for (const auto &v : vertex_indices[j])
-               {
-                   model_points[j].push_back(
-                   {
-                       current_meshs[j].vertices[v][0],
-                       current_meshs[j].vertices[v][1],
-                       current_meshs[j].vertices[v][2],
-                       current_meshs[j].vertices[v][3]
-                   });
+               for (const auto& v : vertex_indices[j])
+               {
+                   model_points[j].push_back({ current_meshs[j].vertices[v][0], current_meshs[j].vertices[v][1], current_meshs[j].vertices[v][2], current_meshs[j].vertices[v][3] });
                }

                // Re-estimate the pose, using all correspondences:
-               auto current_pose = fitting::estimate_orthographic_projection_linear(image_points[j],
-                                                                                    model_points[j],
-                                                                                    true,
-                                                                                    image_height);
+               fitting::ScaledOrthoProjectionParameters current_pose = fitting::estimate_orthographic_projection_linear(image_points[j], model_points[j], true, image_height);
                rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);

-               Mat affine_from_ortho =
-                   fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
-               affine_from_orthos[j] = affine_from_ortho;
+               cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
+               affine_from_orthos.push_back(affine_from_ortho);

                // Estimate the PCA shape coefficients with the current blendshape coefficients:
-               VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
-                   blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
-                       blendshape_coefficients[j].size());
-               mean_plus_blendshapes[j] = current_mean_plus_blendshapes;
-           }
+               VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),blendshape_coefficients[j].size());
+               mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
            }
// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
// vector<Vec2f> image_points_contour;
// vector<int> vertex_indices_contour;
//
// auto curr_keyframe = keyframes[j];
// auto landmarks = curr_keyframe.fitting_result.landmarks;
// auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
//
// // For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
// std::tie(image_points_contour, std::ignore, vertex_indices_contour) =
// fitting::get_contour_correspondences(
// landmarks,
// contour_landmarks,
// model_contour,
// yaw_angle,
// current_meshs[j],
// rendering_params[j].get_modelview(),
// rendering_params[j].get_projection(),
// fitting::get_opencv_viewport(image_width, image_height)
// );
//
// // Add the contour correspondences to the set of landmarks that we use for the fitting:
// vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
// image_points[j] = fitting::concat(image_points[j], image_points_contour);
//
// // Fit the occluding (away-facing) contour using the detected contour LMs:
// vector<Eigen::Vector2f> occluding_contour_landmarks;
//
// // positive yaw = subject looking to the left
// if (yaw_angle >= 0.0f)
// {
// // the left contour is the occluding one we want to use ("away-facing")
// auto contour_landmarks_ =
// core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
// std::for_each(begin(contour_landmarks_),
// end(contour_landmarks_),
// [&occluding_contour_landmarks](auto &&lm)
// {
// occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
// });
// }
// else
// {
// auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
// std::for_each(begin(contour_landmarks_),
// end(contour_landmarks_),
// [&occluding_contour_landmarks](auto &&lm)
// {
// occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
// });
// }
//
// auto edge_correspondences = fitting::find_occluding_edge_correspondences_parallel(
// current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f
// );
//
// image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
// vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);
//
// // Get the model points of the current mesh, for all correspondences that we've got:
// model_points[j].clear();
//
// for (const auto &v : vertex_indices[j])
// {
// model_points[j].push_back(
// {
// current_meshs[j].vertices[v][0],
// current_meshs[j].vertices[v][1],
// current_meshs[j].vertices[v][2],
// current_meshs[j].vertices[v][3]
// });
// }
//
// // Re-estimate the pose, using all correspondences:
// auto current_pose = fitting::estimate_orthographic_projection_linear(image_points[j],
// model_points[j],
// true,
// image_height);
// rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);
//
// Mat affine_from_ortho =
// fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
// affine_from_orthos.push_back(affine_from_ortho);
//
// // Estimate the PCA shape coefficients with the current blendshape coefficients:
// VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
// blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
// blendshape_coefficients[j].size());
// mean_plus_blendshapes.push_back(current_mean_plus_blendshapes);
            pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi_parallel(
                morphable_model,
@@ -1381,10 +1435,11 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
            // Estimate the blendshape coefficients with the current PCA model estimate:
            current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);

#pragma omp parallel num_threads(NUM_THREADS)
            {
#pragma omp for
-               for (int j = 0; j < num_images; ++j) {
+               for (int j = 0; j < num_images; ++j)
+               {
                    blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
                        blendshapes, current_pca_shape, affine_from_orthos[j], image_points[j], vertex_indices[j]
                    );
@@ -1407,6 +1462,9 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
                }
            }
+       }
+   }

    fitted_image_points = image_points;
    return {current_meshs, rendering_params}; // I think we could also work with a Mat face_instance in this function instead of a Mesh, but it would convolute the code more (i.e. more complicated to access vertices).
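Zooming out from the individual hunks in this file: each outer iteration now alternates two linear sub-problems, a joint PCA-shape fit over all images with the blendshape coefficients held fixed (fit_shape_to_landmarks_linear_multi_parallel), followed by a per-image non-negative blendshape refit against the new shape (fit_blendshapes_to_landmarks_nnls). A self-contained toy of that alternation, with ordinary least squares standing in for both of the library's solvers and random data throughout:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::MatrixXf S = Eigen::MatrixXf::Random(6, 2); // toy "PCA basis"
        Eigen::MatrixXf B = Eigen::MatrixXf::Random(6, 2); // toy "blendshape basis"
        Eigen::VectorXf y = Eigen::VectorXf::Random(6);    // toy observations
        Eigen::VectorXf s = Eigen::VectorXf::Zero(2), b = Eigen::VectorXf::Zero(2);
        for (int it = 0; it < 10; ++it) // plays the role of num_iterations in the diff
        {
            s = S.colPivHouseholderQr().solve(y - B * b); // shape step, blendshapes fixed
            b = B.colPivHouseholderQr().solve(y - S * s); // blendshape step, shape fixed
        }
        std::cout << "residual: " << (y - S * s - B * b).norm() << std::endl;
        return 0;
    }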
...
@@ -473,6 +473,7 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer,
{
    using cv::Vec2f;
    using cv::Vec3f;
+
    for (int yi = triangle.min_y; yi <= triangle.max_y; ++yi)
    {
        for (int xi = triangle.min_x; xi <= triangle.max_x; ++xi)
@@ -482,9 +483,18 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer,
            const float y = static_cast<float>(yi) + 0.5f;

            // these will be used for barycentric weights computation
-           const double one_over_v0ToLine12 = 1.0 / implicit_line(triangle.v0.position[0], triangle.v0.position[1], triangle.v1.position, triangle.v2.position);
-           const double one_over_v1ToLine20 = 1.0 / implicit_line(triangle.v1.position[0], triangle.v1.position[1], triangle.v2.position, triangle.v0.position);
-           const double one_over_v2ToLine01 = 1.0 / implicit_line(triangle.v2.position[0], triangle.v2.position[1], triangle.v0.position, triangle.v1.position);
+           const double one_over_v0ToLine12 = 1.0 / implicit_line(triangle.v0.position[0],
+                                                                  triangle.v0.position[1],
+                                                                  triangle.v1.position,
+                                                                  triangle.v2.position);
+           const double one_over_v1ToLine20 = 1.0 / implicit_line(triangle.v1.position[0],
+                                                                  triangle.v1.position[1],
+                                                                  triangle.v2.position,
+                                                                  triangle.v0.position);
+           const double one_over_v2ToLine01 = 1.0 / implicit_line(triangle.v2.position[0],
+                                                                  triangle.v2.position[1],
+                                                                  triangle.v0.position,
+                                                                  triangle.v1.position);
            // affine barycentric weights
            double alpha = implicit_line(x, y, triangle.v1.position, triangle.v2.position) * one_over_v0ToLine12;
            double beta = implicit_line(x, y, triangle.v2.position, triangle.v0.position) * one_over_v1ToLine20;
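An aside, since the reformatting makes these lines prominent: implicit_line is an edge function, the line through two vertices evaluated at a point, and dividing the pixel's edge-function value by the value at the opposite vertex yields affine barycentric weights that sum to one inside the triangle. A self-contained check, using one common form of the edge function (the library's exact sign convention may differ, and any constant factor cancels in the ratio):

    #include <cstdio>

    static double implicit_line_demo(double x, double y, const double a[2], const double b[2])
    {
        // signed, scaled distance of (x, y) from the line through a and b
        return (a[1] - b[1]) * x + (b[0] - a[0]) * y + a[0] * b[1] - b[0] * a[1];
    }

    int main()
    {
        const double v0[2] = {0.0, 0.0}, v1[2] = {4.0, 0.0}, v2[2] = {0.0, 4.0};
        const double x = 1.0, y = 1.0; // a point inside the triangle
        const double alpha = implicit_line_demo(x, y, v1, v2) / implicit_line_demo(v0[0], v0[1], v1, v2);
        const double beta  = implicit_line_demo(x, y, v2, v0) / implicit_line_demo(v1[0], v1[1], v2, v0);
        const double gamma = implicit_line_demo(x, y, v0, v1) / implicit_line_demo(v2[0], v2[1], v0, v1);
        std::printf("%f %f %f (sum %f)\n", alpha, beta, gamma, alpha + beta + gamma); // 0.5 0.25 0.25 (sum 1)
        return 0;
    }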
@@ -496,7 +506,9 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer,
                const int pixel_index_row = yi;
                const int pixel_index_col = xi;

-               const double z_affine = alpha*static_cast<double>(triangle.v0.position[2]) + beta*static_cast<double>(triangle.v1.position[2]) + gamma*static_cast<double>(triangle.v2.position[2]);
+               const double z_affine = alpha * static_cast<double>(triangle.v0.position[2])
+                   + beta * static_cast<double>(triangle.v1.position[2])
+                   + gamma * static_cast<double>(triangle.v2.position[2]);

                bool draw = true;
                if (enable_far_clipping)
@@ -511,31 +523,49 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer,
                if (z_affine < depthbuffer.at<double>(pixel_index_row, pixel_index_col) && draw)
                {
                    // perspective-correct barycentric weights
-                   double d = alpha*triangle.one_over_z0 + beta*triangle.one_over_z1 + gamma*triangle.one_over_z2;
+                   double
+                       d = alpha * triangle.one_over_z0 + beta * triangle.one_over_z1 + gamma * triangle.one_over_z2;
                    d = 1.0 / d;

-                   alpha *= d*triangle.one_over_z0; // In case of affine cam matrix, everything is 1 and a/b/g don't get changed.
-                   beta *= d*triangle.one_over_z1;
-                   gamma *= d*triangle.one_over_z2;
+                   alpha *= d * triangle
+                       .one_over_z0; // In case of affine cam matrix, everything is 1 and a/b/g don't get changed.
+                   beta *= d * triangle.one_over_z1;
+                   gamma *= d * triangle.one_over_z2;

                    // attributes interpolation
-                   glm::tvec3<float> color_persp = static_cast<float>(alpha)*triangle.v0.color + static_cast<float>(beta)*triangle.v1.color + static_cast<float>(gamma)*triangle.v2.color; // Note: color might be empty if we use texturing and the shape-only model - but it works nonetheless? I think I set the vertex-colour to 127 in the shape-only model.
-                   glm::tvec2<float> texcoords_persp = static_cast<float>(alpha)*triangle.v0.texcoords + static_cast<float>(beta)*triangle.v1.texcoords + static_cast<float>(gamma)*triangle.v2.texcoords;
+                   glm::tvec3<float> color_persp =
+                       static_cast<float>(alpha) * triangle.v0.color + static_cast<float>(beta) * triangle.v1.color
+                       + static_cast<float>(gamma) * triangle.v2
+                           .color; // Note: color might be empty if we use texturing and the shape-only model - but it works nonetheless? I think I set the vertex-colour to 127 in the shape-only model.
+                   glm::tvec2<float> texcoords_persp = static_cast<float>(alpha) * triangle.v0.texcoords
+                       + static_cast<float>(beta) * triangle.v1.texcoords
+                       + static_cast<float>(gamma) * triangle.v2.texcoords;

                    glm::tvec3<float> pixel_color;
                    // Pixel Shader:
-                   if (texture) { // We use texturing
+                   if (texture)
+                   { // We use texturing
                        // check if texture != NULL?
                        // partial derivatives (for mip-mapping)
-                       const float u_over_z = -(triangle.alphaPlane.a*x + triangle.alphaPlane.b*y + triangle.alphaPlane.d) * triangle.one_over_alpha_c;
-                       const float v_over_z = -(triangle.betaPlane.a*x + triangle.betaPlane.b*y + triangle.betaPlane.d) * triangle.one_over_beta_c;
-                       const float one_over_z = -(triangle.gammaPlane.a*x + triangle.gammaPlane.b*y + triangle.gammaPlane.d) * triangle.one_over_gamma_c;
+                       const float u_over_z =
+                           -(triangle.alphaPlane.a * x + triangle.alphaPlane.b * y + triangle.alphaPlane.d)
+                           * triangle.one_over_alpha_c;
+                       const float v_over_z =
+                           -(triangle.betaPlane.a * x + triangle.betaPlane.b * y + triangle.betaPlane.d)
+                           * triangle.one_over_beta_c;
+                       const float one_over_z =
+                           -(triangle.gammaPlane.a * x + triangle.gammaPlane.b * y + triangle.gammaPlane.d)
+                           * triangle.one_over_gamma_c;
                        const float one_over_squared_one_over_z = 1.0f / std::pow(one_over_z, 2);

                        // partial derivatives of U/V coordinates with respect to X/Y pixel's screen coordinates
-                       float dudx = one_over_squared_one_over_z * (triangle.alpha_ffx * one_over_z - u_over_z * triangle.gamma_ffx);
-                       float dudy = one_over_squared_one_over_z * (triangle.beta_ffx * one_over_z - v_over_z * triangle.gamma_ffx);
-                       float dvdx = one_over_squared_one_over_z * (triangle.alpha_ffy * one_over_z - u_over_z * triangle.gamma_ffy);
-                       float dvdy = one_over_squared_one_over_z * (triangle.beta_ffy * one_over_z - v_over_z * triangle.gamma_ffy);
+                       float dudx = one_over_squared_one_over_z
+                           * (triangle.alpha_ffx * one_over_z - u_over_z * triangle.gamma_ffx);
+                       float dudy = one_over_squared_one_over_z
+                           * (triangle.beta_ffx * one_over_z - v_over_z * triangle.gamma_ffx);
+                       float dvdx = one_over_squared_one_over_z
+                           * (triangle.alpha_ffy * one_over_z - u_over_z * triangle.gamma_ffy);
+                       float dvdy = one_over_squared_one_over_z
+                           * (triangle.beta_ffy * one_over_z - v_over_z * triangle.gamma_ffy);
                        dudx *= texture.get().mipmaps[0].cols;
                        dudy *= texture.get().mipmaps[0].cols;
@@ -543,18 +573,25 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer,
                        dvdy *= texture.get().mipmaps[0].rows;

                        // The Texture is in BGR, thus tex2D returns BGR
-                       glm::tvec3<float> texture_color = detail::tex2d(texcoords_persp, texture.get(), dudx, dudy, dvdx, dvdy); // uses the current texture
+                       glm::tvec3<float> texture_color = detail::tex2d(texcoords_persp,
+                                                                       texture.get(),
+                                                                       dudx,
+                                                                       dudy,
+                                                                       dvdx,
+                                                                       dvdy); // uses the current texture
                        pixel_color = glm::tvec3<float>(texture_color[2], texture_color[1], texture_color[0]);
                        // other: color.mul(tex2D(texture, texCoord));
                        // Old note: for texturing, we load the texture as BGRA, so the colors get the wrong way in the next few lines...
                    }
-                   else { // We use vertex-coloring
+                   else
+                   { // We use vertex-coloring
                        // color_persp is in RGB
                        pixel_color = color_persp;
                    }

                    // clamp bytes to 255
-                   const unsigned char red = static_cast<unsigned char>(255.0f * std::min(pixel_color[0], 1.0f)); // Todo: Proper casting (rounding?)
+                   const unsigned char red = static_cast<unsigned char>(255.0f
+                       * std::min(pixel_color[0], 1.0f)); // Todo: Proper casting (rounding?)
                    const unsigned char green = static_cast<unsigned char>(255.0f * std::min(pixel_color[1], 1.0f));
                    const unsigned char blue = static_cast<unsigned char>(255.0f * std::min(pixel_color[2], 1.0f));
@@ -570,6 +607,156 @@ inline void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer,
        }
    }
};
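The reweighting in the hunk above is standard perspective correction: the affine weights are scaled by each vertex's 1/z and renormalised by d, so attributes end up interpolated linearly in 1/z; for an affine camera all one_over_z values are equal, d is their reciprocal, and the weights pass through unchanged. A self-contained numeric check with made-up depths:

    #include <cstdio>

    int main()
    {
        double alpha = 0.5, beta = 0.25, gamma = 0.25; // affine weights
        const double one_over_z0 = 1.0 / 2.0,          // vertex depths 2, 4, 8
                     one_over_z1 = 1.0 / 4.0,
                     one_over_z2 = 1.0 / 8.0;
        double d = alpha * one_over_z0 + beta * one_over_z1 + gamma * one_over_z2;
        d = 1.0 / d;
        alpha *= d * one_over_z0; // unchanged when all 1/z are equal (affine camera)
        beta  *= d * one_over_z1;
        gamma *= d * one_over_z2;
        std::printf("%f %f %f (sum %f)\n", alpha, beta, gamma, alpha + beta + gamma); // weights still sum to 1
        return 0;
    }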
#ifdef _OPENMP
// vector<detail::TriangleToRasterize> triangles_to_raster;
inline void raster_triangle_parallel(vector<detail::TriangleToRasterize> triangles_to_raster, cv::Mat colourbuffer, cv::Mat depthbuffer, boost::optional<Texture> texture, bool enable_far_clipping)
{
using cv::Vec2f;
using cv::Vec3f;
// these will be used for barycentric weights computation
for(int i = 0; i < triangles_to_raster.size(); i++)
{
auto triangle = triangles_to_raster[i];
const double one_over_v0ToLine12 = 1.0 / implicit_line(triangle.v0.position[0],
triangle.v0.position[1],
triangle.v1.position,
triangle.v2.position);
const double one_over_v1ToLine20 = 1.0 / implicit_line(triangle.v1.position[0],
triangle.v1.position[1],
triangle.v2.position,
triangle.v0.position);
const double one_over_v2ToLine01 = 1.0 / implicit_line(triangle.v2.position[0],
triangle.v2.position[1],
triangle.v0.position,
triangle.v1.position);
// Fragment/pixel shader: Colour the pixel values
// buffer for alpha beta gamma
auto t1 = std::chrono::high_resolution_clock::now();
for (int yi = triangle.min_y; yi <= triangle.max_y; ++yi)
{
const float y = static_cast<float>(yi) + 0.5f;
for (int xi = triangle.min_x; xi <= triangle.max_x; ++xi)
{
// we want centers of pixels to be used in computations. Todo: Do we?
const float x = static_cast<float>(xi) + 0.5f;
// affine barycentric weights
double alpha = implicit_line(x, y, triangle.v1.position, triangle.v2.position) * one_over_v0ToLine12;
double beta = implicit_line(x, y, triangle.v2.position, triangle.v0.position) * one_over_v1ToLine20;
double gamma = implicit_line(x, y, triangle.v0.position, triangle.v1.position) * one_over_v2ToLine01;
// if pixel (x, y) is inside the triangle or on one of its edges
if (alpha >= 0 && beta >= 0 && gamma >= 0)
{
const int pixel_index_row = yi;
const int pixel_index_col = xi;
const double z_affine = alpha * static_cast<double>(triangle.v0.position[2])
+ beta * static_cast<double>(triangle.v1.position[2])
+ gamma * static_cast<double>(triangle.v2.position[2]);
bool draw = true;
if (enable_far_clipping)
{
if (z_affine > 1.0)
{
draw = false;
}
}
// The '<= 1.0' clips against the far-plane in NDC. We clip against the near-plane earlier.
//if (z_affine < depthbuffer.at<double>(pixelIndexRow, pixelIndexCol)/* && z_affine <= 1.0*/) // what to do in ortho case without n/f "squashing"? should we always squash? or a flag?
if (z_affine < depthbuffer.at<double>(pixel_index_row, pixel_index_col) && draw)
{
// perspective-correct barycentric weights
double d =
alpha * triangle.one_over_z0 + beta * triangle.one_over_z1 + gamma * triangle.one_over_z2;
d = 1.0 / d;
alpha *= d * triangle
.one_over_z0; // In case of affine cam matrix, everything is 1 and a/b/g don't get changed.
beta *= d * triangle.one_over_z1;
gamma *= d * triangle.one_over_z2;
// attributes interpolation
glm::tvec3<float> color_persp =
static_cast<float>(alpha) * triangle.v0.color + static_cast<float>(beta) * triangle.v1.color
+ static_cast<float>(gamma) * triangle.v2
.color; // Note: color might be empty if we use texturing and the shape-only model - but it works nonetheless? I think I set the vertex-colour to 127 in the shape-only model.
glm::tvec2<float> texcoords_persp = static_cast<float>(alpha) * triangle.v0.texcoords
+ static_cast<float>(beta) * triangle.v1.texcoords
+ static_cast<float>(gamma) * triangle.v2.texcoords;
glm::tvec3<float> pixel_color;
// Pixel Shader:
if (texture)
{ // We use texturing
// check if texture != NULL?
// partial derivatives (for mip-mapping)
const float u_over_z =
-(triangle.alphaPlane.a * x + triangle.alphaPlane.b * y + triangle.alphaPlane.d)
* triangle.one_over_alpha_c;
const float v_over_z =
-(triangle.betaPlane.a * x + triangle.betaPlane.b * y + triangle.betaPlane.d)
* triangle.one_over_beta_c;
const float one_over_z =
-(triangle.gammaPlane.a * x + triangle.gammaPlane.b * y + triangle.gammaPlane.d)
* triangle.one_over_gamma_c;
const float one_over_squared_one_over_z = 1.0f / std::pow(one_over_z, 2);
// partial derivatives of U/V coordinates with respect to X/Y pixel's screen coordinates
float dudx = one_over_squared_one_over_z
* (triangle.alpha_ffx * one_over_z - u_over_z * triangle.gamma_ffx);
float dudy = one_over_squared_one_over_z
* (triangle.beta_ffx * one_over_z - v_over_z * triangle.gamma_ffx);
float dvdx = one_over_squared_one_over_z
* (triangle.alpha_ffy * one_over_z - u_over_z * triangle.gamma_ffy);
float dvdy = one_over_squared_one_over_z
* (triangle.beta_ffy * one_over_z - v_over_z * triangle.gamma_ffy);
dudx *= texture.get().mipmaps[0].cols;
dudy *= texture.get().mipmaps[0].cols;
dvdx *= texture.get().mipmaps[0].rows;
dvdy *= texture.get().mipmaps[0].rows;
// The Texture is in BGR, thus tex2D returns BGR
glm::tvec3<float> texture_color = detail::tex2d(texcoords_persp,
texture.get(),
dudx,
dudy,
dvdx,
dvdy); // uses the current texture
pixel_color = glm::tvec3<float>(texture_color[2], texture_color[1], texture_color[0]);
// other: color.mul(tex2D(texture, texCoord));
// Old note: for texturing, we load the texture as BGRA, so the colors get the wrong way in the next few lines...
}
else
{ // We use vertex-coloring
// color_persp is in RGB
pixel_color = color_persp;
}
// clamp bytes to 255
const unsigned char red = static_cast<unsigned char>(255.0f
* std::min(pixel_color[0], 1.0f)); // Todo: Proper casting (rounding?)
const unsigned char green = static_cast<unsigned char>(255.0f * std::min(pixel_color[1], 1.0f));
const unsigned char blue = static_cast<unsigned char>(255.0f * std::min(pixel_color[2], 1.0f));
// update buffers
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[0] = blue;
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[1] = green;
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[2] = red;
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[3] = 255; // alpha channel
depthbuffer.at<double>(pixel_index_row, pixel_index_col) = z_affine;
}
}
}
}
}
};
#endif // _OPENMP
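Worth flagging for a future revision: despite the _parallel suffix and the _OPENMP guard, the triangle loop in raster_triangle_parallel above contains no OpenMP pragma, so as committed it runs serially. Parallelising over triangles is also not free, because two triangles may cover the same pixel and the depth compare-and-write would race. A self-contained sketch of the hazard with one (coarse) critical-section guard; all data here is fake:

    #include <vector>
    #include <cstdio>

    int main()
    {
        std::vector<double> depth(640 * 480, 1e9);
        std::vector<unsigned char> colour(640 * 480, 0);
    #pragma omp parallel for
        for (int tri = 0; tri < 1000; ++tri)
        {
            const int pixel = (tri * 37) % static_cast<int>(depth.size()); // fake overlapping coverage
            const double z = 1.0 + 0.001 * (tri % 50);                     // fake interpolated depth
    #pragma omp critical
            if (z < depth[pixel]) // compare-and-write must not interleave between threads
            {
                depth[pixel] = z;
                colour[pixel] = static_cast<unsigned char>(tri % 256);
            }
        }
        std::printf("nearest z at pixel 37: %f\n", depth[37]);
        return 0;
    }

Finer-grained alternatives (per-pixel locks, or rasterising disjoint screen tiles per thread) avoid serialising every update, at the cost of more bookkeeping.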
} /* namespace detail */
} /* namespace render */
} /* namespace eos */
...
@@ -141,7 +141,6 @@ inline std::pair<cv::Mat, cv::Mat> render(
    // bool enable_texturing = false; Maybe re-add later, not sure
    // take a cv::Mat texture instead and convert to Texture internally? no, we don't want to recreate mipmap levels on each render() call.
-   auto t1 = std::chrono::high_resolution_clock::now();
    assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
    assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
    // another assert: If cv::Mat texture != empty, then we need texcoords?
@@ -173,14 +172,10 @@ inline std::pair<cv::Mat, cv::Mat> render(
        clipspace_vertices.push_back(detail::Vertex<float>{clipspace_coords, vertex_colour, mesh.texcoords[i]});
    }

    // All vertices are in clip-space now.
    // Prepare the rasterisation stage.
    // For every vertex/tri:
    vector<detail::TriangleToRasterize> triangles_to_raster;
-   //#pragma omp target
-   // {
-   //#pragma omp parallel for
    for(int i = 0; i < mesh.tvi.size(); i++) {
        const auto tri_indices = mesh.tvi[i];
        // Todo: Split this whole stuff up. Make a "clip" function, ... rename "processProspective..".. what is "process"... get rid of "continue;"-stuff by moving stuff inside process...
@@ -191,6 +186,8 @@ inline std::pair<cv::Mat, cv::Mat> render(
        // However, when comparing against w_c below, we might run into the trouble of the sign again in the affine case.
        // 'w' is always positive, as it is -z_camspace, and all z_camspace are negative.
        unsigned char visibility_bits[3];
+#pragma omp simd
        for (unsigned char k = 0; k < 3; k++) {
            visibility_bits[k] = 0;
            float x_cc = clipspace_vertices[tri_indices[k]].position[0];
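A note on the #pragma omp simd added above: it merely invites the compiler to vectorise the loop, takes effect only when built with -fopenmp (or -fopenmp-simd), and on a three-trip loop over unsigned char with branchy clip tests the gain is likely negligible. The idiom in minimal, self-contained form:

    #include <cstdio>

    int main()
    {
        float x_cc[8] = {-2, -1, 0, 1, 2, 3, 4, 5};
        float w_cc[8] = {1, 1, 1, 1, 1, 1, 1, 1};
        unsigned char bits[8];
    #pragma omp simd
        for (int k = 0; k < 8; ++k)
        {
            bits[k] = (x_cc[k] < -w_cc[k]) ? 1 : 0; // branch-free clip test; names loosely borrowed
        }
        std::printf("%d %d %d\n", bits[0], bits[1], bits[2]);
        return 0;
    }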
@@ -264,16 +261,8 @@ inline std::pair<cv::Mat, cv::Mat> render(
            }
        }
    }
-   // }
+   detail::raster_triangle_parallel(triangles_to_raster, colourbuffer, depthbuffer, texture, enable_far_clipping);
-   // Fragment/pixel shader: Colour the pixel values
-   for (const auto& tri : triangles_to_raster) {
-       detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
-   }
-   auto t2 = std::chrono::high_resolution_clock::now();
-   auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
-   printf("Tri %lu %lld ms\n", triangles_to_raster.size(), final_timing);
    return std::make_pair(colourbuffer, depthbuffer);
};
...
@@ -78,7 +78,7 @@ namespace detail { cv::Mat interpolate_black_line(cv::Mat isomap); }
 * @param[in] mesh A mesh with texture coordinates.
 * @param[in] affine_camera_matrix An estimated 3x4 affine camera matrix.
 * @param[in] image The image to extract the texture from. Should be 8UC3, other types not supported yet.
 * @param[in] compute_view_angle A flag whether the view angle of each vertex should be computed and returned. If set to true, the angle will be encoded into the alpha channel (0 meaning occluded or facing away 90°, 127 meaning facing a 45° angle and 255 meaning front-facing, and all values in between). If set to false, the alpha channel will only contain 0 for occluded vertices and 255 for visible vertices.
 * @param[in] mapping_type The interpolation type to be used for the extraction.
 * @param[in] isomap_resolution The resolution of the generated isomap. Defaults to 512x512.
 * @return The extracted texture as isomap (texture map).
@@ -109,7 +109,7 @@ inline cv::Mat extract_texture(const core::Mesh& mesh, cv::Mat affine_camera_mat
 * @param[in] affine_camera_matrix An estimated 3x4 affine camera matrix.
 * @param[in] image The image to extract the texture from.
 * @param[in] depthbuffer A pre-calculated depthbuffer image.
 * @param[in] compute_view_angle A flag whether the view angle of each vertex should be computed and returned. If set to true, the angle will be encoded into the alpha channel (0 meaning occluded or facing away 90°, 127 meaning facing a 45° angle and 255 meaning front-facing, and all values in between). If set to false, the alpha channel will only contain 0 for occluded vertices and 255 for visible vertices.
 * @param[in] mapping_type The interpolation type to be used for the extraction.
 * @param[in] isomap_resolution The resolution of the generated isomap. Defaults to 512x512.
 * @return The extracted texture as isomap (texture map).
@@ -184,11 +184,11 @@ inline cv::Mat extract_texture(core::Mesh mesh, cv::Mat affine_camera_matrix, cv
    const float angle = -face_normal_transformed[2]; // flip sign, see above
    assert(angle >= -1.f && angle <= 1.f);
    // angle is [-1, 1].
    // * +1 means 0° (same direction)
    // * 0 means 90°
    // * -1 means 180° (facing opposite directions)
    // It's a linear relation, so +0.5 is 45° etc.
    // An angle larger than 90° means the vertex won't be rendered anyway (because it's back-facing) so we encode 0° to 90°.
    if (angle < 0.0f) {
        alpha_value = 0.0f;
    } else {
@@ -390,6 +390,8 @@ cv::Mat extract_texture(core::Mesh mesh, glm::mat4x4 view_model_matrix, glm::mat
    vector<bool> visibility_ray;
    vector<vec4> rotated_vertices;
+   auto t1 = std::chrono::high_resolution_clock::now();
    // In perspective case... does the perspective projection matrix not change visibility? Do we not need to
    // apply it?
    // (If so, then we can change the two input matrices to this function to one (mvp_matrix)).
@@ -397,6 +399,9 @@ cv::Mat extract_texture(core::Mesh mesh, glm::mat4x4 view_model_matrix, glm::mat
                  [&rotated_vertices, &view_model_matrix](auto&& v) {
                      rotated_vertices.push_back(view_model_matrix * v);
                  });
+   auto t2 = std::chrono::high_resolution_clock::now();
+   std::cout << "extract 1 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
    // This code is duplicated from the edge-fitting. I think I can put this into a function in the library.
    for (const auto& vertex : rotated_vertices)
    {
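An editorial aside on the instrumentation added throughout this file: each of the repeated t1/t2 plus std::cout pairs could be collapsed into a small RAII timer so an instrumented block stays one line. A self-contained sketch (not part of eos):

    #include <chrono>
    #include <cstdio>

    struct ScopedTimer
    {
        const char* label;
        std::chrono::high_resolution_clock::time_point start =
            std::chrono::high_resolution_clock::now();
        ~ScopedTimer() // prints elapsed milliseconds when the scope ends
        {
            const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                                std::chrono::high_resolution_clock::now() - start)
                                .count();
            std::printf("%s %lld ms\n", label, static_cast<long long>(ms));
        }
    };

    int main()
    {
        ScopedTimer t{"extract 1"};
        // ... the block being timed ...
        return 0;
    }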
@@ -429,6 +434,8 @@ cv::Mat extract_texture(core::Mesh mesh, glm::mat4x4 view_model_matrix, glm::mat
        }
        visibility_ray.push_back(visible);
    }
+   t2 = std::chrono::high_resolution_clock::now();
+   std::cout << "extract 2 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
    vector<vec4> wnd_coords; // will contain [x_wnd, y_wnd, z_ndc, 1/w_clip]
    for (auto&& vtx : mesh.vertices)
@@ -441,6 +448,8 @@ cv::Mat extract_texture(core::Mesh mesh, glm::mat4x4 view_model_matrix, glm::mat
        wnd_coords.push_back(clip_coords);
    }
+   t2 = std::chrono::high_resolution_clock::now();
+   std::cout << "extract 3 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
    // Go on with extracting: This only needs the rasteriser/FS, not the whole Renderer.
    const int tex_width = isomap_resolution;
    const int tex_height =
@@ -483,10 +492,211 @@ cv::Mat extract_texture(core::Mesh mesh, glm::mat4x4 view_model_matrix, glm::mat
            extraction_rasterizer.raster_triangle(pa, pb, pc, image_to_extract_from_as_tex);
        }
    }
+   t2 = std::chrono::high_resolution_clock::now();
+   std::cout << "extract 4 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
    return extraction_rasterizer.colorbuffer;
};
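Before the parallel variant below, one design choice worth spelling out: the serial version pushes into a vector<bool>, while extract_texture_parallel pre-sizes a std::vector<int> and writes by index. vector<bool> packs its elements into shared bits, so concurrent writes to neighbouring elements can race even when every index is distinct; one int per flag with a fixed index avoids that. A minimal self-contained illustration of the safe pattern:

    #include <vector>
    #include <cstdio>

    int main()
    {
        const int n = 100000;
        std::vector<int> visible(n, 0); // one word per flag; no bits shared between threads
    #pragma omp parallel for
        for (int i = 0; i < n; ++i)
        {
            visible[i] = (i % 3 != 0); // stand-in for the per-vertex ray-visibility test
        }
        std::printf("first flags: %d %d %d\n", visible[0], visible[1], visible[2]);
        return 0;
    }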
#ifdef _OPENMP
/**
* @brief Extracts the texture of the face from the given image and stores it as isomap (a rectangular texture map).
*
* New texture extraction, will replace above one at some point.
* Copy the documentation from above extract_texture function, once we replace it.
*
* Note/Todo: Add an overload that takes a vector of bool / visible vertices, for the case when we already computed the visibility? (e.g. for edge-fitting)
*
* @param[in] mesh A mesh with texture coordinates.
* @param[in] view_model_matrix Todo.
* @param[in] projection_matrix Todo.
* @param[in] viewport Not needed at the moment. Might be, if we change clip_to_screen_space() to take a viewport.
* @param[in] image The image to extract the texture from. Todo: Does it have to be 8UC3 or something, or does it not matter?
* @param[in] compute_view_angle Unused at the moment.
* @param[in] isomap_resolution The resolution of the generated isomap. Defaults to 512x512.
* @return The extracted texture as isomap (texture map).
*/
cv::Mat extract_texture_parallel(core::Mesh mesh, glm::mat4x4 view_model_matrix, glm::mat4x4 projection_matrix,
glm::vec4 /*viewport, not needed at the moment */, cv::Mat image,
bool /* compute_view_angle, unused atm */, int isomap_resolution = 512)
{
using detail::divide_by_w;
using glm::vec2;
using glm::vec3;
using glm::vec4;
using std::vector;
// actually we only need a rasteriser for this!
Rasterizer<ExtractionFragmentShader> extraction_rasterizer(isomap_resolution, isomap_resolution);
Texture image_to_extract_from_as_tex = create_mipmapped_texture(image, 1);
extraction_rasterizer.enable_depth_test = false;
extraction_rasterizer.extracting_tex = true;
vector<vec4> rotated_vertices;
auto t1 = std::chrono::high_resolution_clock::now();
// In perspective case... does the perspective projection matrix not change visibility? Do we not need to
// apply it?
// (If so, then we can change the two input matrices to this function to one (mvp_matrix)).
std::for_each(std::begin(mesh.vertices), std::end(mesh.vertices),
[&rotated_vertices, &view_model_matrix](auto&& v) {
rotated_vertices.push_back(view_model_matrix * v);
});
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "extract 1 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
std::vector<int> visibility_ray(rotated_vertices.size());
std::cout << "roated vertices size: " << rotated_vertices.size() << std::endl;
// #pragma omp target map(to: occluding_vertices, mesh, rotated_vertices)
// #pragma omp parallel for
// TODO: make portable!
#pragma omp target map(alloc:visibility_ray) map(from:rotated_vertices, mesh)
{
#pragma omp parallel for
for (int i = 0; i < rotated_vertices.size(); i++) {
const auto vertex = rotated_vertices[i];
int visible = 1;
glm::vec3 ray_origin(vertex);
// we shoot the ray from the vertex towards the camera
glm::vec3 ray_direction(0.0f, 0.0f, 1.0f);
// For every tri of the rotated mesh:
for (int j = 0; j < mesh.tvi.size(); j++) {
auto tri = mesh.tvi[j];
auto &v0 = rotated_vertices[tri[0]];
auto &v1 = rotated_vertices[tri[1]];
auto &v2 = rotated_vertices[tri[2]];
auto intersect = fitting::ray_triangle_intersect(ray_origin,
ray_direction,
glm::vec3(v0),
glm::vec3(v1),
glm::vec3(v2),
false);
// first is bool intersect, second is the distance t
if (intersect.first == true) {
// We've hit a triangle. Ray hit its own triangle. If it's behind the ray origin, ignore the intersection:
// Check if in front or behind?
if (intersect.second.get() <= 1e-4) {
continue; // the intersection is behind the vertex, we don't care about it
}
// Otherwise, we've hit a genuine triangle, and the vertex is not visible:
visible = 0;
break;
}
}
visibility_ray[i] = visible;
}
}
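// Note on the approach: this is a brute-force O(#vertices * #triangles) self-occlusion
// test. Every vertex casts a single ray towards the camera (+z in view space) and is
// marked occluded on the first genuine triangle hit; the pragmas above simply
// parallelise the outer loop over vertices.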
// This code is duplicated from the edge-fitting. I think I can put this into a function in the library.
// for (const auto& vertex : rotated_vertices)
// {
// bool visible = true;
// // For every tri of the rotated mesh:
// for (auto&& tri : mesh.tvi)
// {
// auto& v0 = rotated_vertices[tri[0]]; // const?
// auto& v1 = rotated_vertices[tri[1]];
// auto& v2 = rotated_vertices[tri[2]];
//
// vec3 ray_origin(vertex);
// vec3 ray_direction(0.0f, 0.0f, 1.0f); // we shoot the ray from the vertex towards the camera
// auto intersect = fitting::ray_triangle_intersect(ray_origin, ray_direction, vec3(v0), vec3(v1),
// vec3(v2), false);
// // first is bool intersect, second is the distance t
// if (intersect.first == true)
// {
// // We've hit a triangle. Ray hit its own triangle. If it's behind the ray origin, ignore the
// // intersection:
// // Check if in front or behind?
// if (intersect.second.get() <= 1e-4)
// {
// continue; // the intersection is behind the vertex, we don't care about it
// }
// // Otherwise, we've hit a genuine triangle, and the vertex is not visible:
// visible = false;
// break;
// }
// }
// visibility_ray.push_back(visible);
// }
t2 = std::chrono::high_resolution_clock::now();
std::cout << "extract 2 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
vector<vec4> wnd_coords; // will contain [x_wnd, y_wnd, z_ndc, 1/w_clip]
for (auto&& vtx : mesh.vertices)
{
auto clip_coords = projection_matrix * view_model_matrix * vtx;
clip_coords = divide_by_w(clip_coords);
const vec2 screen_coords = clip_to_screen_space(clip_coords.x, clip_coords.y, image.cols, image.rows);
clip_coords.x = screen_coords.x;
clip_coords.y = screen_coords.y;
wnd_coords.push_back(clip_coords);
}
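// A quick worked example of the mapping above (illustrative; it assumes
// clip_to_screen_space() maps NDC [-1, 1] linearly to pixel coordinates): for a
// 1280x720 image, x_ndc = 0 lands at x_wnd = 640 and x_ndc = -1 at x_wnd = 0, while
// z_ndc and 1/w_clip are carried along unchanged for the rasteriser's depth handling.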
t2 = std::chrono::high_resolution_clock::now();
std::cout << "extract 3 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
// Go on with extracting: This only needs the rasteriser/FS, not the whole Renderer.
const int tex_width = isomap_resolution;
const int tex_height = isomap_resolution; // keeping both in case we need non-square texture maps at some point
// for (const auto& tvi : mesh.tvi)
#pragma omp target
{
#pragma omp parallel for
for (int i = 0; i < mesh.tvi.size(); i++) {
const auto tvi = mesh.tvi[i];
if (visibility_ray[tvi[0]] && visibility_ray[tvi[1]] &&
    visibility_ray[tvi[2]]) // could also use ||, which extracts triangles with at least one visible vertex, but then occluded texture can bleed in
{
// Test with a rendered & re-extracted texture shows that we're off by a pixel or more,
// definitely need to correct this. Probably here.
// It looks like it is 1-2 pixels off. Definitely a bit more than 1.
// Build the rasteriser vertex for one triangle corner: the position is the isomap texel
// (texcoords scaled to the isomap size, plus z_ndc and 1/w_clip), and the varying texture
// coordinate is where the projected model vertex lands in the input image, normalised to [0, 1].
const auto make_vertex = [&](int vertex_index) {
    return detail::Vertex<double>{
        vec4(mesh.texcoords[vertex_index][0] * tex_width, mesh.texcoords[vertex_index][1] * tex_height,
             wnd_coords[vertex_index].z /* z_ndc */, wnd_coords[vertex_index].w /* 1/w_clip */),
        vec3(/* empty */),
        // wndcoords of the projected/rendered model triangle (in the input image). Maybe 1 - ... ?
        vec2(wnd_coords[vertex_index].x / image.cols, wnd_coords[vertex_index].y / image.rows)};
};
const auto pa = make_vertex(tvi[0]);
const auto pb = make_vertex(tvi[1]);
const auto pc = make_vertex(tvi[2]);
extraction_rasterizer.raster_triangle(pa, pb, pc, image_to_extract_from_as_tex);
}
}
}
t2 = std::chrono::high_resolution_clock::now();
std::cout << "extract 4 " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
return extraction_rasterizer.colorbuffer;
};
#endif // _OPENMP
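// A minimal usage sketch for extract_texture_parallel (illustrative, not part of this
// commit: it assumes a fitted mesh with texture coordinates, RenderingParameters from
// the fitting, and a BGR input frame; extract_isomap_example is a hypothetical helper):
#ifdef _OPENMP
inline cv::Mat extract_isomap_example(const core::Mesh& mesh,
                                      const fitting::RenderingParameters& rendering_params,
                                      const cv::Mat& frame)
{
    const glm::mat4x4 view_model = rendering_params.get_modelview();
    const glm::mat4x4 projection = rendering_params.get_projection();
    // viewport is unused and compute_view_angle is ignored at the moment (see the doc above):
    return extract_texture_parallel(mesh, view_model, projection, glm::vec4(), frame,
                                    false, 512);
}
#endif // _OPENMP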
} /* namespace v2 */
namespace detail {
......
@@ -35,7 +35,8 @@
#include <atomic>
#include <unistd.h>
-#include <glm/gtx/rotate_vector.hpp>
+#include "glm/gtc/matrix_transform.hpp"
+#include "glm/gtc/quaternion.hpp"
using cv::Mat;
using cv::Vec2f;
@@ -135,15 +136,11 @@ public:
total_frames++;
}
-std::cout << frame.size() << std::endl;
-std::cout << frame.cols << std::endl;
-std::cout << frame.rows << std::endl;
// Take over the size of the original video or take width / height from the settings.
-int frame_width = settings.get<int>("frames.width", frame.cols);
-int frame_height = settings.get<int>("frames.height", frame.rows);
-Size frame_size = Size(frame_width, frame_height);
+output_width = settings.get<int>("output.width", frame.cols);
+output_height = settings.get<int>("output.height", frame.rows);
+Size frame_size = Size(output_width, output_height);
// Initialize writer with given output file
VideoWriter tmp_writer(output_file_path.string(), codec, fps, frame_size);
@@ -193,15 +190,12 @@ public:
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
-auto mesh = fitting::generate_new_mesh(
-    morphable_model,
-    blendshapes,
-    pca_shape_coefficients, // current pca_coeff will be the mean for the first iterations.
-    blend_shape_coefficients);
+// current pca_coeff will be the mean for the first iterations.
+auto mesh = fitting::generate_new_mesh(morphable_model, blendshapes, pca_shape_coefficients, blend_shape_coefficients);
// Will yield model_points, vertex_indices and image_points
// todo: should this function not come from mesh?
-core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height);
@@ -213,8 +207,7 @@ public:
auto current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
blend_shape_coefficients = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_cam, image_points, vertex_indices);
-auto merged_shape = current_pca_shape +
-    blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
+auto merged_shape = current_pca_shape + blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
blend_shape_coefficients.size());
auto merged_mesh = morphablemodel::sample_to_mesh(
@@ -225,8 +218,6 @@ public:
morphable_model.get_texture_coordinates()
);
-auto R = rendering_params.get_rotation();
// Render the model in a separate window using the estimated pose, shape and merged texture:
Mat rendering;
@@ -235,45 +226,39 @@ public:
// make sure the image is CV_8UC4, maybe do check first?
rendering.convertTo(rendering, CV_8UC4);
-Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame);
-Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
+auto t1 = std::chrono::high_resolution_clock::now();
+Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame, true, render::TextureInterpolation::NearestNeighbour, 512);
+// Merge the isomaps - add the current one to the already merged ones:
+Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
Mat frontal_rendering;
-glm::mat4 modelview_frontal = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f));
-std::cout << angle << std::endl;
-core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
-    morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
-    morphable_model.get_color_model().get_mean(),
-    morphable_model.get_shape_model().get_triangle_list(),
-    morphable_model.get_color_model().get_triangle_list(),
-    morphable_model.get_texture_coordinates()
-);
-// angle -= 10.0;
+auto rot_mtx_y = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f));
+rendering_params.set_rotation(rot_mtx_y);
+auto modelview_no_translation = rendering_params.get_modelview();
+modelview_no_translation[3][0] = 0;
+modelview_no_translation[3][1] = 0;
std::tie(frontal_rendering, std::ignore) = render::render(
-    neutral_expression,
-    modelview_frontal,
+    merged_mesh,
+    modelview_no_translation,
    glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
-    512, 512,
+    256, 256,
    render::create_mipmapped_texture(merged_isomap),
    true,
    false,
    false
);
-// cv::imshow("rendering", frontal_rendering);
-// cv::waitKey(0);
+cvtColor(frontal_rendering, frontal_rendering, CV_BGRA2BGR);
fitting::FittingResult fitting_result;
fitting_result.rendering_parameters = rendering_params;
fitting_result.landmarks = landmarks;
fitting_result.mesh = mesh;
-// output this?
-cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
-float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame(face_roi)));
-return Keyframe(frame_laplacian_score, rendering, fitting_result, total_frames);
+return Keyframe(0.0f, frontal_rendering, fitting_result, total_frames);
}
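// Note: the first Keyframe argument is a frame score. The removed code scored frames by
// the variance of the Laplacian over the face ROI (a sharpness measure); the new code
// passes a constant 0.0f, so the score no longer differentiates frames here.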
/**
@@ -344,22 +329,22 @@ public:
// makes a copy of the frame
Keyframe keyframe = generate_new_keyframe(frame);
-if(wireframe) {
-    draw_wireframe(keyframe.frame, keyframe);
-}
-
-if(landmarks) {
-    draw_landmarks(keyframe.frame, keyframe);
-}
+// if(wireframe) {
+//     draw_wireframe(keyframe.frame, keyframe);
+// }
+//
+// if(landmarks) {
+//     draw_landmarks(keyframe.frame, keyframe);
+// }
writer.write(keyframe.frame);
total_frames++;
-// if (show_video) {
-//     std::cout << "show video" << std::endl;
-//     cv::imshow("video", frame);
-//     cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
-// }
+if (show_video) {
+    std::cout << "show video" << std::endl;
+    cv::imshow("video", keyframe.frame);
+    cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
+}
return true;
}
@@ -424,7 +409,10 @@ public:
private:
-float angle = -45.0;
+int output_width;
+int output_height;
+float angle = -45.0f;
int total_frames = 0;
int num_shape_coefficients_to_fit = 0;
// merge all triangles that are facing <60° towards the camera
@@ -552,12 +540,13 @@ public:
// Will yield model_points, vertex_indices and frame_points
// todo: should this function not come from mesh?
-core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height
);
+// set all fitting params we found for this Keyframe
fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
fitting::FittingResult fitting_result;
fitting_result.rendering_parameters = rendering_params;
......
@@ -263,6 +263,9 @@ public:
cv::Mat add_and_merge(const cv::Mat& isomap)
{
// Merge isomaps, add the current to the already merged, pixel by pixel:
+#pragma omp target
+{
+#pragma omp parallel for
for (int r = 0; r < isomap.rows; ++r)
{
for (int c = 0; c < isomap.cols; ++c)
@@ -273,13 +276,21 @@ public:
}
// we're sure to have a visible pixel, merge it:
// merged_pixel = (old_average * visible_count + new_pixel) / (visible_count + 1)
-merged_isomap.at<cv::Vec4f>(r, c)[0] = (merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
-merged_isomap.at<cv::Vec4f>(r, c)[1] = (merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
-merged_isomap.at<cv::Vec4f>(r, c)[2] = (merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
-merged_isomap.at<cv::Vec4f>(r, c)[3] = 255; // as soon as we've seen the pixel visible once, we set it to visible.
+merged_isomap.at<cv::Vec4f>(r, c)[0] =
+    (merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c)
+    + isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
+merged_isomap.at<cv::Vec4f>(r, c)[1] =
+    (merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c)
+    + isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
+merged_isomap.at<cv::Vec4f>(r, c)[2] =
+    (merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c)
+    + isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
+merged_isomap.at<cv::Vec4f>(r, c)[3] =
+    255; // as soon as we've seen the pixel visible once, we set it to visible.
++visibility_counter.at<int>(r, c);
}
}
+}
cv::Mat merged_isomap_uchar;
merged_isomap.convertTo(merged_isomap_uchar, CV_8UC4);
return merged_isomap_uchar;
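// Worked example of the running average above (illustrative numbers): if a texel has
// been seen visible 3 times with a current average of 100 and the new isomap contributes
// 120, the merged value becomes (100 * 3 + 120) / 4 = 105 and its counter becomes 4.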
......
@@ -252,7 +252,7 @@ void render_output(
// Will yield model_points, vertex_indices and image_points
// todo: should this function not come from mesh?
-core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
+core::get_landmark_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height);
@@ -656,6 +656,7 @@ int main(int argc, char *argv[]) {
auto reconstruction_data = eos::fitting::ReconstructionData{
morphable_model, blendshapes, landmark_mapper, landmark_list, model_contour, ibug_contour, edge_topology};
// Start with the video play and get video file:
BufferedVideoIterator vid_iterator;
@@ -668,6 +669,15 @@ int main(int argc, char *argv[]) {
// Start getting video frames:
vid_iterator.start();
+ReconstructionVideoWriter vid_writer;
+try {
+    vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
+} catch(std::runtime_error &e) {
+    std::cout << e.what() << std::endl;
+    return EXIT_FAILURE;
+}
+int num_iterations = settings.get<int>("reconstruction.num_iterations", 10);
// vid_writer.start();
// Count the amount of iterations:
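For reference, a sketch of the settings entries read via boost::property_tree in this file
(the key paths match the settings.get calls above; the INI-style layout and the concrete
values are assumptions, not part of this commit):

    [reconstruction]
    num_iterations = 10

    [output]
    make_video = true
    width = 1280
    height = 720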
@@ -681,7 +691,7 @@ int main(int argc, char *argv[]) {
// it makes no sense to update pca_coeff if nothing in the buffer has changed:
if (vid_iterator.has_changed()) {
-std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
+std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< num_iterations << std::endl;
// Fit shape and pose:
auto t1 = std::chrono::high_resolution_clock::now();
@@ -696,7 +706,7 @@ int main(int argc, char *argv[]) {
edge_topology,
ibug_contour,
model_contour,
-50,
+num_iterations,
boost::none,
30.0f,
boost::none,
@@ -709,9 +719,13 @@ int main(int argc, char *argv[]) {
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "Reconstruction took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
-<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / key_frames.size()
+<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / (key_frames.size() * num_iterations)
<< "ms)" << std::endl;
+if(settings.get<bool>("output.make_video", false)) {
+    vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
+    vid_writer.next();
+}
// evaluate_results(
// key_frames,
@@ -735,39 +749,39 @@ int main(int argc, char *argv[]) {
}
// vid_writer.__stop();
-if(settings.get<bool>("output.make_video", false)) {
-    ReconstructionVideoWriter vid_writer;
-    try {
-        vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
-    } catch(std::runtime_error &e) {
-        std::cout << e.what() << std::endl;
-        return EXIT_FAILURE;
-    }
-    // Render output:
-    std::cout << "Waiting for video to be completed..." << std::endl;
-    vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
-
-    while (vid_writer.next()) {
-        printf("%d/%d\r", vid_writer.get_frame_number(), vid_iterator.get_frame_number());
-    }
-}
-// auto key_frames = vid_iterator.get_keyframes();
-// std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
-//
-// evaluate_results(
-//     key_frames,
-//     rendering_paramss,
-//     meshs,
-//     pca_shape_coefficients,
-//     blendshape_coefficients,
-//     fitted_image_points,
-//     annotations,
-//     reconstruction_data,
-//     settings,
-//     n_iter
-// );
+// if(settings.get<bool>("output.make_video", false)) {
+//     // Render output:
+//     std::cout << "Waiting for video to be completed..." << std::endl;
+//     vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
+//
+//     int count = 0;
+//     while (count < 40) {
+//         auto t1 = std::chrono::high_resolution_clock::now();
+//         vid_writer.next();
+//         auto t2 = std::chrono::high_resolution_clock::now();
+//         printf("Frame %d/%d (%d)\n", vid_writer.get_frame_number(), vid_iterator.get_frame_number(), count);
+//
+//         std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << "ms" << std::endl;
+//         count++;
+//     }
+//
+//     vid_writer.__stop();
+// }
+auto key_frames = vid_iterator.get_keyframes();
+evaluate_results(
+    key_frames,
+    rendering_paramss,
+    meshs,
+    pca_shape_coefficients,
+    blendshape_coefficients,
+    fitted_image_points,
+    annotations,
+    reconstruction_data,
+    settings,
+    n_iter
+);
//todo: we could build our final obj here?
......