Commit c5628500 authored by Richard Torenvliet's avatar Richard Torenvliet

Test with OpenMP GPU - added for occluding boundary vertices

parent e861a303
......@@ -113,6 +113,7 @@ set(HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/video/Keyframe.hpp
)
# Set OpenMP compiler flags if possible
find_package(OpenMP)
if (OPENMP_FOUND)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
......
......@@ -42,9 +42,10 @@ namespace eos {
struct FittingResult
{
RenderingParameters rendering_parameters;
core::LandmarkCollection<cv::Vec2f> landmarks;
std::vector<float> pca_shape_coefficients;
std::vector<float> blendshape_coefficients;
core::LandmarkCollection<cv::Vec2f> landmarks;
core::Mesh mesh;
};
} /* namespace fitting */
......
......@@ -45,6 +45,7 @@
#include <utility>
#include <cstddef>
namespace eos {
namespace fitting {
......@@ -391,7 +392,11 @@ inline std::pair<std::vector<cv::Vec2f>, std::vector<int>> find_occluding_edge_c
}
return { image_points, vertex_indices };
};
#ifdef _OPENMP
/**
* OpenMP version
*
* @brief Computes the vertices that lie on occluding boundaries, given a particular pose.
*
* This algorithm computes the edges that lie on occluding boundaries of the mesh.
......@@ -404,7 +409,7 @@ inline std::pair<std::vector<cv::Vec2f>, std::vector<int>> find_occluding_edge_c
* @param[in] R The rotation (pose) under which the occluding boundaries should be computed.
* @return A vector with unique vertex id's making up the edges.
*/
inline std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, glm::mat4x4 R)
std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, glm::mat4x4 R)
{
// Rotate the mesh:
std::vector<glm::vec4> rotated_vertices;
......@@ -423,7 +428,8 @@ inline std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& m
// Find occluding edges:
std::vector<int> occluding_edges_indices;
for (int edge_idx = 0; edge_idx < edge_topology.adjacent_faces.size(); ++edge_idx) // For each edge... Ef contains the indices of the two adjacent faces
for (int edge_idx = 0; edge_idx < edge_topology.adjacent_faces.size();
++edge_idx) // For each edge... Ef contains the indices of the two adjacent faces
{
const auto &edge = edge_topology.adjacent_faces[edge_idx];
if (edge[0] == 0) // ==> NOTE/Todo Need to change this if we use 0-based indexing!
......@@ -455,65 +461,81 @@ inline std::vector<int> occluding_boundary_vertices_parallel(const core::Mesh& m
// Remove duplicate vertex id's (std::unique works only on sorted sequences):
std::sort(begin(occluding_vertices), end(occluding_vertices));
occluding_vertices.erase(std::unique(begin(occluding_vertices), end(occluding_vertices)), end(occluding_vertices));
// Perform ray-casting to find out which vertices are not visible (i.e. self-occluded):
std::vector<bool> visibility;
// Use an index-addressed result vector so every iteration writes only its own slot (allows parallelism); all vertex ids are unique
std::vector<int> vertex_id_visible_map(occluding_vertices.size());
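// A per-index int flag (rather than std::vector<bool> with push_back) lets every loop
// iteration write only its own element; std::vector<bool> is bit-packed, so concurrent
// writes to neighbouring elements would not be thread-safe.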
int NUM_THREADS = 4;
auto t1 = std::chrono::high_resolution_clock::now();
// we shoot the ray from the vertex towards the camera
glm::vec3 ray_direction(0.0f, 0.0f, 1.0f);
#pragma omp parallel for schedule(static) num_threads(NUM_THREADS)
for(int i = 0; i < occluding_vertices.size(); i++) {
// #pragma omp target map(to: occluding_vertices, mesh, rotated_vertices)
// #pragma omp parallel for
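// Caveat (assumption): mapping std::vector / core::Mesh objects by name in the target
// clause only copies the container objects themselves, not the heap memory they own;
// a proper GPU offload would map raw pointers with sizes, e.g. map(to: ptr[0:n]).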
#pragma omp target map(to: occluding_vertices, rotated_vertices, mesh) map(from: vertex_id_visible_map)
{
#pragma omp parallel for
for (int i = 0; i < occluding_vertices.size(); i++) {
const auto vertex_idx = occluding_vertices[i];
bool visible = true;
int visible = 1;
glm::vec3 ray_origin(rotated_vertices[vertex_idx]);
// we shoot the ray from the vertex towards the camera
glm::vec3 ray_direction(0.0f, 0.0f, 1.0f);
// For every tri of the rotated mesh:
for(int j = 0; j < mesh.tvi.size(); j++) {
for (int j = 0; j < mesh.tvi.size(); j++) {
auto tri = mesh.tvi[j];
auto &v0 = rotated_vertices[tri[0]];
auto &v1 = rotated_vertices[tri[1]];
auto &v2 = rotated_vertices[tri[2]];
auto intersect = ray_triangle_intersect(ray_origin, ray_direction, glm::vec3(v0), glm::vec3(v1), glm::vec3(v2), false);
auto intersect = ray_triangle_intersect(ray_origin,
ray_direction,
glm::vec3(v0),
glm::vec3(v1),
glm::vec3(v2),
false);
// first is bool intersect, second is the distance t
if (intersect.first == true) {
// We've hit a triangle. If the intersection is at (or numerically just behind) the ray
// origin, the ray has hit its own triangle - ignore that intersection:
if (intersect.second.get() <= 1e-4)
{
if (intersect.second.get() <= 1e-4) {
continue; // the intersection is behind the vertex, we don't care about it
}
// Otherwise, we've hit a genuine triangle, and the vertex is not visible:
visible = false;
visible = 0;
break;
}
}
visibility.push_back(visible);
vertex_id_visible_map[i] = visible;
}
}
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "test derp 1: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << "ms" << std::endl;
// Remove vertices from occluding boundary list that are not visible:
// copy the results to final vertex ids
std::vector<int> final_vertex_ids;
for (int i = 0; i < occluding_vertices.size(); ++i)
{
if (visibility[i] == true)
{
final_vertex_ids.push_back(occluding_vertices[i]);
for (int i = 0; i < occluding_vertices.size(); ++i) {
const auto vertex_idx = occluding_vertices[i];
int visible = vertex_id_visible_map[i];
if (visible == 1) {
final_vertex_ids.push_back(vertex_idx);
}
}
auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
printf("S %lu %lld ms (mean: %f)\n",
final_vertex_ids.size(),
final_timing,
static_cast<float>(final_timing) / final_vertex_ids.size()
);
return final_vertex_ids;
};
/**
*
* OpenMP version
*
* @brief For a given list of 2D edge points, find corresponding 3D vertex IDs.
*
* This algorithm first computes the 3D mesh's occluding boundary vertices under
......@@ -593,6 +615,7 @@ inline std::pair<std::vector<cv::Vec2f>, std::vector<int>> find_occluding_edge_c
}
return { image_points, vertex_indices };
};
#endif // _OPENMP
......
......@@ -43,7 +43,9 @@
#include <vector>
#include <algorithm>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace eos {
namespace fitting {
......@@ -842,32 +844,6 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
);
};
//void calculate_rendering_params() {
// // Need to do an initial pose fit to do the contour fitting inside the loop.
// // We'll do an expression fit too, since face shapes vary quite a lot, depending on expressions.
// vector<fitting::RenderingParameters> rendering_params;
// for (int j = 0; j < num_images; ++j) {
// fitting::ScaledOrthoProjectionParameters current_pose = fitting::estimate_orthographic_projection_linear (image_points[j], model_points[j], true, image_height);
// fitting::RenderingParameters current_rendering_params (current_pose, image_width, image_height);
// rendering_params.push_back (current_rendering_params);
//
// cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix (current_rendering_params, image_width, image_height);
// blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls (blendshapes, current_pca_shape, affine_from_ortho, image_points[j], vertex_indices[j]);
//
// // Mesh with same PCA coeffs as before, but new expression fit (this is relevant if no initial blendshape coeffs have been given):
// current_combined_shapes[j] = current_pca_shape + morphablemodel::to_matrix (blendshapes)
// * Eigen::Map<const Eigen::VectorXf> (blendshape_coefficients[j]
// .data (), blendshape_coefficients[j]
// .size ());
// current_meshs[j] = morphablemodel::sample_to_mesh (current_combined_shapes[j], morphable_model
// .get_color_model ().get_mean (), morphable_model.get_shape_model ().get_triangle_list (), morphable_model
// .get_color_model ()
// .get_triangle_list (), morphable_model
// .get_texture_coordinates ());
// }
//}}
/**
* Generate a new mesh given pca_coefficients and blend_shape_coefficients.
* @param pca_shape_coefficients
......@@ -905,279 +881,10 @@ inline eos::core::Mesh generate_new_mesh(
}
inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_parallel(
const morphablemodel::MorphableModel& morphable_model,
const std::vector<morphablemodel::Blendshape>& blendshapes,
std::vector<eos::video::Keyframe>& key_frames,
const core::LandmarkMapper& landmark_mapper,
int image_width,
int image_height,
int num_images,
const morphablemodel::EdgeTopology& edge_topology,
const fitting::ContourLandmarks& contour_landmarks,
const fitting::ModelContour& model_contour,
int num_iterations,
boost::optional<int> num_shape_coefficients_to_fit,
float lambda,
boost::optional<fitting::RenderingParameters> initial_rendering_params,
std::vector<float>& pca_shape_coefficients,
std::vector<std::vector<float>>& blendshape_coefficients,
std::vector<std::vector<cv::Vec2f>>& fitted_image_points) {
assert(blendshapes.size() > 0);
assert(num_iterations > 0); // Can we allow 0, for only the initial pose-fit?
assert(pca_shape_coefficients.size() <= morphable_model.get_shape_model().get_num_principal_components());
using std::vector;
using cv::Vec2f;
using cv::Vec4f;
using cv::Mat;
using Eigen::VectorXf;
using Eigen::MatrixXf;
if (!num_shape_coefficients_to_fit) {
num_shape_coefficients_to_fit = morphable_model.get_shape_model().get_num_principal_components();
}
if (pca_shape_coefficients.empty()) {
pca_shape_coefficients.resize(num_shape_coefficients_to_fit.get());
}
// TODO: This leaves the following case open: num_coeffs given is empty or defined, but the
// pca_shape_coefficients given is != num_coeffs or the model's max-coeffs. What to do then? Handle & document!
if (blendshape_coefficients.size() < num_images) {
for (int j = 0; j < num_images; ++j) {
std::vector<float> current_blendshape_coefficients;
current_blendshape_coefficients.resize(blendshapes.size());
blendshape_coefficients.push_back(current_blendshape_coefficients);
}
}
MatrixXf blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
// Current mesh - either from the given coefficients, or the mean:
VectorXf current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
vector<VectorXf> current_combined_shapes(num_images);
vector<eos::core::Mesh> current_meshs(num_images);
// The 2D and 3D point correspondences used for the fitting:
vector<vector<Vec4f>> model_points(num_images); // the points in the 3D shape model of all frames.
vector<vector<int>> vertex_indices(num_images); // their vertex indices of all frames.
vector<vector<Vec2f>> image_points(num_images); // the corresponding 2D landmark points of all frames.
int NUM_THREADS = 4;
vector<fitting::RenderingParameters> rendering_params(num_images); // list of rendering params for all frames.
#pragma omp parallel num_threads(NUM_THREADS)
{
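// `omp parallel` creates the thread team once; the nested `omp for` below distributes
// the per-frame iterations over that team (equivalent to a combined `parallel for`,
// but the split form leaves room for further worksharing constructs inside the region).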
#pragma omp for
for (int j = 0; j < num_images; ++j) {
VectorXf current_combined_shape = current_pca_shape +
blendshapes_as_basis *
Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size());
eos::core::Mesh current_mesh = morphablemodel::sample_to_mesh(
current_combined_shape,
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates());
vector<Vec4f> curr_model_points;
vector<int> curr_vertex_indices;
vector<Vec2f> curr_image_points;
// Get the locations of the model locations of the meshes, vertex_indices and image points
// (equal to landmark coordinates), for every image / mesh.
std::tie(curr_model_points, curr_vertex_indices, curr_image_points) = eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
key_frames[j].fitting_result.landmarks, landmark_mapper, current_mesh);
// Start constructing a list of rendering parameters needed for reconstruction.
// Get the current points from the last added image points and model points
auto current_pose = fitting::estimate_orthographic_projection_linear(
curr_image_points, curr_model_points, true, image_height);
fitting::RenderingParameters current_rendering_params(current_pose, image_width, image_height);
rendering_params[j] = current_rendering_params;
// update key frame rendering params
key_frames[j].fitting_result.rendering_parameters = current_rendering_params;
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_from_ortho, curr_image_points, curr_vertex_indices);
// Mesh with same PCA coeffs as before, but new expression fit (this is relevant if no initial blendshape coeffs have been given):
current_combined_shape = current_pca_shape +
morphablemodel::to_matrix(blendshapes) *
Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size()
);
current_combined_shapes[j] = current_combined_shape;
current_mesh = morphablemodel::sample_to_mesh(
current_combined_shape, morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
current_meshs[j] = current_mesh;
model_points[j] = curr_model_points;
vertex_indices[j] = curr_vertex_indices;
image_points[j] = curr_image_points;
}
}
// The static (fixed) landmark correspondences which will stay the same throughout
// the fitting (the inner face landmarks):
vector<vector<int>> fixed_vertex_indices (vertex_indices);
vector<vector<Vec2f>> fixed_image_points (image_points);
std::vector<cv::Mat> affine_from_orthos(num_images);
std::vector<VectorXf> mean_plus_blendshapes(num_images);
image_points = fixed_image_points;
vertex_indices = fixed_vertex_indices;
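// image_points / vertex_indices have just been reset to the fixed inner-face
// correspondences; the pose-dependent contour and occluding-edge correspondences
// are appended again per frame below.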
#pragma omp parallel num_threads(NUM_THREADS)
{
#pragma omp for
for (int j = 0; j < num_images; ++j) {
// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
vector<Vec2f> image_points_contour;
vector<int> vertex_indices_contour;
auto curr_keyframe = key_frames[j];
auto landmarks = curr_keyframe.fitting_result.landmarks;
float yaw_angle = curr_keyframe.yaw_angle;
// auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
std::tie(image_points_contour, std::ignore, vertex_indices_contour) = fitting::get_contour_correspondences(
landmarks,
contour_landmarks,
model_contour,
yaw_angle,
current_meshs[j],
rendering_params[j].get_modelview(),
rendering_params[j].get_projection(),
fitting::get_opencv_viewport(image_width, image_height)
);
// Add the contour correspondences to the set of landmarks that we use for the fitting:
vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
image_points[j] = fitting::concat(image_points[j], image_points_contour);
// Fit the occluding (away-facing) contour using the detected contour LMs:
vector<Eigen::Vector2f> occluding_contour_landmarks;
// positive yaw = subject looking to the left
if (yaw_angle >= 0.0f) {
// the left contour is the occluding one we want to use ("away-facing")
auto contour_landmarks_ =
core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
std::for_each(begin(contour_landmarks_),
end(contour_landmarks_),
[&occluding_contour_landmarks](auto &&lm)
{
occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
});
}
else
{
auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
std::for_each(begin(contour_landmarks_),
end(contour_landmarks_),
[&occluding_contour_landmarks](auto &&lm)
{
occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
});
}
auto edge_correspondences = fitting::find_occluding_edge_correspondences_parallel(
current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f
);
image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);
// Get the model points of the current mesh, for all correspondences that we've got:
model_points[j].clear();
for (const auto &v : vertex_indices[j])
{
model_points[j].push_back(
{
current_meshs[j].vertices[v][0],
current_meshs[j].vertices[v][1],
current_meshs[j].vertices[v][2],
current_meshs[j].vertices[v][3]
});
}
// Re-estimate the pose, using all correspondences:
auto current_pose = fitting::estimate_orthographic_projection_linear(image_points[j], model_points[j], true, image_height);
rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
affine_from_orthos[j] = affine_from_ortho;
// Estimate the PCA shape coefficients with the current blendshape coefficients:
VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
blendshape_coefficients[j].size());
mean_plus_blendshapes[j] = current_mean_plus_blendshapes;
}
}
pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi_parallel(
morphable_model,
affine_from_orthos,
image_points,
vertex_indices,
mean_plus_blendshapes,
lambda,
num_shape_coefficients_to_fit
);
// Estimate the blendshape coefficients with the current PCA model estimate:
current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
#pragma omp parallel num_threads(NUM_THREADS)
{
#pragma omp for
for (int j = 0; j < num_images; ++j)
{
blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_from_orthos[j], image_points[j], vertex_indices[j]
);
current_combined_shapes[j] = current_pca_shape +
blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
blendshape_coefficients[j].size());
current_meshs[j] = morphablemodel::sample_to_mesh(
current_combined_shapes[j],
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
}
}
fitted_image_points = image_points;
return {current_meshs, rendering_params}; // I think we could also work with a Mat face_instance in this function instead of a Mesh, but it would convolute the code more (i.e. more complicated to access vertices).
};
inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_2(
const morphablemodel::MorphableModel& morphable_model,
const std::vector<morphablemodel::Blendshape>& blendshapes,
std::vector<eos::video::Keyframe>& key_frames,
std::vector<eos::video::Keyframe>& keyframes,
const core::LandmarkMapper& landmark_mapper,
int image_width,
int image_height,
......@@ -1253,9 +960,9 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
// (equal to landmark coordinates), for every image / mesh.
// std::tie(curr_model_points, curr_vertex_indices, curr_image_points) = eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
// key_frames[j].fitting_result.landmarks, landmark_mapper, current_mesh);
// keyframes[j].fitting_result.landmarks, landmark_mapper, current_mesh);
eos::core::get_landmark_coordinates_inline<Vec2f, Vec4f>(
key_frames[j].fitting_result.landmarks,
keyframes[j].fitting_result.landmarks,
landmark_mapper,
current_mesh,
// output parameters
......@@ -1272,7 +979,7 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
rendering_params.push_back(current_rendering_params);
// update key frame rendering params
key_frames[j].fitting_result.rendering_parameters = current_rendering_params;
keyframes[j].fitting_result.rendering_parameters = current_rendering_params;
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
......@@ -1314,7 +1021,7 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
vector<Vec2f> image_points_contour;
vector<int> vertex_indices_contour;
auto curr_keyframe = key_frames[j];
auto curr_keyframe = keyframes[j];
auto landmarks = curr_keyframe.fitting_result.landmarks;
auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
......@@ -1417,6 +1124,295 @@ inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParamete
return { current_meshs, rendering_params }; // I think we could also work with a Mat face_instance in this function instead of a Mesh, but it would convolute the code more (i.e. more complicated to access vertices).
};
#ifdef _OPENMP
inline std::pair<std::vector<core::Mesh>, std::vector<fitting::RenderingParameters>> fit_shape_and_pose_multi_parallel(
const morphablemodel::MorphableModel& morphable_model,
const std::vector<morphablemodel::Blendshape>& blendshapes,
std::vector<eos::video::Keyframe>& keyframes,
const core::LandmarkMapper& landmark_mapper,
int image_width,
int image_height,
int num_images,
const morphablemodel::EdgeTopology& edge_topology,
const fitting::ContourLandmarks& contour_landmarks,
const fitting::ModelContour& model_contour,
int num_iterations,
boost::optional<int> num_shape_coefficients_to_fit,
float lambda,
boost::optional<fitting::RenderingParameters> initial_rendering_params,
std::vector<float>& pca_shape_coefficients,
std::vector<std::vector<float>>& blendshape_coefficients,
std::vector<std::vector<cv::Vec2f>>& fitted_image_points,
boost::property_tree::ptree settings) {
assert(blendshapes.size() > 0);
assert(num_iterations > 0); // Can we allow 0, for only the initial pose-fit?
assert(pca_shape_coefficients.size() <= morphable_model.get_shape_model().get_num_principal_components());
using std::vector;
using cv::Vec2f;
using cv::Vec4f;
using cv::Mat;
using Eigen::VectorXf;
using Eigen::MatrixXf;
// read settings and trade-offs:
int NUM_THREADS = settings.get<int>("reconstruction.num_threads", 1);
bool use_contours = settings.get<bool>("reconstruction.use_contours", true);
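// Example (assumed) [reconstruction] section of the .ini settings consumed by the two reads above:
//   [reconstruction]
//   num_threads = 4
//   use_contours = true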
if (!num_shape_coefficients_to_fit) {
num_shape_coefficients_to_fit = morphable_model.get_shape_model().get_num_principal_components();
}
if (pca_shape_coefficients.empty()) {
pca_shape_coefficients.resize(num_shape_coefficients_to_fit.get());
}
// TODO: This leaves the following case open: num_coeffs given is empty or defined, but the
// pca_shape_coefficients given is != num_coeffs or the model's max-coeffs. What to do then? Handle & document!
if (blendshape_coefficients.size() < num_images) {
for (int j = 0; j < num_images; ++j) {
std::vector<float> current_blendshape_coefficients;
current_blendshape_coefficients.resize(blendshapes.size());
blendshape_coefficients.push_back(current_blendshape_coefficients);
}
}
MatrixXf blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
// Current mesh - either from the given coefficients, or the mean:
VectorXf current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
vector<VectorXf> current_combined_shapes(num_images);
vector<eos::core::Mesh> current_meshs(num_images);
// The 2D and 3D point correspondences used for the fitting:
vector<vector<Vec4f>> model_points(num_images); // the points in the 3D shape model of all frames.
vector<vector<int>> vertex_indices(num_images); // their vertex indices of all frames.
vector<vector<Vec2f>> image_points(num_images); // the corresponding 2D landmark points of all frames.
vector<fitting::RenderingParameters> rendering_params(num_images); // list of rendering params for all frames.
std::vector<cv::Mat> affine_from_orthos(num_images);
std::vector<VectorXf> mean_plus_blendshapes(num_images);
#pragma omp parallel num_threads(NUM_THREADS)
{
#pragma omp for
for (int j = 0; j < num_images; ++j) {
VectorXf current_combined_shape = current_pca_shape +
blendshapes_as_basis *
Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size());
eos::core::Mesh current_mesh = morphablemodel::sample_to_mesh(
current_combined_shape,
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates());
vector<Vec4f> curr_model_points;
vector<int> curr_vertex_indices;
vector<Vec2f> curr_image_points;
// Get the locations of the model locations of the meshes, vertex_indices and image points
// (equal to landmark coordinates), for every image / mesh.
std::tie(curr_model_points, curr_vertex_indices, curr_image_points) = eos::core::get_landmark_coordinates<Vec2f, Vec4f>(
keyframes[j].fitting_result.landmarks, landmark_mapper, current_mesh);
// Start constructing a list of rendering parameters needed for reconstruction.
// Get the current points from the last added image points and model points
auto current_pose = fitting::estimate_orthographic_projection_linear(
curr_image_points, curr_model_points, true, image_height);
fitting::RenderingParameters current_rendering_params(current_pose, image_width, image_height);
rendering_params[j] = current_rendering_params;
// update key frame rendering params
keyframes[j].fitting_result.rendering_parameters = current_rendering_params;
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(current_rendering_params, image_width, image_height);
// Store the affine camera matrix here already; it is used as-is when contour fitting is disabled.
affine_from_orthos[j] = affine_from_ortho;
blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_from_ortho, curr_image_points, curr_vertex_indices);
// Mesh with same PCA coeffs as before, but new expression fit (this is relevant if no initial blendshape coeffs have been given):
current_combined_shape = current_pca_shape +
morphablemodel::to_matrix(blendshapes) *
Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(), blendshape_coefficients[j].size()
);
current_combined_shapes[j] = current_combined_shape;
current_mesh = morphablemodel::sample_to_mesh(
current_combined_shape, morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
current_meshs[j] = current_mesh;
model_points[j] = curr_model_points;
vertex_indices[j] = curr_vertex_indices;
image_points[j] = curr_image_points;
// Estimate the PCA shape coefficients with the current blendshape coefficients:
VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
blendshape_coefficients[j].size());
mean_plus_blendshapes[j] = current_mean_plus_blendshapes;
}
}
// The static (fixed) landmark correspondences which will stay the same throughout
// the fitting (the inner face landmarks):
vector<vector<int>> fixed_vertex_indices (vertex_indices);
vector<vector<Vec2f>> fixed_image_points (image_points);
image_points = fixed_image_points;
vertex_indices = fixed_vertex_indices;
if (use_contours) {
for (int j = 0; j < num_images; ++j)
{
// Given the current pose, find 2D-3D contour correspondences of the front-facing face contour:
vector<Vec2f> image_points_contour;
vector<int> vertex_indices_contour;
auto curr_keyframe = keyframes[j];
auto landmarks = curr_keyframe.fitting_result.landmarks;
auto yaw_angle = glm::degrees(glm::eulerAngles(rendering_params[j].get_rotation())[1]);
// For each 2D contour landmark, get the corresponding 3D vertex point and vertex id:
std::tie(image_points_contour, std::ignore, vertex_indices_contour) =
fitting::get_contour_correspondences(
landmarks,
contour_landmarks,
model_contour,
yaw_angle,
current_meshs[j],
rendering_params[j].get_modelview(),
rendering_params[j].get_projection(),
fitting::get_opencv_viewport(image_width, image_height)
);
// Add the contour correspondences to the set of landmarks that we use for the fitting:
vertex_indices[j] = fitting::concat(vertex_indices[j], vertex_indices_contour);
image_points[j] = fitting::concat(image_points[j], image_points_contour);
// Fit the occluding (away-facing) contour using the detected contour LMs:
vector<Eigen::Vector2f> occluding_contour_landmarks;
// positive yaw = subject looking to the left
if (yaw_angle >= 0.0f)
{
// the left contour is the occluding one we want to use ("away-facing")
auto contour_landmarks_ =
core::filter(landmarks, contour_landmarks.left_contour); // Can do this outside of the loop
std::for_each(begin(contour_landmarks_),
end(contour_landmarks_),
[&occluding_contour_landmarks](auto &&lm)
{
occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
});
}
else
{
auto contour_landmarks_ = core::filter(landmarks, contour_landmarks.right_contour);
std::for_each(begin(contour_landmarks_),
end(contour_landmarks_),
[&occluding_contour_landmarks](auto &&lm)
{
occluding_contour_landmarks.push_back({lm.coordinates[0], lm.coordinates[1]});
});
}
auto edge_correspondences = fitting::find_occluding_edge_correspondences_parallel(
current_meshs[j], edge_topology, rendering_params[j], occluding_contour_landmarks, 180.0f
);
image_points[j] = fitting::concat(image_points[j], edge_correspondences.first);
vertex_indices[j] = fitting::concat(vertex_indices[j], edge_correspondences.second);
// Get the model points of the current mesh, for all correspondences that we've got:
model_points[j].clear();
for (const auto &v : vertex_indices[j])
{
model_points[j].push_back(
{
current_meshs[j].vertices[v][0],
current_meshs[j].vertices[v][1],
current_meshs[j].vertices[v][2],
current_meshs[j].vertices[v][3]
});
}
// Re-estimate the pose, using all correspondences:
auto current_pose = fitting::estimate_orthographic_projection_linear(image_points[j],
model_points[j],
true,
image_height);
rendering_params[j] = fitting::RenderingParameters(current_pose, image_width, image_height);
Mat affine_from_ortho =
fitting::get_3x4_affine_camera_matrix(rendering_params[j], image_width, image_height);
affine_from_orthos[j] = affine_from_ortho;
// Estimate the PCA shape coefficients with the current blendshape coefficients:
VectorXf current_mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() +
blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
blendshape_coefficients[j].size());
mean_plus_blendshapes[j] = current_mean_plus_blendshapes;
}
}
pca_shape_coefficients = fitting::fit_shape_to_landmarks_linear_multi_parallel(
morphable_model,
affine_from_orthos,
image_points,
vertex_indices,
mean_plus_blendshapes,
lambda,
num_shape_coefficients_to_fit
);
// Estimate the blendshape coefficients with the current PCA model estimate:
current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
#pragma omp parallel num_threads(NUM_THREADS)
{
#pragma omp for
for (int j = 0; j < num_images; ++j) {
blendshape_coefficients[j] = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_from_orthos[j], image_points[j], vertex_indices[j]
);
current_combined_shapes[j] = current_pca_shape +
blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blendshape_coefficients[j].data(),
blendshape_coefficients[j].size());
current_meshs[j] = morphablemodel::sample_to_mesh(
current_combined_shapes[j],
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
// save it to the keyframe, we might need it for showing the reconstruction.
// we could make it optional
keyframes[j].fitting_result.mesh = current_meshs[j];
keyframes[j].fitting_result.rendering_parameters = rendering_params[j];
}
}
fitted_image_points = image_points;
return {current_meshs, rendering_params}; // I think we could also work with a Mat face_instance in this function instead of a Mesh, but it would convolute the code more (i.e. more complicated to access vertices).
};
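// Minimal call-site sketch (assumed setup; models, keyframes and the settings ptree are prepared by the caller):
//   std::vector<float> pca_coeffs;
//   std::vector<std::vector<float>> blendshape_coeffs;
//   std::vector<std::vector<cv::Vec2f>> fitted_points;
//   auto meshes_and_params = fitting::fit_shape_and_pose_multi_parallel(
//       morphable_model, blendshapes, keyframes, landmark_mapper, frame_width, frame_height,
//       static_cast<int>(keyframes.size()), edge_topology, contour_landmarks, model_contour,
//       /*num_iterations=*/1, boost::none, /*lambda=*/30.0f, boost::none,
//       pca_coeffs, blendshape_coeffs, fitted_points, settings);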
#endif // _OPENMP
} /* namespace fitting */
} /* namespace eos */
......
......@@ -315,8 +315,8 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi_parallel(
#pragma omp parallel num_threads(num_threads)
{
#pragma omp for
for (int i = 0; i < num_landmarks; ++i)
{
for (int i = 0; i < num_landmarks; ++i) {
// calculate index from landmark offset (not always the same landmarks amount).
int index = (landmark_offset_index[k] * 4) + 4 * i;
int offset = landmark_offset_index[k] + i;
auto vertex_id = vertex_ids[k][i];
......@@ -328,16 +328,14 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi_parallel(
basis_rows_.block(0, 0, basis_rows_.rows(), num_coeffs_to_fit);
// Set y with landmark coordinates (will be used in linear lsq later)
#pragma omp simd
for (int j = 0; j < 2; j++)
{
#pragma omp simd
for (int j = 0; j < 2; j++) {
y(3 * offset + j) = landmarks[k][i][j];
}
// Set v_bar with face mesh (will be used in linear lsq later)
#pragma omp simd
for (int j = 0; j < 3; j++)
{
#pragma omp simd
for (int j = 0; j < 3; j++) {
v_bar(4 * offset + j) = base_face[k](vertex_id * 3 + j);
}
}
......@@ -368,13 +366,13 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi_parallel(
auto t2 = std::chrono::high_resolution_clock::now();
t1 = std::chrono::high_resolution_clock::now();
// fill P with coefficients
P.setFromTriplets(P_coefficients.begin(), P_coefficients.end());
MatrixXf one;
MatrixXf two;
MatrixXf A;
MatrixXf b;
MatrixXf rhs;
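// The two expensive, independent matrix products below are evaluated concurrently in
// separate OpenMP sections: one section forms A = P * V_hat_h, the weighted factor
// `one` = A^t * Omega and the right-hand side, while the other section (outside this
// hunk) presumably builds the regularisation term `two`; the results are combined after the region.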
#pragma omp parallel sections num_threads(2)
{
......@@ -382,6 +380,7 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi_parallel(
{
A = P * V_hat_h; // camera matrix times the basis
one = A.transpose() * Omega.asDiagonal();
rhs = -one * b; // It's -A^t*Omega^t*b, but we don't need to transpose Omega, since it's a diagonal matrix, and Omega^t = Omega.
}
#pragma omp section
......@@ -392,12 +391,8 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi_parallel(
}
const MatrixXf AtOmegaAReg = one * A + two;
const MatrixXf rhs = -one * b; // It's -A^t*Omega^t*b, but we don't need to transpose Omega, since it's a diagonal matrix, and Omega^t = Omega.
const VectorXf c_s = AtOmegaAReg.colPivHouseholderQr().solve(rhs);
t2 = std::chrono::high_resolution_clock::now();
std::cout << "matrix xf " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << "ms" << std::endl;
return std::vector<float>(c_s.data(), c_s.data() + c_s.size());
};
#endif
......
......@@ -141,6 +141,7 @@ inline std::pair<cv::Mat, cv::Mat> render(
// bool enable_texturing = false; Maybe re-add later, not sure
// take a cv::Mat texture instead and convert to Texture internally? no, we don't want to recreate mipmap levels on each render() call.
auto t1 = std::chrono::high_resolution_clock::now();
assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
// another assert: If cv::Mat texture != empty, then we need texcoords?
......@@ -177,77 +178,102 @@ inline std::pair<cv::Mat, cv::Mat> render(
// Prepare the rasterisation stage.
// For every vertex/tri:
vector<detail::TriangleToRasterize> triangles_to_raster;
for (const auto& tri_indices : mesh.tvi) {
// Todo: Split this whole stuff up. Make a "clip" function, ... rename "processProspective..".. what is "process"... get rid of "continue;"-stuff by moving stuff inside process...
// classify vertices visibility with respect to the planes of the view frustum
// we're in clip-coords (NDC), so just check if outside [-1, 1] x ...
// Actually we're in clip-coords and it's not the same as NDC. We're only in NDC after the division by w.
// We should do the clipping in clip-coords though. See http://www.songho.ca/opengl/gl_projectionmatrix.html for more details.
// However, when comparing against w_c below, we might run into the trouble of the sign again in the affine case.
// 'w' is always positive, as it is -z_camspace, and all z_camspace are negative.
unsigned char visibility_bits[3];
for (unsigned char k = 0; k < 3; k++)
{
visibility_bits[k] = 0;
float x_cc = clipspace_vertices[tri_indices[k]].position[0];
float y_cc = clipspace_vertices[tri_indices[k]].position[1];
float z_cc = clipspace_vertices[tri_indices[k]].position[2];
float w_cc = clipspace_vertices[tri_indices[k]].position[3];
if (x_cc < -w_cc) // true if outside of view frustum. False if on or inside the plane.
visibility_bits[k] |= 1; // set bit if outside of frustum
if (x_cc > w_cc)
visibility_bits[k] |= 2;
if (y_cc < -w_cc)
visibility_bits[k] |= 4;
if (y_cc > w_cc)
visibility_bits[k] |= 8;
if (enable_near_clipping && z_cc < -w_cc) // near plane frustum clipping
visibility_bits[k] |= 16;
if (enable_far_clipping && z_cc > w_cc) // far plane frustum clipping
visibility_bits[k] |= 32;
} // if all bits are 0, then it's inside the frustum
// all vertices are not visible - reject the triangle.
if ((visibility_bits[0] & visibility_bits[1] & visibility_bits[2]) > 0)
{
continue;
}
// all vertices are visible - pass the whole triangle to the rasterizer. = All bits of all 3 vertices are 0.
if ((visibility_bits[0] | visibility_bits[1] | visibility_bits[2]) == 0)
{
boost::optional<detail::TriangleToRasterize> t = detail::process_prospective_tri(clipspace_vertices[tri_indices[0]], clipspace_vertices[tri_indices[1]], clipspace_vertices[tri_indices[2]], viewport_width, viewport_height, enable_backface_culling);
if (t) {
triangles_to_raster.push_back(*t);
//#pragma omp target
// {
//#pragma omp parallel for
for(int i = 0; i < mesh.tvi.size(); i++) {
const auto tri_indices = mesh.tvi[i];
// Todo: Split this whole stuff up. Make a "clip" function, ... rename "processProspective..".. what is "process"... get rid of "continue;"-stuff by moving stuff inside process...
// classify vertices visibility with respect to the planes of the view frustum
// we're in clip-coords (NDC), so just check if outside [-1, 1] x ...
// Actually we're in clip-coords and it's not the same as NDC. We're only in NDC after the division by w.
// We should do the clipping in clip-coords though. See http://www.songho.ca/opengl/gl_projectionmatrix.html for more details.
// However, when comparing against w_c below, we might run into the trouble of the sign again in the affine case.
// 'w' is always positive, as it is -z_camspace, and all z_camspace are negative.
unsigned char visibility_bits[3];
for (unsigned char k = 0; k < 3; k++) {
visibility_bits[k] = 0;
float x_cc = clipspace_vertices[tri_indices[k]].position[0];
float y_cc = clipspace_vertices[tri_indices[k]].position[1];
float z_cc = clipspace_vertices[tri_indices[k]].position[2];
float w_cc = clipspace_vertices[tri_indices[k]].position[3];
if (x_cc < -w_cc) // true if outside of view frustum. False if on or inside the plane.
visibility_bits[k] |= 1; // set bit if outside of frustum
if (x_cc > w_cc)
visibility_bits[k] |= 2;
if (y_cc < -w_cc)
visibility_bits[k] |= 4;
if (y_cc > w_cc)
visibility_bits[k] |= 8;
if (enable_near_clipping && z_cc < -w_cc) // near plane frustum clipping
visibility_bits[k] |= 16;
if (enable_far_clipping && z_cc > w_cc) // far plane frustum clipping
visibility_bits[k] |= 32;
} // if all bits are 0, then it's inside the frustum
// all vertices are not visible - reject the triangle.
if ((visibility_bits[0] & visibility_bits[1] & visibility_bits[2]) > 0)
{
continue;
}
continue;
}
// at this moment the triangle is known to be intersecting one of the view frustum's planes
std::vector<detail::Vertex<float>> vertices;
vertices.push_back(clipspace_vertices[tri_indices[0]]);
vertices.push_back(clipspace_vertices[tri_indices[1]]);
vertices.push_back(clipspace_vertices[tri_indices[2]]);
// split the triangle if it intersects the near plane:
if (enable_near_clipping)
{
vertices = detail::clip_polygon_to_plane_in_4d(vertices, glm::tvec4<float>(0.0f, 0.0f, -1.0f, -1.0f)); // "Normal" (or "4D hyperplane") of the near-plane. I tested it and it works like this but I'm a little bit unsure because Songho says the normal of the near-plane is (0,0,-1,1) (maybe I have to switch around the < 0 checks in the function?)
}
// triangulation of the polygon formed of vertices array
if (vertices.size() >= 3)
{
for (unsigned char k = 0; k < vertices.size() - 2; k++)
// all vertices are visible - pass the whole triangle to the rasterizer. = All bits of all 3 vertices are 0.
if ((visibility_bits[0] | visibility_bits[1] | visibility_bits[2]) == 0)
{
boost::optional<detail::TriangleToRasterize> t = detail::process_prospective_tri(vertices[0], vertices[1 + k], vertices[2 + k], viewport_width, viewport_height, enable_backface_culling);
if (t) {
boost::optional<detail::TriangleToRasterize> t =
detail::process_prospective_tri(clipspace_vertices[tri_indices[0]],
clipspace_vertices[tri_indices[1]],
clipspace_vertices[tri_indices[2]],
viewport_width,
viewport_height,
enable_backface_culling);
if (t)
{
triangles_to_raster.push_back(*t);
}
continue;
}
// at this moment the triangle is known to be intersecting one of the view frustum's planes
std::vector<detail::Vertex<float>> vertices;
vertices.push_back(clipspace_vertices[tri_indices[0]]);
vertices.push_back(clipspace_vertices[tri_indices[1]]);
vertices.push_back(clipspace_vertices[tri_indices[2]]);
// split the triangle if it intersects the near plane:
if (enable_near_clipping)
{
vertices = detail::clip_polygon_to_plane_in_4d(vertices,
glm::tvec4<float>(0.0f,
0.0f,
-1.0f,
-1.0f)); // "Normal" (or "4D hyperplane") of the near-plane. I tested it and it works like this but I'm a little bit unsure because Songho says the normal of the near-plane is (0,0,-1,1) (maybe I have to switch around the < 0 checks in the function?)
}
// triangulation of the polygon formed of vertices array
if (vertices.size() >= 3)
{
for (unsigned char k = 0; k < vertices.size() - 2; k++)
{
boost::optional<detail::TriangleToRasterize> t = detail::process_prospective_tri(vertices[0],
vertices[1 + k],
vertices[2 + k],
viewport_width,
viewport_height,
enable_backface_culling);
if (t)
{
triangles_to_raster.push_back(*t);
}
}
}
}
}
// }
// Fragment/pixel shader: Colour the pixel values
for (const auto& tri : triangles_to_raster) {
detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
}
auto t2 = std::chrono::high_resolution_clock::now();
auto final_timing = std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count();
printf("Tri %lu %lld ms\n", triangles_to_raster.size(), final_timing);
return std::make_pair(colourbuffer, depthbuffer);
};
......
......@@ -35,6 +35,8 @@
#include <atomic>
#include <unistd.h>
#include <glm/gtx/rotate_vector.hpp>
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
......@@ -49,6 +51,8 @@ using namespace std;
namespace eos {
namespace video {
// TODO: maybe move video iterator here.. or remove fil.
/**
* @brief Computes the variance of laplacian of the given image or patch.
*
......@@ -71,6 +75,374 @@ double variance_of_laplacian(const cv::Mat& image)
return focus_measure;
};
class ReconstructionVideoWriter
{
public:
std::unique_ptr<std::thread> frame_buffer_worker;
boost::property_tree::ptree settings;
ReconstructionVideoWriter() {};
ReconstructionVideoWriter(std::string video_file,
fitting::ReconstructionData reconstruction_data,
boost::property_tree::ptree settings) {
// TODO: make the codec configurable via settings; parsing a FOURCC string would take a bit of work, but is not impossible.
int codec = CV_FOURCC('a','v','c','1');
fs::path output_file_path = settings.get<fs::path>("output.file_path", "/tmp/video.mp4");
// Remove the output video file if it exists; OpenCV does not overwrite by default, so writing would stop if we didn't.
if(fs::exists(output_file_path)) {
fs::remove(output_file_path);
}
fps = settings.get<int>("output.fps", 30);
show_video = settings.get<bool>("output.show_video", false);
wireframe = settings.get<bool>("output.wireframe", false);
landmarks = settings.get<bool>("output.landmarks", false);
float merge_isomap_face_angle = settings.get<float>("output.merge_isomap_face_angle", 60.f);
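// Example (assumed) [output] section matching the reads above (values shown are the code defaults):
//   [output]
//   file_path = /tmp/video.mp4
//   fps = 30
//   show_video = false
//   wireframe = false
//   landmarks = false
//   merge_isomap_face_angle = 60.0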
WeightedIsomapAveraging isomap_averaging(merge_isomap_face_angle);
unsigned int num_shape_coeff = reconstruction_data.
morphable_model.get_shape_model().get_num_principal_components();
this->num_shape_coefficients_to_fit = settings.get<unsigned int>(
"frames.num_shape_coefficients_to_fit", num_shape_coeff);
// Open source video file.
std::ifstream file(video_file);
std::cout << "Opening video: " << video_file << std::endl;
if (!file.is_open()) {
throw std::runtime_error("Error opening given file: " + video_file);
}
cv::VideoCapture tmp_cap(video_file); // open video file
if (!tmp_cap.isOpened()) {
throw std::runtime_error("Could not play video");
}
cap = tmp_cap;
Mat frame;
// reset total frames, just to make sure.
total_frames = 0;
// Get the first frame to see, sometimes frames are empty at the start, keep reading until we hit one.
while(frame.empty()) {
cap.read(frame);
total_frames++;
}
std::cout << frame.size() << std::endl;
std::cout << frame.cols << std::endl;
std::cout << frame.rows << std::endl;
// Take over the size of the original video or take width / height from the settings.
int frame_width = settings.get<int>("frames.width", frame.cols);
int frame_height = settings.get<int>("frames.height", frame.rows);
Size frame_size = Size(frame_width, frame_height);
// Initialize writer with given output file
VideoWriter tmp_writer(output_file_path.string(), codec, fps, frame_size);
if (!tmp_writer.isOpened()) {
cout << "Could not open the output video for write: " << output_file_path << endl;
throw std::runtime_error("Could open output video");
}
writer = tmp_writer;
this->reconstruction_data = reconstruction_data;
};
/**
* Generate a new keyframe for the given frame: fit pose, expression and texture
* using the current PCA shape coefficients and render the reconstruction into it.
*
* @param frame
* @return Keyframe
*/
Keyframe generate_new_keyframe(cv::Mat frame) {
int frame_height = frame.rows;
int frame_width = frame.cols;
// Reached the end of the landmarks (only applicable for annotated videos).
// Check this before indexing into the landmark list:
if (reconstruction_data.landmark_list.size() <= total_frames) {
std::cout << "Reached end of landmarks (" <<
reconstruction_data.landmark_list.size() <<
"/" << total_frames << ")" << std::endl;
return Keyframe();
}
// Get the necessary information for reconstruction.
auto landmarks = reconstruction_data.landmark_list[total_frames];
auto landmark_mapper = reconstruction_data.landmark_mapper;
auto blendshapes = reconstruction_data.blendshapes;
auto morphable_model = reconstruction_data.morphable_model;
if (pca_shape_coefficients.empty()) {
pca_shape_coefficients.resize(num_shape_coefficients_to_fit);
}
// make a new one
std::vector<float> blend_shape_coefficients;
vector<cv::Vec4f> model_points;
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
auto mesh = fitting::generate_new_mesh(
morphable_model,
blendshapes,
pca_shape_coefficients, // current pca_coeff will be the mean for the first iterations.
blend_shape_coefficients);
// Will yield model_points, vertex_indices and image_points
// todo: should this function not come from mesh?
core::get_mesh_coordinates(landmarks, landmark_mapper, mesh, model_points, vertex_indices, image_points);
auto current_pose = fitting::estimate_orthographic_projection_linear(
image_points, model_points, true, frame_height);
fitting::RenderingParameters rendering_params(current_pose, frame_width, frame_height);
Mat affine_cam = fitting::get_3x4_affine_camera_matrix(rendering_params, frame_width, frame_height);
auto blendshapes_as_basis = morphablemodel::to_matrix(blendshapes);
auto current_pca_shape = morphable_model.get_shape_model().draw_sample(pca_shape_coefficients);
blend_shape_coefficients = fitting::fit_blendshapes_to_landmarks_nnls(
blendshapes, current_pca_shape, affine_cam, image_points, vertex_indices);
auto merged_shape = current_pca_shape +
blendshapes_as_basis * Eigen::Map<const Eigen::VectorXf>(blend_shape_coefficients.data(),
blend_shape_coefficients.size());
auto merged_mesh = morphablemodel::sample_to_mesh(
merged_shape,
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
auto R = rendering_params.get_rotation();
// Render the model in a separate window using the estimated pose, shape and merged texture:
Mat rendering;
// render needs 4 channels 8 bits image, needs a two step conversion.
cvtColor(frame, rendering, CV_BGR2BGRA);
// make sure the image is CV_8UC4, maybe do check first?
rendering.convertTo(rendering, CV_8UC4);
Mat isomap = render::extract_texture(merged_mesh, affine_cam, frame);
Mat merged_isomap = isomap_averaging.add_and_merge(isomap);
Mat frontal_rendering;
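// Build the model-view matrix for the frontal rendering by rotating `angle` degrees about
// the y-axis. Caveat (assumption): depending on the GLM version/configuration, glm::rotate
// expects radians, in which case glm::radians(angle) would be needed here.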
glm::mat4 modelview_frontal = glm::rotate(glm::mat4(1.0f), angle, glm::vec3(0.0f, 1.0f, 0.0f));
std::cout << "frontal rendering angle: " << angle << std::endl;
core::Mesh neutral_expression = morphablemodel::sample_to_mesh(
morphable_model.get_shape_model().draw_sample(pca_shape_coefficients),
morphable_model.get_color_model().get_mean(),
morphable_model.get_shape_model().get_triangle_list(),
morphable_model.get_color_model().get_triangle_list(),
morphable_model.get_texture_coordinates()
);
// angle -= 10.0;
std::tie(frontal_rendering, std::ignore) = render::render(
neutral_expression,
modelview_frontal,
glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f),
512, 512,
render::create_mipmapped_texture(merged_isomap),
true,
false,
false
);
// cv::imshow("rendering", frontal_rendering);
// cv::waitKey(0);
fitting::FittingResult fitting_result;
fitting_result.rendering_parameters = rendering_params;
fitting_result.landmarks = landmarks;
fitting_result.mesh = mesh;
// output this?
cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame(face_roi)));
return Keyframe(frame_laplacian_score, rendering, fitting_result, total_frames);
}
/**
* Checks if the writer is still open. The writer thread will close if the last frame is processed.
*
* @return bool
*/
bool is_writing() {
return writer.isOpened();
}
/**
* Get a new frame from the video source.
*
* @return cv::Mat
*/
Mat __get_new_frame() {
// Get a new frame from the capture source.
Mat frame;
cap.read(frame);
return frame;
};
/**
* Stop by releasing the VideoWriter and the VideoCapture.
*/
void __stop() {
writer.release();
cap.release();
};
/**
* Start a worker thread that iterates over the source video, reconstructing each
* frame and writing it to the output video.
*/
void start() {
// start a thread that keeps reading frames from the video and writing the reconstructions:
frame_buffer_worker = std::make_unique<std::thread>(&ReconstructionVideoWriter::video_iterator, this);
frame_buffer_worker.get()->detach();
}
/**
* Iterate through the video until the very last frame, writing each reconstructed frame to the output.
*/
void video_iterator() {
// Go and fill the buffer with more frames while we are reconstructing.
while (next()) { }
// stop the video
__stop();
std::cout << "Video stopped at: " << total_frames;
};
/**
* Read the next frame from the source video, reconstruct it into a keyframe,
* optionally draw the wireframe / landmarks, and write it to the output video.
*
* @return bool False once no more frames can be read.
*/
bool next() {
Mat frame = __get_new_frame();
if (frame.empty()) {
return false;
}
// makes a copy of the frame
Keyframe keyframe = generate_new_keyframe(frame);
if(wireframe) {
draw_wireframe(keyframe.frame, keyframe);
}
if(landmarks) {
draw_landmarks(keyframe.frame, keyframe);
}
writer.write(keyframe.frame);
total_frames++;
// if (show_video) {
// std::cout << "show video" << std::endl;
// cv::imshow("video", frame);
// cv::waitKey(static_cast<int>((1.0 / fps) * 1000.0));
// }
return true;
}
/**
* Update the PCA shape coefficients used for subsequent frame reconstructions.
*
* @param pca_shape_coefficients
*/
void update_reconstruction_coeff(std::vector<float> pca_shape_coefficients) {
this->pca_shape_coefficients = pca_shape_coefficients;
}
/**
* Draw the fitted landmarks of the given keyframe into the frame.
*
* @param frame Image to draw into.
* @param keyframe Keyframe whose landmarks are drawn.
*/
void draw_landmarks(Mat &frame, Keyframe keyframe) {
auto landmarks = keyframe.fitting_result.landmarks;
for (auto &&lm : landmarks) {
cv::rectangle(frame, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
);
}
}
/**
* Draws the given mesh as wireframe into the image.
*
* It does backface culling, i.e. draws only vertices in CCW order.
*
* @param[in] frame An image to draw into.
* @param[in] keyframe Keyframe providing the mesh and the model-view / projection matrices to draw with.
* @param[in] colour Colour of the mesh to be drawn.
*/
void draw_wireframe(Mat &frame, Keyframe keyframe, cv::Scalar colour = cv::Scalar(0, 255, 0, 255)) {
// make a copy, we don't want to alter the original
auto mesh = keyframe.fitting_result.mesh;
auto modelview = keyframe.fitting_result.rendering_parameters.get_modelview();
auto projection = keyframe.fitting_result.rendering_parameters.get_projection();
auto viewport = fitting::get_opencv_viewport(frame.cols, frame.rows);
for (const auto& triangle : mesh.tvi)
{
const auto p1 = glm::project({ mesh.vertices[triangle[0]][0], mesh.vertices[triangle[0]][1], mesh.vertices[triangle[0]][2] }, modelview, projection, viewport);
const auto p2 = glm::project({ mesh.vertices[triangle[1]][0], mesh.vertices[triangle[1]][1], mesh.vertices[triangle[1]][2] }, modelview, projection, viewport);
const auto p3 = glm::project({ mesh.vertices[triangle[2]][0], mesh.vertices[triangle[2]][1], mesh.vertices[triangle[2]][2] }, modelview, projection, viewport);
if (eos::render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3))) {
cv::line(frame, cv::Point(p1.x, p1.y), cv::Point(p2.x, p2.y), colour);
cv::line(frame, cv::Point(p2.x, p2.y), cv::Point(p3.x, p3.y), colour);
cv::line(frame, cv::Point(p3.x, p3.y), cv::Point(p1.x, p1.y), colour);
}
}
};
int get_frame_number() {
return total_frames;
}
private:
float angle = -45.0;
int total_frames = 0;
int num_shape_coefficients_to_fit = 0;
// merge all triangles that are facing <60° towards the camera
WeightedIsomapAveraging isomap_averaging;
// video options
bool show_video = false;
bool wireframe = false;
bool landmarks = false;
int fps;
cv::VideoCapture cap;
cv::VideoWriter writer;
// latest pca_shape_coefficients
std::vector<float> pca_shape_coefficients;
eos::fitting::ReconstructionData reconstruction_data;
};
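// Minimal usage sketch for ReconstructionVideoWriter (assumed caller-side setup):
//   boost::property_tree::ptree settings;  // e.g. filled via boost::property_tree::ini_parser::read_ini
//   eos::video::ReconstructionVideoWriter writer("input.mp4", reconstruction_data, settings);
//   writer.start();                                              // worker thread reconstructs and writes frames
//   writer.update_reconstruction_coeff(pca_shape_coefficients);  // feed in refined coefficients as they arrive
//   while (writer.is_writing()) { /* wait for the last frame */ }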
/**
*
*
......@@ -81,58 +453,53 @@ class BufferedVideoIterator
public:
int width;
int height;
Mat last_frame;
int last_frame_number;
bool reached_eof;
Keyframe last_keyframe;
std::unique_ptr<std::thread> frame_buffer_worker;
BufferedVideoIterator() {};
// TODO: build support for setting the amount of max_frames in the buffer.
BufferedVideoIterator(std::string videoFilePath, fitting::ReconstructionData reconstruction_data, boost::property_tree::ptree settings) {
std::ifstream file(videoFilePath);
std::cout << "Opening video: " << videoFilePath << std::endl;
BufferedVideoIterator(std::string video_file, fitting::ReconstructionData reconstruction_data, boost::property_tree::ptree settings) {
std::ifstream file(video_file);
std::cout << "Opening video: " << video_file << std::endl;
if (!file.is_open()) {
throw std::runtime_error("Error opening given file: " + videoFilePath);
throw std::runtime_error("Error opening given file: " + video_file);
}
cv::VideoCapture tmp_cap(videoFilePath); // open video file
cv::VideoCapture tmp_cap(video_file); // open video file
if (!tmp_cap.isOpened()) {
throw std::runtime_error("Could not play video");
}
cap = tmp_cap;
this->reconstruction_data = reconstruction_data;
// copy settings from gathered from a .ini file
min_frames = settings.get<int>("video.min_frames", 5);
drop_frames = settings.get<int>("video.drop_frames", 0);
skip_frames = settings.get<int>("video.skip_frames", 0);
frames_per_bin = settings.get<unsigned int>("video.frames_per_bin", 2);
// Initialize settings
min_frames = settings.get<int>("frames.min_frames", 5);
drop_frames = settings.get<int>("frames.drop_frames", 0);
skip_frames = settings.get<int>("frames.skip_frames", 0);
frames_per_bin = settings.get<unsigned int>("frames.frames_per_bin", 2);
unsigned int num_shape_coeff = reconstruction_data.morphable_model.get_shape_model().get_num_principal_components();
this->reconstruction_data = reconstruction_data;
unsigned int num_shape_coeff = reconstruction_data.
morphable_model.get_shape_model().get_num_principal_components();
this->num_shape_coefficients_to_fit = settings.get<unsigned int>(
"video.num_shape_coefficients_to_fit", num_shape_coeff);
"frames.num_shape_coefficients_to_fit", num_shape_coeff);
// initialize bins
bins.resize(num_yaw_bins);
// reset frame count
// reset frame count (to be sure)
n_frames = 0;
total_frames = 0;
std::cout << "Settings: " << std::endl <<
"min_frames: " << min_frames << std::endl <<
"drop_frames: " << drop_frames << std::endl <<
"frames_per_bin: " << frames_per_bin << std::endl <<
"num_shape_coefficients_to_fit: " << num_shape_coefficients_to_fit << std::endl;
std::cout << "total frames in video: " << cap.get(CV_CAP_PROP_FRAME_COUNT) << std::endl;
// std::cout << "Settings: " << std::endl <<
// "min_frames: " << min_frames << std::endl <<
// "drop_frames: " << drop_frames << std::endl <<
// "frames_per_bin: " << frames_per_bin << std::endl <<
// "num_shape_coefficients_to_fit: " << num_shape_coefficients_to_fit << std::endl;
}
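// For reference, a minimal .ini settings sketch matching the keys read above and in
// the output code below (section/key names follow the get<> calls; the values here
// are placeholders, not project defaults):
//
//   [frames]
//   min_frames = 5
//   drop_frames = 0
//   skip_frames = 0
//   frames_per_bin = 2
//   num_shape_coefficients_to_fit = 63
//
//   [output]
//   output_path = /tmp
//   merge_isomap_face_angle = 60.0
//   save_video = false
//   show_video = false
//   make_video = false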
bool is_playing() {
......@@ -323,7 +690,11 @@ public:
__stop();
std::cout << "Video stopped at:" << total_frames << " frames - in buff " << n_frames << std::endl;
}
};
Keyframe get_last_keyframe() {
return last_keyframe;
};
/**
*
......@@ -341,25 +712,24 @@ public:
// keep the last frame here, so we can play a video subsequently:
last_frame_number = total_frames;
last_frame = frame;
// TODO: only calculate lapscore within the bounding box of the face.
auto keyframe = generate_new_keyframe(frame);
bool frame_added = try_add(keyframe);
// Added or not, we put this as the last keyframe
last_keyframe = keyframe;
if(frame_added) {
n_frames++;
// Setting that the buffer has changed:
frame_buffer_changed = true;
std::cout << "frame added(" << total_frames << "): " << keyframe.score << ", " << keyframe.yaw_angle << std::endl;
}
total_frames++;
// fill up the buffer until we hit the minimum frames we want in the buffer.
if(n_frames < min_frames && total_frames < 30) {
std::cout << "not enough frames yet: " << n_frames << "/" << min_frames << std::endl;
if(n_frames < min_frames) {
return next();
}
......@@ -442,9 +812,12 @@ public:
cap.release();
};
int get_frame_number() {
return total_frames;
}
private:
cv::VideoCapture cap;
std::deque<Keyframe> frame_buffer;
eos::fitting::ReconstructionData reconstruction_data;
using BinContent = std::vector<Keyframe>;
......@@ -458,7 +831,7 @@ private:
unsigned int frames_per_bin;
// TODO: make set-able
// total frames in processed, not persee in buffer (skipped frames)
// total frames in processed, not necessarily in buffer (skipped frames)
int total_frames;
// total frames in buffer
......
......@@ -39,8 +39,13 @@ add_executable(accuracy-evaluation accuracy-evaluation.cpp)
target_link_libraries(accuracy-evaluation eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
target_link_libraries(accuracy-evaluation "$<$<CXX_COMPILER_ID:GNU>:-pthread>$<$<CXX_COMPILER_ID:Clang>:-pthreads>")
add_executable(openmp-test openmp-test.cpp)
target_link_libraries(openmp-test eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
target_link_libraries(openmp-test "$<$<CXX_COMPILER_ID:GNU>:-pthread>$<$<CXX_COMPILER_ID:Clang>:-pthreads>")
# Install targets:
install(TARGETS scm-to-cereal DESTINATION bin)
install(TARGETS bfm-binary-to-cereal DESTINATION bin)
install(TARGETS edgestruct-csv-to-json DESTINATION bin)
install(TARGETS accuracy-evaluation DESTINATION bin)
install(TARGETS openmp-test DESTINATION bin)
......@@ -48,6 +48,7 @@ using eos::core::Landmark;
using eos::core::LandmarkCollection;
using eos::video::BufferedVideoIterator;
using eos::video::WeightedIsomapAveraging;
using eos::video::ReconstructionVideoWriter;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
......@@ -192,8 +193,13 @@ void render_output(
boost::property_tree::ptree settings,
int n_iter) {
std::string output_path = settings.get<std::string>("output.output_path", "/tmp");
float merge_isomap_face_angle = settings.get<float>("output.merge_isomap_face_angle", 60.f);
bool make_video = settings.get<bool>("output.save_video", false);
bool show_video = settings.get<bool>("output.show_video", false);
Mat merged_isomap;
WeightedIsomapAveraging isomap_averaging(60.f); // merge all triangles that are facing <60° towards the camera
WeightedIsomapAveraging isomap_averaging(merge_isomap_face_angle); // merge all triangles that are facing <60° towards the camera
eos::video::PcaCoefficientMerging pca_shape_merging;
auto landmark_mapper = reconstruction_data.landmark_mapper;
......@@ -208,10 +214,9 @@ void render_output(
auto unmodified_frame = frame.clone();
int frame_number = key_frames[i].frame_number;
float yaw_angle = glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
float yaw_angle = key_frames[i].yaw_angle;// glm::degrees(glm::yaw(rendering_paramss[i].get_rotation()));
cv::imwrite("/tmp/eos/" +
std::to_string(frame_number) + "_" + std::to_string(yaw_angle) + ".png", frame);
cv::imwrite("/tmp/eos/" + std::to_string(frame_number) + "_" + std::to_string(yaw_angle) + ".png", frame);
// Extract the texture using the fitted mesh from this frame:
Mat affine_cam = fitting::get_3x4_affine_camera_matrix(rendering_paramss[i], frame.cols, frame.rows);
......@@ -302,8 +307,20 @@ void render_output(
false,
rendering);
cv::imshow("render", rendering);
cv::waitKey(10);
// if(save_video) {
// cv::imshow("render", rendering);
// cv::waitKey(1);
// }
//
// if(show_video) {
// cv::imshow("render", rendering);
// cv::waitKey(1);
// }
//
// eos::video::ReconstructionVideoWriter outputVideo;
// Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), //Acquire input size
// (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
// outputVideo.open(NAME , ex, inputVideo.get(CV_CAP_PROP_FPS),S, true);
}
/**
......@@ -334,8 +351,10 @@ void evaluate_results(
boost::property_tree::ptree settings,
int n_iter) {
WeightedIsomapAveraging isomap_averaging(10.f); // merge all triangles that are facing <60° towards the camera
std::string output_path = settings.get<std::string>("output.output_path", "/tmp");
float merge_isomap_face_angle = settings.get<float>("output.merge_isomap_face_angle", 60.f);
WeightedIsomapAveraging isomap_averaging(merge_isomap_face_angle); // merge all triangles that are facing <60° towards the camera
Mat merged_isomap;
fs::path outputfilebase = annotations[0];
......@@ -377,11 +396,11 @@ void evaluate_results(
rendering_params.get_projection(),
fitting::get_opencv_viewport(frame_width, frame_height));
bool eyes_open = isomap_averaging.has_eyes_open(frame, landmarks);
if (!eyes_open) {
continue;
}
// bool eyes_open = isomap_averaging.has_eyes_open(frame, landmarks);
//
// if (!eyes_open) {
// continue;
// }
// cv::imshow("Img", outimg);
// cv::waitKey(0);
......@@ -649,19 +668,21 @@ int main(int argc, char *argv[]) {
// Start getting video frames:
vid_iterator.start();
// vid_writer.start();
// Count the amount of iterations:
int n_iter = 0;
while(vid_iterator.is_playing()) {
auto key_frames = vid_iterator.get_keyframes();
// Generate a sublist of the total available landmark list:
auto landmark_sublist = sample_landmarks(key_frames, landmark_list);
// std::cout << vid_iterator.to_string() << std::endl;
// it makes no sense to update pca_coeff if nothing in the buffer has changed:
if (vid_iterator.has_changed()) {
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
// Fit shape and pose:
auto t1 = std::chrono::high_resolution_clock::now();
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_parallel(
......@@ -681,7 +702,8 @@ int main(int argc, char *argv[]) {
boost::none,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points
fitted_image_points,
settings
);
auto t2 = std::chrono::high_resolution_clock::now();
......@@ -690,62 +712,62 @@ int main(int argc, char *argv[]) {
<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / key_frames.size()
<< "ms)" << std::endl;
vid_iterator.update_reconstruction_coeff(pca_shape_coefficients);
evaluate_results(
key_frames,
rendering_paramss,
meshs,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
annotations,
reconstruction_data,
settings,
n_iter
);
} else {
std::cout << "No reconstruction - buffer did not change" << std::endl;
}
// // Render output:
// render_output(
// evaluate_results(
// key_frames,
// rendering_paramss,
// landmark_sublist,
// meshs,
// pca_shape_coefficients,
// blendshape_coefficients,
// fitted_image_points,
// annotations,
// reconstruction_data,
// vid_iterator.last_frame,
// vid_iterator.last_frame_number,
// settings,
// n_iter
// );
// );
} else {
// std::cout << "No reconstruction - buffer did not change" << std::endl;
}
// Get new frames:
n_iter++;
}
auto key_frames = vid_iterator.get_keyframes();
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
evaluate_results(
key_frames,
rendering_paramss,
meshs,
pca_shape_coefficients,
blendshape_coefficients,
fitted_image_points,
annotations,
reconstruction_data,
settings,
n_iter
);
// vid_writer.__stop();
if(settings.get<bool>("output.make_video", false)) {
ReconstructionVideoWriter vid_writer;
try {
vid_writer = ReconstructionVideoWriter(videofile.string(), reconstruction_data, settings);
} catch(std::runtime_error &e) {
std::cout << e.what() << std::endl;
return EXIT_FAILURE;
}
// Render output:
std::cout << "Waiting for video to be completed..." << std::endl;
vid_writer.update_reconstruction_coeff(pca_shape_coefficients);
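// next() presumably writes one rendered frame per call and returns false once the
// input video is exhausted; print progress against the iterator's total frame count.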
while (vid_writer.next()) {
printf("%d/%d\r", vid_writer.get_frame_number(), vid_iterator.get_frame_number());
}
}
// auto key_frames = vid_iterator.get_keyframes();
// std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
//
// evaluate_results(
// key_frames,
// rendering_paramss,
// meshs,
// pca_shape_coefficients,
// blendshape_coefficients,
// fitted_image_points,
// annotations,
// reconstruction_data,
// settings,
// n_iter
// );
//todo: we could build our final obj here?
......
#include "opencv2/core/core.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <chrono>
#include <cmath>
#include <complex>
#include <iostream>
#include <omp.h>
typedef std::complex<double> complex;
/**
* Calculates distance between two images, returns -1 if dims are not the same.
*
* @param one
* @param two
* @return
*/
float calculate_distance_images(cv::Mat one, cv::Mat two) {
int width = one.cols;
int height = one.rows;
if(width != two.cols || height != two.rows) {
return -1;
}
float distance = 0.0;
// Sum of absolute per-pixel differences (L1 distance) over the whole image:
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
distance += fabs(one.at<uchar>(y, x) - two.at<uchar>(y, x));
}
}
return distance;
}
void cpu(cv::Mat A, cv::Mat Anew) {
int n = A.rows;
int m = A.cols;
float error = 1000;
float tol = 30;
int iter = 0;
int iter_max = 100;
auto t1 = std::chrono::high_resolution_clock::now();
while (error > tol && iter < iter_max)
{
error = 0.0;
#pragma omp parallel num_threads(4)
{
#pragma omp for reduction(max:error)
for (int j = 1; j < n - 1; j++)
{
for (int i = 1; i < m - 1; i++)
{
Anew.at<uchar>(j, i) = 0.25 * (A.at<uchar>(j, i + 1) + A.at<uchar>(j, i - 1)
+ A.at<uchar>(j - 1, i) + A.at<uchar>(j + 1, i));
error = fmax(error, fabs(Anew.at<uchar>(j, i) - A.at<uchar>(j, i)));
}
}
}
iter++;
}
}
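// GPU variant: the same stencil update, but the loop nest is placed in a
// '#pragma omp target' region so that, with a compiler configured for offloading,
// the parallel for can run on an accelerator (it falls back to the host otherwise).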
void gpu(cv::Mat A, cv::Mat Anew) {
int n = A.rows;
int m = A.cols;
float error = 1000;
float tol = 30;
int iter = 0;
int iter_max = 100;
auto t1 = std::chrono::high_resolution_clock::now();
while ( error > tol && iter < iter_max )
{
error = 0.0;
// 'error' is a scalar, and scalars referenced in a target region are firstprivate by
// default (OpenMP 4.5+), so it must be mapped tofrom explicitly; otherwise the host
// copy never changes and the while loop cannot terminate.
#pragma omp target map(tofrom: error)
{
#pragma omp parallel for reduction(max:error)
for (int j = 1; j < n - 1; j++)
{
for (int i = 1; i < m - 1; i++)
{
Anew.at<uchar>(j, i) = 0.25 * (A.at<uchar>(j, i + 1) + A.at<uchar>(j, i - 1)
+ A.at<uchar>(j - 1, i) + A.at<uchar>(j + 1, i));
error = fmax(error, fabs(Anew.at<uchar>(j, i) - A.at<uchar>(j, i)));
}
}
}
// Count iterations on the host, outside the offloaded region.
iter++;
}
}
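// Note: on an actual offload device the cv::Mat headers above would be copied over,
// but their data pointers would still reference host memory. A hypothetical
// gpu_mapped() variant, sketched here for illustration only, instead maps the raw
// pixel buffers explicitly (assuming continuous, single-channel 8-bit images):
void gpu_mapped(cv::Mat A, cv::Mat Anew) {
int n = A.rows;
int m = A.cols;
uchar* a = A.data;
uchar* anew = Anew.data;
float error = 1000;
float tol = 30;
int iter = 0;
int iter_max = 100;
while (error > tol && iter < iter_max)
{
error = 0.0;
// Copy the source buffer to the device; bring the result buffer and 'error' back:
#pragma omp target map(to: a[0:n*m]) map(tofrom: anew[0:n*m], error)
#pragma omp parallel for reduction(max:error)
for (int j = 1; j < n - 1; j++)
{
for (int i = 1; i < m - 1; i++)
{
anew[j * m + i] = 0.25 * (a[j * m + i + 1] + a[j * m + i - 1]
+ a[(j - 1) * m + i] + a[(j + 1) * m + i]);
error = fmax(error, fabs((float)anew[j * m + i] - (float)a[j * m + i]));
}
}
iter++;
}
}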
int main(int argc, char** argv)
{
cv::Mat original = cv::imread("/data/img.jpg");
if (original.empty()) {
std::cerr << "Could not read /data/img.jpg" << std::endl;
return 1;
}
cv::cvtColor(original, original, CV_BGR2GRAY);
cv::Mat gpuImage = original.clone();
cv::Mat gpuImageNew = original.clone();
auto t1 = std::chrono::high_resolution_clock::now();
gpu(gpuImage, gpuImageNew);
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "gpu time: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
<< "ms" << std::endl;
cv::Mat cpuImage = original.clone();
cv::Mat cpuImageNew = original.clone();
t1 = std::chrono::high_resolution_clock::now();
cpu(cpuImage, cpuImageNew);
t2 = std::chrono::high_resolution_clock::now();
float distance = calculate_distance_images(gpuImageNew, cpuImageNew);
std::cout << "cpu time: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
<< "ms" << std::endl;
std::cout << "Distance gpu - cpu: " << distance << std::endl;
cv::imshow("gpu", gpuImageNew);
cv::imshow("cpu", cpuImageNew);
cv::waitKey(0);
}