Commit e861a303 authored by Richard Torenvliet

Add various OpenMP pragmas, along with the changes needed to enable them; a speedup is gained

parent 2e906d10
@@ -113,8 +113,17 @@ set(HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/video/Keyframe.hpp
)
find_package(OpenMP)
if (OPENMP_FOUND)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
endif()
message(STATUS ${OpenMP_CXX_FLAGS})
message(STATUS ${OpenMP_INCLUDE_DIR})
add_library(eos INTERFACE)
target_compile_features(eos INTERFACE ${EOS_CXX_COMPILE_FEATURES})
# Add header includes:
target_include_directories(eos INTERFACE "include")
target_include_directories(eos INTERFACE ${CEREAL_INCLUDE_DIR})
@@ -125,6 +134,7 @@ target_include_directories(eos INTERFACE ${glm_INCLUDE_DIR})
target_include_directories(eos INTERFACE ${nanoflann_INCLUDE_DIR})
target_include_directories(eos INTERFACE ${eigen3_nnls_INCLUDE_DIR})
# Custom target for the library, to make the headers show up in IDEs:
add_custom_target(eos-headers SOURCES ${HEADERS})
source_group(core REGULAR_EXPRESSION include/eos/core/*)
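No OpenMP pragma appears in the hunks shown on this page, so the following is only an illustrative sketch (hypothetical names throughout) of the kind of independent-iteration loop that the flags added above enable:

```cpp
#include <vector>

// Illustrative sketch only: with OpenMP compiled in via the flags above,
// an independent per-element loop parallelizes with a single pragma.
void scale_scores(std::vector<float>& scores, float factor)
{
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(scores.size()); ++i) {
        scores[i] *= factor; // iterations are independent, so this is safe
    }
}
```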
@@ -8,7 +8,7 @@
#include "opencv2/core/core.hpp"
#include "opencv2/opencv.hpp"
#include "eos/morphablemodel/morphablemodel.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/core/Landmark.hpp"
@@ -256,15 +256,13 @@ namespace eos {
* @param[in,out] image_points
*/
template <typename vec2f, typename vec4f>
inline void get_landmark_coordinates(core::LandmarkCollection<vec2f> landmarks,
const core::LandmarkMapper& landmark_mapper,
eos::core::Mesh& mesh,
vector<vector<vec4f>>& model_points,
vector<vector<int>>& vertex_indices,
vector<vector<vec2f>>& image_points) {
vector<vec4f> current_model_points;
vector<int> current_vertex_indices;
vector<vec2f> current_image_points;
std::tuple<vector<vec4f>, vector<int>, vector<vec2f>> get_landmark_coordinates(
core::LandmarkCollection<vec2f> landmarks,
const core::LandmarkMapper& landmark_mapper,
eos::core::Mesh& mesh) {
vector<vec4f> model_points;
vector<int> vertex_indices;
vector<vec2f> image_points;
for (auto &lm: landmarks) {
auto converted_name = landmark_mapper.convert(lm.name);
@@ -280,15 +278,62 @@ namespace eos {
mesh.vertices[vertex_idx].w
);
current_model_points.emplace_back(vertex);
current_vertex_indices.emplace_back(vertex_idx);
current_image_points.emplace_back(lm.coordinates);
model_points.emplace_back(vertex);
vertex_indices.emplace_back(vertex_idx);
image_points.emplace_back(lm.coordinates);
}
return std::make_tuple(model_points, vertex_indices, image_points);
}
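A hypothetical call site for the tuple-returning overload above (assumes the usual cv::Vec2f/cv::Vec4f instantiation and that `landmarks`, `landmark_mapper`, and `mesh` are in scope):

```cpp
// Hypothetical usage of the refactored, tuple-returning overload:
vector<cv::Vec4f> model_points;
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
std::tie(model_points, vertex_indices, image_points) =
    get_landmark_coordinates<cv::Vec2f, cv::Vec4f>(landmarks, landmark_mapper, mesh);
```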
/**
 * Get the mesh coordinates for a set of landmarks and append the results,
 * as one inner vector per call, to the given multi-frame output vectors.
 *
 * @tparam vec2f A 2D float vector type (e.g. cv::Vec2f).
 * @tparam vec4f A 4D float vector type (e.g. cv::Vec4f).
 * @param[in] landmarks The landmarks to look up in the mesh.
 * @param[in] landmark_mapper Mapper from landmark names to vertex indices.
 * @param[in] mesh The mesh whose vertices are queried.
 * @param[in,out] model_points Receives this frame's model-space landmark vertices.
 * @param[in,out] vertex_indices Receives this frame's vertex indices.
 * @param[in,out] image_points Receives this frame's 2D image points.
 */
template <typename vec2f, typename vec4f>
inline void get_landmark_coordinates_inline(core::LandmarkCollection<vec2f> landmarks,
const core::LandmarkMapper& landmark_mapper,
eos::core::Mesh& mesh,
vector<vector<vec4f>>& model_points,
vector<vector<int>>& vertex_indices,
vector<vector<vec2f>>& image_points) {
vector<vec4f> current_model_points;
vector<int> current_vertex_indices;
vector<vec2f> current_image_points;
for (auto &lm: landmarks) {
auto converted_name = landmark_mapper.convert(lm.name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
Vec4f vertex(
mesh.vertices[vertex_idx].x,
mesh.vertices[vertex_idx].y,
mesh.vertices[vertex_idx].z,
mesh.vertices[vertex_idx].w
);
current_model_points.emplace_back(vertex);
current_vertex_indices.emplace_back(vertex_idx);
current_image_points.emplace_back(lm.coordinates);
}
model_points.push_back(current_model_points);
vertex_indices.push_back(current_vertex_indices);
image_points.push_back(current_image_points);
}
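A hypothetical multi-frame call pattern for the `_inline` variant, which appends one inner vector per processed frame (`key_frames` and its members are assumptions, not taken from this diff):

```cpp
// Hypothetical call pattern: one inner vector is appended per frame.
vector<vector<cv::Vec4f>> model_points;
vector<vector<int>> vertex_indices;
vector<vector<cv::Vec2f>> image_points;
for (auto& keyframe : key_frames) {
    get_landmark_coordinates_inline<cv::Vec2f, cv::Vec4f>(
        keyframe.fitting_result.landmarks, landmark_mapper, keyframe.mesh,
        model_points, vertex_indices, image_points);
}
```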
}
}
@@ -93,40 +93,44 @@ public:
// TODO: add support for setting the maximum number of frames (max_frames) in the buffer.
BufferedVideoIterator(std::string videoFilePath, fitting::ReconstructionData reconstruction_data, boost::property_tree::ptree settings) {
std::ifstream file(videoFilePath);
std::cout << "video file path: " << videoFilePath << std::endl;
std::cout << "Opening video: " << videoFilePath << std::endl;
if (!file.is_open()) {
throw std::runtime_error("Error opening given file: " + videoFilePath);
}
cv::VideoCapture tmp_cap(videoFilePath); // open video file
if (!tmp_cap.isOpened()) { // check if we succeeded
if (!tmp_cap.isOpened()) {
throw std::runtime_error("Could not play video");
}
this->cap = tmp_cap;
cap = tmp_cap;
this->reconstruction_data = reconstruction_data;
// copy settings gathered from a .ini file
this->min_frames = settings.get<int>("video.min_frames", 5);
this->drop_frames = settings.get<int>("video.drop_frames", 0);
this->skip_frames = settings.get<int>("video.skip_frames", 0);
this->frames_per_bin = settings.get<unsigned int>("video.frames_per_bin", 2);
this->num_shape_coefficients_to_fit = settings.get<unsigned int>("video.num_shape_coefficients_to_fit", 50);
min_frames = settings.get<int>("video.min_frames", 5);
drop_frames = settings.get<int>("video.drop_frames", 0);
skip_frames = settings.get<int>("video.skip_frames", 0);
frames_per_bin = settings.get<unsigned int>("video.frames_per_bin", 2);
this->reconstruction_data = reconstruction_data;
unsigned int num_shape_coeff = reconstruction_data.morphable_model.get_shape_model().get_num_principal_components();
this->num_shape_coefficients_to_fit = settings.get<unsigned int>(
"video.num_shape_coefficients_to_fit", num_shape_coeff);
// initialize bins
this->bins.resize(num_yaw_bins);
bins.resize(num_yaw_bins);
// reset all
this->n_frames = 0;
this->total_frames = 0;
// reset frame count
n_frames = 0;
total_frames = 0;
std::cout << "Buffered video iter: "
std::cout << "Settings: " << std::endl <<
"min_frames: " << min_frames << std::endl <<
"drop_frames: " << drop_frames << std::endl <<
"frames_per_bin: " << frames_per_bin << std::endl <<
"num_shape_coefficients_to_fit: " << num_shape_coefficients_to_fit << std::endl;
"drop_frames: " << drop_frames << std::endl <<
"frames_per_bin: " << frames_per_bin << std::endl <<
"num_shape_coefficients_to_fit: " << num_shape_coefficients_to_fit << std::endl;
std::cout << "total frames in video: " << cap.get(CV_CAP_PROP_FRAME_COUNT) << std::endl;
}
@@ -136,9 +140,11 @@ public:
}
/**
* Generate a new keyframe containing information about pose and landmarks.
* These are needed to determine if we want the image in the first place.
*
* @param frame
* @return
* @return Keyframe
*/
Keyframe generate_new_keyframe(cv::Mat frame) {
int frame_height = frame.rows;
@@ -153,6 +159,7 @@ public:
return Keyframe();
}
// Get the necessary information for reconstruction.
auto landmarks = reconstruction_data.landmark_list[total_frames];
auto landmark_mapper = reconstruction_data.landmark_mapper;
auto blendshapes = reconstruction_data.blendshapes;
@@ -162,10 +169,6 @@ public:
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
if (num_shape_coefficients_to_fit == 0) {
num_shape_coefficients_to_fit = morphable_model.get_shape_model().get_num_principal_components();
}
// Start with fresh blendshape coefficients for this frame:
std::vector<float> blend_shape_coefficients;
@@ -193,16 +196,6 @@ public:
fitting_result.rendering_parameters = rendering_params;
fitting_result.landmarks = landmarks;
// std::cout << face_roi << " ("<< frame_width << "," << frame_height << ")" << std::endl;
// for (auto &&lm : landmarks) {
// cv::rectangle(
// frame, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
// cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
// );
// }
//
// cv::imshow("frame", frame);
// cv::waitKey(0);
cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame(face_roi)));
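`variance_of_laplacian` itself is not part of this diff; a sketch of the conventional implementation of that blur metric (the actual eos helper may differ):

```cpp
#include "opencv2/opencv.hpp"

// Sketch of the usual variance-of-the-Laplacian sharpness metric:
// convolve with the Laplacian kernel and return the response variance.
// Higher values indicate sharper (less blurred) image content.
double variance_of_laplacian(const cv::Mat& image)
{
    cv::Mat laplacian;
    cv::Laplacian(image, laplacian, CV_64F);
    cv::Scalar mean, stddev;
    cv::meanStdDev(laplacian, mean, stddev);
    return stddev.val[0] * stddev.val[0]; // variance = stddev^2
}
```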
@@ -359,7 +352,7 @@ public:
// Mark that the buffer has changed:
frame_buffer_changed = true;
std::cout << "frame added(" << n_frames << "): " << keyframe.score << ", " << keyframe.yaw_angle << std::endl;
std::cout << "frame added(" << total_frames << "): " << keyframe.score << ", " << keyframe.yaw_angle << std::endl;
}
total_frames++;
@@ -384,8 +377,14 @@ public:
this->pca_shape_coefficients = pca_shape_coefficients;
}
// Converts a given yaw angle to an index in the internal bins vector.
// Assumes 9 bins and 20° intervals.
/**
 * Converts a given yaw angle to an index in the internal bins vector.
 * Assumes 9 bins and 20° intervals.
 *
 * @param yaw_angle Yaw angle in degrees.
 * @return Index of the bin covering the given angle.
 */
static std::size_t angle_to_index(float yaw_angle)
{
if (yaw_angle <= -70.f)
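The rest of the function is cut off in this hunk; a plausible completion under its stated assumptions (9 bins at 20° steps, extremes clamped to the outer bins):

```cpp
// Plausible completion of angle_to_index (assumption; the hunk above is
// cut off after the first branch): clamp the extremes to the outer bins
// and map 20-degree steps in between.
static std::size_t angle_to_index(float yaw_angle)
{
    if (yaw_angle <= -70.f)
        return 0;            // first of the 9 bins
    if (yaw_angle >= 70.f)
        return 8;            // last of the 9 bins
    // -70..70 degrees maps linearly onto bins 1..7 in 20-degree steps.
    return static_cast<std::size_t>((yaw_angle + 90.f) / 20.f);
}
```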
@@ -436,15 +435,14 @@ public:
// todo: move to private if possible.
/**
* Stop by releasing the VideoCapture.
*/
void __stop() {
cap.release();
};
private:
int num_yaw_bins = 9;
unsigned int frames_per_bin;
bool frame_buffer_changed = false;
cv::VideoCapture cap;
std::deque<Keyframe> frame_buffer;
eos::fitting::ReconstructionData reconstruction_data;
@@ -455,6 +453,10 @@ private:
// latest pca_shape_coefficients
std::vector<float> pca_shape_coefficients;
std::size_t num_yaw_bins = 9;
bool frame_buffer_changed = false;
unsigned int frames_per_bin;
// TODO: make settable
// total frames processed, not necessarily in the buffer (some frames are skipped)
int total_frames;
@@ -472,9 +474,6 @@ private:
int drop_frames = 0;
unsigned int num_shape_coefficients_to_fit = 0;
// laplacian threshold
// double laplacian_threshold = 10000000;
};
}
@@ -227,6 +227,33 @@ public:
threshold = static_cast<unsigned char>(alpha_thresh);
};
/**
 * Heuristic check whether both eyes are open, based on the ibug eye
 * landmarks (37-42: right eye, 43-48: left eye).
 *
 * @param frame The frame the landmarks were detected in.
 * @param landmarks Landmark collection with ibug-style numeric names.
 * @return True if both eyes are considered open (currently always true).
 */
bool has_eyes_open(cv::Mat frame, core::LandmarkCollection<Vec2f> landmarks) {
std::vector<cv::Vec2f> right_eye;
std::vector<cv::Vec2f> left_eye;
for(auto &&lm: landmarks) {
int landmark_index = std::stoi(lm.name);
if(landmark_index >= 37 && landmark_index <= 42) {
right_eye.push_back(lm.coordinates);
}
if(landmark_index >= 43 && landmark_index <= 48) {
left_eye.push_back(lm.coordinates);
}
}
cv::Rect right_box = cv::boundingRect(right_eye);
cv::Rect left_box = cv::boundingRect(left_eye);

// Note: the bounding boxes are computed but not yet used;
// every frame is currently accepted as having open eyes.
return true;
}
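Since the check currently accepts every frame, here is a sketch of one way it could be finished, comparing each eye box's height-to-width ratio against a cutoff (the 0.2 value is an assumption, loosely following common eye-aspect-ratio heuristics):

```cpp
// Hypothetical completion: a very flat eye bounding box (height much
// smaller than width) suggests a closed eye. The threshold is a guess.
bool boxes_suggest_open_eyes(const cv::Rect& right_box, const cv::Rect& left_box)
{
    const float min_aspect = 0.2f; // assumed cutoff, not from this commit
    const float right_aspect = static_cast<float>(right_box.height) / right_box.width;
    const float left_aspect = static_cast<float>(left_box.height) / left_box.width;
    return right_aspect > min_aspect && left_aspect > min_aspect;
}
```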
/**
* @brief Merges the given new isomap with all previously processed isomaps.
*
@@ -18,8 +18,8 @@
*/
#include "eos/core/landmark_utils.hpp"
#include "eos/render/render.hpp"
#include "eos/morphablemodel/morphablemodel.hpp"
#include "eos/morphablemodel/blendshape.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"
#include "eos/fitting/fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
@@ -213,9 +213,6 @@ void render_output(
cv::imwrite("/tmp/eos/" +
std::to_string(frame_number) + "_" + std::to_string(yaw_angle) + ".png", frame);
int frame_width = frame.cols;
int frame_height = frame.rows;
// Extract the texture using the fitted mesh from this frame:
Mat affine_cam = fitting::get_3x4_affine_camera_matrix(rendering_paramss[i], frame.cols, frame.rows);
Mat isomap = render::extract_texture(
@@ -366,22 +363,28 @@ void evaluate_results(
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(
rendering_params, frame_width, frame_height);
// for (auto &&lm : landmarks) {
// cv::rectangle(
// outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
// cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
// );
// }
// // Draw the fitted mesh as wireframe, and save the image:
// draw_wireframe(
// outimg,
// meshs[i],
// rendering_params.get_modelview(),
// rendering_params.get_projection(),
// fitting::get_opencv_viewport(frame_width, frame_height));
//
for (auto &&lm : landmarks) {
cv::rectangle(
outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f),
cv::Point2f(lm.coordinates[0], lm.coordinates[1] + 2.0f), {255, 0, 0}
);
}
// Draw the fitted mesh as wireframe, and save the image:
draw_wireframe(
outimg,
meshs[i],
rendering_params.get_modelview(),
rendering_params.get_projection(),
fitting::get_opencv_viewport(frame_width, frame_height));
bool eyes_open = isomap_averaging.has_eyes_open(frame, landmarks);
if (!eyes_open) {
continue;
}
// cv::imshow("Img", outimg);
// cv::waitKey(1);
// cv::waitKey(0);
// Draw the loaded landmarks:
Mat isomap = render::extract_texture(meshs[i], affine_from_ortho, frame);
@@ -660,7 +663,8 @@ int main(int argc, char *argv[]) {
if (vid_iterator.has_changed()) {
std::cout << "Going to reconstruct with " << key_frames.size() << " images."<< std::endl;
// Fit shape and pose:
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_2(
auto t1 = std::chrono::high_resolution_clock::now();
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi_parallel(
morphable_model,
blendshapes,
key_frames,
@@ -680,6 +684,12 @@ int main(int argc, char *argv[]) {
fitted_image_points
);
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "Reconstruction took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()
<< "ms, mean(" << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() / key_frames.size()
<< "ms)" << std::endl;
vid_iterator.update_reconstruction_coeff(pca_shape_coefficients);
evaluate_results(