Commit b4d3ea48 authored by Richard Torenvliet

Add a way of rendering the output on the last frame; duplicate the fitting function for now so it can be improved and merged back more easily later.
parent e94b6716
@@ -23,7 +23,7 @@ option(EOS_BUILD_EXAMPLES "Build the example applications." ON)
message(STATUS "EOS_BUILD_EXAMPLES: ${EOS_BUILD_EXAMPLES}")
option(EOS_BUILD_CERES_EXAMPLE "Build the fit-model-ceres example (requires Ceres)." OFF)
message(STATUS "EOS_BUILD_CERES_EXAMPLE: ${EOS_BUILD_CERES_EXAMPLE}")
option(EOS_BUILD_UTILS "Build utility applications." OFF)
option(EOS_BUILD_UTILS "Build utility applications." ON)
message(STATUS "EOS_BUILD_UTILS: ${EOS_BUILD_UTILS}")
option(EOS_BUILD_DOCUMENTATION "Build the library documentation." OFF)
message(STATUS "EOS_BUILD_DOCUMENTATION: ${EOS_BUILD_DOCUMENTATION}")
@@ -97,6 +97,7 @@ set(HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/fitting.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/ceres_nonlinear.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/RenderingParameters.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/ReconstructionData.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/utils.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/render.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/render_affine.hpp
@@ -155,9 +156,7 @@ if(EOS_BUILD_EXAMPLES)
add_subdirectory(examples)
endif()
if(EOS_BUILD_UTILS)
add_subdirectory(utils)
endif()
add_subdirectory(utils)
if(EOS_BUILD_DOCUMENTATION)
add_subdirectory(doc)
@@ -153,9 +153,10 @@ namespace eos {
*/
template<typename vec2f>
std::pair <std::vector<core::LandmarkCollection<vec2f>>, std::vector<std::string>>
load_annotations(
std::vector<std::string>annotations,
core::LandmarkMapper mapper, morphablemodel::MorphableModel morphable_model, bool read_from_file = false) {
load_annotations(std::vector<std::string>annotations,
core::LandmarkMapper mapper,
morphablemodel::MorphableModel morphable_model,
bool read_from_file = false) {
std::vector<core::LandmarkCollection<vec2f>> landmark_collection_list;
// load file names from one file.
@@ -180,13 +181,113 @@
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
for (int i = 0; i < landmarks.size(); ++i) {
image_points.emplace_back(landmarks[i]);
}
}
landmark_collection_list.push_back(image_points);
}
return std::make_pair(landmark_collection_list, annotations);
}
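For illustration only (not part of the commit): a minimal call sketch, assuming landmark_mapper and morphable_model are already loaded (e.g. via morphablemodel::load_model) and that the call site can see this helper without extra namespace qualification.

// Sketch: parse the given annotation files and obtain one landmark collection per frame.
std::vector<std::string> annotation_files = { "frame_000.pts", "frame_001.pts" }; // hypothetical names
auto annotations_and_files = load_annotations<cv::Vec2f>(
    annotation_files, landmark_mapper, morphable_model, /*read_from_file=*/false);
const auto& per_frame_landmarks = annotations_and_files.first; // one LandmarkCollection per frame
annotation_files = annotations_and_files.second;               // the file list that was actually used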
/**
 * Sub-selects the landmarks for which the landmark mapper defines a mapping to a 3DMM vertex,
 * and appends the corresponding model points, vertex indices and image points of this frame
 * to the given output vectors.
 *
 * @tparam vec2f A 2D vector type, e.g. cv::Vec2f.
 * @tparam vec4f A 4D vector type, e.g. cv::Vec4f.
 * @param[in] landmarks The detected 2D landmarks of the frame.
 * @param[in] landmark_mapper A mapper from landmark names to 3DMM vertex indices.
 * @param[in] mesh The current mesh estimate to take the model points from.
 * @param[out] model_points The 3D model points of the mapped landmarks, one inner vector per frame.
 * @param[out] vertex_indices The corresponding vertex indices, one inner vector per frame.
 * @param[out] image_points The corresponding 2D landmark coordinates, one inner vector per frame.
 */
template <typename vec2f, typename vec4f>
inline void subselect_landmarks(const core::LandmarkCollection<vec2f>& landmarks,
const core::LandmarkMapper& landmark_mapper,
const eos::core::Mesh& mesh,
vector<vector<vec4f>>& model_points,
vector<vector<int>>& vertex_indices,
vector<vector<vec2f>>& image_points) {
// Use the template vector types for the per-frame buffers so the function works for any vec2f/vec4f:
vector<vec4f> current_model_points;
vector<int> current_vertex_indices;
vector<vec2f> current_image_points;
for (const auto& lm : landmarks) {
auto converted_name = landmark_mapper.convert(lm.name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
vec4f vertex(
mesh.vertices[vertex_idx].x,
mesh.vertices[vertex_idx].y,
mesh.vertices[vertex_idx].z,
mesh.vertices[vertex_idx].w
);
current_model_points.emplace_back(vertex);
current_vertex_indices.emplace_back(vertex_idx);
current_image_points.emplace_back(lm.coordinates);
}
model_points.push_back(current_model_points);
vertex_indices.push_back(current_vertex_indices);
image_points.push_back(current_image_points);
}
/**
 * Multi-frame version of subselect_landmarks(): for every frame, sub-selects the landmarks
 * for which the landmark mapper defines a mapping to a 3DMM vertex, and fills the per-frame
 * model points, vertex indices and image points.
 *
 * @tparam vec2f A 2D vector type, e.g. cv::Vec2f.
 * @tparam vec4f A 4D vector type, e.g. cv::Vec4f.
 * @param[in] landmarks The detected 2D landmarks, one collection per frame.
 * @param[in] landmark_mapper A mapper from landmark names to 3DMM vertex indices.
 * @param[in] meshes The current mesh estimate of each frame.
 * @param[out] model_points The 3D model points of the mapped landmarks, one inner vector per frame.
 * @param[out] vertex_indices The corresponding vertex indices, one inner vector per frame.
 * @param[out] image_points The corresponding 2D landmark coordinates, one inner vector per frame.
 */
template <typename vec2f, typename vec4f>
inline void subselect_landmarks_multi(const std::vector<core::LandmarkCollection<vec2f>>& landmarks,
const core::LandmarkMapper& landmark_mapper,
const vector<eos::core::Mesh>& meshes,
vector<vector<vec4f>>& model_points,
vector<vector<int>>& vertex_indices,
vector<vector<vec2f>>& image_points) {
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM),
// and get the corresponding model points (mean if given no initial coeffs, from the computed shape otherwise):
for (std::size_t i = 0; i < landmarks.size(); i++) {
// Per-frame buffers, declared inside the loop so each frame only contains its own correspondences:
vector<vec4f> current_model_points;
vector<int> current_vertex_indices;
vector<vec2f> current_image_points;
const auto& landmark_list = landmarks[i];
const auto& mesh = meshes[i];
for (const auto& lm : landmark_list) {
auto converted_name = landmark_mapper.convert(lm.name);
if (!converted_name) { // no mapping defined for the current landmark
continue;
}
int vertex_idx = std::stoi(converted_name.get());
vec4f vertex(
mesh.vertices[vertex_idx].x,
mesh.vertices[vertex_idx].y,
mesh.vertices[vertex_idx].z,
mesh.vertices[vertex_idx].w
);
current_model_points.emplace_back(vertex);
current_vertex_indices.emplace_back(vertex_idx);
current_image_points.emplace_back(lm.coordinates);
}
model_points.push_back(current_model_points);
vertex_indices.push_back(current_vertex_indices);
image_points.push_back(current_image_points);
}
}
}
}
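As a usage illustration (not part of the commit): the per-frame outputs of subselect_landmarks_multi line up index for index and feed the multi-image linear shape fit. The variable names below, and any namespace qualification, are assumptions; the single-frame subselect_landmarks is called analogously with one landmark set and one mesh.

// Sketch: gather per-frame 2D-3D correspondences for the multi-frame linear shape fit.
std::vector<std::vector<cv::Vec4f>> model_points;  // 3D model points, one inner vector per frame
std::vector<std::vector<int>> vertex_indices;      // matching vertex indices per frame
std::vector<std::vector<cv::Vec2f>> image_points;  // matching 2D landmark positions per frame

subselect_landmarks_multi<cv::Vec2f, cv::Vec4f>(
    per_frame_landmarks, landmark_mapper, current_meshes,
    model_points, vertex_indices, image_points);

// image_points and vertex_indices can then be handed to fit_shape_to_landmarks_linear_multi.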
//
// Created by RA Torenvliet on 12/04/2017.
//
#ifndef EOS_RECONSTRUCTIONDATA_HPP
#define EOS_RECONSTRUCTIONDATA_HPP

#include "eos/core/Landmark.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"

#include "opencv2/core/core.hpp"

#include <vector>

namespace eos {
namespace fitting {

/**
 * @brief Bundles the data needed for a (multi-frame) reconstruction: the model,
 * its blendshapes, and the per-frame landmarks and affine camera matrices.
 */
struct ReconstructionData {
morphablemodel::MorphableModel morphable_model;
std::vector<morphablemodel::Blendshape> blendshapes;
std::vector<core::LandmarkCollection<cv::Vec2f>> landmarks;
std::vector<cv::Mat> affine_camera_matrix;
};

} /* namespace fitting */
} /* namespace eos */

#endif // EOS_RECONSTRUCTIONDATA_HPP
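A short sketch of how the new struct might be filled (not part of the commit; the right-hand-side variables are hypothetical placeholders for data loaded elsewhere):

// Sketch: bundle everything the multi-frame reconstruction needs into one object.
eos::fitting::ReconstructionData data;
data.morphable_model = morphable_model;
data.blendshapes = blendshapes;                // e.g. from morphablemodel::load_blendshapes()
data.landmarks = per_frame_landmarks;          // one LandmarkCollection per frame
data.affine_camera_matrix = per_frame_cameras; // one estimated 3x4 affine camera matrix per frame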
(Diff collapsed; this file's contents are not shown.)
@@ -59,8 +59,16 @@ namespace eos {
* @param[in] model_standard_deviation The standard deviation of the 3D vertex points in the 3D model, projected to 2D (so the value is in pixels).
* @return The estimated shape-coefficients (alphas).
*/
inline std::vector<float> fit_shape_to_landmarks_linear_multi(const morphablemodel::MorphableModel& morphable_model, std::vector<cv::Mat> affine_camera_matrix, std::vector<std::vector<cv::Vec2f>>& landmarks, std::vector<std::vector<int>>& vertex_ids, std::vector<Eigen::VectorXf> base_face=std::vector<Eigen::VectorXf>(), float lambda=3.0f, boost::optional<int> num_coefficients_to_fit=boost::optional<int>(), boost::optional<float> detector_standard_deviation=boost::optional<float>(), boost::optional<float> model_standard_deviation=boost::optional<float>())
{
inline std::vector<float> fit_shape_to_landmarks_linear_multi(
const morphablemodel::MorphableModel& morphable_model,
std::vector<cv::Mat> affine_camera_matrix,
std::vector<std::vector<cv::Vec2f>>& landmarks,
std::vector<std::vector<int>>& vertex_ids,
std::vector<Eigen::VectorXf> base_face=std::vector<Eigen::VectorXf>(),
float lambda=3.0f,
boost::optional<int> num_coefficients_to_fit=boost::optional<int>(),
boost::optional<float> detector_standard_deviation=boost::optional<float>(),
boost::optional<float> model_standard_deviation=boost::optional<float>()) {
assert(affine_camera_matrix.size() == landmarks.size() && landmarks.size() == vertex_ids.size()); // same number of instances (i.e. images/frames) for each of them
assert(landmarks.size() == vertex_ids.size());
@@ -100,7 +108,7 @@ inline std::vector<float> fit_shape_to_landmarks_linear_multi(const morphablemod
int y_index = 0; // also runs the same as P_index. Should rename to "running_index"?
// The mean, with an added homogeneous coordinate (x_1, y_1, z_1, 1, x_2, ...)^t
VectorXf v_bar = VectorXf::Ones(4 * total_num_landmarks_dimension);
int v_bar_index = 0; // also runs the same as P_index. But be careful, if I change it to be only 1 variable, only increment it once! :-)
int v_bar_index = 0; // also runs the same as P_index. But be careful, if I change it to be only 1 variable, only increment it once! :-),
// Well I think that would make it a bit messy since we need to increment inside the for (landmarks...) loop. Try to refactor some other way.
for (int k = 0; k < num_images; ++k)
@@ -124,8 +124,17 @@ namespace eos {
* @param[in] enable_far_clipping Whether vertices should be clipped against the far plane.
* @return A pair with the colourbuffer as its first element and the depthbuffer as the second element.
*/
inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> model_view_matrix, glm::tmat4x4<float> projection_matrix, int viewport_width, int viewport_height, const boost::optional<Texture>& texture = boost::none, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
{
inline std::pair<cv::Mat, cv::Mat> render(
core::Mesh mesh,
glm::tmat4x4<float> model_view_matrix,
glm::tmat4x4<float> projection_matrix,
int viewport_width,
int viewport_height,
const boost::optional<Texture>& texture = boost::none,
bool enable_backface_culling = false,
bool enable_near_clipping = true,
bool enable_far_clipping = true,
cv::Mat colourbuffer = cv::Mat::zeros(0, 0, CV_8UC4)) {
// Some internal documentation / old todos or notes:
// maybe change and pass depthBuffer as an optional arg (&?), because usually we never need it outside the renderer. Or maybe even a getDepthBuffer().
// modelViewMatrix goes to eye-space (camera space), projection does ortho or perspective proj.
@@ -139,7 +148,10 @@ inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> m
using cv::Mat;
using std::vector;
Mat colourbuffer = Mat::zeros(viewport_height, viewport_width, CV_8UC4); // make sure it's CV_8UC4?
if (colourbuffer.empty()) {
colourbuffer = Mat::zeros(viewport_height, viewport_width, CV_8UC4); // make sure it's CV_8UC4?
}
Mat depthbuffer = std::numeric_limits<float>::max() * Mat::ones(viewport_height, viewport_width, CV_64FC1);
// Vertex shader:
@@ -147,15 +159,17 @@ inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> m
// Assemble the vertices, project to clip space, and store as detail::Vertex (the internal representation):
vector<detail::Vertex<float>> clipspace_vertices;
clipspace_vertices.reserve(mesh.vertices.size());
for (int i = 0; i < mesh.vertices.size(); ++i) { // "previously": mesh.vertex
glm::tvec4<float> clipspace_coords = projection_matrix * model_view_matrix * mesh.vertices[i];
glm::tvec3<float> vertex_colour;
if (mesh.colors.empty()) {
vertex_colour = glm::tvec3<float>(0.5f, 0.5f, 0.5f);
}
else {
} else {
vertex_colour = mesh.colors[i];
}
clipspace_vertices.push_back(detail::Vertex<float>{clipspace_coords, vertex_colour, mesh.texcoords[i]});
}
@@ -234,6 +248,7 @@ inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> m
for (const auto& tri : triangles_to_raster) {
detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
}
return std::make_pair(colourbuffer, depthbuffer);
};
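The new colourbuffer parameter is what makes it possible to draw the fitted mesh directly onto a video frame. A hedged sketch of such a call (not part of the commit; last_frame, mesh, modelview and projection are assumed to be in scope, and the buffer must be CV_8UC4 with the same size as the viewport):

// Sketch: render on top of the last frame by passing it in as the initial colourbuffer.
cv::Mat background;
cv::cvtColor(last_frame, background, cv::COLOR_BGR2BGRA); // the renderer writes into a 4-channel buffer

auto result = eos::render::render(
    mesh, modelview, projection,
    last_frame.cols, last_frame.rows,
    boost::none, // no texture: use per-vertex colour
    false,       // enable_backface_culling
    true,        // enable_near_clipping
    true,        // enable_far_clipping
    background); // pre-filled colourbuffer instead of the default empty one
cv::Mat rendering = result.first; // the mesh composited over the original frame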
@@ -78,10 +78,10 @@ public:
this->fitting_result = fitting_result;
}
cv::Mat frame;
int frame_number;
float score = 0.0f;
cv::Mat frame;
fitting::FittingResult fitting_result;
};
@@ -323,7 +323,6 @@ public:
// TODO: build support for setting the amount of max_frames in the buffer.
BufferedVideoIterator(std::string videoFilePath, boost::property_tree::ptree settings) {
std::ifstream file(videoFilePath);
std::cout << "video file path: " << videoFilePath << std::endl;
if (!file.is_open()) {
throw std::runtime_error("Error opening given file: " + videoFilePath);
@@ -359,7 +358,6 @@ public:
cv::Mat frame;
cap >> frame;
// Pop if we exceeded max_frames.
if (n_frames > max_frames) {
frame_buffer.pop_front();
@@ -373,11 +371,11 @@ public:
n_frames++;
std::cout << total_frames_processed << ": laplacian score " << frame_laplacian_score << std::endl;
} else {
std::cout << total_frames_processed << ": skipping frame " << std::endl;
std::cout << total_frames_processed << ": skipping frame(";
if (frame_laplacian_score == 0) {
std::cout << "total black" << std::endl;
std::cout << "total black): " << frame_laplacian_score << std::endl;
} else {
std::cout << "too blurry" << std::endl;
std::cout << "too blurry): " << frame_laplacian_score << std::endl;
}
}
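For reference, the frame score used above is typically a Laplacian-based sharpness measure. The function below is a minimal sketch of such a metric (an assumption about what BufferedVideoIterator computes, not a copy of its implementation; the name compute_laplacian_score is hypothetical):

#include <opencv2/imgproc/imgproc.hpp>

// Variance of the Laplacian response: near zero for all-black or very blurry frames,
// larger for sharp frames (compare against laplacian_threshold from the settings).
inline double compute_laplacian_score(const cv::Mat& frame)
{
    cv::Mat grey, laplacian;
    cv::cvtColor(frame, grey, cv::COLOR_BGR2GRAY);
    cv::Laplacian(grey, laplacian, CV_64F);

    cv::Scalar mean, stddev;
    cv::meanStdDev(laplacian, mean, stddev);
    return stddev[0] * stddev[0];
}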
@@ -495,7 +493,39 @@ private:
unsigned char threshold;
};
} /* namespace video */
/**
* @brief Merges PCA coefficients from a live video with a simple averaging.
*/
class PcaCoefficientMerging
{
public:
/**
* @brief Merges the given new PCA coefficients with all previously processed coefficients.
*
* @param[in] coefficients The new coefficients to add.
* @return The merged coefficients of all images processed so far.
*/
std::vector<float> add_and_merge(const std::vector<float>& coefficients)
{
if (merged_shape_coefficients.empty()) {
merged_shape_coefficients = cv::Mat::zeros(coefficients.size(), 1, CV_32FC1);
}
assert(coefficients.size() == merged_shape_coefficients.rows);
cv::Mat test(coefficients);
merged_shape_coefficients = (merged_shape_coefficients * num_processed_frames + test) / (num_processed_frames + 1.0f);
++num_processed_frames;
return std::vector<float>(merged_shape_coefficients.begin<float>(), merged_shape_coefficients.end<float>());
};
private:
int num_processed_frames = 0;
cv::Mat merged_shape_coefficients;
};
} /* namespace video */
} /* namespace eos */
#endif /* KEYFRAME_HPP_ */
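A usage sketch for the new PcaCoefficientMerging class (not part of the commit; per_frame_coefficients is a hypothetical container holding one std::vector<float> of fitted shape coefficients per processed frame, and the eos::video qualification follows the namespace closings above):

// Sketch: maintain a running average of the shape coefficients over all frames.
eos::video::PcaCoefficientMerging coefficient_merger;
std::vector<float> merged_coefficients;
for (const auto& frame_coefficients : per_frame_coefficients) {
    merged_coefficients = coefficient_merger.add_and_merge(frame_coefficients);
}
// merged_coefficients now holds the simple average over all frames processed so far.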
[video]
max_frames=6
min_frames=5
drop_frames=0
laplacian_threshold=1000
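These settings can be read into the boost::property_tree that BufferedVideoIterator's constructor expects, for example as sketched below (the file names are hypothetical; the "video." key prefix follows from the [video] section header):

#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/ini_parser.hpp>

boost::property_tree::ptree settings;
boost::property_tree::ini_parser::read_ini("video_settings.ini", settings); // hypothetical file name

// Individual values can be queried with a default as fallback:
int max_frames = settings.get<int>("video.max_frames", 6);
int laplacian_threshold = settings.get<int>("video.laplacian_threshold", 1000);

eos::video::BufferedVideoIterator video_iterator("/path/to/input.avi", settings); // hypothetical path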
(Diff collapsed; this file's contents are not shown.)