Commit d34e1793 authored by Philipp Kopp

added fit model multi app

parent 8a2f08f8
@@ -24,6 +24,7 @@
#include "eos/fitting/fitting.hpp" #include "eos/fitting/fitting.hpp"
#include "eos/render/utils.hpp" #include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp" #include "eos/render/texture_extraction.hpp"
#include "eos/render/render.hpp"
#include "opencv2/core/core.hpp" #include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui.hpp"
@@ -128,6 +129,78 @@ void draw_wireframe(cv::Mat image, const core::Mesh& mesh, glm::mat4x4 modelview
}
};
/**
* @brief Merges isomaps from a live video with a weighted averaging, based
* on the view angle of each vertex to the camera.
*
* An optional merge_threshold can be specified upon construction. Pixels with
* a view-angle above that threshold will be completely discarded. All pixels
* below the threshold are merged with a weighting based on their vertex view-angle.
* Assumes the isomaps are 512x512.
*/
class WeightedIsomapAveraging
{
public:
/**
* @brief Constructs a new object that will hold the current averaged isomap and
* be able to add frames from a live video and merge them on-the-fly.
*
* The threshold means: each triangle with a view angle smaller than the given angle will be used in the merge.
* The default threshold (90°) means that all triangles which are at least slightly visible are merged.
*
* @param[in] merge_threshold View-angle merge threshold, in degrees, from 0 to 90.
*/
WeightedIsomapAveraging(float merge_threshold = 90.0f)
{
assert(merge_threshold >= 0.f && merge_threshold <= 90.f);
visibility_counter = cv::Mat::zeros(512, 512, CV_32SC1);
merged_isomap = cv::Mat::zeros(512, 512, CV_32FC4);
// map 0° to 255, 90° to 0:
float alpha_thresh = (-255.f / 90.f) * merge_threshold + 255.f;
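// For example, merge_threshold = 60° maps to alpha_thresh = (-255/90) * 60 + 255 = 85.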
if (alpha_thresh < 0.f) // could maybe happen due to float inaccuracies / rounding?
alpha_thresh = 0.0f;
threshold = static_cast<unsigned char>(alpha_thresh);
};
/**
* @brief Merges the given new isomap with all previously processed isomaps.
*
* @param[in] isomap The new isomap to add.
* @return The merged isomap of all images processed so far, as 8UC4.
*/
cv::Mat add_and_merge(const cv::Mat& isomap)
{
// Merge isomaps, add the current to the already merged, pixel by pixel:
for (int r = 0; r < isomap.rows; ++r)
{
for (int c = 0; c < isomap.cols; ++c)
{
if (isomap.at<cv::Vec4b>(r, c)[3] <= threshold)
{
continue; // ignore this pixel, not visible in the extracted isomap of this current frame
}
// we're sure to have a visible pixel, merge it:
// merged_pixel = (old_average * visible_count + new_pixel) / (visible_count + 1)
merged_isomap.at<cv::Vec4f>(r, c)[0] = (merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[1] = (merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[2] = (merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[3] = 255; // as soon as we've seen the pixel visible once, we set it to visible.
++visibility_counter.at<int>(r, c);
}
}
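// Convert the running 32-bit float average (32FC4) to an 8-bit image (8UC4) for saving and display: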
cv::Mat merged_isomap_uchar;
merged_isomap.convertTo(merged_isomap_uchar, CV_8UC4);
return merged_isomap_uchar;
};
private:
cv::Mat visibility_counter;
cv::Mat merged_isomap;
unsigned char threshold;
};
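// A minimal usage sketch for WeightedIsomapAveraging, assuming per-frame isomaps have already been
// extracted and saved as 8UC4 images whose alpha channel encodes the view angle (the file names
// below are hypothetical):
//
//   WeightedIsomapAveraging averaging(60.f); // discard triangles seen at 60° or more to the camera
//   cv::Mat merged;
//   for (const auto& file : { "frame_000.isomap.png", "frame_001.isomap.png" })
//   {
//       cv::Mat isomap = cv::imread(file, cv::IMREAD_UNCHANGED);
//       merged = averaging.add_and_merge(isomap);
//   }
//   cv::imwrite("merged.isomap.png", merged);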
/**
* This app demonstrates estimation of the camera and fitting of the shape
* model of a 3D Morphable Model from ibug LFPW images with their landmarks.
@@ -224,16 +297,6 @@ int main(int argc, char *argv[])
// The edge topology is used to speed up computation of the occluding face contour fitting:
morphablemodel::EdgeTopology edge_topology = morphablemodel::load_edge_topology(edgetopologyfile.string());
// Fit the model, get back a mesh and the pose:
vector<core::Mesh> meshs;
vector<fitting::RenderingParameters> rendering_paramss;
@@ -243,7 +306,22 @@ int main(int argc, char *argv[])
image_widths.push_back(image.cols);
image_heights.push_back(image.rows);
}
std::vector<float> pca_shape_coefficients;
std::vector<std::vector<float>> blendshape_coefficients;
std::vector<std::vector<cv::Vec2f>> fitted_image_points;
std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi(morphable_model, blendshapes, landmarkss, landmark_mapper, image_widths, image_heights, edge_topology, ibug_contour, model_contour, 50, boost::none, 30.0f, boost::none, pca_shape_coefficients, blendshape_coefficients, fitted_image_points);
//std::tie(meshs, rendering_paramss) = fitting::fit_shape_and_pose_multi(morphable_model, blendshapes, landmarks, landmark_mapper, image_width, image_height, edge_topology, contour_landmarks, model_contour, num_iterations, num_shape_coefficients_to_fit, lambda, boost::none, pca_shape_coefficients, blendshape_coefficients, fitted_image_points);
//fit_shape_and_pose_multi(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const std::vector<core::LandmarkCollection<cv::Vec2f>>& landmarks, const core::LandmarkMapper& landmark_mapper, std::vector<int> image_width, std::vector<int> image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations, boost::optional<int> num_shape_coefficients_to_fit, float lambda, boost::optional<fitting::RenderingParameters> initial_rendering_params, std::vector<float>& pca_shape_coefficients, std::vector<std::vector<float>>& blendshape_coefficients, std::vector<std::vector<cv::Vec2f>>& fitted_image_points)
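// The three vectors passed by reference above are filled by the multi-image fit; pca_shape_coefficients
// is printed below and reused to draw the neutral-expression mesh for the frontal renderings.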
std::cout<<"final pca shape coefficients: ";
for (auto i: pca_shape_coefficients)
std::cout << i << ' ';
std::cout << std::endl;
WeightedIsomapAveraging isomap_averaging(60.f); // merge all triangles that are facing <60° towards the camera
Mat merged_isomap;
for (unsigned i = 0; i < images.size(); ++i) {
@@ -255,23 +333,55 @@ int main(int argc, char *argv[])
Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_paramss[i], images[i].cols, images[i].rows);
Mat isomap = render::extract_texture(meshs[i], affine_from_ortho, images[i]);
// Draw the loaded landmarks:
Mat outimg = images[i].clone();
for (auto&& lm : landmarkss[i]) {
cv::rectangle(outimg, cv::Point2f(lm.coordinates[0] - 2.0f, lm.coordinates[1] - 2.0f), cv::Point2f(lm.coordinates[0] + 2.0f, lm.coordinates[1] + 2.0f), { 255, 0, 0 });
}
// Draw the fitted mesh as wireframe, and save the image:
draw_wireframe(outimg, meshs[i], rendering_paramss[i].get_modelview(), rendering_paramss[i].get_projection(), fitting::get_opencv_viewport(images[i].cols, images[i].rows));
fs::path outputfile = outputfilebase;
outputfile += fs::path(imagefiles[i].stem());
outputfile += fs::path(".png");
cv::imwrite(outputfile.string(), outimg);
// Save frontal rendering with texture:
Mat frontal_rendering;
glm::mat4 modelview_frontal = glm::mat4(1.0);
core::Mesh neutral_expression = morphablemodel::sample_to_mesh(morphable_model.get_shape_model().draw_sample(pca_shape_coefficients), morphable_model.get_color_model().get_mean(), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
std::tie(frontal_rendering, std::ignore) = render::render(neutral_expression, modelview_frontal, glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f), 256, 256, render::create_mipmapped_texture(isomap), true, false, false);
outputfile.replace_extension(".frontal.png");
cv::imwrite(outputfile.string(), frontal_rendering);
outputfile.replace_extension("");
// And save the isomap:
outputfile.replace_extension(".isomap.png");
cv::imwrite(outputfile.string(), isomap);
// merge the isomaps:
merged_isomap = isomap_averaging.add_and_merge(isomap);
}
// save the merged isomap:
fs::path outputfile = outputfilebase;
outputfile +=fs::path("merged.isomap.png");
cv::imwrite(outputfile.string(), merged_isomap);
outputfile.replace_extension("");
// save the frontal rendering with merged isomap:
Mat frontal_rendering;
glm::mat4 modelview_frontal = glm::mat4( 1.0 );
core::Mesh neutral_expression = morphablemodel::sample_to_mesh(morphable_model.get_shape_model().draw_sample(pca_shape_coefficients), morphable_model.get_color_model().get_mean(), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
std::tie(frontal_rendering, std::ignore) = render::render(neutral_expression, modelview_frontal, glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f), 512, 512, render::create_mipmapped_texture(merged_isomap), true, false, false);
outputfile.replace_extension(".frontal.png");
cv::imwrite(outputfile.string(), frontal_rendering);
outputfile.replace_extension("");
// Save the mesh as textured obj:
outputfile.replace_extension(".obj");
core::write_textured_obj(morphable_model.draw_sample(pca_shape_coefficients, std::vector<float>()), outputfile.string());
cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfilebase << "." << endl; cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfilebase << "." << endl;
return EXIT_SUCCESS;