Commit 1635a8d4 authored by Patrik Huber

Moved Mesh to core namespace

Adjusted all includes, files and bindings.
parent 871d0b18
@@ -69,6 +69,7 @@ set(eigen3_nnls_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/eigen3-nnls/src")
set(HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/core/Landmark.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/core/LandmarkMapper.hpp
+${CMAKE_CURRENT_SOURCE_DIR}/include/eos/core/Mesh.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/morphablemodel/PcaModel.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/morphablemodel/MorphableModel.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/morphablemodel/Blendshape.hpp
@@ -89,7 +90,6 @@ set(HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/fitting.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/ceres_nonlinear.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/fitting/RenderingParameters.hpp
-${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/Mesh.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/utils.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/render.hpp
${CMAKE_CURRENT_SOURCE_DIR}/include/eos/render/render_affine.hpp
......
@@ -421,7 +421,7 @@ int main(int argc, char *argv[])
return std::vector<float>(std::begin(vec), std::end(vec));
};
auto shape_ceres = morphable_model.get_shape_model().draw_sample(shape_coefficients) + to_matrix(blendshapes) * Mat(vectord_to_vectorf(blendshape_coefficients), true);
-render::Mesh mesh = morphablemodel::sample_to_mesh(shape_ceres, morphable_model.get_color_model().draw_sample(colour_coefficients), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
+core::Mesh mesh = morphablemodel::sample_to_mesh(shape_ceres, morphable_model.get_color_model().draw_sample(colour_coefficients), morphable_model.get_shape_model().get_triangle_list(), morphable_model.get_color_model().get_triangle_list(), morphable_model.get_texture_coordinates());
for (auto&& idx : vertex_indices)
{
glm::dvec3 point_3d(mesh.vertices[idx][0], mesh.vertices[idx][1], mesh.vertices[idx][2]); // The 3D model point
@@ -440,7 +440,7 @@ int main(int argc, char *argv[])
cout << fitting_log.str();
outputfile.replace_extension(".obj");
-render::write_obj(mesh, outputfile.string());
+core::write_obj(mesh, outputfile.string());
return EXIT_SUCCESS;
}
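
A minimal sketch (not part of this commit) of the caller-side effect of the change above: morphablemodel::sample_to_mesh now hands back an eos::core::Mesh. Only accessors that appear in this diff are used; the helper name is illustrative.

#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"

// Builds the mean mesh of an already-loaded Morphable Model (illustrative helper).
eos::core::Mesh make_mean_mesh(const eos::morphablemodel::MorphableModel& morphable_model)
{
    // sample_to_mesh() returns eos::core::Mesh instead of eos::render::Mesh after this commit:
    return eos::morphablemodel::sample_to_mesh(
        morphable_model.get_shape_model().get_mean(),
        morphable_model.get_color_model().get_mean(),
        morphable_model.get_shape_model().get_triangle_list(),
        morphable_model.get_color_model().get_triangle_list(),
        morphable_model.get_texture_coordinates());
}
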
@@ -200,14 +200,14 @@ int main(int argc, char *argv[])
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices);
// Obtain the full mesh with the estimated coefficients:
-render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
+core::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
// Extract the texture from the image using given mesh and camera parameters:
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Save the mesh as textured obj:
outputfile += fs::path(".obj");
-render::write_textured_obj(mesh, outputfile.string());
+core::write_textured_obj(mesh, outputfile.string());
// And save the isomap:
outputfile.replace_extension(".isomap.png");
......
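
For orientation, a condensed sketch of the example flow above under the new namespace. It assumes morphable_model, affine_from_ortho, image_points, vertex_indices and image are prepared as earlier in the example, and that "eos/fitting/fitting.hpp" provides fit_shape_to_landmarks_linear (an assumption; the exact header may differ). Output file names are placeholders.

#include "eos/core/Mesh.hpp"
#include "eos/fitting/fitting.hpp"
#include "eos/render/texture_extraction.hpp"
#include "opencv2/core/core.hpp"

// Fits the shape coefficients, samples the mesh, writes the .obj and returns the isomap.
cv::Mat fit_and_export(const eos::morphablemodel::MorphableModel& morphable_model,
                       const cv::Mat& affine_from_ortho,
                       const std::vector<cv::Vec2f>& image_points,
                       const std::vector<int>& vertex_indices,
                       const cv::Mat& image)
{
    using namespace eos;
    std::vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(
        morphable_model, affine_from_ortho, image_points, vertex_indices);
    core::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, std::vector<float>());
    core::write_textured_obj(mesh, "out.obj");       // obj writers now live in eos::core
    return render::extract_texture(mesh, affine_from_ortho, image);
}
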
@@ -112,14 +112,14 @@ LandmarkCollection<cv::Vec2f> read_pts_landmarks(std::string filename)
* @param[in] viewport Viewport to draw the mesh.
* @param[in] colour Colour of the mesh to be drawn.
*/
-void draw_wireframe(cv::Mat image, const eos::render::Mesh& mesh, glm::mat4x4 modelview, glm::mat4x4 projection, glm::vec4 viewport, cv::Scalar colour = cv::Scalar(0, 255, 0, 255))
+void draw_wireframe(cv::Mat image, const core::Mesh& mesh, glm::mat4x4 modelview, glm::mat4x4 projection, glm::vec4 viewport, cv::Scalar colour = cv::Scalar(0, 255, 0, 255))
{
for (const auto& triangle : mesh.tvi)
{
const auto p1 = glm::project({ mesh.vertices[triangle[0]][0], mesh.vertices[triangle[0]][1], mesh.vertices[triangle[0]][2] }, modelview, projection, viewport);
const auto p2 = glm::project({ mesh.vertices[triangle[1]][0], mesh.vertices[triangle[1]][1], mesh.vertices[triangle[1]][2] }, modelview, projection, viewport);
const auto p3 = glm::project({ mesh.vertices[triangle[2]][0], mesh.vertices[triangle[2]][1], mesh.vertices[triangle[2]][2] }, modelview, projection, viewport);
-if (eos::render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3)))
+if (render::detail::are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3)))
{
cv::line(image, cv::Point(p1.x, p1.y), cv::Point(p2.x, p2.y), colour);
cv::line(image, cv::Point(p2.x, p2.y), cv::Point(p3.x, p3.y), colour);
@@ -215,7 +215,7 @@ int main(int argc, char *argv[])
}
// Fit the model, get back a mesh and the pose:
-render::Mesh mesh;
+core::Mesh mesh;
fitting::RenderingParameters rendering_params;
std::tie(mesh, rendering_params) = fitting::fit_shape_and_pose(morphable_model, blendshapes, landmarks, landmark_mapper, image.cols, image.rows, edge_topology, ibug_contour, model_contour, 50, boost::none, 30.0f);
@@ -234,7 +234,7 @@ int main(int argc, char *argv[])
// Save the mesh as textured obj:
outputfile.replace_extension(".obj");
-render::write_textured_obj(mesh, outputfile.string());
+core::write_textured_obj(mesh, outputfile.string());
// And save the isomap:
outputfile.replace_extension(".isomap.png");
......
@@ -89,9 +89,9 @@ int main(int argc, char *argv[])
colour_coefficients.resize(morphable_model.get_color_model().get_num_principal_components());
}
-render::Mesh sample_mesh = morphable_model.draw_sample(shape_coefficients, colour_coefficients); // if one of the two vectors is empty, it uses get_mean()
+core::Mesh sample_mesh = morphable_model.draw_sample(shape_coefficients, colour_coefficients); // if one of the two vectors is empty, it uses get_mean()
-render::write_obj(sample_mesh, output_file.string());
+core::write_obj(sample_mesh, output_file.string());
cv::Mat rendering;
std::tie(rendering, std::ignore) = render::render(sample_mesh, glm::mat4x4(1.0f), glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f), 512, 512, boost::none, true, false, false);
output_file.replace_extension(".png");
......
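
A small sketch (not from the commit) mirroring the updated generate-obj example: drawing the mean, writing it through core::write_obj and rendering it orthographically. The output file name is a placeholder.

#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/render/render.hpp"
#include "glm/gtc/matrix_transform.hpp"   // glm::ortho
#include "opencv2/core/core.hpp"
#include <tuple>

// Writes the mean face as .obj and returns a 512x512 orthographic rendering of it.
cv::Mat write_mean_and_render(const eos::morphablemodel::MorphableModel& morphable_model)
{
    using namespace eos;
    // Passing empty coefficient vectors makes draw_sample() fall back to the mean (see the comment above):
    core::Mesh sample_mesh = morphable_model.draw_sample(std::vector<float>(), std::vector<float>());
    core::write_obj(sample_mesh, "mean.obj");
    cv::Mat rendering;
    std::tie(rendering, std::ignore) = render::render(sample_mesh, glm::mat4x4(1.0f),
        glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f), 512, 512, boost::none, true, false, false);
    return rendering;
}
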
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
-* File: include/eos/render/Mesh.hpp
+* File: include/eos/core/Mesh.hpp
*
* Copyright 2014, 2015 Patrik Huber
*
@@ -35,7 +35,7 @@
#include <fstream>
namespace eos {
-namespace render {
+namespace core {
/**
* @brief This class represents a 3D mesh consisting of vertices, vertex colour
@@ -46,9 +46,9 @@ namespace eos {
*/
struct Mesh
{
-std::vector<glm::tvec4<float>> vertices; ///< 3D vertex positions.
-std::vector<glm::tvec3<float>> colors; ///< Colour information for each vertex. Expected to be in RGB order.
-std::vector<glm::tvec2<float>> texcoords; ///< Texture coordinates for each vertex.
+std::vector<glm::vec4> vertices; ///< 3D vertex positions.
+std::vector<glm::vec3> colors; ///< Colour information for each vertex. Expected to be in RGB order.
+std::vector<glm::vec2> texcoords; ///< Texture coordinates for each vertex.
std::vector<std::array<int, 3>> tvi; ///< Triangle vertex indices
std::vector<std::array<int, 3>> tci; ///< Triangle color indices
@@ -153,7 +153,7 @@ inline void write_textured_obj(Mesh mesh, std::string filename)
return;
};
-} /* namespace render */
+} /* namespace core */
} /* namespace eos */
#endif /* MESH_HPP_ */
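
A usage sketch (an assumption, not part of the header) showing the relocated struct being filled by hand and written out with the obj writer that lives alongside it; values and the file name are placeholders.

#include "eos/core/Mesh.hpp"

int main()
{
    eos::core::Mesh mesh;
    // Three vertices (glm::vec4 positions, w = 1) and one counter-clockwise triangle:
    mesh.vertices = { {0.0f, 0.0f, 0.0f, 1.0f},
                      {1.0f, 0.0f, 0.0f, 1.0f},
                      {0.0f, 1.0f, 0.0f, 1.0f} };
    mesh.tvi = { {0, 1, 2} };
    eos::core::write_obj(mesh, "triangle.obj");  // the writers moved to eos::core together with the struct
    return 0;
}
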
@@ -22,9 +22,9 @@
#ifndef CLOSESTEDGEFITTING_HPP_
#define CLOSESTEDGEFITTING_HPP_
#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/EdgeTopology.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/render/Mesh.hpp"
#include "eos/render/utils.hpp"
#include "nanoflann.hpp"
@@ -122,7 +122,7 @@ inline std::pair<bool, boost::optional<float>> ray_triangle_intersect(const glm:
* @param[in] R The rotation (pose) under which the occluding boundaries should be computed.
* @return A vector with unique vertex id's making up the edges.
*/
-inline std::vector<int> occluding_boundary_vertices(const render::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, glm::mat4x4 R)
+inline std::vector<int> occluding_boundary_vertices(const core::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, glm::mat4x4 R)
{
// Rotate the mesh:
std::vector<glm::vec4> rotated_vertices;
@@ -337,7 +337,7 @@ struct KDTreeVectorOfVectorsAdaptor
* @param[in] distance_threshold All correspondences below this threshold.
* @return A pair consisting of the used image edge points and their associated 3D vertex index.
*/
-inline std::pair<std::vector<cv::Vec2f>, std::vector<int>> find_occluding_edge_correspondences(const render::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, const fitting::RenderingParameters& rendering_parameters, const std::vector<Eigen::Vector2f>& image_edges, float distance_threshold = 64.0f)
+inline std::pair<std::vector<cv::Vec2f>, std::vector<int>> find_occluding_edge_correspondences(const core::Mesh& mesh, const morphablemodel::EdgeTopology& edge_topology, const fitting::RenderingParameters& rendering_parameters, const std::vector<Eigen::Vector2f>& image_edges, float distance_threshold = 64.0f)
{
assert(rendering_parameters.get_camera_type() == fitting::CameraType::Orthographic);
using std::vector;
......
@@ -23,6 +23,7 @@
#define CONTOURCORRESPONDENCE_HPP_
#include "eos/core/Landmark.hpp"
#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "cereal/archives/json.hpp"
@@ -46,7 +47,7 @@ namespace eos {
struct ModelContour;
struct ContourLandmarks;
std::pair<std::vector<std::string>, std::vector<int>> select_contour(float yaw_angle, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour);
-std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const render::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport);
+std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const core::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport);
/**
@@ -200,7 +201,7 @@ struct ContourLandmarks
* @param[in] viewport Current viewport to use.
* @return A tuple with the 2D contour landmark points, the corresponding points in the 3D shape model and their vertex indices.
*/
-inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour, float yaw_angle, const render::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
+inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour, float yaw_angle, const core::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
{
// Select which side of the contour we'll use:
std::vector<int> model_contour_indices;
@@ -272,7 +273,7 @@ std::pair<std::vector<std::string>, std::vector<int>> select_contour(float yaw_a
* @param[in] viewport Current viewport to use.
* @return A tuple with the 2D contour landmark points, the corresponding points in the 3D shape model and their vertex indices.
*/
-inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const render::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
+inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const core::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
{
// These are the additional contour-correspondences we're going to find and then use!
std::vector<cv::Vec4f> model_points_cnt; // the points in the 3D shape model
......
@@ -24,6 +24,7 @@
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"
#include "eos/morphablemodel/EdgeTopology.hpp"
@@ -230,7 +231,7 @@ inline auto concat(const std::vector<T>& vec_a, const std::vector<T>& vec_b)
* @param[out] fitted_image_points Debug parameter: Returns all the 2D points that have been used for the fitting.
* @return The fitted model shape instance and the final pose.
*/
-inline std::pair<render::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const core::LandmarkCollection<cv::Vec2f>& landmarks, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations, boost::optional<int> num_shape_coefficients_to_fit, float lambda, boost::optional<fitting::RenderingParameters> initial_rendering_params, std::vector<float>& pca_shape_coefficients, std::vector<float>& blendshape_coefficients, std::vector<cv::Vec2f>& fitted_image_points)
+inline std::pair<core::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const core::LandmarkCollection<cv::Vec2f>& landmarks, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations, boost::optional<int> num_shape_coefficients_to_fit, float lambda, boost::optional<fitting::RenderingParameters> initial_rendering_params, std::vector<float>& pca_shape_coefficients, std::vector<float>& blendshape_coefficients, std::vector<cv::Vec2f>& fitted_image_points)
{
assert(blendshapes.size() > 0);
assert(landmarks.size() >= 4);
@@ -397,7 +398,7 @@ inline std::pair<render::Mesh, fitting::RenderingParameters> fit_shape_and_pose(
* @param[in] lambda Regularisation parameter of the PCA shape fitting.
* @return The fitted model shape instance and the final pose.
*/
-inline std::pair<render::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const core::LandmarkCollection<cv::Vec2f>& landmarks, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations = 5, boost::optional<int> num_shape_coefficients_to_fit = boost::none, float lambda = 30.0f)
+inline std::pair<core::Mesh, fitting::RenderingParameters> fit_shape_and_pose(const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const core::LandmarkCollection<cv::Vec2f>& landmarks, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations = 5, boost::optional<int> num_shape_coefficients_to_fit = boost::none, float lambda = 30.0f)
{
std::vector<float> pca_coeffs;
std::vector<float> blendshape_coeffs;
......
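
Caller-side sketch of the changed return type of fit_shape_and_pose (mirrors the fit-model example above; all inputs are assumed to be loaded already and the wrapper name is illustrative).

#include "eos/core/Mesh.hpp"
#include "eos/fitting/fitting.hpp"
#include <tuple>

// Runs the pose and shape fit; the returned mesh is now an eos::core::Mesh.
eos::core::Mesh fit(const eos::morphablemodel::MorphableModel& morphable_model,
                    const std::vector<eos::morphablemodel::Blendshape>& blendshapes,
                    const eos::core::LandmarkCollection<cv::Vec2f>& landmarks,
                    const eos::core::LandmarkMapper& landmark_mapper,
                    int image_width, int image_height,
                    const eos::morphablemodel::EdgeTopology& edge_topology,
                    const eos::fitting::ContourLandmarks& ibug_contour,
                    const eos::fitting::ModelContour& model_contour)
{
    using namespace eos;
    core::Mesh mesh;                                   // was render::Mesh before this commit
    fitting::RenderingParameters rendering_params;
    std::tie(mesh, rendering_params) = fitting::fit_shape_and_pose(
        morphable_model, blendshapes, landmarks, landmark_mapper, image_width, image_height,
        edge_topology, ibug_contour, model_contour, 50, boost::none, 30.0f);
    return mesh;
}
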
@@ -24,7 +24,7 @@
#include "eos/morphablemodel/PcaModel.hpp"
#include "eos/render/Mesh.hpp"
#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/io/mat_cerealisation.hpp"
#include "cereal/cereal.hpp"
@@ -42,7 +42,7 @@
// Forward declaration:
namespace eos { namespace morphablemodel {
-eos::render::Mesh sample_to_mesh(cv::Mat shape_instance, cv::Mat color_instance, std::vector<std::array<int, 3>> tvi, std::vector<std::array<int, 3>> tci, std::vector<cv::Vec2f> texture_coordinates = std::vector<cv::Vec2f>());
+eos::core::Mesh sample_to_mesh(cv::Mat shape_instance, cv::Mat color_instance, std::vector<std::array<int, 3>> tvi, std::vector<std::array<int, 3>> tci, std::vector<cv::Vec2f> texture_coordinates = std::vector<cv::Vec2f>());
} }
namespace eos {
@@ -125,14 +125,14 @@ public:
*
* @return An mesh instance of the mean of the Morphable Model.
*/
-render::Mesh get_mean() const
+core::Mesh get_mean() const
{
assert(shape_model.get_data_dimension() == color_model.get_data_dimension() || !has_color_model()); // The number of vertices (= model.getDataDimension() / 3) has to be equal for both models, or, alternatively, it has to be a shape-only model.
cv::Mat shape = shape_model.get_mean();
cv::Mat color = color_model.get_mean();
-render::Mesh mesh;
+core::Mesh mesh;
if (has_texture_coordinates()) {
mesh = sample_to_mesh(shape, color, shape_model.get_triangle_list(), color_model.get_triangle_list(), texture_coordinates);
}
@@ -151,14 +151,14 @@ public:
* @param[in] color_sigma The colour model standard deviation.
* @return A random sample from the model.
*/
-render::Mesh draw_sample(float shape_sigma = 1.0f, float color_sigma = 1.0f)
+core::Mesh draw_sample(float shape_sigma = 1.0f, float color_sigma = 1.0f)
{
assert(shape_model.get_data_dimension() == color_model.get_data_dimension()); // The number of vertices (= model.getDataDimension() / 3) has to be equal for both models.
cv::Mat shape_sample = shape_model.draw_sample(shape_sigma);
cv::Mat color_sample = color_model.draw_sample(color_sigma);
-render::Mesh mesh;
+core::Mesh mesh;
if (has_texture_coordinates()) {
mesh = sample_to_mesh(shape_sample, color_sample, shape_model.get_triangle_list(), color_model.get_triangle_list(), texture_coordinates);
}
@@ -182,7 +182,7 @@ public:
* @param[in] color_coefficients The PCA coefficients used to generate the vertex colouring.
* @return A model instance with given coefficients.
*/
-render::Mesh draw_sample(std::vector<float> shape_coefficients, std::vector<float> color_coefficients) const
+core::Mesh draw_sample(std::vector<float> shape_coefficients, std::vector<float> color_coefficients) const
{
assert(shape_model.get_data_dimension() == color_model.get_data_dimension() || !has_color_model()); // The number of vertices (= model.getDataDimension() / 3) has to be equal for both models, or, alternatively, it has to be a shape-only model.
@@ -202,7 +202,7 @@ public:
color_sample = color_model.draw_sample(color_coefficients);
}
-render::Mesh mesh;
+core::Mesh mesh;
if (has_texture_coordinates()) {
mesh = sample_to_mesh(shape_sample, color_sample, shape_model.get_triangle_list(), color_model.get_triangle_list(), texture_coordinates);
}
@@ -311,13 +311,13 @@ inline void save_model(MorphableModel model, std::string filename)
* @param[in] texture_coordinates Optional texture coordinates for each vertex.
* @return A mesh created from given parameters.
*/
-inline render::Mesh sample_to_mesh(cv::Mat shape_instance, cv::Mat color_instance, std::vector<std::array<int, 3>> tvi, std::vector<std::array<int, 3>> tci, std::vector<cv::Vec2f> texture_coordinates /* = std::vector<cv::Vec2f>() */)
+inline core::Mesh sample_to_mesh(cv::Mat shape_instance, cv::Mat color_instance, std::vector<std::array<int, 3>> tvi, std::vector<std::array<int, 3>> tci, std::vector<cv::Vec2f> texture_coordinates /* = std::vector<cv::Vec2f>() */)
{
assert(shape_instance.rows == color_instance.rows || color_instance.empty()); // The number of vertices (= model.getDataDimension() / 3) has to be equal for both models, or, alternatively, it has to be a shape-only model.
auto num_vertices = shape_instance.rows / 3;
-render::Mesh mesh;
+core::Mesh mesh;
// Construct the mesh vertices:
mesh.vertices.resize(num_vertices);
......
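
A short sketch (not from the commit) of the accessors above with the new return type; model is assumed to be an already-constructed MorphableModel.

#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include <cassert>
#include <vector>

// Draws the mean and an all-zero-coefficient instance; both come back as core::Mesh.
void inspect(const eos::morphablemodel::MorphableModel& model)
{
    eos::core::Mesh mean = model.get_mean();
    // Zero shape coefficients and empty colour coefficients also reproduce the mean:
    eos::core::Mesh instance = model.draw_sample(
        std::vector<float>(model.get_shape_model().get_num_principal_components(), 0.0f),
        std::vector<float>());
    assert(mean.vertices.size() == instance.vertices.size());
}
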
@@ -22,6 +22,8 @@
#ifndef RENDER_HPP_
#define RENDER_HPP_
#include "eos/core/Mesh.hpp"
#include "eos/render/detail/render_detail.hpp"
#include "eos/render/utils.hpp"
@@ -122,7 +124,7 @@ namespace eos {
* @param[in] enable_far_clipping Whether vertices should be clipped against the far plane.
* @return A pair with the colourbuffer as its first element and the depthbuffer as the second element.
*/
-inline std::pair<cv::Mat, cv::Mat> render(Mesh mesh, glm::tmat4x4<float> model_view_matrix, glm::tmat4x4<float> projection_matrix, int viewport_width, int viewport_height, const boost::optional<Texture>& texture = boost::none, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
+inline std::pair<cv::Mat, cv::Mat> render(core::Mesh mesh, glm::tmat4x4<float> model_view_matrix, glm::tmat4x4<float> projection_matrix, int viewport_width, int viewport_height, const boost::optional<Texture>& texture = boost::none, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
{
// Some internal documentation / old todos or notes:
// maybe change and pass depthBuffer as an optional arg (&?), because usually we never need it outside the renderer. Or maybe even a getDepthBuffer().
......
@@ -22,9 +22,9 @@
#ifndef RENDER_AFFINE_HPP_
#define RENDER_AFFINE_HPP_
#include "eos/core/Mesh.hpp"
#include "eos/render/detail/render_detail.hpp"
#include "eos/render/detail/render_affine_detail.hpp"
#include "eos/render/Mesh.hpp"
#include "glm/vec2.hpp"
#include "glm/vec3.hpp"
@@ -53,7 +53,7 @@ namespace eos {
* @param[in] do_backface_culling Whether the renderer should perform backface culling.
* @return A pair with the colourbuffer as its first element and the depthbuffer as the second element.
*/
-inline std::pair<cv::Mat, cv::Mat> render_affine(Mesh mesh, cv::Mat affine_camera_matrix, int viewport_width, int viewport_height, bool do_backface_culling = true)
+inline std::pair<cv::Mat, cv::Mat> render_affine(core::Mesh mesh, cv::Mat affine_camera_matrix, int viewport_width, int viewport_height, bool do_backface_culling = true)
{
assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
//assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
......
@@ -22,8 +22,8 @@
#ifndef TEXTURE_EXTRACTION_HPP_
#define TEXTURE_EXTRACTION_HPP_
#include "eos/core/Mesh.hpp"
#include "eos/render/detail/texture_extraction_detail.hpp"
#include "eos/render/Mesh.hpp"
#include "eos/render/render_affine.hpp"
#include "eos/render/detail/render_detail.hpp"
@@ -50,7 +50,7 @@ enum class TextureInterpolation {
};
// Forward declarations:
-cv::Mat extract_texture(Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat image, cv::Mat depthbuffer, bool compute_view_angle, TextureInterpolation mapping_type, int isomap_resolution);
+cv::Mat extract_texture(core::Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat image, cv::Mat depthbuffer, bool compute_view_angle, TextureInterpolation mapping_type, int isomap_resolution);
namespace detail { cv::Mat interpolate_black_line(cv::Mat isomap); }
/**
@@ -75,7 +75,7 @@ namespace detail { cv::Mat interpolate_black_line(cv::Mat isomap); }
* @param[in] isomap_resolution The resolution of the generated isomap. Defaults to 512x512.
* @return The extracted texture as isomap (texture map).
*/
-inline cv::Mat extract_texture(Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat image, bool compute_view_angle = false, TextureInterpolation mapping_type = TextureInterpolation::NearestNeighbour, int isomap_resolution = 512)
+inline cv::Mat extract_texture(core::Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat image, bool compute_view_angle = false, TextureInterpolation mapping_type = TextureInterpolation::NearestNeighbour, int isomap_resolution = 512)
{
// Render the model to get a depth buffer:
cv::Mat depthbuffer;
@@ -106,7 +106,7 @@ inline cv::Mat extract_texture(Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat
* @param[in] isomap_resolution The resolution of the generated isomap. Defaults to 512x512.
* @return The extracted texture as isomap (texture map).
*/
-inline cv::Mat extract_texture(Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat image, cv::Mat depthbuffer, bool compute_view_angle = false, TextureInterpolation mapping_type = TextureInterpolation::NearestNeighbour, int isomap_resolution = 512)
+inline cv::Mat extract_texture(core::Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat image, cv::Mat depthbuffer, bool compute_view_angle = false, TextureInterpolation mapping_type = TextureInterpolation::NearestNeighbour, int isomap_resolution = 512)
{
assert(mesh.vertices.size() == mesh.texcoords.size());
assert(image.type() == CV_8UC3); // the other cases are not yet supported
......
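
Sketch of the typical call sequence around extract_texture with the new mesh type; it mirrors the updated Python binding further below and assumes fitting::get_3x4_affine_camera_matrix is available from the fitting headers.

#include "eos/core/Mesh.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/render/texture_extraction.hpp"

// Builds the 3x4 affine camera matrix from fitted parameters and extracts the isomap.
cv::Mat make_isomap(const eos::core::Mesh& mesh,
                    const eos::fitting::RenderingParameters& rendering_params,
                    const cv::Mat& image)
{
    using namespace eos;
    const cv::Mat affine_from_ortho =
        fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
    // Defaults above: no view-angle weighting, nearest-neighbour sampling, 512x512 isomap.
    return render::extract_texture(mesh, affine_from_ortho, image);
}
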
@@ -22,7 +22,7 @@
#ifndef RENDER_UTILS_HPP_
#define RENDER_UTILS_HPP_
#include "eos/render/Mesh.hpp"
#include "eos/core/Mesh.hpp"
#include "glm/vec3.hpp"
#include "glm/geometric.hpp"
@@ -129,7 +129,7 @@ glm::vec3 compute_face_normal(const glm::vec3& v0, const glm::vec3& v1, const gl
* @param[in] image An optional image to draw onto.
* @return An image with the texture coordinate triangles drawn in it, 512x512 if no image is given.
*/
-inline cv::Mat draw_texcoords(Mesh mesh, cv::Mat image = cv::Mat())
+inline cv::Mat draw_texcoords(core::Mesh mesh, cv::Mat image = cv::Mat())
{
using cv::Point2f;
using cv::Scalar;
......
@@ -18,13 +18,13 @@
* limitations under the License.
*/
#include "eos/core/LandmarkMapper.hpp"
#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"
#include "eos/morphablemodel/EdgeTopology.hpp"
#include "eos/fitting/contour_correspondence.hpp"
#include "eos/fitting/fitting.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/render/Mesh.hpp"
#include "mexplus_eigen.hpp"
#include "mexplus_eos_types.hpp"
@@ -88,7 +88,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
boost::optional<int> num_shape_coefficients_to_fit = num_shape_coeffs == -1 ? boost::none : boost::optional<int>(num_shape_coeffs);
// Now do the actual fitting:
-render::Mesh mesh;
+core::Mesh mesh;
fitting::RenderingParameters rendering_parameters;
std::tie(mesh, rendering_parameters) = fitting::fit_shape_and_pose(morphable_model, blendshapes, landmarks, landmark_mapper, image_width, image_height, edge_topology, contour_landmarks, model_contour, num_iterations, num_shape_coefficients_to_fit, lambda);
......
@@ -22,7 +22,7 @@
#ifndef MEXPLUS_EOS_TYPES_HPP_
#define MEXPLUS_EOS_TYPES_HPP_
#include "eos/render/Mesh.hpp"
#include "eos/core/Mesh.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "mexplus/mxarray.h"
@@ -199,7 +199,7 @@ mxArray* MxArray::from(const std::vector<std::array<int, 3>>& data)
* @return An mxArray containing a Matlab struct with all vertex, colour, texcoords and triangle data.
*/
template<>
-mxArray* MxArray::from(const eos::render::Mesh& mesh) {
+mxArray* MxArray::from(const eos::core::Mesh& mesh) {
MxArray out_array(MxArray::Struct());
out_array.set("vertices", mesh.vertices);
......
@@ -18,6 +18,7 @@
* limitations under the License.
*/
#include "eos/core/LandmarkMapper.hpp"
#include "eos/core/Mesh.hpp"
#include "eos/morphablemodel/PcaModel.hpp"
#include "eos/morphablemodel/MorphableModel.hpp"
#include "eos/morphablemodel/Blendshape.hpp"
@@ -26,7 +27,6 @@
#include "eos/fitting/fitting.hpp"
#include "eos/fitting/orthographic_camera_estimation_linear.hpp"
#include "eos/fitting/RenderingParameters.hpp"
#include "eos/render/Mesh.hpp"
#include "eos/render/texture_extraction.hpp"
#include "opencv2/core/core.hpp"
@@ -55,6 +55,7 @@ PYBIND11_PLUGIN(eos) {
/**
* Bindings for the eos::core namespace:
* - LandmarkMapper
+* - Mesh
*/
py::module core_module = eos_module.def_submodule("core", "Essential functions and classes to work with 3D face models and landmarks.");
py::class_<core::LandmarkMapper>(core_module, "LandmarkMapper", "Represents a mapping from one kind of landmarks to a different format(e.g.model vertices).")
@@ -65,20 +66,12 @@ PYBIND11_PLUGIN(eos) {
// We can't expose the convert member function yet - need std::optional (or some trick with self/this and a lambda)
;
-/**
-* Bindings for the eos::render namespace:
-* (Note: Defining Mesh before using it below in fitting::fit_shape_and_pose)
-* TODO: Will move Mesh to eos::core namespace.
-* - Mesh
-*/
-py::module render_module = eos_module.def_submodule("render", "3D mesh and texture extraction functionality.");
-py::class_<render::Mesh>(render_module, "Mesh", "This class represents a 3D mesh consisting of vertices, vertex colour information and texture coordinates.")
-.def_readwrite("vertices", &render::Mesh::vertices, "Vertices")
-.def_readwrite("tvi", &render::Mesh::tvi, "Triangle vertex indices")
-.def_readwrite("colors", &render::Mesh::colors, "Colour data")
-.def_readwrite("tci", &render::Mesh::tci, "Triangle colour indices (usually the same as tvi)")
-.def_readwrite("texcoords", &render::Mesh::texcoords, "Texture coordinates")
+py::class_<core::Mesh>(core_module, "Mesh", "This class represents a 3D mesh consisting of vertices, vertex colour information and texture coordinates.")
+.def_readwrite("vertices", &core::Mesh::vertices, "Vertices")
+.def_readwrite("tvi", &core::Mesh::tvi, "Triangle vertex indices")
+.def_readwrite("colors", &core::Mesh::colors, "Colour data")
+.def_readwrite("tci", &core::Mesh::tci, "Triangle colour indices (usually the same as tvi)")
+.def_readwrite("texcoords", &core::Mesh::texcoords, "Texture coordinates")
;
/**
@@ -203,10 +196,11 @@ PYBIND11_PLUGIN(eos) {
/**
* Bindings for the eos::render namespace:
+* (Note: Defining down here because we need fitting::RenderingParameters to be already exposed)
* - extract_texture()
*/
render_module.def("extract_texture", [](const render::Mesh& mesh, const fitting::RenderingParameters& rendering_params, cv::Mat image, bool compute_view_angle, int isomap_resolution) {
py::module render_module = eos_module.def_submodule("render", "3D mesh and texture extraction functionality.");
render_module.def("extract_texture", [](const core::Mesh& mesh, const fitting::RenderingParameters& rendering_params, cv::Mat image, bool compute_view_angle, int isomap_resolution) {
cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
return render::extract_texture(mesh, affine_from_ortho, image, compute_view_angle, render::TextureInterpolation::NearestNeighbour, isomap_resolution);
}, "Extracts the texture of the face from the given image and stores it as isomap (a rectangular texture map).", py::arg("mesh"), py::arg("rendering_params"), py::arg("image"), py::arg("compute_view_angle") = false, py::arg("isomap_resolution") = 512);
......