Commit db012785 authored by Patrik Huber's avatar Patrik Huber

Merge pull request #6 from patrikhuber/devel

v0.6.0: OpenGL-like rendering, texture rendering, view-angle computation for texture extraction
parents d1bf81f0 b041fa1d
......@@ -25,6 +25,7 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
endif()
endif()
# Note: gcc is fine without -pthread.
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # the quotes are needed here, maybe because "MSVC" seems to be a keyword
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19)
message(FATAL_ERROR "Visual Studio 2015 or newer is required.")
......@@ -38,7 +39,10 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if(HAS_CXX14_FLAG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
endif()
else()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pthread")
# Eigen::LevenbergMarquardt probably needs -pthread.
# A cleaner way would be to add it to fit-model's target_link_libraries, but that requires CMake >2.8.10.
else() # no GNU, no MSVC, no Clang
message(WARNING "You are using an unsupported compiler. Compilation has only been tested with MSVC, GCC and Clang.")
check_cxx_compiler_flag(-std=c++14 HAS_CXX14_FLAG)
if(HAS_CXX14_FLAG)
......@@ -96,8 +100,10 @@ set(HEADERS
include/eos/fitting/linear_shape_fitting.hpp
include/eos/render/Mesh.hpp
include/eos/render/utils.hpp
include/eos/render/render.hpp
include/eos/render/render_affine.hpp
include/eos/render/detail/render_detail.hpp
include/eos/render/detail/render_affine_detail.hpp
include/eos/render/texture_extraction.hpp
include/eos/render/detail/texture_extraction_detail.hpp
)
......
......@@ -19,7 +19,7 @@
*/
#include "eos/core/Landmark.hpp"
#include "eos/core/LandmarkMapper.hpp"
#include "eos/fitting/affine_camera_estimation.hpp"
#include "eos/fitting/nonlinear_camera_estimation.hpp"
#include "eos/fitting/linear_shape_fitting.hpp"
#include "eos/render/utils.hpp"
#include "eos/render/texture_extraction.hpp"
......@@ -120,11 +120,11 @@ int main(int argc, char *argv[])
desc.add_options()
("help,h",
"display the help message")
("model,m", po::value<fs::path>(&modelfile)->required(),
("model,m", po::value<fs::path>(&modelfile)->required()->default_value("../share/sfm_shape_3448.bin"),
"a Morphable Model stored as cereal BinaryArchive")
("image,i", po::value<fs::path>(&imagefile)->required()->default_value("data/image_0001.png"),
("image,i", po::value<fs::path>(&imagefile)->required()->default_value("data/image_0010.png"),
"an input image")
("landmarks,l", po::value<fs::path>(&landmarksfile)->required()->default_value("data/image_0001.pts"),
("landmarks,l", po::value<fs::path>(&landmarksfile)->required()->default_value("data/image_0010.pts"),
"2D landmarks for the image, in ibug .pts format")
("mapping,p", po::value<fs::path>(&mappingsfile)->required()->default_value("../share/ibug2did.txt"),
"landmark identifier to model vertex number mapping")
......@@ -178,7 +178,6 @@ int main(int argc, char *argv[])
vector<Vec2f> image_points; // the corresponding 2D landmark points
// Sub-select all the landmarks which we have a mapping for (i.e. that are defined in the 3DMM):
//std::transform(begin(landmarks), end(landmarks), begin(landmarks), [&landmark_mapper](const Landmark<Vec2f>& lm) { });
for (int i = 0; i < landmarks.size(); ++i) {
auto converted_name = landmark_mapper.convert(landmarks[i].name);
if (!converted_name) { // no mapping defined for the current landmark
......@@ -191,42 +190,32 @@ int main(int argc, char *argv[])
image_points.emplace_back(landmarks[i].coordinates);
}
// Estimate the camera from the 2D - 3D point correspondences
Mat affine_cam = fitting::estimate_affine_camera(image_points, model_points);
// Estimate the camera (pose) from the 2D - 3D point correspondences
fitting::OrthographicRenderingParameters rendering_params = fitting::estimate_orthographic_camera(image_points, model_points, image.cols, image.rows);
Mat affine_from_ortho = get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
// Draw the mean-face landmarks projected using the estimated camera:
for (auto&& vertex : model_points) {
Vec2f screen_point(Mat(affine_cam * Mat(vertex)).at<float>(0), Mat(affine_cam * Mat(vertex)).at<float>(1));
cv::circle(outimg, cv::Point2f(screen_point), 5, { 0.0f, 255.0f, 0.0f });
}
// The 3D head pose can be recovered as follows:
float yaw_angle = glm::degrees(rendering_params.r_y);
// and similarly for pitch (r_x) and roll (r_z).
// Estimate the shape coefficients by fitting the shape to the landmarks:
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_cam, image_points, vertex_indices);
vector<float> fitted_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_from_ortho, image_points, vertex_indices);
// Obtain the full mesh and draw it using the estimated camera:
// Obtain the full mesh with the estimated coefficients:
render::Mesh mesh = morphable_model.draw_sample(fitted_coeffs, vector<float>());
outputfile += fs::path(".obj");
render::write_textured_obj(mesh, outputfile.string()); // save the mesh as obj
// Draw the projected points again, this time using the fitted model shape:
for (auto&& idx : vertex_indices) {
Vec4f model_point(mesh.vertices[idx][0], mesh.vertices[idx][1], mesh.vertices[idx][2], mesh.vertices[idx][3]);
Vec2f screen_point(Mat(affine_cam * Mat(model_point)).at<float>(0), Mat(affine_cam * Mat(model_point)).at<float>(1));
cv::circle(outimg, cv::Point2f(screen_point), 3, { 0.0f, 0.0f, 255.0f });
}
// Extract the texture from the image using given mesh and camera parameters:
Mat isomap = render::extract_texture(mesh, affine_from_ortho, image);
// Save an output image with the landmarks from the different stages:
//outputfile.replace_extension(".png");
//cv::imwrite(outputfile.string(), outimg);
outputfile.replace_extension(".png");
cv::imwrite(outputfile.string(), outimg);
// Save the mesh as textured obj:
outputfile += fs::path(".obj");
render::write_textured_obj(mesh, outputfile.string());
// Extract the texture and save the extracted texture map (isomap):
Mat isomap = render::extract_texture(mesh, affine_cam, image, render::TextureInterpolation::NearestNeighbour);
// And save the isomap:
outputfile.replace_extension(".isomap.png");
cv::imwrite(outputfile.string(), isomap);
cout << "Finished fitting and wrote result image and isomap " << outputfile.string() << "." << endl;
cout << "Finished fitting and wrote result mesh and isomap to files with basename " << outputfile.stem().stem() << "." << endl;
return EXIT_SUCCESS;
}
......@@ -48,7 +48,7 @@ namespace eos {
* Note: The standard deviations given should be a vector, i.e. different for each landmark. This is not implemented yet.
*
* @param[in] morphable_model The Morphable Model whose shape (coefficients) are estimated.
* @param[in] affine_camera_matrix A 3x4 affine camera matrix from world to clip-space (should probably be of type CV_32FC1 as all our calculations are done with float).
* @param[in] affine_camera_matrix A 3x4 affine camera matrix from model to screen-space (should probably be of type CV_32FC1 as all our calculations are done with float).
* @param[in] landmarks 2D landmarks from an image, given in screen-coordinates.
* @param[in] vertex_ids The vertex ids in the model that correspond to the 2D points.
* @param[in] lambda The regularisation parameter (weight of the prior towards the mean).
......
......@@ -64,7 +64,7 @@ struct Frustum
* The rotation values are given in radians and estimated using the RPY convention.
* Yaw is applied first to the model, then pitch, then roll (R * P * Y * vertex).
*/
struct RenderingParameters
struct OrthographicRenderingParameters
{
float r_x; // Pitch.
float r_y; // Yaw. Positive means subject is looking left (we see her right cheek).
......@@ -74,6 +74,92 @@ struct RenderingParameters
Frustum frustum;
};
/**
* @brief Converts a glm::mat4x4 to a cv::Mat.
*
* Note: move to render namespace
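 *
 * Example (constructing a cv::Mat copy of a glm identity matrix):
 * @code
 * cv::Mat m = to_mat(glm::mat4x4(1.0f)); // a 4x4 CV_32FC1 identity matrix
 * @endcode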
*/
cv::Mat to_mat(const glm::mat4x4& glm_matrix)
{
// glm stores its matrices in col-major order in memory, OpenCV in row-major order.
// Hence we transpose the glm matrix to flip the memory layout, and then point OpenCV
// to that location.
auto glm_matrix_t = glm::transpose(glm_matrix);
cv::Mat opencv_mat(4, 4, CV_32FC1, &glm_matrix_t[0]);
// we need to clone because the underlying data of the original goes out of scope
return opencv_mat.clone();
};
/**
* @brief Creates a 4x4 model-view matrix from given fitting parameters.
*
* Together with the Frustum information, this describes the full
* orthographic rendering parameters of the OpenGL pipeline.
* Example:
*
* @code
* fitting::OrthographicRenderingParameters rendering_params = ...;
* glm::mat4x4 view_model = get_4x4_modelview_matrix(rendering_params);
* glm::mat4x4 ortho_projection = glm::ortho(rendering_params.frustum.l, rendering_params.frustum.r, rendering_params.frustum.b, rendering_params.frustum.t);
* glm::vec4 viewport(0, image.rows, image.cols, -image.rows); // flips y, origin top-left, like in OpenCV
*
* // project a point from 3D to 2D:
* glm::vec3 point_3d = ...; // from a mesh for example
* glm::vec3 point_2d = glm::project(point_3d, view_model, ortho_projection, viewport);
* @endcode
*/
glm::mat4x4 get_4x4_modelview_matrix(fitting::OrthographicRenderingParameters params)
{
// Rotation order: yaw is applied to the model first, then pitch, then roll (modelview = T * R_z * R_x * R_y):
auto rot_mtx_x = glm::rotate(glm::mat4(1.0f), params.r_x, glm::vec3{ 1.0f, 0.0f, 0.0f });
auto rot_mtx_y = glm::rotate(glm::mat4(1.0f), params.r_y, glm::vec3{ 0.0f, 1.0f, 0.0f });
auto rot_mtx_z = glm::rotate(glm::mat4(1.0f), params.r_z, glm::vec3{ 0.0f, 0.0f, 1.0f });
auto t_mtx = glm::translate(glm::mat4(1.0f), glm::vec3{ params.t_x, params.t_y, 0.0f });
auto modelview = t_mtx * rot_mtx_z * rot_mtx_x * rot_mtx_y;
return modelview;
};
/**
* @brief Creates a 3x4 affine camera matrix from given fitting parameters. The
* matrix transforms points directly from model-space to screen-space.
*
 * This function is mainly used because the linear shape fitting fitting::fit_shape_to_landmarks_linear
 * expects such a 3x4 affine camera matrix, as does render::extract_texture.
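 *
 * Example (a sketch; 'rendering_params' is assumed to come from
 * fitting::estimate_orthographic_camera, 'image' is the input cv::Mat and
 * 'model_point' a homogeneous model-space point, a cv::Vec4f):
 * @code
 * cv::Mat affine_from_ortho = get_3x4_affine_camera_matrix(rendering_params, image.cols, image.rows);
 * // Project a model-space point directly to screen-space:
 * cv::Mat screen = affine_from_ortho * cv::Mat(model_point); // 3x1: (x_s, y_s, z_s)
 * @endcode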
*/
cv::Mat get_3x4_affine_camera_matrix(fitting::OrthographicRenderingParameters params, int width, int height)
{
auto view_model = to_mat(get_4x4_modelview_matrix(params));
auto ortho_projection = to_mat(glm::ortho(params.frustum.l, params.frustum.r, params.frustum.b, params.frustum.t));
cv::Mat mvp = ortho_projection * view_model;
glm::vec4 viewport(0, height, width, -height); // flips y, origin top-left, like in OpenCV
// equivalent to what glm::project's viewport does, but we don't change z and w:
cv::Mat viewport_mat = (cv::Mat_<float>(4, 4) << viewport[2] / 2.0f, 0.0f, 0.0f, viewport[2] / 2.0f + viewport[0],
0.0f, viewport[3] / 2.0f, 0.0f, viewport[3] / 2.0f + viewport[1],
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
cv::Mat full_projection_4x4 = viewport_mat * mvp;
cv::Mat full_projection_3x4 = full_projection_4x4.rowRange(0, 3); // we take the first 3 rows, but then set the last one to [0 0 0 1]
full_projection_3x4.at<float>(2, 0) = 0.0f;
full_projection_3x4.at<float>(2, 1) = 0.0f;
full_projection_3x4.at<float>(2, 2) = 0.0f;
full_projection_3x4.at<float>(2, 3) = 1.0f;
return full_projection_3x4;
};
/**
* @brief Returns a glm/OpenGL compatible viewport vector that flips y and
* has the origin on the top-left, like in OpenCV.
*
* Note: Move to detail namespace / not used at the moment.
*/
glm::vec4 get_opencv_viewport(int width, int height)
{
return glm::vec4(0, height, width, -height);
};
/**
* @brief This algorithm estimates the rotation angles and translation of the model, as
* well as the viewing frustum of the camera, given a set of corresponding 2D-3D points.
......@@ -99,7 +185,7 @@ struct RenderingParameters
* @param[in] height Height of the image (or viewport).
* @return The estimated model and camera parameters.
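 *
 * Example (a sketch; 'image_points' and 'model_points' are corresponding
 * 2D-3D points, 'image' is the input cv::Mat):
 * @code
 * OrthographicRenderingParameters rendering_params = estimate_orthographic_camera(image_points, model_points, image.cols, image.rows);
 * float yaw_angle = glm::degrees(rendering_params.r_y); // similarly r_x (pitch) and r_z (roll)
 * @endcode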
*/
RenderingParameters estimate_orthographic_camera(std::vector<cv::Vec2f> image_points, std::vector<cv::Vec4f> model_points, int width, int height)
OrthographicRenderingParameters estimate_orthographic_camera(std::vector<cv::Vec2f> image_points, std::vector<cv::Vec4f> model_points, int width, int height)
{
using cv::Mat;
assert(image_points.size() == model_points.size());
......@@ -122,8 +208,8 @@ RenderingParameters estimate_orthographic_camera(std::vector<cv::Vec2f> image_po
auto info = lm.minimize(parameters); // we could or should use the return value
// 'parameters' contains the solution now.
Frustum camera_frustum{ -1.0f * aspect * parameters[5], 1.0f * aspect * parameters[5], -1.0f * parameters[5], 1.0f * parameters[5] };
return RenderingParameters{ static_cast<float>(parameters[0]), static_cast<float>(parameters[1]), static_cast<float>(parameters[2]), static_cast<float>(parameters[3]), static_cast<float>(parameters[4]), camera_frustum };
Frustum camera_frustum{ -1.0f * aspect * static_cast<float>(parameters[5]), 1.0f * aspect * static_cast<float>(parameters[5]), -1.0f * static_cast<float>(parameters[5]), 1.0f * static_cast<float>(parameters[5]) };
return OrthographicRenderingParameters{ static_cast<float>(parameters[0]), static_cast<float>(parameters[1]), static_cast<float>(parameters[2]), static_cast<float>(parameters[3]), static_cast<float>(parameters[4]), camera_frustum };
};
} /* namespace fitting */
......
......@@ -49,7 +49,7 @@ namespace eos {
struct Mesh
{
std::vector<cv::Vec4f> vertices; ///< 3D vertex positions.
std::vector<cv::Vec3f> colors; ///< Color information for each vertex. Expected to be in RGB order.
std::vector<cv::Vec3f> colors; ///< Colour information for each vertex. Expected to be in RGB order.
std::vector<cv::Vec2f> texcoords; ///< Texture coordinates for each vertex.
std::vector<std::array<int, 3>> tvi; ///< Triangle vertex indices
......@@ -145,7 +145,7 @@ inline void write_textured_obj(Mesh mesh, std::string filename)
mtl_file << "map_Kd " << texture_filename.string() << std::endl;
return;
}
};
} /* namespace render */
} /* namespace eos */
......
/*
* Eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: include/eos/render/detail/render_affine_detail.hpp
*
* Copyright 2014, 2015 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RENDER_AFFINE_DETAIL_HPP_
#define RENDER_AFFINE_DETAIL_HPP_
#include "eos/render/detail/render_detail.hpp"
#include "opencv2/core/core.hpp"
/**
* Implementations of internal functions, not part of the
* API we expose and not meant to be used by a user.
*
* This file contains things specific to the affine rendering.
*/
namespace eos {
namespace render {
namespace detail {
/**
* Takes a 3x4 affine camera matrix estimated with fitting::estimate_affine_camera
* and computes the cross product of the first two rows to create a third axis that
* is orthogonal to the first two.
* This allows us to produce z values and figure out correct depth ordering in the
* rendering and for texture extraction.
*
* @param[in] affine_camera_matrix A 3x4 affine camera matrix.
* @return The resulting 4x4 matrix, with the third row set to the viewing direction and the fourth row to (0, 0, 0, 1).
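 *
 * Example (a sketch; 'vertex' is a homogeneous model-space point, a cv::Vec4f):
 * @code
 * cv::Mat affine_with_z = calculate_affine_z_direction(affine_camera_matrix);
 * cv::Mat projected = affine_with_z * cv::Mat(vertex); // 4x1; projected.at<float>(2) is the depth value
 * @endcode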
*/
cv::Mat calculate_affine_z_direction(cv::Mat affine_camera_matrix)
{
using cv::Mat;
// Take the cross product of row 0 with row 1 to get the direction perpendicular to the viewing plane (= the viewing direction).
// Todo: We should check whether we look/project in the right direction - the sign could be wrong.
Mat affine_cam_z_rotation = affine_camera_matrix.row(0).colRange(0, 3).cross(affine_camera_matrix.row(1).colRange(0, 3));
affine_cam_z_rotation /= cv::norm(affine_cam_z_rotation, cv::NORM_L2);
// The 4x4 affine camera matrix
Mat affine_cam_4x4 = Mat::zeros(4, 4, CV_32FC1);
// Replace the third row with the camera-direction (z)
Mat third_row_rotation_part = affine_cam_4x4.row(2).colRange(0, 3);
affine_cam_z_rotation.copyTo(third_row_rotation_part); // Set first 3 components. 4th component stays 0.
// Copy the first 2 rows from the input matrix
Mat first_two_rows_of_4x4 = affine_cam_4x4.rowRange(0, 2);
affine_camera_matrix.rowRange(0, 2).copyTo(first_two_rows_of_4x4);
// The 4th row is (0, 0, 0, 1):
affine_cam_4x4.at<float>(3, 3) = 1.0f;
return affine_cam_4x4;
};
/**
* Rasters a triangle into the given colour and depth buffer.
*
* In essence, loop through the pixels inside the triangle's bounding
* box, calculate the barycentric coordinates, and if inside the triangle
* and the z-test is passed, then draw the point using the barycentric
* coordinates for colour interpolation.
* Does not do perspective-correct weighting, and therefore only works
* with the affine rendering pipeline.
*
* No texturing at the moment.
*
* Note/Todo: See where and how this is used, and how similar it is to
* the "normal" raster_triangle. Maybe rename to raster_triangle_vertexcolour?
*
* @param[in] triangle A triangle.
* @param[in] colourbuffer The colour buffer to draw into.
* @param[in] depthbuffer The depth buffer to draw into and use for the depth test.
*/
void raster_triangle_affine(TriangleToRasterize triangle, cv::Mat colourbuffer, cv::Mat depthbuffer)
{
// These terms depend only on the triangle's vertices, so they can be computed
// once, outside the pixel loop (they are used for the barycentric weights):
const double one_over_v0ToLine12 = 1.0 / implicit_line(triangle.v0.position[0], triangle.v0.position[1], triangle.v1.position, triangle.v2.position);
const double one_over_v1ToLine20 = 1.0 / implicit_line(triangle.v1.position[0], triangle.v1.position[1], triangle.v2.position, triangle.v0.position);
const double one_over_v2ToLine01 = 1.0 / implicit_line(triangle.v2.position[0], triangle.v2.position[1], triangle.v0.position, triangle.v1.position);
for (int yi = triangle.min_y; yi <= triangle.max_y; ++yi)
{
for (int xi = triangle.min_x; xi <= triangle.max_x; ++xi)
{
// we want centers of pixels to be used in computations. Todo: Do we?
const float x = static_cast<float>(xi) + 0.5f;
const float y = static_cast<float>(yi) + 0.5f;
// affine barycentric weights
const double alpha = implicit_line(x, y, triangle.v1.position, triangle.v2.position) * one_over_v0ToLine12;
const double beta = implicit_line(x, y, triangle.v2.position, triangle.v0.position) * one_over_v1ToLine20;
const double gamma = implicit_line(x, y, triangle.v0.position, triangle.v1.position) * one_over_v2ToLine01;
// if pixel (x, y) is inside the triangle or on one of its edges
if (alpha >= 0 && beta >= 0 && gamma >= 0)
{
const int pixel_index_row = yi;
const int pixel_index_col = xi;
const double z_affine = alpha*static_cast<double>(triangle.v0.position[2]) + beta*static_cast<double>(triangle.v1.position[2]) + gamma*static_cast<double>(triangle.v2.position[2]);
if (z_affine < depthbuffer.at<double>(pixel_index_row, pixel_index_col))
{
// attributes interpolation
// pixel_color is in RGB, as the vertex colours (v.color) are
cv::Vec3f pixel_color = alpha*triangle.v0.color + beta*triangle.v1.color + gamma*triangle.v2.color;
// clamp bytes to 255
const unsigned char red = static_cast<unsigned char>(255.0f * std::min(pixel_color[0], 1.0f)); // Todo: Proper casting (rounding?)
const unsigned char green = static_cast<unsigned char>(255.0f * std::min(pixel_color[1], 1.0f));
const unsigned char blue = static_cast<unsigned char>(255.0f * std::min(pixel_color[2], 1.0f));
// update buffers
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[0] = blue;
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[1] = green;
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[2] = red;
colourbuffer.at<cv::Vec4b>(pixel_index_row, pixel_index_col)[3] = 255; // alpha channel
depthbuffer.at<double>(pixel_index_row, pixel_index_col) = z_affine;
}
}
}
}
};
} /* namespace detail */
} /* namespace render */
} /* namespace eos */
#endif /* RENDER_AFFINE_DETAIL_HPP_ */
......@@ -73,6 +73,12 @@ inline bool is_point_in_triangle(cv::Point2f point, cv::Point2f triV0, cv::Point
* The vertices should be given in screen coordinates, but with their
* z-values preserved, so they can be compared against the depthbuffer.
*
* Obviously, the given depthbuffer should have been created with the same
* projection matrix as the one the texture extraction is called with.
*
* Also, this does not do perspective-correct interpolation, so it should only
* be used with affine and orthographic projection matrices.
*
* @param[in] v0 First vertex, in screen coordinates (but still with their z-value).
* @param[in] v1 Second vertex.
* @param[in] v2 Third vertex.
......
......@@ -23,6 +23,7 @@
#define RENDER_AFFINE_HPP_
#include "eos/render/detail/render_detail.hpp"
#include "eos/render/detail/render_affine_detail.hpp"
#include "eos/render/Mesh.hpp"
#include "opencv2/core/core.hpp"
......@@ -50,7 +51,7 @@ namespace eos {
*/
std::pair<cv::Mat, cv::Mat> render_affine(Mesh mesh, cv::Mat affine_camera_matrix, int viewport_width, int viewport_height, bool do_backface_culling = true)
{
assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty());// The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
//assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
using cv::Mat;
......@@ -108,7 +109,7 @@ std::pair<cv::Mat, cv::Mat> render_affine(Mesh mesh, cv::Mat affine_camera_matri
// Raster all triangles, i.e. colour the pixel values and write the z-buffer
for (auto&& triangle : triangles_to_raster) {
detail::raster_triangle(triangle, colourbuffer, depthbuffer);
detail::raster_triangle_affine(triangle, colourbuffer, depthbuffer);
}
return std::make_pair(colourbuffer, depthbuffer);
};
......
......@@ -22,7 +22,10 @@
#ifndef RENDER_UTILS_HPP_
#define RENDER_UTILS_HPP_
#include "eos/render/Mesh.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
namespace eos {
namespace render {
......@@ -78,6 +81,139 @@ inline cv::Vec2f screen_to_clip_space(const cv::Vec2f& screen_coordinates, int s
return cv::Vec2f(x_cs, y_cs);
};
/**
 * Calculates the normal of a face (or triangle), i.e. the
 * per-face normal. The returned normal is normalised.
 * Assumes the triangle is given in CCW order, i.e. vertices
 * in counterclockwise order on the screen are front-facing.
*
* @param[in] v0 First vertex.
* @param[in] v1 Second vertex.
* @param[in] v2 Third vertex.
* @return The unit-length normal of the given triangle.
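 *
 * Example (a sketch; 'view_direction' is an assumed unit vector pointing
 * towards the camera, e.g. for view-angle computation in texture extraction):
 * @code
 * cv::Vec3f face_normal = calculate_face_normal(v0, v1, v2);
 * float cos_view_angle = face_normal.dot(view_direction); // ~1: seen frontally, <= 0: facing away
 * @endcode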
*/
cv::Vec3f calculate_face_normal(const cv::Vec3f& v0, const cv::Vec3f& v1, const cv::Vec3f& v2)
{
cv::Vec3f n = (v1 - v0).cross(v2 - v0); // v0-to-v1 x v0-to-v2
n /= cv::norm(n);
return n;
};
/**
* Draws the texture coordinates (uv-coords) of the given mesh
* into an image by looping over the triangles and drawing each
* triangle's texcoords.
*
* @param[in] mesh A mesh with texture coordinates.
* @param[in] image An optional image to draw onto.
* @return An image with the texture coordinate triangles drawn in it, 512x512 if no image is given.
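 *
 * Example (drawing the uv-layout of a mesh and saving it to a file;
 * cv::imwrite requires the OpenCV highgui/imgcodecs module):
 * @code
 * cv::Mat texcoords_image = draw_texcoords(mesh);
 * cv::imwrite("texcoords.png", texcoords_image);
 * @endcode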
*/
cv::Mat draw_texcoords(Mesh mesh, cv::Mat image = cv::Mat())
{
using cv::Point2f;
using cv::Scalar;
if (image.empty())
{
image = cv::Mat(512, 512, CV_8UC4, Scalar(0.0f, 0.0f, 0.0f, 255.0f));
}
for (const auto& triIdx : mesh.tvi) {
cv::line(image, Point2f(mesh.texcoords[triIdx[0]][0] * image.cols, mesh.texcoords[triIdx[0]][1] * image.rows), Point2f(mesh.texcoords[triIdx[1]][0] * image.cols, mesh.texcoords[triIdx[1]][1] * image.rows), Scalar(255.0f, 0.0f, 0.0f));
cv::line(image, Point2f(mesh.texcoords[triIdx[1]][0] * image.cols, mesh.texcoords[triIdx[1]][1] * image.rows), Point2f(mesh.texcoords[triIdx[2]][0] * image.cols, mesh.texcoords[triIdx[2]][1] * image.rows), Scalar(255.0f, 0.0f, 0.0f));
cv::line(image, Point2f(mesh.texcoords[triIdx[2]][0] * image.cols, mesh.texcoords[triIdx[2]][1] * image.rows), Point2f(mesh.texcoords[triIdx[0]][0] * image.cols, mesh.texcoords[triIdx[0]][1] * image.rows), Scalar(255.0f, 0.0f, 0.0f));
}
return image;
};
// TODO: Should go to detail:: namespace, or texturing/utils or whatever.
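// Returns the number of mip levels for a base level of the given size,
// i.e. floor(log2(max(width, height))) + 1 (e.g. 512x256 -> 10 levels).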
unsigned int get_max_possible_mipmaps_num(unsigned int width, unsigned int height)
{
unsigned int mipmapsNum = 1;
unsigned int size = std::max(width, height);
if (size == 1)
return 1;
do {
size >>= 1;
mipmapsNum++;
} while (size != 1);
return mipmapsNum;
};
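// Checks whether x is a power of two (note: it also returns true for x == 0).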
inline bool is_power_of_two(int x)
{
return !(x & (x - 1));
};
class Texture
{
public:
// Todo: This whole class needs a major overhaul and documentation.
std::vector<cv::Mat> mipmaps; // make Texture a friend class of renderer, then move this to private?
unsigned char widthLog, heightLog; // log2 of width and height of the base mip-level
//private:
//std::string filename;
unsigned int mipmaps_num;
};
// Throws an OpenCV exception or a std::runtime_error (e.g. if mipmaps can't be generated).
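// Example (a sketch): create a full mipmap chain from a BGR or BGRA image
// whose width and height are powers of two:
//   Texture texture = create_mipmapped_texture(image);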
Texture create_mipmapped_texture(cv::Mat image, unsigned int mipmapsNum = 0) {
assert(image.type() == CV_8UC3 || image.type() == CV_8UC4);
Texture texture;
texture.mipmaps_num = (mipmapsNum == 0 ? get_max_possible_mipmaps_num(image.cols, image.rows) : mipmapsNum);
/*if (mipmapsNum == 0)
{
uchar mmn = render::utils::MatrixUtils::getMaxPossibleMipmapsNum(image.cols, image.rows);
this->mipmapsNum = mmn;
} else
{
this->mipmapsNum = mipmapsNum;
}*/
if (texture.mipmaps_num > 1)
{
if (!is_power_of_two(image.cols) || !is_power_of_two(image.rows))
{
throw std::runtime_error("Error: Couldn't generate mipmaps, width or height not power of two.");
}
}
if (image.type() == CV_8UC3)
{
// Most often, the input image is CV_8UC3 (BGR). Add an alpha channel.
// (cv::Mat::convertTo can't change the number of channels, so cvtColor does the conversion.)
cv::cvtColor(image, image, CV_BGR2BGRA);
}
int currWidth = image.cols;
int currHeight = image.rows;
std::vector<cv::Mat> mipmaps;
for (unsigned int i = 0; i < texture.mipmaps_num; i++)
{
if (i == 0) {
mipmaps.push_back(image);
}
else {
cv::Mat currMipMap(currHeight, currWidth, CV_8UC4);
cv::resize(mipmaps[i - 1], currMipMap, currMipMap.size());
mipmaps.push_back(currMipMap);
}
if (currWidth > 1)
currWidth >>= 1;
if (currHeight > 1)
currHeight >>= 1;
}
texture.mipmaps = mipmaps;
texture.widthLog = (uchar)(std::log(mipmaps[0].cols) / CV_LOG2 + 0.0001f); // the small 0.0001f guards against rounding to just below an integer. Todo: Use a proper epsilon?
texture.heightLog = (uchar)(std::log(mipmaps[0].rows) / CV_LOG2 + 0.0001f); // Changed std::logf to std::log because it doesn't compile on Linux (gcc 4.8). CHECK THAT
return texture;
};
} /* namespace render */
} /* namespace eos */
......