Commit 7d775985 authored by Patrik Huber

Changed Mesh to use glm::tvec* instead of OpenCV

Changed all affected functions and classes accordingly: mainly the renderer, the affine renderer, and the affine texture extraction.

This results in a ~35% speed-up of rendering meshes.

It requires some not-so-beautiful conversions in render_affine and the current texture extraction, but they will be superseded by the new renderer soon anyway.
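
As a rough illustration of those conversions (a minimal sketch only, not code from this commit; the helper name project_affine is made up here): render_affine now has to hop between glm and OpenCV per vertex, because the affine camera matrix is still a cv::Mat while Mesh::vertices are glm::tvec4<float>.

    #include "glm/vec4.hpp"
    #include "opencv2/core/core.hpp"

    // Sketch: multiply a glm vertex with a cv::Mat affine camera matrix (4x4, CV_32FC1)
    // and return the result as a glm vector again, similar to what render_affine now does internally.
    glm::tvec4<float> project_affine(const glm::tvec4<float>& vertex, const cv::Mat& affine_with_z)
    {
        // glm -> OpenCV, so the existing cv::Mat matrix multiplication can be reused:
        const cv::Mat screen = affine_with_z * cv::Mat(cv::Vec4f(vertex.x, vertex.y, vertex.z, vertex.w));
        // OpenCV -> glm, for the rest of the (now glm-based) vertex pipeline:
        return glm::tvec4<float>(screen.at<float>(0), screen.at<float>(1), screen.at<float>(2), screen.at<float>(3));
    }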
parent 40fb71b0
@@ -19,7 +19,6 @@
  */
 #include "eos/morphablemodel/MorphableModel.hpp"
 #include "eos/render/render.hpp"
-#include "eos/fitting/nonlinear_camera_estimation.hpp"
 #include "glm/gtc/matrix_transform.hpp"
 #include "opencv2/core/core.hpp"
@@ -94,7 +93,7 @@ int main(int argc, char *argv[])
     render::write_obj(sample_mesh, output_file.string());
     cv::Mat rendering;
-    std::tie(rendering, std::ignore) = render::render(sample_mesh, cv::Mat::eye(4, 4, CV_32FC1), fitting::to_mat(glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f)), 512, 512, boost::none, true, false, false);
+    std::tie(rendering, std::ignore) = render::render(sample_mesh, glm::tmat4x4<float>(1.0f), glm::ortho(-130.0f, 130.0f, -130.0f, 130.0f), 512, 512, boost::none, true, false, false);
     output_file.replace_extension(".png");
     cv::imwrite(output_file.string(), rendering);
...
@@ -32,6 +32,9 @@
 #include "cereal/types/vector.hpp"
 #include "cereal/archives/binary.hpp"
+#include "glm/vec2.hpp"
+#include "glm/vec3.hpp"
+#include "glm/vec4.hpp"
 #include <vector>
 #include <array>
 #include <cstdint>
@@ -320,14 +323,14 @@ eos::render::Mesh sample_to_mesh(cv::Mat shape, cv::Mat color, std::vector<std::
     // Construct the mesh vertices:
     mesh.vertices.resize(num_vertices);
     for (auto i = 0; i < num_vertices; ++i) {
-        mesh.vertices[i] = cv::Vec4f(shape.at<float>(i * 3 + 0), shape.at<float>(i * 3 + 1), shape.at<float>(i * 3 + 2), 1.0f);
+        mesh.vertices[i] = glm::tvec4<float>(shape.at<float>(i * 3 + 0), shape.at<float>(i * 3 + 1), shape.at<float>(i * 3 + 2), 1.0f);
     }
     // Assign the vertex colour information if it's not a shape-only model:
     if (!color.empty()) {
         mesh.colors.resize(num_vertices);
         for (auto i = 0; i < num_vertices; ++i) {
-            mesh.colors[i] = cv::Vec3f(color.at<float>(i * 3 + 0), color.at<float>(i * 3 + 1), color.at<float>(i * 3 + 2)); // order in hdf5: RGB. Order in OCV: BGR. But order in vertex.color: RGB
+            mesh.colors[i] = glm::tvec3<float>(color.at<float>(i * 3 + 0), color.at<float>(i * 3 + 1), color.at<float>(i * 3 + 2)); // order in hdf5: RGB. Order in OCV: BGR. But order in vertex.color: RGB
         }
     }
@@ -339,7 +342,7 @@ eos::render::Mesh sample_to_mesh(cv::Mat shape, cv::Mat color, std::vector<std::
     if (!texture_coordinates.empty()) {
         mesh.texcoords.resize(num_vertices);
         for (auto i = 0; i < num_vertices; ++i) {
-            mesh.texcoords[i] = texture_coordinates[i];
+            mesh.texcoords[i] = glm::tvec2<float>(texture_coordinates[i][0], texture_coordinates[i][1]);
         }
     }
...
@@ -22,7 +22,9 @@
 #ifndef MESH_HPP_
 #define MESH_HPP_
-#include "opencv2/core/core.hpp"
+#include "glm/vec2.hpp"
+#include "glm/vec3.hpp"
+#include "glm/vec4.hpp"
 #include "boost/filesystem/path.hpp"
@@ -44,9 +46,9 @@ namespace eos {
  */
 struct Mesh
 {
-    std::vector<cv::Vec4f> vertices; ///< 3D vertex positions.
-    std::vector<cv::Vec3f> colors; ///< Colour information for each vertex. Expected to be in RGB order.
-    std::vector<cv::Vec2f> texcoords; ///< Texture coordinates for each vertex.
+    std::vector<glm::tvec4<float>> vertices; ///< 3D vertex positions.
+    std::vector<glm::tvec3<float>> colors; ///< Colour information for each vertex. Expected to be in RGB order.
+    std::vector<glm::tvec2<float>> texcoords; ///< Texture coordinates for each vertex.
     std::vector<std::array<int, 3>> tvi; ///< Triangle vertex indices
     std::vector<std::array<int, 3>> tci; ///< Triangle color indices
...
@@ -24,6 +24,8 @@
 #include "eos/render/detail/render_detail.hpp"
+#include "glm/vec3.hpp"
+
 #include "opencv2/core/core.hpp"
 /**
@@ -121,7 +123,7 @@ void raster_triangle_affine(TriangleToRasterize triangle, cv::Mat colourbuffer,
 {
     // attributes interpolation
     // pixel_color is in RGB, v.color are RGB
-    cv::Vec3f pixel_color = alpha*triangle.v0.color + beta*triangle.v1.color + gamma*triangle.v2.color;
+    glm::tvec3<float> pixel_color = static_cast<float>(alpha)*triangle.v0.color + static_cast<float>(beta)*triangle.v1.color + static_cast<float>(gamma)*triangle.v2.color;
     // clamp bytes to 255
     const unsigned char red = static_cast<unsigned char>(255.0f * std::min(pixel_color[0], 1.0f)); // Todo: Proper casting (rounding?)
...
@@ -24,6 +24,8 @@
 #include "eos/render/utils.hpp"
+#include "glm/glm.hpp" // tvec2, glm::precision, tvec3, normalize, dot, cross
+
 #include "opencv2/core/core.hpp"
 #include "boost/optional.hpp"
@@ -46,11 +48,11 @@ class Vertex
 {
 public:
     Vertex() {};
-    Vertex(const cv::Vec4f& position, const cv::Vec3f& color, const cv::Vec2f& texcoords) : position(position), color(color), texcoords(texcoords) {};
+    Vertex(const glm::tvec4<float>& position, const glm::tvec3<float>& color, const glm::tvec2<float>& texcoords) : position(position), color(color), texcoords(texcoords) {};
-    cv::Vec4f position;
-    cv::Vec3f color; ///< in RGB order
-    cv::Vec2f texcoords;
+    glm::tvec4<float> position;
+    glm::tvec3<float> color; ///< in RGB order
+    glm::tvec2<float> texcoords;
 };
 class plane
@@ -95,6 +97,20 @@ public:
         d = -(point1.dot(normal));
     }
+    template<typename T, glm::precision P = glm::defaultp>
+    plane(const glm::tvec3<T, P>& point1, const glm::tvec3<T, P>& point2, const glm::tvec3<T, P>& point3)
+    {
+        glm::tvec3<T, P> v1 = point2 - point1;
+        glm::tvec3<T, P> v2 = point3 - point1;
+        glm::tvec3<T, P> normal = glm::cross(v1, v2);
+        normal = glm::normalize(normal);
+        a = normal[0];
+        b = normal[1];
+        c = normal[2];
+        d = -glm::dot(point1, normal);
+    }
+
     void normalize()
     {
         float length = sqrt(a*a + b*b + c*c);
@@ -168,7 +184,8 @@ struct TriangleToRasterize
  * @param[in] viewport_height Screen height.
  * @return A bounding box rectangle.
  */
-cv::Rect calculate_clipped_bounding_box(cv::Vec4f v0, cv::Vec4f v1, cv::Vec4f v2, int viewport_width, int viewport_height)
+template<typename T, glm::precision P = glm::defaultp>
+cv::Rect calculate_clipped_bounding_box(const glm::tvec2<T, P>& v0, const glm::tvec2<T, P>& v1, const glm::tvec2<T, P>& v2, int viewport_width, int viewport_height)
 {
     /* Old, producing artifacts:
     t.minX = max(min(t.v0.position[0], min(t.v1.position[0], t.v2.position[0])), 0.0f);
@@ -180,10 +197,10 @@ cv::Rect calculate_clipped_bounding_box(cv::Vec4f v0, cv::Vec4f v1, cv::Vec4f v2
     using std::max;
     using std::floor;
     using std::ceil;
-    int minX = max(min(floor(v0[0]), min(floor(v1[0]), floor(v2[0]))), 0.0f); // Readded this comment after merge: What about rounding, or rather the conversion from double to int?
-    int maxX = min(max(ceil(v0[0]), max(ceil(v1[0]), ceil(v2[0]))), static_cast<float>(viewport_width - 1));
-    int minY = max(min(floor(v0[1]), min(floor(v1[1]), floor(v2[1]))), 0.0f);
-    int maxY = min(max(ceil(v0[1]), max(ceil(v1[1]), ceil(v2[1]))), static_cast<float>(viewport_height - 1));
+    int minX = max(min(floor(v0[0]), min(floor(v1[0]), floor(v2[0]))), T(0)); // Readded this comment after merge: What about rounding, or rather the conversion from double to int?
+    int maxX = min(max(ceil(v0[0]), max(ceil(v1[0]), ceil(v2[0]))), static_cast<T>(viewport_width - 1));
+    int minY = max(min(floor(v0[1]), min(floor(v1[1]), floor(v2[1]))), T(0));
+    int maxY = min(max(ceil(v0[1]), max(ceil(v1[1]), ceil(v2[1]))), static_cast<T>(viewport_height - 1));
     return cv::Rect(minX, minY, maxX - minX, maxY - minY);
 };
@@ -197,22 +214,24 @@ cv::Rect calculate_clipped_bounding_box(cv::Vec4f v0, cv::Vec4f v1, cv::Vec4f v2
  * @param[in] v2 Third vertex.
  * @return Whether the vertices are CCW in screen space.
  */
-bool are_vertices_ccw_in_screen_space(const cv::Vec4f& v0, const cv::Vec4f& v1, const cv::Vec4f& v2)
+template<typename T, glm::precision P = glm::defaultp>
+bool are_vertices_ccw_in_screen_space(const glm::tvec2<T, P>& v0, const glm::tvec2<T, P>& v1, const glm::tvec2<T, P>& v2)
 {
-    float dx01 = v1[0] - v0[0];
-    float dy01 = v1[1] - v0[1];
-    float dx02 = v2[0] - v0[0];
-    float dy02 = v2[1] - v0[1];
+    const auto dx01 = v1[0] - v0[0]; // todo: replace with x/y (GLM)
+    const auto dy01 = v1[1] - v0[1];
+    const auto dx02 = v2[0] - v0[0];
+    const auto dy02 = v2[1] - v0[1];
-    return (dx01*dy02 - dy01*dx02 < 0.0f); // Original: (dx01*dy02 - dy01*dx02 > 0.0f). But: OpenCV has origin top-left, y goes down
+    return (dx01*dy02 - dy01*dx02 < T(0)); // Original: (dx01*dy02 - dy01*dx02 > 0.0f). But: OpenCV has origin top-left, y goes down
 };
-double implicit_line(float x, float y, const cv::Vec4f& v1, const cv::Vec4f& v2)
+template<typename T, glm::precision P = glm::defaultp>
+double implicit_line(float x, float y, const glm::tvec4<T, P>& v1, const glm::tvec4<T, P>& v2)
 {
     return ((double)v1[1] - (double)v2[1])*(double)x + ((double)v2[0] - (double)v1[0])*(double)y + (double)v1[0] * (double)v2[1] - (double)v2[0] * (double)v1[1];
 };
-std::vector<Vertex> clip_polygon_to_plane_in_4d(const std::vector<Vertex>& vertices, const cv::Vec4f& plane_normal)
+std::vector<Vertex> clip_polygon_to_plane_in_4d(const std::vector<Vertex>& vertices, const glm::tvec4<float>& plane_normal)
 {
     std::vector<Vertex> clippedVertices;
@@ -227,18 +246,18 @@ std::vector<Vertex> clip_polygon_to_plane_in_4d(const std::vector<Vertex>& verti
         int a = i; // the current vertex
         int b = (i + 1) % vertices.size(); // the following vertex (wraps around 0)
-        float fa = vertices[a].position.dot(plane_normal); // Note: Shouldn't they be unit length?
-        float fb = vertices[b].position.dot(plane_normal); // < 0 means on visible side, > 0 means on invisible side?
+        float fa = glm::dot(vertices[a].position, plane_normal); // Note: Shouldn't they be unit length?
+        float fb = glm::dot(vertices[b].position, plane_normal); // < 0 means on visible side, > 0 means on invisible side?
         if ((fa < 0 && fb > 0) || (fa > 0 && fb < 0)) // one vertex is on the visible side of the plane, one on the invisible? so we need to split?
         {
-            cv::Vec4f direction = vertices[b].position - vertices[a].position;
-            float t = -(plane_normal.dot(vertices[a].position)) / (plane_normal.dot(direction)); // the parametric value on the line, where the line to draw intersects the plane?
+            auto direction = vertices[b].position - vertices[a].position;
+            float t = -(glm::dot(plane_normal, vertices[a].position)) / (glm::dot(plane_normal, direction)); // the parametric value on the line, where the line to draw intersects the plane?
             // generate a new vertex at the line-plane intersection point
-            cv::Vec4f position = vertices[a].position + t*direction;
-            cv::Vec3f color = vertices[a].color + t*(vertices[b].color - vertices[a].color);
-            cv::Vec2f texCoord = vertices[a].texcoords + t*(vertices[b].texcoords - vertices[a].texcoords); // We could omit that if we don't render with texture.
+            auto position = vertices[a].position + t*direction;
+            auto color = vertices[a].color + t*(vertices[b].color - vertices[a].color);
+            auto texCoord = vertices[a].texcoords + t*(vertices[b].texcoords - vertices[a].texcoords); // We could omit that if we don't render with texture.
             if (fa < 0) // we keep the original vertex plus the new one
             {
@@ -398,12 +417,12 @@ boost::optional<TriangleToRasterize> process_prospective_tri(Vertex v0, Vertex v
     t.v2.position[1] = v2_screen[1];
     if (enable_backface_culling) {
-        if (!are_vertices_ccw_in_screen_space(t.v0.position, t.v1.position, t.v2.position))
+        if (!are_vertices_ccw_in_screen_space(glm::tvec2<float>(t.v0.position), glm::tvec2<float>(t.v1.position), glm::tvec2<float>(t.v2.position)))
             return boost::none;
     }
     // Get the bounding box of the triangle:
-    cv::Rect boundingBox = calculate_clipped_bounding_box(t.v0.position, t.v1.position, t.v2.position, viewport_width, viewport_height);
+    cv::Rect boundingBox = calculate_clipped_bounding_box(glm::tvec2<float>(t.v0.position), glm::tvec2<float>(t.v1.position), glm::tvec2<float>(t.v2.position), viewport_width, viewport_height);
     t.min_x = boundingBox.x;
     t.max_x = boundingBox.x + boundingBox.width;
     t.min_y = boundingBox.y;
@@ -486,10 +505,10 @@ void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer, cv::Mat
             gamma *= d*triangle.one_over_z2;
             // attributes interpolation
-            Vec3f color_persp = alpha*triangle.v0.color + beta*triangle.v1.color + gamma*triangle.v2.color; // Note: color might be empty if we use texturing and the shape-only model - but it works nonetheless? I think I set the vertex-colour to 127 in the shape-only model.
-            Vec2f texcoords_persp = alpha*triangle.v0.texcoords + beta*triangle.v1.texcoords + gamma*triangle.v2.texcoords;
-            Vec3f pixel_color;
+            glm::tvec3<float> color_persp = static_cast<float>(alpha)*triangle.v0.color + static_cast<float>(beta)*triangle.v1.color + static_cast<float>(gamma)*triangle.v2.color; // Note: color might be empty if we use texturing and the shape-only model - but it works nonetheless? I think I set the vertex-colour to 127 in the shape-only model.
+            glm::tvec2<float> texcoords_persp = static_cast<float>(alpha)*triangle.v0.texcoords + static_cast<float>(beta)*triangle.v1.texcoords + static_cast<float>(gamma)*triangle.v2.texcoords;
+            glm::tvec3<float> pixel_color;
             // Pixel Shader:
             if (texture) { // We use texturing
                 // check if texture != NULL?
@@ -511,8 +530,8 @@ void raster_triangle(TriangleToRasterize triangle, cv::Mat colourbuffer, cv::Mat
                 dvdy *= texture.get().mipmaps[0].rows;
                 // The Texture is in BGR, thus tex2D returns BGR
-                Vec3f texture_color = detail::tex2d(texcoords_persp, texture.get(), dudx, dudy, dvdx, dvdy); // uses the current texture
-                pixel_color = Vec3f(texture_color[2], texture_color[1], texture_color[0]);
+                glm::tvec3<float> texture_color = detail::tex2d(texcoords_persp, texture.get(), dudx, dudy, dvdx, dvdy); // uses the current texture
+                pixel_color = glm::tvec3<float>(texture_color[2], texture_color[1], texture_color[0]);
                 // other: color.mul(tex2D(texture, texCoord));
                 // Old note: for texturing, we load the texture as BGRA, so the colors get the wrong way in the next few lines...
             }
...
@@ -24,6 +24,9 @@
 #include "eos/render/detail/render_detail.hpp"
+#include "glm/vec2.hpp"
+#include "glm/vec4.hpp"
+
 #include "opencv2/core/core.hpp"
 /**
@@ -85,7 +88,7 @@ inline bool is_point_in_triangle(cv::Point2f point, cv::Point2f triV0, cv::Point
  * @param[in] depthbuffer Pre-calculated depthbuffer.
  * @return True if the whole triangle is visible in the image.
  */
-bool is_triangle_visible(const cv::Vec4f& v0, const cv::Vec4f& v1, const cv::Vec4f& v2, cv::Mat depthbuffer)
+bool is_triangle_visible(const glm::tvec4<float>& v0, const glm::tvec4<float>& v1, const glm::tvec4<float>& v2, cv::Mat depthbuffer)
 {
     // #Todo: Actually, only check the 3 vertex points, don't loop over the pixels - this should be enough.
@@ -96,10 +99,10 @@ bool is_triangle_visible(const cv::Vec4f& v0, const cv::Vec4f& v1, const cv::Vec
     // clipping against the frustums etc.
     // But as long as our model is fully on the screen, we're fine. Todo: Doublecheck that.
-    if (!detail::are_vertices_ccw_in_screen_space(v0, v1, v2))
+    if (!detail::are_vertices_ccw_in_screen_space(glm::tvec2<float>(v0), glm::tvec2<float>(v1), glm::tvec2<float>(v2)))
         return false;
-    cv::Rect bbox = detail::calculate_clipped_bounding_box(v0, v1, v2, viewport_width, viewport_height);
+    cv::Rect bbox = detail::calculate_clipped_bounding_box(glm::tvec2<float>(v0), glm::tvec2<float>(v1), glm::tvec2<float>(v2), viewport_width, viewport_height);
     int minX = bbox.x;
     int maxX = bbox.x + bbox.width;
     int minY = bbox.y;
...
@@ -122,7 +122,7 @@ namespace eos {
  * @param[in] enable_far_clipping Whether vertices should be clipped against the far plane.
  * @return A pair with the colourbuffer as its first element and the depthbuffer as the second element.
  */
-std::pair<cv::Mat, cv::Mat> render(Mesh mesh, cv::Mat model_view_matrix, cv::Mat projection_matrix, int viewport_width, int viewport_height, const boost::optional<Texture>& texture = boost::none, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
+std::pair<cv::Mat, cv::Mat> render(Mesh mesh, glm::tmat4x4<float> model_view_matrix, glm::tmat4x4<float> projection_matrix, int viewport_width, int viewport_height, const boost::optional<Texture>& texture = boost::none, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
 {
     // Some internal documentation / old todos or notes:
     // maybe change and pass depthBuffer as an optional arg (&?), because usually we never need it outside the renderer. Or maybe even a getDepthBuffer().
@@ -146,10 +146,10 @@ std::pair<cv::Mat, cv::Mat> render(Mesh mesh, cv::Mat model_view_matrix, cv::Mat
     vector<detail::Vertex> clipspace_vertices;
     clipspace_vertices.reserve(mesh.vertices.size());
     for (int i = 0; i < mesh.vertices.size(); ++i) { // "previously": mesh.vertex
-        Mat clipspace_coords = projection_matrix * model_view_matrix * Mat(mesh.vertices[i]);
-        cv::Vec3f vertex_colour;
+        glm::tvec4<float> clipspace_coords = projection_matrix * model_view_matrix * mesh.vertices[i];
+        glm::tvec3<float> vertex_colour;
         if (mesh.colors.empty()) {
-            vertex_colour = cv::Vec3f(0.5f, 0.5f, 0.5f);
+            vertex_colour = glm::tvec3<float>(0.5f, 0.5f, 0.5f);
         }
         else {
             vertex_colour = mesh.colors[i];
@@ -212,7 +212,7 @@ std::pair<cv::Mat, cv::Mat> render(Mesh mesh, cv::Mat model_view_matrix, cv::Mat
         // split the triangle if it intersects the near plane:
         if (enable_near_clipping)
         {
-            vertices = detail::clip_polygon_to_plane_in_4d(vertices, cv::Vec4f(0.0f, 0.0f, -1.0f, -1.0f)); // "Normal" (or "4D hyperplane") of the near-plane. I tested it and it works like this but I'm a little bit unsure because Songho says the normal of the near-plane is (0,0,-1,1) (maybe I have to switch around the < 0 checks in the function?)
+            vertices = detail::clip_polygon_to_plane_in_4d(vertices, glm::tvec4<float>(0.0f, 0.0f, -1.0f, -1.0f)); // "Normal" (or "4D hyperplane") of the near-plane. I tested it and it works like this but I'm a little bit unsure because Songho says the normal of the near-plane is (0,0,-1,1) (maybe I have to switch around the < 0 checks in the function?)
         }
         // triangulation of the polygon formed of vertices array
...
@@ -26,6 +26,10 @@
 #include "eos/render/detail/render_affine_detail.hpp"
 #include "eos/render/Mesh.hpp"
+#include "glm/vec2.hpp"
+#include "glm/vec3.hpp"
+#include "glm/vec4.hpp"
+
 #include "opencv2/core/core.hpp"
 #include <utility>
@@ -65,28 +69,29 @@ std::pair<cv::Mat, cv::Mat> render_affine(Mesh mesh, cv::Mat affine_camera_matri
     vector<detail::Vertex> projected_vertices;
     projected_vertices.reserve(mesh.vertices.size());
     for (int i = 0; i < mesh.vertices.size(); ++i) {
-        Mat vertex_screen_coords = affine_with_z * Mat(mesh.vertices[i]);
-        cv::Vec3f vertex_colour;
+        Mat vertex_screen_coords = affine_with_z * Mat(cv::Vec4f(mesh.vertices[i].x, mesh.vertices[i].y, mesh.vertices[i].z, mesh.vertices[i].w));
+        glm::tvec4<float> vertex_screen_coords_glm(vertex_screen_coords.at<float>(0), vertex_screen_coords.at<float>(1), vertex_screen_coords.at<float>(2), vertex_screen_coords.at<float>(3));
+        glm::tvec3<float> vertex_colour;
         if (mesh.colors.empty()) {
-            vertex_colour = cv::Vec3f(0.5f, 0.5f, 0.5f);
+            vertex_colour = glm::tvec3<float>(0.5f, 0.5f, 0.5f);
         }
         else {
            vertex_colour = mesh.colors[i];
        }
-        projected_vertices.push_back(detail::Vertex(vertex_screen_coords, vertex_colour, mesh.texcoords[i]));
+        projected_vertices.push_back(detail::Vertex(vertex_screen_coords_glm, vertex_colour, mesh.texcoords[i]));
     }
     // All vertices are screen-coordinates now
     vector<detail::TriangleToRasterize> triangles_to_raster;
     for (const auto& tri_indices : mesh.tvi) {
         if (do_backface_culling) {
-            if (!detail::are_vertices_ccw_in_screen_space(projected_vertices[tri_indices[0]].position, projected_vertices[tri_indices[1]].position, projected_vertices[tri_indices[2]].position))
+            if (!detail::are_vertices_ccw_in_screen_space(glm::tvec2<float>(projected_vertices[tri_indices[0]].position), glm::tvec2<float>(projected_vertices[tri_indices[1]].position), glm::tvec2<float>(projected_vertices[tri_indices[2]].position)))
                 continue; // don't render this triangle
         }
         // Get the bounding box of the triangle:
         // take care: What do we do if all 3 vertices are not visible. Seems to work on a test case.
-        cv::Rect bounding_box = detail::calculate_clipped_bounding_box(projected_vertices[tri_indices[0]].position, projected_vertices[tri_indices[1]].position, projected_vertices[tri_indices[2]].position, viewport_width, viewport_height);
+        cv::Rect bounding_box = detail::calculate_clipped_bounding_box(glm::tvec2<float>(projected_vertices[tri_indices[0]].position), glm::tvec2<float>(projected_vertices[tri_indices[1]].position), glm::tvec2<float>(projected_vertices[tri_indices[2]].position), viewport_width, viewport_height);
         auto min_x = bounding_box.x;
         auto max_x = bounding_box.x + bounding_box.width;
         auto min_y = bounding_box.y;
...
@@ -27,6 +27,8 @@
 #include "eos/render/render_affine.hpp"
 #include "eos/render/detail/render_detail.hpp"
+#include "glm/vec4.hpp"
+
 #include "opencv2/core/core.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
@@ -140,12 +142,16 @@ inline cv::Mat extract_texture(Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat
         // - Use render(), or as in render(...), transfer the vertices once, not in a loop over all triangles (vertices are getting transformed multiple times)
         // - We transform them later (below) a second time. Only do it once.
+        cv::Vec4f v0_as_Vec4f(mesh.vertices[triangle_indices[0]].x, mesh.vertices[triangle_indices[0]].y, mesh.vertices[triangle_indices[0]].z, mesh.vertices[triangle_indices[0]].w);
+        cv::Vec4f v1_as_Vec4f(mesh.vertices[triangle_indices[1]].x, mesh.vertices[triangle_indices[1]].y, mesh.vertices[triangle_indices[1]].z, mesh.vertices[triangle_indices[1]].w);
+        cv::Vec4f v2_as_Vec4f(mesh.vertices[triangle_indices[2]].x, mesh.vertices[triangle_indices[2]].y, mesh.vertices[triangle_indices[2]].z, mesh.vertices[triangle_indices[2]].w);
         // Project the triangle vertices to screen coordinates, and use the depthbuffer to check whether the triangle is visible:
-        const Vec4f v0 = Mat(affine_camera_matrix * Mat(mesh.vertices[triangle_indices[0]]));
-        const Vec4f v1 = Mat(affine_camera_matrix * Mat(mesh.vertices[triangle_indices[1]]));
-        const Vec4f v2 = Mat(affine_camera_matrix * Mat(mesh.vertices[triangle_indices[2]]));
-        if (!detail::is_triangle_visible(v0, v1, v2, depthbuffer))
+        const Vec4f v0 = Mat(affine_camera_matrix * Mat(v0_as_Vec4f));
+        const Vec4f v1 = Mat(affine_camera_matrix * Mat(v1_as_Vec4f));
+        const Vec4f v2 = Mat(affine_camera_matrix * Mat(v2_as_Vec4f));
+        if (!detail::is_triangle_visible(glm::tvec4<float>(v0[0], v0[1], v0[2], v0[3]), glm::tvec4<float>(v1[0], v1[1], v1[2], v1[3]), glm::tvec4<float>(v2[0], v2[1], v2[2], v2[3]), depthbuffer))
         {
             //continue;
             return;
@@ -156,7 +162,7 @@ inline cv::Mat extract_texture(Mesh mesh, cv::Mat affine_camera_matrix, cv::Mat
         {
             // Calculate how well visible the current triangle is:
             // (in essence, the dot product of the viewing direction (0, 0, 1) and the face normal)
-            const Vec3f face_normal = calculate_face_normal(Vec3f(Mat(mesh.vertices[triangle_indices[0]]).rowRange(0, 3)), Vec3f(Mat(mesh.vertices[triangle_indices[1]]).rowRange(0, 3)), Vec3f(Mat(mesh.vertices[triangle_indices[2]]).rowRange(0, 3)));
+            const Vec3f face_normal = calculate_face_normal(Vec3f(Mat(v0_as_Vec4f).rowRange(0, 3)), Vec3f(Mat(v1_as_Vec4f).rowRange(0, 3)), Vec3f(Mat(v2_as_Vec4f).rowRange(0, 3)));
             // Transform the normal to "screen" (kind of "eye") space using the upper 3x3 part of the affine camera matrix (=the translation can be ignored):
             Vec3f face_normal_transformed = Mat(affine_camera_matrix.rowRange(0, 3).colRange(0, 3) * Mat(face_normal));
             face_normal_transformed /= cv::norm(face_normal_transformed, cv::NORM_L2); // normalise to unit length
...