Commit 77f271f1 authored by Patrik Huber

Moved some function implementations into headers and changed to snake_case

parent 9f2bf891
...@@ -167,7 +167,7 @@ int main(int argc, char *argv[]) ...@@ -167,7 +167,7 @@ int main(int argc, char *argv[])
vertexIndices.emplace_back(vertexIdx); vertexIndices.emplace_back(vertexIdx);
imagePoints.emplace_back(landmarks[i]); imagePoints.emplace_back(landmarks[i]);
} }
catch (std::out_of_range& e) { catch (const std::out_of_range&) {
// just continue if the point isn't defined in the mapping // just continue if the point isn't defined in the mapping
} }
++ibugId; ++ibugId;
...@@ -178,7 +178,7 @@ int main(int argc, char *argv[]) ...@@ -178,7 +178,7 @@ int main(int argc, char *argv[])
// Draw the mean-face landmarks projected using the estimated camera: // Draw the mean-face landmarks projected using the estimated camera:
for (auto&& vertex : modelPoints) { for (auto&& vertex : modelPoints) {
Vec2f screenPoint = fitting::projectAffine(vertex, affineCam, image.cols, image.rows); Vec2f screenPoint = fitting::project_affine(vertex, affineCam, image.cols, image.rows);
cv::circle(outimg, cv::Point2f(screenPoint), 5.0f, { 0.0f, 255.0f, 0.0f }); cv::circle(outimg, cv::Point2f(screenPoint), 5.0f, { 0.0f, 255.0f, 0.0f });
} }
...@@ -193,7 +193,7 @@ int main(int argc, char *argv[]) ...@@ -193,7 +193,7 @@ int main(int argc, char *argv[])
// Draw the projected points again, this time using the fitted model shape: // Draw the projected points again, this time using the fitted model shape:
for (auto&& idx : vertexIndices) { for (auto&& idx : vertexIndices) {
Vec4f modelPoint(mesh.vertices[idx][0], mesh.vertices[idx][1], mesh.vertices[idx][2], mesh.vertices[idx][3]); Vec4f modelPoint(mesh.vertices[idx][0], mesh.vertices[idx][1], mesh.vertices[idx][2], mesh.vertices[idx][3]);
Vec2f screenPoint = fitting::projectAffine(modelPoint, affineCam, image.cols, image.rows); Vec2f screenPoint = fitting::project_affine(modelPoint, affineCam, image.cols, image.rows);
cv::circle(outimg, cv::Point2f(screenPoint), 3.0f, { 0.0f, 0.0f, 255.0f }); cv::circle(outimg, cv::Point2f(screenPoint), 3.0f, { 0.0f, 0.0f, 255.0f });
} }
......
...@@ -22,6 +22,8 @@ ...@@ -22,6 +22,8 @@
#ifndef AFFINECAMERAESTIMATION_HPP_ #ifndef AFFINECAMERAESTIMATION_HPP_
#define AFFINECAMERAESTIMATION_HPP_ #define AFFINECAMERAESTIMATION_HPP_
#include "eos/render/utils.hpp"
#include "opencv2/core/core.hpp" #include "opencv2/core/core.hpp"
#include <vector> #include <vector>
...@@ -51,13 +53,23 @@ cv::Mat estimateAffineCamera(std::vector<cv::Vec2f> imagePoints, std::vector<cv: ...@@ -51,13 +53,23 @@ cv::Mat estimateAffineCamera(std::vector<cv::Vec2f> imagePoints, std::vector<cv:
* also flips the y-axis (the image origin is top-left, while in * also flips the y-axis (the image origin is top-left, while in
* clip space top is +1 and bottom is -1). * clip space top is +1 and bottom is -1).
* *
* Note: Assumes the affine camera matrix only projects from world
* to clip space, because a subsequent window transform is applied.
*
* @param[in] vertex A vertex in 3D space. vertex[3] = 1.0f. * @param[in] vertex A vertex in 3D space. vertex[3] = 1.0f.
* @param[in] affineCameraMatrix A 3x4 affine camera matrix. * @param[in] affine_camera_matrix A 3x4 affine camera matrix.
* @param[in] screenWidth Width of the screen or window used for projection. * @param[in] screen_width Width of the screen or window used for projection.
* @param[in] screenHeight Height of the screen or window used for projection. * @param[in] screen_height Height of the screen or window used for projection.
* @return A vector with x and y coordinates transformed to screen coordinates. * @return A vector with x and y coordinates transformed to screen coordinates.
*/ */
/**
 * Projects a 3D vertex to 2D screen coordinates: multiplies the vertex
 * with a 3x4 affine camera matrix to get clip-space coordinates, then
 * applies the window transform (which also flips the y-axis, since the
 * image origin is top-left while clip space has +1 at the top).
 *
 * @param[in] vertex A vertex in 3D space; vertex[3] is expected to be 1.0f.
 * @param[in] affine_camera_matrix A 3x4 affine camera matrix.
 * @param[in] screen_width Width of the screen or window used for projection.
 * @param[in] screen_height Height of the screen or window used for projection.
 * @return A vector with x and y coordinates transformed to screen coordinates.
 */
inline cv::Vec2f project_affine(cv::Vec4f vertex, const cv::Mat& affine_camera_matrix, int screen_width, int screen_height)
{
	// Transform to clip space:
	const cv::Mat clip_coords = affine_camera_matrix * cv::Mat(vertex);
	// Take the x and y coordinates in clip space and apply the window transform:
	return render::clip_to_screen_space(cv::Vec2f(clip_coords.rowRange(0, 2)), screen_width, screen_height);
}
} /* namespace fitting */ } /* namespace fitting */
} /* namespace eos */ } /* namespace eos */
......
...@@ -39,12 +39,25 @@ namespace eos { ...@@ -39,12 +39,25 @@ namespace eos {
* we flip y at the end. * we flip y at the end.
* Qt: Origin top-left. OpenGL: bottom-left. OCV: top-left. * Qt: Origin top-left. OpenGL: bottom-left. OCV: top-left.
* *
* @param[in] clipCoordinates A point in clip coordinates. * @param[in] clip_coordinates A point in clip coordinates.
* @param[in] screenWidth Width of the screen or window. * @param[in] screen_width Width of the screen or window.
* @param[in] screenHeight Height of the screen or window. * @param[in] screen_height Height of the screen or window.
* @return A vector with x and y coordinates transformed to screen space. * @return A vector with x and y coordinates transformed to screen space.
*/ */
/**
 * Transforms a point from clip space ([-1, 1] x [-1, 1]) to screen (image)
 * coordinates. The y-axis is flipped, because the image origin is top-left
 * (Qt/OpenCV convention) while in clip space +1 is at the top (OpenGL
 * convention).
 *
 * @param[in] clip_coordinates A point in clip coordinates.
 * @param[in] screen_width Width of the screen or window.
 * @param[in] screen_height Height of the screen or window.
 * @return A vector with x and y coordinates transformed to screen space.
 */
inline cv::Vec2f clip_to_screen_space(cv::Vec2f clip_coordinates, int screen_width, int screen_height)
{
	// Window transform: map clip-space [-1, 1] to [0, screen_width]:
	const float x_ss = (clip_coordinates[0] + 1.0f) * (screen_width / 2.0f);
	// Analogous for y, but flipped: image origin is top-left, clip space has +1 at the top:
	const float y_ss = screen_height - (clip_coordinates[1] + 1.0f) * (screen_height / 2.0f);
	return cv::Vec2f(x_ss, y_ss);
	/* Note: What we do here is equivalent to
	   x_w = (x * vW/2) + vW/2;
	   However, Shirley says we should do:
	   x_w = (x * vW/2) + (vW-1)/2;
	   (analogous for y)
	   Todo: Check the consequences.
	*/
}
/** /**
* Transforms a point from image (screen) coordinates to * Transforms a point from image (screen) coordinates to
......
...@@ -144,14 +144,5 @@ Mat estimateAffineCamera(vector<Vec2f> imagePoints, vector<Vec4f> modelPoints) ...@@ -144,14 +144,5 @@ Mat estimateAffineCamera(vector<Vec2f> imagePoints, vector<Vec4f> modelPoints)
return P_Affine; return P_Affine;
} }
// Projects a homogeneous 3D vertex through a 3x4 affine camera matrix and
// maps the resulting clip-space x/y into screen coordinates via the window
// transform (which also flips y for the top-left image origin).
cv::Vec2f projectAffine(cv::Vec4f vertex, cv::Mat affineCameraMatrix, int screenWidth, int screenHeight)
{
	// Multiply the camera matrix with the vertex to obtain clip-space coordinates:
	const Mat clipCoords = affineCameraMatrix * Mat(vertex);
	// Apply the window transform to the clip-space x and y and return the screen point:
	return render::clipToScreenSpace(cv::Vec2f(clipCoords.rowRange(0, 2)), screenWidth, screenHeight);
}
} /* namespace fitting */ } /* namespace fitting */
} /* namespace eos */ } /* namespace eos */
...@@ -24,21 +24,6 @@ using cv::Vec2f; ...@@ -24,21 +24,6 @@ using cv::Vec2f;
namespace eos { namespace eos {
namespace render { namespace render {
// Maps a clip-space point ([-1, 1] per axis) to screen (image) coordinates,
// flipping y because the image origin is top-left (Qt/OpenCV) while OpenGL's
// clip space has +1 at the top.
Vec2f clipToScreenSpace(Vec2f clipCoordinates, int screenWidth, int screenHeight)
{
	const float halfWidth = screenWidth / 2.0f;
	const float halfHeight = screenHeight / 2.0f;
	// Window transform for x: [-1, 1] -> [0, screenWidth]:
	const float x_ss = (clipCoordinates[0] + 1.0f) * halfWidth;
	// Same for y, then flip it so the origin ends up at the top-left:
	const float y_ss = screenHeight - (clipCoordinates[1] + 1.0f) * halfHeight;
	return Vec2f(x_ss, y_ss);
	/* Note: What we do here is equivalent to
	   x_w = (x * vW/2) + vW/2;
	   However, Shirley says we should do:
	   x_w = (x * vW/2) + (vW-1)/2;
	   (analogous for y)
	   Todo: Check the consequences.
	*/
}
Vec2f screenToClipSpace(Vec2f screenCoordinates, int screenWidth, int screenHeight) Vec2f screenToClipSpace(Vec2f screenCoordinates, int screenWidth, int screenHeight)
{ {
float x_cs = screenCoordinates[0] / (screenWidth / 2.0f) - 1.0f; float x_cs = screenCoordinates[0] / (screenWidth / 2.0f) - 1.0f;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment