Commit fcae2025 authored by Richard Torenvliet

Backup progress

parent 574ccbce
@@ -119,6 +119,31 @@ public:
}
};
/**
* @brief Converts the given landmark name to the mapped name (e.g. a model vertex id).
*
* If no mappings are loaded, the input name is returned unchanged (identity mapping).
*
* @param[in] landmark_name A landmark name to convert.
* @return The mapped name if a mapping exists, an empty string otherwise.
*/
std::string get_vertex_id(std::string landmark_name) const {
if (landmark_mappings.empty()) {
// perform identity mapping, i.e. return the input
return landmark_name;
}
else {
auto&& converted_landmark = landmark_mappings.find(landmark_name);
if (converted_landmark != std::end(landmark_mappings)) {
// landmark mapping found, return it
return converted_landmark->second;
}
else { // landmark_name does not match the key of any element in the map
return "";
}
}
}
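// Usage sketch (mapping file and landmark names are illustrative only):
//   core::LandmarkMapper mapper("ibug_to_sfm.txt");
//   mapper.get_vertex_id("45");            // returns the mapped name, e.g. a vertex id
//   mapper.get_vertex_id("no_such_name");  // returns ""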
/**
* @brief Returns the number of loaded landmark mappings.
*
......
@@ -193,13 +193,20 @@ namespace eos {
* @param[in] image_points 2D image points of the detected face landmarks.
* @param[in] image_width Width of the source image.
* @param[in] image_height Height of the source image.
* @return A bounding box around the given points, clamped to the image boundaries.
*/
cv::Rect get_face_roi(const vector<cv::Vec2f>& image_points, int image_width, int image_height) {
cv::Rect bbox = cv::boundingRect(image_points);
// sometimes the bbox starts outside the image boundaries due to wrong image points.
bbox.x = bbox.x < 0 ? 0 : bbox.x;
bbox.y = bbox.y < 0 ? 0 : bbox.y;
// Clamp the right and bottom edges to the image width and height.
bbox.width = bbox.x + bbox.width < image_width ? bbox.width: image_width - bbox.x - 1;
bbox.height = bbox.y + bbox.height < image_height ? bbox.height: image_height - bbox.y - 1;
assert(bbox.x + bbox.width < image_width);
assert(bbox.y + bbox.height < image_height);
return bbox;
}
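// Worked example (hypothetical numbers): for a 640x480 image and a raw
// boundingRect of x=-10, y=20, width=660, height=100, the clamping above
// yields x=0, y=20, width=639 (i.e. 640 - 0 - 1) and height=100.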
/**
......
@@ -46,6 +46,9 @@ struct FittingResult
std::vector<float> blendshape_coefficients;
core::LandmarkCollection<cv::Vec2f> landmarks;
core::Mesh mesh;
std::vector<cv::Vec4f> model_points; // the points in the 3D shape model
std::vector<int> vertex_indices; // their vertex indices
std::vector<cv::Vec2f> image_points; // the corresponding 2D landmark points
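// Note: the three vectors above are parallel arrays: model_points[i] is the model-space
// position of vertex vertex_indices[i], observed at image_points[i] in the frame.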
};
} /* namespace fitting */
......
@@ -72,7 +72,7 @@ struct ModelContour
// We store r/l separately because we currently only fit to the contour facing the camera.
// Also if we were to fit to the whole contour: Be careful not to just fit to the closest. The
// "invisible" ones behind might be closer on an e.g 90° angle. Store CNT for left/right side separately?
// "invisible" ones behind might be closer on an e.g 90� angle. Store CNT for left/right side separately?
/**
* Helper method to load a ModelContour from
@@ -226,7 +226,7 @@ inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<in
* have different size.
* Correspondence can be established using get_nearest_contour_correspondences().
*
* If the yaw angle is between +-7.5°, both contours will be selected.
*
* Note: Maybe rename to find_nearest_contour_points, to highlight that there is (potentially a lot) computational cost involved?
*
@@ -251,7 +251,7 @@ std::pair<std::vector<std::string>, std::vector<int>> select_contour(float yaw_a
model_contour_indices.insert(end(model_contour_indices), begin(model_contour.left_contour), end(model_contour.left_contour));
contour_landmark_identifiers.insert(end(contour_landmark_identifiers), begin(contour_landmarks.left_contour), end(contour_landmarks.left_contour));
}
// Note there's an overlap between the angles - if a subject is between +- 7.5°, both contours get added.
return std::make_pair(contour_landmark_identifiers, model_contour_indices);
};
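// Example: at a yaw angle of +3.0° (inside the +- 7.5° overlap), both contour branches
// above match, so the identifiers and indices of both sides end up in the returned pair.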
@@ -273,7 +273,7 @@ std::pair<std::vector<std::string>, std::vector<int>> select_contour(float yaw_a
* @param[in] viewport Current viewport to use.
* @return A tuple with the 2D contour landmark points, the corresponding points in the 3D shape model and their vertex indices.
*/
inline std::tuple<std::vector<cv::Vec2f>, std::vector<cv::Vec4f>, std::vector<int>> get_nearest_contour_correspondences(const core::LandmarkCollection<cv::Vec2f>& landmarks, const std::vector<std::string>& landmark_contour_identifiers, const std::vector<int>& model_contour_indices, const core::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport)
{
// These are the additional contour-correspondences we're going to find and then use!
std::vector<cv::Vec4f> model_points_cnt; // the points in the 3D shape model
......
@@ -460,6 +460,7 @@ public:
int width;
int height;
int last_frame_number;
bool add_random;
Keyframe last_keyframe;
std::unique_ptr<std::thread> frame_buffer_worker;
@@ -488,6 +489,8 @@ public:
skip_frames = settings.get<int>("frames.skip_frames", 0);
frames_per_bin = settings.get<unsigned int>("frames.frames_per_bin", 2);
add_random = settings.get<bool>("frames.add_random", false);
this->reconstruction_data = reconstruction_data;
unsigned int num_shape_coeff = reconstruction_data.morphable_model.get_shape_model().get_num_principal_components();
@@ -501,6 +504,17 @@ public:
// reset frame count (to be sure)
n_frames = 0;
total_frames = 0;
frames_dropped = 0;
Mat frame;
// Some videos start with one or more empty frames: keep reading until we get a
// non-empty frame, and also skip the first skip_frames frames.
while (frame.empty() || total_frames < skip_frames) {
if (!cap.read(frame)) {
break; // end of stream reached before a usable frame was found
}
total_frames++;
}
std::cout << "Starting video at frame: " << total_frames << std::endl;
// std::cout << "Settings: " << std::endl <<
// "min_frames: " << min_frames << std::endl <<
@@ -572,11 +586,108 @@ public:
fitting_result.rendering_parameters = rendering_params;
cv::Rect face_roi = core::get_face_roi(image_points, frame_width, frame_height);
float frame_laplacian_score = static_cast<float>(variance_of_laplacian(frame(face_roi)));
return Keyframe(frame_laplacian_score, frame, fitting_result, total_frames);
}
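// For reference: variance_of_laplacian() is defined elsewhere in this file; a
// sketch of the usual blur/sharpness metric it is assumed to compute:
//   double variance_of_laplacian(const cv::Mat& image) {
//       cv::Mat laplacian;
//       cv::Laplacian(image, laplacian, CV_64F);
//       cv::Scalar mean, stddev;
//       cv::meanStdDev(laplacian, mean, stddev);
//       return stddev[0] * stddev[0]; // variance of the Laplacian response
//   }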
/**
* Fill the buffer by iterating through the video until the very last frame.
*/
void video_iterator() {
// Go and fill the buffer with more frames while we are reconstructing.
while (next()) { }
// stop the video
__stop();
std::cout << "Video stopped at:" << total_frames << " frames - in buff " << n_frames << std::endl;
};
Keyframe get_last_keyframe() {
return last_keyframe;
};
bool try_add_random(Keyframe keyframe) {
// Determine whether to add or not:
float yaw_angle = glm::degrees(glm::yaw(keyframe.fitting_result.rendering_parameters.get_rotation()));
auto idx = angle_to_index(yaw_angle);
bool add_frame = false;
keyframe.yaw_angle = yaw_angle;
// Score is 0 for totally black frames; we don't want those:
if (keyframe.score == 0) {
return false;
}
// Only add frames where the subject is looking (roughly) at the camera:
if (idx == 3 || idx == 4) {
add_frame = true;
}
if (!add_frame) {
return false;
}
// Add the keyframe:
bins[idx].push_back(std::make_shared<Keyframe>(keyframe));
if (bins[idx].size() > frames_per_bin) {
n_frames--;
// need to remove the lowest one:
std::sort(std::begin(bins[idx]), std::end(bins[idx]),
[](const auto& lhs, const auto& rhs) { return lhs->score > rhs->score; });
bins[idx].resize(frames_per_bin);
}
return true;
}
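// Note: angle_to_index() is defined elsewhere in this class; the idx == 3 || idx == 4
// check above assumes its binning puts near-frontal yaw angles into bins 3 and 4.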
Keyframe next_key_frame() {
Mat frame = __get_new_frame();
auto keyframe = generate_new_keyframe(frame);
return keyframe;
}
/**
*
* Reads the next frame and, if it qualifies, adds it to the frame buffer.
* Returns false when the next frame is empty, which means we reached the end
* of the video stream (i.e., file or camera).
*
* @return True if a frame was read, false if the stream has ended.
*/
bool next() {
Mat frame = __get_new_frame();
if (frame.empty()) {
return false;
}
auto keyframe = generate_new_keyframe(frame);
bool frame_added = try_add(keyframe);
// Added or not, we put this as the last keyframe
last_keyframe = keyframe;
if(frame_added) {
n_frames++;
// Setting that the buffer has changed:
frame_buffer_changed = true;
}
total_frames++;
// fill up the buffer until we hit the minimum frames we want in the buffer.
if(n_frames < min_frames) {
return next();
}
return true;
}
/**
* Try to add a new keyframe. Looks at the Laplacian score and the yaw angle: the yaw angle
* determines the bin in which it will be added, and the score will be compared to the other frames in the
@@ -600,8 +711,6 @@ public:
return false;
}
std::cout << "idx: " << idx << std::endl;
// always add when we don't have enough frames
if (bins[idx].size() < frames_per_bin) {
add_frame = true; // definitely adding - no need to compare scores against the other frames in this bin.
@@ -689,62 +798,6 @@ public:
return get_keyframes();
}
/**
* Update the PCA shape coefficients. We probably need some kind of mutex, for updating and reading
@@ -856,6 +909,8 @@ private:
// Note: these settings are for future use
int drop_frames = 0;
int frames_dropped = 0;
unsigned int num_shape_coefficients_to_fit = 0;
};
......
@@ -263,32 +263,21 @@ public:
cv::Mat add_and_merge(const cv::Mat& isomap)
{
// Merge isomaps, add the current to the already merged, pixel by pixel:
#pragma omp parallel for // parallelise over rows; a bare "omp target" would need explicit data mapping for the cv::Mat buffers
for (int r = 0; r < isomap.rows; ++r)
{
for (int c = 0; c < isomap.cols; ++c)
{
if (isomap.at<cv::Vec4b>(r, c)[3] <= threshold)
{
continue; // ignore this pixel, not visible in the extracted isomap of this current frame
}
// we're sure to have a visible pixel, merge it:
// merged_pixel = (old_average * visible_count + new_pixel) / (visible_count + 1)
merged_isomap.at<cv::Vec4f>(r, c)[0] = (merged_isomap.at<cv::Vec4f>(r, c)[0] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[0]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[1] = (merged_isomap.at<cv::Vec4f>(r, c)[1] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[1]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[2] = (merged_isomap.at<cv::Vec4f>(r, c)[2] * visibility_counter.at<int>(r, c) + isomap.at<cv::Vec4b>(r, c)[2]) / (visibility_counter.at<int>(r, c) + 1);
merged_isomap.at<cv::Vec4f>(r, c)[3] = 255; // as soon as we've seen the pixel visible once, we set it to visible.
++visibility_counter.at<int>(r, c);
}
}
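// The per-channel update above is the standard incremental mean: if mean_n is the
// average of the first n observed values, then mean_(n+1) = (mean_n * n + x) / (n + 1),
// so no per-pixel history needs to be stored besides the visibility count.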
cv::Mat merged_isomap_uchar;
@@ -296,6 +285,7 @@ public:
return merged_isomap_uchar;
};
cv::Mat sharpen(const cv::Mat& isomap) {
cv::Mat output;
cv::Mat kernel = (cv::Mat_<float>(5, 5) << -0.125, -0.125, -0.125, -0.125, -0.125,
......
@@ -65,6 +65,7 @@ PYBIND11_PLUGIN(eos) {
new (&instance) core::LandmarkMapper(filename);
}, "Constructs a new landmark mapper from a file containing mappings from one set of landmark identifiers to another.", py::arg("filename"))
// We can't expose the convert member function yet - need std::optional (or some trick with self/this and a lambda)
.def("get_vertex_id", &core::LandmarkMapper::get_vertex_id, "Returns the vertex id given a landmark name")
;
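// Python usage sketch (hypothetical session, assuming the module is imported as 'eos'):
//   mapper = eos.core.LandmarkMapper("ibug_to_sfm.txt")
//   mapper.get_vertex_id("45")  # the mapped vertex id, or "" if unmapped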
py::class_<core::Mesh>(core_module, "Mesh", "This class represents a 3D mesh consisting of vertices, vertex colour information and texture coordinates.")
@@ -187,12 +188,21 @@ PYBIND11_PLUGIN(eos) {
py::class_<fitting::ContourLandmarks>(fitting_module, "ContourLandmarks", "Defines which 2D landmarks comprise the right and left face contour.")
.def_static("load", &fitting::ContourLandmarks::load, "Helper method to load contour landmarks from a text file with landmark mappings, like ibug_to_sfm.txt.", py::arg("filename"))
// .def("get_contour_landmarks", [](const fitting::RenderingParameters& rendering_params, const core::LandmarkCollection<cv::Vec2f>& landmarks, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, float yaw_angle, const core::Mesh& mesh, const glm::mat4x4& view_model, const glm::vec4& viewport) {
// cv::Mat affine_from_ortho = fitting::get_3x4_affine_camera_matrix(rendering_params, width, height);
// }, "Get contour landmarks", py::arg("morphable_model"), py::arg("landmarks"), py::arg("contour_landmarks"), py::arg("model_contour"), py::arg("yaw_angle"), py::arg("mesh"), py::arg("view_model"), py::arg("ortho_projection"), py::arg("contour_landmarks"), py::arg("model_contour"), py::arg("num_iterations") = 5, py::arg("num_shape_coefficients_to_fit") = -1, py::arg("lambda") = 30.0f);
;
py::class_<fitting::ModelContour>(fitting_module, "ModelContour", "Definition of the vertex indices that define the right and left model contour.")
.def_static("load", &fitting::ModelContour::load, "Helper method to load a ModelContour from a json file from the hard drive.", py::arg("filename"))
;
// fitting_module.def("get_contour_landmarks", [](
// const core::LandmarkCollection<cv::Vec2f>& landmarks, const ContourLandmarks& contour_landmarks, const ModelContour& model_contour, float yaw_angle, const core::Mesh& mesh, const glm::mat4x4& view_model, const glm::mat4x4& ortho_projection, const glm::vec4& viewport) {
//
// }, "Get contour landmarks");
fitting_module.def("fit_shape_and_pose", [](const morphablemodel::MorphableModel& morphable_model, const std::vector<morphablemodel::Blendshape>& blendshapes, const std::vector<glm::vec2>& landmarks, const std::vector<std::string>& landmark_ids, const core::LandmarkMapper& landmark_mapper, int image_width, int image_height, const morphablemodel::EdgeTopology& edge_topology, const fitting::ContourLandmarks& contour_landmarks, const fitting::ModelContour& model_contour, int num_iterations, int num_shape_coefficients_to_fit, float lambda) {
assert(landmarks.size() == landmark_ids.size());
std::vector<float> pca_coeffs;
......
@@ -39,4 +39,4 @@
465
]
}
}
\ No newline at end of file
}
@@ -39,6 +39,10 @@ add_executable(accuracy-evaluation accuracy-evaluation.cpp)
target_link_libraries(accuracy-evaluation eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
target_link_libraries(accuracy-evaluation "$<$<CXX_COMPILER_ID:GNU>:-pthread>$<$<CXX_COMPILER_ID:Clang>:-pthreads>")
add_executable(accuracy-evaluation-random accuracy-evaluation-random.cpp)
target_link_libraries(accuracy-evaluation-random eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
target_link_libraries(accuracy-evaluation-random "$<$<CXX_COMPILER_ID:GNU>:-pthread>$<$<CXX_COMPILER_ID:Clang>:-pthreads>")
add_executable(openmp-test openmp-test.cpp)
target_link_libraries(openmp-test eos ${OpenCV_LIBS} ${Boost_LIBRARIES})
target_link_libraries(openmp-test "$<$<CXX_COMPILER_ID:GNU>:-pthread>$<$<CXX_COMPILER_ID:Clang>:-pthreads>")
@@ -48,4 +52,5 @@ install(TARGETS scm-to-cereal DESTINATION bin)
install(TARGETS bfm-binary-to-cereal DESTINATION bin)
install(TARGETS edgestruct-csv-to-json DESTINATION bin)
install(TARGETS accuracy-evaluation DESTINATION bin)
install(TARGETS accuracy-evaluation-random DESTINATION bin)
install(TARGETS openmp-test DESTINATION bin)