// Below we filter the correspondences, keeping only pairs that are closer than
// a certain distance. Alternatively (or additionally), one could discard e.g.
// the worst 5% of the distances.
// The surviving 2D image (edge) points and their matching vertex ids are
// collected into these two parallel containers:
vector<int> vertex_indices;
vector<cv::Vec2f> image_points;
assert(occluding_vertices.size() == idx_d.size());
for(inti=0;i<occluding_vertices.size();++i)
{
autoortho_scale=rendering_parameters.get_screen_width()/rendering_parameters.get_frustum().r;// This might be a bit of a hack - we recover the "real" scaling from the SOP estimate
if(idx_d[i].second<=distance_threshold*ortho_scale)// I think multiplying by the scale is good here and gives us invariance w.r.t. the image resolution and face size.
{
autoedge_point=image_edges[idx_d[i].first];
// Store the found 2D edge point, and the associated vertex id: