Commit cb6d90c9 authored by Richard Torenvliet's avatar Richard Torenvliet

Made an extension for halide

parent 515b33c9
...@@ -5,3 +5,5 @@ src/reconstruction/*.c ...@@ -5,3 +5,5 @@ src/reconstruction/*.c
*.o *.o
*.so *.so
src/reconstruction/build/ src/reconstruction/build/
*.dSYM
vendor/*
...@@ -49,6 +49,22 @@ show_reconstruction: ...@@ -49,6 +49,22 @@ show_reconstruction:
--model_shape_file data/pca_shape_model.npy \ --model_shape_file data/pca_shape_model.npy \
--n_components 6 --n_components 6
# Profile a full reconstruction run under cProfile to find hotspots.
# Phony: this is a command, not a file the recipe produces.
.PHONY: profile_reconstruction
profile_reconstruction:
	python -m cProfile src/main.py \
		--reconstruct \
		--files data/imm_face_db/*.asf \
		--model_texture_file data/pca_texture_model.npy \
		--model_shape_file data/pca_shape_model.npy \
		--n_components 6
# Generate a call graph (via pycallgraph) of one reconstruction run.
# Phony: this is a command, not a file the recipe produces.
.PHONY: graph_reconstruction
graph_reconstruction:
	python ./src/main.py \
		--generate_call_graph \
		--files data/imm_face_db/*.asf \
		--model_texture_file data/pca_texture_model.npy \
		--model_shape_file data/pca_shape_model.npy \
		--n_components 6
show_kivy: show_kivy:
python src/main.py \ python src/main.py \
--show_kivy \ --show_kivy \
...@@ -68,3 +84,6 @@ server: ...@@ -68,3 +84,6 @@ server:
ember: ember:
(cd viewer; ember server); (cd viewer; ember server);
# Rebuild the tags file for editor navigation; --python-kinds=-i skips
# imported names so tags point only at real definitions.
# Phony: without this, a file literally named `ctags` would mask the rule.
.PHONY: ctags
ctags:
	ctags -R --python-kinds=-i src
...@@ -9,13 +9,12 @@ TARGETS:= $(OPENCV) $(VIRTUALENV) data reconstruction ...@@ -9,13 +9,12 @@ TARGETS:= $(OPENCV) $(VIRTUALENV) data reconstruction
all: $(TARGETS) all: $(TARGETS)
include actions.mk include actions.mk
include src/reconstruction/build.mk
data: data/imm_face_db data: data/imm_face_db
reconstruction: texture.so reconstruction: texture.so halide src/reconstruction/texture_halide
# Build the Cython texture extension in place, next to its sources.
texture.so: src/reconstruction/texture.pyx
	(cd src/reconstruction; python setup.py build_ext --inplace)
OS := $(shell uname)
build: requirements.txt build: requirements.txt
@(source $(VIRTUALENV)/bin/activate; \ @(source $(VIRTUALENV)/bin/activate; \
......
#!/usr/local/bin/python
# python std # python std
import argparse import argparse
import logging import logging
...@@ -11,6 +12,7 @@ import pca ...@@ -11,6 +12,7 @@ import pca
import aam import aam
import imm_points as imm import imm_points as imm
import halide
from reconstruction import reconstruction from reconstruction import reconstruction
logging.basicConfig(level=logging.INFO, logging.basicConfig(level=logging.INFO,
...@@ -32,6 +34,11 @@ def add_parser_options(): ...@@ -32,6 +34,11 @@ def add_parser_options():
help='Reconstruct using kivy as a GUI' help='Reconstruct using kivy as a GUI'
) )
pca_group.add_argument(
'--generate_call_graph', action='store_true',
help='Generate call graph from the reconstruction'
)
pca_group.add_argument( pca_group.add_argument(
'--save_pca_shape', action='store_true', '--save_pca_shape', action='store_true',
help='save the pca shape model' help='save the pca shape model'
...@@ -207,6 +214,34 @@ def show_pca_model(args): ...@@ -207,6 +214,34 @@ def show_pca_model(args):
cv2.destroyAllWindows() cv2.destroyAllWindows()
def generate_call_graph(args):
    """Run one reconstruction under pycallgraph and render its call graph.

    Requires both --model_shape_file and --model_texture_file. The call
    graph of everything executed inside the `with` block is written to
    'filter_none.png' in the current working directory.
    """
    # NOTE: the original messages both said '--model_texture_file'; fixed
    # so each assert names the flag it actually checks.
    assert args.model_shape_file, '--model_shape_file needs to be provided to generate the call graph'
    assert args.model_texture_file, '--model_texture_file needs to be provided to generate the call graph'

    # Imported lazily so pycallgraph is only required for this command.
    from pycallgraph import PyCallGraph
    from pycallgraph.output import GraphvizOutput

    graphviz = GraphvizOutput(output_file='filter_none.png')

    with PyCallGraph(output=graphviz):
        shape_model = pca.PcaModel(args.model_shape_file)
        texture_model = pca.PcaModel(args.model_texture_file)

        # Fixed sample image: only the call structure matters here, not
        # which face is reconstructed.
        input_points = imm.IMMPoints(filename='data/imm_face_db/40-3m.asf')
        input_image = input_points.get_image()

        mean_points = imm.IMMPoints(points_list=shape_model.mean_values)
        mean_points.get_scaled_points(input_image.shape)

        reconstruction.reconstruct_texture(
            input_image,  # src image
            input_image,  # dst image
            texture_model,
            input_points,  # shape points input
            mean_points,   # shape points mean
        )
def show_reconstruction(args): def show_reconstruction(args):
assert args.model_shape_file, '--model_texture_file needs to be provided to save the pca model' assert args.model_shape_file, '--model_texture_file needs to be provided to save the pca model'
assert args.model_texture_file, '--model_texture_file needs to be provided to save the pca model' assert args.model_texture_file, '--model_texture_file needs to be provided to save the pca model'
...@@ -227,15 +262,13 @@ def show_reconstruction(args): ...@@ -227,15 +262,13 @@ def show_reconstruction(args):
input_image, # src image input_image, # src image
input_image, # dst image input_image, # dst image
texture_model, texture_model,
#Vt_texture, # Vt
input_points, # shape points input input_points, # shape points input
mean_points, # shape points mean mean_points, # shape points mean
#mean_values_texture, # mean texture
#triangles, # triangles
#n_texture_components # learned n_texture_components
) )
dst = reconstruction.get_texture(mean_points, texture_model.mean_values) dst = reconstruction.get_texture(
mean_points, texture_model.mean_values
)
cv2.imshow('original', input_points.get_image()) cv2.imshow('original', input_points.get_image())
cv2.imshow('reconstructed', input_image) cv2.imshow('reconstructed', input_image)
...@@ -249,7 +282,6 @@ def show_reconstruction(args): ...@@ -249,7 +282,6 @@ def show_reconstruction(args):
cv2.destroyAllWindows() cv2.destroyAllWindows()
def main(): def main():
"""main""" """main"""
parser = add_parser_options() parser = add_parser_options()
...@@ -266,6 +298,8 @@ def main(): ...@@ -266,6 +298,8 @@ def main():
show_reconstruction(args) show_reconstruction(args)
elif args.show_kivy: elif args.show_kivy:
reconstruct_with_model(args) reconstruct_with_model(args)
elif args.generate_call_graph:
generate_call_graph(args)
if __name__ == '__main__': if __name__ == '__main__':
main() main()
# Absolute path of the repository root (kept for recipes that need it).
HERE := $(shell pwd)

# Pick the Halide binary release matching the host OS ($(OS) is set from
# `uname` in the including Makefile).
ifeq ($(OS),Darwin)
HALIDE_LINK:=https://github.com/halide/Halide/releases/download/release_2016_04_27/halide-mac-64-trunk-2f11b9fce62f596e832907b82d87e8f75c53dd07.tgz
else
# TODO: the linux link depends on the gcc version; no check is in place
# for the right gcc version.
HALIDE_LINK:=https://github.com/halide/Halide/releases/download/release_2016_04_27/halide-linux-64-gcc53-trunk-2f11b9fce62f596e832907b82d87e8f75c53dd07.tgz
endif

# Build the Cython texture extension in place next to its sources.
texture.so: src/reconstruction/texture.pyx
	(cd src/reconstruction; python setup.py build_ext --inplace)

# Target is named after the file the recipe actually creates (it used to
# be `halide_2016_04_27.tar.gz`, so the prerequisite below never matched
# a rule and a fresh checkout could not build).
data/halide_2016_04_27.tar.gz:
	wget -O $@ $(HALIDE_LINK)

# Extraction depends on the tarball so `make -j` cannot unpack before the
# download has finished.
vendor/halide: data/halide_2016_04_27.tar.gz
	mkdir -p vendor/
	tar -xzvf $< -C vendor/

# Convenience alias: make sure the Halide release is downloaded/unpacked.
.PHONY: halide
halide: vendor/halide
	#DYLD_LIBRARY_PATH=vendor/halide/bin ./lesson_01

# Standalone Halide generator binary. Depends on its source so it is
# rebuilt when texture_halide.cpp changes (previously it had no
# prerequisite and was never rebuilt).
src/reconstruction/texture_halide: src/reconstruction/texture_halide.cpp
	clang++ $< -g -I vendor/halide/include -L vendor/halide/bin -lHalide -o $@ -std=c++11
import numpy as np import numpy as np
from distutils.core import setup from distutils.core import setup
from distutils.extension import Extension from distutils.extension import Extension
import os

# Force clang++ as both compiler and driver so the Halide extension below
# is built with the same toolchain as the vendored Halide binaries.
os.environ["CXX"] = "clang++"
os.environ["CC"] = "clang++"
from Cython.Build import cythonize from Cython.Build import cythonize
#g++ src/reconstruction/texture_halide.cpp
# -g -I vendor/halide/include -L vendor/halide/bin -lHalide -o $@ -std=c++11
extensions = [ extensions = [
Extension( Extension(
'texture', 'texture',
['texture.pyx'], ['texture.pyx'],
include_dirs=[np.get_include()], ) include_dirs=[np.get_include()], ),
Extension(
    # C++ extension module wrapping the Halide-based texture code.
    'halide',
    ['texture_halide.cpp'],
    language="c++",
    # Headers and libraries come from the vendored Halide release,
    # unpacked into vendor/halide by the top-level Makefile.
    include_dirs=[
        '../../vendor/halide/include',
        '../../vendor/halide'
    ],
    library_dirs=['../../vendor/halide/bin'],
    libraries=['Halide'],
    # Halide headers require C++11.
    extra_compile_args=['-std=c++11'],
)
] ]
setup( setup(
......
// Halide tutorial lesson 1: Getting started with Funcs, Vars, and Exprs
// This lesson demonstrates basic usage of Halide as a JIT compiler for imaging.
// On linux, you can compile and run it like so:
// g++ lesson_01*.cpp -g -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_01 -std=c++11
// LD_LIBRARY_PATH=../bin ./lesson_01
// On os x:
// g++ lesson_01*.cpp -g -I ../include -L ../bin -lHalide -o lesson_01 -std=c++11
// DYLD_LIBRARY_PATH=../bin ./lesson_01
// If you have the entire Halide source tree, you can also build it by
// running:
// make tutorial_lesson_01_basics
// in a shell with the current directory at the top of the halide
// source tree.
// The only Halide header file you need is Halide.h. It includes all of Halide.
#include <Python.h>
#include "Halide.h"
// Smoke-test for the embedded Halide JIT (adapted from Halide tutorial
// lesson 1): build a one-stage pipeline whose pixel (x, y) equals x + y,
// realize it as an 800x600 image, and verify every pixel. Always returns
// None; a mismatch is reported on stdout.
static PyObject * texture_src_dst(PyObject *self, PyObject *args) {
    Halide::Func grad;   // the single pipeline stage
    Halide::Var x, y;    // coordinate axes: x = column, y = row

    // Vars overload arithmetic, so x + y is already an Expr.
    Halide::Expr value = x + y;

    // Define the stage: pixel (x, y) takes the value of `value`. Nothing
    // is computed yet — this only builds the pipeline description.
    grad(x, y) = value;

    // JIT-compile the pipeline and evaluate it over an 800x600 domain.
    // Var arithmetic is 32-bit, so the realized image holds int32 pixels.
    Halide::Image<int32_t> output = grad.realize(800, 600);

    // Check the computed image against the expected gradient.
    for (int row = 0; row < output.height(); row++) {
        for (int col = 0; col < output.width(); col++) {
            if (output(col, row) != col + row) {
                printf("Something went wrong!\n"
                       "Pixel %d, %d was supposed to be %d, but instead it's %d\n",
                       col, row, col + row, output(col, row));
                Py_RETURN_NONE;
            }
        }
    }

    printf("Success!\n");
    Py_RETURN_NONE;
}
// Method table exported to Python: exposes texture_src_dst as the
// module-level function `halide.texture`.
static PyMethodDef module_methods[] = {
    // name of function, function_name
    {"texture", texture_src_dst, METH_VARARGS, "build texture"},
    {NULL, NULL, 0, NULL}  // sentinel terminating the table
};
// Python 2 extension entry point: runs on `import halide` and registers
// the method table under the module name "halide".
PyMODINIT_FUNC inithalide(void) {
    (void) Py_InitModule("halide", module_methods);
}
import cv2
import aam
def test_sample_from_triangles():
    """Visual check of aam.sample_from_triangles.

    Outlines the test triangle on two IMM face images, runs the sampler
    on the red image, and shows both images until a key is pressed.
    NOTE: visual only — the sampler's return value is not asserted.
    """
    blue_points = [[20, 20], [50, 160], [160, 20],
                   [50, 20], [60, 200], [180, 20]]

    red_points = [[40, 80], [130, 150], [40, 150],
                  [40, 80], [60, 82], [60, 100]]

    # blue_image = cv2.imread('../data/test_data/blue.png')
    # red_image = cv2.imread('../data/test_data/red.png')
    blue_image = cv2.imread('../data/imm_face_db/01-1m.jpg')
    red_image = cv2.imread('../data/imm_face_db/02-1m.jpg')

    triangles = [[0, 1, 2]]

    # The two drawing loops were verbatim duplicates; draw via one helper.
    _draw_triangles(blue_image, blue_points, triangles)
    _draw_triangles(red_image, red_points, triangles)

    all_triangles = aam.sample_from_triangles(
        red_image, red_points, triangles
    )

    cv2.imshow('blue_image', blue_image)
    cv2.imshow('red_image', red_image)
    cv2.waitKey(0)


def _draw_triangles(image, points, triangles):
    """Outline each triangle on `image` as 1px green lines (in place).

    `triangles` is a list of index triples into `points`; each edge
    (0-1, 1-2, 2-0) is drawn with cv2.line.
    """
    for tri in triangles:
        for a, b in ((0, 1), (1, 2), (2, 0)):
            cv2.line(image,
                     tuple(points[tri[a]]),
                     tuple(points[tri[b]]), (0, 255, 0), 1)
import json import json
import os.path import os.path
import base64 import base64
from cStringIO import StringIO
from glob import glob from glob import glob
import cv2
from tornado import websocket, web, ioloop, autoreload from tornado import websocket, web, ioloop, autoreload
from reconstruction import reconstruction
import pca
import imm_points as imm import imm_points as imm
from reconstruction import reconstruction
BASE = '../viewer/app' BASE = '../viewer/app'
FILES_DIR = '../data/' FILES_DIR = '../data/'
...@@ -24,6 +27,13 @@ class ImageWebSocketHandler(websocket.WebSocketHandler): ...@@ -24,6 +27,13 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
self.images = glob('{}/*.jpg'.format(FACE_DB)) self.images = glob('{}/*.jpg'.format(FACE_DB))
self.asf = glob('{}/*.asf'.format(FACE_DB)) self.asf = glob('{}/*.asf'.format(FACE_DB))
# todo get from settings
model_texture_file = '{}/pca_texture_model.npy'.format(FILES_DIR)
model_shape_file = '{}/pca_shape_model.npy'.format(FILES_DIR)
self.shape_model = pca.PcaModel(model_shape_file)
self.texture_model = pca.PcaModel(model_texture_file)
websocket.WebSocketHandler.__init__(self, *args, **kwargs) websocket.WebSocketHandler.__init__(self, *args, **kwargs)
def __get_base64_image(self, filename): def __get_base64_image(self, filename):
...@@ -48,12 +58,25 @@ class ImageWebSocketHandler(websocket.WebSocketHandler): ...@@ -48,12 +58,25 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
def handle_return_reconstruction(self, message): def handle_return_reconstruction(self, message):
""" Return the reconstruction of the given image """ """ Return the reconstruction of the given image """
image_index = message['reconstruction_index'] image_index = message['reconstruction_index']
filename = self.images[image_index] asf_filename = self.asf[image_index]
input_points = self.asf[image_index]
image = self.__get_base64_image(filename) input_points = imm.IMMPoints(filename=asf_filename)
input_image = input_points.get_image()
mean_points = imm.IMMPoints(points_list=self.shape_model.mean_values)
mean_points.get_scaled_points(input_image.shape)
#TODO This one is VERY SLOW, try to optimize
reconstruction.reconstruct_texture(
input_image, # src image
input_image, # dst image
self.texture_model,
input_points, # shape points input
mean_points, # shape points mean
)
reconstructed = reconstruction.reconstruct_texture(image) _, reconstructed = cv2.imencode('.jpg', input_image)
reconstructed = base64.b64encode(reconstructed)
self.write_message(json.dumps({'reconstructed': reconstructed})) self.write_message(json.dumps({'reconstructed': reconstructed}))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment