What Is the Easiest Way to Convert an ndarray into cv::Mat

What is the easiest way to convert ndarray into cv::Mat?

As suggested by kyamagu, you can use OpenCV's official Python wrapper code, in particular the pyopencv_to and pyopencv_from functions.

I struggled as you did with all the dependencies and the generated header files. Nevertheless, it is possible to reduce the complexity by "cleaning" cv2.cpp, as lightalchemist did here, in order to keep only what is necessary. You will need to adapt it to your needs and to the version of OpenCV you're using, but it's basically the same code that I used.

#include <Python.h>
#include "numpy/ndarrayobject.h"
#include "opencv2/core/core.hpp"

static PyObject* opencv_error = 0;

static int failmsg(const char *fmt, ...)
{
    char str[1000];

    va_list ap;
    va_start(ap, fmt);
    vsnprintf(str, sizeof(str), fmt, ap);
    va_end(ap);

    PyErr_SetString(PyExc_TypeError, str);
    return 0;
}

class PyAllowThreads
{
public:
    PyAllowThreads() : _state(PyEval_SaveThread()) {}
    ~PyAllowThreads()
    {
        PyEval_RestoreThread(_state);
    }
private:
    PyThreadState* _state;
};

class PyEnsureGIL
{
public:
    PyEnsureGIL() : _state(PyGILState_Ensure()) {}
    ~PyEnsureGIL()
    {
        PyGILState_Release(_state);
    }
private:
    PyGILState_STATE _state;
};

#define ERRWRAP2(expr) \
try \
{ \
    PyAllowThreads allowThreads; \
    expr; \
} \
catch (const cv::Exception &e) \
{ \
    PyErr_SetString(opencv_error, e.what()); \
    return 0; \
}

using namespace cv;

static PyObject* failmsgp(const char *fmt, ...)
{
    char str[1000];

    va_list ap;
    va_start(ap, fmt);
    vsnprintf(str, sizeof(str), fmt, ap);
    va_end(ap);

    PyErr_SetString(PyExc_TypeError, str);
    return 0;
}

static size_t REFCOUNT_OFFSET = (size_t)&(((PyObject*)0)->ob_refcnt) +
    (0x12345678 != *(const size_t*)"\x78\x56\x34\x12\0\0\0\0\0")*sizeof(int);

static inline PyObject* pyObjectFromRefcount(const int* refcount)
{
    return (PyObject*)((size_t)refcount - REFCOUNT_OFFSET);
}

static inline int* refcountFromPyObject(const PyObject* obj)
{
    return (int*)((size_t)obj + REFCOUNT_OFFSET);
}

class NumpyAllocator : public MatAllocator
{
public:
    NumpyAllocator() {}
    ~NumpyAllocator() {}

    void allocate(int dims, const int* sizes, int type, int*& refcount,
                  uchar*& datastart, uchar*& data, size_t* step)
    {
        PyEnsureGIL gil;

        int depth = CV_MAT_DEPTH(type);
        int cn = CV_MAT_CN(type);
        const int f = (int)(sizeof(size_t)/8);
        int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
                      depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
                      depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
                      depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
        int i;
        npy_intp _sizes[CV_MAX_DIM+1];
        for( i = 0; i < dims; i++ )
            _sizes[i] = sizes[i];
        if( cn > 1 )
        {
            /*if( _sizes[dims-1] == 1 )
                _sizes[dims-1] = cn;
            else*/
                _sizes[dims++] = cn;
        }
        PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
        if(!o)
            CV_Error_(CV_StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
        refcount = refcountFromPyObject(o);
        npy_intp* _strides = PyArray_STRIDES(o);
        for( i = 0; i < dims - (cn > 1); i++ )
            step[i] = (size_t)_strides[i];
        datastart = data = (uchar*)PyArray_DATA(o);
    }

    void deallocate(int* refcount, uchar*, uchar*)
    {
        PyEnsureGIL gil;
        if( !refcount )
            return;
        PyObject* o = pyObjectFromRefcount(refcount);
        Py_INCREF(o);
        Py_DECREF(o);
    }
};

NumpyAllocator g_numpyAllocator;

enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };

static int pyopencv_to(const PyObject* o, Mat& m, const char* name = "<unknown>", bool allowND=true)
{
    if(!o || o == Py_None)
    {
        if( !m.data )
            m.allocator = &g_numpyAllocator;
        return true;
    }

    if( PyInt_Check(o) )
    {
        double v[] = {PyInt_AsLong((PyObject*)o), 0., 0., 0.};
        m = Mat(4, 1, CV_64F, v).clone();
        return true;
    }
    if( PyFloat_Check(o) )
    {
        double v[] = {PyFloat_AsDouble((PyObject*)o), 0., 0., 0.};
        m = Mat(4, 1, CV_64F, v).clone();
        return true;
    }
    if( PyTuple_Check(o) )
    {
        int i, sz = (int)PyTuple_Size((PyObject*)o);
        m = Mat(sz, 1, CV_64F);
        for( i = 0; i < sz; i++ )
        {
            PyObject* oi = PyTuple_GET_ITEM(o, i);
            if( PyInt_Check(oi) )
                m.at<double>(i) = (double)PyInt_AsLong(oi);
            else if( PyFloat_Check(oi) )
                m.at<double>(i) = (double)PyFloat_AsDouble(oi);
            else
            {
                failmsg("%s is not a numerical tuple", name);
                m.release();
                return false;
            }
        }
        return true;
    }

    if( !PyArray_Check(o) )
    {
        failmsg("%s is not a numpy array, neither a scalar", name);
        return false;
    }

    bool needcopy = false, needcast = false;
    int typenum = PyArray_TYPE(o), new_typenum = typenum;
    int type = typenum == NPY_UBYTE ? CV_8U :
               typenum == NPY_BYTE ? CV_8S :
               typenum == NPY_USHORT ? CV_16U :
               typenum == NPY_SHORT ? CV_16S :
               typenum == NPY_INT ? CV_32S :
               typenum == NPY_INT32 ? CV_32S :
               typenum == NPY_FLOAT ? CV_32F :
               typenum == NPY_DOUBLE ? CV_64F : -1;

    if( type < 0 )
    {
        if( typenum == NPY_INT64 || typenum == NPY_UINT64 || typenum == NPY_LONG )
        {
            needcopy = needcast = true;
            new_typenum = NPY_INT;
            type = CV_32S;
        }
        else
        {
            failmsg("%s data type = %d is not supported", name, typenum);
            return false;
        }
    }

    int ndims = PyArray_NDIM(o);
    if(ndims >= CV_MAX_DIM)
    {
        failmsg("%s dimensionality (=%d) is too high", name, ndims);
        return false;
    }

    int size[CV_MAX_DIM+1];
    size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
    const npy_intp* _sizes = PyArray_DIMS(o);
    const npy_intp* _strides = PyArray_STRIDES(o);
    bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;

    for( int i = ndims-1; i >= 0 && !needcopy; i-- )
    {
        // these checks handle cases of
        // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
        // b) transposed arrays, where _strides[] elements go in non-descending order
        // c) flipped arrays, where some of _strides[] elements are negative
        if( (i == ndims-1 && (size_t)_strides[i] != elemsize) ||
            (i < ndims-1 && _strides[i] < _strides[i+1]) )
            needcopy = true;
    }

    if( ismultichannel && _strides[1] != (npy_intp)elemsize*_sizes[2] )
        needcopy = true;

    if (needcopy)
    {
        if( needcast )
            o = (PyObject*)PyArray_Cast((PyArrayObject*)o, new_typenum);
        else
            o = (PyObject*)PyArray_GETCONTIGUOUS((PyArrayObject*)o);
        _strides = PyArray_STRIDES(o);
    }

    for(int i = 0; i < ndims; i++)
    {
        size[i] = (int)_sizes[i];
        step[i] = (size_t)_strides[i];
    }

    // handle degenerate case
    if( ndims == 0) {
        size[ndims] = 1;
        step[ndims] = elemsize;
        ndims++;
    }

    if( ismultichannel )
    {
        ndims--;
        type |= CV_MAKETYPE(0, size[2]);
    }

    if( ndims > 2 && !allowND )
    {
        failmsg("%s has more than 2 dimensions", name);
        return false;
    }

    m = Mat(ndims, size, type, PyArray_DATA(o), step);

    if( m.data )
    {
        m.refcount = refcountFromPyObject(o);
        if (!needcopy)
        {
            m.addref(); // protect the original numpy array from deallocation
                        // (since Mat destructor will decrement the reference counter)
        }
    };
    m.allocator = &g_numpyAllocator;

    return true;
}

static PyObject* pyopencv_from(const Mat& m)
{
    if( !m.data )
        Py_RETURN_NONE;
    Mat temp, *p = (Mat*)&m;
    if(!p->refcount || p->allocator != &g_numpyAllocator)
    {
        temp.allocator = &g_numpyAllocator;
        ERRWRAP2(m.copyTo(temp));
        p = &temp;
    }
    p->addref();
    return pyObjectFromRefcount(p->refcount);
}
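
For what it's worth, here is a minimal usage sketch of these two functions from a hand-written CPython extension function. It is assumed to live in the same translation unit as the code above, and the function name invert_image and the bitwise_not call are mine, purely for illustration; the Cython route below achieves the same thing more conveniently.

static PyObject* invert_image(PyObject* /*self*/, PyObject* args)
{
    PyObject* array = NULL;
    if (!PyArg_ParseTuple(args, "O", &array))
        return NULL;

    // ndarray -> cv::Mat (no copy if the array is already contiguous)
    cv::Mat img;
    if (!pyopencv_to(array, img, "image"))
        return NULL;                 // pyopencv_to has already set a Python error

    // any OpenCV call producing a Mat will do for the example
    cv::Mat result;
    cv::bitwise_not(img, result);

    // cv::Mat -> ndarray (copied into a NumPy-allocated buffer if needed)
    return pyopencv_from(result);
}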

Once you have a cleaned-up cv2.cpp file, here is some Cython code that takes care of the conversion. Notice the declaration of, and the call to, the import_array() function (a NumPy function defined in a header included somewhere in cv2.cpp): it is necessary to define some macros used by pyopencv_to, and if you don't call it you will get segmentation faults, as lightalchemist pointed out.

from cpython.ref cimport PyObject

# Declares OpenCV's cv::Mat class
cdef extern from "opencv2/core/core.hpp":
    cdef cppclass Mat:
        pass

# Declares the official wrapper conversion functions + NumPy's import_array() function
cdef extern from "cv2.cpp":
    void import_array()
    PyObject* pyopencv_from(const Mat&)
    int pyopencv_to(PyObject*, Mat&)


# Function to be called at initialization
cdef void init():
    import_array()

# Python to C++ conversion
cdef Mat nparrayToMat(object array):
    cdef Mat mat
    cdef PyObject* pyobject = <PyObject*> array
    pyopencv_to(pyobject, mat)
    return <Mat> mat

# C++ to Python conversion
cdef object matToNparray(Mat mat):
    return <object> pyopencv_from(mat)

Note: somehow I got an error with NumPy 1.8.0 on Fedora 20 while compiling, due to a strange return statement in the import_array macro. I had to remove it manually to make it work, but I can't find this return statement in NumPy 1.8.0's source code on GitHub.

Converting numpy.ndarray into opencv cv::Mat

I used the following project: https://github.com/Algomorph/pyboostcvconverter and statically linked to it.

Notes (to avoid a segmentation fault):

1) PY_ARRAY_UNIQUE_SYMBOL should be defined in the file where import_array is invoked.
In other files where the header is included, use NO_IMPORT_ARRAY (together with the same PY_ARRAY_UNIQUE_SYMBOL).

#define PY_ARRAY_UNIQUE_SYMBOL PYVISION_ARRAY_API
#include <pyboostcvconverter/pyboostcvconverter.hpp>

2) Invoke init_ar from within the BOOST_PYTHON_MODULE block (a sketch of this wiring follows the snippet below)

/**
 * @brief Initialize Numpy Array
 */
static void init_ar( )
{
    // initialize
    Py_Initialize();

    // defined in numpy
    import_array();
}
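
For context, a hedged sketch of that wiring, continuing the same source file as point 1 above (so the PY_ARRAY_UNIQUE_SYMBOL define and the converter include are already in place). The module name "pyvision" is an assumption, and the converter registration helpers (pbcvt::matToNDArrayBoostConverter, pbcvt::matFromNDArrayBoostConverter) follow the project's README; double-check the exact names for the version you use.

// Module entry point: initialize NumPy first, then register the converters.
BOOST_PYTHON_MODULE(pyvision)
{
    // NumPy must be initialized before any converter touches an ndarray
    init_ar();

    // Register the cv::Mat <-> numpy.ndarray converters shipped with pyboostcvconverter
    boost::python::to_python_converter<cv::Mat, pbcvt::matToNDArrayBoostConverter>();
    pbcvt::matFromNDArrayBoostConverter();

    // ... expose your own functions with boost::python::def(...) ...
}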

How to convert a ndarray into opencv::Mat using Boost.Python?

Finally I found the solution in the documentation:

We have to receive the NumPy array as a numeric::array in C++ and then do the following to convert it into a cv::Mat efficiently.

void* img_arr = PyArray_DATA((PyObject*)arr.ptr());

Then we need to pass this void pointer to the cv::Mat constructor, along with the other required parameters:

Mat image(rows, cols , CV_8UC3, img_arr);
  1. First int parameter: the number of rows.
  2. Second int parameter: the number of columns.
  3. Type parameter: the image type (e.g. CV_8UC3 for an 8-bit, 3-channel image).
  4. Void pointer parameter: the image data.

And this resolves the problem.
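
Putting the pieces together, here is a minimal sketch of such a wrapper function. It assumes import_array() has already been called, takes a generic boost::python::object for portability (rather than the numeric::array type mentioned above, which newer Boost versions no longer ship), and assumes a C-contiguous 8-bit, 3-channel image; the function name ndarrayToMat is mine.

#include <boost/python.hpp>
#include <numpy/ndarrayobject.h>
#include <opencv2/core/core.hpp>

cv::Mat ndarrayToMat(boost::python::object arr)
{
    PyArrayObject* np_arr = (PyArrayObject*)arr.ptr();

    // Read the shape from the ndarray itself rather than hard-coding it
    int rows = (int)PyArray_DIM(np_arr, 0);
    int cols = (int)PyArray_DIM(np_arr, 1);

    // Raw pointer to the (assumed C-contiguous) pixel buffer
    void* img_arr = PyArray_DATA(np_arr);

    // No copy here: the Mat wraps the ndarray's memory, so the ndarray
    // must outlive the Mat (call .clone() if you need an owning copy)
    return cv::Mat(rows, cols, CV_8UC3, img_arr);
}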

OpenCV numpy to cv::Mat conversion

I finally found a solution. I don't know if this is the correct way of doing it, but it works.

I made a header file that contains

PyMODINIT_FUNC PyInit_cv2();

as a forward declaration, and then copied over everything in the modules/python/src2 directory. I assumed this was already taken care of in cv2.cpp, because that exact line already appears there.

But just adding that include works perfectly fine, apparently. Now I can call the init function when my own module is initialized, and it seems to set up all the needed state properly.
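
For illustration, a minimal sketch of what that forward declaration plus the call during module initialization could look like, assuming Python 3; the module name "mymodule" and the file layout are my assumptions, not from the original answer.

#include <Python.h>

PyMODINIT_FUNC PyInit_cv2();   // implemented in the copied-over cv2.cpp

static struct PyModuleDef mymodule_def = {
    PyModuleDef_HEAD_INIT,
    "mymodule",                // module name (assumption)
    nullptr,                   // docstring
    -1,                        // per-interpreter state size
    nullptr                    // method table
};

PyMODINIT_FUNC PyInit_mymodule()
{
    // Initialize OpenCV's Python module first; among other things this runs
    // import_array() and sets up the conversion machinery.
    if (!PyInit_cv2())
        return nullptr;

    return PyModule_Create(&mymodule_def);
}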

C++ conversion from NumPy array to Mat (OpenCV)

The best solution in your situation is using a custom boost::python converter for the cv::Mat object. OpenCV has a Python wrapper, and when you use that wrapper you operate on NumPy arrays; you don't even need to know that those arrays are converted to cv::Mat objects while "crossing the C++ <-> Python border". Writing such a converter for a simple type is quite easy; creating a converter for cv::Mat, however, is not. Fortunately someone else has already done this: here is a version for OpenCV 2.x and here one for 3.x. If you are not familiar with boost::python converters, this article should help you.
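
Since the answer notes that a converter for a simple type is easy to write, here is a rough sketch of what registering a to-python converter for a small struct looks like; the Point2i struct, the tuple representation, and the module name "example" are illustrative, not the cv::Mat converter itself.

#include <boost/python.hpp>

struct Point2i { int x, y; };

struct point_to_python
{
    // Called by Boost.Python whenever a Point2i must cross into Python
    static PyObject* convert(const Point2i& p)
    {
        // Expose the point as a plain Python tuple (x, y)
        return boost::python::incref(boost::python::make_tuple(p.x, p.y).ptr());
    }
};

BOOST_PYTHON_MODULE(example)
{
    boost::python::to_python_converter<Point2i, point_to_python>();
}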

Hope it helps. If you have any problems, let us know.

how to transform an OpenCV cvMat back to ndarray in numpy?

Try appending [:,:] to the matrix (i.e. use mat[:,:] instead of mat) in your call to np.asarray; doing this also allows asarray to work on images.

Your example:

>>> import cv
>>> import numpy as np
>>> mat = cv.CreateMat( 3 , 5 , cv.CV_32FC1 )
>>> cv.Set( mat , 7 )
>>> a = np.asarray( mat[:,:] )
>>> a
array([[ 7., 7., 7., 7., 7.],
       [ 7., 7., 7., 7., 7.],
       [ 7., 7., 7., 7., 7.]], dtype=float32)

And for an image:

>>> im = cv.CreateImage( ( 5 , 5 ) , 8 , 1 )
>>> cv.Set( im , 100 )
>>> im_array = np.asarray( im )
>>> im_array
array(<iplimage(nChannels=1 width=5 height=5 widthStep=8 )>, dtype=object)
>>> im_array = np.asarray( im[:,:] )
>>> im_array
array([[100, 100, 100, 100, 100],
       [100, 100, 100, 100, 100],
       [100, 100, 100, 100, 100],
       [100, 100, 100, 100, 100],
       [100, 100, 100, 100, 100]], dtype=uint8)

Converting Numpy Array to OpenCV Array

Your code can be fixed as follows:

import numpy as np, cv
vis = np.zeros((384, 836), np.float32)
h,w = vis.shape
vis2 = cv.CreateMat(h, w, cv.CV_32FC3)
vis0 = cv.fromarray(vis)
cv.CvtColor(vis0, vis2, cv.CV_GRAY2BGR)

Short explanation:

  1. The np.uint32 data type is not supported by OpenCV (it supports uint8, int8, uint16, int16, int32, float32, float64).
  2. cv.CvtColor can't handle NumPy arrays, so both arguments have to be converted to an OpenCV type; cv.fromarray does this conversion.
  3. Both arguments of cv.CvtColor must have the same depth, so I've changed the source type to 32-bit float to match the destination.

Also, I recommend you use the newer version of the OpenCV Python API, because it uses NumPy arrays as its primary data type:

import numpy as np, cv2
vis = np.zeros((384, 836), np.float32)
vis2 = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

Not able to convert Numpy array to OpenCV Mat in Cython when trying to write c++ wrapper function

Finally I found the answer.
In order to use NumPy, we have to call numpy.import_array(). For details, see here.

It can be tricky to find where to call it; in my case, I just call it in my .pyx script:

import numpy as np  # Import Python functions, attributes, submodules of numpy
cimport numpy as np # Import numpy C/C++ API

np.import_array()

def cudaWarpPerspectiveWrapper(np.ndarray[np.uint8_t, ndim=2] _src,
                               np.ndarray[np.float32_t, ndim=2] _M,
                               _size_tuple,
                               int _flags=INTER_NEAREST):
    # Create GPU/device InputArray for src
    cdef Mat src_mat
    cdef GpuMat src_gpu
    pyopencv_to(<PyObject*> _src, src_mat)
    src_gpu.upload(src_mat)

    # Create CPU/host InputArray for M
    cdef Mat M_mat = Mat()
    pyopencv_to(<PyObject*> _M, M_mat)

    # Create Size object from size tuple
    # Note that size/shape in Python is handled in row-major-order -- therefore, width is [1] and height is [0]
    cdef Size size = Size(<int> _size_tuple[0], <int> _size_tuple[1])

    # Create empty GPU/device OutputArray for dst
    cdef GpuMat dst_gpu = GpuMat()
    warpPerspective(src_gpu, dst_gpu, M_mat, size, 2)

    # Get result of dst
    cdef Mat dst_host
    dst_gpu.download(dst_host)
    cdef np.ndarray out = <np.ndarray> pyopencv_from(dst_host)
    return out

Then everything works like magic.


