00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043 #ifndef __OPENCV_GPU_MATRIX_OPERATIONS_HPP__
00044 #define __OPENCV_GPU_MATRIX_OPERATIONS_HPP__
00045
00046 namespace cv
00047 {
00048
00049 namespace gpu
00050 {
00051
00055
00056 inline GpuMat::GpuMat() : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) {}
00057
00058 inline GpuMat::GpuMat(int _rows, int _cols, int _type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
00059 {
00060 if( _rows > 0 && _cols > 0 )
00061 create( _rows, _cols, _type );
00062 }
00063
00064 inline GpuMat::GpuMat(Size _size, int _type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
00065 {
00066 if( _size.height > 0 && _size.width > 0 )
00067 create( _size.height, _size.width, _type );
00068 }
00069
00070 inline GpuMat::GpuMat(int _rows, int _cols, int _type, const Scalar& _s)
00071 : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
00072 {
00073 if(_rows > 0 && _cols > 0)
00074 {
00075 create(_rows, _cols, _type);
00076 *this = _s;
00077 }
00078 }
00079
00080 inline GpuMat::GpuMat(Size _size, int _type, const Scalar& _s)
00081 : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
00082 {
00083 if( _size.height > 0 && _size.width > 0 )
00084 {
00085 create( _size.height, _size.width, _type );
00086 *this = _s;
00087 }
00088 }
00089
00090 inline GpuMat::GpuMat(const GpuMat& m)
00091 : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend)
00092 {
00093 if( refcount )
00094 CV_XADD(refcount, 1);
00095 }
00096
// Wraps a user-provided buffer without copying or taking ownership
// (refcount stays 0, so release() will not free the memory).
// _step is the row stride in bytes; Mat::AUTO_STEP means "tightly packed".
inline GpuMat::GpuMat(int _rows, int _cols, int _type, void* _data, size_t _step)
    : flags(Mat::MAGIC_VAL + (_type & TYPE_MASK)), rows(_rows), cols(_cols), step(_step), data((uchar*)_data), refcount(0),
    datastart((uchar*)_data), dataend((uchar*)_data)
{
    // Minimal legal stride: one tightly packed row.
    size_t minstep = cols*elemSize();
    if( step == Mat::AUTO_STEP )
    {
        step = minstep;
        flags |= Mat::CONTINUOUS_FLAG;
    }
    else
    {
        // A single-row matrix is continuous regardless of the stride passed in.
        if( rows == 1 ) step = minstep;
        CV_DbgAssert( step >= minstep );
        flags |= step == minstep ? Mat::CONTINUOUS_FLAG : 0;
    }
    // End of the addressable region: the last row starts at step*(rows-1)
    // and occupies minstep bytes.
    // NOTE(review): assumes rows >= 1 — with rows == 0 the unsigned
    // step*(rows-1) would wrap; confirm callers never pass 0 here.
    dataend += step*(rows-1) + minstep;
}
00115
// Size-based overload of the user-buffer constructor above: wraps _data
// without copying or taking ownership (refcount stays 0).
// _step is the row stride in bytes; Mat::AUTO_STEP means "tightly packed".
inline GpuMat::GpuMat(Size _size, int _type, void* _data, size_t _step)
    : flags(Mat::MAGIC_VAL + (_type & TYPE_MASK)), rows(_size.height), cols(_size.width),
    step(_step), data((uchar*)_data), refcount(0),
    datastart((uchar*)_data), dataend((uchar*)_data)
{
    // Minimal legal stride: one tightly packed row.
    size_t minstep = cols*elemSize();
    if( step == Mat::AUTO_STEP )
    {
        step = minstep;
        flags |= Mat::CONTINUOUS_FLAG;
    }
    else
    {
        // A single-row matrix is continuous regardless of the stride passed in.
        if( rows == 1 ) step = minstep;
        CV_DbgAssert( step >= minstep );
        flags |= step == minstep ? Mat::CONTINUOUS_FLAG : 0;
    }
    // NOTE(review): assumes rows >= 1 — with rows == 0 the unsigned
    // step*(rows-1) would wrap; confirm callers never pass an empty size.
    dataend += step*(rows-1) + minstep;
}
00135
00136
// Sub-matrix constructor: a view on m restricted to rowRange x colRange.
// Shares m's memory (refcount incremented); no data is copied.
inline GpuMat::GpuMat(const GpuMat& m, const Range& rowRange, const Range& colRange)
{
    flags = m.flags;
    step = m.step; refcount = m.refcount;
    data = m.data; datastart = m.datastart; dataend = m.dataend;

    if( rowRange == Range::all() )
        rows = m.rows;
    else
    {
        CV_Assert( 0 <= rowRange.start && rowRange.start <= rowRange.end && rowRange.end <= m.rows );
        rows = rowRange.size();
        // Advance the origin past the skipped rows.
        data += step*rowRange.start;
    }

    if( colRange == Range::all() )
        cols = m.cols;
    else
    {
        CV_Assert( 0 <= colRange.start && colRange.start <= colRange.end && colRange.end <= m.cols );
        cols = colRange.size();
        data += colRange.start*elemSize();
        // A view narrower than the parent cannot be stored continuously.
        flags &= cols < m.cols ? ~Mat::CONTINUOUS_FLAG : -1;
    }

    // A single row is continuous by definition, whatever the stride.
    if( rows == 1 )
        flags |= Mat::CONTINUOUS_FLAG;

    if( refcount )
        CV_XADD(refcount, 1);
    // Normalize degenerate (empty) ranges to the canonical 0x0 state.
    if( rows <= 0 || cols <= 0 )
        rows = cols = 0;
}
00170
// ROI constructor: a view on the rectangle roi inside m (shared memory,
// refcount incremented; no data copied).
// NOTE(review): data(m.data + roi.y*step) reads the member 'step' in the
// init list — this relies on 'step' being declared before 'data' in the
// class so it is initialized first; confirm against the class declaration.
inline GpuMat::GpuMat(const GpuMat& m, const Rect& roi)
    : flags(m.flags), rows(roi.height), cols(roi.width),
    step(m.step), data(m.data + roi.y*step), refcount(m.refcount),
    datastart(m.datastart), dataend(m.dataend)
{
    // A view narrower than the parent row cannot be continuous.
    flags &= roi.width < m.cols ? ~Mat::CONTINUOUS_FLAG : -1;
    data += roi.x*elemSize();
    CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
        0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
    if( refcount )
        CV_XADD(refcount, 1);
    // Normalize an empty ROI to the canonical 0x0 state.
    if( rows <= 0 || cols <= 0 )
        rows = cols = 0;
}
00185
00186 inline GpuMat::GpuMat(const Mat& m)
00187 : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) { upload(m); }
00188
00189 inline GpuMat::~GpuMat() { release(); }
00190
// Shallow assignment: shares m's buffer and bumps its reference count.
inline GpuMat& GpuMat::operator = (const GpuMat& m)
{
    if( this != &m )
    {
        // Reference m's buffer BEFORE releasing our own: this order keeps a
        // buffer shared by both operands alive throughout the assignment.
        if( m.refcount )
            CV_XADD(m.refcount, 1);
        release();
        flags = m.flags;
        rows = m.rows; cols = m.cols;
        step = m.step; data = m.data;
        datastart = m.datastart; dataend = m.dataend;
        refcount = m.refcount;
    }
    return *this;
}
00206
00207 inline GpuMat& GpuMat::operator = (const Mat& m) { upload(m); return *this; }
00208
00209 template <class T> inline GpuMat::operator DevMem2D_<T>() const { return DevMem2D_<T>(rows, cols, (T*)data, step); }
00210 template <class T> inline GpuMat::operator PtrStep_<T>() const { return PtrStep_<T>(static_cast< DevMem2D_<T> >(*this)); }
00211
00212
00213
00214 inline GpuMat::operator Mat() const
00215 {
00216 Mat m;
00217 download(m);
00218 return m;
00219 }
00220
00221
00222
00223 inline GpuMat GpuMat::row(int y) const { return GpuMat(*this, Range(y, y+1), Range::all()); }
00224 inline GpuMat GpuMat::col(int x) const { return GpuMat(*this, Range::all(), Range(x, x+1)); }
00225 inline GpuMat GpuMat::rowRange(int startrow, int endrow) const { return GpuMat(*this, Range(startrow, endrow), Range::all()); }
00226 inline GpuMat GpuMat::rowRange(const Range& r) const { return GpuMat(*this, r, Range::all()); }
00227 inline GpuMat GpuMat::colRange(int startcol, int endcol) const { return GpuMat(*this, Range::all(), Range(startcol, endcol)); }
00228 inline GpuMat GpuMat::colRange(const Range& r) const { return GpuMat(*this, Range::all(), r); }
00229
00230 inline GpuMat GpuMat::clone() const
00231 {
00232 GpuMat m;
00233 copyTo(m);
00234 return m;
00235 }
00236
00237
00238
00239
00240
00241 inline void GpuMat::assignTo( GpuMat& m, int type ) const
00242 {
00243 if( type < 0 )
00244 m = *this;
00245 else
00246 convertTo(m, type);
00247 }
00248
00249
00250
00251
00252 inline void GpuMat::create(Size _size, int _type) { create(_size.height, _size.width, _type); }
00253
00254
00255
00256 inline void GpuMat::swap(GpuMat& b)
00257 {
00258 std::swap( flags, b.flags );
00259 std::swap( rows, b.rows ); std::swap( cols, b.cols );
00260 std::swap( step, b.step ); std::swap( data, b.data );
00261 std::swap( datastart, b.datastart );
00262 std::swap( dataend, b.dataend );
00263 std::swap( refcount, b.refcount );
00264 }
00265
// Reconstructs, from the data/datastart/dataend pointers, the size of the
// whole parent matrix this view was carved from (wholeSize) and the view's
// top-left offset inside it (ofs).
inline void GpuMat::locateROI( Size& wholeSize, Point& ofs ) const
{
    size_t esz = elemSize(), minstep;
    // delta1: byte offset of this view's origin from the buffer start;
    // delta2: total byte span of the whole buffer.
    ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;
    CV_DbgAssert( step > 0 );
    if( delta1 == 0 )
        ofs.x = ofs.y = 0;
    else
    {
        // Decompose the byte offset into whole rows (y) and elements (x).
        ofs.y = (int)(delta1/step);
        ofs.x = (int)((delta1 - step*ofs.y)/esz);
        CV_DbgAssert( data == datastart + ofs.y*step + ofs.x*esz );
    }
    minstep = (ofs.x + cols)*esz;
    // The last row occupies only minstep bytes, hence "+ 1" after dividing.
    wholeSize.height = (int)((delta2 - minstep)/step + 1);
    wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
    wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
    wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
}
00285
// Grows (positive deltas) or shrinks (negative deltas) the current ROI on
// each side, clamped to the parent matrix recovered via locateROI().
// Returns *this, adjusted in place.
inline GpuMat& GpuMat::adjustROI( int dtop, int dbottom, int dleft, int dright )
{
    Size wholeSize; Point ofs;
    size_t esz = elemSize();
    locateROI( wholeSize, ofs );
    // New ROI bounds, clamped to [0, wholeSize) of the parent matrix.
    int row1 = std::max(ofs.y - dtop, 0), row2 = std::min(ofs.y + rows + dbottom, wholeSize.height);
    int col1 = std::max(ofs.x - dleft, 0), col2 = std::min(ofs.x + cols + dright, wholeSize.width);
    // Shift the origin by the (possibly negative) row/column deltas.
    data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
    rows = row2 - row1; cols = col2 - col1;
    // Recompute the continuity flag for the new geometry.
    if( esz*cols == step || rows == 1 )
        flags |= Mat::CONTINUOUS_FLAG;
    else
        flags &= ~Mat::CONTINUOUS_FLAG;
    return *this;
}
00301
00302 inline GpuMat GpuMat::operator()( Range rowRange, Range colRange ) const { return GpuMat(*this, rowRange, colRange); }
00303 inline GpuMat GpuMat::operator()( const Rect& roi ) const { return GpuMat(*this, roi); }
00304
00305 inline bool GpuMat::isContinuous() const { return (flags & Mat::CONTINUOUS_FLAG) != 0; }
00306 inline size_t GpuMat::elemSize() const { return CV_ELEM_SIZE(flags); }
00307 inline size_t GpuMat::elemSize1() const { return CV_ELEM_SIZE1(flags); }
00308 inline int GpuMat::type() const { return CV_MAT_TYPE(flags); }
00309 inline int GpuMat::depth() const { return CV_MAT_DEPTH(flags); }
00310 inline int GpuMat::channels() const { return CV_MAT_CN(flags); }
00311 inline size_t GpuMat::step1() const { return step/elemSize1(); }
00312 inline Size GpuMat::size() const { return Size(cols, rows); }
00313 inline bool GpuMat::empty() const { return data == 0; }
00314
00315 inline uchar* GpuMat::ptr(int y)
00316 {
00317 CV_DbgAssert( (unsigned)y < (unsigned)rows );
00318 return data + step*y;
00319 }
00320
00321 inline const uchar* GpuMat::ptr(int y) const
00322 {
00323 CV_DbgAssert( (unsigned)y < (unsigned)rows );
00324 return data + step*y;
00325 }
00326
00327 template<typename _Tp> inline _Tp* GpuMat::ptr(int y)
00328 {
00329 CV_DbgAssert( (unsigned)y < (unsigned)rows );
00330 return (_Tp*)(data + step*y);
00331 }
00332
00333 template<typename _Tp> inline const _Tp* GpuMat::ptr(int y) const
00334 {
00335 CV_DbgAssert( (unsigned)y < (unsigned)rows );
00336 return (const _Tp*)(data + step*y);
00337 }
00338
00339 inline GpuMat GpuMat::t() const
00340 {
00341 GpuMat tmp;
00342 transpose(*this, tmp);
00343 return tmp;
00344 }
00345
00346 static inline void swap( GpuMat& a, GpuMat& b ) { a.swap(b); }
00347
00348 inline GpuMat createContinuous(int rows, int cols, int type)
00349 {
00350 GpuMat m;
00351 createContinuous(rows, cols, type, m);
00352 return m;
00353 }
00354
00355 inline void createContinuous(Size size, int type, GpuMat& m)
00356 {
00357 createContinuous(size.height, size.width, type, m);
00358 }
00359
00360 inline GpuMat createContinuous(Size size, int type)
00361 {
00362 GpuMat m;
00363 createContinuous(size, type, m);
00364 return m;
00365 }
00366
00367 inline void ensureSizeIsEnough(Size size, int type, GpuMat& m)
00368 {
00369 ensureSizeIsEnough(size.height, size.width, type, m);
00370 }
00371
00372
00376
00377 inline CudaMem::CudaMem() : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0) {}
00378 inline CudaMem::CudaMem(int _rows, int _cols, int _type, int _alloc_type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0)
00379 {
00380 if( _rows > 0 && _cols > 0 )
00381 create( _rows, _cols, _type, _alloc_type);
00382 }
00383
00384 inline CudaMem::CudaMem(Size _size, int _type, int _alloc_type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0)
00385 {
00386 if( _size.height > 0 && _size.width > 0 )
00387 create( _size.height, _size.width, _type, _alloc_type);
00388 }
00389
00390 inline CudaMem::CudaMem(const CudaMem& m) : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
00391 {
00392 if( refcount )
00393 CV_XADD(refcount, 1);
00394 }
00395
// Allocates a buffer matching m's geometry/type with the requested
// allocation kind, then copies m's contents into it.
// NOTE(review): when m is empty, create() is skipped and the copy below
// runs against an empty header — a no-op, but confirm that is the
// intended contract for empty inputs.
inline CudaMem::CudaMem(const Mat& m, int _alloc_type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(0)
{
    if( m.rows > 0 && m.cols > 0 )
        create( m.size(), m.type(), _alloc_type);

    // Copy through a host Mat header over this buffer (sizes/types match,
    // so copyTo does not reallocate the destination).
    Mat tmp = createMatHeader();
    m.copyTo(tmp);
}
00404
00405 inline CudaMem::~CudaMem()
00406 {
00407 release();
00408
00409 }
00410
// Shallow assignment: shares m's buffer and bumps its reference count.
inline CudaMem& CudaMem::operator = (const CudaMem& m)
{
    if( this != &m )
    {
        // Reference m's buffer BEFORE releasing our own: this order keeps a
        // buffer shared by both operands alive throughout the assignment.
        if( m.refcount )
            CV_XADD(m.refcount, 1);
        release();
        flags = m.flags;
        rows = m.rows; cols = m.cols;
        step = m.step; data = m.data;
        datastart = m.datastart;
        dataend = m.dataend;
        refcount = m.refcount;
        alloc_type = m.alloc_type;
    }
    return *this;
}
00428
00429 inline CudaMem CudaMem::clone() const
00430 {
00431 CudaMem m(size(), type(), alloc_type);
00432 Mat to = m;
00433 Mat from = *this;
00434 from.copyTo(to);
00435 return m;
00436 }
00437
00438 inline void CudaMem::create(Size _size, int _type, int _alloc_type) { create(_size.height, _size.width, _type, _alloc_type); }
00439
00440
00441
00442
00443
00444 inline Mat CudaMem::createMatHeader() const { return Mat(size(), type(), data); }
00445 inline CudaMem::operator Mat() const { return createMatHeader(); }
00446
00447 inline CudaMem::operator GpuMat() const { return createGpuMatHeader(); }
00448
00449
00450 inline bool CudaMem::isContinuous() const { return (flags & Mat::CONTINUOUS_FLAG) != 0; }
00451 inline size_t CudaMem::elemSize() const { return CV_ELEM_SIZE(flags); }
00452 inline size_t CudaMem::elemSize1() const { return CV_ELEM_SIZE1(flags); }
00453 inline int CudaMem::type() const { return CV_MAT_TYPE(flags); }
00454 inline int CudaMem::depth() const { return CV_MAT_DEPTH(flags); }
00455 inline int CudaMem::channels() const { return CV_MAT_CN(flags); }
00456 inline size_t CudaMem::step1() const { return step/elemSize1(); }
00457 inline Size CudaMem::size() const { return Size(cols, rows); }
00458 inline bool CudaMem::empty() const { return data == 0; }
00459
00461
00462
00463 inline GpuMat operator ~ (const GpuMat& src)
00464 {
00465 GpuMat dst;
00466 bitwise_not(src, dst);
00467 return dst;
00468 }
00469
00470
00471 inline GpuMat operator | (const GpuMat& src1, const GpuMat& src2)
00472 {
00473 GpuMat dst;
00474 bitwise_or(src1, src2, dst);
00475 return dst;
00476 }
00477
00478
00479 inline GpuMat operator & (const GpuMat& src1, const GpuMat& src2)
00480 {
00481 GpuMat dst;
00482 bitwise_and(src1, src2, dst);
00483 return dst;
00484 }
00485
00486
00487 inline GpuMat operator ^ (const GpuMat& src1, const GpuMat& src2)
00488 {
00489 GpuMat dst;
00490 bitwise_xor(src1, src2, dst);
00491 return dst;
00492 }
00493
00494
00495 }
00496
00497 }
00498
00499 #endif