/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_GPU_HPP__
#define __OPENCV_GPU_HPP__

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/gpu/devmem2d.hpp"
#include "opencv2/features2d/features2d.hpp"

namespace cv
{
namespace gpu
{

CV_EXPORTS int getCudaEnabledDeviceCount();

CV_EXPORTS void setDevice(int device);
CV_EXPORTS int getDevice();

CV_EXPORTS void resetDevice();

enum FeatureSet
{
    FEATURE_SET_COMPUTE_10 = 10,
    FEATURE_SET_COMPUTE_11 = 11,
    FEATURE_SET_COMPUTE_12 = 12,
    FEATURE_SET_COMPUTE_13 = 13,
    FEATURE_SET_COMPUTE_20 = 20,
    FEATURE_SET_COMPUTE_21 = 21,
    GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
    NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13
};

// Gives information about what GPU archs this OpenCV GPU module was
// compiled for
class CV_EXPORTS TargetArchs
{
public:
    static bool builtWith(FeatureSet feature_set);
    static bool has(int major, int minor);
    static bool hasPtx(int major, int minor);
    static bool hasBin(int major, int minor);
    static bool hasEqualOrLessPtx(int major, int minor);
    static bool hasEqualOrGreater(int major, int minor);
    static bool hasEqualOrGreaterPtx(int major, int minor);
    static bool hasEqualOrGreaterBin(int major, int minor);
private:
    TargetArchs();
};

// Gives information about the given GPU
class CV_EXPORTS DeviceInfo
{
public:
    // Creates DeviceInfo object for the current GPU
    DeviceInfo() : device_id_(getDevice()) { query(); }

    // Creates DeviceInfo object for the given GPU
    DeviceInfo(int device_id) : device_id_(device_id) { query(); }

    string name() const { return name_; }

    // Return compute capability versions
    int majorVersion() const { return majorVersion_; }
    int minorVersion() const { return minorVersion_; }

    int multiProcessorCount() const { return multi_processor_count_; }

    size_t freeMemory() const;
    size_t totalMemory() const;

    // Checks whether device supports the given feature
    bool supports(FeatureSet feature_set) const;

    // Checks whether the GPU module can be run on the given device
    bool isCompatible() const;

private:
    void query();
    void queryMemory(size_t& free_memory, size_t& total_memory) const;

    int device_id_;

    string name_;
    int multi_processor_count_;
    int majorVersion_;
    int minorVersion_;
};

CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);
CV_EXPORTS void nppError(int err, const char *file, const int line, const char *func);

class Stream;
class CudaMem;
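
// Usage sketch (illustrative comment, not part of the original header): enumerating the
// CUDA-capable devices and selecting one that this build of the GPU module can run on.
// Only functions declared above are used; the std::cout output is an assumption.
//
//     int count = cv::gpu::getCudaEnabledDeviceCount();
//     for (int i = 0; i < count; ++i)
//     {
//         cv::gpu::DeviceInfo info(i);
//         if (info.isCompatible())
//             std::cout << i << ": " << info.name() << " (compute capability "
//                       << info.majorVersion() << "." << info.minorVersion() << ")\n";
//     }
//     if (count > 0)
//         cv::gpu::setDevice(0);   // make device 0 current for subsequent gpu:: calls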

class CV_EXPORTS GpuMat
{
public:
    GpuMat();
    GpuMat(int rows, int cols, int type);
    GpuMat(Size size, int type);
    GpuMat(int rows, int cols, int type, const Scalar& s);
    GpuMat(Size size, int type, const Scalar& s);
    GpuMat(const GpuMat& m);

    GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
    GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);

    GpuMat(const GpuMat& m, const Range& rowRange, const Range& colRange);
    GpuMat(const GpuMat& m, const Rect& roi);

    explicit GpuMat(const Mat& m);

    ~GpuMat();

    GpuMat& operator = (const GpuMat& m);
    GpuMat& operator = (const Mat& m);

    // Contains just image size, data ptr and step.
    template <class T> operator DevMem2D_<T>() const;
    template <class T> operator PtrStep_<T>() const;

    void upload(const cv::Mat& m);

    void upload(const CudaMem& m, Stream& stream);

    operator Mat() const;
    void download(cv::Mat& m) const;

    void download(CudaMem& m, Stream& stream) const;

    GpuMat row(int y) const;
    GpuMat col(int x) const;
    GpuMat rowRange(int startrow, int endrow) const;
    GpuMat rowRange(const Range& r) const;
    GpuMat colRange(int startcol, int endcol) const;
    GpuMat colRange(const Range& r) const;

    GpuMat clone() const;
    // It calls m.create(this->size(), this->type()).
    void copyTo(GpuMat& m) const;
    void copyTo(GpuMat& m, const GpuMat& mask) const;
    void convertTo(GpuMat& m, int rtype, double alpha = 1, double beta = 0) const;

    void assignTo(GpuMat& m, int type = -1) const;

    GpuMat& operator = (const Scalar& s);
    GpuMat& setTo(const Scalar& s, const GpuMat& mask = GpuMat());
    // Creates an alternative matrix header with a different
    // number of channels and/or different number of rows. See cvReshape.
    GpuMat reshape(int cn, int rows = 0) const;

    // Allocates new matrix data unless the matrix already has the specified size and type;
    // previous data is unreferenced if needed.
    void create(int rows, int cols, int type);
    void create(Size size, int type);
    // Decreases the reference counter;
    // deallocates the data when the reference counter reaches 0.
    void release();

    void swap(GpuMat& mat);

    void locateROI(Size& wholeSize, Point& ofs) const;
    GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);
    // Extracts a rectangular sub-matrix
    // (this is a generalized form of row, rowRange etc.)
    GpuMat operator()(Range rowRange, Range colRange) const;
    GpuMat operator()(const Rect& roi) const;

    // Returns true iff the matrix data is continuous
    // (i.e. when there are no gaps between successive rows).
    // similar to CV_IS_GpuMat_CONT(cvGpuMat->type)
    bool isContinuous() const;
    // Returns element size in bytes,
    // similar to CV_ELEM_SIZE(cvMat->type)
    size_t elemSize() const;
    size_t elemSize1() const;
    int type() const;
    int depth() const;
    int channels() const;
    size_t step1() const;
    // Returns matrix size:
    // width == number of columns, height == number of rows
    Size size() const;
    bool empty() const;

    uchar* ptr(int y = 0);
    const uchar* ptr(int y = 0) const;

    template<typename _Tp> _Tp* ptr(int y = 0);
    template<typename _Tp> const _Tp* ptr(int y = 0) const;

    GpuMat t() const;

    int flags;
    int rows, cols;
    size_t step;
    uchar* data;

    // Pointer to the reference counter;
    // when GpuMat points to user-allocated data, the pointer is NULL
    int* refcount;

    uchar* datastart;
    uchar* dataend;
};

//#define TemplatedGpuMat // experimental, not recommended for use
#ifdef TemplatedGpuMat
    #include "GpuMat_BetaDeprecated.hpp"
#endif

CV_EXPORTS void createContinuous(int rows, int cols, int type, GpuMat& m);

CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m);
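
// Usage sketch (illustrative comment, not part of the original header): round-tripping
// data between host and device with GpuMat. The matrix size and type are placeholders.
//
//     cv::Mat host(480, 640, CV_8UC1, cv::Scalar(0));
//     cv::gpu::GpuMat device;
//     device.upload(host);                                       // host -> device copy
//     cv::gpu::GpuMat roi = device(cv::Rect(0, 0, 320, 240));    // header only, no copy
//     roi.setTo(cv::Scalar(255));                                // fill the ROI on the GPU
//     cv::Mat result;
//     device.download(result);                                   // device -> host copy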

// CudaMem is a limited cv::Mat with page-locked memory allocation.
// Page-locked memory is only needed for asynchronous and faster copying to the GPU.
// It is convertible to a cv::Mat header without reference counting,
// so you can use it with other OpenCV functions.
class CV_EXPORTS CudaMem
{
public:
    enum { ALLOC_PAGE_LOCKED = 1, ALLOC_ZEROCOPY = 2, ALLOC_WRITE_COMBINED = 4 };

    CudaMem();
    CudaMem(const CudaMem& m);

    CudaMem(int rows, int cols, int type, int _alloc_type = ALLOC_PAGE_LOCKED);
    CudaMem(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);


    explicit CudaMem(const Mat& m, int alloc_type = ALLOC_PAGE_LOCKED);

    ~CudaMem();

    CudaMem& operator = (const CudaMem& m);

    CudaMem clone() const;

    void create(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED);
    void create(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);

    void release();

    Mat createMatHeader() const;
    operator Mat() const;

    GpuMat createGpuMatHeader() const;
    operator GpuMat() const;

    // Returns true if host memory can be mapped to the GPU address space.
    static bool canMapHostMemory();

    // Please see cv::Mat for descriptions
    bool isContinuous() const;
    size_t elemSize() const;
    size_t elemSize1() const;
    int type() const;
    int depth() const;
    int channels() const;
    size_t step1() const;
    Size size() const;
    bool empty() const;


    // Please see cv::Mat for descriptions
    int flags;
    int rows, cols;
    size_t step;

    uchar* data;
    int* refcount;

    uchar* datastart;
    uchar* dataend;

    int alloc_type;
};

// Encapsulates a CUDA stream. Provides an interface for asynchronous copying.
// Passed to each function that supports asynchronous kernel execution.
// Reference counting is enabled.
class CV_EXPORTS Stream
{
public:
    Stream();
    ~Stream();

    Stream(const Stream&);
    Stream& operator=(const Stream&);

    bool queryIfComplete();
    void waitForCompletion();

    // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its subMat)
    void enqueueDownload(const GpuMat& src, CudaMem& dst);
    void enqueueDownload(const GpuMat& src, Mat& dst);

    // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its ROI)
    void enqueueUpload(const CudaMem& src, GpuMat& dst);
    void enqueueUpload(const Mat& src, GpuMat& dst);

    void enqueueCopy(const GpuMat& src, GpuMat& dst);

    void enqueueMemSet(GpuMat& src, Scalar val);
    void enqueueMemSet(GpuMat& src, Scalar val, const GpuMat& mask);

    // Converts matrix type, e.g. from float to uchar, depending on type
    void enqueueConvert(const GpuMat& src, GpuMat& dst, int type, double a = 1, double b = 0);

    static Stream& Null();

    operator bool() const;

private:
    void create();
    void release();

    struct Impl;
    Impl *impl;

    friend struct StreamAccessor;

    explicit Stream(Impl* impl);
};
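
// Usage sketch (illustrative comment, not part of the original header): overlapping
// transfers and execution with a Stream. Asynchronous uploads/downloads require
// page-locked host memory, hence CudaMem; sizes and threshold parameters are placeholders,
// and runtime support for the chosen matrix type depends on the build.
//
//     cv::gpu::CudaMem pinnedSrc(480, 640, CV_8UC1, cv::gpu::CudaMem::ALLOC_PAGE_LOCKED);
//     cv::gpu::CudaMem pinnedDst(480, 640, CV_8UC1, cv::gpu::CudaMem::ALLOC_PAGE_LOCKED);
//     cv::Mat srcHeader = pinnedSrc.createMatHeader();   // fill this as an ordinary cv::Mat
//
//     cv::gpu::Stream stream;
//     cv::gpu::GpuMat d_src, d_dst;
//     stream.enqueueUpload(pinnedSrc, d_src);
//     cv::gpu::threshold(d_src, d_dst, 128, 255, cv::THRESH_BINARY, stream);
//     stream.enqueueDownload(d_dst, pinnedDst);
//     stream.waitForCompletion();             // results in pinnedDst are valid after this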

CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void flip(const GpuMat& a, GpuMat& b, int flipCode, Stream& stream = Stream::Null());

CV_EXPORTS void LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void merge(const vector<GpuMat>& src, GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null());

CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = Stream::Null());

CV_EXPORTS void magnitude(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null());

CV_EXPORTS void magnitudeSqr(const GpuMat& x, GpuMat& magnitude, Stream& stream = Stream::Null());

CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null());

CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null());

CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null());

CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null());

CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false, Stream& stream = Stream::Null());
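
// Usage sketch (illustrative comment, not part of the original header): splitting a
// multi-channel image into planes and converting a gradient field to polar form.
// Sizes, types and fill values below are placeholders.
//
//     cv::gpu::GpuMat d_bgr(480, 640, CV_8UC3, cv::Scalar::all(0));
//     std::vector<cv::gpu::GpuMat> planes;
//     cv::gpu::split(d_bgr, planes);          // planes[0..2] are the B, G, R channels
//
//     cv::gpu::GpuMat d_dx(480, 640, CV_32FC1, cv::Scalar(1));
//     cv::gpu::GpuMat d_dy(480, 640, CV_32FC1, cv::Scalar(1));
//     cv::gpu::GpuMat d_mag, d_angle;
//     cv::gpu::cartToPolar(d_dx, d_dy, d_mag, d_angle, true);   // angle in degrees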

CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());

CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());

CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());

CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, Stream& stream = Stream::Null());

CV_EXPORTS void exp(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null());

CV_EXPORTS void log(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null());

CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream = Stream::Null());

CV_EXPORTS void compare(const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream = Stream::Null());

CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void min(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void max(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null());
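
// Usage sketch (illustrative comment, not part of the original header): per-element
// arithmetic on the GPU. Inputs must have the same size and type; sizes and values
// below are placeholders.
//
//     cv::gpu::GpuMat d_a(480, 640, CV_8UC1, cv::Scalar(100));
//     cv::gpu::GpuMat d_b(480, 640, CV_8UC1, cv::Scalar(60));
//     cv::gpu::GpuMat d_sum, d_diff, d_mask;
//     cv::gpu::add(d_a, d_b, d_sum);
//     cv::gpu::absdiff(d_a, d_b, d_diff);
//     cv::gpu::compare(d_a, d_b, d_mask, cv::CMP_GT);   // 255 where a > b, 0 elsewhere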

CV_EXPORTS void remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap);

CV_EXPORTS void meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr,
    TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));

CV_EXPORTS void meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr,
    TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));

CV_EXPORTS void meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize,
    TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));

CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp, Stream& stream = Stream::Null());

CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, Stream& stream = Stream::Null());

CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null());

CV_EXPORTS double threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type, Stream& stream = Stream::Null());

CV_EXPORTS void resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());

CV_EXPORTS void warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null());

CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null());

CV_EXPORTS void rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift = 0, double yShift = 0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());

CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value = Scalar(), Stream& stream = Stream::Null());

CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum, Stream& stream = Stream::Null());

CV_EXPORTS void integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& stream = Stream::Null());

CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum, GpuMat& sqsum, Stream& stream = Stream::Null());

CV_EXPORTS void sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& stream = Stream::Null());

CV_EXPORTS void columnSum(const GpuMat& src, GpuMat& sum);

CV_EXPORTS void rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& stream = Stream::Null());

CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);

CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType = BORDER_REFLECT101);

CV_EXPORTS void mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB = false);

CV_EXPORTS void mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags,
    float scale, bool conjB = false);

CV_EXPORTS void dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags = 0);

CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
    bool ccorr = false);

struct CV_EXPORTS ConvolveBuf;

CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
    bool ccorr, ConvolveBuf& buf);

struct CV_EXPORTS ConvolveBuf
{
    ConvolveBuf() {}
    ConvolveBuf(Size image_size, Size templ_size)
        { create(image_size, templ_size); }
    void create(Size image_size, Size templ_size);

private:
    static Size estimateBlockSize(Size result_size, Size templ_size);
    friend void convolve(const GpuMat&, const GpuMat&, GpuMat&, bool, ConvolveBuf&);

    Size result_size;
    Size block_size;
    Size dft_size;
    int spect_len;

    GpuMat image_spect, templ_spect, result_spect;
    GpuMat image_block, templ_block, result_data;
};

CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method);

CV_EXPORTS void downsample(const GpuMat& src, GpuMat& dst, int k = 2);

CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
    GpuMat& result, Stream& stream = Stream::Null());
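
// Usage sketch (illustrative comment, not part of the original header): a simple GPU
// preprocessing chain using the image processing functions above. hostBgr is an assumed,
// existing CV_8UC3 cv::Mat; the scale factor and threshold values are placeholders, and
// type/interpolation support depends on the build.
//
//     cv::gpu::GpuMat d_bgr, d_gray, d_small, d_bin;
//     d_bgr.upload(hostBgr);
//     cv::gpu::cvtColor(d_bgr, d_gray, CV_BGR2GRAY);
//     cv::gpu::resize(d_gray, d_small, cv::Size(), 0.5, 0.5, cv::INTER_LINEAR);
//     cv::gpu::threshold(d_small, d_bin, 128, 255, cv::THRESH_BINARY);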

CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev);

CV_EXPORTS double norm(const GpuMat& src1, int normType = NORM_L2);

CV_EXPORTS double norm(const GpuMat& src1, int normType, GpuMat& buf);

CV_EXPORTS double norm(const GpuMat& src1, const GpuMat& src2, int normType = NORM_L2);

CV_EXPORTS Scalar sum(const GpuMat& src);

CV_EXPORTS Scalar sum(const GpuMat& src, GpuMat& buf);

CV_EXPORTS Scalar absSum(const GpuMat& src);

CV_EXPORTS Scalar absSum(const GpuMat& src, GpuMat& buf);

CV_EXPORTS Scalar sqrSum(const GpuMat& src);

CV_EXPORTS Scalar sqrSum(const GpuMat& src, GpuMat& buf);

CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal = 0, const GpuMat& mask = GpuMat());

CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask, GpuMat& buf);

CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal = 0, Point* minLoc = 0, Point* maxLoc = 0,
    const GpuMat& mask = GpuMat());

CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
    const GpuMat& mask, GpuMat& valbuf, GpuMat& locbuf);

CV_EXPORTS int countNonZero(const GpuMat& src);

CV_EXPORTS int countNonZero(const GpuMat& src, GpuMat& buf);


CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
    GpuMat& dst, Stream& stream = Stream::Null());

CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
    const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
    Stream& stream = Stream::Null());

CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
    const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess = false,
    int num_iters = 100, float max_dist = 8.0, int min_inlier_count = 100,
    vector<int>* inliers = NULL);
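
// Usage sketch (illustrative comment, not part of the original header): matrix reductions
// computed on the GPU and returned to the CPU as scalars/points. d_img is an assumed,
// existing single-channel GpuMat.
//
//     double minVal = 0, maxVal = 0;
//     cv::Point minLoc, maxLoc;
//     cv::gpu::minMaxLoc(d_img, &minVal, &maxVal, &minLoc, &maxLoc);
//     cv::Scalar total = cv::gpu::sum(d_img);
//     int nonZero = cv::gpu::countNonZero(d_img);
//     double l2 = cv::gpu::norm(d_img, cv::NORM_L2);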

class CV_EXPORTS BaseRowFilter_GPU
{
public:
    BaseRowFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {}
    virtual ~BaseRowFilter_GPU() {}
    virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0;
    int ksize, anchor;
};

class CV_EXPORTS BaseColumnFilter_GPU
{
public:
    BaseColumnFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {}
    virtual ~BaseColumnFilter_GPU() {}
    virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0;
    int ksize, anchor;
};

class CV_EXPORTS BaseFilter_GPU
{
public:
    BaseFilter_GPU(const Size& ksize_, const Point& anchor_) : ksize(ksize_), anchor(anchor_) {}
    virtual ~BaseFilter_GPU() {}
    virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0;
    Size ksize;
    Point anchor;
};

class CV_EXPORTS FilterEngine_GPU
{
public:
    virtual ~FilterEngine_GPU() {}

    virtual void apply(const GpuMat& src, GpuMat& dst, Rect roi = Rect(0,0,-1,-1), Stream& stream = Stream::Null()) = 0;
};

CV_EXPORTS Ptr<FilterEngine_GPU> createFilter2D_GPU(const Ptr<BaseFilter_GPU>& filter2D, int srcType, int dstType);

CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU>& rowFilter,
    const Ptr<BaseColumnFilter_GPU>& columnFilter, int srcType, int bufType, int dstType);

CV_EXPORTS Ptr<BaseRowFilter_GPU> getRowSumFilter_GPU(int srcType, int sumType, int ksize, int anchor = -1);

CV_EXPORTS Ptr<BaseColumnFilter_GPU> getColumnSumFilter_GPU(int sumType, int dstType, int ksize, int anchor = -1);

CV_EXPORTS Ptr<BaseFilter_GPU> getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1));

CV_EXPORTS Ptr<FilterEngine_GPU> createBoxFilter_GPU(int srcType, int dstType, const Size& ksize,
    const Point& anchor = Point(-1,-1));

CV_EXPORTS Ptr<BaseFilter_GPU> getMorphologyFilter_GPU(int op, int type, const Mat& kernel, const Size& ksize,
    Point anchor = Point(-1,-1));

CV_EXPORTS Ptr<FilterEngine_GPU> createMorphologyFilter_GPU(int op, int type, const Mat& kernel,
    const Point& anchor = Point(-1,-1), int iterations = 1);

CV_EXPORTS Ptr<BaseFilter_GPU> getLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, const Size& ksize,
    Point anchor = Point(-1, -1));

CV_EXPORTS Ptr<FilterEngine_GPU> createLinearFilter_GPU(int srcType, int dstType, const Mat& kernel,
    const Point& anchor = Point(-1,-1));

CV_EXPORTS Ptr<BaseRowFilter_GPU> getLinearRowFilter_GPU(int srcType, int bufType, const Mat& rowKernel,
    int anchor = -1, int borderType = BORDER_CONSTANT);

CV_EXPORTS Ptr<BaseColumnFilter_GPU> getLinearColumnFilter_GPU(int bufType, int dstType, const Mat& columnKernel,
    int anchor = -1, int borderType = BORDER_CONSTANT);

CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel,
    const Mat& columnKernel, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT,
    int columnBorderType = -1);

CV_EXPORTS Ptr<FilterEngine_GPU> createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

CV_EXPORTS Ptr<FilterEngine_GPU> createGaussianFilter_GPU(int type, Size ksize, double sigma1, double sigma2 = 0,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

CV_EXPORTS Ptr<BaseFilter_GPU> getMaxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1));

CV_EXPORTS Ptr<BaseFilter_GPU> getMinFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1));

CV_EXPORTS void boxFilter(const GpuMat& src, GpuMat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null());

static inline void blur(const GpuMat& src, GpuMat& dst, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null())
{
    boxFilter(src, dst, -1, ksize, anchor, stream);
}

CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null());

CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null());

CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null());

CV_EXPORTS void filter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernel, Point anchor = Point(-1,-1), Stream& stream = Stream::Null());

CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY,
    Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null());

CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null());

CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null());

CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2 = 0,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null());

CV_EXPORTS void Laplacian(const GpuMat& src, GpuMat& dst, int ddepth, int ksize = 1, double scale = 1, Stream& stream = Stream::Null());
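
// Usage sketch (illustrative comment, not part of the original header): smoothing and
// morphology with the convenience functions, plus a reusable FilterEngine_GPU for repeated
// Gaussian filtering. d_src is an assumed, existing CV_8UC1 GpuMat; kernel sizes are
// placeholders.
//
//     cv::gpu::GpuMat d_blurred, d_eroded;
//     cv::gpu::GaussianBlur(d_src, d_blurred, cv::Size(5, 5), 1.5);
//     cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
//     cv::gpu::erode(d_blurred, d_eroded, element);
//
//     // Creating the engine once can avoid re-allocating internal buffers on every call.
//     cv::Ptr<cv::gpu::FilterEngine_GPU> gauss =
//         cv::gpu::createGaussianFilter_GPU(CV_8UC1, cv::Size(5, 5), 1.5);
//     gauss->apply(d_src, d_blurred);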

CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, GpuMat& buf, Stream& stream = Stream::Null());


CV_EXPORTS void evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel);
CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null());
CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());
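
// Usage sketch (illustrative comment, not part of the original header): a 256-bin
// histogram of an 8-bit image computed on the GPU and downloaded for inspection.
// d_gray is an assumed, existing CV_8UC1 GpuMat.
//
//     cv::gpu::GpuMat d_hist;
//     cv::gpu::histEven(d_gray, d_hist, 256, 0, 256);   // bins cover [0, 256)
//     cv::Mat h_hist;
//     d_hist.download(h_hist);                          // one row of bin counts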

class CV_EXPORTS StereoBM_GPU
{
public:
    enum { BASIC_PRESET = 0, PREFILTER_XSOBEL = 1 };

    enum { DEFAULT_NDISP = 64, DEFAULT_WINSZ = 19 };

    StereoBM_GPU();
    StereoBM_GPU(int preset, int ndisparities = DEFAULT_NDISP, int winSize = DEFAULT_WINSZ);

    void operator() (const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null());

    // Heuristic that estimates whether the current GPU will be faster than the CPU in this algorithm.
    // It queries the current active device.
    static bool checkIfGpuCallReasonable();

    int preset;
    int ndisp;
    int winSize;

    // If avergeTexThreshold == 0, post processing is disabled.
    // If avergeTexThreshold != 0, then the disparity is set to 0 at each point (x, y) where, for the left image,
    // SumOfHorizontalGradientsInWindow(x, y, winSize) < (winSize * winSize) * avergeTexThreshold,
    // i.e. the input left image is low-textured.
    float avergeTexThreshold;
private:
    GpuMat minSSD, leBuf, riBuf;
};

// "Efficient Belief Propagation for Early Vision"
// P. Felzenszwalb
class CV_EXPORTS StereoBeliefPropagation
{
public:
    enum { DEFAULT_NDISP = 64 };
    enum { DEFAULT_ITERS = 5 };
    enum { DEFAULT_LEVELS = 5 };

    static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);

    explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP,
        int iters = DEFAULT_ITERS,
        int levels = DEFAULT_LEVELS,
        int msg_type = CV_32F);

    StereoBeliefPropagation(int ndisp, int iters, int levels,
        float max_data_term, float data_weight,
        float max_disc_term, float disc_single_jump,
        int msg_type = CV_32F);

    void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null());


    void operator()(const GpuMat& data, GpuMat& disparity, Stream& stream = Stream::Null());

    int ndisp;

    int iters;
    int levels;

    float max_data_term;
    float data_weight;
    float max_disc_term;
    float disc_single_jump;

    int msg_type;
private:
    GpuMat u, d, l, r, u2, d2, l2, r2;
    std::vector<GpuMat> datas;
    GpuMat out;
};

// "A Constant-Space Belief Propagation Algorithm for Stereo Matching"
// Qingxiong Yang, Liang Wang, Narendra Ahuja
// http://vision.ai.uiuc.edu/~qyang6/
class CV_EXPORTS StereoConstantSpaceBP
{
public:
    enum { DEFAULT_NDISP = 128 };
    enum { DEFAULT_ITERS = 8 };
    enum { DEFAULT_LEVELS = 4 };
    enum { DEFAULT_NR_PLANE = 4 };

    static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);

    explicit StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP,
        int iters = DEFAULT_ITERS,
        int levels = DEFAULT_LEVELS,
        int nr_plane = DEFAULT_NR_PLANE,
        int msg_type = CV_32F);

    StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane,
        float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,
        int min_disp_th = 0,
        int msg_type = CV_32F);

    void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null());

    int ndisp;

    int iters;
    int levels;

    int nr_plane;

    float max_data_term;
    float data_weight;
    float max_disc_term;
    float disc_single_jump;

    int min_disp_th;

    int msg_type;

    bool use_local_init_data_cost;
private:
    GpuMat u[2], d[2], l[2], r[2];
    GpuMat disp_selected_pyr[2];

    GpuMat data_cost;
    GpuMat data_cost_selected;

    GpuMat temp;

    GpuMat out;
};
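
// Usage sketch (illustrative comment, not part of the original header): block-matching
// stereo on the GPU. left/right are assumed to be rectified CV_8UC1 host images of the
// same size; the disparity range and window size are placeholders.
//
//     cv::gpu::StereoBM_GPU bm(cv::gpu::StereoBM_GPU::BASIC_PRESET, 64, 19);
//     cv::gpu::GpuMat d_left, d_right, d_disp, d_dispColor;
//     d_left.upload(left);
//     d_right.upload(right);
//     bm(d_left, d_right, d_disp);                       // disparity map
//     cv::gpu::drawColorDisp(d_disp, d_dispColor, 64);   // false-color visualization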

// Disparity map refinement using joint bilateral filtering given a single color image.
// Qingxiong Yang, Liang Wang, Narendra Ahuja
// http://vision.ai.uiuc.edu/~qyang6/
class CV_EXPORTS DisparityBilateralFilter
{
public:
    enum { DEFAULT_NDISP = 64 };
    enum { DEFAULT_RADIUS = 3 };
    enum { DEFAULT_ITERS = 1 };

    explicit DisparityBilateralFilter(int ndisp = DEFAULT_NDISP, int radius = DEFAULT_RADIUS, int iters = DEFAULT_ITERS);

    DisparityBilateralFilter(int ndisp, int radius, int iters, float edge_threshold, float max_disc_threshold, float sigma_range);

    void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst, Stream& stream = Stream::Null());

private:
    int ndisp;
    int radius;
    int iters;

    float edge_threshold;
    float max_disc_threshold;
    float sigma_range;

    GpuMat table_color;
    GpuMat table_space;
};


struct CV_EXPORTS HOGDescriptor
{
    enum { DEFAULT_WIN_SIGMA = -1 };
    enum { DEFAULT_NLEVELS = 64 };
    enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL };

    HOGDescriptor(Size win_size = Size(64, 128), Size block_size = Size(16, 16),
                  Size block_stride = Size(8, 8), Size cell_size = Size(8, 8),
                  int nbins = 9, double win_sigma = DEFAULT_WIN_SIGMA,
                  double threshold_L2hys = 0.2, bool gamma_correction = true,
                  int nlevels = DEFAULT_NLEVELS);

    size_t getDescriptorSize() const;
    size_t getBlockHistogramSize() const;

    void setSVMDetector(const vector<float>& detector);

    static vector<float> getDefaultPeopleDetector();
    static vector<float> getPeopleDetector48x96();
    static vector<float> getPeopleDetector64x128();

    void detect(const GpuMat& img, vector<Point>& found_locations,
                double hit_threshold = 0, Size win_stride = Size(),
                Size padding = Size());

    void detectMultiScale(const GpuMat& img, vector<Rect>& found_locations,
                          double hit_threshold = 0, Size win_stride = Size(),
                          Size padding = Size(), double scale0 = 1.05,
                          int group_threshold = 2);

    void getDescriptors(const GpuMat& img, Size win_stride,
                        GpuMat& descriptors,
                        int descr_format = DESCR_FORMAT_COL_BY_COL);

    Size win_size;
    Size block_size;
    Size block_stride;
    Size cell_size;
    int nbins;
    double win_sigma;
    double threshold_L2hys;
    bool gamma_correction;
    int nlevels;

protected:
    void computeBlockHistograms(const GpuMat& img);
    void computeGradient(const GpuMat& img, GpuMat& grad, GpuMat& qangle);

    double getWinSigma() const;
    bool checkDetectorSize() const;

    static int numPartsWithin(int size, int part_size, int stride);
    static Size numPartsWithin(Size size, Size part_size, Size stride);

    // Coefficients of the separating plane
    float free_coef;
    GpuMat detector;

    // Results of the last classification step
    GpuMat labels, labels_buf;
    Mat labels_host;

    // Results of the last histogram evaluation step
    GpuMat block_hists, block_hists_buf;

    // Gradients computation results
    GpuMat grad, qangle, grad_buf, qangle_buf;

    // Returns a sub-buffer with the required size; reallocates the buffer if necessary.
    static GpuMat getBuffer(const Size& sz, int type, GpuMat& buf);
    static GpuMat getBuffer(int rows, int cols, int type, GpuMat& buf);

    std::vector<GpuMat> image_scales;
};
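
// Usage sketch (illustrative comment, not part of the original header): pedestrian
// detection with the GPU HOG descriptor and the bundled linear SVM coefficients.
// grayImage is an assumed, existing single-channel cv::Mat; the expected input channel
// format (commonly CV_8UC1 or CV_8UC4 for the GPU HOG) depends on the build.
//
//     cv::gpu::HOGDescriptor hog;
//     hog.setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
//     cv::gpu::GpuMat d_img;
//     d_img.upload(grayImage);
//     std::vector<cv::Rect> found;
//     hog.detectMultiScale(d_img, found);      // found: detected people rectangles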

class CV_EXPORTS BruteForceMatcher_GPU_base
{
public:
    enum DistType { L1Dist = 0, L2Dist, HammingDist };

    explicit BruteForceMatcher_GPU_base(DistType distType = L2Dist);

    // Add descriptors to the train descriptor collection.
    void add(const std::vector<GpuMat>& descCollection);

    // Get the train descriptor collection.
    const std::vector<GpuMat>& getTrainDescriptors() const;

    // Clear the train descriptor collection.
    void clear();

    // Returns true if there are no train descriptors in the collection.
    bool empty() const;

    // Returns true if the matcher supports masks in the match methods.
    bool isMaskSupported() const;

    // Find the single best match for each query descriptor.
    // trainIdx.at<int>(0, queryIdx) will contain the best train index for queryIdx,
    // distance.at<float>(0, queryIdx) will contain the distance.
    void matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,
        GpuMat& trainIdx, GpuMat& distance,
        const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

    // Download trainIdx and distance to a CPU vector of DMatch.
    static void matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches);

    // Find the single best match for each query descriptor.
    void match(const GpuMat& queryDescs, const GpuMat& trainDescs, std::vector<DMatch>& matches,
        const GpuMat& mask = GpuMat());

    // Make a GPU collection of trains and masks in a format suitable for the matchCollection function.
    void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
        const vector<GpuMat>& masks = std::vector<GpuMat>());

    // Find the single best match from the train collection for each query descriptor.
    // trainIdx.at<int>(0, queryIdx) will contain the best train index for queryIdx,
    // imgIdx.at<int>(0, queryIdx) will contain the best image index for queryIdx,
    // distance.at<float>(0, queryIdx) will contain the distance.
    void matchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,
        GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
        const GpuMat& maskCollection, Stream& stream = Stream::Null());

    // Download trainIdx, imgIdx and distance to a CPU vector of DMatch.
    static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
        std::vector<DMatch>& matches);

    // Find the single best match from the train collection for each query descriptor.
    void match(const GpuMat& queryDescs, std::vector<DMatch>& matches,
        const std::vector<GpuMat>& masks = std::vector<GpuMat>());

    // Find the k best matches for each query descriptor (in increasing order of distances).
    // trainIdx.at<int>(queryIdx, i) will contain the index of the i-th best train (i < k),
    // distance.at<float>(queryIdx, i) will contain the distance.
    // allDist is a buffer to store all distances between query descriptors and train descriptors;
    // it has size (nQuery, nTrain) and CV_32F type.
    // allDist.at<float>(queryIdx, trainIdx) will contain FLT_MAX if trainIdx is one of the k best,
    // otherwise it will contain the distance between the queryIdx and trainIdx descriptors.
    void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
        GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

    // Download trainIdx and distance to a CPU vector of DMatch.
    // compactResult is used when the mask is not empty. If compactResult is false, the matches
    // vector will have the same size as queryDescriptors rows. If compactResult is true, the
    // matches vector will not contain matches for fully masked-out query descriptors.
    static void knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
        std::vector< std::vector<DMatch> >& matches, bool compactResult = false);

    // Find the k best matches for each query descriptor (in increasing order of distances).
    // compactResult is used when the mask is not empty. If compactResult is false, the matches
    // vector will have the same size as queryDescriptors rows. If compactResult is true, the
    // matches vector will not contain matches for fully masked-out query descriptors.
    void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
        std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask = GpuMat(),
        bool compactResult = false);

    // Find the k best matches from the train collection for each query descriptor (in increasing order of distances).
    // compactResult is used when the mask is not empty. If compactResult is false, the matches
    // vector will have the same size as queryDescriptors rows. If compactResult is true, the
    // matches vector will not contain matches for fully masked-out query descriptors.
    void knnMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, int knn,
        const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);

    // Find the best matches for each query descriptor that have a distance less than maxDistance.
    // nMatches.at<unsigned int>(0, queryIdx) will contain the match count for queryIdx.
    // Be careful: nMatches can be greater than trainIdx.cols - it means that the matcher did not
    // find all matches, because it did not have enough memory.
    // trainIdx.at<int>(queryIdx, i) will contain the i-th train index (i < min(nMatches.at<unsigned int>(0, queryIdx), trainIdx.cols)),
    // distance.at<float>(queryIdx, i) will contain the i-th distance (i < min(nMatches.at<unsigned int>(0, queryIdx), trainIdx.cols)).
    // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x nTrain;
    // otherwise the user can pass their own allocated trainIdx and distance with size nQuery x nMaxMatches.
    // Matches are not sorted.
    void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
        GpuMat& trainIdx, GpuMat& nMatches, GpuMat& distance, float maxDistance,
        const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());

    // Download trainIdx, nMatches and distance to a CPU vector of DMatch.
    // matches will be sorted in increasing order of distances.
    // compactResult is used when the mask is not empty. If compactResult is false, the matches
    // vector will have the same size as queryDescriptors rows. If compactResult is true, the
    // matches vector will not contain matches for fully masked-out query descriptors.
    static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& nMatches, const GpuMat& distance,
        std::vector< std::vector<DMatch> >& matches, bool compactResult = false);

    // Find the best matches for each query descriptor that have a distance less than maxDistance
    // (in increasing order of distances).
    void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
        std::vector< std::vector<DMatch> >& matches, float maxDistance,
        const GpuMat& mask = GpuMat(), bool compactResult = false);

    // Find the best matches from the train collection for each query descriptor that have a distance
    // less than maxDistance (in increasing order of distances).
    void radiusMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, float maxDistance,
        const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);

private:
    DistType distType;

    std::vector<GpuMat> trainDescCollection;
};

template <class Distance>
class CV_EXPORTS BruteForceMatcher_GPU;

template <typename T>
class CV_EXPORTS BruteForceMatcher_GPU< L1<T> > : public BruteForceMatcher_GPU_base
{
public:
    explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L1Dist) {}
    explicit BruteForceMatcher_GPU(L1<T> /*d*/) : BruteForceMatcher_GPU_base(L1Dist) {}
};
template <typename T>
class CV_EXPORTS BruteForceMatcher_GPU< L2<T> > : public BruteForceMatcher_GPU_base
{
public:
    explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L2Dist) {}
    explicit BruteForceMatcher_GPU(L2<T> /*d*/) : BruteForceMatcher_GPU_base(L2Dist) {}
};
template <> class CV_EXPORTS BruteForceMatcher_GPU< HammingLUT > : public BruteForceMatcher_GPU_base
{
public:
    explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(HammingDist) {}
    explicit BruteForceMatcher_GPU(HammingLUT /*d*/) : BruteForceMatcher_GPU_base(HammingDist) {}
};
template <> class CV_EXPORTS BruteForceMatcher_GPU< Hamming > : public BruteForceMatcher_GPU_base
{
public:
    explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(HammingDist) {}
    explicit BruteForceMatcher_GPU(Hamming /*d*/) : BruteForceMatcher_GPU_base(HammingDist) {}
};
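
// Usage sketch (illustrative comment, not part of the original header): brute-force
// descriptor matching on the GPU with an L2 distance, e.g. for float descriptors such as
// those produced by SURF_GPU below. d_queryDescs and d_trainDescs are assumed, existing
// CV_32F descriptor matrices already on the device.
//
//     cv::gpu::BruteForceMatcher_GPU< cv::L2<float> > matcher;
//     std::vector<cv::DMatch> matches;
//     matcher.match(d_queryDescs, d_trainDescs, matches);
//
//     std::vector< std::vector<cv::DMatch> > knn;
//     matcher.knnMatch(d_queryDescs, d_trainDescs, knn, 2);   // two best matches per query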

// The cascade classifier class for object detection.
class CV_EXPORTS CascadeClassifier_GPU
{
public:
    CascadeClassifier_GPU();
    CascadeClassifier_GPU(const string& filename);
    ~CascadeClassifier_GPU();

    bool empty() const;
    bool load(const string& filename);
    void release();

    /* Returns the number of detected objects */
    int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.2, int minNeighbors = 4, Size minSize = Size());

    bool findLargestObject;
    bool visualizeInPlace;

    Size getClassifierSize() const;
private:

    struct CascadeClassifierImpl;
    CascadeClassifierImpl* impl;
};


class CV_EXPORTS SURF_GPU : public CvSURFParams
{
public:
    enum KeypointLayout
    {
        SF_X = 0,
        SF_Y,
        SF_LAPLACIAN,
        SF_SIZE,
        SF_DIR,
        SF_HESSIAN,
        SF_FEATURE_STRIDE
    };

    SURF_GPU();
    explicit SURF_GPU(double _hessianThreshold, int _nOctaves = 4,
        int _nOctaveLayers = 2, bool _extended = false, float _keypointsRatio = 0.01f, bool _upright = false);

    int descriptorSize() const;

    void uploadKeypoints(const vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
    void downloadKeypoints(const GpuMat& keypointsGPU, vector<KeyPoint>& keypoints);

    void downloadDescriptors(const GpuMat& descriptorsGPU, vector<float>& descriptors);

    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
        bool useProvidedKeypoints = false);

    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
        bool useProvidedKeypoints = false);

    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
        bool useProvidedKeypoints = false);

    float keypointsRatio;

    GpuMat sum, mask1, maskSum, intBuffer;

    GpuMat det, trace;

    GpuMat maxPosBuffer;
};

}

CV_EXPORTS void filterSpeckles(Mat& img, uchar newVal, int maxSpeckleSize, uchar diffThreshold, Mat& buf);

}
#include "opencv2/gpu/matrix_operations.hpp"

#endif /* __OPENCV_GPU_HPP__ */