block.hpp
Go to the documentation of this file.
1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
22 //
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
26 //
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42 
43 #ifndef __OPENCV_GPU_DEVICE_BLOCK_HPP__
44 #define __OPENCV_GPU_DEVICE_BLOCK_HPP__
45 
namespace cv { namespace gpu { namespace device
{
    // Block-cooperative device utilities: every function below must be called
    // by ALL threads of the launching thread block (CTA) together, since they
    // partition work by the flattened in-block thread id and some of them
    // contain block-wide barriers (__syncthreads).
    struct Block
    {
        // Linear id of the current block. Uses only blockIdx.x, so it is
        // meaningful for 1-D grids only.
        static __device__ __forceinline__ unsigned int id()
        {
            return blockIdx.x;
        }

        // Total number of threads in the (possibly 3-D) block; used as the
        // loop stride by the cooperative algorithms below.
        static __device__ __forceinline__ unsigned int stride()
        {
            return blockDim.x * blockDim.y * blockDim.z;
        }

        // Block-wide barrier (must be reached by every thread of the block).
        static __device__ __forceinline__ void sync()
        {
            __syncthreads();
        }

        // Flattened 1-D index of the calling thread within its 3-D block,
        // in [0, stride()).
        static __device__ __forceinline__ int flattenedThreadId()
        {
            return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
        }

        // Cooperatively assigns `value` to every element of [beg, end):
        // each thread writes elements one block-stride apart (grid-stride
        // style loop over the block). No barrier is issued; callers needing
        // the result visible block-wide must sync() afterwards.
        template<typename It, typename T>
        static __device__ __forceinline__ void fill(It beg, It end, const T& value)
        {
            int STRIDE = stride();
            It t = beg + flattenedThreadId();

            for(; t < end; t += STRIDE)
                *t = value;
        }

        // Cooperatively writes the sequence value, value+1, value+2, ... into
        // [beg, end), like std::iota. NOTE(review): "yota" is a historical
        // misspelling of "iota"; the name is kept for source compatibility
        // with existing callers.
        template<typename OutIt, typename T>
        static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
        {
            int STRIDE = stride();
            int tid = flattenedThreadId();
            value += tid;

            for(OutIt t = beg + tid; t < end; t += STRIDE, value += STRIDE)
                *t = value;
        }

        // Cooperative element-wise copy of [beg, end) into the range starting
        // at `out`. Input and output ranges must not overlap in a way that a
        // strided per-thread copy would corrupt (no barrier between accesses).
        template<typename InIt, typename OutIt>
        static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out)
        {
            int STRIDE = stride();
            InIt t = beg + flattenedThreadId();
            OutIt o = out + (t - beg);

            for(; t < end; t += STRIDE, o += STRIDE)
                *o = *t;
        }

        // Cooperative unary transform: out[i] = op(in[i]) for i in [0, end-beg).
        // NOTE(review): "transfrom" is a historical misspelling of "transform";
        // kept for source compatibility with existing callers.
        template<typename InIt, typename OutIt, class UnOp>
        static __device__ __forceinline__ void transfrom(InIt beg, InIt end, OutIt out, UnOp op)
        {
            int STRIDE = stride();
            InIt t = beg + flattenedThreadId();
            OutIt o = out + (t - beg);

            for(; t < end; t += STRIDE, o += STRIDE)
                *o = op(*t);
        }

        // Cooperative binary transform: out[i] = op(in1[i], in2[i]).
        // The second input range must be at least as long as [beg1, end1).
        template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
        static __device__ __forceinline__ void transfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
        {
            int STRIDE = stride();
            InIt1 t1 = beg1 + flattenedThreadId();
            InIt2 t2 = beg2 + flattenedThreadId();
            OutIt o = out + (t1 - beg1);

            for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)
                *o = op(*t1, *t2);
        }

        // Classic tree reduction over buffer[0..CTA_SIZE) with result left in
        // buffer[0]. Preconditions:
        //   - CTA_SIZE is a power of two and equals the actual block size;
        //   - buffer holds CTA_SIZE elements already written by the block and
        //     made visible via a prior __syncthreads() (this function reads
        //     buffer[tid] immediately, without an initial barrier);
        //   - buffer is presumably shared memory — it must at least be
        //     visible to all threads of the block (TODO confirm at call sites).
        // The tid < 32 tail relies on `volatile` stores plus implicit
        // warp-lockstep execution and has no __syncwarp().
        // NOTE(review): that idiom is not guaranteed on compute capability
        // 7.0+ (independent thread scheduling) — verify target architectures.
        // No trailing barrier: only thread 0 may safely read buffer[0]
        // without a subsequent sync().
        template<int CTA_SIZE, typename T, class BinOp>
        static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op)
        {
            int tid = flattenedThreadId();
            T val = buffer[tid];

            // Halve the active range each step; barrier after every step in
            // which more than one warp participates.
            if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
            if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
            if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
            if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }

            // Warp-synchronous tail: no barriers, correctness depends on
            // volatile accesses and lockstep execution within the warp.
            if (tid < 32)
            {
                if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
                if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
                if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
                if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
                if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
                if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
            }
        }

        // Same tree reduction as above, but each thread first contributes its
        // own `init` value (so no pre-populated buffer is required), and the
        // reduced value is returned to ALL threads of the block.
        // Same CTA_SIZE / buffer preconditions and the same warp-synchronous
        // tail caveat as the overload above.
        template<int CTA_SIZE, typename T, class BinOp>
        static __device__ __forceinline__ T reduce(volatile T* buffer, T init, BinOp op)
        {
            int tid = flattenedThreadId();
            T val = buffer[tid] = init;
            __syncthreads();  // make every thread's init visible before reducing

            if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
            if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
            if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
            if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }

            if (tid < 32)
            {
                if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
                if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
                if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
                if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
                if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
                if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
            }
            __syncthreads();    // ensure buffer[0] is final before everyone reads it
            return buffer[0];
        }

        // General reduction of data[0..n) with result in data[0]; unlike the
        // overloads above, n need not be a power of two or related to the
        // block size. All __syncthreads() calls are uniform: `n` and `sft`
        // hold the same values in every thread, so no thread diverges around
        // a barrier. Caller must have synced after writing `data`.
        template <typename T, class BinOp>
        static __device__ __forceinline__ void reduce_n(T* data, unsigned int n, BinOp op)
        {
            int ftid = flattenedThreadId();
            int sft = stride();

            // Phase 1: if there are more elements than threads, fold the
            // excess elements into the first `sft` slots.
            if (sft < n)
            {
                for (unsigned int i = sft + ftid; i < n; i += sft)
                    data[ftid] = op(data[ftid], data[i]);

                __syncthreads();

                n = sft;
            }

            // Phase 2: pairwise fold (first half with mirrored second half)
            // until a single element remains in data[0].
            while (n > 1)
            {
                unsigned int half = n/2;

                if (ftid < half)
                    data[ftid] = op(data[ftid], data[n - ftid - 1]);

                __syncthreads();

                n = n - half;
            }
        }
    };
}}}
202 
203 #endif /* __OPENCV_GPU_DEVICE_BLOCK_HPP__ */
static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
Definition: block.hpp:81
static __device__ __forceinline__ int flattenedThreadId()
Definition: block.hpp:65
static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out)
Definition: block.hpp:92
static __device__ __forceinline__ void fill(It beg, It end, const T &value)
Definition: block.hpp:71
static __device__ __forceinline__ unsigned int stride()
Definition: block.hpp:55
GLsizei GLsizei GLenum GLenum const GLvoid * data
Definition: core_c.h:403
static __device__ __forceinline__ void reduce(volatile T *buffer, BinOp op)
Definition: block.hpp:126
const CvMat const CvMat const CvMat CvMat CvMat CvMat CvMat CvSize CvMat CvMat * T
Definition: calib3d.hpp:270
static __device__ __forceinline__ void reduce_n(T *data, unsigned int n, BinOp op)
Definition: block.hpp:173
static __device__ __forceinline__ unsigned int id()
Definition: block.hpp:50
GLuint GLfloat * val
Definition: block.hpp:48
GLuint buffer
GLenum GLsizei n
static __device__ __forceinline__ void sync()
Definition: block.hpp:60
GLsizei const GLfloat * value
Definition: core_c.h:341
GLuint GLuint end
static __device__ __forceinline__ void transfrom(InIt beg, InIt end, OutIt out, UnOp op)
Definition: block.hpp:103
int n
Definition: legacy.hpp:3070
static __device__ __forceinline__ void transfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
Definition: block.hpp:114
double double end
Definition: core_c.h:774
GLdouble GLdouble t
static __device__ __forceinline__ T reduce(volatile T *buffer, T init, BinOp op)
Definition: block.hpp:148