| Package | Description |
|---|---|
| org.bytedeco.javacpp |
| Modifier and Type | Method and Description |
|---|---|
opencv_core.UMat |
opencv_core.UMat.adjustROI(int dtop,
int dbottom,
int dleft,
int dright)
moves/resizes the current matrix ROI inside the parent matrix.
|
opencv_core.UMat |
opencv_core.UMat.allocator(opencv_core.MatAllocator allocator) |
opencv_core.UMat |
opencv_core.UMat.apply(opencv_core.Range ranges) |
opencv_core.UMat |
opencv_core.UMat.apply(opencv_core.Range rowRange,
opencv_core.Range colRange)
extracts a rectangular sub-matrix
|
opencv_core.UMat |
opencv_core.UMat.apply(opencv_core.Rect roi) |
opencv_core.UMat |
opencv_core.UMat.clone()
returns deep copy of the matrix, i.e. the data is copied
|
opencv_core.UMat |
opencv_core.UMat.col(int x)
returns a new matrix header for the specified column
|
opencv_core.UMat |
opencv_core.UMat.colRange(int startcol,
int endcol)
...
|
opencv_core.UMat |
opencv_core.UMat.colRange(opencv_core.Range r) |
opencv_core.UMat |
opencv_core.UMat.cols(int cols) |
opencv_core.UMat |
opencv_stitching.ImageFeatures.descriptors() |
opencv_core.UMat |
opencv_core.UMat.diag() |
opencv_core.UMat |
opencv_core.UMat.diag(int d)
...
|
static opencv_core.UMat |
opencv_core.UMat.diag(opencv_core.UMat d)
constructs a square diagonal matrix whose main diagonal is the vector "d"
|
opencv_core.UMat |
opencv_core.UMat.dims(int dims) |
static opencv_core.UMat |
opencv_core.UMat.eye(int rows,
int cols,
int type) |
static opencv_core.UMat |
opencv_core.UMat.eye(opencv_core.Size size,
int type) |
opencv_core.UMat |
opencv_core.UMatBytePairVector.first(long i) |
opencv_core.UMat |
opencv_core.UMat.flags(int flags) |
opencv_core.UMat |
opencv_core.UMatVector.get(long i) |
opencv_core.UMat |
opencv_stitching.Timelapser.getDst() |
opencv_core.UMat |
opencv_core.Mat.getUMat(int accessFlags) |
opencv_core.UMat |
opencv_core.Mat.getUMat(int accessFlags,
int usageFlags)
retrieve UMat from Mat
|
opencv_core.UMat |
opencv_core.UMat.inv() |
opencv_core.UMat |
opencv_core.UMat.inv(int method)
matrix inversion by means of matrix expressions
|
opencv_core.UMat |
opencv_stitching.Stitcher.matchingMask() |
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.Mat m) |
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.Mat m,
double scale)
per-element matrix multiplication by means of matrix expressions
|
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.UMat m) |
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.UMat m,
double scale) |
opencv_core.UMat |
opencv_objdetect.HOGDescriptor.oclSvmDetector() |
opencv_core.UMat |
opencv_core.UMat.offset(long offset) |
static opencv_core.UMat |
opencv_core.UMat.ones(int rows,
int cols,
int type) |
static opencv_core.UMat |
opencv_core.UMat.ones(opencv_core.Size size,
int type) |
opencv_core.UMat |
opencv_core.UMat.position(long position) |
opencv_core.UMat |
opencv_core.UMat.put(opencv_core.Scalar s)
sets every matrix element to s
|
opencv_core.UMat |
opencv_core.UMat.put(opencv_core.UMat m)
assignment operators
|
opencv_core.UMat |
opencv_core.UMat.reshape(int cn) |
opencv_core.UMat |
opencv_core.UMat.reshape(int cn,
int rows)
creates alternative matrix header for the same data, with a different number of channels and/or a different number of rows
|
opencv_core.UMat |
opencv_core.UMat.reshape(int cn,
int newndims,
int[] newsz) |
opencv_core.UMat |
opencv_core.UMat.reshape(int cn,
int newndims,
IntBuffer newsz) |
opencv_core.UMat |
opencv_core.UMat.reshape(int cn,
int newndims,
IntPointer newsz) |
opencv_core.UMat |
opencv_core.UMat.row(int y)
returns a new matrix header for the specified row
|
opencv_core.UMat |
opencv_core.UMat.rowRange(int startrow,
int endrow)
...
|
opencv_core.UMat |
opencv_core.UMat.rowRange(opencv_core.Range r) |
opencv_core.UMat |
opencv_core.UMat.rows(int rows) |
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.Mat value) |
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.Mat value,
opencv_core.Mat mask)
sets some of the matrix elements to value, according to the mask
|
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.UMat value) |
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.UMat value,
opencv_core.UMat mask) |
opencv_core.UMat |
opencv_core.UMat.t()
matrix transposition by means of matrix expressions
|
opencv_core.UMat |
opencv_core.UMat.u(opencv_core.UMatData u) |
opencv_core.UMat |
opencv_dnn.Blob.umatRef() |
opencv_core.UMat |
opencv_dnn.Blob.umatRef(boolean writeOnly)
Returns reference to cv::UMat, containing blob data.
|
opencv_core.UMat |
opencv_dnn.Blob.umatRefConst()
Returns reference to cv::UMat, containing blob data, for read-only purposes.
|
opencv_core.UMat |
opencv_core.UMat.usageFlags(int usageFlags) |
static opencv_core.UMat |
opencv_core.UMat.zeros(int rows,
int cols,
int type)
Matlab-style matrix initialization
|
static opencv_core.UMat |
opencv_core.UMat.zeros(opencv_core.Size size,
int type) |
| Modifier and Type | Method and Description |
|---|---|
void |
opencv_core.RNG._fill(opencv_core.UMat mat,
int distType,
opencv_core.UMat a,
opencv_core.UMat b) |
void |
opencv_core.RNG._fill(opencv_core.UMat mat,
int distType,
opencv_core.UMat a,
opencv_core.UMat b,
boolean saturateRange) |
static void |
opencv_core.absdiff(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_imgproc.accumulate(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.accumulate(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat mask) |
static void |
opencv_imgproc.accumulateProduct(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_imgproc.accumulateProduct(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
opencv_core.UMat mask) |
static void |
opencv_imgproc.accumulateSquare(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.accumulateSquare(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat mask) |
static void |
opencv_imgproc.accumulateWeighted(opencv_core.UMat src,
opencv_core.UMat dst,
double alpha) |
static void |
opencv_imgproc.accumulateWeighted(opencv_core.UMat src,
opencv_core.UMat dst,
double alpha,
opencv_core.UMat mask) |
static void |
opencv_imgproc.adaptiveThreshold(opencv_core.UMat src,
opencv_core.UMat dst,
double maxValue,
int adaptiveMethod,
int thresholdType,
int blockSize,
double C) |
static void |
opencv_core.add(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.add(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
opencv_core.UMat mask,
int dtype) |
void |
opencv_ximgproc.SelectiveSearchSegmentation.addImage(opencv_core.UMat img) |
static void |
opencv_core.addWeighted(opencv_core.UMat src1,
double alpha,
opencv_core.UMat src2,
double beta,
double gamma,
opencv_core.UMat dst) |
static void |
opencv_core.addWeighted(opencv_core.UMat src1,
double alpha,
opencv_core.UMat src2,
double beta,
double gamma,
opencv_core.UMat dst,
int dtype) |
static void |
opencv_features2d.AGAST(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
int threshold) |
static void |
opencv_features2d.AGAST(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression) |
static void |
opencv_features2d.AGAST(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression,
int type) |
static void |
opencv_ximgproc.amFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigma_s,
double sigma_r) |
static void |
opencv_ximgproc.amFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigma_s,
double sigma_r,
boolean adjust_outliers) |
void |
opencv_stitching.ExposureCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.UMat image,
opencv_core.UMat mask) |
void |
opencv_stitching.NoExposureCompensator.apply(int arg0,
opencv_core.Point arg1,
opencv_core.UMat arg2,
opencv_core.UMat arg3) |
void |
opencv_stitching.GainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.UMat image,
opencv_core.UMat mask) |
void |
opencv_stitching.BlocksGainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.UMat image,
opencv_core.UMat mask) |
opencv_core.SVD |
opencv_core.SVD.apply(opencv_core.UMat src) |
opencv_core.SVD |
opencv_core.SVD.apply(opencv_core.UMat src,
int flags) |
void |
opencv_video.BackgroundSubtractor.apply(opencv_core.UMat image,
opencv_core.UMat fgmask) |
void |
opencv_imgproc.CLAHE.apply(opencv_core.UMat src,
opencv_core.UMat dst) |
void |
opencv_video.BackgroundSubtractor.apply(opencv_core.UMat image,
opencv_core.UMat fgmask,
double learningRate) |
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.UMat data,
opencv_core.UMat mean,
int flags) |
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.UMat data,
opencv_core.UMat mean,
int flags,
double retainedVariance) |
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.UMat data,
opencv_core.UMat mean,
int flags,
int maxComponents) |
void |
opencv_stitching.FeaturesFinder.apply(opencv_core.UMat image,
opencv_stitching.ImageFeatures features) |
void |
opencv_stitching.FeaturesFinder.apply(opencv_core.UMat image,
opencv_stitching.ImageFeatures features,
opencv_core.RectVector rois) |
void |
opencv_stitching.FeaturesMatcher.apply(opencv_stitching.ImageFeatures features,
opencv_stitching.MatchesInfo pairwise_matches,
opencv_core.UMat mask)
Performs image matching.
|
void |
opencv_stitching.BestOf2NearestRangeMatcher.apply(opencv_stitching.ImageFeatures features,
opencv_stitching.MatchesInfo pairwise_matches,
opencv_core.UMat mask) |
static void |
opencv_imgproc.applyColorMap(opencv_core.UMat src,
opencv_core.UMat dst,
int colormap) |
void |
opencv_bioinspired.Retina.applyFastToneMapping(opencv_core.UMat inputImage,
opencv_core.UMat outputToneMappedImage) |
void |
opencv_bioinspired.RetinaFastToneMapping.applyFastToneMapping(opencv_core.UMat inputImage,
opencv_core.UMat outputToneMappedImage) |
float |
opencv_shape.ShapeTransformer.applyTransformation(opencv_core.UMat input) |
float |
opencv_shape.ShapeTransformer.applyTransformation(opencv_core.UMat input,
opencv_core.UMat output) |
static void |
opencv_imgproc.approxPolyDP(opencv_core.UMat curve,
opencv_core.UMat approxCurve,
double epsilon,
boolean closed) |
static double |
opencv_imgproc.arcLength(opencv_core.UMat curve,
boolean closed) |
static void |
opencv_imgproc.arrowedLine(opencv_core.UMat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_imgproc.arrowedLine(opencv_core.UMat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int line_type,
int shift,
double tipLength) |
void |
opencv_core.UMat.assignTo(opencv_core.UMat m) |
void |
opencv_core.UMat.assignTo(opencv_core.UMat m,
int type) |
opencv_core.Mat |
opencv_core.PCA.backProject(opencv_core.UMat vec) |
void |
opencv_core.PCA.backProject(opencv_core.UMat vec,
opencv_core.UMat result) |
void |
opencv_core.SVD.backSubst(opencv_core.UMat rhs,
opencv_core.UMat dst) |
static void |
opencv_core.SVD.backSubst(opencv_core.UMat w,
opencv_core.UMat u,
opencv_core.UMat vt,
opencv_core.UMat rhs,
opencv_core.UMat dst) |
static void |
opencv_core.batchDistance(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dist,
int dtype,
opencv_core.UMat nidx) |
static void |
opencv_core.batchDistance(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dist,
int dtype,
opencv_core.UMat nidx,
int normType,
int K,
opencv_core.UMat mask,
int update,
boolean crosscheck) |
void |
opencv_dnn.Blob.batchFromImages(opencv_core.UMat image) |
void |
opencv_dnn.Blob.batchFromImages(opencv_core.UMat image,
int dstCn) |
static void |
opencv_imgproc.bilateralFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
opencv_imgproc.bilateralFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType) |
static void |
opencv_ximgproc.bilateralTextureFilter(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_ximgproc.bilateralTextureFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg) |
static void |
opencv_core.bitwise_and(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.bitwise_and(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
opencv_core.UMat mask) |
static void |
opencv_core.bitwise_not(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.bitwise_not(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat mask) |
static void |
opencv_core.bitwise_or(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.bitwise_or(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
opencv_core.UMat mask) |
static void |
opencv_core.bitwise_xor(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.bitwise_xor(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
opencv_core.UMat mask) |
void |
opencv_stitching.Blender.blend(opencv_core.UMat dst,
opencv_core.UMat dst_mask) |
void |
opencv_stitching.FeatherBlender.blend(opencv_core.UMat dst,
opencv_core.UMat dst_mask) |
void |
opencv_stitching.MultiBandBlender.blend(opencv_core.UMat dst,
opencv_core.UMat dst_mask) |
static void |
opencv_imgproc.blendLinear(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat weights1,
opencv_core.UMat weights2,
opencv_core.UMat dst) |
static void |
opencv_imgproc.blur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize) |
static void |
opencv_imgproc.blur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize,
opencv_core.Point anchor,
int borderType) |
static opencv_core.Rect |
opencv_imgproc.boundingRect(opencv_core.UMat points) |
static void |
opencv_imgproc.boxFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.boxFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType) |
static void |
opencv_imgproc.boxPoints(opencv_core.RotatedRect box,
opencv_core.UMat points) |
void |
opencv_flann.Index.build(opencv_core.UMat features,
opencv_flann.IndexParams params) |
void |
opencv_flann.Index.build(opencv_core.UMat features,
opencv_flann.IndexParams params,
int distType) |
void |
opencv_shape.HistogramCostExtractor.buildCostMatrix(opencv_core.UMat descriptors1,
opencv_core.UMat descriptors2,
opencv_core.UMat costMatrix) |
opencv_core.Rect |
opencv_stitching.RotationWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.AffineWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static void |
opencv_imgproc.buildPyramid(opencv_core.UMat src,
opencv_core.MatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(opencv_core.UMat src,
opencv_core.MatVector dst,
int maxlevel,
int borderType) |
static void |
opencv_imgproc.buildPyramid(opencv_core.UMat src,
opencv_core.UMatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(opencv_core.UMat src,
opencv_core.UMatVector dst,
int maxlevel,
int borderType) |
void |
opencv_video.DenseOpticalFlow.calc(opencv_core.UMat I0,
opencv_core.UMat I1,
opencv_core.UMat flow) |
void |
opencv_superres.DenseOpticalFlowExt.calc(opencv_core.UMat frame0,
opencv_core.UMat frame1,
opencv_core.UMat flow1) |
void |
opencv_superres.DenseOpticalFlowExt.calc(opencv_core.UMat frame0,
opencv_core.UMat frame1,
opencv_core.UMat flow1,
opencv_core.UMat flow2) |
void |
opencv_video.SparseOpticalFlow.calc(opencv_core.UMat prevImg,
opencv_core.UMat nextImg,
opencv_core.UMat prevPts,
opencv_core.UMat nextPts,
opencv_core.UMat status) |
void |
opencv_video.SparseOpticalFlow.calc(opencv_core.UMat prevImg,
opencv_core.UMat nextImg,
opencv_core.UMat prevPts,
opencv_core.UMat nextPts,
opencv_core.UMat status,
opencv_core.UMat err) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.MatVector images,
int[] channels,
opencv_core.UMat hist,
opencv_core.UMat dst,
float[] ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.MatVector images,
IntBuffer channels,
opencv_core.UMat hist,
opencv_core.UMat dst,
FloatBuffer ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.MatVector images,
IntPointer channels,
opencv_core.UMat hist,
opencv_core.UMat dst,
FloatPointer ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.UMatVector images,
int[] channels,
opencv_core.UMat hist,
opencv_core.UMat dst,
float[] ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.UMatVector images,
IntBuffer channels,
opencv_core.UMat hist,
opencv_core.UMat dst,
FloatBuffer ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.UMatVector images,
IntPointer channels,
opencv_core.UMat hist,
opencv_core.UMat dst,
FloatPointer ranges,
double scale) |
static void |
opencv_core.calcCovarMatrix(opencv_core.UMat samples,
opencv_core.UMat covar,
opencv_core.UMat mean,
int flags) |
static void |
opencv_core.calcCovarMatrix(opencv_core.UMat samples,
opencv_core.UMat covar,
opencv_core.UMat mean,
int flags,
int ctype) |
float |
opencv_ml.StatModel.calcError(opencv_ml.TrainData data,
boolean test,
opencv_core.UMat resp) |
static double |
opencv_optflow.calcGlobalOrientation(opencv_core.UMat orientation,
opencv_core.UMat mask,
opencv_core.UMat mhi,
double timestamp,
double duration) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
IntPointer histSize,
FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
IntPointer histSize,
FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int[] histSize,
float[] ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntBuffer histSize,
FloatBuffer ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntPointer histSize,
FloatPointer ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int[] histSize,
float[] ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntBuffer histSize,
FloatBuffer ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
IntPointer histSize,
FloatPointer ranges,
boolean accumulate) |
static void |
opencv_optflow.calcMotionGradient(opencv_core.UMat mhi,
opencv_core.UMat mask,
opencv_core.UMat orientation,
double delta1,
double delta2) |
static void |
opencv_optflow.calcMotionGradient(opencv_core.UMat mhi,
opencv_core.UMat mask,
opencv_core.UMat orientation,
double delta1,
double delta2,
int apertureSize) |
static void |
opencv_video.calcOpticalFlowFarneback(opencv_core.UMat prev,
opencv_core.UMat next,
opencv_core.UMat flow,
double pyr_scale,
int levels,
int winsize,
int iterations,
int poly_n,
double poly_sigma,
int flags) |
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.UMat prevImg,
opencv_core.UMat nextImg,
opencv_core.UMat prevPts,
opencv_core.UMat nextPts,
opencv_core.UMat status,
opencv_core.UMat err) |
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.UMat prevImg,
opencv_core.UMat nextImg,
opencv_core.UMat prevPts,
opencv_core.UMat nextPts,
opencv_core.UMat status,
opencv_core.UMat err,
opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria criteria,
int flags,
double minEigThreshold) |
static void |
opencv_optflow.calcOpticalFlowSF(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat flow,
int layers,
int averaging_block_size,
int max_flow) |
static void |
opencv_optflow.calcOpticalFlowSF(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat flow,
int layers,
int averaging_block_size,
int max_flow,
double sigma_dist,
double sigma_color,
int postprocess_window,
double sigma_dist_fix,
double sigma_color_fix,
double occ_thr,
int upscale_averaging_radius,
double upscale_sigma_dist,
double upscale_sigma_color,
double speed_up_thr) |
static void |
opencv_optflow.calcOpticalFlowSparseToDense(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat flow) |
static void |
opencv_optflow.calcOpticalFlowSparseToDense(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat flow,
int grid_step,
int k,
float sigma,
boolean use_post_proc,
float fgs_lambda,
float fgs_sigma) |
opencv_core.Point |
opencv_photo.AlignMTB.calculateShift(opencv_core.UMat img0,
opencv_core.UMat img1) |
void |
opencv_optflow.VariationalRefinement.calcUV(opencv_core.UMat I0,
opencv_core.UMat I1,
opencv_core.UMat flow_u,
opencv_core.UMat flow_v) |
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.UMat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
double[] fovx,
double[] fovy,
double[] focalLength,
opencv_core.Point2d principalPoint,
double[] aspectRatio) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.UMat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
DoublePointer fovx,
DoublePointer fovy,
DoublePointer focalLength,
opencv_core.Point2d principalPoint,
DoublePointer aspectRatio) |
static opencv_core.RotatedRect |
opencv_video.CamShift(opencv_core.UMat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria) |
static void |
opencv_imgproc.Canny(opencv_core.UMat image,
opencv_core.UMat edges,
double threshold1,
double threshold2) |
static void |
opencv_imgproc.Canny(opencv_core.UMat image,
opencv_core.UMat edges,
double threshold1,
double threshold2,
int apertureSize,
boolean L2gradient) |
static void |
opencv_imgproc.Canny(opencv_core.UMat dx,
opencv_core.UMat dy,
opencv_core.UMat edges,
double threshold1,
double threshold2) |
static void |
opencv_imgproc.Canny(opencv_core.UMat dx,
opencv_core.UMat dy,
opencv_core.UMat edges,
double threshold1,
double threshold2,
boolean L2gradient) |
static void |
opencv_core.cartToPolar(opencv_core.UMat x,
opencv_core.UMat y,
opencv_core.UMat magnitude,
opencv_core.UMat angle) |
static void |
opencv_core.cartToPolar(opencv_core.UMat x,
opencv_core.UMat y,
opencv_core.UMat magnitude,
opencv_core.UMat angle,
boolean angleInDegrees) |
static boolean |
opencv_core.checkRange(opencv_core.UMat a) |
static boolean |
opencv_core.checkRange(opencv_core.UMat a,
boolean quiet,
opencv_core.Point pos,
double minVal,
double maxVal) |
static void |
opencv_imgproc.circle(opencv_core.UMat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color) |
static void |
opencv_imgproc.circle(opencv_core.UMat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_photo.colorChange(opencv_core.UMat src,
opencv_core.UMat mask,
opencv_core.UMat dst) |
static void |
opencv_photo.colorChange(opencv_core.UMat src,
opencv_core.UMat mask,
opencv_core.UMat dst,
float red_mul,
float green_mul,
float blue_mul) |
static void |
opencv_core.compare(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
int cmpop) |
static double |
opencv_imgproc.compareHist(opencv_core.UMat H1,
opencv_core.UMat H2,
int method) |
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.UMat lines1,
opencv_core.UMat lines2) |
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.UMat lines1,
opencv_core.UMat lines2,
opencv_core.UMat _image) |
static void |
opencv_core.completeSymm(opencv_core.UMat mtx) |
static void |
opencv_core.completeSymm(opencv_core.UMat mtx,
boolean lowerToUpper) |
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.MatVector images,
opencv_core.UMat pano) |
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.UMat pano) |
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.UMatVector images,
opencv_core.UMat pano) |
static void |
opencv_calib3d.composeRT(opencv_core.UMat rvec1,
opencv_core.UMat tvec1,
opencv_core.UMat rvec2,
opencv_core.UMat tvec2,
opencv_core.UMat rvec3,
opencv_core.UMat tvec3) |
static void |
opencv_calib3d.composeRT(opencv_core.UMat rvec1,
opencv_core.UMat tvec1,
opencv_core.UMat rvec2,
opencv_core.UMat tvec2,
opencv_core.UMat rvec3,
opencv_core.UMat tvec3,
opencv_core.UMat dr3dr1,
opencv_core.UMat dr3dt1,
opencv_core.UMat dr3dr2,
opencv_core.UMat dr3dt2,
opencv_core.UMat dt3dr1,
opencv_core.UMat dt3dt1,
opencv_core.UMat dt3dr2,
opencv_core.UMat dt3dt2) |
void |
opencv_core.LDA.compute(opencv_core.MatVector src,
opencv_core.UMat labels) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.UMat img,
float[] descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.UMat img,
float[] descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.UMat img,
FloatPointer descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.UMat img,
FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat descriptors) |
void |
opencv_xfeatures2d.VGG.compute(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat descriptors) |
void |
opencv_features2d.Feature2D.compute(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat descriptors) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat imgDescriptor) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat imgDescriptor,
opencv_core.IntVectorVector pointIdxsOfClusters,
opencv_core.Mat descriptors) |
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.UMat image,
opencv_core.Rect roi,
opencv_core.UMat descriptors) |
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.UMat image,
opencv_core.UMat descriptors) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.UMat keypointDescriptors,
opencv_core.UMat imgDescriptor) |
static void |
opencv_core.SVD.compute(opencv_core.UMat src,
opencv_core.UMat w) |
static void |
opencv_core.SVD.compute(opencv_core.UMat src,
opencv_core.UMat w,
int flags) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.UMat keypointDescriptors,
opencv_core.UMat imgDescriptor,
opencv_core.IntVectorVector pointIdxsOfClusters) |
void |
opencv_calib3d.StereoMatcher.compute(opencv_core.UMat left,
opencv_core.UMat right,
opencv_core.UMat disparity) |
static void |
opencv_core.SVD.compute(opencv_core.UMat src,
opencv_core.UMat w,
opencv_core.UMat u,
opencv_core.UMat vt) |
static void |
opencv_core.SVD.compute(opencv_core.UMat src,
opencv_core.UMat w,
opencv_core.UMat u,
opencv_core.UMat vt,
int flags) |
void |
opencv_core.LDA.compute(opencv_core.UMatVector src,
opencv_core.UMat labels) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI,
int thresh) |
void |
opencv_photo.AlignMTB.computeBitmaps(opencv_core.UMat img,
opencv_core.UMat tb,
opencv_core.UMat eb) |
static void |
opencv_calib3d.computeCorrespondEpilines(opencv_core.UMat points,
int whichImage,
opencv_core.UMat F,
opencv_core.UMat lines) |
float |
opencv_shape.ShapeDistanceExtractor.computeDistance(opencv_core.UMat contour1,
opencv_core.UMat contour2) |
static double |
opencv_ximgproc.computeMSE(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI) |
static void |
opencv_text.computeNMChannels(opencv_core.UMat _src,
opencv_core.MatVector _channels) |
static void |
opencv_text.computeNMChannels(opencv_core.UMat _src,
opencv_core.MatVector _channels,
int _mode) |
static void |
opencv_text.computeNMChannels(opencv_core.UMat _src,
opencv_core.UMatVector _channels) |
static void |
opencv_text.computeNMChannels(opencv_core.UMat _src,
opencv_core.UMatVector _channels,
int _mode) |
float |
opencv_xfeatures2d.PCTSignaturesSQFD.computeQuadraticFormDistance(opencv_core.UMat _signature0,
opencv_core.UMat _signature1) |
void |
opencv_xfeatures2d.PCTSignatures.computeSignature(opencv_core.UMat image,
opencv_core.UMat signature) |
static int |
opencv_imgproc.connectedComponents(opencv_core.UMat image,
opencv_core.UMat labels) |
static int |
opencv_imgproc.connectedComponents(opencv_core.UMat image,
opencv_core.UMat labels,
int connectivity,
int ltype) |
static int |
opencv_imgproc.connectedComponentsWithAlgorithm(opencv_core.UMat image,
opencv_core.UMat labels,
int connectivity,
int ltype,
int ccltype) |
static int |
opencv_imgproc.connectedComponentsWithStats(opencv_core.UMat image,
opencv_core.UMat labels,
opencv_core.UMat stats,
opencv_core.UMat centroids) |
static int |
opencv_imgproc.connectedComponentsWithStats(opencv_core.UMat image,
opencv_core.UMat labels,
opencv_core.UMat stats,
opencv_core.UMat centroids,
int connectivity,
int ltype) |
static int |
opencv_imgproc.connectedComponentsWithStatsWithAlgorithm(opencv_core.UMat image,
opencv_core.UMat labels,
opencv_core.UMat stats,
opencv_core.UMat centroids,
int connectivity,
int ltype,
int ccltype) |
static double |
opencv_imgproc.contourArea(opencv_core.UMat contour) |
static double |
opencv_imgproc.contourArea(opencv_core.UMat contour,
boolean oriented) |
static void |
opencv_core.convertFp16(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.convertMaps(opencv_core.UMat map1,
opencv_core.UMat map2,
opencv_core.UMat dstmap1,
opencv_core.UMat dstmap2,
int dstmap1type) |
static void |
opencv_imgproc.convertMaps(opencv_core.UMat map1,
opencv_core.UMat map2,
opencv_core.UMat dstmap1,
opencv_core.UMat dstmap2,
int dstmap1type,
boolean nninterpolation) |
static void |
opencv_calib3d.convertPointsFromHomogeneous(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_calib3d.convertPointsHomogeneous(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_calib3d.convertPointsToHomogeneous(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.convertScaleAbs(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.convertScaleAbs(opencv_core.UMat src,
opencv_core.UMat dst,
double alpha,
double beta) |
void |
opencv_core.Mat.convertTo(opencv_core.UMat m,
int rtype) |
void |
opencv_core.UMat.convertTo(opencv_core.UMat m,
int rtype) |
void |
opencv_core.Mat.convertTo(opencv_core.UMat m,
int rtype,
double alpha,
double beta) |
void |
opencv_core.UMat.convertTo(opencv_core.UMat m,
int rtype,
double alpha,
double beta) |
static void |
opencv_imgproc.convexHull(opencv_core.UMat points,
opencv_core.UMat hull) |
static void |
opencv_imgproc.convexHull(opencv_core.UMat points,
opencv_core.UMat hull,
boolean clockwise,
boolean returnPoints) |
static void |
opencv_imgproc.convexityDefects(opencv_core.UMat contour,
opencv_core.UMat convexhull,
opencv_core.UMat convexityDefects) |
static void |
opencv_core.copyMakeBorder(opencv_core.UMat src,
opencv_core.UMat dst,
int top,
int bottom,
int left,
int right,
int borderType) |
static void |
opencv_core.copyMakeBorder(opencv_core.UMat src,
opencv_core.UMat dst,
int top,
int bottom,
int left,
int right,
int borderType,
opencv_core.Scalar value) |
void |
opencv_core.UMat.copySize(opencv_core.UMat m)
Internal-use function; properly re-allocates the _size and _step arrays
|
void |
opencv_core.Mat.copyTo(opencv_core.UMat m) |
void |
opencv_core.UMat.copyTo(opencv_core.UMat m) |
void |
opencv_core.Mat.copyTo(opencv_core.UMat m,
opencv_core.UMat mask) |
void |
opencv_core.UMat.copyTo(opencv_core.UMat m,
opencv_core.UMat mask) |
static void |
opencv_imgproc.cornerEigenValsAndVecs(opencv_core.UMat src,
opencv_core.UMat dst,
int blockSize,
int ksize) |
static void |
opencv_imgproc.cornerEigenValsAndVecs(opencv_core.UMat src,
opencv_core.UMat dst,
int blockSize,
int ksize,
int borderType) |
static void |
opencv_imgproc.cornerHarris(opencv_core.UMat src,
opencv_core.UMat dst,
int blockSize,
int ksize,
double k) |
static void |
opencv_imgproc.cornerHarris(opencv_core.UMat src,
opencv_core.UMat dst,
int blockSize,
int ksize,
double k,
int borderType) |
static void |
opencv_imgproc.cornerMinEigenVal(opencv_core.UMat src,
opencv_core.UMat dst,
int blockSize) |
static void |
opencv_imgproc.cornerMinEigenVal(opencv_core.UMat src,
opencv_core.UMat dst,
int blockSize,
int ksize,
int borderType) |
static void |
opencv_imgproc.cornerSubPix(opencv_core.UMat image,
opencv_core.UMat corners,
opencv_core.Size winSize,
opencv_core.Size zeroZone,
opencv_core.TermCriteria criteria) |
static void |
opencv_calib3d.correctMatches(opencv_core.UMat F,
opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat newPoints1,
opencv_core.UMat newPoints2) |
static int |
opencv_core.countNonZero(opencv_core.UMat src) |
static void |
opencv_ximgproc.covarianceEstimation(opencv_core.UMat src,
opencv_core.UMat dst,
int windowRows,
int windowCols) |
static opencv_xfeatures2d.DAISY |
opencv_xfeatures2d.DAISY.create(float radius,
int q_radius,
int q_theta,
int q_hist,
int norm,
opencv_core.UMat H,
boolean interpolation,
boolean use_orientation) |
static opencv_core.DownhillSolver |
opencv_core.DownhillSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.UMat initStep,
opencv_core.TermCriteria termcrit) |
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.UMat samples,
int layout,
opencv_core.UMat responses) |
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.UMat samples,
int layout,
opencv_core.UMat responses,
opencv_core.UMat varIdx,
opencv_core.UMat sampleIdx,
opencv_core.UMat sampleWeights,
opencv_core.UMat varType) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode) |
static void |
opencv_ml.createConcentricSpheresTestSet(int nsamples,
int nfeatures,
int nclasses,
opencv_core.UMat samples,
opencv_core.UMat responses) |
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.UMat guide,
double sigmaSpatial,
double sigmaColor) |
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.UMat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters) |
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.UMat guide,
double lambda,
double sigma_color) |
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.UMat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter) |
static opencv_ximgproc.GuidedFilter |
opencv_ximgproc.createGuidedFilter(opencv_core.UMat guide,
int radius,
double eps) |
static void |
opencv_imgproc.createHanningWindow(opencv_core.UMat dst,
opencv_core.Size winSize,
int type) |
static void |
opencv_stitching.createLaplacePyr(opencv_core.UMat img,
int num_levels,
opencv_core.UMatVector pyr) |
static void |
opencv_stitching.createLaplacePyrGpu(opencv_core.UMat img,
int num_levels,
opencv_core.UMatVector pyr) |
static void |
opencv_text.createOCRHMMTransitionsTable(BytePointer vocabulary,
opencv_text.StdStringVector lexicon,
opencv_core.UMat transition_probabilities_table) |
static void |
opencv_text.createOCRHMMTransitionsTable(String vocabulary,
opencv_text.StdStringVector lexicon,
opencv_core.UMat transition_probabilities_table) |
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.UMat image) |
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.UMat image,
int region_size,
float ratio) |
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.UMat image) |
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.UMat image,
int algorithm,
int region_size,
float ruler) |
static void |
opencv_stitching.createWeightMap(opencv_core.UMat mask,
float sharpness,
opencv_core.UMat weight) |
opencv_core.Mat |
opencv_core.Mat.cross(opencv_core.UMat m) |
static void |
opencv_imgproc.cvtColor(opencv_core.UMat src,
opencv_core.UMat dst,
int code) |
static void |
opencv_imgproc.cvtColor(opencv_core.UMat src,
opencv_core.UMat dst,
int code,
int dstCn) |
static void |
opencv_core.dct(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.dct(opencv_core.UMat src,
opencv_core.UMat dst,
int flags) |
static void |
opencv_photo.decolor(opencv_core.UMat src,
opencv_core.UMat grayscale,
opencv_core.UMat color_boost) |
static void |
opencv_calib3d.decomposeEssentialMat(opencv_core.UMat E,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat t) |
static int |
opencv_calib3d.decomposeHomographyMat(opencv_core.UMat H,
opencv_core.UMat K,
opencv_core.MatVector rotations,
opencv_core.MatVector translations,
opencv_core.MatVector normals) |
static int |
opencv_calib3d.decomposeHomographyMat(opencv_core.UMat H,
opencv_core.UMat K,
opencv_core.UMatVector rotations,
opencv_core.UMatVector translations,
opencv_core.UMatVector normals) |
static void |
opencv_calib3d.decomposeProjectionMatrix(opencv_core.UMat projMatrix,
opencv_core.UMat cameraMatrix,
opencv_core.UMat rotMatrix,
opencv_core.UMat transVect) |
static void |
opencv_calib3d.decomposeProjectionMatrix(opencv_core.UMat projMatrix,
opencv_core.UMat cameraMatrix,
opencv_core.UMat rotMatrix,
opencv_core.UMat transVect,
opencv_core.UMat rotMatrixX,
opencv_core.UMat rotMatrixY,
opencv_core.UMat rotMatrixZ,
opencv_core.UMat eulerAngles) |
static void |
opencv_imgproc.demosaicing(opencv_core.UMat _src,
opencv_core.UMat _dst,
int code) |
static void |
opencv_imgproc.demosaicing(opencv_core.UMat _src,
opencv_core.UMat _dst,
int code,
int dcn) |
opencv_stitching.ImageFeatures |
opencv_stitching.ImageFeatures.descriptors(opencv_core.UMat descriptors) |
static void |
opencv_photo.detailEnhance(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_photo.detailEnhance(opencv_core.UMat src,
opencv_core.UMat dst,
float sigma_s,
float sigma_r) |
void |
opencv_features2d.Feature2D.detect(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints) |
void |
opencv_features2d.Feature2D.detect(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat mask) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.UMat image,
opencv_core.UMat positions) |
void |
opencv_imgproc.LineSegmentDetector.detect(opencv_core.UMat _image,
opencv_core.UMat _lines) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.UMat image,
opencv_core.UMat positions,
opencv_core.UMat votes) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.UMat edges,
opencv_core.UMat dx,
opencv_core.UMat dy,
opencv_core.UMat positions) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.UMat edges,
opencv_core.UMat dx,
opencv_core.UMat dy,
opencv_core.UMat positions,
opencv_core.UMat votes) |
void |
opencv_imgproc.LineSegmentDetector.detect(opencv_core.UMat _image,
opencv_core.UMat _lines,
opencv_core.UMat width,
opencv_core.UMat prec,
opencv_core.UMat nfa) |
void |
opencv_features2d.Feature2D.detectAndCompute(opencv_core.UMat image,
opencv_core.UMat mask,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat descriptors) |
void |
opencv_features2d.Feature2D.detectAndCompute(opencv_core.UMat image,
opencv_core.UMat mask,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat descriptors,
boolean useProvidedKeypoints) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
double[] foundWeights) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
double[] foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
DoublePointer foundWeights) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
DoublePointer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer rejectLevels,
DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] numDetections) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer numDetections) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer rejectLevels,
DoublePointer levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer rejectLevels,
DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_features2d.MSER.detectRegions(opencv_core.UMat image,
opencv_core.PointVectorVector msers,
opencv_core.RectVector bboxes) |
static void |
opencv_text.detectRegions(opencv_core.UMat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.PointVectorVector regions) |
static double |
opencv_core.determinant(opencv_core.UMat mtx) |
static void |
opencv_core.dft(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.dft(opencv_core.UMat src,
opencv_core.UMat dst,
int flags,
int nonzeroRows) |
static opencv_core.UMat |
opencv_core.UMat.diag(opencv_core.UMat d)
constructs a square diagonal matrix whose main diagonal is vector "d"
|
static void |
opencv_imgproc.dilate(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat kernel) |
static void |
opencv_imgproc.dilate(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue) |
static void |
opencv_imgproc.distanceTransform(opencv_core.UMat src,
opencv_core.UMat dst,
int distanceType,
int maskSize) |
static void |
opencv_imgproc.distanceTransform(opencv_core.UMat src,
opencv_core.UMat dst,
int distanceType,
int maskSize,
int dstType) |
static void |
opencv_imgproc.distanceTransformWithLabels(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat labels,
int distanceType,
int maskSize) |
static void |
opencv_imgproc.distanceTransformWithLabels(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat labels,
int distanceType,
int maskSize,
int labelType) |
static void |
opencv_calib3d.distortPoints(opencv_core.UMat undistorted,
opencv_core.UMat distorted,
opencv_core.UMat K,
opencv_core.UMat D) |
static void |
opencv_calib3d.distortPoints(opencv_core.UMat undistorted,
opencv_core.UMat distorted,
opencv_core.UMat K,
opencv_core.UMat D,
double alpha) |
static void |
opencv_core.divide(double scale,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.divide(double scale,
opencv_core.UMat src2,
opencv_core.UMat dst,
int dtype) |
static void |
opencv_core.divide(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.divide(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
double scale,
int dtype) |
double |
opencv_core.Mat.dot(opencv_core.UMat m) |
double |
opencv_core.UMat.dot(opencv_core.UMat m) |
static void |
opencv_calib3d.drawChessboardCorners(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat corners,
boolean patternWasFound) |
static void |
opencv_imgproc.drawContours(opencv_core.UMat image,
opencv_core.MatVector contours,
int contourIdx,
opencv_core.Scalar color) |
static void |
opencv_imgproc.drawContours(opencv_core.UMat image,
opencv_core.MatVector contours,
int contourIdx,
opencv_core.Scalar color,
int thickness,
int lineType,
opencv_core.UMat hierarchy,
int maxLevel,
opencv_core.Point offset) |
static void |
opencv_imgproc.drawContours(opencv_core.UMat image,
opencv_core.UMatVector contours,
int contourIdx,
opencv_core.Scalar color) |
static void |
opencv_imgproc.drawContours(opencv_core.UMat image,
opencv_core.UMatVector contours,
int contourIdx,
opencv_core.Scalar color,
int thickness,
int lineType,
opencv_core.UMat hierarchy,
int maxLevel,
opencv_core.Point offset) |
static void |
opencv_features2d.drawKeypoints(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat outImage) |
static void |
opencv_features2d.drawKeypoints(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat outImage,
opencv_core.Scalar color,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.UMat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.UMat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVector matches1to2,
opencv_core.UMat outImg) |
static void |
opencv_features2d.drawMatches(opencv_core.UMat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.UMat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVector matches1to2,
opencv_core.UMat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
byte[] matchesMask,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.UMat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.UMat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVector matches1to2,
opencv_core.UMat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
BytePointer matchesMask,
int flags) |
static void |
opencv_features2d.drawMatchesKnn(opencv_core.UMat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.UMat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVectorVector matches1to2,
opencv_core.UMat outImg) |
static void |
opencv_features2d.drawMatchesKnn(opencv_core.UMat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.UMat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVectorVector matches1to2,
opencv_core.UMat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
opencv_core.ByteVectorVector matchesMask,
int flags) |
void |
opencv_imgproc.LineSegmentDetector.drawSegments(opencv_core.UMat _image,
opencv_core.UMat lines) |
static void |
opencv_xfeatures2d.PCTSignatures.drawSignature(opencv_core.UMat source,
opencv_core.UMat signature,
opencv_core.UMat result) |
static void |
opencv_xfeatures2d.PCTSignatures.drawSignature(opencv_core.UMat source,
opencv_core.UMat signature,
opencv_core.UMat result,
float radiusToShorterSideRatio,
int borderThickness) |
static void |
opencv_ximgproc.dtFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigmaSpatial,
double sigmaColor) |
static void |
opencv_ximgproc.dtFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters) |
static void |
opencv_photo.edgePreservingFilter(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_photo.edgePreservingFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int flags,
float sigma_s,
float sigma_r) |
static boolean |
opencv_core.eigen(opencv_core.UMat src,
opencv_core.UMat eigenvalues) |
static boolean |
opencv_core.eigen(opencv_core.UMat src,
opencv_core.UMat eigenvalues,
opencv_core.UMat eigenvectors) |
static void |
opencv_imgproc.ellipse(opencv_core.UMat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color) |
static void |
opencv_imgproc.ellipse(opencv_core.UMat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.ellipse(opencv_core.UMat img,
opencv_core.RotatedRect box,
opencv_core.Scalar color) |
static void |
opencv_imgproc.ellipse(opencv_core.UMat img,
opencv_core.RotatedRect box,
opencv_core.Scalar color,
int thickness,
int lineType) |
static float |
opencv_imgproc.EMD(opencv_core.UMat signature1,
opencv_core.UMat signature2,
int distType) |
static float |
opencv_imgproc.EMD(opencv_core.UMat signature1,
opencv_core.UMat signature2,
int distType,
opencv_core.UMat cost,
float[] lowerBound,
opencv_core.UMat flow) |
static float |
opencv_imgproc.EMD(opencv_core.UMat signature1,
opencv_core.UMat signature2,
int distType,
opencv_core.UMat cost,
FloatPointer lowerBound,
opencv_core.UMat flow) |
static float |
opencv_shape.EMDL1(opencv_core.UMat signature1,
opencv_core.UMat signature2) |
static void |
opencv_imgproc.equalizeHist(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_text.erGrouping(opencv_core.UMat img,
opencv_core.MatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.erGrouping(opencv_core.UMat img,
opencv_core.MatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
BytePointer filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.UMat image,
opencv_core.UMat channel,
opencv_core.PointVectorVector regions,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.erGrouping(opencv_core.UMat image,
opencv_core.UMat channel,
opencv_core.PointVectorVector regions,
opencv_core.RectVector groups_rects,
int method,
BytePointer filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.UMat image,
opencv_core.UMat channel,
opencv_core.PointVectorVector regions,
opencv_core.RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.UMat img,
opencv_core.UMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.erGrouping(opencv_core.UMat img,
opencv_core.UMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_imgproc.erode(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat kernel) |
static void |
opencv_imgproc.erode(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.UMat points0,
opencv_core.UMat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.UMat points0,
opencv_core.UMat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.UMat points0,
opencv_core.UMat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
BoolPointer ok) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.UMat from,
opencv_core.UMat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters) |
static int |
opencv_calib3d.estimateAffine3D(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat out,
opencv_core.UMat inliers) |
static int |
opencv_calib3d.estimateAffine3D(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat out,
opencv_core.UMat inliers,
double ransacThreshold,
double confidence) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.UMat from,
opencv_core.UMat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.UMat points0,
opencv_core.UMat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
float[] rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
FloatPointer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.UMat points0,
opencv_core.UMat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
opencv_videostab.RansacParams params,
float[] rmse,
int[] ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
opencv_videostab.RansacParams params,
FloatPointer rmse,
IntPointer ninliers) |
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.Size image_size,
opencv_core.UMat R,
opencv_core.UMat P) |
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.Size image_size,
opencv_core.UMat R,
opencv_core.UMat P,
double balance,
opencv_core.Size new_size,
double fov_scale) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.UMat src,
opencv_core.UMat dst,
boolean fullAffine) |
void |
opencv_shape.ShapeTransformer.estimateTransformation(opencv_core.UMat transformingShape,
opencv_core.UMat targetShape,
opencv_core.DMatchVector matches) |
void |
opencv_text.OCRHMMDecoder.ClassifierCallback.eval(opencv_core.UMat image,
int[] out_class,
opencv_text.DoubleVector out_confidence) |
void |
opencv_text.OCRHMMDecoder.ClassifierCallback.eval(opencv_core.UMat image,
IntPointer out_class,
opencv_text.DoubleVector out_confidence) |
void |
opencv_text.OCRBeamSearchDecoder.ClassifierCallback.eval(opencv_core.UMat image,
opencv_text.DoubleVector recognition_probabilities,
int[] oversegmentation) |
void |
opencv_text.OCRBeamSearchDecoder.ClassifierCallback.eval(opencv_core.UMat image,
opencv_text.DoubleVector recognition_probabilities,
IntPointer oversegmentation) |
static void |
opencv_core.exp(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.extractChannel(opencv_core.UMat src,
opencv_core.UMat dst,
int coi) |
static void |
opencv_core.extractImageCOI(opencv_core.CvArr arr,
opencv_core.UMat coiimg) |
static void |
opencv_core.extractImageCOI(opencv_core.CvArr arr,
opencv_core.UMat coiimg,
int coi) |
static void |
opencv_features2d.FAST(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
int threshold) |
static void |
opencv_features2d.FAST(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression) |
static void |
opencv_features2d.FAST(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression,
int type) |
static void |
opencv_ximgproc.fastGlobalSmootherFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double lambda,
double sigma_color) |
static void |
opencv_ximgproc.fastGlobalSmootherFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter) |
static void |
opencv_ximgproc.FastHoughTransform(opencv_core.UMat src,
opencv_core.UMat dst,
int dstMatDepth) |
static void |
opencv_ximgproc.FastHoughTransform(opencv_core.UMat src,
opencv_core.UMat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst,
float h) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst,
float[] h) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst,
float[] h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst,
float h,
int search_window,
int block_size,
opencv_core.Stream stream) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst,
FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.UMat src,
opencv_core.UMat dst,
FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.UMat src,
opencv_core.UMat dst,
float h_luminance,
float photo_render) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.UMat src,
opencv_core.UMat dst,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.UMat src,
opencv_core.UMat dst,
float h_luminance,
float photo_render,
int search_window,
int block_size,
opencv_core.Stream stream) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float[] h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float[] h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatBuffer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatBuffer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float[] h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float[] h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatBuffer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatBuffer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
void |
opencv_stitching.Blender.feed(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Point tl) |
void |
opencv_stitching.FeatherBlender.feed(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Point tl) |
void |
opencv_stitching.MultiBandBlender.feed(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Point tl) |
static void |
opencv_imgproc.fillConvexPoly(opencv_core.UMat img,
opencv_core.UMat points,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillConvexPoly(opencv_core.UMat img,
opencv_core.UMat points,
opencv_core.Scalar color,
int lineType,
int shift) |
static void |
opencv_imgproc.fillPoly(opencv_core.UMat img,
opencv_core.MatVector pts,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.UMat img,
opencv_core.MatVector pts,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_imgproc.fillPoly(opencv_core.UMat img,
opencv_core.UMatVector pts,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.UMat img,
opencv_core.UMatVector pts,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
void |
opencv_ximgproc.DTFilter.filter(opencv_core.UMat src,
opencv_core.UMat dst) |
void |
opencv_ximgproc.GuidedFilter.filter(opencv_core.UMat src,
opencv_core.UMat dst) |
void |
opencv_ximgproc.AdaptiveManifoldFilter.filter(opencv_core.UMat src,
opencv_core.UMat dst) |
void |
opencv_ximgproc.FastGlobalSmootherFilter.filter(opencv_core.UMat src,
opencv_core.UMat dst) |
void |
opencv_ximgproc.DTFilter.filter(opencv_core.UMat src,
opencv_core.UMat dst,
int dDepth) |
void |
opencv_ximgproc.GuidedFilter.filter(opencv_core.UMat src,
opencv_core.UMat dst,
int dDepth) |
void |
opencv_ximgproc.AdaptiveManifoldFilter.filter(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat joint) |
void |
opencv_ximgproc.DisparityFilter.filter(opencv_core.UMat disparity_map_left,
opencv_core.UMat left_view,
opencv_core.UMat filtered_disparity_map) |
void |
opencv_ximgproc.DisparityFilter.filter(opencv_core.UMat disparity_map_left,
opencv_core.UMat left_view,
opencv_core.UMat filtered_disparity_map,
opencv_core.UMat disparity_map_right,
opencv_core.Rect ROI,
opencv_core.UMat right_view) |
static void |
opencv_imgproc.filter2D(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.UMat kernel) |
static void |
opencv_imgproc.filter2D(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.UMat kernel,
opencv_core.Point anchor,
double delta,
int borderType) |
static void |
opencv_calib3d.filterSpeckles(opencv_core.UMat img,
double newVal,
int maxSpeckleSize,
double maxDiff) |
static void |
opencv_calib3d.filterSpeckles(opencv_core.UMat img,
double newVal,
int maxSpeckleSize,
double maxDiff,
opencv_core.UMat buf) |
static boolean |
opencv_calib3d.find4QuadCornerSubpix(opencv_core.UMat img,
opencv_core.UMat corners,
opencv_core.Size region_size) |
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat corners) |
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat corners,
int flags) |
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat centers) |
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat centers,
int flags,
opencv_features2d.Feature2D blobDetector) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.MatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.MatVector contours,
int mode,
int method,
opencv_core.Point offset) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.MatVector contours,
opencv_core.UMat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.MatVector contours,
opencv_core.UMat hierarchy,
int mode,
int method,
opencv_core.Point offset) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.UMatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.UMatVector contours,
int mode,
int method,
opencv_core.Point offset) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.UMatVector contours,
opencv_core.UMat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.UMat image,
opencv_core.UMatVector contours,
opencv_core.UMat hierarchy,
int mode,
int method,
opencv_core.Point offset) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2,
double focal,
opencv_core.Point2d pp,
int method,
double prob,
double threshold,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat cameraMatrix) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat cameraMatrix,
int method,
double prob,
double threshold,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2,
int method,
double param1,
double param2,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat mask,
int method,
double param1,
double param2) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints,
int method,
double ransacReprojThreshold,
opencv_core.UMat mask,
int maxIters,
double confidence) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints,
opencv_core.UMat mask,
int method,
double ransacReprojThreshold) |
float |
opencv_ml.KNearest.findNearest(opencv_core.UMat samples,
int k,
opencv_core.UMat results) |
float |
opencv_ml.KNearest.findNearest(opencv_core.UMat samples,
int k,
opencv_core.UMat results,
opencv_core.UMat neighborResponses,
opencv_core.UMat dist) |
static void |
opencv_core.findNonZero(opencv_core.UMat src,
opencv_core.UMat idx) |
static double |
opencv_video.findTransformECC(opencv_core.UMat templateImage,
opencv_core.UMat inputImage,
opencv_core.UMat warpMatrix) |
static double |
opencv_video.findTransformECC(opencv_core.UMat templateImage,
opencv_core.UMat inputImage,
opencv_core.UMat warpMatrix,
int motionType,
opencv_core.TermCriteria criteria,
opencv_core.UMat inputMask) |
opencv_core.UMatBytePairVector |
opencv_core.UMatBytePairVector.first(long i,
opencv_core.UMat first) |
static opencv_core.RotatedRect |
opencv_imgproc.fitEllipse(opencv_core.UMat points) |
static void |
opencv_imgproc.fitLine(opencv_core.UMat points,
opencv_core.UMat line,
int distType,
double param,
double reps,
double aeps) |
static void |
opencv_core.flip(opencv_core.UMat src,
opencv_core.UMat dst,
int flipCode) |
static int |
opencv_imgproc.floodFill(opencv_core.UMat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.UMat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags) |
static int |
opencv_imgproc.floodFill(opencv_core.UMat image,
opencv_core.UMat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.UMat image,
opencv_core.UMat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags) |
static opencv_core.Formatted |
opencv_core.format(opencv_core.UMat mtx,
int fmt) |
static opencv_dnn.Blob |
opencv_dnn.Blob.fromImages(opencv_core.UMat image) |
static opencv_dnn.Blob |
opencv_dnn.Blob.fromImages(opencv_core.UMat image,
int dstCn) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize,
double sigmaX) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize,
double sigmaX,
double sigmaY,
int borderType) |
static void |
opencv_core.gemm(opencv_core.UMat src1,
opencv_core.UMat src2,
double alpha,
opencv_core.UMat src3,
double beta,
opencv_core.UMat dst) |
static void |
opencv_core.gemm(opencv_core.UMat src1,
opencv_core.UMat src2,
double alpha,
opencv_core.UMat src3,
double beta,
opencv_core.UMat dst,
int flags) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.UMat src,
opencv_core.UMat dst) |
void |
opencv_video.BackgroundSubtractor.getBackgroundImage(opencv_core.UMat backgroundImage) |
double |
opencv_ml.SVM.getDecisionFunction(int i,
opencv_core.UMat alpha,
opencv_core.UMat svidx) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.UMat cameraMatrix) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint) |
static void |
opencv_imgproc.getDerivKernels(opencv_core.UMat kx,
opencv_core.UMat ky,
int dx,
int dy,
int ksize) |
static void |
opencv_imgproc.getDerivKernels(opencv_core.UMat kx,
opencv_core.UMat ky,
int dx,
int dy,
int ksize,
boolean normalize,
int ktype) |
static void |
opencv_ximgproc.getDisparityVis(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_ximgproc.getDisparityVis(opencv_core.UMat src,
opencv_core.UMat dst,
double scale) |
void |
opencv_shape.ShapeContextDistanceExtractor.getImages(opencv_core.UMat image1,
opencv_core.UMat image2) |
void |
opencv_core.DownhillSolver.getInitStep(opencv_core.UMat step) |
void |
opencv_ximgproc.SuperpixelSEEDS.getLabelContourMask(opencv_core.UMat image) |
void |
opencv_ximgproc.SuperpixelSLIC.getLabelContourMask(opencv_core.UMat image) |
void |
opencv_ximgproc.SuperpixelLSC.getLabelContourMask(opencv_core.UMat image) |
void |
opencv_ximgproc.SuperpixelSEEDS.getLabelContourMask(opencv_core.UMat image,
boolean thick_line) |
void |
opencv_ximgproc.SuperpixelSLIC.getLabelContourMask(opencv_core.UMat image,
boolean thick_line) |
void |
opencv_ximgproc.SuperpixelLSC.getLabelContourMask(opencv_core.UMat image,
boolean thick_line) |
void |
opencv_ximgproc.SuperpixelSEEDS.getLabels(opencv_core.UMat labels_out) |
void |
opencv_ximgproc.SuperpixelSLIC.getLabels(opencv_core.UMat labels_out) |
void |
opencv_ximgproc.SuperpixelLSC.getLabels(opencv_core.UMat labels_out) |
void |
opencv_bioinspired.Retina.getMagno(opencv_core.UMat retinaOutput_magno) |
void |
opencv_bioinspired.Retina.getMagnoRAW(opencv_core.UMat retinaOutput_magno) |
void |
opencv_ml.TrainData.getNormCatValues(int vi,
opencv_core.UMat sidx,
int[] values) |
void |
opencv_ml.TrainData.getNormCatValues(int vi,
opencv_core.UMat sidx,
IntPointer values) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint) |
void |
opencv_bioinspired.Retina.getParvo(opencv_core.UMat retinaOutput_parvo) |
void |
opencv_bioinspired.Retina.getParvoRAW(opencv_core.UMat retinaOutput_parvo) |
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.UMat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.UMat patch) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.UMat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.UMat patch,
int patchType) |
void |
opencv_ml.TrainData.getSample(opencv_core.UMat varIdx,
int sidx,
float[] buf) |
void |
opencv_ml.TrainData.getSample(opencv_core.UMat varIdx,
int sidx,
FloatPointer buf) |
void |
opencv_bioinspired.TransientAreasSegmentationModule.getSegmentationPicture(opencv_core.UMat transientAreas) |
void |
opencv_ml.TrainData.getValues(int vi,
opencv_core.UMat sidx,
float[] values) |
void |
opencv_ml.TrainData.getValues(int vi,
opencv_core.UMat sidx,
FloatPointer values) |
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.UMat image,
opencv_core.UMat corners,
int maxCorners,
double qualityLevel,
double minDistance) |
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.UMat image,
opencv_core.UMat corners,
int maxCorners,
double qualityLevel,
double minDistance,
opencv_core.UMat mask,
int blockSize,
boolean useHarrisDetector,
double k) |
static void |
opencv_imgproc.grabCut(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Rect rect,
opencv_core.UMat bgdModel,
opencv_core.UMat fgdModel,
int iterCount) |
static void |
opencv_imgproc.grabCut(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Rect rect,
opencv_core.UMat bgdModel,
opencv_core.UMat fgdModel,
int iterCount,
int mode) |
static void |
opencv_ximgproc.guidedFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
int radius,
double eps) |
static void |
opencv_ximgproc.guidedFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
int radius,
double eps,
int dDepth) |
static void |
opencv_core.hconcat(opencv_core.Mat src,
long nsrc,
opencv_core.UMat dst) |
static void |
opencv_core.hconcat(opencv_core.MatVector src,
opencv_core.UMat dst) |
static void |
opencv_core.hconcat(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.hconcat(opencv_core.UMatVector src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.HoughCircles(opencv_core.UMat image,
opencv_core.UMat circles,
int method,
double dp,
double minDist) |
static void |
opencv_imgproc.HoughCircles(opencv_core.UMat image,
opencv_core.UMat circles,
int method,
double dp,
double minDist,
double param1,
double param2,
int minRadius,
int maxRadius) |
static void |
opencv_imgproc.HoughLines(opencv_core.UMat image,
opencv_core.UMat lines,
double rho,
double theta,
int threshold) |
static void |
opencv_imgproc.HoughLines(opencv_core.UMat image,
opencv_core.UMat lines,
double rho,
double theta,
int threshold,
double srn,
double stn,
double min_theta,
double max_theta) |
static void |
opencv_imgproc.HoughLinesP(opencv_core.UMat image,
opencv_core.UMat lines,
double rho,
double theta,
int threshold) |
static void |
opencv_imgproc.HoughLinesP(opencv_core.UMat image,
opencv_core.UMat lines,
double rho,
double theta,
int threshold,
double minLineLength,
double maxLineGap) |
static opencv_core.Scalar4i |
opencv_ximgproc.HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.UMat srcImgInfo) |
static opencv_core.Scalar4i |
opencv_ximgproc.HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.UMat srcImgInfo,
int angleRange,
int makeSkew,
int rules) |
static void |
opencv_imgproc.HuMoments(opencv_core.Moments m,
opencv_core.UMat hu) |
static void |
opencv_core.idct(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.idct(opencv_core.UMat src,
opencv_core.UMat dst,
int flags) |
static void |
opencv_core.idft(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.idft(opencv_core.UMat src,
opencv_core.UMat dst,
int flags,
int nonzeroRows) |
static void |
opencv_photo.illuminationChange(opencv_core.UMat src,
opencv_core.UMat mask,
opencv_core.UMat dst) |
static void |
opencv_photo.illuminationChange(opencv_core.UMat src,
opencv_core.UMat mask,
opencv_core.UMat dst,
float alpha,
float beta) |
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.UMat buf,
int flags) |
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.UMat buf,
int flags,
opencv_core.Mat dst) |
static boolean |
opencv_imgcodecs.imencode(BytePointer ext,
opencv_core.UMat img,
byte[] buf) |
static boolean |
opencv_imgcodecs.imencode(BytePointer ext,
opencv_core.UMat img,
byte[] buf,
int[] params) |
static boolean |
opencv_imgcodecs.imencode(BytePointer ext,
opencv_core.UMat img,
ByteBuffer buf) |
static boolean |
opencv_imgcodecs.imencode(BytePointer ext,
opencv_core.UMat img,
ByteBuffer buf,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imencode(BytePointer ext,
opencv_core.UMat img,
BytePointer buf) |
static boolean |
opencv_imgcodecs.imencode(BytePointer ext,
opencv_core.UMat img,
BytePointer buf,
IntPointer params) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.UMat img,
byte[] buf) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.UMat img,
byte[] buf,
int[] params) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.UMat img,
ByteBuffer buf) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.UMat img,
ByteBuffer buf,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.UMat img,
BytePointer buf) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.UMat img,
BytePointer buf,
IntPointer params) |
static void |
opencv_highgui.imshow(BytePointer winname,
opencv_core.UMat mat) |
static void |
opencv_highgui.imshow(String winname,
opencv_core.UMat mat) |
static boolean |
opencv_imgcodecs.imwrite(BytePointer filename,
opencv_core.UMat img) |
static boolean |
opencv_imgcodecs.imwrite(BytePointer filename,
opencv_core.UMat img,
int[] params) |
static boolean |
opencv_imgcodecs.imwrite(BytePointer filename,
opencv_core.UMat img,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imwrite(BytePointer filename,
opencv_core.UMat img,
IntPointer params) |
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.UMat img) |
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.UMat img,
int[] params) |
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.UMat img,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.UMat img,
IntPointer params) |
static void |
opencv_imgproc.initUndistortRectifyMap(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat R,
opencv_core.UMat newCameraMatrix,
opencv_core.Size size,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2) |
static void |
opencv_calib3d.initUndistortRectifyMap(opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMat R,
opencv_core.UMat P,
opencv_core.Size size,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2) |
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2) |
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2,
int projType,
double alpha) |
static void |
opencv_photo.inpaint(opencv_core.UMat src,
opencv_core.UMat inpaintMask,
opencv_core.UMat dst,
double inpaintRadius,
int flags) |
static void |
opencv_core.inRange(opencv_core.UMat src,
opencv_core.UMat lowerb,
opencv_core.UMat upperb,
opencv_core.UMat dst) |
static void |
opencv_core.insertChannel(opencv_core.UMat src,
opencv_core.UMat dst,
int coi) |
static void |
opencv_core.insertImageCOI(opencv_core.UMat coiimg,
opencv_core.CvArr arr) |
static void |
opencv_core.insertImageCOI(opencv_core.UMat coiimg,
opencv_core.CvArr arr,
int coi) |
static void |
opencv_imgproc.integral(opencv_core.UMat src,
opencv_core.UMat sum) |
static void |
opencv_imgproc.integral(opencv_core.UMat src,
opencv_core.UMat sum,
int sdepth) |
static void |
opencv_imgproc.integral2(opencv_core.UMat src,
opencv_core.UMat sum,
opencv_core.UMat sqsum) |
static void |
opencv_imgproc.integral2(opencv_core.UMat src,
opencv_core.UMat sum,
opencv_core.UMat sqsum,
int sdepth,
int sqdepth) |
static void |
opencv_imgproc.integral3(opencv_core.UMat src,
opencv_core.UMat sum,
opencv_core.UMat sqsum,
opencv_core.UMat tilted) |
static void |
opencv_imgproc.integral3(opencv_core.UMat src,
opencv_core.UMat sum,
opencv_core.UMat sqsum,
opencv_core.UMat tilted,
int sdepth,
int sqdepth) |
void |
opencv_ximgproc.SparseMatchInterpolator.interpolate(opencv_core.UMat from_image,
opencv_core.UMat from_points,
opencv_core.UMat to_image,
opencv_core.UMat to_points,
opencv_core.UMat dense_flow) |
static float |
opencv_imgproc.intersectConvexConvex(opencv_core.UMat _p1,
opencv_core.UMat _p2,
opencv_core.UMat _p12) |
static float |
opencv_imgproc.intersectConvexConvex(opencv_core.UMat _p1,
opencv_core.UMat _p2,
opencv_core.UMat _p12,
boolean handleNested) |
static double |
opencv_core.invert(opencv_core.UMat src,
opencv_core.UMat dst) |
static double |
opencv_core.invert(opencv_core.UMat src,
opencv_core.UMat dst,
int flags) |
static void |
opencv_imgproc.invertAffineTransform(opencv_core.UMat M,
opencv_core.UMat iM) |
static boolean |
opencv_imgproc.isContourConvex(opencv_core.UMat contour) |
void |
opencv_ximgproc.SuperpixelSEEDS.iterate(opencv_core.UMat img) |
void |
opencv_ximgproc.SuperpixelSEEDS.iterate(opencv_core.UMat img,
int num_iterations) |
static void |
opencv_ximgproc.jointBilateralFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
opencv_ximgproc.jointBilateralFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType) |
static double |
opencv_core.kmeans(opencv_core.UMat data,
int K,
opencv_core.UMat bestLabels,
opencv_core.TermCriteria criteria,
int attempts,
int flags) |
static double |
opencv_core.kmeans(opencv_core.UMat data,
int K,
opencv_core.UMat bestLabels,
opencv_core.TermCriteria criteria,
int attempts,
int flags,
opencv_core.UMat centers) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVectorVector matches,
int k,
opencv_core.MatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVectorVector matches,
int k,
opencv_core.UMatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.UMat queryDescriptors,
opencv_core.UMat trainDescriptors,
opencv_core.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.UMat queryDescriptors,
opencv_core.UMat trainDescriptors,
opencv_core.DMatchVectorVector matches,
int k,
opencv_core.UMat mask,
boolean compactResult) |
void |
opencv_flann.Index.knnSearch(opencv_core.UMat query,
opencv_core.UMat indices,
opencv_core.UMat dists,
int knn) |
void |
opencv_flann.Index.knnSearch(opencv_core.UMat query,
opencv_core.UMat indices,
opencv_core.UMat dists,
int knn,
opencv_flann.SearchParams params) |
static void |
opencv_ximgproc.l0Smooth(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_ximgproc.l0Smooth(opencv_core.UMat src,
opencv_core.UMat dst,
double lambda,
double kappa) |
static void |
opencv_imgproc.Laplacian(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth) |
static void |
opencv_imgproc.Laplacian(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
int ksize,
double scale,
double delta,
int borderType) |
static opencv_dnn.BlobShape |
opencv_dnn.BlobShape.like(opencv_core.UMat m)
Returns shape of passed UMat.
|
static void |
opencv_imgproc.line(opencv_core.UMat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_imgproc.line(opencv_core.UMat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.linearPolar(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Point2f center,
double maxRadius,
int flags) |
boolean |
opencv_flann.Index.load(opencv_core.UMat features,
BytePointer filename) |
boolean |
opencv_flann.Index.load(opencv_core.UMat features,
String filename) |
static void |
opencv_core.log(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.logPolar(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Point2f center,
double M,
int flags) |
static void |
opencv_core.LUT(opencv_core.UMat src,
opencv_core.UMat lut,
opencv_core.UMat dst) |
static void |
opencv_core.magnitude(opencv_core.UMat x,
opencv_core.UMat y,
opencv_core.UMat magnitude) |
static double |
opencv_core.Mahalanobis(opencv_core.UMat v1,
opencv_core.UMat v2,
opencv_core.UMat icovar) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVector matches) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVector matches,
opencv_core.MatVector masks) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVector matches,
opencv_core.UMatVector masks) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.UMat queryDescriptors,
opencv_core.UMat trainDescriptors,
opencv_core.DMatchVector matches) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.UMat queryDescriptors,
opencv_core.UMat trainDescriptors,
opencv_core.DMatchVector matches,
opencv_core.UMat mask) |
static double |
opencv_imgproc.matchShapes(opencv_core.UMat contour1,
opencv_core.UMat contour2,
int method,
double parameter) |
static void |
opencv_imgproc.matchTemplate(opencv_core.UMat image,
opencv_core.UMat templ,
opencv_core.UMat result,
int method) |
static void |
opencv_imgproc.matchTemplate(opencv_core.UMat image,
opencv_core.UMat templ,
opencv_core.UMat result,
int method,
opencv_core.UMat mask) |
static void |
opencv_calib3d.matMulDeriv(opencv_core.UMat A,
opencv_core.UMat B,
opencv_core.UMat dABdA,
opencv_core.UMat dABdB) |
static void |
opencv_core.max(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static opencv_core.Scalar |
opencv_core.mean(opencv_core.UMat src) |
static opencv_core.Scalar |
opencv_core.mean(opencv_core.UMat src,
opencv_core.UMat mask) |
static int |
opencv_video.meanShift(opencv_core.UMat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria) |
static void |
opencv_core.meanStdDev(opencv_core.UMat src,
opencv_core.UMat mean,
opencv_core.UMat stddev) |
static void |
opencv_core.meanStdDev(opencv_core.UMat src,
opencv_core.UMat mean,
opencv_core.UMat stddev,
opencv_core.UMat mask) |
static void |
opencv_imgproc.medianBlur(opencv_core.UMat src,
opencv_core.UMat dst,
int ksize) |
static void |
opencv_core.merge(opencv_core.Mat mv,
long count,
opencv_core.UMat dst) |
static void |
opencv_core.merge(opencv_core.MatVector mv,
opencv_core.UMat dst) |
static void |
opencv_core.merge(opencv_core.UMatVector mv,
opencv_core.UMat dst) |
static void |
opencv_core.min(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static opencv_core.RotatedRect |
opencv_imgproc.minAreaRect(opencv_core.UMat points) |
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.UMat points,
opencv_core.Point2f center,
float[] radius) |
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.UMat points,
opencv_core.Point2f center,
FloatPointer radius) |
static double |
opencv_imgproc.minEnclosingTriangle(opencv_core.UMat points,
opencv_core.UMat triangle) |
double |
opencv_core.MinProblemSolver.minimize(opencv_core.UMat x) |
static void |
opencv_core.minMaxIdx(opencv_core.UMat src,
double[] minVal) |
static void |
opencv_core.minMaxIdx(opencv_core.UMat src,
double[] minVal,
double[] maxVal,
int[] minIdx,
int[] maxIdx,
opencv_core.UMat mask) |
static void |
opencv_core.minMaxIdx(opencv_core.UMat src,
DoublePointer minVal) |
static void |
opencv_core.minMaxIdx(opencv_core.UMat src,
DoublePointer minVal,
DoublePointer maxVal,
IntPointer minIdx,
IntPointer maxIdx,
opencv_core.UMat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.UMat src,
double[] minVal) |
static void |
opencv_core.minMaxLoc(opencv_core.UMat src,
double[] minVal,
double[] maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.UMat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.UMat src,
DoublePointer minVal) |
static void |
opencv_core.minMaxLoc(opencv_core.UMat src,
DoublePointer minVal,
DoublePointer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.UMat mask) |
static opencv_core.Moments |
opencv_imgproc.moments(opencv_core.UMat array) |
static opencv_core.Moments |
opencv_imgproc.moments(opencv_core.UMat array,
boolean binaryImage) |
static void |
opencv_imgproc.morphologyEx(opencv_core.UMat src,
opencv_core.UMat dst,
int op,
opencv_core.UMat kernel) |
static void |
opencv_imgproc.morphologyEx(opencv_core.UMat src,
opencv_core.UMat dst,
int op,
opencv_core.UMat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue) |
static void |
opencv_text.MSERsToERStats(opencv_core.UMat image,
opencv_core.PointVectorVector contours,
opencv_text.ERStatVectorVector regions) |
opencv_core.MatExpr |
opencv_core.Mat.mul(opencv_core.UMat m) |
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.UMat m) |
opencv_core.MatExpr |
opencv_core.Mat.mul(opencv_core.UMat m,
double scale) |
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.UMat m,
double scale) |
static void |
opencv_core.mulSpectrums(opencv_core.UMat a,
opencv_core.UMat b,
opencv_core.UMat c,
int flags) |
static void |
opencv_core.mulSpectrums(opencv_core.UMat a,
opencv_core.UMat b,
opencv_core.UMat c,
int flags,
boolean conjB) |
static void |
opencv_core.multiply(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.multiply(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
double scale,
int dtype) |
static void |
opencv_core.mulTransposed(opencv_core.UMat src,
opencv_core.UMat dst,
boolean aTa) |
static void |
opencv_core.mulTransposed(opencv_core.UMat src,
opencv_core.UMat dst,
boolean aTa,
opencv_core.UMat delta,
double scale,
int dtype) |
void |
opencv_superres.FrameSource.nextFrame(opencv_core.UMat frame) |
void |
opencv_superres.SuperResolution.nextFrame(opencv_core.UMat frame) |
static void |
opencv_ximgproc.niBlackThreshold(opencv_core.UMat _src,
opencv_core.UMat _dst,
double maxValue,
int type,
int blockSize,
double delta) |
static void |
opencv_photo.nonLocalMeans(opencv_core.UMat src,
opencv_core.UMat dst,
float h) |
static void |
opencv_photo.nonLocalMeans(opencv_core.UMat src,
opencv_core.UMat dst,
float h,
int search_window,
int block_size,
int borderMode,
opencv_core.Stream stream) |
static double |
opencv_core.norm(opencv_core.UMat src1) |
static double |
opencv_core.norm(opencv_core.UMat src1,
int normType,
opencv_core.UMat mask) |
static double |
opencv_core.norm(opencv_core.UMat src1,
opencv_core.UMat src2) |
static double |
opencv_core.norm(opencv_core.UMat src1,
opencv_core.UMat src2,
int normType,
opencv_core.UMat mask) |
static void |
opencv_core.normalize(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.normalize(opencv_core.UMat src,
opencv_core.UMat dst,
double alpha,
double beta,
int norm_type,
int dtype,
opencv_core.UMat mask) |
static void |
opencv_stitching.normalizeUsingWeightMap(opencv_core.UMat weight,
opencv_core.UMat src) |
opencv_objdetect.HOGDescriptor |
opencv_objdetect.HOGDescriptor.oclSvmDetector(opencv_core.UMat oclSvmDetector) |
static void |
opencv_core.patchNaNs(opencv_core.UMat a) |
static void |
opencv_core.patchNaNs(opencv_core.UMat a,
double val) |
static void |
opencv_core.PCABackProject(opencv_core.UMat data,
opencv_core.UMat mean,
opencv_core.UMat eigenvectors,
opencv_core.UMat result) |
static void |
opencv_core.PCACompute(opencv_core.UMat data,
opencv_core.UMat mean,
opencv_core.UMat eigenvectors) |
static void |
opencv_core.PCACompute(opencv_core.UMat data,
opencv_core.UMat mean,
opencv_core.UMat eigenvectors,
double retainedVariance) |
static void |
opencv_core.PCACompute(opencv_core.UMat data,
opencv_core.UMat mean,
opencv_core.UMat eigenvectors,
int maxComponents) |
static void |
opencv_core.PCAProject(opencv_core.UMat data,
opencv_core.UMat mean,
opencv_core.UMat eigenvectors,
opencv_core.UMat result) |
static void |
opencv_photo.pencilSketch(opencv_core.UMat src,
opencv_core.UMat dst1,
opencv_core.UMat dst2) |
static void |
opencv_photo.pencilSketch(opencv_core.UMat src,
opencv_core.UMat dst1,
opencv_core.UMat dst2,
float sigma_s,
float sigma_r,
float shade_factor) |
static void |
opencv_core.perspectiveTransform(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat m) |
static void |
opencv_core.phase(opencv_core.UMat x,
opencv_core.UMat y,
opencv_core.UMat angle) |
static void |
opencv_core.phase(opencv_core.UMat x,
opencv_core.UMat y,
opencv_core.UMat angle,
boolean angleInDegrees) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.UMat src1,
opencv_core.UMat src2) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat window,
double[] response) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat window,
DoublePointer response) |
static double |
opencv_imgproc.pointPolygonTest(opencv_core.UMat contour,
opencv_core.Point2f pt,
boolean measureDist) |
static void |
opencv_core.polarToCart(opencv_core.UMat magnitude,
opencv_core.UMat angle,
opencv_core.UMat x,
opencv_core.UMat y) |
static void |
opencv_core.polarToCart(opencv_core.UMat magnitude,
opencv_core.UMat angle,
opencv_core.UMat x,
opencv_core.UMat y,
boolean angleInDegrees) |
static void |
opencv_imgproc.polylines(opencv_core.UMat img,
opencv_core.MatVector pts,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.UMat img,
opencv_core.MatVector pts,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.polylines(opencv_core.UMat img,
opencv_core.UMatVector pts,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.UMat img,
opencv_core.UMatVector pts,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_core.pow(opencv_core.UMat src,
double power,
opencv_core.UMat dst) |
static void |
opencv_imgproc.preCornerDetect(opencv_core.UMat src,
opencv_core.UMat dst,
int ksize) |
static void |
opencv_imgproc.preCornerDetect(opencv_core.UMat src,
opencv_core.UMat dst,
int ksize,
int borderType) |
void |
opencv_face.FaceRecognizer.predict_collect(opencv_core.UMat src,
opencv_face.PredictCollector collector) |
int |
opencv_face.FaceRecognizer.predict_label(opencv_core.UMat src) |
float |
opencv_ml.StatModel.predict(opencv_core.UMat samples) |
float |
opencv_ml.LogisticRegression.predict(opencv_core.UMat samples) |
void |
opencv_face.FaceRecognizer.predict(opencv_core.UMat src,
int[] label,
double[] confidence) |
void |
opencv_face.FaceRecognizer.predict(opencv_core.UMat src,
IntPointer label,
DoublePointer confidence) |
float |
opencv_ml.StatModel.predict(opencv_core.UMat samples,
opencv_core.UMat results,
int flags) |
float |
opencv_ml.LogisticRegression.predict(opencv_core.UMat samples,
opencv_core.UMat results,
int flags) |
opencv_core.Point2d |
opencv_ml.EM.predict2(opencv_core.UMat sample,
opencv_core.UMat probs) |
float |
opencv_ml.NormalBayesClassifier.predictProb(opencv_core.UMat inputs,
opencv_core.UMat outputs,
opencv_core.UMat outputProbs) |
float |
opencv_ml.NormalBayesClassifier.predictProb(opencv_core.UMat inputs,
opencv_core.UMat outputs,
opencv_core.UMat outputProbs,
int flags) |
static int |
opencv_core.print(opencv_core.UMat mtx) |
static int |
opencv_core.print(opencv_core.UMat mtx,
Pointer stream) |
void |
opencv_photo.AlignExposures.process(opencv_core.MatVector src,
opencv_core.MatVector dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.AlignMTB.process(opencv_core.MatVector src,
opencv_core.MatVector dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeMertens.process(opencv_core.MatVector src,
opencv_core.UMat dst) |
void |
opencv_photo.CalibrateCRF.process(opencv_core.MatVector src,
opencv_core.UMat dst,
opencv_core.UMat times) |
void |
opencv_photo.MergeDebevec.process(opencv_core.MatVector src,
opencv_core.UMat dst,
opencv_core.UMat times) |
void |
opencv_photo.MergeRobertson.process(opencv_core.MatVector src,
opencv_core.UMat dst,
opencv_core.UMat times) |
void |
opencv_photo.MergeExposures.process(opencv_core.MatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeDebevec.process(opencv_core.MatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeMertens.process(opencv_core.MatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeRobertson.process(opencv_core.MatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_videostab.IOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat mask) |
void |
opencv_videostab.NullOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat mask) |
void |
opencv_videostab.TranslationBasedLocalOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat mask) |
void |
opencv_photo.Tonemap.process(opencv_core.UMat src,
opencv_core.UMat dst) |
void |
opencv_stitching.Timelapser.process(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Point tl) |
void |
opencv_photo.AlignExposures.process(opencv_core.UMatVector src,
opencv_core.MatVector dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.AlignMTB.process(opencv_core.UMatVector src,
opencv_core.MatVector dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeMertens.process(opencv_core.UMatVector src,
opencv_core.UMat dst) |
void |
opencv_photo.CalibrateCRF.process(opencv_core.UMatVector src,
opencv_core.UMat dst,
opencv_core.UMat times) |
void |
opencv_photo.MergeDebevec.process(opencv_core.UMatVector src,
opencv_core.UMat dst,
opencv_core.UMat times) |
void |
opencv_photo.MergeRobertson.process(opencv_core.UMatVector src,
opencv_core.UMat dst,
opencv_core.UMat times) |
void |
opencv_photo.MergeExposures.process(opencv_core.UMatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeDebevec.process(opencv_core.UMatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeMertens.process(opencv_core.UMatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_photo.MergeRobertson.process(opencv_core.UMatVector src,
opencv_core.UMat dst,
opencv_core.UMat times,
opencv_core.UMat response) |
void |
opencv_ximgproc.GraphSegmentation.processImage(opencv_core.UMat src,
opencv_core.UMat dst) |
opencv_core.Mat |
opencv_core.PCA.project(opencv_core.UMat vec) |
opencv_core.Mat |
opencv_core.LDA.project(opencv_core.UMat src) |
void |
opencv_core.PCA.project(opencv_core.UMat vec,
opencv_core.UMat result) |
static void |
opencv_calib3d.projectPoints(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.Mat affine,
opencv_core.UMat K,
opencv_core.UMat D) |
static void |
opencv_calib3d.projectPoints(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.Mat affine,
opencv_core.UMat K,
opencv_core.UMat D,
double alpha,
opencv_core.UMat jacobian) |
static void |
opencv_calib3d.projectPoints(opencv_core.UMat objectPoints,
opencv_core.UMat rvec,
opencv_core.UMat tvec,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat imagePoints) |
static void |
opencv_calib3d.projectPoints(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.UMat rvec,
opencv_core.UMat tvec,
opencv_core.UMat K,
opencv_core.UMat D,
double alpha,
opencv_core.UMat jacobian) |
static void |
opencv_calib3d.projectPoints(opencv_core.UMat objectPoints,
opencv_core.UMat rvec,
opencv_core.UMat tvec,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat imagePoints,
opencv_core.UMat jacobian,
double aspectRatio) |
static double |
opencv_core.PSNR(opencv_core.UMat src1,
opencv_core.UMat src2) |
opencv_core.UMatVector |
opencv_core.UMatVector.put(long i,
opencv_core.UMat value) |
opencv_core.UMatVector |
opencv_core.UMatVector.put(opencv_core.UMat... array) |
opencv_core.UMat |
opencv_core.UMat.put(opencv_core.UMat m)
assignment operators
|
opencv_core.UMatBytePairVector |
opencv_core.UMatBytePairVector.put(opencv_core.UMat[] firstValue,
byte[] secondValue) |
static void |
opencv_imgproc.putText(opencv_core.UMat img,
BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_imgproc.putText(opencv_core.UMat img,
BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin) |
static void |
opencv_imgproc.putText(opencv_core.UMat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_imgproc.putText(opencv_core.UMat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin) |
static void |
opencv_imgproc.pyrDown(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.pyrDown(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dstsize,
int borderType) |
static void |
opencv_imgproc.pyrMeanShiftFiltering(opencv_core.UMat src,
opencv_core.UMat dst,
double sp,
double sr) |
static void |
opencv_imgproc.pyrMeanShiftFiltering(opencv_core.UMat src,
opencv_core.UMat dst,
double sp,
double sr,
int maxLevel,
opencv_core.TermCriteria termcrit) |
static void |
opencv_imgproc.pyrUp(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_imgproc.pyrUp(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dstsize,
int borderType) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance,
opencv_core.MatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.UMat queryDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance,
opencv_core.UMatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.UMat queryDescriptors,
opencv_core.UMat trainDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.UMat queryDescriptors,
opencv_core.UMat trainDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance,
opencv_core.UMat mask,
boolean compactResult) |
int |
opencv_flann.Index.radiusSearch(opencv_core.UMat query,
opencv_core.UMat indices,
opencv_core.UMat dists,
double radius,
int maxResults) |
int |
opencv_flann.Index.radiusSearch(opencv_core.UMat query,
opencv_core.UMat indices,
opencv_core.UMat dists,
double radius,
int maxResults,
opencv_flann.SearchParams params) |
static void |
opencv_ml.randMVNormal(opencv_core.UMat mean,
opencv_core.UMat cov,
int nsamples,
opencv_core.UMat samples) |
static void |
opencv_core.randn(opencv_core.UMat dst,
opencv_core.UMat mean,
opencv_core.UMat stddev) |
static void |
opencv_core.randShuffle(opencv_core.UMat dst) |
static void |
opencv_core.randShuffle(opencv_core.UMat dst,
double iterFactor,
opencv_core.RNG rng) |
static void |
opencv_core.randu(opencv_core.UMat dst,
opencv_core.UMat low,
opencv_core.UMat high) |
boolean |
opencv_videoio.VideoCapture.read(opencv_core.UMat image) |
static int |
opencv_ximgproc.readGT(BytePointer src_path,
opencv_core.UMat dst) |
static int |
opencv_ximgproc.readGT(String src_path,
opencv_core.UMat dst) |
opencv_core.Mat |
opencv_core.LDA.reconstruct(opencv_core.UMat src) |
static int |
opencv_calib3d.recoverPose(opencv_core.UMat E,
opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat R,
opencv_core.UMat t) |
static int |
opencv_calib3d.recoverPose(opencv_core.UMat E,
opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat R,
opencv_core.UMat t,
double focal,
opencv_core.Point2d pp,
opencv_core.UMat mask) |
static int |
opencv_calib3d.recoverPose(opencv_core.UMat E,
opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat cameraMatrix,
opencv_core.UMat R,
opencv_core.UMat t) |
static int |
opencv_calib3d.recoverPose(opencv_core.UMat E,
opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat cameraMatrix,
opencv_core.UMat R,
opencv_core.UMat t,
opencv_core.UMat mask) |
static void |
opencv_imgproc.rectangle(opencv_core.UMat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_imgproc.rectangle(opencv_core.UMat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.UMat cameraMatrix3,
opencv_core.UMat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.UMat R12,
opencv_core.UMat T12,
opencv_core.UMat R13,
opencv_core.UMat T13,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat R3,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat P3,
opencv_core.UMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.UMat cameraMatrix3,
opencv_core.UMat distCoeffs3,
opencv_core.UMatVector imgpt1,
opencv_core.UMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.UMat R12,
opencv_core.UMat T12,
opencv_core.UMat R13,
opencv_core.UMat T13,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat R3,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat P3,
opencv_core.UMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static void |
opencv_core.reduce(opencv_core.UMat src,
opencv_core.UMat dst,
int dim,
int rtype) |
static void |
opencv_core.reduce(opencv_core.UMat src,
opencv_core.UMat dst,
int dim,
int rtype,
int dtype) |
static void |
opencv_imgproc.remap(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat map1,
opencv_core.UMat map2,
int interpolation) |
static void |
opencv_imgproc.remap(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat map1,
opencv_core.UMat map2,
int interpolation,
int borderMode,
opencv_core.Scalar borderValue) |
static void |
opencv_core.repeat(opencv_core.UMat src,
int ny,
int nx,
opencv_core.UMat dst) |
static void |
opencv_calib3d.reprojectImageTo3D(opencv_core.UMat disparity,
opencv_core.UMat _3dImage,
opencv_core.UMat Q) |
static void |
opencv_calib3d.reprojectImageTo3D(opencv_core.UMat disparity,
opencv_core.UMat _3dImage,
opencv_core.UMat Q,
boolean handleMissingValues,
int ddepth) |
static void |
opencv_imgproc.resize(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dsize) |
static void |
opencv_imgproc.resize(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dsize,
double fx,
double fy,
int interpolation) |
boolean |
opencv_videoio.VideoCapture.retrieve(opencv_core.UMat image) |
boolean |
opencv_videoio.VideoCapture.retrieve(opencv_core.UMat image,
int flag) |
static void |
opencv_calib3d.Rodrigues(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_calib3d.Rodrigues(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat jacobian) |
static void |
opencv_ximgproc.rollingGuidanceFilter(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_ximgproc.rollingGuidanceFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType) |
static void |
opencv_core.rotate(opencv_core.UMat src,
opencv_core.UMat dst,
int rotateCode) |
static int |
opencv_imgproc.rotatedRectangleIntersection(opencv_core.RotatedRect rect1,
opencv_core.RotatedRect rect2,
opencv_core.UMat intersectingRegion) |
static opencv_core.Point3d |
opencv_calib3d.RQDecomp3x3(opencv_core.UMat src,
opencv_core.UMat mtxR,
opencv_core.UMat mtxQ) |
static opencv_core.Point3d |
opencv_calib3d.RQDecomp3x3(opencv_core.UMat src,
opencv_core.UMat mtxR,
opencv_core.UMat mtxQ,
opencv_core.UMat Qx,
opencv_core.UMat Qy,
opencv_core.UMat Qz) |
void |
opencv_bioinspired.Retina.run(opencv_core.UMat inputImage) |
void |
opencv_bioinspired.TransientAreasSegmentationModule.run(opencv_core.UMat inputToSegment) |
BytePointer |
opencv_text.OCRTesseract.run(opencv_core.UMat image,
int min_confidence) |
BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.UMat image,
int min_confidence) |
BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.UMat image,
int min_confidence) |
void |
opencv_bioinspired.TransientAreasSegmentationModule.run(opencv_core.UMat inputToSegment,
int channelIndex) |
BytePointer |
opencv_text.OCRTesseract.run(opencv_core.UMat image,
int min_confidence,
int component_level) |
BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.UMat image,
int min_confidence,
int component_level) |
BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.UMat image,
int min_confidence,
int component_level) |
BytePointer |
opencv_text.OCRTesseract.run(opencv_core.UMat image,
opencv_core.UMat mask,
int min_confidence) |
BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.UMat image,
opencv_core.UMat mask,
int min_confidence) |
BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.UMat image,
opencv_core.UMat mask,
int min_confidence) |
BytePointer |
opencv_text.OCRTesseract.run(opencv_core.UMat image,
opencv_core.UMat mask,
int min_confidence,
int component_level) |
BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.UMat image,
opencv_core.UMat mask,
int min_confidence,
int component_level) |
BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.UMat image,
opencv_core.UMat mask,
int min_confidence,
int component_level) |
void |
opencv_videostab.IDenseOptFlowEstimator.run(opencv_core.UMat frame0,
opencv_core.UMat frame1,
opencv_core.UMat flowX,
opencv_core.UMat flowY,
opencv_core.UMat errors) |
void |
opencv_videostab.ISparseOptFlowEstimator.run(opencv_core.UMat frame0,
opencv_core.UMat frame1,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat status,
opencv_core.UMat errors) |
void |
opencv_videostab.SparsePyrLkOptFlowEstimator.run(opencv_core.UMat frame0,
opencv_core.UMat frame1,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat status,
opencv_core.UMat errors) |
void |
opencv_text.ERFilter.run(opencv_core.UMat image,
opencv_text.ERStatVector regions) |
static double |
opencv_calib3d.sampsonDistance(opencv_core.UMat pt1,
opencv_core.UMat pt2,
opencv_core.UMat F) |
static void |
opencv_core.scaleAdd(opencv_core.UMat src1,
double alpha,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_imgproc.Scharr(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
int dx,
int dy) |
static void |
opencv_imgproc.Scharr(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
int dx,
int dy,
double scale,
double delta,
int borderType) |
static void |
opencv_photo.seamlessClone(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat mask,
opencv_core.Point p,
opencv_core.UMat blend,
int flags) |
static void |
opencv_optflow.segmentMotion(opencv_core.UMat mhi,
opencv_core.UMat segmask,
opencv_core.RectVector boundingRects,
double timestamp,
double segThresh) |
static void |
opencv_imgproc.sepFilter2D(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.UMat kernelX,
opencv_core.UMat kernelY) |
static void |
opencv_imgproc.sepFilter2D(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.UMat kernelX,
opencv_core.UMat kernelY,
opencv_core.Point anchor,
double delta,
int borderType) |
void |
opencv_ximgproc.SelectiveSearchSegmentation.setBaseImage(opencv_core.UMat img) |
void |
opencv_stitching.ProjectorBase.setCameraParams(opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T) |
static void |
opencv_core.setIdentity(opencv_core.UMat mtx) |
static void |
opencv_core.setIdentity(opencv_core.UMat mtx,
opencv_core.Scalar s) |
void |
opencv_ximgproc.SelectiveSearchSegmentationStrategy.setImage(opencv_core.UMat img,
opencv_core.UMat regions,
opencv_core.UMat sizes) |
void |
opencv_ximgproc.SelectiveSearchSegmentationStrategy.setImage(opencv_core.UMat img,
opencv_core.UMat regions,
opencv_core.UMat sizes,
int image_id) |
void |
opencv_shape.ShapeContextDistanceExtractor.setImages(opencv_core.UMat image1,
opencv_core.UMat image2) |
void |
opencv_core.DownhillSolver.setInitStep(opencv_core.UMat step) |
void |
opencv_ml.ANN_MLP.setLayerSizes(opencv_core.UMat _layer_sizes) |
void |
opencv_stitching.Stitcher.setMatchingMask(opencv_core.UMat mask) |
void |
opencv_objdetect.HOGDescriptor.setSVMDetector(opencv_core.UMat _svmdetector) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.UMat templ) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.UMat templ,
opencv_core.Point templCenter) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.UMat edges,
opencv_core.UMat dx,
opencv_core.UMat dy) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.UMat edges,
opencv_core.UMat dx,
opencv_core.UMat dy,
opencv_core.Point templCenter) |
void |
opencv_dnn.Blob.setTo(opencv_core.UMat value) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.UMat value) |
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.UMat value) |
void |
opencv_dnn.Blob.setTo(opencv_core.UMat value,
int allocFlags) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.UMat value,
opencv_core.UMat mask) |
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.UMat value,
opencv_core.UMat mask) |
void |
opencv_photo.AlignMTB.shiftMat(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Point shift) |
opencv_videoio.VideoCapture |
opencv_videoio.VideoCapture.shiftRight(opencv_core.UMat image)
Overloaded method.
See also: read()
|
static void |
opencv_imgproc.Sobel(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
int dx,
int dy) |
static void |
opencv_imgproc.Sobel(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
int dx,
int dy,
int ksize,
double scale,
double delta,
int borderType) |
static boolean |
opencv_core.solve(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static boolean |
opencv_core.solve(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
int flags) |
static int |
opencv_core.solveCubic(opencv_core.UMat coeffs,
opencv_core.UMat roots) |
static boolean |
opencv_calib3d.solvePnP(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat rvec,
opencv_core.UMat tvec) |
static boolean |
opencv_calib3d.solvePnP(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat rvec,
opencv_core.UMat tvec,
boolean useExtrinsicGuess,
int flags) |
static boolean |
opencv_calib3d.solvePnPRansac(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat rvec,
opencv_core.UMat tvec) |
static boolean |
opencv_calib3d.solvePnPRansac(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat rvec,
opencv_core.UMat tvec,
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence,
opencv_core.UMat inliers,
int flags) |
static double |
opencv_core.solvePoly(opencv_core.UMat coeffs,
opencv_core.UMat roots) |
static double |
opencv_core.solvePoly(opencv_core.UMat coeffs,
opencv_core.UMat roots,
int maxIters) |
static void |
opencv_core.SVD.solveZ(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_core.sort(opencv_core.UMat src,
opencv_core.UMat dst,
int flags) |
static void |
opencv_core.sortIdx(opencv_core.UMat src,
opencv_core.UMat dst,
int flags) |
static void |
opencv_imgproc.spatialGradient(opencv_core.UMat src,
opencv_core.UMat dx,
opencv_core.UMat dy) |
static void |
opencv_imgproc.spatialGradient(opencv_core.UMat src,
opencv_core.UMat dx,
opencv_core.UMat dy,
int ksize,
int borderType) |
static void |
opencv_core.split(opencv_core.UMat m,
opencv_core.MatVector mv) |
static void |
opencv_core.split(opencv_core.UMat m,
opencv_core.UMatVector mv) |
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.UMat _src,
opencv_core.UMat _dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.UMat _src,
opencv_core.UMat _dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType) |
static void |
opencv_core.sqrt(opencv_core.UMat src,
opencv_core.UMat dst) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F,
int flags,
opencv_core.TermCriteria criteria) |
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q) |
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat tvec,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q,
int flags) |
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2) |
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat tvec,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q,
int flags,
opencv_core.Size newImageSize,
double balance,
double fov_scale) |
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat F,
opencv_core.Size imgSize,
opencv_core.UMat H1,
opencv_core.UMat H2) |
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat F,
opencv_core.Size imgSize,
opencv_core.UMat H1,
opencv_core.UMat H2,
double threshold) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.MatVector images,
opencv_core.RectVectorVector rois,
opencv_core.UMat pano) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.MatVector images,
opencv_core.UMat pano) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.UMatVector images,
opencv_core.RectVectorVector rois,
opencv_core.UMat pano) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.UMatVector images,
opencv_core.UMat pano) |
static void |
opencv_photo.stylization(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_photo.stylization(opencv_core.UMat src,
opencv_core.UMat dst,
float sigma_s,
float sigma_r) |
static opencv_core.Mat |
opencv_core.LDA.subspaceProject(opencv_core.UMat W,
opencv_core.UMat mean,
opencv_core.UMat src) |
static opencv_core.Mat |
opencv_core.LDA.subspaceReconstruct(opencv_core.UMat W,
opencv_core.UMat mean,
opencv_core.UMat src) |
static void |
opencv_core.subtract(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.subtract(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst,
opencv_core.UMat mask,
int dtype) |
static opencv_core.Scalar |
opencv_core.sumElems(opencv_core.UMat src) |
static void |
opencv_core.SVBackSubst(opencv_core.UMat w,
opencv_core.UMat u,
opencv_core.UMat vt,
opencv_core.UMat rhs,
opencv_core.UMat dst) |
static void |
opencv_core.SVDecomp(opencv_core.UMat src,
opencv_core.UMat w,
opencv_core.UMat u,
opencv_core.UMat vt) |
static void |
opencv_core.SVDecomp(opencv_core.UMat src,
opencv_core.UMat w,
opencv_core.UMat u,
opencv_core.UMat vt,
int flags) |
static void |
opencv_core.swap(opencv_core.UMat a,
opencv_core.UMat b)
Overloaded method.
|
static void |
opencv_photo.textureFlattening(opencv_core.UMat src,
opencv_core.UMat mask,
opencv_core.UMat dst) |
static void |
opencv_photo.textureFlattening(opencv_core.UMat src,
opencv_core.UMat mask,
opencv_core.UMat dst,
float low_threshold,
float high_threshold,
int kernel_size) |
static void |
opencv_ximgproc.thinning(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_ximgproc.thinning(opencv_core.UMat src,
opencv_core.UMat dst,
int thinningType) |
static double |
opencv_imgproc.threshold(opencv_core.UMat src,
opencv_core.UMat dst,
double thresh,
double maxval,
int type) |
static opencv_core.Scalar |
opencv_core.trace(opencv_core.UMat mtx) |
void |
opencv_face.FaceRecognizer.train(opencv_core.MatVector src,
opencv_core.UMat labels) |
boolean |
opencv_ml.StatModel.train(opencv_core.UMat samples,
int layout,
opencv_core.UMat responses) |
void |
opencv_face.FaceRecognizer.train(opencv_core.UMatVector src,
opencv_core.UMat labels) |
boolean |
opencv_ml.EM.trainE(opencv_core.UMat samples,
opencv_core.UMat means0) |
boolean |
opencv_ml.EM.trainE(opencv_core.UMat samples,
opencv_core.UMat means0,
opencv_core.UMat covs0,
opencv_core.UMat weights0,
opencv_core.UMat logLikelihoods,
opencv_core.UMat labels,
opencv_core.UMat probs) |
boolean |
opencv_ml.EM.trainEM(opencv_core.UMat samples) |
boolean |
opencv_ml.EM.trainEM(opencv_core.UMat samples,
opencv_core.UMat logLikelihoods,
opencv_core.UMat labels,
opencv_core.UMat probs) |
boolean |
opencv_ml.EM.trainM(opencv_core.UMat samples,
opencv_core.UMat probs0) |
boolean |
opencv_ml.EM.trainM(opencv_core.UMat samples,
opencv_core.UMat probs0,
opencv_core.UMat logLikelihoods,
opencv_core.UMat labels,
opencv_core.UMat probs) |
static void |
opencv_core.transform(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat m) |
static void |
opencv_core.transpose(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
opencv_calib3d.triangulatePoints(opencv_core.UMat projMatr1,
opencv_core.UMat projMatr2,
opencv_core.UMat projPoints1,
opencv_core.UMat projPoints2,
opencv_core.UMat points4D) |
static void |
opencv_imgproc.undistort(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs) |
static void |
opencv_imgproc.undistort(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat newCameraMatrix) |
static void |
opencv_calib3d.undistortImage(opencv_core.UMat distorted,
opencv_core.UMat undistorted,
opencv_core.UMat K,
opencv_core.UMat D) |
static void |
opencv_calib3d.undistortImage(opencv_core.UMat distorted,
opencv_core.UMat undistorted,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMat Knew,
opencv_core.Size new_size) |
static void |
opencv_imgproc.undistortPoints(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs) |
static void |
opencv_calib3d.undistortPoints(opencv_core.UMat distorted,
opencv_core.UMat undistorted,
opencv_core.UMat K,
opencv_core.UMat D) |
static void |
opencv_imgproc.undistortPoints(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat R,
opencv_core.UMat P) |
static void |
opencv_calib3d.undistortPoints(opencv_core.UMat distorted,
opencv_core.UMat undistorted,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMat R,
opencv_core.UMat P) |
void |
opencv_face.FaceRecognizer.update(opencv_core.MatVector src,
opencv_core.UMat labels) |
void |
opencv_face.FaceRecognizer.update(opencv_core.UMatVector src,
opencv_core.UMat labels) |
static void |
opencv_optflow.updateMotionHistory(opencv_core.UMat silhouette,
opencv_core.UMat mhi,
double timestamp,
double duration) |
static void |
opencv_calib3d.validateDisparity(opencv_core.UMat disparity,
opencv_core.UMat cost,
int minDisparity,
int numberOfDisparities) |
static void |
opencv_calib3d.validateDisparity(opencv_core.UMat disparity,
opencv_core.UMat cost,
int minDisparity,
int numberOfDisparities,
int disp12MaxDisp) |
static void |
opencv_core.vconcat(opencv_core.Mat src,
long nsrc,
opencv_core.UMat dst) |
static void |
opencv_core.vconcat(opencv_core.MatVector src,
opencv_core.UMat dst) |
static void |
opencv_core.vconcat(opencv_core.UMat src1,
opencv_core.UMat src2,
opencv_core.UMat dst) |
static void |
opencv_core.vconcat(opencv_core.UMatVector src,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.RotationWarper.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarper.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.AffineWarper.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarper.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarper.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarperGpu.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarperGpu.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarper.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T,
int interp_mode,
int border_mode,
opencv_core.UMat dst) |
static void |
opencv_imgproc.warpAffine(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpAffine(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue) |
void |
opencv_stitching.RotationWarper.warpBackward(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.Size dst_size,
opencv_core.UMat dst) |
void |
opencv_shape.ShapeTransformer.warpImage(opencv_core.UMat transformingImage,
opencv_core.UMat output) |
void |
opencv_shape.ShapeTransformer.warpImage(opencv_core.UMat transformingImage,
opencv_core.UMat output,
int flags,
int borderMode,
opencv_core.Scalar borderValue) |
static void |
opencv_imgproc.warpPerspective(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpPerspective(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue) |
opencv_core.Point2f |
opencv_stitching.RotationWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Point2f |
opencv_stitching.DetailPlaneWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Point2f |
opencv_stitching.AffineWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Point2f |
opencv_stitching.DetailPlaneWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T) |
opencv_core.Rect |
opencv_stitching.RotationWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Rect |
opencv_stitching.AffineWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T) |
static void |
opencv_imgproc.watershed(opencv_core.UMat image,
opencv_core.UMat markers) |
void |
opencv_core.FileStorage.write(BytePointer name,
opencv_core.UMat val) |
void |
opencv_core.FileStorage.write(String name,
opencv_core.UMat val) |
static boolean |
opencv_optflow.writeOpticalFlow(BytePointer path,
opencv_core.UMat flow) |
static boolean |
opencv_optflow.writeOpticalFlow(String path,
opencv_core.UMat flow) |
Copyright © 2017. All rights reserved.