| Package | Description |
|---|---|
| org.bytedeco.javacpp |
| Modifier and Type | Method and Description |
|---|---|
opencv_core.Size |
opencv_core.MatSize.apply() |
opencv_core.Size |
opencv_objdetect.HOGDescriptor.blockSize() |
opencv_core.Size |
opencv_objdetect.HOGDescriptor.blockStride() |
opencv_core.Size |
opencv_videostab.TranslationBasedLocalOutlierRejector.cellSize() |
opencv_core.Size |
opencv_objdetect.HOGDescriptor.cellSize() |
opencv_core.Size |
opencv_videostab.LpMotionStabilizer.frameSize() |
opencv_core.Size |
opencv_core.SizeVector.get(long i) |
opencv_core.Size |
opencv_bioinspired.Retina.getInputSize()
\brief Retrieve retina input buffer size
|
opencv_core.Size |
opencv_objdetect.DetectionBasedTracker.IDetector.getMaxObjectSize() |
opencv_core.Size |
opencv_objdetect.DetectionBasedTracker.IDetector.getMinObjectSize() |
opencv_core.Size |
opencv_objdetect.BaseCascadeClassifier.getOriginalWindowSize() |
opencv_core.Size |
opencv_objdetect.CascadeClassifier.getOriginalWindowSize() |
opencv_core.Size |
opencv_bioinspired.Retina.getOutputSize()
\brief Retrieve retina output buffer size that can be different from the input if a spatial log
transformation is applied
|
opencv_core.Size |
opencv_bioinspired.TransientAreasSegmentationModule.getSize()
\brief return the size of the managed input and output images
|
static opencv_core.Size |
opencv_imgproc.getTextSize(BytePointer text,
int fontFace,
double fontScale,
int thickness,
int[] baseLine) |
static opencv_core.Size |
opencv_imgproc.getTextSize(BytePointer text,
int fontFace,
double fontScale,
int thickness,
IntBuffer baseLine) |
static opencv_core.Size |
opencv_imgproc.getTextSize(BytePointer text,
int fontFace,
double fontScale,
int thickness,
IntPointer baseLine)
\brief Calculates the width and height of a text string.
|
static opencv_core.Size |
opencv_imgproc.getTextSize(String text,
int fontFace,
double fontScale,
int thickness,
int[] baseLine) |
static opencv_core.Size |
opencv_imgproc.getTextSize(String text,
int fontFace,
double fontScale,
int thickness,
IntBuffer baseLine) |
static opencv_core.Size |
opencv_imgproc.getTextSize(String text,
int fontFace,
double fontScale,
int thickness,
IntPointer baseLine) |
opencv_core.Size |
opencv_imgproc.CLAHE.getTilesGridSize() |
opencv_core.Size |
opencv_video.SparsePyrLKOpticalFlow.getWinSize() |
opencv_core.Size |
opencv_core.Size.height(int height) |
opencv_core.Size |
opencv_stitching.ImageFeatures.img_size() |
opencv_core.Size |
opencv_core.Size.position(long position) |
opencv_core.Size |
opencv_core.Size.put(opencv_core.Size sz) |
opencv_core.Size |
opencv_core.Rect.size()
size (width, height) of the rectangle
|
opencv_core.Size |
opencv_core.Mat.size() |
opencv_core.Size |
opencv_core.UMat.size() |
opencv_core.Size |
opencv_core.MatExpr.size() |
opencv_core.Size |
opencv_core.MatOp.size(opencv_core.MatExpr expr) |
opencv_core.Size |
opencv_dnn.Blob.size2()
Returns cv::Size(cols(), rows())
|
opencv_core.Size |
opencv_core.Size.width(int width) |
opencv_core.Size |
opencv_videostab.PyrLkOptFlowEstimatorBase.winSize() |
opencv_core.Size |
opencv_objdetect.HOGDescriptor.winSize() |
| Modifier and Type | Method and Description |
|---|---|
opencv_objdetect.HOGDescriptor |
opencv_objdetect.HOGDescriptor.blockSize(opencv_core.Size blockSize) |
opencv_objdetect.HOGDescriptor |
opencv_objdetect.HOGDescriptor.blockStride(opencv_core.Size blockStride) |
static void |
opencv_imgproc.blur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize) |
static void |
opencv_imgproc.blur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
opencv_core.Point anchor,
int borderType)
\brief Blurs an image using the normalized box filter.
|
static void |
opencv_imgproc.blur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize) |
static void |
opencv_imgproc.blur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize,
opencv_core.Point anchor,
int borderType) |
static void |
opencv_imgproc.boxFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.boxFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType)
\brief Blurs an image using the box filter.
|
static void |
opencv_imgproc.boxFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.boxFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType) |
opencv_core.Rect |
opencv_stitching.RotationWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap)
\brief Builds the projection maps according to the given camera data.
|
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.AffineWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.RotationWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.AffineWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat xmap,
opencv_core.UMat ymap) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage)
\brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
|
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.UMat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err,
opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria criteria,
int flags,
double minEigThreshold)
\brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
pyramids.
|
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.UMat prevImg,
opencv_core.UMat nextImg,
opencv_core.UMat prevPts,
opencv_core.UMat nextPts,
opencv_core.UMat status,
opencv_core.UMat err,
opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria criteria,
int flags,
double minEigThreshold) |
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria)
\brief Performs camera calibration
|
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria)
\overload double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
OutputArray stdDeviations, OutputArray perViewErrors,
int flags = 0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) )
|
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria)
\brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.UMat stdDeviationsIntrinsics,
opencv_core.UMat stdDeviationsExtrinsics,
opencv_core.UMat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
DoubleBuffer fovx,
DoubleBuffer fovy,
DoubleBuffer focalLength,
opencv_core.Point2d principalPoint,
DoubleBuffer aspectRatio) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
DoublePointer fovx,
DoublePointer fovy,
DoublePointer focalLength,
opencv_core.Point2d principalPoint,
DoublePointer aspectRatio)
\brief Computes useful camera characteristics from the camera matrix.
|
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.UMat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
double[] fovx,
double[] fovy,
double[] focalLength,
opencv_core.Point2d principalPoint,
double[] aspectRatio) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.UMat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
DoublePointer fovx,
DoublePointer fovy,
DoublePointer focalLength,
opencv_core.Point2d principalPoint,
DoublePointer aspectRatio) |
opencv_objdetect.HOGDescriptor |
opencv_objdetect.HOGDescriptor.cellSize(opencv_core.Size cellSize) |
static boolean |
opencv_imgproc.clipLine(opencv_core.Size imgSize,
opencv_core.Point pt1,
opencv_core.Point pt2)
\brief Clips the line against the image rectangle.
|
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.Mat lines1,
opencv_core.Mat lines2) |
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.Mat lines1,
opencv_core.Mat lines2,
opencv_core.Mat _image)
\brief Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels.
|
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.UMat lines1,
opencv_core.UMat lines2) |
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.UMat lines1,
opencv_core.UMat lines2,
opencv_core.UMat _image) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.UMat img,
float[] descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.UMat img,
FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_objdetect.HOGDescriptor.computeGradient(opencv_core.Mat img,
opencv_core.Mat grad,
opencv_core.Mat angleOfs,
opencv_core.Size paddingTL,
opencv_core.Size paddingBR) |
static void |
opencv_imgproc.cornerSubPix(opencv_core.Mat image,
opencv_core.Mat corners,
opencv_core.Size winSize,
opencv_core.Size zeroZone,
opencv_core.TermCriteria criteria)
\brief Refines the corner locations.
|
static void |
opencv_imgproc.cornerSubPix(opencv_core.UMat image,
opencv_core.UMat corners,
opencv_core.Size winSize,
opencv_core.Size zeroZone,
opencv_core.TermCriteria criteria) |
void |
opencv_core.Mat.create(opencv_core.Size size,
int type)
\overload
|
void |
opencv_core.UMat.create(opencv_core.Size size,
int type) |
void |
opencv_core.UMat.create(opencv_core.Size size,
int type,
int usageFlags) |
static opencv_video.SparsePyrLKOpticalFlow |
opencv_video.SparsePyrLKOpticalFlow.create(opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria crit,
int flags,
double minEigThreshold) |
static opencv_imgproc.CLAHE |
opencv_imgproc.createCLAHE(double clipLimit,
opencv_core.Size tileGridSize)
\} imgproc_shape
|
static void |
opencv_imgproc.createHanningWindow(opencv_core.Mat dst,
opencv_core.Size winSize,
int type)
\brief This function computes a Hanning window coefficients in two dimensions.
|
static void |
opencv_imgproc.createHanningWindow(opencv_core.UMat dst,
opencv_core.Size winSize,
int type) |
static opencv_bioinspired.Retina |
opencv_bioinspired.createRetina(opencv_core.Size inputSize)
\relates bioinspired::Retina
\{
|
static opencv_bioinspired.Retina |
opencv_bioinspired.createRetina(opencv_core.Size inputSize,
boolean colorMode) |
static opencv_bioinspired.Retina |
opencv_bioinspired.createRetina(opencv_core.Size inputSize,
boolean colorMode,
int colorSamplingMethod,
boolean useRetinaLogSampling,
float reductionFactor,
float samplingStrenght)
\brief Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance
|
static opencv_bioinspired.RetinaFastToneMapping |
opencv_bioinspired.createRetinaFastToneMapping(opencv_core.Size inputSize)
\relates bioinspired::RetinaFastToneMapping
|
static opencv_bioinspired.TransientAreasSegmentationModule |
opencv_bioinspired.createTransientAreasSegmentationModule(opencv_core.Size inputSize)
\brief allocator
|
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
double[] weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
DoubleBuffer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations)
without found weights output
|
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
DoublePointer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations)
with found weights output
|
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
DoubleBuffer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize)
\brief Detects objects of different sizes in the input image.
|
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping)
without found weights output
|
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
DoublePointer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping)
with result weights output
|
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer rejectLevels,
DoubleBuffer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
IntPointer rejectLevels,
DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
double[] foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.UMat img,
opencv_core.RectVector foundLocations,
DoublePointer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer rejectLevels,
DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize)
\overload
|
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer rejectLevels,
DoubleBuffer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
IntPointer rejectLevels,
DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels)
\overload
if
outputRejectLevels is true returns rejectLevels and levelWeights |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.UMat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.UMat image,
opencv_core.RectVector objects,
IntPointer rejectLevels,
DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
double[] confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
DoubleBuffer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
DoublePointer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding)
evaluate specified ROI and return confidence value for each location
|
static void |
opencv_calib3d.drawChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners,
boolean patternWasFound)
\brief Renders the detected chessboard corners.
|
static void |
opencv_calib3d.drawChessboardCorners(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat corners,
boolean patternWasFound) |
static void |
opencv_imgproc.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color) |
static void |
opencv_imgproc.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\brief Draws a simple or thick elliptic arc or fills an ellipse sector.
|
static void |
opencv_imgproc.ellipse(opencv_core.UMat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color) |
static void |
opencv_imgproc.ellipse(opencv_core.UMat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.ellipse2Poly(opencv_core.Point center,
opencv_core.Size axes,
int angle,
int arcStart,
int arcEnd,
int delta,
opencv_core.PointVector pts)
\brief Approximates an elliptic arc with a polyline.
|
static opencv_core.Mat |
opencv_videostab.ensureInclusionConstraint(opencv_core.Mat M,
opencv_core.Size size,
float trimRatio) |
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Size image_size,
opencv_core.Mat R,
opencv_core.Mat P) |
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Size image_size,
opencv_core.Mat R,
opencv_core.Mat P,
double balance,
opencv_core.Size new_size,
double fov_scale)
\brief Estimates new camera matrix for undistortion or rectification.
|
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.Size image_size,
opencv_core.UMat R,
opencv_core.UMat P) |
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.Size image_size,
opencv_core.UMat R,
opencv_core.UMat P,
double balance,
opencv_core.Size new_size,
double fov_scale) |
static float |
opencv_videostab.estimateOptimalTrimRatio(opencv_core.Mat M,
opencv_core.Size size) |
static opencv_core.MatExpr |
opencv_core.Mat.eye(opencv_core.Size size,
int type)
\overload
|
static opencv_core.UMat |
opencv_core.UMat.eye(opencv_core.Size size,
int type) |
static boolean |
opencv_calib3d.find4QuadCornerSubpix(opencv_core.Mat img,
opencv_core.Mat corners,
opencv_core.Size region_size)
finds subpixel-accurate positions of the chessboard corners
|
static boolean |
opencv_calib3d.find4QuadCornerSubpix(opencv_core.UMat img,
opencv_core.UMat corners,
opencv_core.Size region_size) |
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners) |
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners,
int flags)
\brief Finds the positions of internal corners of the chessboard.
|
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat corners) |
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat corners,
int flags) |
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers) |
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers,
int flags,
opencv_features2d.Feature2D blobDetector)
\brief Finds centers in the grid of circles.
|
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat centers) |
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat centers,
int flags,
opencv_features2d.Feature2D blobDetector) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaX) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaX,
double sigmaY,
int borderType)
\brief Blurs an image using a Gaussian filter.
|
static void |
opencv_imgproc.GaussianBlur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize,
double sigmaX) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size ksize,
double sigmaX,
double sigmaY,
int borderType) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint)
\brief Returns the default new camera matrix.
|
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint) |
static opencv_core.Mat |
opencv_imgproc.getGaborKernel(opencv_core.Size ksize,
double sigma,
double theta,
double lambd,
double gamma) |
static opencv_core.Mat |
opencv_imgproc.getGaborKernel(opencv_core.Size ksize,
double sigma,
double theta,
double lambd,
double gamma,
double psi,
int ktype)
\brief Returns Gabor filter coefficients.
|
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint)
\brief Returns the new camera matrix based on the free scaling parameter.
|
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.Mat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.Mat patch) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.Mat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.Mat patch,
int patchType)
\brief Retrieves a pixel rectangle from an image with sub-pixel accuracy.
|
static void |
opencv_imgproc.getRectSubPix(opencv_core.UMat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.UMat patch) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.UMat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.UMat patch,
int patchType) |
static opencv_core.Mat |
opencv_imgproc.getStructuringElement(int shape,
opencv_core.Size ksize) |
static opencv_core.Mat |
opencv_imgproc.getStructuringElement(int shape,
opencv_core.Size ksize,
opencv_core.Point anchor)
\brief Returns a structuring element of the specified size and shape for morphological operations.
|
static void |
opencv_objdetect.groupRectangles_meanshift(opencv_core.RectVector rectList,
double[] foundWeights,
double[] foundScales,
double detectThreshold,
opencv_core.Size winDetSize) |
static void |
opencv_objdetect.groupRectangles_meanshift(opencv_core.RectVector rectList,
DoubleBuffer foundWeights,
DoubleBuffer foundScales,
double detectThreshold,
opencv_core.Size winDetSize) |
static void |
opencv_objdetect.groupRectangles_meanshift(opencv_core.RectVector rectList,
DoublePointer foundWeights,
DoublePointer foundScales,
double detectThreshold,
opencv_core.Size winDetSize)
\overload
|
opencv_stitching.ImageFeatures |
opencv_stitching.ImageFeatures.img_size(opencv_core.Size img_size) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
double aspectRatio)
\brief Finds an initial camera matrix from 3D-2D point correspondences.
|
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
double aspectRatio) |
static void |
opencv_imgproc.initUndistortRectifyMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat R,
opencv_core.Mat newCameraMatrix,
opencv_core.Size size,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2)
\brief Computes the undistortion and rectification transformation map.
|
static void |
opencv_calib3d.initUndistortRectifyMap(opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Mat R,
opencv_core.Mat P,
opencv_core.Size size,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2)
\brief Computes undistortion and rectification maps for image transform by cv::remap().
|
static void |
opencv_imgproc.initUndistortRectifyMap(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.UMat R,
opencv_core.UMat newCameraMatrix,
opencv_core.Size size,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2) |
static void |
opencv_calib3d.initUndistortRectifyMap(opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMat R,
opencv_core.UMat P,
opencv_core.Size size,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2) |
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2) |
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2,
int projType,
double alpha)
initializes maps for cv::remap() for wide-angle
|
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2) |
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.UMat map1,
opencv_core.UMat map2,
int projType,
double alpha) |
void |
opencv_core.Mat.locateROI(opencv_core.Size wholeSize,
opencv_core.Point ofs)
\brief Locates the matrix header within a parent matrix.
|
void |
opencv_core.UMat.locateROI(opencv_core.Size wholeSize,
opencv_core.Point ofs)
locates matrix header within a parent matrix.
|
static opencv_core.MatExpr |
opencv_core.Mat.ones(opencv_core.Size size,
int type)
\overload
|
static opencv_core.UMat |
opencv_core.UMat.ones(opencv_core.Size size,
int type) |
boolean |
opencv_videoio.VideoWriter.open(BytePointer filename,
int fourcc,
double fps,
opencv_core.Size frameSize) |
boolean |
opencv_videoio.VideoWriter.open(BytePointer filename,
int fourcc,
double fps,
opencv_core.Size frameSize,
boolean isColor)
\brief Initializes or reinitializes video writer.
|
boolean |
opencv_videoio.VideoWriter.open(String filename,
int fourcc,
double fps,
opencv_core.Size frameSize) |
boolean |
opencv_videoio.VideoWriter.open(String filename,
int fourcc,
double fps,
opencv_core.Size frameSize,
boolean isColor) |
static boolean |
opencv_stitching.overlapRoi(opencv_core.Point tl1,
opencv_core.Point tl2,
opencv_core.Size sz1,
opencv_core.Size sz2,
opencv_core.Rect roi) |
void |
opencv_videostab.IOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat mask) |
void |
opencv_videostab.NullOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat mask) |
void |
opencv_videostab.TranslationBasedLocalOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat mask) |
void |
opencv_videostab.IOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat mask) |
void |
opencv_videostab.NullOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat mask) |
void |
opencv_videostab.TranslationBasedLocalOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.UMat points0,
opencv_core.UMat points1,
opencv_core.UMat mask) |
opencv_core.SizeVector |
opencv_core.SizeVector.put(long i,
opencv_core.Size value) |
opencv_core.SizeVector |
opencv_core.SizeVector.put(opencv_core.Size... array) |
opencv_core.Size |
opencv_core.Size.put(opencv_core.Size sz) |
static void |
opencv_imgproc.pyrDown(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dstsize,
int borderType)
\brief Blurs an image and downsamples it.
|
static void |
opencv_imgproc.pyrDown(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dstsize,
int borderType) |
static void |
opencv_imgproc.pyrUp(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dstsize,
int borderType)
\brief Upsamples an image and then blurs it.
|
static void |
opencv_imgproc.pyrUp(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dstsize,
int borderType) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
computes the rectification transformations for 3-head camera, where all the heads are on the same line.
|
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.UMatVector imgpt1,
opencv_core.UMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.UMat cameraMatrix3,
opencv_core.UMat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.UMat R12,
opencv_core.UMat T12,
opencv_core.UMat R13,
opencv_core.UMat T13,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat R3,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat P3,
opencv_core.UMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.UMat cameraMatrix3,
opencv_core.UMat distCoeffs3,
opencv_core.UMatVector imgpt1,
opencv_core.UMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.UMat R12,
opencv_core.UMat T12,
opencv_core.UMat R13,
opencv_core.UMat T13,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat R3,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat P3,
opencv_core.UMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static void |
opencv_imgproc.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize) |
static void |
opencv_imgproc.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize,
double fx,
double fy,
int interpolation)
\brief Resizes an image.
|
static void |
opencv_imgproc.resize(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dsize) |
static void |
opencv_imgproc.resize(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.Size dsize,
double fx,
double fy,
int interpolation) |
static void |
opencv_features2d.KeyPointsFilter.runByImageBorder(opencv_core.KeyPointVector keypoints,
opencv_core.Size imageSize,
int borderSize) |
void |
opencv_videostab.TranslationBasedLocalOutlierRejector.setCellSize(opencv_core.Size val) |
void |
opencv_videostab.LpMotionStabilizer.setFrameSize(opencv_core.Size val) |
void |
opencv_objdetect.DetectionBasedTracker.IDetector.setMaxObjectSize(opencv_core.Size max) |
void |
opencv_objdetect.DetectionBasedTracker.IDetector.setMinObjectSize(opencv_core.Size min) |
void |
opencv_imgproc.CLAHE.setTilesGridSize(opencv_core.Size tileGridSize) |
void |
opencv_videostab.PyrLkOptFlowEstimatorBase.setWinSize(opencv_core.Size val) |
void |
opencv_video.SparsePyrLKOpticalFlow.setWinSize(opencv_core.Size winSize) |
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.Mat _src,
opencv_core.Mat _dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.Mat _src,
opencv_core.Mat _dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType)
\brief Calculates the normalized sum of squares of the pixel values overlapping the filter.
|
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.UMat _src,
opencv_core.UMat _dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.UMat _src,
opencv_core.UMat _dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
int flags,
opencv_core.TermCriteria criteria)
\brief Performs stereo calibration
|
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
int flags,
opencv_core.TermCriteria criteria)
\brief Calibrates the stereo camera.
|
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat E,
opencv_core.UMat F,
int flags,
opencv_core.TermCriteria criteria) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat tvec,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2)
\brief Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat tvec,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags,
opencv_core.Size newImageSize,
double balance,
double fov_scale)
\brief Stereo rectification for fisheye camera model
|
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q) |
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat tvec,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q,
int flags) |
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2) |
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat K1,
opencv_core.UMat D1,
opencv_core.UMat K2,
opencv_core.UMat D2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat tvec,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q,
int flags,
opencv_core.Size newImageSize,
double balance,
double fov_scale) |
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat F,
opencv_core.Size imgSize,
opencv_core.Mat H1,
opencv_core.Mat H2) |
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat F,
opencv_core.Size imgSize,
opencv_core.Mat H1,
opencv_core.Mat H2,
double threshold)
\brief Computes a rectification transform for an uncalibrated stereo camera.
|
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat F,
opencv_core.Size imgSize,
opencv_core.UMat H1,
opencv_core.UMat H2) |
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat F,
opencv_core.Size imgSize,
opencv_core.UMat H1,
opencv_core.UMat H2,
double threshold) |
static void |
opencv_calib3d.undistortImage(opencv_core.Mat distorted,
opencv_core.Mat undistorted,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Mat Knew,
opencv_core.Size new_size)
\brief Transforms an image to compensate for fisheye lens distortion.
|
static void |
opencv_calib3d.undistortImage(opencv_core.UMat distorted,
opencv_core.UMat undistorted,
opencv_core.UMat K,
opencv_core.UMat D,
opencv_core.UMat Knew,
opencv_core.Size new_size) |
static void |
opencv_imgproc.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue)
\brief Applies an affine transformation to an image.
|
static void |
opencv_imgproc.warpAffine(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpAffine(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue) |
void |
opencv_stitching.RotationWarper.warpBackward(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Size dst_size,
opencv_core.Mat dst)
\brief Projects the image backward.
|
void |
opencv_stitching.RotationWarper.warpBackward(opencv_core.UMat src,
opencv_core.UMat K,
opencv_core.UMat R,
int interp_mode,
int border_mode,
opencv_core.Size dst_size,
opencv_core.UMat dst) |
static void |
opencv_imgproc.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue)
\brief Applies a perspective transformation to an image.
|
static void |
opencv_imgproc.warpPerspective(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpPerspective(opencv_core.UMat src,
opencv_core.UMat dst,
opencv_core.UMat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue) |
opencv_core.Rect |
opencv_stitching.RotationWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Rect |
opencv_stitching.AffineWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T) |
opencv_core.Rect |
opencv_stitching.RotationWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Rect |
opencv_stitching.AffineWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.UMat K,
opencv_core.UMat R,
opencv_core.UMat T) |
opencv_objdetect.HOGDescriptor |
opencv_objdetect.HOGDescriptor.winSize(opencv_core.Size winSize) |
static opencv_core.MatExpr |
opencv_core.Mat.zeros(opencv_core.Size size,
int type)
\overload
|
static opencv_core.UMat |
opencv_core.UMat.zeros(opencv_core.Size size,
int type) |
| Constructor and Description |
|---|
HOGDescriptor(opencv_core.Size _winSize,
opencv_core.Size _blockSize,
opencv_core.Size _blockStride,
opencv_core.Size _cellSize,
int _nbins) |
HOGDescriptor(opencv_core.Size _winSize,
opencv_core.Size _blockSize,
opencv_core.Size _blockStride,
opencv_core.Size _cellSize,
int _nbins,
int _derivAperture,
double _winSigma,
int _histogramNormType,
double _L2HysThreshold,
boolean _gammaCorrection,
int _nlevels,
boolean _signedGradient) |
Mat(opencv_core.Size size,
int type)
\overload
|
Mat(opencv_core.Size size,
int type,
opencv_core.Scalar s)
\overload
|
Mat(opencv_core.Size size,
int type,
Pointer data) |
Mat(opencv_core.Size size,
int type,
Pointer data,
long step)
\overload
|
OrbFeaturesFinder(opencv_core.Size _grid_size,
int nfeatures,
float scaleFactor,
int nlevels) |
Point(opencv_core.Size sz) |
Rect(opencv_core.Point org,
opencv_core.Size sz) |
Size(opencv_core.Size sz) |
SizeVector(opencv_core.Size... array) |
UMat(opencv_core.Size size,
int type) |
UMat(opencv_core.Size size,
int type,
int usageFlags) |
UMat(opencv_core.Size size,
int type,
opencv_core.Scalar s) |
UMat(opencv_core.Size size,
int type,
opencv_core.Scalar s,
int usageFlags) |
VideoWriter(BytePointer filename,
int fourcc,
double fps,
opencv_core.Size frameSize) |
VideoWriter(BytePointer filename,
int fourcc,
double fps,
opencv_core.Size frameSize,
boolean isColor)
\overload
|
VideoWriter(String filename,
int fourcc,
double fps,
opencv_core.Size frameSize) |
VideoWriter(String filename,
int fourcc,
double fps,
opencv_core.Size frameSize,
boolean isColor) |
Copyright © 2017. All rights reserved.