| Package | Description |
|---|---|
| org.bytedeco.javacpp | |
| org.bytedeco.javacpp.helper | |
| Modifier and Type | Method and Description |
|---|---|
opencv_stitching.Blender |
opencv_stitching.Stitcher.blender() |
opencv_stitching.BundleAdjusterBase |
opencv_stitching.Stitcher.bundleAdjuster() |
opencv_dnn.Layer |
opencv_dnn.LayerFactory.Constuctor.call(opencv_dnn.LayerParams params) |
opencv_objdetect.BaseCascadeClassifier |
opencv_objdetect.CascadeClassifier.cc() |
opencv_features2d.DescriptorMatcher |
opencv_features2d.DescriptorMatcher.clone() |
opencv_features2d.DescriptorMatcher |
opencv_features2d.BFMatcher.clone() |
opencv_features2d.DescriptorMatcher |
opencv_features2d.FlannBasedMatcher.clone() |
opencv_features2d.DescriptorMatcher |
opencv_features2d.DescriptorMatcher.clone(boolean emptyTrainData)
\brief Clones the matcher.
|
opencv_features2d.DescriptorMatcher |
opencv_features2d.BFMatcher.clone(boolean emptyTrainData) |
opencv_features2d.DescriptorMatcher |
opencv_features2d.FlannBasedMatcher.clone(boolean emptyTrainData) |
static opencv_ximgproc.AdaptiveManifoldFilter |
opencv_ximgproc.AdaptiveManifoldFilter.create() |
static opencv_xfeatures2d.FREAK |
opencv_xfeatures2d.FREAK.create() |
static opencv_xfeatures2d.StarDetector |
opencv_xfeatures2d.StarDetector.create() |
static opencv_xfeatures2d.BriefDescriptorExtractor |
opencv_xfeatures2d.BriefDescriptorExtractor.create() |
static opencv_xfeatures2d.LUCID |
opencv_xfeatures2d.LUCID.create() |
static opencv_xfeatures2d.LATCH |
opencv_xfeatures2d.LATCH.create() |
static opencv_xfeatures2d.DAISY |
opencv_xfeatures2d.DAISY.create() |
static opencv_xfeatures2d.MSDDetector |
opencv_xfeatures2d.MSDDetector.create() |
static opencv_xfeatures2d.VGG |
opencv_xfeatures2d.VGG.create() |
static opencv_xfeatures2d.BoostDesc |
opencv_xfeatures2d.BoostDesc.create() |
static opencv_xfeatures2d.PCTSignatures |
opencv_xfeatures2d.PCTSignatures.create() |
static opencv_xfeatures2d.PCTSignaturesSQFD |
opencv_xfeatures2d.PCTSignaturesSQFD.create() |
static opencv_xfeatures2d.SIFT |
opencv_xfeatures2d.SIFT.create() |
static opencv_xfeatures2d.SURF |
opencv_xfeatures2d.SURF.create() |
static opencv_video.DualTVL1OpticalFlow |
opencv_video.DualTVL1OpticalFlow.create() |
static opencv_video.FarnebackOpticalFlow |
opencv_video.FarnebackOpticalFlow.create() |
static opencv_video.SparsePyrLKOpticalFlow |
opencv_video.SparsePyrLKOpticalFlow.create() |
static opencv_text.OCRTesseract |
opencv_text.OCRTesseract.create() |
static opencv_stitching.Stitcher |
opencv_stitching.Stitcher.create() |
static opencv_ml.NormalBayesClassifier |
opencv_ml.NormalBayesClassifier.create()
Creates empty model
Use StatModel::train to train the model after creation.
|
static opencv_ml.KNearest |
opencv_ml.KNearest.create()
\brief Creates the empty model
|
static opencv_ml.SVM |
opencv_ml.SVM.create()
Creates empty model.
|
static opencv_ml.EM |
opencv_ml.EM.create()
Creates empty EM model.
|
static opencv_ml.DTrees |
opencv_ml.DTrees.create()
\brief Creates the empty model
|
static opencv_ml.RTrees |
opencv_ml.RTrees.create()
Creates the empty model.
|
static opencv_ml.Boost |
opencv_ml.Boost.create()
Creates the empty model.
|
static opencv_ml.ANN_MLP |
opencv_ml.ANN_MLP.create()
\brief Creates empty model
|
static opencv_ml.LogisticRegression |
opencv_ml.LogisticRegression.create()
\brief Creates empty model.
|
static opencv_ml.SVMSGD |
opencv_ml.SVMSGD.create()
\brief Creates empty model.
|
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create() |
static opencv_features2d.ORB |
opencv_features2d.ORB.create() |
static opencv_features2d.MSER |
opencv_features2d.MSER.create() |
static opencv_features2d.FastFeatureDetector |
opencv_features2d.FastFeatureDetector.create() |
static opencv_features2d.AgastFeatureDetector |
opencv_features2d.AgastFeatureDetector.create() |
static opencv_features2d.GFTTDetector |
opencv_features2d.GFTTDetector.create() |
static opencv_features2d.SimpleBlobDetector |
opencv_features2d.SimpleBlobDetector.create() |
static opencv_features2d.KAZE |
opencv_features2d.KAZE.create() |
static opencv_features2d.AKAZE |
opencv_features2d.AKAZE.create() |
static opencv_features2d.BFMatcher |
opencv_features2d.BFMatcher.create() |
static opencv_features2d.FlannBasedMatcher |
opencv_features2d.FlannBasedMatcher.create() |
static opencv_face.StandardCollector |
opencv_face.StandardCollector.create() |
static opencv_core.DownhillSolver |
opencv_core.DownhillSolver.create() |
static opencv_core.ConjGradSolver |
opencv_core.ConjGradSolver.create() |
static opencv_calib3d.StereoBM |
opencv_calib3d.StereoBM.create() |
static opencv_xfeatures2d.FREAK |
opencv_xfeatures2d.FREAK.create(boolean orientationNormalized,
boolean scaleNormalized,
float patternScale,
int nOctaves,
int[] selectedPairs) |
static opencv_xfeatures2d.FREAK |
opencv_xfeatures2d.FREAK.create(boolean orientationNormalized,
boolean scaleNormalized,
float patternScale,
int nOctaves,
IntBuffer selectedPairs) |
static opencv_features2d.KAZE |
opencv_features2d.KAZE.create(boolean extended,
boolean upright,
float threshold,
int nOctaves,
int nOctaveLayers,
int diffusivity)
\brief The KAZE constructor
|
static opencv_xfeatures2d.FREAK |
opencv_xfeatures2d.FREAK.create(boolean orientationNormalized,
boolean scaleNormalized,
float patternScale,
int nOctaves,
IntPointer selectedPairs) |
static opencv_features2d.DescriptorMatcher |
opencv_features2d.DescriptorMatcher.create(BytePointer descriptorMatcherType)
\brief Creates a descriptor matcher of a given type with the default parameters (using default
constructor).
|
static opencv_text.OCRTesseract |
opencv_text.OCRTesseract.create(BytePointer datapath,
BytePointer language,
BytePointer char_whitelist,
int oem,
int psmode)
\brief Creates an instance of the OCRTesseract class.
|
static opencv_face.StandardCollector |
opencv_face.StandardCollector.create(double threshold)
\brief Static constructor
|
static opencv_video.DualTVL1OpticalFlow |
opencv_video.DualTVL1OpticalFlow.create(double tau,
double lambda,
double theta,
int nscales,
int warps,
double epsilon,
int innnerIterations,
int outerIterations,
double scaleStep,
double gamma,
int medianFiltering,
boolean useInitialFlow)
\brief Creates instance of cv::DualTVL1OpticalFlow
|
static opencv_xfeatures2d.SURF |
opencv_xfeatures2d.SURF.create(double hessianThreshold,
int nOctaves,
int nOctaveLayers,
boolean extended,
boolean upright) |
opencv_stitching.RotationWarper |
opencv_stitching.WarperCreator.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.PlaneWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.CylindricalWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.SphericalWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.FisheyeWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.StereographicWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.CompressedRectilinearWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.CompressedRectilinearPortraitWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.PaniniWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.PaniniPortraitWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.MercatorWarper.create(float scale) |
opencv_stitching.RotationWarper |
opencv_stitching.TransverseMercatorWarper.create(float scale) |
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create(float[] radiusList,
int[] numberList) |
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create(float[] radiusList,
int[] numberList,
float dMax,
float dMin,
int[] indexChange) |
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create(FloatBuffer radiusList,
IntBuffer numberList) |
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create(FloatBuffer radiusList,
IntBuffer numberList,
float dMax,
float dMin,
IntBuffer indexChange) |
static opencv_xfeatures2d.DAISY |
opencv_xfeatures2d.DAISY.create(float radius,
int q_radius,
int q_theta,
int q_hist,
int norm,
opencv_core.Mat H,
boolean interpolation,
boolean use_orientation) |
static opencv_xfeatures2d.DAISY |
opencv_xfeatures2d.DAISY.create(float radius,
int q_radius,
int q_theta,
int q_hist,
int norm,
opencv_core.UMat H,
boolean interpolation,
boolean use_orientation) |
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create(FloatPointer radiusList,
IntPointer numberList) |
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create(FloatPointer radiusList,
IntPointer numberList,
float dMax,
float dMin,
IntPointer indexChange)
\brief The BRISK constructor for a custom pattern
|
static opencv_features2d.DescriptorMatcher |
opencv_features2d.DescriptorMatcher.create(int matcherType) |
static opencv_xfeatures2d.BriefDescriptorExtractor |
opencv_xfeatures2d.BriefDescriptorExtractor.create(int bytes,
boolean use_orientation) |
static opencv_stitching.Stitcher |
opencv_stitching.Stitcher.create(int mode,
boolean try_use_gpu)
\brief Creates a Stitcher configured in one of the stitching modes.
|
static opencv_features2d.BFMatcher |
opencv_features2d.BFMatcher.create(int normType,
boolean crossCheck) |
static opencv_xfeatures2d.BoostDesc |
opencv_xfeatures2d.BoostDesc.create(int desc,
boolean use_scale_orientation,
float scale_factor) |
static opencv_xfeatures2d.LATCH |
opencv_xfeatures2d.LATCH.create(int bytes,
boolean rotationInvariance,
int half_ssd_size) |
static opencv_features2d.FastFeatureDetector |
opencv_features2d.FastFeatureDetector.create(int threshold,
boolean nonmaxSuppression,
int type) |
static opencv_features2d.AgastFeatureDetector |
opencv_features2d.AgastFeatureDetector.create(int threshold,
boolean nonmaxSuppression,
int type) |
static opencv_video.FarnebackOpticalFlow |
opencv_video.FarnebackOpticalFlow.create(int numLevels,
double pyrScale,
boolean fastPyramids,
int winSize,
int numIters,
int polyN,
double polySigma,
int flags) |
static opencv_features2d.GFTTDetector |
opencv_features2d.GFTTDetector.create(int maxCorners,
double qualityLevel,
double minDistance,
int blockSize,
boolean useHarrisDetector,
double k) |
static opencv_xfeatures2d.VGG |
opencv_xfeatures2d.VGG.create(int desc,
float isigma,
boolean img_normalize,
boolean use_scale_orientation,
float scale_factor,
boolean dsc_normalize) |
static opencv_features2d.ORB |
opencv_features2d.ORB.create(int nfeatures,
float scaleFactor,
int nlevels,
int edgeThreshold,
int firstLevel,
int WTA_K,
int scoreType,
int patchSize,
int fastThreshold)
\brief The ORB constructor
|
static opencv_xfeatures2d.LUCID |
opencv_xfeatures2d.LUCID.create(int lucid_kernel,
int blur_kernel) |
static opencv_calib3d.StereoBM |
opencv_calib3d.StereoBM.create(int numDisparities,
int blockSize)
\brief Creates StereoBM object
|
static opencv_xfeatures2d.SIFT |
opencv_xfeatures2d.SIFT.create(int nfeatures,
int nOctaveLayers,
double contrastThreshold,
double edgeThreshold,
double sigma) |
static opencv_xfeatures2d.PCTSignaturesSQFD |
opencv_xfeatures2d.PCTSignaturesSQFD.create(int distanceFunction,
int similarityFunction,
float similarityParameter)
\brief Creates the algorithm instance using selected distance function,
similarity function and similarity function parameter.
|
static opencv_features2d.BRISK |
opencv_features2d.BRISK.create(int thresh,
int octaves,
float patternScale)
\brief The BRISK constructor
|
static opencv_xfeatures2d.PCTSignatures |
opencv_xfeatures2d.PCTSignatures.create(int initSampleCount,
int initSeedCount,
int pointDistribution)
\brief Creates PCTSignatures algorithm using sample and seed count.
|
static opencv_calib3d.StereoSGBM |
opencv_calib3d.StereoSGBM.create(int minDisparity,
int numDisparities,
int blockSize) |
static opencv_features2d.MSER |
opencv_features2d.MSER.create(int _delta,
int _min_area,
int _max_area,
double _max_variation,
double _min_diversity,
int _max_evolution,
double _area_threshold,
double _min_margin,
int _edge_blur_size)
\brief Full constructor for MSER detector
|
static opencv_features2d.AKAZE |
opencv_features2d.AKAZE.create(int descriptor_type,
int descriptor_size,
int descriptor_channels,
float threshold,
int nOctaves,
int nOctaveLayers,
int diffusivity)
\brief The AKAZE constructor
|
static opencv_core.DFT1D |
opencv_core.DFT1D.create(int len,
int count,
int depth,
int flags) |
static opencv_core.DCT2D |
opencv_core.DCT2D.create(int width,
int height,
int depth,
int flags) |
static opencv_core.DFT1D |
opencv_core.DFT1D.create(int len,
int count,
int depth,
int flags,
boolean[] useBuffer) |
static opencv_core.DFT1D |
opencv_core.DFT1D.create(int len,
int count,
int depth,
int flags,
BoolPointer useBuffer) |
static opencv_xfeatures2d.MSDDetector |
opencv_xfeatures2d.MSDDetector.create(int m_patch_radius,
int m_search_area_radius,
int m_nms_radius,
int m_nms_scale_radius,
float m_th_saliency,
int m_kNN,
float m_scale_factor,
int m_n_scales,
boolean m_compute_orientation) |
static opencv_xfeatures2d.StarDetector |
opencv_xfeatures2d.StarDetector.create(int maxSize,
int responseThreshold,
int lineThresholdProjected,
int lineThresholdBinarized,
int suppressNonmaxSize)
the full constructor
|
static opencv_core.DFT2D |
opencv_core.DFT2D.create(int width,
int height,
int depth,
int src_channels,
int dst_channels,
int flags) |
static opencv_core.DFT2D |
opencv_core.DFT2D.create(int width,
int height,
int depth,
int src_channels,
int dst_channels,
int flags,
int nonzero_rows) |
static opencv_calib3d.StereoSGBM |
opencv_calib3d.StereoSGBM.create(int minDisparity,
int numDisparities,
int blockSize,
int P1,
int P2,
int disp12MaxDiff,
int preFilterCap,
int uniquenessRatio,
int speckleWindowSize,
int speckleRange,
int mode)
\brief Creates StereoSGBM object
|
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.Mat samples,
int layout,
opencv_core.Mat responses) |
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.Mat samples,
int layout,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_core.Mat sampleWeights,
opencv_core.Mat varType)
\brief Creates training data from in-memory arrays.
|
static opencv_core.DownhillSolver |
opencv_core.DownhillSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.Mat initStep,
opencv_core.TermCriteria termcrit)
\brief This function returns the reference to the ready-to-use DownhillSolver object.
|
static opencv_core.ConjGradSolver |
opencv_core.ConjGradSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.TermCriteria termcrit)
\brief This function returns the reference to the ready-to-use ConjGradSolver object.
|
static opencv_core.DownhillSolver |
opencv_core.DownhillSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.UMat initStep,
opencv_core.TermCriteria termcrit) |
static opencv_xfeatures2d.PCTSignatures |
opencv_xfeatures2d.PCTSignatures.create(opencv_core.Point2fVector initSamplingPoints,
int initSeedCount)
\brief Creates PCTSignatures algorithm using pre-generated sampling points
and number of clusterization seeds.
|
static opencv_xfeatures2d.PCTSignatures |
opencv_xfeatures2d.PCTSignatures.create(opencv_core.Point2fVector initSamplingPoints,
int[] initClusterSeedIndexes) |
static opencv_xfeatures2d.PCTSignatures |
opencv_xfeatures2d.PCTSignatures.create(opencv_core.Point2fVector initSamplingPoints,
IntBuffer initClusterSeedIndexes) |
static opencv_xfeatures2d.PCTSignatures |
opencv_xfeatures2d.PCTSignatures.create(opencv_core.Point2fVector initSamplingPoints,
IntPointer initClusterSeedIndexes)
\brief Creates PCTSignatures algorithm using pre-generated sampling points
and clusterization seeds indexes.
|
static opencv_video.SparsePyrLKOpticalFlow |
opencv_video.SparsePyrLKOpticalFlow.create(opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria crit,
int flags,
double minEigThreshold) |
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.UMat samples,
int layout,
opencv_core.UMat responses) |
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.UMat samples,
int layout,
opencv_core.UMat responses,
opencv_core.UMat varIdx,
opencv_core.UMat sampleIdx,
opencv_core.UMat sampleWeights,
opencv_core.UMat varType) |
static opencv_features2d.SimpleBlobDetector |
opencv_features2d.SimpleBlobDetector.create(opencv_features2d.SimpleBlobDetector.Params parameters) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size)
\brief Creates an instance of the OCRBeamSearchDecoder class.
|
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode)
\brief Creates an instance of the OCRHMMDecoder class.
|
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode) |
static opencv_features2d.DescriptorMatcher |
opencv_features2d.DescriptorMatcher.create(String descriptorMatcherType) |
static opencv_text.OCRTesseract |
opencv_text.OCRTesseract.create(String datapath,
String language,
String char_whitelist,
int oem,
int psmode) |
static opencv_shape.AffineTransformer |
opencv_shape.createAffineTransformer(boolean fullAffine)
Complete constructor
|
static opencv_photo.AlignMTB |
opencv_photo.createAlignMTB() |
static opencv_photo.AlignMTB |
opencv_photo.createAlignMTB(int max_bits,
int exclude_range,
boolean cut)
\brief Creates AlignMTB object
|
static opencv_ximgproc.AdaptiveManifoldFilter |
opencv_ximgproc.createAMFilter(double sigma_s,
double sigma_r) |
static opencv_ximgproc.AdaptiveManifoldFilter |
opencv_ximgproc.createAMFilter(double sigma_s,
double sigma_r,
boolean adjust_outliers)
\brief Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.
|
static opencv_video.BackgroundSubtractorKNN |
opencv_video.createBackgroundSubtractorKNN() |
static opencv_video.BackgroundSubtractorKNN |
opencv_video.createBackgroundSubtractorKNN(int history,
double dist2Threshold,
boolean detectShadows)
\brief Creates KNN Background Subtractor
|
static opencv_video.BackgroundSubtractorMOG2 |
opencv_video.createBackgroundSubtractorMOG2() |
static opencv_video.BackgroundSubtractorMOG2 |
opencv_video.createBackgroundSubtractorMOG2(int history,
double varThreshold,
boolean detectShadows)
\brief Creates MOG2 Background Subtractor
|
static opencv_dnn.Importer |
opencv_dnn.createCaffeImporter(BytePointer prototxt) |
static opencv_dnn.Importer |
opencv_dnn.createCaffeImporter(BytePointer prototxt,
BytePointer caffeModel)
\brief Creates the importer of Caffe framework network.
|
static opencv_dnn.Importer |
opencv_dnn.createCaffeImporter(String prototxt) |
static opencv_dnn.Importer |
opencv_dnn.createCaffeImporter(String prototxt,
String caffeModel) |
static opencv_photo.CalibrateDebevec |
opencv_photo.createCalibrateDebevec() |
static opencv_photo.CalibrateDebevec |
opencv_photo.createCalibrateDebevec(int samples,
float lambda,
boolean random)
\brief Creates CalibrateDebevec object
|
static opencv_photo.CalibrateRobertson |
opencv_photo.createCalibrateRobertson() |
static opencv_photo.CalibrateRobertson |
opencv_photo.createCalibrateRobertson(int max_iter,
float threshold)
\brief Creates CalibrateRobertson object
|
static opencv_shape.HistogramCostExtractor |
opencv_shape.createChiHistogramCostExtractor() |
static opencv_shape.HistogramCostExtractor |
opencv_shape.createChiHistogramCostExtractor(int nDummies,
float defaultCost) |
static opencv_imgproc.CLAHE |
opencv_imgproc.createCLAHE() |
static opencv_imgproc.CLAHE |
opencv_imgproc.createCLAHE(double clipLimit,
opencv_core.Size tileGridSize)
\brief Creates a smart pointer to a cv::CLAHE class and initializes it.
|
static opencv_stitching.ExposureCompensator |
opencv_stitching.ExposureCompensator.createDefault(int type) |
static opencv_stitching.Blender |
opencv_stitching.Blender.createDefault(int type) |
static opencv_stitching.Timelapser |
opencv_stitching.Timelapser.createDefault(int type) |
static opencv_stitching.Blender |
opencv_stitching.Blender.createDefault(int type,
boolean try_gpu) |
static opencv_ximgproc.DisparityWLSFilter |
opencv_ximgproc.createDisparityWLSFilter(opencv_calib3d.StereoMatcher matcher_left)
\brief Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant
filter parameters automatically based on the matcher instance.
|
static opencv_ximgproc.DisparityWLSFilter |
opencv_ximgproc.createDisparityWLSFilterGeneric(boolean use_confidence)
\brief More generic factory method, create instance of DisparityWLSFilter and execute basic
initialization routines.
|
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.Mat guide,
double sigmaSpatial,
double sigmaColor) |
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.Mat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
\brief Factory method, create instance of DTFilter and produce initialization routines.
|
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.UMat guide,
double sigmaSpatial,
double sigmaColor) |
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.UMat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters) |
static opencv_ximgproc.EdgeAwareInterpolator |
opencv_ximgproc.createEdgeAwareInterpolator()
\brief Factory method that creates an instance of the
EdgeAwareInterpolator.
|
static opencv_face.BasicFaceRecognizer |
opencv_face.createEigenFaceRecognizer() |
static opencv_face.BasicFaceRecognizer |
opencv_face.createEigenFaceRecognizer(int num_components,
double threshold) |
static opencv_shape.HistogramCostExtractor |
opencv_shape.createEMDHistogramCostExtractor() |
static opencv_shape.HistogramCostExtractor |
opencv_shape.createEMDHistogramCostExtractor(int flag,
int nDummies,
float defaultCost) |
static opencv_shape.HistogramCostExtractor |
opencv_shape.createEMDL1HistogramCostExtractor() |
static opencv_shape.HistogramCostExtractor |
opencv_shape.createEMDL1HistogramCostExtractor(int nDummies,
float defaultCost) |
static opencv_text.ERFilter |
opencv_text.createERFilterNM1(opencv_text.ERFilter.Callback cb) |
static opencv_text.ERFilter |
opencv_text.createERFilterNM1(opencv_text.ERFilter.Callback cb,
int thresholdDelta,
float minArea,
float maxArea,
float minProbability,
boolean nonMaxSuppression,
float minProbabilityDiff)
\brief Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm [Neumann12].
|
static opencv_text.ERFilter |
opencv_text.createERFilterNM2(opencv_text.ERFilter.Callback cb) |
static opencv_text.ERFilter |
opencv_text.createERFilterNM2(opencv_text.ERFilter.Callback cb,
float minProbability)
\brief Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm [Neumann12].
|
static opencv_objdetect.BaseCascadeClassifier.MaskGenerator |
opencv_objdetect.createFaceDetectionMaskGenerator() |
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.Mat guide,
double lambda,
double sigma_color) |
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.Mat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
\brief Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
|
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.UMat guide,
double lambda,
double sigma_color) |
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.UMat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter) |
static opencv_face.BasicFaceRecognizer |
opencv_face.createFisherFaceRecognizer() |
static opencv_face.BasicFaceRecognizer |
opencv_face.createFisherFaceRecognizer(int num_components,
double threshold) |
static opencv_superres.FrameSource |
opencv_superres.createFrameSource_Camera() |
static opencv_superres.FrameSource |
opencv_superres.createFrameSource_Camera(int deviceId) |
static opencv_superres.FrameSource |
opencv_superres.createFrameSource_Empty() |
static opencv_superres.FrameSource |
opencv_superres.createFrameSource_Video_CUDA(BytePointer fileName) |
static opencv_superres.FrameSource |
opencv_superres.createFrameSource_Video_CUDA(String fileName) |
static opencv_superres.FrameSource |
opencv_superres.createFrameSource_Video(BytePointer fileName) |
static opencv_superres.FrameSource |
opencv_superres.createFrameSource_Video(String fileName) |
static opencv_imgproc.GeneralizedHoughBallard |
opencv_imgproc.createGeneralizedHoughBallard()
Ballard, D.H.
|
static opencv_imgproc.GeneralizedHoughGuil |
opencv_imgproc.createGeneralizedHoughGuil()
Guil, N., González-Linares, J.M.
|
static opencv_ximgproc.GraphSegmentation |
opencv_ximgproc.createGraphSegmentation() |
static opencv_ximgproc.GraphSegmentation |
opencv_ximgproc.createGraphSegmentation(double sigma,
float k,
int min_size)
\brief Creates a graph based segmentor
|
static opencv_ximgproc.GuidedFilter |
opencv_ximgproc.createGuidedFilter(opencv_core.Mat guide,
int radius,
double eps)
\brief Factory method, create instance of GuidedFilter and produce initialization routines.
|
static opencv_ximgproc.GuidedFilter |
opencv_ximgproc.createGuidedFilter(opencv_core.UMat guide,
int radius,
double eps) |
static opencv_shape.HausdorffDistanceExtractor |
opencv_shape.createHausdorffDistanceExtractor() |
static opencv_shape.HausdorffDistanceExtractor |
opencv_shape.createHausdorffDistanceExtractor(int distanceFlag,
float rankProp) |
static opencv_dnn.Layer |
opencv_dnn.LayerFactory.createLayerInstance(BytePointer type,
opencv_dnn.LayerParams params)
\brief Creates instance of registered layer.
|
static opencv_dnn.Layer |
opencv_dnn.LayerFactory.createLayerInstance(String type,
opencv_dnn.LayerParams params) |
static opencv_face.LBPHFaceRecognizer |
opencv_face.createLBPHFaceRecognizer() |
static opencv_face.LBPHFaceRecognizer |
opencv_face.createLBPHFaceRecognizer(int radius,
int neighbors,
int grid_x,
int grid_y,
double threshold) |
static opencv_imgproc.LineSegmentDetector |
opencv_imgproc.createLineSegmentDetector() |
static opencv_imgproc.LineSegmentDetector |
opencv_imgproc.createLineSegmentDetector(int _refine,
double _scale,
double _sigma_scale,
double _quant,
double _ang_th,
double _log_eps,
double _density_th,
int _n_bins)
\brief Creates a smart pointer to a LineSegmentDetector object and initializes it.
|
static opencv_photo.MergeDebevec |
opencv_photo.createMergeDebevec()
\brief Creates MergeDebevec object
|
static opencv_photo.MergeMertens |
opencv_photo.createMergeMertens() |
static opencv_photo.MergeMertens |
opencv_photo.createMergeMertens(float contrast_weight,
float saturation_weight,
float exposure_weight)
\brief Creates MergeMertens object
|
static opencv_photo.MergeRobertson |
opencv_photo.createMergeRobertson()
\brief Creates MergeRobertson object
|
static opencv_shape.HistogramCostExtractor |
opencv_shape.createNormHistogramCostExtractor() |
static opencv_shape.HistogramCostExtractor |
opencv_shape.createNormHistogramCostExtractor(int flag,
int nDummies,
float defaultCost) |
static opencv_superres.BroxOpticalFlow |
opencv_superres.createOptFlow_Brox_CUDA() |
static opencv_video.DenseOpticalFlow |
opencv_optflow.createOptFlow_DeepFlow()
\brief DeepFlow optical flow algorithm implementation.
|
static opencv_optflow.DISOpticalFlow |
opencv_optflow.createOptFlow_DIS() |
static opencv_optflow.DISOpticalFlow |
opencv_optflow.createOptFlow_DIS(int preset)
\brief Creates an instance of DISOpticalFlow
|
static opencv_superres.SuperResDualTVL1OpticalFlow |
opencv_superres.createOptFlow_DualTVL1_CUDA() |
static opencv_video.DualTVL1OpticalFlow |
opencv_video.createOptFlow_DualTVL1()
\brief Creates instance of cv::DenseOpticalFlow
|
static opencv_superres.SuperResDualTVL1OpticalFlow |
opencv_superres.createOptFlow_DualTVL1() |
static opencv_superres.SuperResFarnebackOpticalFlow |
opencv_superres.createOptFlow_Farneback_CUDA() |
static opencv_superres.SuperResFarnebackOpticalFlow |
opencv_superres.createOptFlow_Farneback() |
static opencv_video.DenseOpticalFlow |
opencv_optflow.createOptFlow_Farneback()
Additional interface to the Farneback's algorithm - calcOpticalFlowFarneback()
|
static opencv_superres.PyrLKOpticalFlow |
opencv_superres.createOptFlow_PyrLK_CUDA() |
static opencv_video.DenseOpticalFlow |
opencv_optflow.createOptFlow_SimpleFlow()
Additional interface to the SimpleFlow algorithm - calcOpticalFlowSF()
|
static opencv_video.DenseOpticalFlow |
opencv_optflow.createOptFlow_SparseToDense()
Additional interface to the SparseToDenseFlow algorithm - calcOpticalFlowSparseToDense()
|
static opencv_bioinspired.Retina |
opencv_bioinspired.createRetina(opencv_core.Size inputSize)
\relates bioinspired::Retina
\{
|
static opencv_bioinspired.Retina |
opencv_bioinspired.createRetina(opencv_core.Size inputSize,
boolean colorMode) |
static opencv_bioinspired.Retina |
opencv_bioinspired.createRetina(opencv_core.Size inputSize,
boolean colorMode,
int colorSamplingMethod,
boolean useRetinaLogSampling,
float reductionFactor,
float samplingStrenght)
\brief Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance
|
static opencv_bioinspired.RetinaFastToneMapping |
opencv_bioinspired.createRetinaFastToneMapping(opencv_core.Size inputSize)
\relates bioinspired::RetinaFastToneMapping
|
static opencv_ximgproc.RFFeatureGetter |
opencv_ximgproc.createRFFeatureGetter() |
static opencv_calib3d.StereoMatcher |
opencv_ximgproc.createRightMatcher(opencv_calib3d.StereoMatcher matcher_left)
\brief Convenience method to set up the matcher for computing the right-view disparity map
that is required in case of filtering with confidence.
|
static opencv_ximgproc.SelectiveSearchSegmentation |
opencv_ximgproc.createSelectiveSearchSegmentation()
\brief Create a new SelectiveSearchSegmentation class.
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyColor |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyColor()
\brief Create a new color-based strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyFill |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyFill()
\brief Create a new fill-based strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple()
\brief Create a new multiple strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1)
\brief Create a new multiple strategy and set one substrategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2)
\brief Create a new multiple strategy and set two substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3)
\brief Create a new multiple strategy and set three substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s4)
\brief Create a new multiple strategy and set four substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategySize |
opencv_ximgproc.createSelectiveSearchSegmentationStrategySize()
\brief Create a new size-based strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyTexture |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyTexture()
\brief Create a new texture-based strategy
|
static opencv_shape.ShapeContextDistanceExtractor |
opencv_shape.createShapeContextDistanceExtractor() |
static opencv_shape.ShapeContextDistanceExtractor |
opencv_shape.createShapeContextDistanceExtractor(int nAngularBins,
int nRadialBins,
float innerRadius,
float outerRadius,
int iterations,
opencv_shape.HistogramCostExtractor comparer,
opencv_shape.ShapeTransformer transformer) |
static opencv_stitching.Stitcher |
opencv_stitching.createStitcher() |
static opencv_stitching.Stitcher |
opencv_stitching.createStitcher(boolean try_use_gpu) |
static opencv_ximgproc.StructuredEdgeDetection |
opencv_ximgproc.createStructuredEdgeDetection(BytePointer model) |
static opencv_ximgproc.StructuredEdgeDetection |
opencv_ximgproc.createStructuredEdgeDetection(BytePointer model,
opencv_ximgproc.RFFeatureGetter howToGetFeatures)
The only constructor
|
static opencv_ximgproc.StructuredEdgeDetection |
opencv_ximgproc.createStructuredEdgeDetection(String model) |
static opencv_ximgproc.StructuredEdgeDetection |
opencv_ximgproc.createStructuredEdgeDetection(String model,
opencv_ximgproc.RFFeatureGetter howToGetFeatures) |
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.Mat image) |
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.Mat image,
int region_size,
float ratio)
\brief Class implementing the LSC (Linear Spectral Clustering) superpixels
|
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.UMat image) |
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.UMat image,
int region_size,
float ratio) |
static opencv_ximgproc.SuperpixelSEEDS |
opencv_ximgproc.createSuperpixelSEEDS(int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels) |
static opencv_ximgproc.SuperpixelSEEDS |
opencv_ximgproc.createSuperpixelSEEDS(int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels,
int prior,
int histogram_bins,
boolean double_step)
\brief Initializes a SuperpixelSEEDS object.
|
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.Mat image) |
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.Mat image,
int algorithm,
int region_size,
float ruler) |
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.UMat image) |
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.UMat image,
int algorithm,
int region_size,
float ruler) |
static opencv_superres.SuperResolution |
opencv_superres.createSuperResolution_BTVL1_CUDA() |
static opencv_superres.SuperResolution |
opencv_superres.createSuperResolution_BTVL1()
\brief Create Bilateral TV-L1 Super Resolution.
|
static opencv_dnn.Importer |
opencv_dnn.createTensorflowImporter(BytePointer model)
\brief Creates the importer of TensorFlow framework network.
|
static opencv_dnn.Importer |
opencv_dnn.createTensorflowImporter(String model) |
static opencv_shape.ThinPlateSplineShapeTransformer |
opencv_shape.createThinPlateSplineShapeTransformer() |
static opencv_shape.ThinPlateSplineShapeTransformer |
opencv_shape.createThinPlateSplineShapeTransformer(double regularizationParameter)
Complete constructor
|
static opencv_photo.Tonemap |
opencv_photo.createTonemap() |
static opencv_photo.Tonemap |
opencv_photo.createTonemap(float gamma)
\brief Creates simple linear mapper with gamma correction
|
static opencv_photo.TonemapDrago |
opencv_photo.createTonemapDrago() |
static opencv_photo.TonemapDrago |
opencv_photo.createTonemapDrago(float gamma,
float saturation,
float bias)
\brief Creates TonemapDrago object
|
static opencv_photo.TonemapDurand |
opencv_photo.createTonemapDurand() |
static opencv_photo.TonemapDurand |
opencv_photo.createTonemapDurand(float gamma,
float contrast,
float saturation,
float sigma_space,
float sigma_color)
\brief Creates TonemapDurand object
|
static opencv_photo.TonemapMantiuk |
opencv_photo.createTonemapMantiuk() |
static opencv_photo.TonemapMantiuk |
opencv_photo.createTonemapMantiuk(float gamma,
float scale,
float saturation)
\brief Creates TonemapMantiuk object
|
static opencv_photo.TonemapReinhard |
opencv_photo.createTonemapReinhard() |
static opencv_photo.TonemapReinhard |
opencv_photo.createTonemapReinhard(float gamma,
float intensity,
float light_adapt,
float color_adapt)
\brief Creates TonemapReinhard object
|
static opencv_dnn.Importer |
opencv_dnn.createTorchImporter(BytePointer filename) |
static opencv_dnn.Importer |
opencv_dnn.createTorchImporter(BytePointer filename,
boolean isBinary)
\brief Creates the importer of Torch7 framework network.
|
static opencv_dnn.Importer |
opencv_dnn.createTorchImporter(String filename) |
static opencv_dnn.Importer |
opencv_dnn.createTorchImporter(String filename,
boolean isBinary) |
static opencv_bioinspired.TransientAreasSegmentationModule |
opencv_bioinspired.createTransientAreasSegmentationModule(opencv_core.Size inputSize)
\brief allocator
|
static opencv_optflow.VariationalRefinement |
opencv_optflow.createVariationalFlowRefinement()
\brief Creates an instance of VariationalRefinement
|
opencv_text.IntDeque |
opencv_text.ERStat.crossings()
horizontal crossings
|
opencv_videostab.DeblurerBase |
opencv_videostab.StabilizerBase.deblurrer() |
opencv_features2d.Feature2D |
opencv_videostab.KeypointBasedMotionEstimator.detector() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.err() |
opencv_stitching.ExposureCompensator |
opencv_stitching.Stitcher.exposureCompensator() |
opencv_stitching.FeaturesFinder |
opencv_stitching.Stitcher.featuresFinder() |
opencv_stitching.FeaturesMatcher |
opencv_stitching.Stitcher.featuresMatcher() |
opencv_core.Formatted |
opencv_core.Formatter.format(opencv_core.Mat mtx) |
static opencv_core.Formatted |
opencv_core.format(opencv_core.Mat mtx,
int fmt) |
static opencv_core.Formatted |
opencv_core.format(opencv_core.UMat mtx,
int fmt) |
opencv_videostab.IFrameSource |
opencv_videostab.StabilizerBase.frameSource() |
opencv_core.CvFileStorage |
opencv_core.FileStorage.fs()
the underlying C FileStorage structure
|
static opencv_core.Formatter |
opencv_core.Formatter.get() |
static opencv_core.Formatter |
opencv_core.Formatter.get(int fmt) |
opencv_shape.HistogramCostExtractor |
opencv_shape.ShapeContextDistanceExtractor.getCostExtractor() |
opencv_core.MinProblemSolver.Function |
opencv_core.MinProblemSolver.getFunction()
\brief Getter for the optimized function.
|
opencv_dnn.Layer |
opencv_dnn.Net.getLayer(opencv_dnn.DictValue layerId)
\brief Returns pointer to layer with specified name which the network use.
|
opencv_objdetect.BaseCascadeClassifier.MaskGenerator |
opencv_objdetect.BaseCascadeClassifier.getMaskGenerator() |
opencv_objdetect.BaseCascadeClassifier.MaskGenerator |
opencv_objdetect.CascadeClassifier.getMaskGenerator() |
opencv_superres.DenseOpticalFlowExt |
opencv_superres.SuperResolution.getOpticalFlow()
\brief Dense optical flow algorithm
/** @see setOpticalFlow
|
opencv_shape.ShapeTransformer |
opencv_shape.ShapeContextDistanceExtractor.getTransformAlgorithm() |
opencv_videostab.InpainterBase |
opencv_videostab.StabilizerBase.inpainter() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.J() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.JtErr() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.JtJ() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.JtJN() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.JtJV() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.JtJW() |
static opencv_ml.SVM |
opencv_ml.SVM.load(BytePointer filepath)
\brief Loads and creates a serialized svm from a file
Use SVM::save to serialize and store an SVM to disk.
|
static opencv_ml.ANN_MLP |
opencv_ml.ANN_MLP.load(BytePointer filepath)
\brief Loads and creates a serialized ANN from a file
Use ANN::save to serialize and store an ANN to disk.
|
static opencv_ml.SVM |
opencv_ml.SVM.load(String filepath) |
static opencv_ml.ANN_MLP |
opencv_ml.ANN_MLP.load(String filepath) |
static opencv_text.ERFilter.Callback |
opencv_text.loadClassifierNM1(BytePointer filename)
\brief Allow to implicitly load the default classifier when creating an ERFilter object.
|
static opencv_text.ERFilter.Callback |
opencv_text.loadClassifierNM1(String filename) |
static opencv_text.ERFilter.Callback |
opencv_text.loadClassifierNM2(BytePointer filename)
\brief Allow to implicitly load the default classifier when creating an ERFilter object.
|
static opencv_text.ERFilter.Callback |
opencv_text.loadClassifierNM2(String filename) |
static opencv_ml.TrainData |
opencv_ml.TrainData.loadFromCSV(BytePointer filename,
int headerLineCount) |
static opencv_ml.TrainData |
opencv_ml.TrainData.loadFromCSV(BytePointer filename,
int headerLineCount,
int responseStartIdx,
int responseEndIdx,
BytePointer varTypeSpec,
byte delimiter,
byte missch)
\brief Reads the dataset from a .csv file and returns the ready-to-use training data.
|
static opencv_ml.TrainData |
opencv_ml.TrainData.loadFromCSV(String filename,
int headerLineCount) |
static opencv_ml.TrainData |
opencv_ml.TrainData.loadFromCSV(String filename,
int headerLineCount,
int responseStartIdx,
int responseEndIdx,
String varTypeSpec,
byte delimiter,
byte missch) |
static opencv_text.OCRBeamSearchDecoder.ClassifierCallback |
opencv_text.loadOCRBeamSearchClassifierCNN(BytePointer filename)
\brief Allow to implicitly load the default character classifier when creating an OCRBeamSearchDecoder object.
|
static opencv_text.OCRBeamSearchDecoder.ClassifierCallback |
opencv_text.loadOCRBeamSearchClassifierCNN(String filename) |
static opencv_text.OCRHMMDecoder.ClassifierCallback |
opencv_text.loadOCRHMMClassifierCNN(BytePointer filename)
\brief Allow to implicitly load the default character classifier when creating an OCRHMMDecoder object.
|
static opencv_text.OCRHMMDecoder.ClassifierCallback |
opencv_text.loadOCRHMMClassifierCNN(String filename) |
static opencv_text.OCRHMMDecoder.ClassifierCallback |
opencv_text.loadOCRHMMClassifierNM(BytePointer filename)
\brief Allow to implicitly load the default character classifier when creating an OCRHMMDecoder object.
|
static opencv_text.OCRHMMDecoder.ClassifierCallback |
opencv_text.loadOCRHMMClassifierNM(String filename) |
opencv_videostab.ILog |
opencv_videostab.StabilizerBase.log() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.mask() |
opencv_videostab.ImageMotionEstimatorBase |
opencv_videostab.WobbleSuppressorBase.motionEstimator() |
opencv_videostab.ImageMotionEstimatorBase |
opencv_videostab.StabilizerBase.motionEstimator() |
opencv_videostab.MotionFilterBase |
opencv_videostab.OnePassStabilizer.motionFilter() |
opencv_videostab.IMotionStabilizer |
opencv_videostab.TwoPassStabilizer.motionStabilizer() |
opencv_videostab.IDenseOptFlowEstimator |
opencv_videostab.MotionInpainter.optFlowEstimator() |
opencv_videostab.ISparseOptFlowEstimator |
opencv_videostab.KeypointBasedMotionEstimator.opticalFlowEstimator() |
opencv_videostab.IOutlierRejector |
opencv_videostab.KeypointBasedMotionEstimator.outlierRejector() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.param() |
opencv_core.CvMat |
opencv_calib3d.CvLevMarq.prevParam() |
opencv_stitching.SeamFinder |
opencv_stitching.Stitcher.seamFinder() |
opencv_stitching.WarperCreator |
opencv_stitching.Stitcher.warper() |
opencv_videostab.WobbleSuppressorBase |
opencv_videostab.TwoPassStabilizer.wobbleSuppressor() |
| Modifier and Type | Method and Description |
|---|---|
void |
opencv_ximgproc.SelectiveSearchSegmentation.addGraphSegmentation(opencv_ximgproc.GraphSegmentation g)
\brief Add a new graph segmentation in the list of graph segmentations to process.
|
void |
opencv_ximgproc.SelectiveSearchSegmentation.addStrategy(opencv_ximgproc.SelectiveSearchSegmentationStrategy s)
\brief Add a new strategy in the list of strategies to process.
|
void |
opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple.addStrategy(opencv_ximgproc.SelectiveSearchSegmentationStrategy g,
float weight)
\brief Add a new sub-strategy
|
float |
opencv_ml.StatModel.calcError(opencv_ml.TrainData data,
boolean test,
opencv_core.Mat resp)
\brief Computes error on the training or test dataset
|
float |
opencv_ml.StatModel.calcError(opencv_ml.TrainData data,
boolean test,
opencv_core.UMat resp) |
static opencv_core.DownhillSolver |
opencv_core.DownhillSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.Mat initStep,
opencv_core.TermCriteria termcrit)
\brief This function returns the reference to the ready-to-use DownhillSolver object.
|
static opencv_core.ConjGradSolver |
opencv_core.ConjGradSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.TermCriteria termcrit)
\brief This function returns the reference to the ready-to-use ConjGradSolver object.
|
static opencv_core.DownhillSolver |
opencv_core.DownhillSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.UMat initStep,
opencv_core.TermCriteria termcrit) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size)
\brief Creates an instance of the OCRBeamSearchDecoder class.
|
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode)
\brief Creates an instance of the OCRHMMDecoder class.
|
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
BytePointer vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.UMat transition_probabilities_table,
opencv_core.UMat emission_probabilities_table,
int mode) |
static opencv_ximgproc.DisparityWLSFilter |
opencv_ximgproc.createDisparityWLSFilter(opencv_calib3d.StereoMatcher matcher_left)
\brief Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant
filter parameters automatically based on the matcher instance.
|
static opencv_text.ERFilter |
opencv_text.createERFilterNM1(opencv_text.ERFilter.Callback cb) |
static opencv_text.ERFilter |
opencv_text.createERFilterNM1(opencv_text.ERFilter.Callback cb,
int thresholdDelta,
float minArea,
float maxArea,
float minProbability,
boolean nonMaxSuppression,
float minProbabilityDiff)
\brief Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm [Neumann12].
|
static opencv_text.ERFilter |
opencv_text.createERFilterNM2(opencv_text.ERFilter.Callback cb) |
static opencv_text.ERFilter |
opencv_text.createERFilterNM2(opencv_text.ERFilter.Callback cb,
float minProbability)
\brief Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm [Neumann12].
|
static opencv_calib3d.StereoMatcher |
opencv_ximgproc.createRightMatcher(opencv_calib3d.StereoMatcher matcher_left)
\brief Convenience method to set up the matcher for computing the right-view disparity map
that is required in case of filtering with confidence.
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1)
\brief Create a new multiple strategy and set one substrategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2)
\brief Create a new multiple strategy and set two substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2)
\brief Create a new multiple strategy and set two substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3)
\brief Create a new multiple strategy and set three substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3)
\brief Create a new multiple strategy and set three substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3)
\brief Create a new multiple strategy and set three substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s4)
\brief Create a new multiple strategy and set four substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s4)
\brief Create a new multiple strategy and set four substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s4)
\brief Create a new multiple strategy and set four substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
opencv_ximgproc.createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s4)
\brief Create a new multiple strategy and set four substrategies, with equal weights
|
static opencv_shape.ShapeContextDistanceExtractor |
opencv_shape.createShapeContextDistanceExtractor(int nAngularBins,
int nRadialBins,
float innerRadius,
float outerRadius,
int iterations,
opencv_shape.HistogramCostExtractor comparer,
opencv_shape.ShapeTransformer transformer) |
static opencv_shape.ShapeContextDistanceExtractor |
opencv_shape.createShapeContextDistanceExtractor(int nAngularBins,
int nRadialBins,
float innerRadius,
float outerRadius,
int iterations,
opencv_shape.HistogramCostExtractor comparer,
opencv_shape.ShapeTransformer transformer) |
static opencv_ximgproc.StructuredEdgeDetection |
opencv_ximgproc.createStructuredEdgeDetection(BytePointer model,
opencv_ximgproc.RFFeatureGetter howToGetFeatures)
The only constructor
|
static opencv_ximgproc.StructuredEdgeDetection |
opencv_ximgproc.createStructuredEdgeDetection(String model,
opencv_ximgproc.RFFeatureGetter howToGetFeatures) |
static void |
opencv_text.detectRegions(opencv_core.Mat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.PointVectorVector regions) |
static void |
opencv_text.detectRegions(opencv_core.Mat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.PointVectorVector regions) |
static void |
opencv_text.detectRegions(opencv_core.UMat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.PointVectorVector regions) |
static void |
opencv_text.detectRegions(opencv_core.UMat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.PointVectorVector regions) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
float[] repeatability,
int[] correspCount,
opencv_features2d.Feature2D fdetector) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
FloatBuffer repeatability,
IntBuffer correspCount,
opencv_features2d.Feature2D fdetector) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
FloatPointer repeatability,
IntPointer correspCount,
opencv_features2d.Feature2D fdetector)
\} features2d_draw
|
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers,
int flags,
opencv_features2d.Feature2D blobDetector)
\brief Finds centers in the grid of circles.
|
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.UMat image,
opencv_core.Size patternSize,
opencv_core.UMat centers,
int flags,
opencv_features2d.Feature2D blobDetector) |
void |
opencv_face.FaceRecognizer.predict_collect(opencv_core.Mat src,
opencv_face.PredictCollector collector)
\brief If implemented, sends all prediction results to a collector that can be used for custom result handling
|
void |
opencv_face.FaceRecognizer.predict_collect(opencv_core.UMat src,
opencv_face.PredictCollector collector) |
static int |
opencv_core.print(opencv_core.Formatted fmtd) |
static int |
opencv_core.print(opencv_core.Formatted fmtd,
Pointer stream) |
void |
opencv_videostab.MotionStabilizationPipeline.pushBack(opencv_videostab.IMotionStabilizer stabilizer) |
void |
opencv_videostab.InpaintingPipeline.pushBack(opencv_videostab.InpainterBase inpainter) |
void |
opencv_stitching.Stitcher.setBlender(opencv_stitching.Blender b) |
void |
opencv_stitching.Stitcher.setBundleAdjuster(opencv_stitching.BundleAdjusterBase bundle_adjuster) |
void |
opencv_text.ERFilter.setCallback(opencv_text.ERFilter.Callback cb)
set/get methods for the algorithm properties
|
void |
opencv_shape.ShapeContextDistanceExtractor.setCostExtractor(opencv_shape.HistogramCostExtractor comparer)
\brief Set the algorithm used for building the shape context descriptor cost matrix.
|
void |
opencv_ml.SVM.setCustomKernel(opencv_ml.SVM.Kernel _kernel)
Initialize with custom kernel.
|
void |
opencv_videostab.StabilizerBase.setDeblurer(opencv_videostab.DeblurerBase val) |
void |
opencv_videostab.KeypointBasedMotionEstimator.setDetector(opencv_features2d.Feature2D val) |
void |
opencv_stitching.Stitcher.setExposureCompensator(opencv_stitching.ExposureCompensator exposure_comp) |
void |
opencv_stitching.Stitcher.setFeaturesFinder(opencv_stitching.FeaturesFinder features_finder) |
void |
opencv_stitching.Stitcher.setFeaturesMatcher(opencv_stitching.FeaturesMatcher features_matcher) |
void |
opencv_videostab.StabilizerBase.setFrameSource(opencv_videostab.IFrameSource val) |
void |
opencv_core.MinProblemSolver.setFunction(opencv_core.MinProblemSolver.Function f)
\brief Setter for the optimized function.
|
void |
opencv_videostab.StabilizerBase.setInpainter(opencv_videostab.InpainterBase val) |
void |
opencv_superres.SuperResolution.setInput(opencv_superres.FrameSource frameSource)
\brief Set input frame source for Super Resolution algorithm.
|
void |
opencv_videostab.StabilizerBase.setLog(opencv_videostab.ILog ilog) |
void |
opencv_objdetect.BaseCascadeClassifier.setMaskGenerator(opencv_objdetect.BaseCascadeClassifier.MaskGenerator maskGenerator) |
void |
opencv_objdetect.CascadeClassifier.setMaskGenerator(opencv_objdetect.BaseCascadeClassifier.MaskGenerator maskGenerator) |
void |
opencv_videostab.WobbleSuppressorBase.setMotionEstimator(opencv_videostab.ImageMotionEstimatorBase val) |
void |
opencv_videostab.StabilizerBase.setMotionEstimator(opencv_videostab.ImageMotionEstimatorBase val) |
void |
opencv_videostab.OnePassStabilizer.setMotionFilter(opencv_videostab.MotionFilterBase val) |
void |
opencv_videostab.TwoPassStabilizer.setMotionStabilizer(opencv_videostab.IMotionStabilizer val) |
void |
opencv_videostab.MotionInpainter.setOptFlowEstimator(opencv_videostab.IDenseOptFlowEstimator val) |
void |
opencv_superres.SuperResolution.setOpticalFlow(opencv_superres.DenseOpticalFlowExt val)
Set the dense optical flow algorithm; see getOpticalFlow for details.
|
void |
opencv_videostab.KeypointBasedMotionEstimator.setOpticalFlowEstimator(opencv_videostab.ISparseOptFlowEstimator val) |
void |
opencv_videostab.KeypointBasedMotionEstimator.setOutlierRejector(opencv_videostab.IOutlierRejector val) |
void |
opencv_stitching.Stitcher.setSeamFinder(opencv_stitching.SeamFinder seam_finder) |
void |
opencv_shape.ShapeContextDistanceExtractor.setTransformAlgorithm(opencv_shape.ShapeTransformer transformer)
Set the algorithm used for aligning the shapes.
|
void |
opencv_stitching.Stitcher.setWarper(opencv_stitching.WarperCreator creator) |
void |
opencv_videostab.TwoPassStabilizer.setWobbleSuppressor(opencv_videostab.WobbleSuppressorBase val) |
static BytePointer |
opencv_core.shiftLeft(BytePointer out,
opencv_core.Formatted fmtd) |
static String |
opencv_core.shiftLeft(String out,
opencv_core.Formatted fmtd) |
boolean |
opencv_ml.StatModel.train(opencv_ml.TrainData trainData) |
boolean |
opencv_ml.StatModel.train(opencv_ml.TrainData trainData,
int flags)
Trains the statistical model.
|
boolean |
opencv_ml.SVM.trainAuto(opencv_ml.TrainData data) |
boolean |
opencv_ml.SVM.trainAuto(opencv_ml.TrainData data,
int kFold,
opencv_ml.ParamGrid Cgrid,
opencv_ml.ParamGrid gammaGrid,
opencv_ml.ParamGrid pGrid,
opencv_ml.ParamGrid nuGrid,
opencv_ml.ParamGrid coeffGrid,
opencv_ml.ParamGrid degreeGrid,
boolean balanced)
Trains an SVM with optimal parameters.
|
| Modifier and Type | Method and Description |
|---|---|
static opencv_ml.ANN_MLP |
opencv_ml.AbstractStatModel.loadANN_MLP(BytePointer filename,
BytePointer objname) |
static opencv_ml.ANN_MLP |
opencv_ml.AbstractStatModel.loadANN_MLP(String filename,
String objname) |
static opencv_ml.Boost |
opencv_ml.AbstractStatModel.loadBoost(BytePointer filename,
BytePointer objname) |
static opencv_ml.Boost |
opencv_ml.AbstractStatModel.loadBoost(String filename,
String objname) |
static opencv_ml.DTrees |
opencv_ml.AbstractStatModel.loadDTrees(BytePointer filename,
BytePointer objname) |
static opencv_ml.DTrees |
opencv_ml.AbstractStatModel.loadDTrees(String filename,
String objname) |
static opencv_ml.EM |
opencv_ml.AbstractStatModel.loadEM(BytePointer filename,
BytePointer objname) |
static opencv_ml.EM |
opencv_ml.AbstractStatModel.loadEM(String filename,
String objname) |
static opencv_ml.KNearest |
opencv_ml.AbstractStatModel.loadKNearest(BytePointer filename,
BytePointer objname) |
static opencv_ml.KNearest |
opencv_ml.AbstractStatModel.loadKNearest(String filename,
String objname) |
static opencv_ml.LogisticRegression |
opencv_ml.AbstractStatModel.loadLogisticRegression(BytePointer filename,
BytePointer objname) |
static opencv_ml.LogisticRegression |
opencv_ml.AbstractStatModel.loadLogisticRegression(String filename,
String objname) |
static opencv_ml.NormalBayesClassifier |
opencv_ml.AbstractStatModel.loadNormalBayesClassifier(BytePointer filename,
BytePointer objname) |
static opencv_ml.NormalBayesClassifier |
opencv_ml.AbstractStatModel.loadNormalBayesClassifier(String filename,
String objname) |
static opencv_ml.RTrees |
opencv_ml.AbstractStatModel.loadRTrees(BytePointer filename,
BytePointer objname) |
static opencv_ml.RTrees |
opencv_ml.AbstractStatModel.loadRTrees(String filename,
String objname) |
static opencv_ml.SVM |
opencv_ml.AbstractStatModel.loadSVM(BytePointer filename,
BytePointer objname) |
static opencv_ml.SVM |
opencv_ml.AbstractStatModel.loadSVM(String filename,
String objname) |
Copyright © 2017. All rights reserved.