Image Processing with OpenCV in Java: Complete Guide

This comprehensive guide covers image processing techniques using OpenCV in Java, from basic operations to advanced computer vision algorithms.

Project Setup and Dependencies

Maven Configuration

<!-- pom.xml -->
<!-- Centralized versions; Java 17 is required by the examples in this guide. -->
<properties>
<opencv.version>4.8.0</opencv.version>
<maven.compiler.source>17</maven.compiler.source>
<maven.compiler.target>17</maven.compiler.target>
</properties>
<dependencies>
<!-- org.openpnp:opencv bundles prebuilt native libraries, so no manual
     native install is needed; loaded via nu.pattern.OpenCV.loadLocally(). -->
<dependency>
<groupId>org.openpnp</groupId>
<artifactId>opencv</artifactId>
<version>${opencv.version}</version>
</dependency>
<!-- For image I/O beyond OpenCV -->
<dependency>
<groupId>com.twelvemonkeys.imageio</groupId>
<artifactId>imageio-core</artifactId>
<version>3.10.1</version>
</dependency>
<!-- For logging -->
<!-- slf4j-simple binds SLF4J to stderr; swap for logback in production. -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>2.0.9</version>
</dependency>
</dependencies>

OpenCV Initialization

package com.example.opencv;
import org.opencv.core.Core;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Loads the OpenCV native library exactly once per JVM.
 *
 * <p>The static initializer runs the first time this class is referenced, so
 * callers only need to touch the class (e.g. {@code OpenCVInitializer.checkOpenCV()})
 * before using any {@code org.opencv} API. If loading fails, class
 * initialization fails and an {@link ExceptionInInitializerError} wrapping the
 * {@link RuntimeException} is thrown.
 */
public class OpenCVInitializer {
private static final Logger logger = LoggerFactory.getLogger(OpenCVInitializer.class);
static {
// Runs once, on first class use; any later use of this class is a no-op.
loadOpenCV();
}
private static void loadOpenCV() {
try {
// Method 1: Using OpenCV loader (recommended)
// loadLocally() extracts the natives bundled in org.openpnp:opencv to a
// temp dir and System.load()s them — no java.library.path setup needed.
nu.pattern.OpenCV.loadLocally();
logger.info("OpenCV loaded successfully: {}", Core.VERSION);
// Method 2: Manual loading
// System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
} catch (Exception e) {
logger.error("Failed to load OpenCV: {}", e.getMessage());
throw new RuntimeException("OpenCV initialization failed", e);
}
}
/** Logs the loaded OpenCV version and full build configuration (useful for bug reports). */
public static void checkOpenCV() {
logger.info("OpenCV version: {}", Core.VERSION);
logger.info("OpenCV build info: {}", Core.getBuildInformation());
}
}

Core Image Processing Operations

Basic Image Operations

package com.example.opencv.operations;
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
public class BasicImageOperations {
    private static final Logger logger = LoggerFactory.getLogger(BasicImageOperations.class);

    /**
     * Loads an image from disk in BGR order (OpenCV default).
     *
     * @param filePath path to an image file readable by {@link Imgcodecs#imread}
     * @return the decoded image
     * @throws IllegalArgumentException if the file is missing or not a decodable image
     *         (imread signals failure by returning an empty Mat, not by throwing)
     */
    public static Mat loadImage(String filePath) {
        Mat image = Imgcodecs.imread(filePath);
        if (image.empty()) {
            throw new IllegalArgumentException("Could not load image: " + filePath);
        }
        logger.info("Loaded image: {}x{} channels: {}",
                image.width(), image.height(), image.channels());
        return image;
    }

    /**
     * Saves an image to disk; the format is inferred from the file extension.
     *
     * @return true on success, false otherwise (imwrite does not throw on failure)
     */
    public static boolean saveImage(Mat image, String filePath) {
        boolean success = Imgcodecs.imwrite(filePath, image);
        if (success) {
            logger.info("Image saved: {}", filePath);
        } else {
            logger.error("Failed to save image: {}", filePath);
        }
        return success;
    }

    /**
     * Converts between color spaces.
     *
     * @param conversionCode one of the {@code Imgproc.COLOR_*} constants
     */
    public static Mat convertColorSpace(Mat src, int conversionCode) {
        Mat dst = new Mat();
        Imgproc.cvtColor(src, dst, conversionCode);
        return dst;
    }

    /**
     * Resizes an image to an explicit target size.
     *
     * @param interpolation one of the {@code Imgproc.INTER_*} constants
     */
    public static Mat resizeImage(Mat src, Size newSize, int interpolation) {
        Mat dst = new Mat();
        Imgproc.resize(src, dst, newSize, 0, 0, interpolation);
        return dst;
    }

    /**
     * Crops an image. NOTE: the returned Mat is a view sharing data with
     * {@code src}; call {@code clone()} on the result if an independent copy is needed.
     */
    public static Mat cropImage(Mat src, Rect roi) {
        return new Mat(src, roi);
    }

    /**
     * Rotates an image around its center.
     *
     * <p>Fix: the scale parameter was previously declared with the non-existent
     * type {@code Scale}; {@link Imgproc#getRotationMatrix2D} takes a plain double.
     *
     * @param angle rotation angle in degrees (positive = counter-clockwise)
     * @param scale isotropic scale factor (1.0 = no scaling)
     */
    public static Mat rotateImage(Mat src, double angle, double scale) {
        Point center = new Point(src.width() / 2.0, src.height() / 2.0);
        Mat rotationMatrix = Imgproc.getRotationMatrix2D(center, angle, scale);
        Mat dst = new Mat();
        // Output keeps the source size, so corners may be clipped for large angles.
        Imgproc.warpAffine(src, dst, rotationMatrix, src.size());
        return dst;
    }

    /**
     * Flips an image.
     *
     * @param flipCode 0 = vertical flip, positive = horizontal, negative = both
     */
    public static Mat flipImage(Mat src, int flipCode) {
        Mat dst = new Mat();
        Core.flip(src, dst, flipCode);
        return dst;
    }
}

Color Manipulation

package com.example.opencv.operations;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
 * Color-space and intensity transformations.
 *
 * <p>Fixes vs. the original listing: added the missing {@code java.util} imports
 * and the {@code logger} field that {@code colorBalance} referenced; repaired
 * {@code colorBalance}, which sorted a reshape view of the channel in place
 * (scrambling the pixel order), could index past the end of the sorted row, and
 * divided by zero on flat channels.
 */
public class ColorOperations {
    private static final Logger logger = LoggerFactory.getLogger(ColorOperations.class);

    /**
     * Converts to single-channel grayscale. 3-channel input is assumed BGR,
     * 4-channel BGRA; 1-channel input is copied through unchanged.
     */
    public static Mat toGrayscale(Mat src) {
        Mat gray = new Mat();
        if (src.channels() == 3) {
            Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);
        } else if (src.channels() == 4) {
            Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGRA2GRAY);
        } else {
            src.copyTo(gray);
        }
        return gray;
    }

    /**
     * Linear brightness/contrast: dst = alpha * src + beta, saturating to the
     * source depth (type -1 keeps the input type).
     *
     * @param alpha contrast gain (1.0 = unchanged)
     * @param beta  brightness offset added after scaling
     */
    public static Mat adjustBrightnessContrast(Mat src, double alpha, double beta) {
        Mat dst = new Mat();
        src.convertTo(dst, -1, alpha, beta);
        return dst;
    }

    /**
     * Pixel-wise gamma correction via a 256-entry lookup table:
     * out = (in/255)^gamma * 255. gamma &lt; 1 brightens, gamma &gt; 1 darkens.
     * The LUT is applied to every channel; input must be 8-bit.
     */
    public static Mat gammaCorrection(Mat src, double gamma) {
        Mat lookupTable = new Mat(1, 256, CvType.CV_8U);
        byte[] lookupData = new byte[256];
        for (int i = 0; i < 256; i++) {
            // The byte cast is intentional: values 128..255 wrap to negative
            // bytes, which Mat.put stores as the correct unsigned 8-bit values.
            lookupData[i] = (byte) (Math.pow(i / 255.0, gamma) * 255.0);
        }
        lookupTable.put(0, 0, lookupData);
        Mat dst = new Mat();
        Core.LUT(src, lookupTable, dst);
        return dst;
    }

    /**
     * Histogram equalization. Grayscale input is equalized directly; color
     * input is equalized per channel. NOTE: per-channel equalization of BGR can
     * shift hues — equalizing only the luma of YCrCb avoids that, but this keeps
     * the original per-channel behavior.
     */
    public static Mat histogramEqualization(Mat src) {
        Mat dst = new Mat();
        if (src.channels() == 1) {
            // Grayscale image
            Imgproc.equalizeHist(src, dst);
        } else {
            // Color image - equalize each channel separately
            List<Mat> channels = new ArrayList<>();
            Core.split(src, channels);
            for (int i = 0; i < channels.size(); i++) {
                Imgproc.equalizeHist(channels.get(i), channels.get(i));
            }
            Core.merge(channels, dst);
        }
        return dst;
    }

    /**
     * Simple color balance: stretches each channel so that the given
     * percentile of dark pixels maps to 0 and the symmetric bright percentile
     * maps to 255.
     *
     * @param percent percentage of pixels to clip at each end (e.g. 1.0)
     * @return balanced image, or {@code src} itself for non-3-channel input
     */
    public static Mat colorBalance(Mat src, double percent) {
        if (src.channels() != 3) return src;
        Mat dst = new Mat();
        List<Mat> channels = new ArrayList<>();
        Core.split(src, channels);
        for (int i = 0; i < channels.size(); i++) {
            // Sort a COPY of the flattened channel: reshape() shares data with
            // the channel, so sorting in place would scramble the image.
            Mat flat = channels.get(i).reshape(1, 1);
            Mat sorted = new Mat();
            Core.sort(flat, sorted, Core.SORT_ASCENDING);
            int total = sorted.cols();
            int lowIdx = (int) (total * percent / 100.0);
            // Clamp: total * (1 - p/100) can equal total for percent == 0.
            int highIdx = Math.min(total - 1, (int) (total * (1.0 - percent / 100.0)));
            int lowVal = (int) sorted.get(0, lowIdx)[0];
            int highVal = (int) sorted.get(0, highIdx)[0];
            if (highVal <= lowVal) {
                // Flat channel: nothing to stretch, and scaling would divide by zero.
                logger.debug("Channel {} is flat (value {}), skipping stretch", i, lowVal);
                continue;
            }
            // Stretch histogram
            Mat channel = channels.get(i);
            Core.subtract(channel, new Scalar(lowVal), channel);
            Core.multiply(channel, new Scalar(255.0 / (highVal - lowVal)), channel);
            Core.MinMaxLocResult mm = Core.minMaxLoc(channel);
            logger.debug("Channel {} range: {} - {}", i, mm.minVal, mm.maxVal);
        }
        Core.merge(channels, dst);
        return dst;
    }

    /**
     * Applies a sepia tone via a 3x3 linear transform of the BGR channels.
     * Returns {@code src} unchanged for non-3-channel input.
     */
    public static Mat applySepia(Mat src) {
        if (src.channels() != 3) return src;
        Mat kernel = new Mat(3, 3, CvType.CV_32F);
        // Sepia transformation matrix (rows produce output B, G, R; input is BGR,
        // so coefficients are the classic sepia weights in reversed channel order).
        kernel.put(0, 0, 0.272, 0.534, 0.131);
        kernel.put(1, 0, 0.349, 0.686, 0.168);
        kernel.put(2, 0, 0.393, 0.769, 0.189);
        Mat dst = new Mat();
        Imgproc.transform(src, dst, kernel);
        return dst;
    }
}

Filtering and Convolution

package com.example.opencv.operations;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
public class FilterOperations {

    /**
     * Gaussian smoothing with a square kernel.
     *
     * @param kernelSize requested kernel side; even values are bumped to the
     *                   next odd number, as GaussianBlur requires odd sizes
     * @param sigma      Gaussian standard deviation in both directions
     */
    public static Mat gaussianBlur(Mat src, int kernelSize, double sigma) {
        int size = (kernelSize % 2 == 0) ? kernelSize + 1 : kernelSize;
        Mat result = new Mat();
        Imgproc.GaussianBlur(src, result, new Size(size, size), sigma);
        return result;
    }

    /**
     * Median smoothing — good at removing salt-and-pepper noise while keeping
     * edges. Even kernel sizes are bumped to the next odd number.
     */
    public static Mat medianBlur(Mat src, int kernelSize) {
        int size = (kernelSize % 2 == 0) ? kernelSize + 1 : kernelSize;
        Mat result = new Mat();
        Imgproc.medianBlur(src, result, size);
        return result;
    }

    /**
     * Edge-preserving bilateral filter.
     *
     * @param d          pixel neighborhood diameter
     * @param sigmaColor filter sigma in color space
     * @param sigmaSpace filter sigma in coordinate space
     */
    public static Mat bilateralFilter(Mat src, int d, double sigmaColor, double sigmaSpace) {
        Mat result = new Mat();
        Imgproc.bilateralFilter(src, result, d, sigmaColor, sigmaSpace);
        return result;
    }

    /** Convolves the image with an arbitrary kernel, keeping the source depth (-1). */
    public static Mat customConvolution(Mat src, Mat kernel) {
        Mat result = new Mat();
        Imgproc.filter2D(src, result, -1, kernel);
        return result;
    }

    /** Factory methods for commonly used 3x3 convolution kernels. */
    public static class Kernels {

        // Builds a 3x3 CV_32F kernel from nine row-major coefficients.
        private static Mat kernel3x3(double... values) {
            Mat k = new Mat(3, 3, CvType.CV_32F);
            k.put(0, 0, values);
            return k;
        }

        /** Sharpening kernel (identity plus Laplacian). */
        public static Mat sharpen() {
            return kernel3x3(
                     0, -1,  0,
                    -1,  5, -1,
                     0, -1,  0);
        }

        /** Horizontal-gradient Sobel kernel. */
        public static Mat sobelX() {
            return kernel3x3(
                    -1, 0, 1,
                    -2, 0, 2,
                    -1, 0, 1);
        }

        /** Vertical-gradient Sobel kernel. */
        public static Mat sobelY() {
            return kernel3x3(
                    -1, -2, -1,
                     0,  0,  0,
                     1,  2,  1);
        }

        /** Emboss kernel (diagonal relief effect). */
        public static Mat emboss() {
            return kernel3x3(
                    -2, -1, 0,
                    -1,  1, 1,
                     0,  1, 2);
        }
    }

    /**
     * Unsharp masking: dst = (1 + amount) * src - amount * blur(src).
     *
     * @param amount sharpening strength (0 = no change)
     */
    public static Mat unsharpMask(Mat src, double amount, int kernelSize, double sigma) {
        Mat softened = gaussianBlur(src, kernelSize, sigma);
        Mat sharpened = new Mat();
        Core.addWeighted(src, 1.0 + amount, softened, -amount, 0, sharpened);
        return sharpened;
    }
}

Edge Detection and Feature Extraction

Edge Detection Methods

package com.example.opencv.features;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import com.example.opencv.operations.ColorOperations;
import java.util.ArrayList;
import java.util.List;
/**
 * Edge-detection utilities. All methods accept color or grayscale input and
 * convert to grayscale internally.
 *
 * <p>Fixes vs. the original listing: added the missing {@code java.util} and
 * cross-package {@code ColorOperations} imports; removed the unsafe in-place
 * {@code pyrDown(current, current)} call; edge maps are now upscaled with
 * nearest-neighbor interpolation so they stay binary.
 */
public class EdgeDetection {
    /**
     * Canny edge detection with the more accurate L2 gradient norm.
     *
     * @param threshold1   lower hysteresis threshold
     * @param threshold2   upper hysteresis threshold
     * @param apertureSize Sobel aperture (3, 5, or 7)
     * @return single-channel binary edge map
     */
    public static Mat cannyEdges(Mat src, double threshold1, double threshold2, int apertureSize) {
        Mat gray = ColorOperations.toGrayscale(src);
        Mat edges = new Mat();
        Imgproc.Canny(gray, edges, threshold1, threshold2, apertureSize, true);
        return edges;
    }

    /**
     * Sobel gradient magnitude in one direction.
     *
     * @param dx order of the x derivative (0 or 1 typically)
     * @param dy order of the y derivative
     * @param ksize Sobel kernel size
     * @return 8-bit absolute gradient image (computed in CV_16S to avoid clipping
     *         negative gradients, then converted)
     */
    public static Mat sobelEdges(Mat src, int dx, int dy, int ksize) {
        Mat gray = ColorOperations.toGrayscale(src);
        Mat edges = new Mat();
        Imgproc.Sobel(gray, edges, CvType.CV_16S, dx, dy, ksize);
        Core.convertScaleAbs(edges, edges);
        return edges;
    }

    /**
     * Laplacian edge detection (second derivative), returned as an 8-bit
     * absolute-value image.
     */
    public static Mat laplacianEdges(Mat src, int ksize) {
        Mat gray = ColorOperations.toGrayscale(src);
        Mat edges = new Mat();
        Imgproc.Laplacian(gray, edges, CvType.CV_16S, ksize);
        Core.convertScaleAbs(edges, edges);
        return edges;
    }

    /**
     * Detects edges at three pyramid scales and merges them (pixel-wise max)
     * at the original resolution, capturing both fine and coarse structure.
     */
    public static Mat multiScaleEdges(Mat src) {
        List<Mat> edgePyramid = new ArrayList<>();
        // Detect edges at multiple scales
        Mat current = src.clone();
        for (int i = 0; i < 3; i++) {
            Mat edges = cannyEdges(current, 50, 150, 3);
            edgePyramid.add(edges);
            // pyrDown into a fresh Mat: in-place pyrDown(current, current)
            // aliases source and destination of a size-changing operation.
            Mat down = new Mat();
            Imgproc.pyrDown(current, down);
            current = down;
        }
        // Combine edges from different scales
        Mat combined = new Mat(edgePyramid.get(0).size(), CvType.CV_8U, new Scalar(0));
        for (int i = 0; i < edgePyramid.size(); i++) {
            Mat resized = new Mat();
            // Nearest-neighbor keeps the upscaled edge map binary (bilinear
            // would introduce intermediate gray values).
            Imgproc.resize(edgePyramid.get(i), resized, combined.size(), 0, 0,
                    Imgproc.INTER_NEAREST);
            Core.max(combined, resized, combined);
        }
        return combined;
    }
}

Feature Detection

package com.example.opencv.features;
import org.opencv.core.*;
import org.opencv.features2d.*;
import org.opencv.imgproc.Imgproc;
import com.example.opencv.operations.ColorOperations;
import java.util.ArrayList;
import java.util.List;
/**
 * Keypoint detection, description, and matching.
 *
 * <p>Fixes vs. the original listing: the Java binding of
 * {@link DescriptorMatcher#match} takes a {@link MatOfDMatch}, not a
 * {@code List<DMatch>}; the draw flag is {@code Features2d.DRAW_RICH_KEYPOINTS}
 * in OpenCV 4.x; and the match filter now uses a min-distance heuristic — the
 * original compared integer Hamming distances against 0.7 (which rejects
 * everything) while mislabeling it "Lowe's ratio test" (a true ratio test
 * requires {@code knnMatch} with k=2).
 */
public class FeatureDetection {
    /**
     * Harris corner response map, normalized to an 8-bit image for display.
     *
     * @param blockSize neighborhood size for the covariance matrix
     * @param ksize     Sobel aperture
     * @param k         Harris detector free parameter (typically 0.04-0.06)
     */
    public static Mat harrisCorners(Mat src, int blockSize, int ksize, double k) {
        Mat gray = ColorOperations.toGrayscale(src);
        Mat corners = new Mat();
        Mat cornersNorm = new Mat();
        Imgproc.cornerHarris(gray, corners, blockSize, ksize, k);
        Core.normalize(corners, cornersNorm, 0, 255, Core.NORM_MINMAX, CvType.CV_8U);
        return cornersNorm;
    }

    /**
     * ORB keypoints + binary descriptors (match with BRUTEFORCE_HAMMING).
     *
     * @param maxFeatures maximum number of features to retain
     */
    public static Features orbFeatures(Mat src, int maxFeatures) {
        Mat gray = ColorOperations.toGrayscale(src);
        ORB orb = ORB.create(maxFeatures);
        MatOfKeyPoint keypoints = new MatOfKeyPoint();
        Mat descriptors = new Mat();
        orb.detectAndCompute(gray, new Mat(), keypoints, descriptors);
        return new Features(keypoints, descriptors);
    }

    /**
     * SIFT keypoints + float descriptors (match with BRUTEFORCE / L2).
     * SIFT is in the main module since OpenCV 4.4.
     */
    public static Features siftFeatures(Mat src, int nFeatures) {
        Mat gray = ColorOperations.toGrayscale(src);
        SIFT sift = SIFT.create(nFeatures);
        MatOfKeyPoint keypoints = new MatOfKeyPoint();
        Mat descriptors = new Mat();
        sift.detectAndCompute(gray, new Mat(), keypoints, descriptors);
        return new Features(keypoints, descriptors);
    }

    /**
     * Matches two descriptor sets and filters the result.
     *
     * @param matcherType 1 for Hamming (binary descriptors like ORB),
     *                    anything else for L2 brute force (SIFT)
     * @return matches whose distance is within 2x the best match distance
     *         (common heuristic; tune per application)
     */
    public static List<DMatch> matchFeatures(Features features1, Features features2,
            int matcherType) {
        DescriptorMatcher matcher;
        if (matcherType == 1) {
            matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
        } else {
            matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
        }
        // The Java binding collects matches into a MatOfDMatch, not a List.
        MatOfDMatch matchMat = new MatOfDMatch();
        matcher.match(features1.descriptors, features2.descriptors, matchMat);
        List<DMatch> matches = matchMat.toList();
        if (matches.isEmpty()) {
            return matches;
        }
        // Min-distance filter: keep matches no worse than twice the best one
        // (with a small floor so a perfect best match doesn't reject everything).
        double minDist = Double.MAX_VALUE;
        for (DMatch match : matches) {
            minDist = Math.min(minDist, match.distance);
        }
        double threshold = Math.max(2 * minDist, 30.0);
        List<DMatch> goodMatches = new ArrayList<>();
        for (DMatch match : matches) {
            if (match.distance <= threshold) {
                goodMatches.add(match);
            }
        }
        return goodMatches;
    }

    /**
     * Draws keypoints (with size and orientation) on a copy of the image.
     */
    public static Mat drawKeypoints(Mat src, MatOfKeyPoint keypoints) {
        Mat result = src.clone();
        // OpenCV 4.x Java exposes the flag as Features2d.DRAW_RICH_KEYPOINTS.
        Features2d.drawKeypoints(src, keypoints, result,
                new Scalar(0, 255, 0), Features2d.DRAW_RICH_KEYPOINTS);
        return result;
    }

    /**
     * Immutable pair of keypoints and their descriptors.
     */
    public static class Features {
        public final MatOfKeyPoint keypoints;
        public final Mat descriptors;
        public Features(MatOfKeyPoint keypoints, Mat descriptors) {
            this.keypoints = keypoints;
            this.descriptors = descriptors;
        }
    }
}

Image Segmentation

Thresholding Methods

package com.example.opencv.segmentation;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
public class Thresholding {
/**
* Simple thresholding
*/
public static Mat simpleThreshold(Mat src, double thresholdValue, double maxValue, int type) {
Mat gray = ColorOperations.toGrayscale(src);
Mat dst = new Mat();
Imgproc.threshold(gray, dst, thresholdValue, maxValue, type);
return dst;
}
/**
* Adaptive thresholding
*/
public static Mat adaptiveThreshold(Mat src, double maxValue, int adaptiveMethod, 
int thresholdType, int blockSize, double C) {
Mat gray = ColorOperations.toGrayscale(src);
Mat dst = new Mat();
Imgproc.adaptiveThreshold(gray, dst, maxValue, adaptiveMethod, thresholdType, blockSize, C);
return dst;
}
/**
* Otsu's thresholding
*/
public static Mat otsuThreshold(Mat src) {
Mat gray = ColorOperations.toGrayscale(src);
Mat dst = new Mat();
Imgproc.threshold(gray, dst, 0, 255, Imgproc.THRESH_BINARY + Imgproc.THRESH_OTSU);
return dst;
}
/**
* Multi-level thresholding
*/
public static Mat multiLevelThreshold(Mat src, double[] thresholds, double[] values) {
if (thresholds.length != values.length - 1) {
throw new IllegalArgumentException("Thresholds and values length mismatch");
}
Mat gray = ColorOperations.toGrayscale(src);
Mat dst = Mat.zeros(gray.size(), CvType.CV_8U);
for (int i = 0; i <= thresholds.length; i++) {
double lower = (i == 0) ? 0 : thresholds[i-1];
double upper = (i == thresholds.length) ? 255 : thresholds[i];
double value = values[i];
Mat mask = new Mat();
Core.inRange(gray, new Scalar(lower), new Scalar(upper), mask);
dst.setTo(new Scalar(value), mask);
}
return dst;
}
}

Contour Detection

package com.example.opencv.segmentation;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import com.example.opencv.operations.ColorOperations;
import com.example.opencv.operations.FilterOperations;
import com.example.opencv.features.EdgeDetection;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
/**
 * Contour extraction, measurement, and simple shape classification.
 *
 * <p>Fixes vs. the original listing: added the missing cross-package imports,
 * and corrected the circle test — the old formula 4*pi*area/(w*h) used the
 * bounding-box area instead of the squared perimeter, evaluating to ~pi^2 for a
 * perfect circle and classifying nearly every blob as a circle. The test now
 * measures how well the contour fills its minimum enclosing circle.
 */
public class ContourDetection {
    /**
     * Finds contours in a binary image.
     *
     * @param mode   retrieval mode, e.g. {@code Imgproc.RETR_EXTERNAL}
     * @param method approximation, e.g. {@code Imgproc.CHAIN_APPROX_SIMPLE}
     */
    public static List<MatOfPoint> findContours(Mat binaryImage, int mode, int method) {
        List<MatOfPoint> contours = new ArrayList<>();
        Mat hierarchy = new Mat();
        Imgproc.findContours(binaryImage, contours, hierarchy, mode, method);
        return contours;
    }

    /**
     * Draws contours on a copy of the image.
     *
     * @param contourIndex index to draw, or -1 for all contours
     */
    public static Mat drawContours(Mat src, List<MatOfPoint> contours, int contourIndex,
            Scalar color, int thickness) {
        Mat result = src.clone();
        Imgproc.drawContours(result, contours, contourIndex, color, thickness);
        return result;
    }

    /**
     * Keeps only contours whose area lies in [minArea, maxArea].
     */
    public static List<MatOfPoint> filterContoursByArea(List<MatOfPoint> contours,
            double minArea, double maxArea) {
        List<MatOfPoint> filtered = new ArrayList<>();
        for (MatOfPoint contour : contours) {
            double area = Imgproc.contourArea(contour);
            if (area >= minArea && area <= maxArea) {
                filtered.add(contour);
            }
        }
        return filtered;
    }

    /**
     * Computes area, polygonal approximation (epsilon = 2% of perimeter),
     * bounding rectangle, and minimum enclosing circle for a contour.
     */
    public static ContourProperties getContourProperties(MatOfPoint contour) {
        double area = Imgproc.contourArea(contour);
        MatOfPoint2f contour2f = new MatOfPoint2f(contour.toArray());
        MatOfPoint2f approx = new MatOfPoint2f();
        // Approximate contour
        double epsilon = 0.02 * Imgproc.arcLength(contour2f, true);
        Imgproc.approxPolyDP(contour2f, approx, epsilon, true);
        // Bounding rectangle
        Rect boundingRect = Imgproc.boundingRect(contour);
        // Minimum enclosing circle
        Point center = new Point();
        float[] radius = new float[1];
        Imgproc.minEnclosingCircle(contour2f, center, radius);
        return new ContourProperties(area, approx, boundingRect, center, radius[0]);
    }

    /**
     * Detects basic shapes (triangle, square, rectangle, circle, polygon) in an
     * image via edge detection + contour classification.
     */
    public static List<DetectedShape> detectShapes(Mat src) {
        Mat gray = ColorOperations.toGrayscale(src);
        Mat blurred = FilterOperations.gaussianBlur(gray, 5, 1.5);
        Mat edges = EdgeDetection.cannyEdges(blurred, 50, 150, 3);
        List<MatOfPoint> contours = findContours(edges, Imgproc.RETR_EXTERNAL,
                Imgproc.CHAIN_APPROX_SIMPLE);
        List<DetectedShape> shapes = new ArrayList<>();
        for (MatOfPoint contour : contours) {
            ContourProperties props = getContourProperties(contour);
            String shape = classifyShape(props);
            if (!"unknown".equals(shape)) {
                shapes.add(new DetectedShape(shape, props.boundingRect, props.area));
            }
        }
        return shapes;
    }

    // Classifies by vertex count of the polygonal approximation; >4 vertices
    // are split into circle vs. generic polygon by roundness.
    private static String classifyShape(ContourProperties props) {
        int vertices = props.approx.rows();
        if (vertices == 3) {
            return "triangle";
        } else if (vertices == 4) {
            // Check if it's a square or rectangle
            Rect rect = props.boundingRect;
            double aspectRatio = (double) rect.width / rect.height;
            if (aspectRatio >= 0.95 && aspectRatio <= 1.05) {
                return "square";
            } else {
                return "rectangle";
            }
        } else if (vertices > 4) {
            // Roundness = contour area / area of its minimum enclosing circle.
            // ~1.0 for a circle, ~0.64 for a square; threshold at 0.8.
            double circleArea = Math.PI * props.radius * props.radius;
            double roundness = (circleArea > 0) ? props.area / circleArea : 0;
            if (roundness > 0.8) {
                return "circle";
            } else {
                return "polygon";
            }
        }
        return "unknown";
    }

    /**
     * Immutable bundle of contour measurements.
     */
    public static class ContourProperties {
        public final double area;
        public final MatOfPoint2f approx;
        public final Rect boundingRect;
        public final Point center;       // center of the minimum enclosing circle
        public final double radius;      // radius of the minimum enclosing circle
        public ContourProperties(double area, MatOfPoint2f approx, Rect boundingRect,
                Point center, double radius) {
            this.area = area;
            this.approx = approx;
            this.boundingRect = boundingRect;
            this.center = center;
            this.radius = radius;
        }
    }

    /**
     * Result of shape detection: label, location, and size.
     */
    public static class DetectedShape {
        public final String shape;
        public final Rect boundingBox;
        public final double area;
        public DetectedShape(String shape, Rect boundingBox, double area) {
            this.shape = shape;
            this.boundingBox = boundingBox;
            this.area = area;
        }
    }
}

Morphological Operations

package com.example.opencv.operations;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
public class MorphologicalOperations {

    /**
     * Builds a structuring element.
     *
     * @param shape MORPH_RECT, MORPH_ELLIPSE, or MORPH_CROSS
     * @param size  element dimensions
     */
    public static Mat getStructuringElement(int shape, Size size) {
        return Imgproc.getStructuringElement(shape, size);
    }

    // Shared driver for the single-pass morphologyEx wrappers below.
    private static Mat applyMorphology(Mat src, int operation, Mat kernel) {
        Mat result = new Mat();
        Imgproc.morphologyEx(src, result, operation, kernel);
        return result;
    }

    /** Erodes bright regions; the anchor (-1,-1) means the kernel center. */
    public static Mat erode(Mat src, Mat kernel, int iterations) {
        Mat result = new Mat();
        Imgproc.erode(src, result, kernel, new Point(-1, -1), iterations);
        return result;
    }

    /** Dilates bright regions; the anchor (-1,-1) means the kernel center. */
    public static Mat dilate(Mat src, Mat kernel, int iterations) {
        Mat result = new Mat();
        Imgproc.dilate(src, result, kernel, new Point(-1, -1), iterations);
        return result;
    }

    /** Opening (erosion then dilation): removes small bright speckles. */
    public static Mat opening(Mat src, Mat kernel) {
        return applyMorphology(src, Imgproc.MORPH_OPEN, kernel);
    }

    /** Closing (dilation then erosion): fills small dark holes. */
    public static Mat closing(Mat src, Mat kernel) {
        return applyMorphology(src, Imgproc.MORPH_CLOSE, kernel);
    }

    /** Morphological gradient (dilation - erosion): outlines region boundaries. */
    public static Mat morphologicalGradient(Mat src, Mat kernel) {
        return applyMorphology(src, Imgproc.MORPH_GRADIENT, kernel);
    }

    /** Top hat (src - opening): isolates bright details smaller than the kernel. */
    public static Mat topHat(Mat src, Mat kernel) {
        return applyMorphology(src, Imgproc.MORPH_TOPHAT, kernel);
    }

    /** Black hat (closing - src): isolates dark details smaller than the kernel. */
    public static Mat blackHat(Mat src, Mat kernel) {
        return applyMorphology(src, Imgproc.MORPH_BLACKHAT, kernel);
    }

    /**
     * Denoises a binary image: opening removes isolated bright noise, then
     * closing fills small holes, both with the same elliptical element.
     */
    public static Mat removeNoise(Mat binaryImage, int kernelSize) {
        Mat element = getStructuringElement(Imgproc.MORPH_ELLIPSE,
                new Size(kernelSize, kernelSize));
        return closing(opening(binaryImage, element), element);
    }
}

Advanced Image Processing

Image Pyramids

package com.example.opencv.advanced;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import java.util.ArrayList;
import java.util.List;
public class PyramidOperations {
    /**
     * Builds a Gaussian pyramid: level 0 is a clone of the source, each later
     * level is the previous one blurred and downsampled to half size.
     *
     * @param levels total number of levels (including the full-resolution one)
     */
    public static List<Mat> buildGaussianPyramid(Mat src, int levels) {
        List<Mat> pyramid = new ArrayList<>();
        pyramid.add(src.clone());
        Mat current = src.clone();
        for (int i = 1; i < levels; i++) {
            Mat down = new Mat();
            Imgproc.pyrDown(current, down);
            pyramid.add(down);
            current = down;
        }
        return pyramid;
    }

    /**
     * Builds a Laplacian pyramid: each of the first {@code levels - 1} entries
     * is the band-pass residual (level minus its upsampled downsample); the last
     * entry is the coarsest Gaussian level, which makes reconstruction possible.
     *
     * <p>NOTE: for 8-bit input the subtraction saturates negatives to 0; convert
     * to CV_16S or CV_32F first when a lossless pyramid is required.
     */
    public static List<Mat> buildLaplacianPyramid(Mat src, int levels) {
        List<Mat> pyramid = new ArrayList<>();
        Mat current = src.clone();
        for (int i = 0; i < levels - 1; i++) {
            Mat down = new Mat();
            Mat up = new Mat();
            // Downsample
            Imgproc.pyrDown(current, down);
            // Upsample and subtract to get Laplacian (explicit size handles odd dimensions)
            Imgproc.pyrUp(down, up, current.size());
            Mat laplacian = new Mat();
            Core.subtract(current, up, laplacian);
            pyramid.add(laplacian);
            current = down;
        }
        // Add the final Gaussian level
        pyramid.add(current);
        return pyramid;
    }

    /**
     * Reconstructs the full-resolution image from a Laplacian pyramid by
     * repeatedly upsampling and adding the residual of each finer level.
     *
     * <p>Fix: the original wrote the sum into {@code current}, which on the
     * first iteration aliased the caller's coarsest pyramid level — mutating the
     * input list. All results are now written to fresh Mats.
     */
    public static Mat reconstructFromLaplacianPyramid(List<Mat> pyramid) {
        Mat current = pyramid.get(pyramid.size() - 1).clone();
        for (int i = pyramid.size() - 2; i >= 0; i--) {
            Mat up = new Mat();
            Imgproc.pyrUp(current, up, pyramid.get(i).size());
            Mat next = new Mat();
            Core.add(up, pyramid.get(i), next);
            current = next;
        }
        return current;
    }
}

Image Blending

package com.example.opencv.advanced;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import java.util.ArrayList;
import java.util.List;
/**
 * Image blending utilities.
 *
 * <p>Fixes vs. the original listing: added the missing {@code ArrayList}
 * import, and corrected the pyramid blend — the old code computed
 * {@code (p1 + p2) * mask} instead of the mask-weighted combination
 * {@code p1 * mask + p2 * (1 - mask)}, and merged a Mat into itself while it
 * was still a source of the merge.
 */
public class ImageBlending {
    /**
     * Global alpha blend: dst = alpha * src1 + (1 - alpha) * src2.
     * Inputs must have the same size and type.
     */
    public static Mat alphaBlend(Mat src1, Mat src2, double alpha) {
        Mat dst = new Mat();
        Core.addWeighted(src1, alpha, src2, 1 - alpha, 0, dst);
        return dst;
    }

    /**
     * Multi-band (Laplacian pyramid) blending for seamless composites.
     *
     * @param mask single- or multi-channel 8-bit mask: 255 selects src1, 0
     *             selects src2, intermediate values blend proportionally
     * @param levels number of pyramid levels
     */
    public static Mat pyramidBlend(Mat src1, Mat src2, Mat mask, int levels) {
        // Build pyramids
        List<Mat> pyramid1 = PyramidOperations.buildLaplacianPyramid(src1, levels);
        List<Mat> pyramid2 = PyramidOperations.buildLaplacianPyramid(src2, levels);
        List<Mat> maskPyramid = PyramidOperations.buildGaussianPyramid(mask, levels);
        // Blend each level as p1 * m + p2 * (1 - m), with m normalized to [0, 1].
        List<Mat> blendedPyramid = new ArrayList<>();
        for (int i = 0; i < levels; i++) {
            Mat a = new Mat();
            Mat b = new Mat();
            pyramid1.get(i).convertTo(a, CvType.CV_32F);
            pyramid2.get(i).convertTo(b, CvType.CV_32F);
            // Normalize the 8-bit mask to [0, 1] floats.
            Mat weight = new Mat();
            maskPyramid.get(i).convertTo(weight, CvType.CV_32F, 1.0 / 255.0);
            // Replicate a single-channel mask across the image channels
            // (merge into a FRESH Mat — never into a Mat that is also a source).
            if (weight.channels() == 1 && a.channels() > 1) {
                List<Mat> replicated = new ArrayList<>();
                for (int j = 0; j < a.channels(); j++) {
                    replicated.add(weight);
                }
                Mat merged = new Mat();
                Core.merge(replicated, merged);
                weight = merged;
            }
            // Scalar.all(1.0) fills every channel; Mat.ones would set only channel 0.
            Mat inverse = new Mat();
            Core.subtract(new Mat(weight.size(), weight.type(), Scalar.all(1.0)),
                    weight, inverse);
            Mat blendedF = new Mat();
            Core.add(a.mul(weight), b.mul(inverse), blendedF);
            Mat blended = new Mat();
            blendedF.convertTo(blended, pyramid1.get(i).type());
            blendedPyramid.add(blended);
        }
        // Collapse the blended pyramid back to a full-resolution image.
        return PyramidOperations.reconstructFromLaplacianPyramid(blendedPyramid);
    }
}

Practical Examples

Document Scanner

package com.example.opencv.applications;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import com.example.opencv.operations.ColorOperations;
import com.example.opencv.operations.FilterOperations;
import com.example.opencv.features.EdgeDetection;
import com.example.opencv.segmentation.ContourDetection;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
/**
 * Detects a document (largest quadrilateral) in a photo and rectifies it with
 * a perspective transform.
 *
 * <p>Fixes vs. the original listing: {@code findDocumentContour} referenced
 * {@code src}, which was not in scope (compile error) — the image size is now
 * passed explicitly; the cross-package helper imports were also missing.
 */
public class DocumentScanner {
    /**
     * Scans a document: edge-detect, find the dominant quadrilateral, and warp
     * it to a fronto-parallel rectangle.
     *
     * @return the rectified document, or the original image if none is found
     */
    public static Mat scanDocument(Mat src) {
        // Preprocess image
        Mat gray = ColorOperations.toGrayscale(src);
        Mat blurred = FilterOperations.gaussianBlur(gray, 5, 1.5);
        Mat edges = EdgeDetection.cannyEdges(blurred, 50, 150, 3);
        // Find contours
        List<MatOfPoint> contours = ContourDetection.findContours(
                edges, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
        // Find the largest quadrilateral (document)
        MatOfPoint documentContour = findDocumentContour(contours, src.size());
        if (documentContour == null) {
            return src; // Return original if no document found
        }
        // Apply perspective transformation
        return applyPerspectiveCorrection(src, documentContour);
    }

    /**
     * Returns the first quadrilateral contour covering at least 10% of the
     * image, or null if none qualifies.
     *
     * @param imageSize size of the source image (used for the area floor)
     */
    private static MatOfPoint findDocumentContour(List<MatOfPoint> contours, Size imageSize) {
        // Filter by area and find quadrilateral
        double minArea = imageSize.width * imageSize.height * 0.1;
        List<MatOfPoint> largeContours = ContourDetection.filterContoursByArea(
                contours, minArea, Double.MAX_VALUE);
        for (MatOfPoint contour : largeContours) {
            MatOfPoint2f contour2f = new MatOfPoint2f(contour.toArray());
            MatOfPoint2f approx = new MatOfPoint2f();
            double epsilon = 0.02 * Imgproc.arcLength(contour2f, true);
            Imgproc.approxPolyDP(contour2f, approx, epsilon, true);
            if (approx.rows() == 4) {
                return new MatOfPoint(approx.toArray());
            }
        }
        return null;
    }

    // Warps the quadrilateral to an axis-aligned rectangle sized by the longer
    // of its opposite edges.
    private static Mat applyPerspectiveCorrection(Mat src, MatOfPoint documentContour) {
        Point[] corners = documentContour.toArray();
        // Order corners: top-left, top-right, bottom-right, bottom-left
        corners = orderCorners(corners);
        // Output width = max of the two horizontal edge lengths
        double widthA = Math.sqrt(Math.pow(corners[2].x - corners[3].x, 2) +
                Math.pow(corners[2].y - corners[3].y, 2));
        double widthB = Math.sqrt(Math.pow(corners[1].x - corners[0].x, 2) +
                Math.pow(corners[1].y - corners[0].y, 2));
        double maxWidth = Math.max(widthA, widthB);
        // Output height = max of the two vertical edge lengths
        double heightA = Math.sqrt(Math.pow(corners[1].x - corners[2].x, 2) +
                Math.pow(corners[1].y - corners[2].y, 2));
        double heightB = Math.sqrt(Math.pow(corners[0].x - corners[3].x, 2) +
                Math.pow(corners[0].y - corners[3].y, 2));
        double maxHeight = Math.max(heightA, heightB);
        // Define destination points
        MatOfPoint2f dstPoints = new MatOfPoint2f(
                new Point(0, 0),
                new Point(maxWidth - 1, 0),
                new Point(maxWidth - 1, maxHeight - 1),
                new Point(0, maxHeight - 1)
        );
        // Apply perspective transformation
        MatOfPoint2f srcPoints = new MatOfPoint2f(corners);
        Mat transform = Imgproc.getPerspectiveTransform(srcPoints, dstPoints);
        Mat result = new Mat();
        Imgproc.warpPerspective(src, result, transform,
                new Size(maxWidth, maxHeight));
        return result;
    }

    // Returns corners ordered top-left, top-right, bottom-right, bottom-left.
    private static Point[] orderCorners(Point[] corners) {
        // Sort by y-coordinate: first two are the top edge, last two the bottom.
        List<Point> points = new ArrayList<>(List.of(corners));
        points.sort(Comparator.comparingDouble(p -> p.y));
        Point[] top = new Point[] { points.get(0), points.get(1) };
        Point[] bottom = new Point[] { points.get(2), points.get(3) };
        // Sort top points by x-coordinate
        if (top[0].x > top[1].x) {
            Point temp = top[0];
            top[0] = top[1];
            top[1] = temp;
        }
        // Sort bottom points by x-coordinate
        if (bottom[0].x > bottom[1].x) {
            Point temp = bottom[0];
            bottom[0] = bottom[1];
            bottom[1] = temp;
        }
        // Clockwise order: TL, TR, BR, BL.
        return new Point[] { top[0], top[1], bottom[1], bottom[0] };
    }
}

Performance Optimization

Benchmarking and Optimization

package com.example.opencv.utils;
import org.opencv.core.Mat;
import org.opencv.core.Rect;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Timing and tiling helpers for image-processing pipelines.
 *
 * <p>Fixes vs. the original listing: {@code Rect} was used without an import,
 * and the timing log used {@code {:.2f}} — SLF4J placeholders are plain
 * {@code {}} and do not support printf-style format specs, so the literal text
 * was printed instead of the duration.
 */
public class PerformanceUtils {
    private static final Logger logger = LoggerFactory.getLogger(PerformanceUtils.class);

    /**
     * Runs an operation and logs its wall-clock duration in milliseconds.
     *
     * @param operationName label used in the log line
     * @param operation     the work to time
     * @return the operation's result
     */
    public static <T> T measureTime(String operationName, Operation<T> operation) {
        long startTime = System.nanoTime();
        T result = operation.execute();
        long endTime = System.nanoTime();
        double durationMs = (endTime - startTime) / 1_000_000.0;
        // SLF4J only supports bare {} placeholders; format the double ourselves.
        logger.info("{} completed in {} ms", operationName, String.format("%.2f", durationMs));
        return result;
    }

    /**
     * Processes a large image tile-by-tile to bound peak memory use.
     * Edge tiles are clipped to the image bounds, so the processor must accept
     * tiles smaller than {@code tileSize}.
     *
     * <p>NOTE: the processor is expected to return a tile of the same size;
     * purely per-pixel operations qualify, neighborhood filters will show seams
     * at tile borders unless tiles overlap.
     *
     * @param tileSize tile side length in pixels
     */
    public static Mat processInTiles(Mat src, TileProcessor processor, int tileSize) {
        Mat dst = src.clone();
        for (int y = 0; y < src.rows(); y += tileSize) {
            for (int x = 0; x < src.cols(); x += tileSize) {
                int width = Math.min(tileSize, src.cols() - x);
                int height = Math.min(tileSize, src.rows() - y);
                Rect roi = new Rect(x, y, width, height);
                Mat tile = new Mat(src, roi);
                Mat processedTile = processor.processTile(tile);
                // submat(roi) is a view into dst, so this writes in place.
                processedTile.copyTo(dst.submat(roi));
            }
        }
        return dst;
    }

    /**
     * A deferred computation whose execution is timed by {@link #measureTime}.
     */
    @FunctionalInterface
    public interface Operation<T> {
        T execute();
    }

    /**
     * Transforms one image tile; must return a Mat of the same size as its input.
     */
    @FunctionalInterface
    public interface TileProcessor {
        Mat processTile(Mat tile);
    }
}

Conclusion

This comprehensive OpenCV image processing guide in Java covers:

Key Areas:

  • Basic image operations and color manipulation
  • Advanced filtering and convolution
  • Edge detection and feature extraction
  • Image segmentation and contour analysis
  • Morphological operations
  • Advanced techniques like pyramid processing

Best Practices:

  1. Always check if Mat is empty before processing
  2. Release Mat objects when no longer needed
  3. Use appropriate data types (CV_8U, CV_32F, etc.)
  4. Consider performance for real-time applications
  5. Handle different color spaces appropriately

Performance Tips:

  • Use Core.inRange() for multiple thresholding
  • Precompute kernels for repeated operations
  • Process images in tiles for large images
  • Use Core.LUT() for pixel-wise transformations
  • Consider using GPU acceleration for complex operations

OpenCV in Java provides a powerful combination for image processing applications, from simple filters to complex computer vision systems.

Leave a Reply

Your email address will not be published. Required fields are marked *


Macro Nepal Helper