Introduction to Zero-Copy
Zero-copy is a technique that eliminates unnecessary data copying between memory spaces, significantly improving performance in I/O-intensive applications. Java provides several mechanisms for zero-copy data transfer, particularly for file and network operations.
Table of Contents
- Traditional vs Zero-Copy File Transfer
- FileChannel Transfer Methods
- Memory-Mapped Files
- Direct Buffers
- Network Zero-Copy
- NIO.2 Zero-Copy Operations
- Performance Benchmarks
- Best Practices
Traditional vs Zero-Copy File Transfer
Traditional File Copy
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.*;
public class TraditionalFileCopy {

    /**
     * Stream-based copy. Every 8 KB chunk is copied kernel -> JVM heap -> kernel,
     * which is the baseline the zero-copy techniques improve on.
     */
    public static void traditionalStreamCopy(Path source, Path target) throws IOException {
        long startTime = System.nanoTime();
        try (InputStream in = Files.newInputStream(source);
             OutputStream out = Files.newOutputStream(target)) {
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                out.write(buffer, 0, bytesRead);
            }
        }
        long endTime = System.nanoTime();
        System.out.printf("Traditional stream copy: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    /**
     * Buffered stream copy (slightly better): the Buffered* wrappers reduce the
     * number of system calls, but the data still travels through the Java heap.
     */
    public static void bufferedStreamCopy(Path source, Path target) throws IOException {
        long startTime = System.nanoTime();
        try (BufferedInputStream in = new BufferedInputStream(Files.newInputStream(source));
             BufferedOutputStream out = new BufferedOutputStream(Files.newOutputStream(target))) {
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                out.write(buffer, 0, bytesRead);
            }
        }
        long endTime = System.nanoTime();
        System.out.printf("Buffered stream copy: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    /**
     * NIO channel copy with heap buffers (still copies between user and kernel
     * space).
     *
     * Fixes over the original version: the buffer is now a heap buffer as the
     * surrounding text describes (it used allocateDirect), and partial writes
     * no longer lose data -- FileChannel.write() is not guaranteed to drain the
     * buffer in one call, so we loop until it is empty before clearing it.
     */
    public static void nioChannelCopy(Path source, Path target) throws IOException {
        long startTime = System.nanoTime();
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                     StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            ByteBuffer buffer = ByteBuffer.allocate(8192);
            while (sourceChannel.read(buffer) != -1) {
                buffer.flip();
                // Drain the buffer completely; a single write() may be partial.
                while (buffer.hasRemaining()) {
                    targetChannel.write(buffer);
                }
                buffer.clear();
            }
        }
        long endTime = System.nanoTime();
        System.out.printf("NIO channel copy: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    public static void main(String[] args) throws IOException {
        // Create test files
        Path source = Paths.get("test_source.dat");
        Path target1 = Paths.get("test_target1.dat");
        Path target2 = Paths.get("test_target2.dat");
        Path target3 = Paths.get("test_target3.dat");
        // Create a 100MB test file
        createTestFile(source, 100 * 1024 * 1024);
        // Test different copy methods
        traditionalStreamCopy(source, target1);
        bufferedStreamCopy(source, target2);
        nioChannelCopy(source, target3);
        // Cleanup
        Files.deleteIfExists(source);
        Files.deleteIfExists(target1);
        Files.deleteIfExists(target2);
        Files.deleteIfExists(target3);
    }

    /**
     * Creates a file of exactly {@code size} zero bytes (a fresh direct buffer
     * is zero-filled). Loops on write() because a single call may write fewer
     * bytes than requested, which would have corrupted the bytesWritten count.
     */
    private static void createTestFile(Path path, long size) throws IOException {
        try (FileChannel channel = FileChannel.open(path,
                StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            ByteBuffer buffer = ByteBuffer.allocateDirect(8192);
            long bytesWritten = 0;
            while (bytesWritten < size) {
                buffer.clear();
                int bytesToWrite = (int) Math.min(buffer.capacity(), size - bytesWritten);
                buffer.limit(bytesToWrite);
                while (buffer.hasRemaining()) { // guard against partial writes
                    channel.write(buffer);
                }
                bytesWritten += bytesToWrite;
            }
        }
    }
}
FileChannel Transfer Methods
Zero-Copy File Transfer
import java.io.*;
import java.nio.channels.FileChannel;
import java.nio.file.*;
public class ZeroCopyFileTransfer {

    /**
     * Copies a file with FileChannel.transferTo(), which lets the OS move data
     * directly between file descriptors (e.g. sendfile) without routing it
     * through the JVM heap.
     *
     * Fix: transferTo() may transfer fewer bytes than requested, so we loop
     * until the whole file has been moved instead of trusting a single call.
     */
    public static void transferToCopy(Path source, Path target) throws IOException {
        long startTime = System.nanoTime();
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                     StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            long count = sourceChannel.size();
            long position = 0;
            while (position < count) {
                position += sourceChannel.transferTo(position, count - position, targetChannel);
            }
            System.out.printf("Transferred: %d bytes%n", position);
        }
        long endTime = System.nanoTime();
        System.out.printf("transferTo zero-copy: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    /**
     * Same as {@link #transferToCopy} but driven from the target side via
     * transferFrom(). Also loops, because transferFrom() may move fewer bytes
     * than requested in a single call.
     */
    public static void transferFromCopy(Path source, Path target) throws IOException {
        long startTime = System.nanoTime();
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                     StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            long count = sourceChannel.size();
            long position = 0;
            while (position < count) {
                position += targetChannel.transferFrom(sourceChannel, position, count - position);
            }
            System.out.printf("Transferred: %d bytes%n", position);
        }
        long endTime = System.nanoTime();
        System.out.printf("transferFrom zero-copy: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    /**
     * Chunked transferTo for very large files: some platforms cap how much a
     * single transferTo can move, so we advance in caller-sized chunks.
     */
    public static void chunkedTransferTo(Path source, Path target, long chunkSize)
            throws IOException {
        long startTime = System.nanoTime();
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                     StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            long fileSize = sourceChannel.size();
            long position = 0;
            while (position < fileSize) {
                long remaining = fileSize - position;
                long bytesToTransfer = Math.min(chunkSize, remaining);
                long transferred = sourceChannel.transferTo(position, bytesToTransfer, targetChannel);
                position += transferred;
                System.out.printf("Transferred chunk: %d bytes, total: %d/%d%n",
                        transferred, position, fileSize);
            }
        }
        long endTime = System.nanoTime();
        System.out.printf("Chunked transferTo: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    /**
     * Zero-copy transfer that reports progress after each 64 MB chunk.
     * Fix: on InterruptedException the interrupt status is restored before
     * bailing out, so callers can still observe the interruption.
     */
    public static void transferWithProgress(Path source, Path target,
            ProgressListener listener) throws IOException {
        long fileSize = Files.size(source);
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                     StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            long position = 0;
            long chunkSize = 64 * 1024 * 1024; // 64MB chunks
            while (position < fileSize) {
                long remaining = fileSize - position;
                long bytesToTransfer = Math.min(chunkSize, remaining);
                long transferred = sourceChannel.transferTo(position, bytesToTransfer, targetChannel);
                position += transferred;
                if (listener != null) {
                    int progress = (int) ((position * 100) / fileSize);
                    listener.onProgress(progress, position, fileSize);
                }
                // Small delay to see progress (remove in production)
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore interrupt status
                    break;
                }
            }
        }
    }

    /** Callback invoked after each transferred chunk. */
    @FunctionalInterface
    public interface ProgressListener {
        void onProgress(int percent, long bytesTransferred, long totalBytes);
    }

    public static void main(String[] args) throws IOException {
        Path source = Paths.get("large_source.dat");
        Path target1 = Paths.get("target1.dat");
        Path target2 = Paths.get("target2.dat");
        Path target3 = Paths.get("target3.dat");
        // Create a 500MB test file
        createLargeTestFile(source, 500 * 1024 * 1024);
        System.out.println("=== Zero-Copy File Transfer Benchmarks ===");
        transferToCopy(source, target1);
        transferFromCopy(source, target2);
        System.out.println("\n=== Chunked Transfer ===");
        chunkedTransferTo(source, target3, 128 * 1024 * 1024); // 128MB chunks
        System.out.println("\n=== Transfer with Progress ===");
        transferWithProgress(source, Paths.get("target4.dat"),
                (percent, bytes, total) -> {
                    System.out.printf("Progress: %d%% (%d/%d bytes)%n", percent, bytes, total);
                });
        // Cleanup
        Files.deleteIfExists(source);
        Files.deleteIfExists(target1);
        Files.deleteIfExists(target2);
        Files.deleteIfExists(target3);
        Files.deleteIfExists(Paths.get("target4.dat"));
    }

    /**
     * Creates a test file filled with the repeating byte pattern 0..255.
     * ByteBuffer is fully qualified because this snippet's import list omits
     * java.nio.ByteBuffer (the original did not compile). Writes are looped to
     * survive partial write() calls.
     */
    private static void createLargeTestFile(Path path, long size) throws IOException {
        System.out.printf("Creating test file of size: %d MB%n", size / (1024 * 1024));
        try (FileChannel channel = FileChannel.open(path,
                StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            java.nio.ByteBuffer buffer = java.nio.ByteBuffer.allocateDirect(64 * 1024); // 64KB buffer
            long bytesWritten = 0;
            while (bytesWritten < size) {
                buffer.clear();
                int bytesToWrite = (int) Math.min(buffer.capacity(), size - bytesWritten);
                buffer.limit(bytesToWrite);
                // Fill with pattern for verification
                for (int i = 0; i < bytesToWrite; i++) {
                    buffer.put((byte) (i % 256));
                }
                buffer.flip();
                while (buffer.hasRemaining()) { // guard against partial writes
                    channel.write(buffer);
                }
                bytesWritten += bytesToWrite;
                if (bytesWritten % (10 * 1024 * 1024) == 0) {
                    System.out.printf("Created: %d MB%n", bytesWritten / (1024 * 1024));
                }
            }
        }
        System.out.println("Test file created successfully");
    }
}
Memory-Mapped Files
Memory-Mapped File Operations
import java.io.*;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.*;
import java.util.Random;
public class MemoryMappedFiles {

    /**
     * Reads an entire file through a read-only memory mapping and prints the
     * first/last bytes. Note: a single MappedByteBuffer (and the byte[] copy)
     * limits this approach to files under 2 GB.
     */
    public static void memoryMappedRead(Path filePath) throws IOException {
        long startTime = System.nanoTime();
        try (FileChannel channel = FileChannel.open(filePath, StandardOpenOption.READ)) {
            long fileSize = channel.size();
            MappedByteBuffer buffer = channel.map(
                    FileChannel.MapMode.READ_ONLY, 0, fileSize);
            // Read from memory-mapped buffer
            byte[] data = new byte[(int) fileSize];
            buffer.get(data);
            // Verify first and last bytes
            System.out.printf("First byte: %d, Last byte: %d%n",
                    data[0] & 0xFF, data[data.length - 1] & 0xFF);
        }
        long endTime = System.nanoTime();
        System.out.printf("Memory-mapped read: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    /**
     * Fills a file of {@code size} random bytes by writing directly into a
     * READ_WRITE mapping, then forces the pages to disk.
     */
    public static void memoryMappedWrite(Path filePath, long size) throws IOException {
        long startTime = System.nanoTime();
        try (FileChannel channel = FileChannel.open(filePath,
                StandardOpenOption.READ, StandardOpenOption.WRITE,
                StandardOpenOption.CREATE)) {
            MappedByteBuffer buffer = channel.map(
                    FileChannel.MapMode.READ_WRITE, 0, size);
            // Write data directly to memory-mapped buffer
            Random random = new Random();
            for (long i = 0; i < size; i++) {
                buffer.put((byte) random.nextInt(256));
            }
            // Force changes to disk
            buffer.force();
        }
        long endTime = System.nanoTime();
        System.out.printf("Memory-mapped write: %d ms%n",
                (endTime - startTime) / 1_000_000);
    }

    /**
     * Processes a large file chunk-by-chunk via paired memory mappings.
     *
     * Fix: FileChannel.map(READ_WRITE, ...) requires a channel that is open
     * for BOTH reading and writing; the original opened the output channel
     * with WRITE|CREATE only, which throws NonReadableChannelException.
     */
    public static void processLargeFile(Path inputPath, Path outputPath) throws IOException {
        long fileSize = Files.size(inputPath);
        long chunkSize = 256 * 1024 * 1024; // 256MB chunks
        try (FileChannel inputChannel = FileChannel.open(inputPath, StandardOpenOption.READ);
             FileChannel outputChannel = FileChannel.open(outputPath,
                     StandardOpenOption.READ, StandardOpenOption.WRITE,
                     StandardOpenOption.CREATE)) {
            long position = 0;
            while (position < fileSize) {
                long remaining = fileSize - position;
                long mapSize = Math.min(chunkSize, remaining);
                // Map input chunk
                MappedByteBuffer inputBuffer = inputChannel.map(
                        FileChannel.MapMode.READ_ONLY, position, mapSize);
                // Map output chunk
                MappedByteBuffer outputBuffer = outputChannel.map(
                        FileChannel.MapMode.READ_WRITE, position, mapSize);
                // Process data (simple XOR transformation for binary data)
                processChunk(inputBuffer, outputBuffer);
                // Force output to disk
                outputBuffer.force();
                position += mapSize;
                System.out.printf("Processed chunk: %d/%d bytes%n", position, fileSize);
            }
        }
    }

    /** XORs every input byte with 0x55 into the output mapping. */
    private static void processChunk(MappedByteBuffer input, MappedByteBuffer output) {
        while (input.hasRemaining()) {
            byte b = input.get();
            output.put((byte) (b ^ 0x55)); // Simple transformation
        }
    }

    /**
     * Shared memory between processes (conceptual): both sides map the same
     * backing file.
     *
     * Fix: now implements Closeable -- the original was used in a
     * try-with-resources statement in main() without implementing it, which
     * does not compile.
     */
    public static class SharedMemoryProcessor implements Closeable {
        private final Path sharedFile;
        private MappedByteBuffer buffer;
        private FileChannel channel;

        public SharedMemoryProcessor(Path sharedFile, long size) throws IOException {
            this.sharedFile = sharedFile;
            this.channel = FileChannel.open(sharedFile,
                    StandardOpenOption.READ, StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE);
            this.buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, size);
        }

        /** Writes {@code data} at {@code offset} and flushes it to disk. */
        public void writeData(byte[] data, int offset) {
            buffer.position(offset);
            buffer.put(data);
            buffer.force(); // Ensure data is written to disk
        }

        /** Reads {@code length} bytes starting at {@code offset}. */
        public byte[] readData(int offset, int length) {
            buffer.position(offset);
            byte[] data = new byte[length];
            buffer.get(data);
            return data;
        }

        @Override
        public void close() throws IOException {
            if (channel != null && channel.isOpen()) {
                channel.close();
            }
        }
    }

    /**
     * Naive pattern search over a read-only mapping.
     *
     * @return the offset of the first occurrence, or -1 if absent.
     */
    public static long searchInMappedFile(Path filePath, byte[] pattern) throws IOException {
        try (FileChannel channel = FileChannel.open(filePath, StandardOpenOption.READ)) {
            long fileSize = channel.size();
            MappedByteBuffer buffer = channel.map(FileChannel.MapMode.READ_ONLY, 0, fileSize);
            // The int cast is safe: map() above already restricts fileSize to < 2 GB.
            for (long i = 0; i <= fileSize - pattern.length; i++) {
                boolean found = true;
                for (int j = 0; j < pattern.length; j++) {
                    if (buffer.get((int) (i + j)) != pattern[j]) {
                        found = false;
                        break;
                    }
                }
                if (found) {
                    return i;
                }
            }
        }
        return -1; // Not found
    }

    public static void main(String[] args) throws IOException {
        Path testFile = Paths.get("test_mmap.dat");
        Path outputFile = Paths.get("output_mmap.dat");
        System.out.println("=== Memory-Mapped File Operations ===");
        // Create test file
        memoryMappedWrite(testFile, 100 * 1024 * 1024); // 100MB
        // Read test file
        memoryMappedRead(testFile);
        // Process file
        System.out.println("\n=== Processing Large File ===");
        processLargeFile(testFile, outputFile);
        // Search in file
        System.out.println("\n=== Searching in File ===");
        byte[] pattern = {0x55, 0x55, 0x55, 0x55};
        long position = searchInMappedFile(testFile, pattern);
        System.out.printf("Pattern found at position: %d%n", position);
        // Test shared memory
        System.out.println("\n=== Shared Memory Test ===");
        Path sharedFile = Paths.get("shared_memory.dat");
        try (SharedMemoryProcessor smp = new SharedMemoryProcessor(sharedFile, 1024)) {
            byte[] testData = "Hello, Shared Memory!".getBytes();
            smp.writeData(testData, 0);
            byte[] readData = smp.readData(0, testData.length);
            System.out.println("Read from shared memory: " + new String(readData));
        }
        // Cleanup
        Files.deleteIfExists(testFile);
        Files.deleteIfExists(outputFile);
        Files.deleteIfExists(sharedFile);
    }
}
Direct Buffers
Direct ByteBuffer Operations
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.*;
import java.lang.reflect.Method;
public class DirectBuffers {

    /**
     * Compares read throughput of a heap buffer versus a direct buffer of the
     * same size over the same file, printing both timings and the ratio.
     */
    public static void benchmarkBuffers(Path filePath, int bufferSize) throws IOException {
        long fileSize = Files.size(filePath);
        // Heap buffer benchmark
        long heapStart = System.nanoTime();
        try (FileChannel channel = FileChannel.open(filePath, StandardOpenOption.READ)) {
            ByteBuffer heapBuffer = ByteBuffer.allocate(bufferSize);
            long totalRead = 0;
            while (totalRead < fileSize) {
                heapBuffer.clear();
                int bytesRead = channel.read(heapBuffer);
                if (bytesRead == -1) break;
                totalRead += bytesRead;
                // Process buffer (simulate work)
                processBuffer(heapBuffer, bytesRead);
            }
        }
        long heapTime = System.nanoTime() - heapStart;
        // Direct buffer benchmark
        long directStart = System.nanoTime();
        try (FileChannel channel = FileChannel.open(filePath, StandardOpenOption.READ)) {
            ByteBuffer directBuffer = ByteBuffer.allocateDirect(bufferSize);
            long totalRead = 0;
            while (totalRead < fileSize) {
                directBuffer.clear();
                int bytesRead = channel.read(directBuffer);
                if (bytesRead == -1) break;
                totalRead += bytesRead;
                // Process buffer (simulate work)
                processBuffer(directBuffer, bytesRead);
            }
        }
        long directTime = System.nanoTime() - directStart;
        System.out.printf("Buffer size: %d KB%n", bufferSize / 1024);
        System.out.printf("Heap buffer time: %d ms%n", heapTime / 1_000_000);
        System.out.printf("Direct buffer time: %d ms%n", directTime / 1_000_000);
        System.out.printf("Direct buffer improvement: %.2fx%n",
                (double) heapTime / directTime);
    }

    /** Drains the buffer byte-by-byte to simulate per-byte processing work. */
    private static void processBuffer(ByteBuffer buffer, int bytesRead) {
        buffer.flip();
        for (int i = 0; i < bytesRead; i++) {
            buffer.get();
        }
    }

    /**
     * Fixed-size pool of reusable direct buffers. Direct allocation is
     * expensive, so reusing buffers amortizes the cost. Falls back to a fresh
     * (unpooled) buffer when the pool is exhausted.
     */
    public static class DirectBufferPool {
        private final ByteBuffer[] pool;
        private final boolean[] inUse;
        private final int bufferSize;

        public DirectBufferPool(int poolSize, int bufferSize) {
            this.pool = new ByteBuffer[poolSize];
            this.inUse = new boolean[poolSize];
            this.bufferSize = bufferSize;
            for (int i = 0; i < poolSize; i++) {
                pool[i] = ByteBuffer.allocateDirect(bufferSize);
            }
        }

        /** Returns a cleared buffer; allocates a new one if all are in use. */
        public ByteBuffer acquire() {
            synchronized (pool) {
                for (int i = 0; i < pool.length; i++) {
                    if (!inUse[i]) {
                        inUse[i] = true;
                        pool[i].clear();
                        return pool[i];
                    }
                }
            }
            // Pool exhausted, create new buffer
            return ByteBuffer.allocateDirect(bufferSize);
        }

        /** Returns a buffer to the pool; unpooled buffers are left to the GC. */
        public void release(ByteBuffer buffer) {
            synchronized (pool) {
                for (int i = 0; i < pool.length; i++) {
                    if (pool[i] == buffer) {
                        inUse[i] = false;
                        return;
                    }
                }
            }
            // Buffer not from pool, let GC handle it
        }

        /** Best-effort eager release of the native memory behind pooled buffers. */
        public void cleanup() {
            synchronized (pool) {
                for (ByteBuffer buffer : pool) {
                    cleanDirectBuffer(buffer);
                }
            }
        }

        /**
         * Best-effort native-memory release.
         *
         * The original version did not compile (stray catch without try,
         * undeclared variable, unresolvable sun.nio.ch.DirectBuffer type).
         * This version uses reflection to invoke the pre-Java-9
         * DirectByteBuffer#cleaner() hook; on Java 9+ the reflective access is
         * typically denied, in which case we silently fall back to letting the
         * GC reclaim the native memory.
         */
        private void cleanDirectBuffer(ByteBuffer buffer) {
            if (!buffer.isDirect()) {
                return;
            }
            try {
                Method cleanerMethod = buffer.getClass().getMethod("cleaner");
                cleanerMethod.setAccessible(true);
                Object cleaner = cleanerMethod.invoke(buffer);
                if (cleaner != null) {
                    Method cleanMethod = cleaner.getClass().getMethod("clean");
                    cleanMethod.setAccessible(true);
                    cleanMethod.invoke(cleaner);
                }
            } catch (Exception | LinkageError ignored) {
                // Reflective cleanup unavailable; the JVM will handle it.
            }
        }
    }

    /**
     * File copy that recycles direct buffers from the given pool. The inner
     * write loop guards against partial writes.
     */
    public static void efficientFileCopy(Path source, Path target,
            DirectBufferPool pool) throws IOException {
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                     StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            long fileSize = sourceChannel.size();
            long bytesCopied = 0;
            while (bytesCopied < fileSize) {
                ByteBuffer buffer = pool.acquire();
                try {
                    int bytesRead = sourceChannel.read(buffer);
                    if (bytesRead == -1) break;
                    buffer.flip();
                    while (buffer.hasRemaining()) {
                        targetChannel.write(buffer);
                    }
                    bytesCopied += bytesRead;
                } finally {
                    pool.release(buffer);
                }
            }
        }
    }

    /**
     * Thin wrapper that adapts a direct buffer for network-style read/write
     * phases (clear-before-fill, flip-before-drain).
     */
    public static class NetworkBuffer {
        private final ByteBuffer buffer;
        private final int capacity;

        public NetworkBuffer(int capacity) {
            this.capacity = capacity;
            this.buffer = ByteBuffer.allocateDirect(capacity);
        }

        /** Resets the buffer so new data can be written into it. */
        public void prepareForWrite() {
            buffer.clear();
        }

        /** Flips the buffer so previously written data can be read back. */
        public void prepareForRead() {
            buffer.flip();
        }

        public ByteBuffer getBuffer() {
            return buffer;
        }

        /** Copies up to {@code length} bytes in; returns how many fit. */
        public int writeData(byte[] data, int offset, int length) {
            int bytesToWrite = Math.min(length, buffer.remaining());
            buffer.put(data, offset, bytesToWrite);
            return bytesToWrite;
        }

        /** Copies up to {@code length} bytes out; returns how many were read. */
        public int readData(byte[] data, int offset, int length) {
            int bytesToRead = Math.min(length, buffer.remaining());
            buffer.get(data, offset, bytesToRead);
            return bytesToRead;
        }

        public boolean hasRemaining() {
            return buffer.hasRemaining();
        }

        public int remaining() {
            return buffer.remaining();
        }
    }

    public static void main(String[] args) throws IOException {
        Path testFile = Paths.get("benchmark_file.dat");
        createTestFile(testFile, 200 * 1024 * 1024); // 200MB
        System.out.println("=== Direct Buffer Benchmark ===");
        // Test different buffer sizes
        int[] bufferSizes = {4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024, 1024 * 1024};
        for (int size : bufferSizes) {
            benchmarkBuffers(testFile, size);
            System.out.println();
        }
        // Test buffer pool
        System.out.println("=== Buffer Pool Test ===");
        DirectBufferPool pool = new DirectBufferPool(10, 64 * 1024);
        Path targetFile = Paths.get("copy_target.dat");
        efficientFileCopy(testFile, targetFile, pool);
        System.out.println("File copied using buffer pool");
        // Cleanup
        pool.cleanup();
        Files.deleteIfExists(testFile);
        Files.deleteIfExists(targetFile);
    }

    /** Creates a test file filled with the repeating byte pattern 0..255. */
    private static void createTestFile(Path path, long size) throws IOException {
        try (FileChannel channel = FileChannel.open(path,
                StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            ByteBuffer buffer = ByteBuffer.allocateDirect(64 * 1024);
            long bytesWritten = 0;
            while (bytesWritten < size) {
                buffer.clear();
                int bytesToWrite = (int) Math.min(buffer.capacity(), size - bytesWritten);
                buffer.limit(bytesToWrite);
                // Fill with test data
                for (int i = 0; i < bytesToWrite; i++) {
                    buffer.put((byte) (i % 256));
                }
                buffer.flip();
                while (buffer.hasRemaining()) { // guard against partial writes
                    channel.write(buffer);
                }
                bytesWritten += bytesToWrite;
            }
        }
    }
}
Network Zero-Copy
Zero-Copy Network Transfers
import java.io.*;
import java.net.*;
import java.nio.*;
import java.nio.channels.*;
import java.nio.file.*;
import java.util.concurrent.*;
public class NetworkZeroCopy {

    /**
     * Blocking file server that streams requested files to clients with
     * FileChannel.transferTo() (kernel-level sendfile where available).
     *
     * Wire protocol: client sends [int filenameLength][filename bytes];
     * server replies [long fileSize][file bytes], or fileSize == -1 if the
     * file does not exist.
     *
     * Note: StandardCharsets is fully qualified throughout this class because
     * the snippet's import list omits java.nio.charset (the original did not
     * compile).
     */
    public static class ZeroCopyFileServer {
        private final int port;
        private final Path fileDirectory;
        private volatile boolean running;

        public ZeroCopyFileServer(int port, Path fileDirectory) {
            this.port = port;
            this.fileDirectory = fileDirectory;
        }

        public void start() throws IOException {
            running = true;
            try (ServerSocketChannel serverChannel = ServerSocketChannel.open()) {
                serverChannel.bind(new InetSocketAddress(port));
                serverChannel.configureBlocking(true);
                System.out.printf("Zero-copy file server started on port %d%n", port);
                ExecutorService executor = Executors.newFixedThreadPool(10);
                while (running) {
                    // Blocking accept: stop() only takes effect after the next client.
                    SocketChannel clientChannel = serverChannel.accept();
                    executor.submit(() -> handleClient(clientChannel));
                }
                executor.shutdown();
            }
        }

        public void stop() {
            running = false;
        }

        private void handleClient(SocketChannel clientChannel) {
            try {
                // Protocol: first 4 bytes = filename length, then the filename.
                ByteBuffer headerBuffer = ByteBuffer.allocate(4);
                // Read filename length
                while (headerBuffer.hasRemaining()) {
                    if (clientChannel.read(headerBuffer) == -1) {
                        return;
                    }
                }
                headerBuffer.flip();
                int filenameLength = headerBuffer.getInt();
                // Read filename
                ByteBuffer filenameBuffer = ByteBuffer.allocate(filenameLength);
                while (filenameBuffer.hasRemaining()) {
                    if (clientChannel.read(filenameBuffer) == -1) {
                        return;
                    }
                }
                filenameBuffer.flip();
                String filename =
                        java.nio.charset.StandardCharsets.UTF_8.decode(filenameBuffer).toString();
                Path filePath = fileDirectory.resolve(filename);
                if (!Files.exists(filePath)) {
                    // Protocol fix: the client reads an 8-byte long for the size,
                    // so the "not found" marker must be an 8-byte long (-1), not
                    // the 4-byte int the original sent.
                    ByteBuffer errorBuffer = ByteBuffer.allocate(8);
                    errorBuffer.putLong(-1L);
                    errorBuffer.flip();
                    while (errorBuffer.hasRemaining()) {
                        clientChannel.write(errorBuffer);
                    }
                    return;
                }
                // Send file using zero-copy
                sendFile(clientChannel, filePath);
            } catch (IOException e) {
                System.err.println("Error handling client: " + e.getMessage());
            } finally {
                try {
                    clientChannel.close();
                } catch (IOException e) {
                    // Ignore: the connection is being torn down anyway.
                }
            }
        }

        private void sendFile(SocketChannel clientChannel, Path filePath) throws IOException {
            try (FileChannel fileChannel = FileChannel.open(filePath, StandardOpenOption.READ)) {
                long fileSize = Files.size(filePath);
                // Send file size first
                ByteBuffer sizeBuffer = ByteBuffer.allocate(8);
                sizeBuffer.putLong(fileSize);
                sizeBuffer.flip();
                while (sizeBuffer.hasRemaining()) {
                    clientChannel.write(sizeBuffer);
                }
                // Zero-copy transfer; transferTo may move fewer bytes than
                // requested, so loop until the whole file has been sent.
                long position = 0;
                long transferred;
                do {
                    transferred = fileChannel.transferTo(position, fileSize - position, clientChannel);
                    if (transferred > 0) {
                        position += transferred;
                    }
                } while (position < fileSize);
                System.out.printf("Sent file: %s (%d bytes)%n",
                        filePath.getFileName(), fileSize);
            }
        }
    }

    /** Client counterpart of {@link ZeroCopyFileServer}. */
    public static class ZeroCopyFileClient {
        private final String host;
        private final int port;

        public ZeroCopyFileClient(String host, int port) {
            this.host = host;
            this.port = port;
        }

        public void downloadFile(String filename, Path localPath) throws IOException {
            try (SocketChannel serverChannel = SocketChannel.open(
                    new InetSocketAddress(host, port))) {
                // Send filename
                byte[] filenameBytes = filename.getBytes(java.nio.charset.StandardCharsets.UTF_8);
                ByteBuffer headerBuffer = ByteBuffer.allocate(4 + filenameBytes.length);
                headerBuffer.putInt(filenameBytes.length);
                headerBuffer.put(filenameBytes);
                headerBuffer.flip();
                while (headerBuffer.hasRemaining()) {
                    serverChannel.write(headerBuffer);
                }
                // Read file size
                ByteBuffer sizeBuffer = ByteBuffer.allocate(8);
                while (sizeBuffer.hasRemaining()) {
                    if (serverChannel.read(sizeBuffer) == -1) {
                        throw new IOException("Server closed connection");
                    }
                }
                sizeBuffer.flip();
                long fileSize = sizeBuffer.getLong();
                if (fileSize == -1) {
                    throw new FileNotFoundException("File not found on server: " + filename);
                }
                // Receive file using zero-copy
                receiveFile(serverChannel, localPath, fileSize);
                System.out.printf("Downloaded file: %s (%d bytes)%n", filename, fileSize);
            }
        }

        private void receiveFile(SocketChannel serverChannel, Path localPath, long fileSize)
                throws IOException {
            try (FileChannel fileChannel = FileChannel.open(localPath,
                    StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
                long position = 0;
                long transferred;
                do {
                    transferred = fileChannel.transferFrom(serverChannel, position, fileSize - position);
                    if (transferred == 0) {
                        // transferFrom returns 0 once the socket hits EOF; without
                        // this check a truncated transfer would loop forever.
                        throw new EOFException("Connection closed before full file received");
                    }
                    position += transferred;
                } while (position < fileSize);
            }
        }
    }

    /**
     * Skeleton of a selector-based (non-blocking) variant. The read/write
     * handlers are protocol-specific and intentionally left as stubs.
     */
    public static class NonBlockingZeroCopyServer {
        private final int port;
        private volatile boolean running;

        public NonBlockingZeroCopyServer(int port) {
            this.port = port;
        }

        public void start() throws IOException {
            running = true;
            try (Selector selector = Selector.open();
                 ServerSocketChannel serverChannel = ServerSocketChannel.open()) {
                serverChannel.configureBlocking(false);
                serverChannel.bind(new InetSocketAddress(port));
                serverChannel.register(selector, SelectionKey.OP_ACCEPT);
                System.out.printf("Non-blocking zero-copy server started on port %d%n", port);
                while (running) {
                    // Time-boxed select so the running flag is re-checked regularly.
                    if (selector.select(1000) == 0) {
                        continue;
                    }
                    for (SelectionKey key : selector.selectedKeys()) {
                        if (key.isAcceptable()) {
                            acceptConnection(selector, (ServerSocketChannel) key.channel());
                        } else if (key.isReadable()) {
                            handleRead(key);
                        } else if (key.isWritable()) {
                            handleWrite(key);
                        }
                    }
                    selector.selectedKeys().clear();
                }
            }
        }

        private void acceptConnection(Selector selector, ServerSocketChannel serverChannel)
                throws IOException {
            SocketChannel clientChannel = serverChannel.accept();
            clientChannel.configureBlocking(false);
            clientChannel.register(selector, SelectionKey.OP_READ, new ClientSession());
            System.out.println("Accepted connection from: " +
                    clientChannel.getRemoteAddress());
        }

        private void handleRead(SelectionKey key) throws IOException {
            SocketChannel channel = (SocketChannel) key.channel();
            ClientSession session = (ClientSession) key.attachment();
            // Handle protocol and file transfer setup
            // Implementation depends on specific protocol
        }

        private void handleWrite(SelectionKey key) throws IOException {
            SocketChannel channel = (SocketChannel) key.channel();
            ClientSession session = (ClientSession) key.attachment();
            // Handle file transfer using zero-copy
            // Implementation depends on specific protocol
        }

        public void stop() {
            running = false;
        }

        /** Per-connection state attached to each SelectionKey. */
        private static class ClientSession {
            private String requestedFile;
            private long filePosition;
            private FileChannel fileChannel;
            // ... other session state
        }
    }

    public static void main(String[] args) throws Exception {
        // Create test directory and file
        Path testDir = Paths.get("test_server_files");
        Files.createDirectories(testDir);
        Path testFile = testDir.resolve("large_file.dat");
        createTestFile(testFile, 50 * 1024 * 1024); // 50MB test file
        // Start server in background thread
        ZeroCopyFileServer server = new ZeroCopyFileServer(8080, testDir);
        Thread serverThread = new Thread(() -> {
            try {
                server.start();
            } catch (IOException e) {
                e.printStackTrace();
            }
        });
        serverThread.start();
        // Give server time to start
        Thread.sleep(1000);
        // Test client
        ZeroCopyFileClient client = new ZeroCopyFileClient("localhost", 8080);
        Path downloadPath = Paths.get("downloaded_file.dat");
        client.downloadFile("large_file.dat", downloadPath);
        // Verify file
        long originalSize = Files.size(testFile);
        long downloadedSize = Files.size(downloadPath);
        System.out.printf("Original size: %d, Downloaded size: %d%n",
                originalSize, downloadedSize);
        // Cleanup
        server.stop();
        serverThread.join(1000);
        Files.deleteIfExists(testFile);
        Files.deleteIfExists(downloadPath);
        Files.deleteIfExists(testDir);
    }

    /** Creates a test file filled with the repeating byte pattern 0..255. */
    private static void createTestFile(Path path, long size) throws IOException {
        try (FileChannel channel = FileChannel.open(path,
                StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            ByteBuffer buffer = ByteBuffer.allocateDirect(64 * 1024);
            long bytesWritten = 0;
            while (bytesWritten < size) {
                buffer.clear();
                int bytesToWrite = (int) Math.min(buffer.capacity(), size - bytesWritten);
                buffer.limit(bytesToWrite);
                for (int i = 0; i < bytesToWrite; i++) {
                    buffer.put((byte) (i % 256));
                }
                buffer.flip();
                while (buffer.hasRemaining()) { // guard against partial writes
                    channel.write(buffer);
                }
                bytesWritten += bytesToWrite;
            }
        }
    }
}
NIO.2 Zero-Copy Operations
Files.copy and Other Zero-Copy Methods
import java.io.*;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.util.*;
public class NIO2ZeroCopy {
// Using Files.copy for zero-copy file operations: it can delegate to an
// OS-level fast path (e.g. reflink) when both paths are on the same filesystem.
public static void filesCopyZeroCopy(Path source, Path target) throws IOException {
    final long begin = System.nanoTime();
    Files.copy(source, target, StandardCopyOption.REPLACE_EXISTING);
    final long elapsedMs = (System.nanoTime() - begin) / 1_000_000;
    System.out.printf("Files.copy operation: %d ms%n", elapsedMs);
}
// Copy with attributes preservation: keeps timestamps/permissions and never
// follows a symbolic link on the source side.
public static void copyWithAttributes(Path source, Path target) throws IOException {
    final long begin = System.nanoTime();
    Files.copy(source, target,
            StandardCopyOption.REPLACE_EXISTING,
            StandardCopyOption.COPY_ATTRIBUTES,
            LinkOption.NOFOLLOW_LINKS);
    System.out.printf("Files.copy with attributes: %d ms%n",
            (System.nanoTime() - begin) / 1_000_000);
}
// Directory tree copy with zero-copy where possible: walks the source tree
// depth-first, mirroring directories before their contents are visited.
public static void copyDirectoryTree(Path sourceDir, Path targetDir) throws IOException {
    Files.walkFileTree(sourceDir, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs)
                throws IOException {
            // Create the mirrored directory before any of its children arrive.
            Files.createDirectories(targetDir.resolve(sourceDir.relativize(dir)));
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
                throws IOException {
            // Files.copy can use an OS fast path on the same filesystem.
            Files.copy(file, targetDir.resolve(sourceDir.relativize(file)),
                    StandardCopyOption.REPLACE_EXISTING,
                    StandardCopyOption.COPY_ATTRIBUTES);
            return FileVisitResult.CONTINUE;
        }
    });
}
// File move/rename (often zero-copy within same filesystem): attempts an
// atomic move first and degrades gracefully where the filesystem cannot do it.
public static void atomicMove(Path source, Path target) throws IOException {
    final long begin = System.nanoTime();
    try {
        Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("Atomic move successful");
    } catch (AtomicMoveNotSupportedException e) {
        // Fall back to a plain (non-atomic) move.
        Files.move(source, target, StandardCopyOption.REPLACE_EXISTING);
        System.out.println("Regular move (atomic not supported)");
    }
    System.out.printf("File move operation: %d ms%n",
            (System.nanoTime() - begin) / 1_000_000);
}
// Bulk file operations
public static class BulkFileProcessor {
    private final Path sourceDir;
    private final Path targetDir;

    public BulkFileProcessor(Path sourceDir, Path targetDir) {
        this.sourceDir = sourceDir;
        this.targetDir = targetDir;
    }

    /**
     * Copies every regular file under sourceDir to the same relative location
     * under targetDir, in parallel.
     *
     * Fix: Files.walk returns a Stream backed by open directory handles; the
     * original never closed it, leaking file descriptors. The stream is now
     * consumed inside try-with-resources.
     */
    public void processFiles() throws IOException {
        List<Path> files;
        try (var walked = Files.walk(sourceDir)) {
            files = walked.filter(Files::isRegularFile).toList();
        }
        // Process files in parallel; each copy is independent of the others.
        files.parallelStream().forEach(file -> {
            try {
                Path relativePath = sourceDir.relativize(file);
                Path targetFile = targetDir.resolve(relativePath);
                // Ensure target directory exists
                Files.createDirectories(targetFile.getParent());
                // Copy with zero-copy where possible
                Files.copy(file, targetFile,
                        StandardCopyOption.REPLACE_EXISTING,
                        StandardCopyOption.COPY_ATTRIBUTES);
            } catch (IOException e) {
                System.err.println("Error processing file: " + file + " - " + e.getMessage());
            }
        });
        System.out.printf("Processed %d files%n", files.size());
    }
}
// File system monitoring with zero-copy operations.
// Watches watchDir for create/modify events and mirrors affected regular
// files into targetDir via Files.copy. startWatching() blocks the calling
// thread until stopWatching() is called, the thread is interrupted, or the
// watch key becomes invalid (e.g. the watched directory was deleted).
public static class FileSystemWatcher {
    private final Path watchDir;
    private final Path targetDir;
    // volatile: written by the controlling thread, read by the watch loop.
    private volatile boolean watching;

    public FileSystemWatcher(Path watchDir, Path targetDir) {
        this.watchDir = watchDir;
        this.targetDir = targetDir;
    }

    // Runs the blocking watch loop on the calling thread.
    public void startWatching() throws IOException {
        watching = true;
        try (WatchService watchService = FileSystems.getDefault().newWatchService()) {
            watchDir.register(watchService,
                StandardWatchEventKinds.ENTRY_CREATE,
                StandardWatchEventKinds.ENTRY_MODIFY);
            System.out.println("Watching directory: " + watchDir);
            while (watching) {
                WatchKey key;
                try {
                    key = watchService.take();
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so callers up the stack can
                    // still observe the interruption, then stop watching.
                    Thread.currentThread().interrupt();
                    break;
                }
                for (WatchEvent<?> event : key.pollEvents()) {
                    if (event.kind() == StandardWatchEventKinds.OVERFLOW) {
                        // Events were dropped by the OS; nothing to resolve.
                        continue;
                    }
                    @SuppressWarnings("unchecked")
                    WatchEvent<Path> pathEvent = (WatchEvent<Path>) event;
                    Path filename = pathEvent.context();
                    Path sourceFile = watchDir.resolve(filename);
                    Path targetFile = targetDir.resolve(filename);
                    handleFileEvent(sourceFile, targetFile, event.kind());
                }
                boolean valid = key.reset();
                if (!valid) {
                    // Key invalidated (directory no longer accessible).
                    break;
                }
            }
        }
    }

    // Copies a created/modified regular file to the mirror location.
    // IO errors are logged, not propagated, so one bad event does not
    // terminate the watch loop.
    private void handleFileEvent(Path sourceFile, Path targetFile, WatchEvent.Kind<?> kind) {
        try {
            if (kind == StandardWatchEventKinds.ENTRY_CREATE ||
                kind == StandardWatchEventKinds.ENTRY_MODIFY) {
                if (Files.isRegularFile(sourceFile)) {
                    // Use zero-copy for file operations
                    Files.copy(sourceFile, targetFile,
                        StandardCopyOption.REPLACE_EXISTING,
                        StandardCopyOption.COPY_ATTRIBUTES);
                    System.out.printf("Copied: %s -> %s%n", sourceFile, targetFile);
                }
            }
        } catch (IOException e) {
            System.err.println("Error handling file event: " + e.getMessage());
        }
    }

    // Signals the watch loop to exit after the current take() returns.
    public void stopWatching() {
        watching = false;
    }
}
// Demonstrates the NIO.2 zero-copy operations in this class: Files.copy,
// attribute-preserving copy, directory-tree copy, bulk parallel copy, and
// atomic move. Test data is generated locally and cleanup now runs in a
// finally block so the test directories are removed even when a step fails.
public static void main(String[] args) throws IOException {
    // Create test directories
    Path sourceDir = Paths.get("test_source");
    Path targetDir = Paths.get("test_target");
    Path watchDir = Paths.get("test_watch");
    Path copiedDir = Paths.get("test_copied");
    Files.createDirectories(sourceDir);
    Files.createDirectories(targetDir);
    Files.createDirectories(watchDir);
    try {
        // Create test files (5 x 10MB)
        for (int i = 0; i < 5; i++) {
            Path testFile = sourceDir.resolve("file" + i + ".dat");
            createTestFile(testFile, 10 * 1024 * 1024); // 10MB each
        }
        System.out.println("=== NIO.2 Zero-Copy Operations ===");
        // Test Files.copy
        Path singleSource = sourceDir.resolve("file0.dat");
        Path singleTarget = targetDir.resolve("file0_copy.dat");
        filesCopyZeroCopy(singleSource, singleTarget);
        // Test copy with attributes
        Path attributedTarget = targetDir.resolve("file0_attributed.dat");
        copyWithAttributes(singleSource, attributedTarget);
        // Test directory copy
        System.out.println("\n=== Directory Tree Copy ===");
        copyDirectoryTree(sourceDir, copiedDir);
        // Test bulk processing
        System.out.println("\n=== Bulk File Processing ===");
        BulkFileProcessor processor = new BulkFileProcessor(sourceDir, targetDir);
        processor.processFiles();
        // Test file move
        System.out.println("\n=== File Move Test ===");
        Path moveSource = sourceDir.resolve("file1.dat");
        Path moveTarget = targetDir.resolve("file1_moved.dat");
        atomicMove(moveSource, moveTarget);
    } finally {
        // Always remove generated test data, even if a step above threw.
        deleteRecursive(sourceDir);
        deleteRecursive(targetDir);
        deleteRecursive(copiedDir);
        deleteRecursive(watchDir);
    }
}
// Fills `path` with `size` bytes of a repeating 0..255 pattern.
// Fixes vs. the naive version: opens with TRUNCATE_EXISTING so a
// pre-existing larger file cannot leave stale trailing bytes, and drains
// each buffer in a loop because FileChannel.write is not guaranteed to
// write the entire buffer in a single call.
private static void createTestFile(Path path, long size) throws IOException {
    try (FileChannel channel = FileChannel.open(path,
            StandardOpenOption.WRITE, StandardOpenOption.CREATE,
            StandardOpenOption.TRUNCATE_EXISTING)) {
        ByteBuffer buffer = ByteBuffer.allocateDirect(64 * 1024);
        long bytesWritten = 0;
        while (bytesWritten < size) {
            buffer.clear();
            int bytesToWrite = (int) Math.min(buffer.capacity(), size - bytesWritten);
            buffer.limit(bytesToWrite);
            for (int i = 0; i < bytesToWrite; i++) {
                buffer.put((byte) (i % 256));
            }
            buffer.flip();
            // Loop until the buffer is fully drained (partial writes happen).
            while (buffer.hasRemaining()) {
                channel.write(buffer);
            }
            bytesWritten += bytesToWrite;
        }
    }
}
// Recursively deletes `path` if it exists. The Files.walk stream holds open
// directory handles, so it is closed via try-with-resources (the original
// leaked it). Entries are deleted deepest-first (reverse order) so that
// directories are empty by the time they are removed.
private static void deleteRecursive(Path path) throws IOException {
    if (Files.exists(path)) {
        try (var walk = Files.walk(path)) {
            walk.sorted(Comparator.reverseOrder())
                .forEach(p -> {
                    try {
                        Files.delete(p);
                    } catch (IOException e) {
                        // Ignore, might be already deleted
                    }
                });
        }
    }
}
}
Performance Benchmarks
Comprehensive Performance Testing
import java.io.*;
import java.nio.*;
import java.nio.channels.*;
import java.nio.file.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.*;
/**
 * Benchmarks comparing traditional copy strategies against zero-copy
 * {@code FileChannel.transferTo}, plus memory-usage and concurrency tests.
 *
 * Fixes vs. the original listing:
 * - worker threads count the end latch down in {@code finally}, so a failed
 *   worker can no longer hang {@code benchmarkConcurrentAccess} forever;
 * - {@code Files.walk} streams are closed (they hold directory handles);
 * - channel copies drain their buffers fully and {@code transferTo} is
 *   looped, since neither call guarantees a complete transfer in one step;
 * - target files are opened with {@code TRUNCATE_EXISTING} so stale bytes
 *   from a larger pre-existing file cannot survive;
 * - ratio/memory reports guard against division by zero on fast/small runs.
 */
public class ZeroCopyBenchmarks {

    /**
     * Copies {@code source} six different ways into {@code baseTarget},
     * printing per-method timings and speedups relative to transferTo.
     * All temporary target files are deleted before returning.
     */
    public static void benchmarkAllMethods(Path source, Path baseTarget, long fileSize)
            throws IOException {
        System.out.printf("=== Benchmarking file size: %d MB ===%n", fileSize / (1024 * 1024));
        // 1. Traditional stream copy
        Path target1 = baseTarget.resolve("stream_copy.dat");
        long streamTime = measureTime(() -> {
            try {
                traditionalStreamCopy(source, target1);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // 2. Buffered stream copy
        Path target2 = baseTarget.resolve("buffered_copy.dat");
        long bufferedTime = measureTime(() -> {
            try {
                bufferedStreamCopy(source, target2);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // 3. NIO channel copy with heap buffer
        Path target3 = baseTarget.resolve("nio_heap_copy.dat");
        long nioHeapTime = measureTime(() -> {
            try {
                nioChannelCopy(source, target3, false);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // 4. NIO channel copy with direct buffer
        Path target4 = baseTarget.resolve("nio_direct_copy.dat");
        long nioDirectTime = measureTime(() -> {
            try {
                nioChannelCopy(source, target4, true);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // 5. Zero-copy transferTo
        Path target5 = baseTarget.resolve("transferTo_copy.dat");
        long transferToTime = measureTime(() -> {
            try {
                transferToCopy(source, target5);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // 6. Files.copy
        Path target6 = baseTarget.resolve("files_copy.dat");
        long filesCopyTime = measureTime(() -> {
            try {
                Files.copy(source, target6, StandardCopyOption.REPLACE_EXISTING);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // Print results
        System.out.printf("Traditional stream: %8d ms%n", streamTime);
        System.out.printf("Buffered stream: %8d ms%n", bufferedTime);
        System.out.printf("NIO channel (heap): %8d ms%n", nioHeapTime);
        System.out.printf("NIO channel (direct): %8d ms%n", nioDirectTime);
        System.out.printf("transferTo zero-copy: %8d ms%n", transferToTime);
        System.out.printf("Files.copy: %8d ms%n", filesCopyTime);
        // Calculate improvements. Clamp the divisor to 1 ms so sub-millisecond
        // transfers (small files) do not divide by zero / print Infinity.
        long transferToDivisor = Math.max(1, transferToTime);
        System.out.println("\n=== Performance Improvements ===");
        System.out.printf("transferTo vs stream: %.2fx faster%n",
            (double) streamTime / transferToDivisor);
        System.out.printf("transferTo vs buffered: %.2fx faster%n",
            (double) bufferedTime / transferToDivisor);
        System.out.printf("transferTo vs NIO direct: %.2fx faster%n",
            (double) nioDirectTime / transferToDivisor);
        // Cleanup
        Files.deleteIfExists(target1);
        Files.deleteIfExists(target2);
        Files.deleteIfExists(target3);
        Files.deleteIfExists(target4);
        Files.deleteIfExists(target5);
        Files.deleteIfExists(target6);
    }

    /**
     * Compares heap growth during repeated stream copies vs. zero-copy
     * transfers. NOTE(review): System.gc() is advisory and heap deltas are
     * inherently noisy — treat these numbers as indicative only.
     */
    public static void benchmarkMemoryUsage(Path source, int iterations) throws IOException {
        System.out.println("\n=== Memory Usage Benchmark ===");
        long fileSize = Files.size(source);
        Runtime runtime = Runtime.getRuntime();
        // Force GC before measurement
        System.gc();
        long initialMemory = runtime.totalMemory() - runtime.freeMemory();
        // Test traditional stream copy memory usage
        for (int i = 0; i < iterations; i++) {
            Path target = Paths.get("memory_test_" + i + ".dat");
            traditionalStreamCopy(source, target);
            Files.deleteIfExists(target);
        }
        long afterStreamMemory = runtime.totalMemory() - runtime.freeMemory();
        long streamMemoryUsed = afterStreamMemory - initialMemory;
        // Force GC again
        System.gc();
        long resetMemory = runtime.totalMemory() - runtime.freeMemory();
        // Test zero-copy memory usage
        for (int i = 0; i < iterations; i++) {
            Path target = Paths.get("memory_test_" + i + ".dat");
            transferToCopy(source, target);
            Files.deleteIfExists(target);
        }
        long afterZeroCopyMemory = runtime.totalMemory() - runtime.freeMemory();
        long zeroCopyMemoryUsed = afterZeroCopyMemory - resetMemory;
        System.out.printf("File size: %d MB%n", fileSize / (1024 * 1024));
        System.out.printf("Traditional stream memory used: %d KB%n", streamMemoryUsed / 1024);
        System.out.printf("Zero-copy memory used: %d KB%n", zeroCopyMemoryUsed / 1024);
        // Guard: the delta can be zero or negative after a GC; clamp to 1
        // byte to keep the ratio finite.
        System.out.printf("Memory reduction: %.2fx less memory%n",
            (double) streamMemoryUsed / Math.max(1, zeroCopyMemoryUsed));
    }

    /**
     * Runs {@code threadCount} simultaneous transferTo copies of the same
     * source file and reports per-thread timing statistics and aggregate
     * throughput. Latches start all workers together; each worker counts the
     * end latch down in a finally block so failures cannot hang the caller.
     */
    public static void benchmarkConcurrentAccess(Path source, Path targetDir, int threadCount)
            throws IOException, InterruptedException {
        System.out.println("\n=== Concurrent Access Benchmark ===");
        ExecutorService executor = Executors.newFixedThreadPool(threadCount);
        CountDownLatch startLatch = new CountDownLatch(1);
        CountDownLatch endLatch = new CountDownLatch(threadCount);
        List<Long> times = new CopyOnWriteArrayList<>();
        for (int i = 0; i < threadCount; i++) {
            final int threadId = i;
            executor.submit(() -> {
                try {
                    startLatch.await();
                    Path target = targetDir.resolve("concurrent_copy_" + threadId + ".dat");
                    long startTime = System.nanoTime();
                    transferToCopy(source, target);
                    long endTime = System.nanoTime();
                    times.add(endTime - startTime);
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    // Always count down, even on failure, so await() returns.
                    endLatch.countDown();
                }
            });
        }
        // Start all threads simultaneously
        startLatch.countDown();
        // Wait for completion
        endLatch.await();
        executor.shutdown();
        // Calculate statistics
        LongSummaryStatistics stats = times.stream()
            .mapToLong(Long::longValue)
            .map(t -> t / 1_000_000) // Convert to milliseconds
            .summaryStatistics();
        System.out.printf("Thread count: %d%n", threadCount);
        System.out.printf("Average time: %.2f ms%n", stats.getAverage());
        System.out.printf("Best time: %d ms%n", stats.getMin());
        System.out.printf("Worst time: %d ms%n", stats.getMax());
        System.out.printf("Total throughput: %.2f MB/s%n",
            (threadCount * Files.size(source) / (1024.0 * 1024.0)) /
            (Math.max(1, stats.getSum()) / 1000.0));
        // Cleanup: close the walk stream deterministically.
        try (var walk = Files.walk(targetDir)) {
            walk.filter(Files::isRegularFile)
                .forEach(path -> {
                    try {
                        Files.delete(path);
                    } catch (IOException e) {
                        // Ignore
                    }
                });
        }
    }

    // Helper methods

    // Wall-clock duration of `operation` in milliseconds.
    private static long measureTime(Runnable operation) {
        long startTime = System.nanoTime();
        operation.run();
        return (System.nanoTime() - startTime) / 1_000_000;
    }

    // Baseline: unbuffered stream copy through an 8KB heap array.
    private static void traditionalStreamCopy(Path source, Path target) throws IOException {
        try (InputStream in = Files.newInputStream(source);
             OutputStream out = Files.newOutputStream(target)) {
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                out.write(buffer, 0, bytesRead);
            }
        }
    }

    // Baseline: same as above but with buffered stream wrappers.
    private static void bufferedStreamCopy(Path source, Path target) throws IOException {
        try (BufferedInputStream in = new BufferedInputStream(Files.newInputStream(source));
             BufferedOutputStream out = new BufferedOutputStream(Files.newOutputStream(target))) {
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                out.write(buffer, 0, bytesRead);
            }
        }
    }

    // Channel copy through an explicit heap or direct ByteBuffer.
    private static void nioChannelCopy(Path source, Path target, boolean useDirectBuffer)
            throws IOException {
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                 StandardOpenOption.WRITE, StandardOpenOption.CREATE,
                 StandardOpenOption.TRUNCATE_EXISTING)) {
            ByteBuffer buffer = useDirectBuffer ?
                ByteBuffer.allocateDirect(8192) : ByteBuffer.allocate(8192);
            while (sourceChannel.read(buffer) != -1) {
                buffer.flip();
                // Drain fully: a single write() may be partial, which would
                // silently drop bytes when the buffer is cleared.
                while (buffer.hasRemaining()) {
                    targetChannel.write(buffer);
                }
                buffer.clear();
            }
        }
    }

    // Zero-copy path: transferTo may move fewer bytes than requested in one
    // call, so loop until the whole file has been transferred.
    private static void transferToCopy(Path source, Path target) throws IOException {
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                 StandardOpenOption.WRITE, StandardOpenOption.CREATE,
                 StandardOpenOption.TRUNCATE_EXISTING)) {
            long size = sourceChannel.size();
            long position = 0;
            while (position < size) {
                long transferred = sourceChannel.transferTo(
                    position, size - position, targetChannel);
                if (transferred <= 0) {
                    break; // avoid spinning if no progress can be made
                }
                position += transferred;
            }
        }
    }

    // Demo driver. WARNING: generates files up to 500MB; intended for manual
    // benchmarking runs, not CI.
    public static void main(String[] args) throws Exception {
        Path benchmarkDir = Paths.get("benchmark_files");
        Files.createDirectories(benchmarkDir);
        // Test different file sizes
        long[] fileSizes = {
            10 * 1024 * 1024,   // 10MB
            100 * 1024 * 1024,  // 100MB
            500 * 1024 * 1024   // 500MB
        };
        for (long fileSize : fileSizes) {
            Path sourceFile = benchmarkDir.resolve("test_" + fileSize + ".dat");
            createTestFile(sourceFile, fileSize);
            benchmarkAllMethods(sourceFile, benchmarkDir, fileSize);
            // Memory benchmark for medium-sized file
            if (fileSize == 100 * 1024 * 1024) {
                benchmarkMemoryUsage(sourceFile, 10);
            }
            Files.deleteIfExists(sourceFile);
        }
        // Concurrent benchmark
        Path concurrentSource = benchmarkDir.resolve("concurrent_test.dat");
        createTestFile(concurrentSource, 50 * 1024 * 1024); // 50MB
        int[] threadCounts = {2, 4, 8, 16};
        for (int threads : threadCounts) {
            benchmarkConcurrentAccess(concurrentSource, benchmarkDir, threads);
        }
        Files.deleteIfExists(concurrentSource);
        Files.deleteIfExists(benchmarkDir);
    }

    // Fills `path` with `size` bytes of a repeating 0..255 pattern; truncates
    // any pre-existing file and drains each buffer fully (partial writes).
    private static void createTestFile(Path path, long size) throws IOException {
        System.out.printf("Creating test file: %d MB%n", size / (1024 * 1024));
        try (FileChannel channel = FileChannel.open(path,
                StandardOpenOption.WRITE, StandardOpenOption.CREATE,
                StandardOpenOption.TRUNCATE_EXISTING)) {
            ByteBuffer buffer = ByteBuffer.allocateDirect(64 * 1024);
            long bytesWritten = 0;
            while (bytesWritten < size) {
                buffer.clear();
                int bytesToWrite = (int) Math.min(buffer.capacity(), size - bytesWritten);
                buffer.limit(bytesToWrite);
                for (int i = 0; i < bytesToWrite; i++) {
                    buffer.put((byte) (i % 256));
                }
                buffer.flip();
                while (buffer.hasRemaining()) {
                    channel.write(buffer);
                }
                bytesWritten += bytesToWrite;
            }
        }
    }
}
Best Practices
Production-Ready Zero-Copy Implementation
import java.io.*;
import java.nio.*;
import java.nio.channels.*;
import java.nio.file.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
public class ZeroCopyBestPractices {
// Production-ready file transfer service.
// Executes transfers on a dedicated daemon thread pool, preferring
// FileChannel.transferTo (zero-copy) and falling back to a pooled
// direct-buffer copy when the channel transfer fails. Aggregate byte and
// operation counters are kept for monitoring.
// Fixes vs. the original: both copy paths open the target with
// TRUNCATE_EXISTING (a pre-existing larger target could otherwise keep stale
// trailing bytes), and a legitimately partial transferTo is retried in a
// loop instead of being treated as a failure.
public static class ZeroCopyFileTransferService {
    private final ExecutorService executor;
    private final DirectBufferPool bufferPool;
    private final AtomicLong totalBytesTransferred;
    private final AtomicLong totalOperations;

    public ZeroCopyFileTransferService(int threadPoolSize, int bufferPoolSize, int bufferSize) {
        this.executor = Executors.newFixedThreadPool(threadPoolSize,
            new ThreadFactory() {
                private final AtomicLong counter = new AtomicLong();
                @Override
                public Thread newThread(Runnable r) {
                    Thread thread = new Thread(r,
                        "zero-copy-worker-" + counter.incrementAndGet());
                    // Daemon threads so the pool never blocks JVM shutdown.
                    thread.setDaemon(true);
                    return thread;
                }
            });
        this.bufferPool = new DirectBufferPool(bufferPoolSize, bufferSize);
        this.totalBytesTransferred = new AtomicLong();
        this.totalOperations = new AtomicLong();
    }

    // Asynchronously transfers one file. The future completes with the byte
    // count, or exceptionally with a CompletionException wrapping the
    // underlying IOException.
    public CompletableFuture<Long> transferFile(Path source, Path target) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                long startTime = System.nanoTime();
                long bytesTransferred = performTransfer(source, target);
                long endTime = System.nanoTime();
                totalBytesTransferred.addAndGet(bytesTransferred);
                totalOperations.incrementAndGet();
                long durationMs = (endTime - startTime) / 1_000_000;
                System.out.printf("Transfer completed: %s -> %s (%d bytes, %d ms)%n",
                    source, target, bytesTransferred, durationMs);
                return bytesTransferred;
            } catch (IOException e) {
                throw new CompletionException(e);
            }
        }, executor);
    }

    // Transfers each source into targetDir under the same file name;
    // completes when every individual transfer completes.
    public CompletableFuture<Void> transferFiles(List<Path> sources, Path targetDir) {
        List<CompletableFuture<Long>> futures = sources.stream()
            .map(source -> {
                Path target = targetDir.resolve(source.getFileName());
                return transferFile(source, target);
            })
            .toList();
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
    }

    private long performTransfer(Path source, Path target) throws IOException {
        // Try zero-copy first
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                 StandardOpenOption.WRITE, StandardOpenOption.CREATE,
                 StandardOpenOption.TRUNCATE_EXISTING)) {
            long fileSize = sourceChannel.size();
            long transferred = 0;
            // transferTo may move fewer bytes than requested in one call;
            // keep going until the whole file has been transferred.
            while (transferred < fileSize) {
                long n = sourceChannel.transferTo(
                    transferred, fileSize - transferred, targetChannel);
                if (n <= 0) {
                    throw new IOException("Incomplete transfer: " + transferred + " of " + fileSize);
                }
                transferred += n;
            }
            return transferred;
        } catch (IOException e) {
            // Fall back to buffered copy if zero-copy fails
            System.err.printf("Zero-copy failed for %s, falling back to buffered copy: %s%n",
                source, e.getMessage());
            return fallbackBufferedCopy(source, target);
        }
    }

    private long fallbackBufferedCopy(Path source, Path target) throws IOException {
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                 StandardOpenOption.WRITE, StandardOpenOption.CREATE,
                 StandardOpenOption.TRUNCATE_EXISTING)) {
            long fileSize = sourceChannel.size();
            long bytesCopied = 0;
            while (bytesCopied < fileSize) {
                ByteBuffer buffer = bufferPool.acquire();
                try {
                    buffer.clear();
                    int bytesRead = sourceChannel.read(buffer);
                    if (bytesRead == -1) break;
                    buffer.flip();
                    while (buffer.hasRemaining()) {
                        bytesCopied += targetChannel.write(buffer);
                    }
                } finally {
                    // Always return the pooled buffer, even on write failure.
                    bufferPool.release(buffer);
                }
            }
            return bytesCopied;
        }
    }

    // Snapshot of the aggregate counters.
    public TransferStats getStats() {
        return new TransferStats(
            totalOperations.get(),
            totalBytesTransferred.get(),
            System.currentTimeMillis()
        );
    }

    // Orderly shutdown: waits briefly for in-flight transfers, then forces.
    public void shutdown() {
        executor.shutdown();
        try {
            if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
                executor.shutdownNow();
            }
        } catch (InterruptedException e) {
            executor.shutdownNow();
            Thread.currentThread().interrupt();
        }
        bufferPool.cleanup();
    }

    public record TransferStats(long totalOperations, long totalBytes, long timestamp) {
        public double getAverageBytesPerOperation() {
            return totalOperations > 0 ? (double) totalBytes / totalOperations : 0;
        }
        @Override
        public String toString() {
            return String.format(
                "TransferStats{operations=%d, bytes=%.2f MB, avg=%.2f MB/op}",
                totalOperations,
                totalBytes / (1024.0 * 1024.0),
                getAverageBytesPerOperation() / (1024.0 * 1024.0)
            );
        }
    }
}
// Error handling and recovery.
// Retries a chunked zero-copy transfer up to maxRetries times, deleting any
// partial target between attempts, and verifies size (plus a checksum for
// files under 100MB) after each successful transfer.
// Fix vs. the original: the target is opened with TRUNCATE_EXISTING so a
// pre-existing larger file cannot leave stale bytes that would fail (or
// worse, silently pass) verification.
public static class ResilientFileTransfer {
    private final int maxRetries;
    private final long retryDelayMs;

    public ResilientFileTransfer(int maxRetries, long retryDelayMs) {
        this.maxRetries = maxRetries;
        this.retryDelayMs = retryDelayMs;
    }

    // Transfers source -> target, retrying on IOException. Throws the last
    // failure (with cause) once all attempts are exhausted; preserves the
    // interrupt flag if the retry sleep is interrupted.
    public long transferWithRetry(Path source, Path target) throws IOException {
        IOException lastException = null;
        for (int attempt = 1; attempt <= maxRetries; attempt++) {
            try {
                return performZeroCopyTransfer(source, target);
            } catch (IOException e) {
                lastException = e;
                System.err.printf("Transfer attempt %d failed: %s%n", attempt, e.getMessage());
                if (attempt < maxRetries) {
                    try {
                        Thread.sleep(retryDelayMs);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        throw new IOException("Transfer interrupted", ie);
                    }
                    // Clean up partial file before retry
                    try {
                        Files.deleteIfExists(target);
                    } catch (IOException deleteError) {
                        // Ignore cleanup errors
                    }
                }
            }
        }
        throw new IOException("All transfer attempts failed", lastException);
    }

    // Chunked transferTo loop (256MB chunks) so very large files do not rely
    // on a single transferTo call completing; verifies the result.
    private long performZeroCopyTransfer(Path source, Path target) throws IOException {
        try (FileChannel sourceChannel = FileChannel.open(source, StandardOpenOption.READ);
             FileChannel targetChannel = FileChannel.open(target,
                 StandardOpenOption.WRITE, StandardOpenOption.CREATE,
                 StandardOpenOption.TRUNCATE_EXISTING)) {
            long fileSize = Files.size(source);
            long transferred = 0;
            // Use chunked transfer for very large files
            long chunkSize = 256 * 1024 * 1024; // 256MB chunks
            while (transferred < fileSize) {
                long remaining = fileSize - transferred;
                long bytesToTransfer = Math.min(chunkSize, remaining);
                long chunkTransferred = sourceChannel.transferTo(
                    transferred, bytesToTransfer, targetChannel);
                if (chunkTransferred == 0) {
                    throw new IOException("Zero bytes transferred, possible disk full");
                }
                transferred += chunkTransferred;
            }
            // Verify file integrity
            verifyTransfer(source, target);
            return transferred;
        }
    }

    // Size check always; checksum check only below 100MB to bound the cost.
    private void verifyTransfer(Path source, Path target) throws IOException {
        long sourceSize = Files.size(source);
        long targetSize = Files.size(target);
        if (sourceSize != targetSize) {
            throw new IOException(String.format(
                "Size mismatch: source=%d, target=%d", sourceSize, targetSize));
        }
        // For critical applications, add checksum verification
        if (sourceSize < 100 * 1024 * 1024) { // Only for smaller files
            long sourceChecksum = computeChecksum(source);
            long targetChecksum = computeChecksum(target);
            if (sourceChecksum != targetChecksum) {
                throw new IOException("Checksum mismatch");
            }
        }
    }

    // Simple order-sensitive polynomial (base-31) checksum over the bytes.
    // Not cryptographic — detects accidental corruption only.
    private long computeChecksum(Path file) throws IOException {
        try (InputStream in = Files.newInputStream(file)) {
            long checksum = 0;
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                for (int i = 0; i < bytesRead; i++) {
                    checksum = (checksum * 31) + (buffer[i] & 0xFF);
                }
            }
            return checksum;
        }
    }
}
// Monitoring and metrics.
// Thread-safe counters for transfer outcomes plus a stats snapshot.
// Fix vs. the original: throughput was computed as totalBytes divided by the
// *average per-operation* time, which overstates throughput by roughly the
// number of operations; it is now totalBytes over the *total* transfer time.
public static class TransferMonitor {
    private final AtomicLong successfulTransfers = new AtomicLong();
    private final AtomicLong failedTransfers = new AtomicLong();
    private final AtomicLong totalBytes = new AtomicLong();
    // LongAdder: cheaper than AtomicLong under heavy concurrent increments.
    private final LongAdder totalTransferTime = new LongAdder();

    public void recordSuccess(long bytes, long durationNanos) {
        successfulTransfers.incrementAndGet();
        totalBytes.addAndGet(bytes);
        totalTransferTime.add(durationNanos);
    }

    public void recordFailure() {
        failedTransfers.incrementAndGet();
    }

    // Point-in-time snapshot; counters may advance between reads, so the
    // numbers are approximate under concurrent recording.
    public MonitoringStats getStats() {
        long success = successfulTransfers.get();
        long failure = failedTransfers.get();
        long totalOps = success + failure;
        double successRate = totalOps > 0 ? (double) success / totalOps * 100 : 0;
        long totalTimeNanos = totalTransferTime.sum();
        long avgTransferTime = success > 0 ? totalTimeNanos / success : 0;
        // Aggregate throughput: total MB over total seconds of transfer time.
        double throughput = totalTimeNanos > 0 ?
            (totalBytes.get() / (1024.0 * 1024.0)) / (totalTimeNanos / 1_000_000_000.0) : 0;
        return new MonitoringStats(
            totalOps, success, failure, successRate,
            totalBytes.get(), avgTransferTime / 1_000_000, // Convert to ms
            throughput
        );
    }

    public record MonitoringStats(
        long totalOperations,
        long successfulOperations,
        long failedOperations,
        double successRate,
        long totalBytes,
        long averageTimeMs,
        double throughputMBs
    ) {
        @Override
        public String toString() {
            return String.format(
                "MonitoringStats{ops=%d, success=%.1f%%, bytes=%.2f MB, avgTime=%d ms, throughput=%.2f MB/s}",
                totalOperations, successRate, totalBytes / (1024.0 * 1024.0),
                averageTimeMs, throughputMBs
            );
        }
    }
}
// Demonstrates the production-style components above: the async
// ZeroCopyFileTransferService, the TransferMonitor, and the retrying
// ResilientFileTransfer. Generates ~105MB of local test data and removes
// it at the end.
public static void main(String[] args) throws Exception {
    // Create test environment
    Path testDir = Paths.get("production_test");
    Files.createDirectories(testDir);
    // Create test files (10 x 10MB)
    List<Path> testFiles = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        Path testFile = testDir.resolve("source_" + i + ".dat");
        createTestFile(testFile, 10 * 1024 * 1024); // 10MB each
        testFiles.add(testFile);
    }
    System.out.println("=== Production-Ready Zero-Copy Implementation ===");
    // Initialize service
    ZeroCopyFileTransferService service = new ZeroCopyFileTransferService(
        4,        // 4 threads
        10,       // 10 buffers in pool
        64 * 1024 // 64KB buffer size
    );
    TransferMonitor monitor = new TransferMonitor();
    // Perform transfers, recording each outcome in the monitor via the
    // whenComplete callback attached to every per-file future.
    Path targetDir = testDir.resolve("target");
    Files.createDirectories(targetDir);
    List<CompletableFuture<Long>> futures = testFiles.stream()
        .map(source -> {
            Path target = targetDir.resolve(source.getFileName());
            return service.transferFile(source, target)
                .whenComplete((bytes, error) -> {
                    if (error == null) {
                        monitor.recordSuccess(bytes, 0); // Duration not tracked in this example
                    } else {
                        monitor.recordFailure();
                    }
                });
        })
        .toList();
    // Wait for completion (get() propagates any transfer failure)
    CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
    // Print stats
    System.out.println("Service stats: " + service.getStats());
    System.out.println("Monitor stats: " + monitor.getStats());
    // Test resilient transfer (3 attempts, 1s between retries)
    System.out.println("\n=== Resilient Transfer Test ===");
    ResilientFileTransfer resilientTransfer = new ResilientFileTransfer(3, 1000);
    Path resilientSource = testDir.resolve("resilient_source.dat");
    Path resilientTarget = testDir.resolve("resilient_target.dat");
    createTestFile(resilientSource, 5 * 1024 * 1024);
    long transferred = resilientTransfer.transferWithRetry(resilientSource, resilientTarget);
    System.out.printf("Resilient transfer completed: %d bytes%n", transferred);
    // Cleanup
    service.shutdown();
    deleteRecursive(testDir);
}
// Fills `path` with `size` bytes of a repeating 0..255 pattern.
// Fixes vs. the naive version: opens with TRUNCATE_EXISTING so a
// pre-existing larger file cannot keep stale trailing bytes, and drains each
// buffer in a loop because FileChannel.write may perform a partial write.
private static void createTestFile(Path path, long size) throws IOException {
    try (FileChannel channel = FileChannel.open(path,
            StandardOpenOption.WRITE, StandardOpenOption.CREATE,
            StandardOpenOption.TRUNCATE_EXISTING)) {
        ByteBuffer buffer = ByteBuffer.allocateDirect(64 * 1024);
        long bytesWritten = 0;
        while (bytesWritten < size) {
            buffer.clear();
            int bytesToWrite = (int) Math.min(buffer.capacity(), size - bytesWritten);
            buffer.limit(bytesToWrite);
            for (int i = 0; i < bytesToWrite; i++) {
                buffer.put((byte) (i % 256));
            }
            buffer.flip();
            while (buffer.hasRemaining()) {
                channel.write(buffer);
            }
            bytesWritten += bytesToWrite;
        }
    }
}
// Recursively deletes `path` if it exists. The Files.walk stream is closed
// via try-with-resources (it holds open directory handles; the original
// leaked it). Entries are deleted deepest-first so directories are empty
// when removed; the comparator avoids negating compareTo results.
private static void deleteRecursive(Path path) throws IOException {
    if (Files.exists(path)) {
        try (var walk = Files.walk(path)) {
            walk.sorted((a, b) -> b.compareTo(a)) // Reverse order for deletion
                .forEach(p -> {
                    try {
                        Files.delete(p);
                    } catch (IOException e) {
                        // Ignore cleanup errors
                    }
                });
        }
    }
}
}
Summary
Zero-copy data transfer in Java provides significant performance benefits for I/O-intensive applications. Key takeaways:
- Use `FileChannel.transferTo()`/`transferFrom()` for file-to-file and file-to-network transfers
- Memory-mapped files provide efficient random access to large files
- Direct ByteBuffers reduce copying between JVM and native memory
- NIO.2 operations like `Files.copy()` often use zero-copy internally
- Proper resource management is crucial for production use
- Monitoring and metrics help optimize zero-copy implementations
By following these patterns and best practices, you can build high-performance, efficient data transfer systems in Java that minimize memory usage and CPU overhead.