Introduction to Log Retention
Log retention policies define how long log files are kept, when they are archived, and when they are deleted. Proper log management is crucial for system maintenance, compliance, and storage optimization.
System Architecture Overview
Log Retention Pipeline
├── Log Generation
│   ├── Application Logs
│   ├── Access Logs
│   ├── Audit Logs
│   └── System Logs
├── Log Processing
│   ├── Rotation by Size/Time
│   ├── Compression
│   ├── Archiving
│   └── Encryption
├── Storage Management
│   ├── Local Storage
│   ├── Cloud Storage (S3, GCS)
│   ├── Retention Policies
│   └── Cleanup Procedures
└── Monitoring & Compliance
    ├── Retention Validation
    ├── Access Logging
    ├── Compliance Reporting
    └── Alerting
Core Implementation
1. Maven Dependencies
<properties>
    <logback.version>1.4.14</logback.version>
    <log4j2.version>2.20.0</log4j2.version>
    <aws.java.sdk.version>2.20.56</aws.java.sdk.version>
</properties>
<dependencies>
    <!-- Logback with Rolling Policies (1.4.x requires SLF4J 2.x) -->
    <dependency>
        <groupId>ch.qos.logback</groupId>
        <artifactId>logback-classic</artifactId>
        <version>${logback.version}</version>
    </dependency>
    <!-- Log4j2 -->
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>${log4j2.version}</version>
    </dependency>
    <!-- AWS SDK for S3 Upload -->
    <dependency>
        <groupId>software.amazon.awssdk</groupId>
        <artifactId>s3</artifactId>
        <version>${aws.java.sdk.version}</version>
    </dependency>
    <!-- Compression. Bumped from 1.23.0: 1.26.x fixes CVE-2024-25710 and
         CVE-2024-26308 (denial of service via crafted archives). -->
    <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-compress</artifactId>
        <version>1.26.1</version>
    </dependency>
    <!-- File Utilities -->
    <dependency>
        <groupId>commons-io</groupId>
        <artifactId>commons-io</artifactId>
        <version>2.13.0</version>
    </dependency>
    <!-- Scheduling. Bumped from 2.7.0: Spring Boot 2.7 ships SLF4J 1.7.x,
         which is incompatible with Logback 1.4.x declared above; Boot 3.1.x
         aligns with SLF4J 2.x. -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-quartz</artifactId>
        <version>3.1.5</version>
    </dependency>
</dependencies>
2. Logback Configuration with Retention
<?xml version="1.0" encoding="UTF-8"?>
<!-- Logback configuration with per-category retention:
     application logs 30 days, audit logs 365 days, error logs 90 days.
     Compression is implied by the .gz suffix in each fileNamePattern. -->
<configuration>
<!-- Log Retention Properties -->
<property name="LOG_HOME" value="/var/log/myapp" />
<property name="MAX_HISTORY" value="30" />
<property name="MAX_FILE_SIZE" value="100MB" />
<property name="TOTAL_SIZE_CAP" value="10GB" />
<!-- NOTE(review): COMPRESSION_ENABLED is never referenced below; rollover
     compression comes from the .gz pattern suffix. Confirm whether this
     property can be removed. -->
<property name="COMPRESSION_ENABLED" value="true" />
<!-- Appender for Application Logs (30-day retention) -->
<appender name="FILE-APP" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/application.log</file>
<!-- Rolls on day change OR when the active file reaches maxFileSize;
     %i disambiguates multiple rolls within a single day. -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/archived/application.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
<maxHistory>${MAX_HISTORY}</maxHistory>
<totalSizeCap>${TOTAL_SIZE_CAP}</totalSizeCap>
<!-- Also purge over-retention archives at application startup,
     not only at rollover time. -->
<cleanHistoryOnStart>true</cleanHistoryOnStart>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!-- Appender for Audit Logs (1-year retention) -->
<appender name="FILE-AUDIT" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/audit.log</file>
<!-- Time-only policy: audit files roll once per day, no size trigger. -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/archived/audit.%d{yyyy-MM-dd}.log.gz</fileNamePattern>
<maxHistory>365</maxHistory>
<totalSizeCap>50GB</totalSizeCap>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} | %msg%n</pattern>
</encoder>
</appender>
<!-- Appender for Error Logs (90-day retention) -->
<appender name="FILE-ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/error.log</file>
<!-- Only ERROR-and-above events reach this file. -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/archived/error.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>90</maxHistory>
<totalSizeCap>5GB</totalSizeCap>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!-- Logger Definitions -->
<!-- additivity="false" keeps audit events out of the application log. -->
<logger name="com.myapp.audit" level="INFO" additivity="false">
<appender-ref ref="FILE-AUDIT" />
</logger>
<!-- Application events go to both the app log and (ERROR-filtered) error log. -->
<logger name="com.myapp" level="INFO" additivity="false">
<appender-ref ref="FILE-APP" />
<appender-ref ref="FILE-ERROR" />
</logger>
<!-- Third-party/framework logging also lands in the application log. -->
<root level="INFO">
<appender-ref ref="FILE-APP" />
</root>
</configuration>
3. Log4j2 Configuration with Retention
<?xml version="1.0" encoding="UTF-8"?>
<!-- Log4j2 configuration with per-category retention: application 30 days,
     audit 365 days, error 90 days. Delete actions run at rollover time. -->
<Configuration status="WARN">
<Properties>
<Property name="LOG_HOME">/var/log/myapp</Property>
<Property name="LOG_PATTERN">%d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n</Property>
</Properties>
<Appenders>
<!-- Rolling File Appender with Retention -->
<RollingFile name="FileAppender"
fileName="${LOG_HOME}/application.log"
filePattern="${LOG_HOME}/archived/application-%d{yyyy-MM-dd}-%i.log.gz">
<PatternLayout pattern="${LOG_PATTERN}"/>
<!-- Either trigger causes a rollover: daily boundary (modulate aligns the
     interval to clock boundaries) or 100 MB of output. -->
<Policies>
<TimeBasedTriggeringPolicy interval="1" modulate="true"/>
<SizeBasedTriggeringPolicy size="100 MB"/>
</Policies>
<!-- max="30" caps the %i counter per day; the nested Delete removes
     archives matching the glob AND older than 30 days (nested
     conditions are combined as AND). -->
<DefaultRolloverStrategy max="30" compressionLevel="9">
<Delete basePath="${LOG_HOME}/archived" maxDepth="1">
<IfFileName glob="application-*.log.gz">
<IfLastModified age="30d"/>
</IfFileName>
</Delete>
</DefaultRolloverStrategy>
</RollingFile>
<!-- Audit Logs with Custom Retention -->
<!-- No size trigger: audit files roll once per day; archives kept 365 days. -->
<RollingFile name="AuditAppender"
fileName="${LOG_HOME}/audit.log"
filePattern="${LOG_HOME}/archived/audit-%d{yyyy-MM-dd}.log.gz">
<PatternLayout pattern="%d{yyyy-MM-dd HH:mm:ss.SSS} | AUDIT | %msg%n"/>
<Policies>
<TimeBasedTriggeringPolicy interval="1"/>
</Policies>
<DefaultRolloverStrategy>
<Delete basePath="${LOG_HOME}/archived" maxDepth="1">
<IfFileName glob="audit-*.log.gz">
<IfLastModified age="365d"/>
</IfFileName>
</Delete>
</DefaultRolloverStrategy>
</RollingFile>
<!-- Error Logs with Strict Retention -->
<RollingFile name="ErrorAppender"
fileName="${LOG_HOME}/error.log"
filePattern="${LOG_HOME}/archived/error-%d{yyyy-MM-dd}-%i.log.gz">
<!-- Appender-level filter: only ERROR and above are written here. -->
<ThresholdFilter level="ERROR" onMatch="ACCEPT" onMismatch="DENY"/>
<PatternLayout pattern="${LOG_PATTERN}"/>
<Policies>
<TimeBasedTriggeringPolicy interval="1"/>
<SizeBasedTriggeringPolicy size="50 MB"/>
</Policies>
<DefaultRolloverStrategy max="90">
<Delete basePath="${LOG_HOME}/archived" maxDepth="1">
<IfFileName glob="error-*.log.gz">
<IfLastModified age="90d"/>
</IfFileName>
</Delete>
</DefaultRolloverStrategy>
</RollingFile>
</Appenders>
<Loggers>
<!-- additivity="false" keeps audit events out of the root appenders. -->
<Logger name="com.myapp.audit" level="INFO" additivity="false">
<AppenderRef ref="AuditAppender"/>
</Logger>
<Root level="INFO">
<AppenderRef ref="FileAppender"/>
<AppenderRef ref="ErrorAppender"/>
</Root>
</Loggers>
</Configuration>
4. Log Retention Service
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.core.sync.RequestBody;
import java.io.*;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.GZIPOutputStream;
@Service
public class LogRetentionService {

    private static final org.slf4j.Logger logger =
            org.slf4j.LoggerFactory.getLogger(LogRetentionService.class);

    private final LogRetentionConfig retentionConfig;
    private final S3Client s3Client;

    public LogRetentionService(LogRetentionConfig retentionConfig, S3Client s3Client) {
        this.retentionConfig = retentionConfig;
        this.s3Client = s3Client;
    }

    /**
     * Clean up old log files based on retention policy.
     * Walks the configured log directory, deletes every file whose effective
     * date is older than its category's retention window, then removes any
     * directories left empty. Errors are logged; the next run retries.
     */
    @Scheduled(cron = "0 0 2 * * ?") // Run daily at 2 AM
    public void cleanupOldLogs() {
        logger.info("Starting log cleanup process");
        try {
            Path logDir = Paths.get(retentionConfig.getLogDirectory());
            if (!Files.exists(logDir)) {
                logger.warn("Log directory does not exist: {}", logDir);
                return;
            }
            // Atomics because the anonymous visitor below needs effectively-final state.
            AtomicLong deletedCount = new AtomicLong(0);
            AtomicLong totalFreedSpace = new AtomicLong(0);
            Files.walkFileTree(logDir, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    if (shouldDeleteFile(file, attrs)) {
                        long fileSize = Files.size(file);
                        Files.delete(file);
                        deletedCount.incrementAndGet();
                        totalFreedSpace.addAndGet(fileSize);
                        logger.debug("Deleted old log file: {} (size: {} bytes)", file, fileSize);
                    }
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                    // Delete directories emptied by the deletions above — but never the root.
                    if (isDirectoryEmpty(dir) && !dir.equals(logDir)) {
                        Files.delete(dir);
                        logger.debug("Deleted empty directory: {}", dir);
                    }
                    return FileVisitResult.CONTINUE;
                }
            });
            logger.info("Log cleanup completed. Deleted {} files, freed {} bytes",
                    deletedCount.get(), totalFreedSpace.get());
        } catch (Exception e) {
            // Best-effort scheduled job: log and wait for the next run.
            logger.error("Error during log cleanup", e);
        }
    }

    /**
     * Archive rolled-over logs to cloud storage once they are older than the
     * local retention window. No-op unless cloud archiving is enabled.
     * Scheduled at 01:30, before the 02:00 cleanup pass.
     */
    @Scheduled(cron = "0 30 1 * * ?") // Run daily at 1:30 AM
    public void archiveLogsToCloud() {
        if (!retentionConfig.isCloudArchiveEnabled()) {
            return;
        }
        logger.info("Starting log archiving to cloud");
        try {
            Path archiveDir = Paths.get(retentionConfig.getLogDirectory(), "archived");
            if (!Files.exists(archiveDir)) {
                logger.warn("Archive directory does not exist: {}", archiveDir);
                return;
            }
            LocalDate archiveThreshold = LocalDate.now().minusDays(retentionConfig.getLocalRetentionDays());
            // FIX: Files.list keeps a directory handle open until the stream is
            // closed; the original never closed it, leaking a file descriptor
            // on every daily run.
            try (java.util.stream.Stream<Path> archivedFiles = Files.list(archiveDir)) {
                archivedFiles
                        .filter(path -> isFileOlderThan(path, archiveThreshold))
                        .forEach(this::uploadToCloud);
            }
        } catch (Exception e) {
            logger.error("Error during log archiving", e);
        }
    }

    /**
     * Compress uncompressed log files older than the configured threshold.
     * Scheduled at 03:00, after the cleanup pass.
     */
    @Scheduled(cron = "0 0 3 * * ?") // Run daily at 3 AM
    public void compressOldLogs() {
        logger.info("Starting log compression");
        try {
            Path logDir = Paths.get(retentionConfig.getLogDirectory());
            LocalDate compressionThreshold = LocalDate.now().minusDays(retentionConfig.getCompressionThresholdDays());
            AtomicLong compressedCount = new AtomicLong(0);
            Files.walkFileTree(logDir, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    if (shouldCompressFile(file, attrs, compressionThreshold)) {
                        compressFile(file);
                        compressedCount.incrementAndGet();
                    }
                    return FileVisitResult.CONTINUE;
                }
            });
            logger.info("Log compression completed. Compressed {} files", compressedCount.get());
        } catch (Exception e) {
            logger.error("Error during log compression", e);
        }
    }

    /**
     * Validate retention policy compliance.
     * Produces a report with total file count/size and a list of files whose
     * age exceeds their retention window plus a 7-day grace period.
     *
     * @return report; {@code getError()} is non-null if the walk itself failed
     */
    public RetentionReport validateRetentionCompliance() {
        RetentionReport report = new RetentionReport();
        try {
            Path logDir = Paths.get(retentionConfig.getLogDirectory());
            Files.walkFileTree(logDir, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    report.incrementTotalFiles();
                    report.addTotalSize(Files.size(file));
                    if (isFileViolatingRetention(file, attrs)) {
                        report.addViolation(file.toString());
                    }
                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (Exception e) {
            logger.error("Error during retention validation", e);
            report.setError(e.getMessage());
        }
        return report;
    }

    /**
     * True when {@code file} is an archived log older than its category's
     * retention window. Active log files are never deleted. Errors resolve to
     * {@code false} (keep the file) — the safe direction for deletion.
     */
    private boolean shouldDeleteFile(Path file, BasicFileAttributes attrs) {
        try {
            String filename = file.getFileName().toString();
            // Skip current (actively written) log files.
            if (filename.equals("application.log") ||
                filename.equals("audit.log") ||
                filename.equals("error.log")) {
                return false;
            }
            LocalDate fileDate = getFileDate(file, attrs);
            LocalDate retentionThreshold = LocalDate.now().minusDays(getRetentionDaysForFile(file));
            return fileDate.isBefore(retentionThreshold);
        } catch (Exception e) {
            logger.warn("Error checking file for deletion: {}", file, e);
            return false;
        }
    }

    /**
     * True when {@code file} is not yet compressed and older than the
     * compression threshold. Errors resolve to {@code false} (skip).
     */
    private boolean shouldCompressFile(Path file, BasicFileAttributes attrs, LocalDate threshold) {
        try {
            // Skip already compressed files.
            if (file.toString().endsWith(".gz") || file.toString().endsWith(".zip")) {
                return false;
            }
            LocalDate fileDate = getFileDate(file, attrs);
            return fileDate.isBefore(threshold);
        } catch (Exception e) {
            logger.warn("Error checking file for compression: {}", file, e);
            return false;
        }
    }

    /**
     * True when the file is older than retention + a 7-day grace period.
     * Unreadable files count as violations so they surface in the report.
     */
    private boolean isFileViolatingRetention(Path file, BasicFileAttributes attrs) {
        try {
            LocalDate fileDate = getFileDate(file, attrs);
            LocalDate maxAllowedDate = LocalDate.now().minusDays(getRetentionDaysForFile(file) + 7); // 7-day grace period
            return fileDate.isBefore(maxAllowedDate);
        } catch (Exception e) {
            return true; // Consider files with errors as violations
        }
    }

    /** Map a file to its retention category by filename substring. */
    private int getRetentionDaysForFile(Path file) {
        String filename = file.getFileName().toString();
        if (filename.contains("audit")) {
            return retentionConfig.getAuditRetentionDays();
        } else if (filename.contains("error")) {
            return retentionConfig.getErrorRetentionDays();
        } else {
            return retentionConfig.getApplicationRetentionDays();
        }
    }

    /**
     * Effective date of a log file: the yyyy-MM-dd stamp embedded in its name
     * when present, otherwise the filesystem modification time.
     * NOTE(review): on any error this returns 2000-01-01, which deliberately
     * makes the file eligible for deletion — aggressive for an error path;
     * confirm this is intended.
     */
    private LocalDate getFileDate(Path file, BasicFileAttributes attrs) {
        try {
            // Try to extract date from filename first.
            String filename = file.getFileName().toString();
            Optional<LocalDate> dateFromName = extractDateFromFilename(filename);
            if (dateFromName.isPresent()) {
                return dateFromName.get();
            }
            // Fall back to file modification time.
            return attrs.lastModifiedTime().toInstant()
                    .atZone(ZoneId.systemDefault())
                    .toLocalDate();
        } catch (Exception e) {
            // If all else fails, use a very old date to ensure deletion.
            return LocalDate.of(2000, 1, 1);
        }
    }

    /** Extract the first yyyy-MM-dd stamp from a rolled-over file name. */
    private Optional<LocalDate> extractDateFromFilename(String filename) {
        try {
            // Match patterns like application-2023-10-15-1.log.gz
            java.util.regex.Pattern pattern = java.util.regex.Pattern.compile(
                    ".*(\\d{4}-\\d{2}-\\d{2}).*");
            java.util.regex.Matcher matcher = pattern.matcher(filename);
            if (matcher.find()) {
                return Optional.of(LocalDate.parse(matcher.group(1)));
            }
        } catch (Exception e) {
            logger.debug("Could not extract date from filename: {}", filename);
        }
        return Optional.empty();
    }

    /** True when the file's modification date precedes {@code threshold}; errors keep the file. */
    private boolean isFileOlderThan(Path file, LocalDate threshold) {
        try {
            LocalDate fileDate = Files.getLastModifiedTime(file).toInstant()
                    .atZone(ZoneId.systemDefault())
                    .toLocalDate();
            return fileDate.isBefore(threshold);
        } catch (Exception e) {
            return false;
        }
    }

    /** True when the directory contains no entries at all. */
    private boolean isDirectoryEmpty(Path directory) throws IOException {
        try (DirectoryStream<Path> dirStream = Files.newDirectoryStream(directory)) {
            return !dirStream.iterator().hasNext();
        }
    }

    /**
     * Gzip a single file in place ({@code x.log} -> {@code x.log.gz}), deleting
     * the original only after the compressed copy is fully written and closed.
     * On failure the partial .gz file is removed and the original kept.
     */
    private void compressFile(Path sourceFile) {
        Path compressedFile = Paths.get(sourceFile.toString() + ".gz");
        try {
            try (FileInputStream fis = new FileInputStream(sourceFile.toFile());
                 FileOutputStream fos = new FileOutputStream(compressedFile.toFile());
                 GZIPOutputStream gzipOS = new GZIPOutputStream(fos)) {
                byte[] buffer = new byte[8192]; // was 1 KB; larger buffer means fewer syscalls
                int len;
                while ((len = fis.read(buffer)) != -1) {
                    gzipOS.write(buffer, 0, len);
                }
            }
            // FIX: delete after the streams are closed. The original deleted the
            // source while its input stream was still open, which fails on
            // platforms that lock open files (e.g. Windows).
            Files.delete(sourceFile);
            logger.debug("Compressed file: {} -> {}", sourceFile, compressedFile);
        } catch (Exception e) {
            logger.error("Error compressing file: {}", sourceFile, e);
            // Delete the potentially incomplete compressed file.
            try {
                Files.deleteIfExists(compressedFile);
            } catch (IOException ex) {
                logger.warn("Could not delete incomplete compressed file: {}", compressedFile);
            }
        }
    }

    /**
     * Upload one archived file to S3 under logs/&lt;currentYear&gt;/&lt;filename&gt;,
     * then delete the local copy. Failures keep the local file so the next
     * scheduled run retries it.
     * NOTE(review): the key uses the year of upload, not the file's own year —
     * files archived in early January land under the new year; confirm intended.
     */
    private void uploadToCloud(Path file) {
        try {
            String key = "logs/" + LocalDate.now().getYear() + "/" + file.getFileName();
            PutObjectRequest putObjectRequest = PutObjectRequest.builder()
                    .bucket(retentionConfig.getS3BucketName())
                    .key(key)
                    .build();
            s3Client.putObject(putObjectRequest, RequestBody.fromFile(file));
            // Delete local file after successful upload.
            Files.delete(file);
            logger.info("Uploaded log file to cloud: {} -> {}", file, key);
        } catch (Exception e) {
            logger.error("Error uploading file to cloud: {}", file, e);
        }
    }
}
5. Configuration Classes
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
@Component
@ConfigurationProperties(prefix = "log.retention")
public class LogRetentionConfig {

    // --- Locations and feature switches ---

    /** Root directory scanned for log files. */
    private String logDirectory = "/var/log/myapp";

    /** When true, old archived logs are pushed to cloud storage. */
    private boolean cloudArchiveEnabled = false;

    /** Destination S3 bucket for cloud archiving; no default. */
    private String s3BucketName;

    /** Days an archived file stays on local disk before cloud upload. */
    private int localRetentionDays = 30;

    /** Age in days after which uncompressed files become compression candidates. */
    private int compressionThresholdDays = 7;

    // --- Retention periods in days, per log category ---

    private int applicationRetentionDays = 30;
    private int auditRetentionDays = 365;
    private int errorRetentionDays = 90;
    private int accessRetentionDays = 180;

    // --- Size limits (human-readable strings; consumers parse them) ---

    private String maxTotalSize = "100GB";
    private String maxFileSize = "100MB";

    public String getLogDirectory() {
        return logDirectory;
    }

    public void setLogDirectory(String logDirectory) {
        this.logDirectory = logDirectory;
    }

    public boolean isCloudArchiveEnabled() {
        return cloudArchiveEnabled;
    }

    public void setCloudArchiveEnabled(boolean cloudArchiveEnabled) {
        this.cloudArchiveEnabled = cloudArchiveEnabled;
    }

    public String getS3BucketName() {
        return s3BucketName;
    }

    public void setS3BucketName(String s3BucketName) {
        this.s3BucketName = s3BucketName;
    }

    public int getLocalRetentionDays() {
        return localRetentionDays;
    }

    public void setLocalRetentionDays(int localRetentionDays) {
        this.localRetentionDays = localRetentionDays;
    }

    public int getCompressionThresholdDays() {
        return compressionThresholdDays;
    }

    public void setCompressionThresholdDays(int compressionThresholdDays) {
        this.compressionThresholdDays = compressionThresholdDays;
    }

    public int getApplicationRetentionDays() {
        return applicationRetentionDays;
    }

    public void setApplicationRetentionDays(int applicationRetentionDays) {
        this.applicationRetentionDays = applicationRetentionDays;
    }

    public int getAuditRetentionDays() {
        return auditRetentionDays;
    }

    public void setAuditRetentionDays(int auditRetentionDays) {
        this.auditRetentionDays = auditRetentionDays;
    }

    public int getErrorRetentionDays() {
        return errorRetentionDays;
    }

    public void setErrorRetentionDays(int errorRetentionDays) {
        this.errorRetentionDays = errorRetentionDays;
    }

    public int getAccessRetentionDays() {
        return accessRetentionDays;
    }

    public void setAccessRetentionDays(int accessRetentionDays) {
        this.accessRetentionDays = accessRetentionDays;
    }

    public String getMaxTotalSize() {
        return maxTotalSize;
    }

    public void setMaxTotalSize(String maxTotalSize) {
        this.maxTotalSize = maxTotalSize;
    }

    public String getMaxFileSize() {
        return maxFileSize;
    }

    public void setMaxFileSize(String maxFileSize) {
        this.maxFileSize = maxFileSize;
    }
}
6. Data Models
public class RetentionReport {

    /** Date the report object was created. */
    private LocalDate generatedAt;
    /** Number of files visited. */
    private long totalFiles;
    /** Cumulative size in bytes of all visited files. */
    private long totalSizeBytes;
    /** Paths of files that violate their retention policy. */
    private List<String> violations;
    /** Set when report generation itself failed; null otherwise. */
    private String error;

    public RetentionReport() {
        this.generatedAt = LocalDate.now();
        this.violations = new ArrayList<>();
    }

    // Getters and setters
    public LocalDate getGeneratedAt() { return generatedAt; }
    public void setGeneratedAt(LocalDate generatedAt) { this.generatedAt = generatedAt; }
    public long getTotalFiles() { return totalFiles; }
    public void setTotalFiles(long totalFiles) { this.totalFiles = totalFiles; }
    public void incrementTotalFiles() { this.totalFiles++; }
    public long getTotalSizeBytes() { return totalSizeBytes; }
    public void setTotalSizeBytes(long totalSizeBytes) { this.totalSizeBytes = totalSizeBytes; }
    public void addTotalSize(long size) { this.totalSizeBytes += size; }
    public List<String> getViolations() { return violations; }
    public void setViolations(List<String> violations) { this.violations = violations; }
    public void addViolation(String violation) { this.violations.add(violation); }
    public String getError() { return error; }
    public void setError(String error) { this.error = error; }

    /**
     * Human-readable total size: "N B" below 1 KB, otherwise two decimals
     * with a KB/MB/GB suffix.
     * FIX: formats with Locale.ROOT so the decimal separator is always "."
     * regardless of the JVM's default locale (the original could emit
     * "1,50 KB" on e.g. a German-locale JVM).
     */
    public String getTotalSizeHumanReadable() {
        if (totalSizeBytes < 1024) {
            return totalSizeBytes + " B";
        } else if (totalSizeBytes < 1024L * 1024) {
            return String.format(java.util.Locale.ROOT, "%.2f KB", totalSizeBytes / 1024.0);
        } else if (totalSizeBytes < 1024L * 1024 * 1024) {
            return String.format(java.util.Locale.ROOT, "%.2f MB", totalSizeBytes / (1024.0 * 1024.0));
        } else {
            return String.format(java.util.Locale.ROOT, "%.2f GB", totalSizeBytes / (1024.0 * 1024.0 * 1024.0));
        }
    }
}
public class LogFileInfo {
private String filename;
private long size;
private LocalDate lastModified;
private LocalDate creationDate;
private String retentionCategory;
private LocalDate scheduledDeletionDate;
// Getters and setters
public String getFilename() { return filename; }
public void setFilename(String filename) { this.filename = filename; }
public long getSize() { return size; }
public void setSize(long size) { this.size = size; }
public LocalDate getLastModified() { return lastModified; }
public void setLastModified(LocalDate lastModified) { this.lastModified = lastModified; }
public LocalDate getCreationDate() { return creationDate; }
public void setCreationDate(LocalDate creationDate) { this.creationDate = creationDate; }
public String getRetentionCategory() { return retentionCategory; }
public void setRetentionCategory(String retentionCategory) { this.retentionCategory = retentionCategory; }
public LocalDate getScheduledDeletionDate() { return scheduledDeletionDate; }
public void setScheduledDeletionDate(LocalDate scheduledDeletionDate) { this.scheduledDeletionDate = scheduledDeletionDate; }
}
7. REST API for Log Management
import org.springframework.web.bind.annotation.*;
import org.springframework.http.ResponseEntity;
import java.util.List;
import java.util.stream.Collectors;
@RestController
@RequestMapping("/api/logs")
public class LogManagementController {

    // Collaborators supplied by constructor injection.
    private final LogRetentionService retentionService;
    private final LogRetentionConfig retentionConfig;

    public LogManagementController(LogRetentionService retentionService,
                                   LogRetentionConfig retentionConfig) {
        this.retentionService = retentionService;
        this.retentionConfig = retentionConfig;
    }

    /** Compliance report over all files in the configured log directory. */
    @GetMapping("/retention/report")
    public ResponseEntity<RetentionReport> getRetentionReport() {
        return ResponseEntity.ok(retentionService.validateRetentionCompliance());
    }

    /** Run the cleanup job immediately instead of waiting for its schedule. */
    @PostMapping("/retention/cleanup")
    public ResponseEntity<String> triggerCleanup() {
        retentionService.cleanupOldLogs();
        return ResponseEntity.ok("Log cleanup triggered successfully");
    }

    /** Run the compression job immediately. */
    @PostMapping("/retention/compress")
    public ResponseEntity<String> triggerCompression() {
        retentionService.compressOldLogs();
        return ResponseEntity.ok("Log compression triggered successfully");
    }

    /** Run the cloud-archiving job immediately. */
    @PostMapping("/retention/archive")
    public ResponseEntity<String> triggerArchiving() {
        retentionService.archiveLogsToCloud();
        return ResponseEntity.ok("Log archiving triggered successfully");
    }

    /** Current retention configuration. */
    @GetMapping("/retention/config")
    public ResponseEntity<LogRetentionConfig> getRetentionConfig() {
        return ResponseEntity.ok(retentionConfig);
    }

    /** Echoes the submitted configuration; persistence is not implemented. */
    @PutMapping("/retention/config")
    public ResponseEntity<LogRetentionConfig> updateRetentionConfig(@RequestBody LogRetentionConfig newConfig) {
        // In a real application, you would update the configuration and persist it
        // For this example, we'll just return the new config
        return ResponseEntity.ok(newConfig);
    }
}
8. Advanced Retention Strategies
import org.springframework.stereotype.Component;
@Component
public class SmartRetentionStrategy {

    private static final org.slf4j.Logger logger =
            org.slf4j.LoggerFactory.getLogger(SmartRetentionStrategy.class);

    /**
     * Adaptive retention based on disk space.
     * Scales the default retention down as the volume backing {@code logDir}
     * fills up; never drops below a small floor so recent logs survive.
     * Any I/O error falls back to the default.
     *
     * @param logDir           directory whose backing FileStore is inspected
     * @param defaultRetention retention days used when space is plentiful
     * @return possibly-reduced retention period in days
     */
    public int calculateAdaptiveRetentionDays(Path logDir, int defaultRetention) {
        try {
            FileStore store = Files.getFileStore(logDir);
            long usableSpace = store.getUsableSpace();
            long totalSpace = store.getTotalSpace();
            double usageRatio = (double) (totalSpace - usableSpace) / totalSpace;
            if (usageRatio > 0.9) { // 90% full
                logger.warn("Disk usage critical ({}%), reducing retention days", usageRatio * 100);
                return Math.max(7, defaultRetention / 4); // Keep at least 7 days
            } else if (usageRatio > 0.8) { // 80% full
                logger.info("Disk usage high ({}%), reducing retention days", usageRatio * 100);
                return Math.max(14, defaultRetention / 2); // Keep at least 14 days
            } else if (usageRatio > 0.7) { // 70% full
                logger.info("Disk usage moderate ({}%), slightly reducing retention days", usageRatio * 100);
                return (int) (defaultRetention * 0.75);
            }
            return defaultRetention;
        } catch (Exception e) {
            logger.warn("Error calculating adaptive retention, using default: {}", defaultRetention, e);
            return defaultRetention;
        }
    }

    /**
     * Retention based on log importance: more severe levels are kept longer.
     * Unknown levels — and, as a FIX, null input (which previously threw a
     * NullPointerException) — fall back to the 60-day default.
     */
    public int getRetentionDaysForLogLevel(String logLevel) {
        if (logLevel == null) {
            return 60; // same fallback as an unrecognized level
        }
        switch (logLevel.toUpperCase()) {
            case "ERROR":
            case "FATAL":
                return 365; // Keep errors for a year
            case "WARN":
                return 180; // Keep warnings for 6 months
            case "INFO":
                return 90; // Keep info for 3 months
            case "DEBUG":
            case "TRACE":
                return 30; // Keep debug/trace for 1 month
            default:
                return 60; // Default 2 months
        }
    }

    /**
     * Seasonal retention adjustment, in days, to add to a base retention:
     * +15 in October through December, -7 in June through August, else 0.
     */
    public int getSeasonalRetentionAdjustment() {
        LocalDate now = LocalDate.now();
        int month = now.getMonthValue();
        // Increase retention during holiday season (Q4).
        if (month >= 10 && month <= 12) {
            return 15; // Add 15 days during holiday season
        }
        // Decrease retention during slow periods.
        if (month >= 6 && month <= 8) {
            return -7; // Reduce by 7 days during summer
        }
        return 0;
    }
}
9. Application Configuration
# application.yml
log:
  retention:
    log-directory: /var/log/myapp
    cloud-archive-enabled: true
    s3-bucket-name: myapp-logs
    local-retention-days: 30
    compression-threshold-days: 7
    application-retention-days: 30
    audit-retention-days: 365
    error-retention-days: 90
    access-retention-days: 180
    max-total-size: 100GB
    max-file-size: 100MB

management:
  endpoints:
    web:
      exposure:
        include: health,info,metrics,logs
  endpoint:
    logs:
      enabled: true

spring:
  quartz:
    job-store-type: jdbc
    jdbc:
      initialize-schema: always
  datasource:
    url: jdbc:h2:mem:quartz
    driverClassName: org.h2.Driver
    username: sa
    password: ""
10. Testing
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest
class LogRetentionServiceTest {
// NOTE(review): this boots the real Spring context, so LogRetentionService uses
// the LogRetentionConfig default directory (/var/log/myapp). The @TempDir below
// is never wired into that config, so cleanupOldLogs() does not actually scan
// tempLogDir — confirm the test overrides log.retention.log-directory.
@Autowired
private LogRetentionService retentionService;
// Per-test temporary directory created and removed by JUnit 5.
@TempDir
Path tempLogDir;
@Test
void testCleanupOldLogs() throws Exception {
// Create test log files with different dates
createTestLogFile("application-2023-01-01-1.log", LocalDate.of(2023, 1, 1));
createTestLogFile("application-2023-10-01-1.log", LocalDate.of(2023, 10, 1));
createTestLogFile("application.log", LocalDate.now());
// Trigger cleanup
retentionService.cleanupOldLogs();
// Verify only recent files remain
// NOTE(review): this expectation is time-dependent — the 2023-10-01 file only
// survives while it is inside the application retention window relative to
// "now". Use dates derived from LocalDate.now() to keep the test stable.
assertThat(Files.exists(tempLogDir.resolve("application-2023-01-01-1.log"))).isFalse();
assertThat(Files.exists(tempLogDir.resolve("application-2023-10-01-1.log"))).isTrue();
assertThat(Files.exists(tempLogDir.resolve("application.log"))).isTrue();
}
@Test
void testRetentionReport() throws Exception {
createTestLogFile("old-audit.log", LocalDate.of(2022, 1, 1));
createTestLogFile("recent-error.log", LocalDate.now().minusDays(1));
RetentionReport report = retentionService.validateRetentionCompliance();
assertThat(report.getViolations()).hasSize(1);
assertThat(report.getViolations().get(0)).contains("old-audit.log");
}
// Creates an empty file in tempLogDir, back-dates its modification time to the
// given date, then writes content (which, note, bumps the mtime back to "now"
// on most filesystems — NOTE(review): write the content BEFORE setting the
// timestamp if the back-dated mtime is what the test relies on).
private void createTestLogFile(String filename, LocalDate date) throws Exception {
Path file = tempLogDir.resolve(filename);
Files.createFile(file);
Files.setLastModifiedTime(file,
java.nio.file.attribute.FileTime.from(
date.atStartOfDay(java.time.ZoneId.systemDefault()).toInstant()
)
);
// Write some content
Files.writeString(file, "Test log content for " + filename);
}
}
Best Practices
1. Compliance Considerations
public class ComplianceRetentionStrategy {

    /**
     * GDPR-compliant retention - delete personal data after retention period.
     * Heuristic PII screen: flags entries containing an "@" (possible email)
     * or an NNN-NN-NNNN digit group (possible SSN).
     */
    public boolean containsPersonalData(String logEntry) {
        boolean hasEmailMarker = logEntry.indexOf('@') >= 0;
        boolean hasSsnPattern = logEntry.matches(".*\\d{3}-\\d{2}-\\d{4}.*");
        return hasEmailMarker || hasSsnPattern;
    }

    /**
     * HIPAA compliance - strict retention for health data.
     *
     * @return six years expressed in days (2190)
     */
    public int getHipaaRetentionDays() {
        return 6 * 365;
    }
}
2. Monitoring and Alerting
@Component
public class RetentionMonitor {

    // FIX: the original referenced retentionService and alertService without
    // declaring them; declare and constructor-inject them so the class compiles.
    private final LogRetentionService retentionService;
    // NOTE(review): AlertService is assumed to be defined elsewhere in the project.
    private final AlertService alertService;

    public RetentionMonitor(LogRetentionService retentionService, AlertService alertService) {
        this.retentionService = retentionService;
        this.alertService = alertService;
    }

    /**
     * Daily retention health check: alerts on retention policy violations and
     * on total log volume exceeding a fixed 50GB threshold.
     */
    @Scheduled(cron = "0 0 6 * * ?") // Daily at 6 AM
    public void checkRetentionHealth() {
        RetentionReport report = retentionService.validateRetentionCompliance();
        if (!report.getViolations().isEmpty()) {
            alertService.sendAlert(
                "Log retention policy violations detected: " +
                report.getViolations().size() + " files"
            );
        }
        if (report.getTotalSizeBytes() > parseSize("50GB")) {
            alertService.sendAlert(
                "Log storage approaching limit: " +
                report.getTotalSizeHumanReadable()
            );
        }
    }

    /**
     * Parse a human-readable size such as "50GB" or "512MB" into bytes.
     * FIX: the original delegated to an undefined {@code Size.parse} helper;
     * this is a concrete stand-in supporting B/KB/MB/GB suffixes.
     *
     * @throws NumberFormatException if the numeric portion is not an integer
     */
    private long parseSize(String size) {
        String normalized = size.trim().toUpperCase();
        long multiplier = 1L;
        if (normalized.endsWith("GB")) {
            multiplier = 1024L * 1024L * 1024L;
            normalized = normalized.substring(0, normalized.length() - 2);
        } else if (normalized.endsWith("MB")) {
            multiplier = 1024L * 1024L;
            normalized = normalized.substring(0, normalized.length() - 2);
        } else if (normalized.endsWith("KB")) {
            multiplier = 1024L;
            normalized = normalized.substring(0, normalized.length() - 2);
        } else if (normalized.endsWith("B")) {
            normalized = normalized.substring(0, normalized.length() - 1);
        }
        return Long.parseLong(normalized.trim()) * multiplier;
    }
}
3. Performance Optimization
public class ParallelRetentionService {

    // Shared fixed-size pool for file-processing tasks.
    // NOTE(review): the executor is never shut down — confirm lifecycle hooks.
    private final ExecutorService executor = Executors.newFixedThreadPool(4);

    /**
     * Process every regular file under {@code logDir} concurrently.
     * Collects the file list first, then submits one task per file and waits
     * for all of them; a failed task's exception propagates wrapped in
     * {@code ExecutionException}.
     *
     * @param logDir root of the directory tree to scan
     * @throws Exception if the traversal fails or any task throws
     */
    public void parallelCleanup(Path logDir) throws Exception {
        // FIX: Files.walk holds directory handles open until the stream is
        // closed; the original never closed it, leaking file descriptors.
        List<Path> filesToProcess;
        try (java.util.stream.Stream<Path> walk = Files.walk(logDir)) {
            filesToProcess = walk
                .filter(Files::isRegularFile)
                .collect(Collectors.toList());
        }
        // NOTE(review): processFile is not defined in this snippet; assumed to
        // exist elsewhere in the class/project.
        List<Future<Boolean>> futures = filesToProcess.stream()
            .map(file -> executor.submit(() -> processFile(file)))
            .collect(Collectors.toList());
        // Wait for all tasks; get() rethrows task failures so they are not
        // silently dropped.
        for (Future<Boolean> future : futures) {
            future.get();
        }
    }
}
Conclusion
This comprehensive log retention implementation provides:
- Flexible retention policies for different log types
- Automated cleanup with scheduled tasks
- Cloud archiving for long-term storage
- Compression to save disk space
- Compliance monitoring and reporting
- REST API for management and monitoring
- Adaptive strategies based on disk usage
Key benefits:
- Storage optimization through automatic cleanup and compression
- Compliance adherence with configurable retention periods
- Cost reduction by archiving to cheaper cloud storage
- Operational efficiency with automated management
- Monitoring capabilities with health checks and alerts
This system ensures that log files are managed according to organizational policies while maintaining accessibility for debugging and compliance requirements.