Understanding AWS IRSA for Kubernetes
AWS IAM Roles for Service Accounts (IRSA) allows Kubernetes pods to assume IAM roles using AWS Security Token Service (STS) and OpenID Connect (OIDC). This provides secure, fine-grained AWS access control without storing long-term credentials in your applications.
Prerequisites
- AWS EKS Cluster with OIDC provider configured
- IAM roles configured for service accounts
- Java application running in Kubernetes
- AWS CLI and kubectl configured
Step 1: Setup IAM Role and Service Account
Create OIDC Provider (if not exists)
eksctl utils associate-iam-oidc-provider --cluster <cluster-name> --region <region> --approve
Create IAM Policy
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::your-application-bucket/*",
"arn:aws:s3:::your-application-bucket"
]
}
]
}
Create IAM Role and Service Account
# irsa-setup.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: java-app-service-account
  namespace: default
  annotations:
    # The IAM role's trust policy must allow the cluster's OIDC provider to
    # assume it for this exact namespace/service-account pair.
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/java-app-role
Step 2: Java Application Dependencies
Maven Configuration (pom.xml)
<properties>
<aws.java.sdk.version>2.20.0</aws.java.sdk.version>
</properties>
<dependencies>
<!-- AWS SDK v2 S3 client. IRSA works through the default credentials chain;
     no IRSA-specific code or dependency is needed beyond s3 + sts below. -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>s3</artifactId>
<version>${aws.java.sdk.version}</version>
</dependency>
<!-- STS for role assumption: required on the classpath so the SDK can exchange
     the pod's projected web-identity token for temporary role credentials. -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>sts</artifactId>
<version>${aws.java.sdk.version}</version>
</dependency>
<!-- HTTP client used by the synchronous SDK clients (tuned in AwsConfig below). -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>apache-client</artifactId>
<version>${aws.java.sdk.version}</version>
</dependency>
<!-- Logging.
     NOTE(review): slf4j-simple is an SLF4J *provider* and will clash with the
     Logback provider pulled in transitively by spring-boot-starter-web
     (multiple-providers warning, unpredictable binding). If you keep Spring
     Boot, drop this dependency; keep it only for the plain-Java example. -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>2.0.7</version>
</dependency>
<!-- Spring Boot (optional) — note this is Boot 3.x, i.e. the Jakarta EE 9+
     namespace: lifecycle annotations must come from jakarta.annotation. -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<version>3.1.0</version>
</dependency>
</dependencies>
Step 3: Java Implementation
Basic AWS Service Client with IRSA
package com.example.irsa;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
import software.amazon.awssdk.services.s3.model.S3Object;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InputStream;
import java.nio.file.Paths;
/**
 * Minimal S3 wrapper that authenticates via IRSA.
 *
 * <p>No keys are configured anywhere: {@code DefaultCredentialsProvider} walks
 * the default chain, and inside an EKS pod with an annotated service account
 * the chain resolves to the projected web-identity token, which the SDK
 * exchanges with STS for temporary role credentials.
 *
 * <p>FIX: now implements {@link AutoCloseable} so callers can use
 * try-with-resources; the original exposed {@code close()} but could not be
 * used in a try-with-resources statement.
 */
public class AwsIrsaExample implements AutoCloseable {

    private static final Logger logger = LoggerFactory.getLogger(AwsIrsaExample.class);

    private final S3Client s3Client;
    private final String bucketName;

    /**
     * @param region     AWS region id, e.g. "us-east-1"
     * @param bucketName bucket that all operations of this instance target
     */
    public AwsIrsaExample(String region, String bucketName) {
        this.s3Client = S3Client.builder()
                .region(Region.of(region))
                // Under IRSA the default chain reads AWS_WEB_IDENTITY_TOKEN_FILE / AWS_ROLE_ARN.
                .credentialsProvider(DefaultCredentialsProvider.create())
                .build();
        this.bucketName = bucketName;
    }

    /**
     * Uploads the file at {@code filePath} to {@code s3://<bucket>/<key>}.
     *
     * @throws RuntimeException wrapping any SDK failure (auth, network, missing file)
     */
    public void uploadFile(String key, String filePath) {
        try {
            PutObjectRequest putObjectRequest = PutObjectRequest.builder()
                    .bucket(bucketName)
                    .key(key)
                    .build();
            s3Client.putObject(putObjectRequest, RequestBody.fromFile(Paths.get(filePath)));
            logger.info("Successfully uploaded file to s3://{}/{}", bucketName, key);
        } catch (Exception e) {
            logger.error("Error uploading file to S3", e);
            throw new RuntimeException("S3 upload failed", e);
        }
    }

    /**
     * Logs every object in the first response page (S3 returns at most 1000
     * keys per ListObjectsV2 response; pagination is out of scope here).
     *
     * @throws RuntimeException wrapping any SDK failure
     */
    public void listBucketObjects() {
        try {
            ListObjectsV2Request listRequest = ListObjectsV2Request.builder()
                    .bucket(bucketName)
                    .build();
            ListObjectsV2Response listResponse = s3Client.listObjectsV2(listRequest);
            logger.info("Objects in bucket '{}':", bucketName);
            for (S3Object s3Object : listResponse.contents()) {
                logger.info(" - {} (size: {} bytes)", s3Object.key(), s3Object.size());
            }
        } catch (Exception e) {
            logger.error("Error listing bucket objects", e);
            throw new RuntimeException("S3 list operation failed", e);
        }
    }

    /** Releases the SDK client and its underlying connection pool. */
    @Override
    public void close() {
        if (s3Client != null) {
            s3Client.close();
        }
    }
}
Spring Boot Service with IRSA
package com.example.irsa.service;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
import software.amazon.awssdk.services.s3.model.S3Object;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.util.List;
import java.util.stream.Collectors;
@Service
public class S3Service {
@Value("${aws.region:us-east-1}")
private String region;
@Value("${app.s3.bucket-name}")
private String bucketName;
private S3Client s3Client;
@PostConstruct
public void init() {
this.s3Client = S3Client.builder()
.region(Region.of(region))
.credentialsProvider(DefaultCredentialsProvider.create())
.build();
}
public List<String> listObjectKeys() {
ListObjectsV2Request request = ListObjectsV2Request.builder()
.bucket(bucketName)
.build();
ListObjectsV2Response response = s3Client.listObjectsV2(request);
return response.contents().stream()
.map(S3Object::key)
.collect(Collectors.toList());
}
public boolean objectExists(String key) {
try {
s3Client.headObject(b -> b.bucket(bucketName).key(key));
return true;
} catch (Exception e) {
return false;
}
}
@PreDestroy
public void cleanup() {
if (s3Client != null) {
s3Client.close();
}
}
}
Kubernetes Deployment Manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: java-irsa-app
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: java-irsa-app
  template:
    metadata:
      labels:
        app: java-irsa-app
    spec:
      # Must reference the IRSA-annotated ServiceAccount, otherwise the pod
      # receives no web-identity token and no role.
      serviceAccountName: java-app-service-account
      containers:
        - name: java-app
          image: your-registry/java-irsa-app:latest
          ports:
            - containerPort: 8080
          env:
            - name: AWS_REGION
              value: "us-east-1"
            # FIX: binds to @Value("${app.s3.bucket-name}") via Spring's
            # SystemEnvironmentPropertySource name translation; the original
            # S3_BUCKET_NAME did not map to that property.
            - name: APP_S3_BUCKET_NAME
              value: "your-application-bucket"
            # Kept for backward compatibility with non-Spring consumers.
            - name: S3_BUCKET_NAME
              value: "your-application-bucket"
          resources:
            requests:
              memory: "512Mi"
              cpu: "250m"
            limits:
              memory: "1Gi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /actuator/health
              port: 8080
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /actuator/health
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: java-irsa-service
spec:
  selector:
    app: java-irsa-app
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8080
  type: LoadBalancer
Step 4: Advanced Configuration
Custom AWS Configuration
package com.example.irsa.config;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import java.time.Duration;
@Configuration
public class AwsConfig {
@Value("${aws.region:us-east-1}")
private String region;
@Bean
public AwsCredentialsProvider awsCredentialsProvider() {
return DefaultCredentialsProvider.create();
}
@Bean
public S3Client s3Client(AwsCredentialsProvider credentialsProvider) {
return S3Client.builder()
.region(Region.of(region))
.credentialsProvider(credentialsProvider)
.httpClientBuilder(ApacheHttpClient.builder()
.maxConnections(100)
.connectionTimeout(Duration.ofSeconds(10))
.connectionAcquisitionTimeout(Duration.ofSeconds(5)))
.build();
}
}
Error Handling and Retry Logic
package com.example.irsa.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.core.retry.backoff.FullJitterBackoffStrategy;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.S3Exception;
import java.time.Duration;
import java.util.function.Supplier;
/**
 * Hand-rolled retry helper for S3 operations with exponential backoff.
 *
 * <p>NOTE(review): the SDK's built-in RetryPolicy already covers most
 * client-level retries; this utility is for retry semantics above the client.
 */
public final class AwsRetryUtil {

    private static final Logger logger = LoggerFactory.getLogger(AwsRetryUtil.class);

    /** Utility class — not instantiable. */
    private AwsRetryUtil() {
    }

    /**
     * Runs {@code operation}, retrying retryable S3 failures with exponential
     * backoff, for at most {@code maxRetries} total attempts.
     *
     * @throws IllegalArgumentException if {@code maxRetries < 1} — the original
     *         silently skipped the operation and threw a generic RuntimeException
     *         without ever invoking it
     * @throws S3Exception the last failure when retries are exhausted, or
     *         immediately when the error is not retryable
     */
    public static <T> T executeWithRetry(Supplier<T> operation, String operationName, int maxRetries) {
        if (maxRetries < 1) {
            throw new IllegalArgumentException("maxRetries must be >= 1, got " + maxRetries);
        }
        int attempt = 0;
        while (attempt < maxRetries) {
            try {
                return operation.get();
            } catch (S3Exception e) {
                attempt++;
                if (attempt >= maxRetries) {
                    logger.error("Operation '{}' failed after {} attempts", operationName, maxRetries, e);
                    throw e;
                }
                if (isRetryableError(e)) {
                    long delay = calculateBackoff(attempt);
                    logger.warn("Operation '{}' failed (attempt {}/{}), retrying in {} ms",
                        operationName, attempt, maxRetries, delay);
                    sleep(delay);
                } else {
                    // Non-retryable (e.g. 403/404): fail fast.
                    throw e;
                }
            }
        }
        // Unreachable: the loop always returns or throws once maxRetries >= 1.
        throw new IllegalStateException("Unreachable retry exit for operation: " + operationName);
    }

    /**
     * Transient failures worth retrying: 429 (throttling — FIX: missed by the
     * original), 500, 503, and S3's explicit "SlowDown" error code.
     */
    private static boolean isRetryableError(S3Exception e) {
        int status = e.statusCode();
        if (status == 429 || status == 500 || status == 503) {
            return true;
        }
        // FIX: awsErrorDetails() can be null when no error body was parsed;
        // the original dereferenced it unconditionally.
        return e.awsErrorDetails() != null
                && "SlowDown".equals(e.awsErrorDetails().errorCode());
    }

    /** Exponential backoff: 2s, 4s, 8s, ... capped at 30s. */
    private static long calculateBackoff(int attempt) {
        return Math.min(1000L * (1L << attempt), 30000L);
    }

    /** Sleeps for the backoff window, restoring the interrupt flag if interrupted. */
    private static void sleep(long milliseconds) {
        try {
            Thread.sleep(milliseconds);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Thread interrupted during backoff", e);
        }
    }
}
Step 5: Testing and Verification
Local Testing Configuration
// For local development without IRSA — active whenever the "kubernetes" profile is NOT enabled.
@Profile("!kubernetes")
@Configuration
public class LocalAwsConfig {
// @Primary makes this bean win over any other AwsCredentialsProvider in the context.
@Bean
@Primary
public AwsCredentialsProvider localAwsCredentials() {
// Use default AWS credential chain (env vars, ~/.aws/credentials, etc.)
// NOTE(review): this is the same provider the in-cluster config uses; the
// separate profile only makes the local/cluster split explicit — confirm needed.
return DefaultCredentialsProvider.create();
}
}
Verification in Kubernetes
# Deploy the application
kubectl apply -f deployment.yaml

# Check pods
kubectl get pods -l app=java-irsa-app

# View logs
kubectl logs -f deployment/java-irsa-app

# Test service
kubectl port-forward svc/java-irsa-service 8080:80

# Verify IRSA is working
curl http://localhost:8080/api/s3/objects
Best Practices
- Least Privilege: Assign only necessary permissions to the IAM role
- Resource Naming: Use meaningful names for IAM roles and service accounts
- Monitoring: Implement CloudTrail logging for AWS API calls
- Security: Regularly rotate IAM roles and review permissions
- Resource Management: Always close AWS clients to prevent resource leaks
Troubleshooting
Common Issues:
- OIDC provider not configured for EKS cluster
- Incorrect IAM role trust relationship
- Missing service account annotation
- Region configuration mismatches
Debug Commands:
# Verify service account kubectl describe serviceaccount java-app-service-account # Check pod service account kubectl describe pod <pod-name> # Test AWS credentials in pod kubectl exec -it <pod-name> -- aws sts get-caller-identity
This implementation provides a secure, production-ready approach to using AWS IRSA with Java applications in Kubernetes, eliminating the need for hardcoded credentials while maintaining fine-grained access control.