Compare commits

..

1 Commit

Author SHA1 Message Date
mr-tz
48fc4a54de add gemini cli workflows 2025-08-08 09:24:35 +00:00
178 changed files with 3174 additions and 8727 deletions

View File

@@ -1,27 +0,0 @@
[tool.bumpversion]
current_version = "9.3.1"
[[tool.bumpversion.files]]
filename = "capa/version.py"
search = '__version__ = "{current_version}"'
replace = '__version__ = "{new_version}"'
[[tool.bumpversion.files]]
filename = "capa/ida/plugin/ida-plugin.json"
search = '"version": "{current_version}"'
replace = '"version": "{new_version}"'
[[tool.bumpversion.files]]
filename = "capa/ida/plugin/ida-plugin.json"
search = '"flare-capa=={current_version}"'
replace = '"flare-capa=={new_version}"'
[[tool.bumpversion.files]]
filename = "CHANGELOG.md"
search = "v{current_version}...master"
replace = "v{current_version}...{new_version}"
[[tool.bumpversion.files]]
filename = "CHANGELOG.md"
search = "master (unreleased)"
replace = "v{new_version}"

View File

@@ -4,13 +4,6 @@ updates:
directory: "/"
schedule:
interval: "weekly"
groups:
vivisect:
patterns:
- "vivisect"
- "pyasn1"
- "pyasn1-modules"
- "msgpack"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-patch"]

2
.github/flake8.ini vendored
View File

@@ -33,6 +33,8 @@ per-file-ignores =
scripts/*: T201
# capa.exe is meant to print output
capa/main.py: T201
# IDA tests emit results to output window so need to print
tests/test_ida_features.py: T201
# utility used to find the Binary Ninja API via invoking python.exe
capa/features/extractors/binja/find_binja_api.py: T201

View File

@@ -63,9 +63,6 @@ ignore_missing_imports = True
[mypy-PyQt5.*]
ignore_missing_imports = True
[mypy-binaryninja]
ignore_missing_imports = True
[mypy-binaryninja.*]
ignore_missing_imports = True

View File

@@ -20,5 +20,3 @@ closes #issue_number
- [ ] No new tests needed
<!-- Please help us keeping capa documentation up-to-date -->
- [ ] No documentation update needed
<!-- Please indicate if and how you have used AI to generate (parts of) your code submission. Include your prompt, model, tool, etc. -->
- [ ] This submission includes AI-generated code and I have provided details in the description.

View File

@@ -17,8 +17,6 @@ import sys
import capa.rules.cache
from PyInstaller.utils.hooks import collect_submodules
from pathlib import Path
# SPECPATH is a global variable which points to .spec file path
@@ -36,7 +34,6 @@ a = Analysis(
["../../capa/main.py"],
pathex=["capa"],
binaries=None,
hiddenimports=collect_submodules('rich'),
datas=[
# when invoking pyinstaller from the project root,
# this gets invoked from the directory of the spec file,
@@ -77,7 +74,6 @@ a = Analysis(
# only be installed locally.
"binaryninja",
"ida",
"ghidra",
# remove once https://github.com/mandiant/capa/issues/2681 has
# been addressed by PyInstaller
"pkg_resources",

View File

@@ -1,62 +0,0 @@
name: black auto-format
on:
pull_request:
branches: [ master ]
paths-ignore:
- 'web/**'
- 'doc/**'
- '**.md'
workflow_dispatch: # allow manual trigger
permissions:
contents: write
jobs:
black-format:
# only run on dependabot PRs or manual trigger
if: github.actor == 'dependabot[bot]' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-22.04
steps:
- name: Checkout repository
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
ref: ${{ github.head_ref }}
# need a token with write access to push the commit
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Python 3.13
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: "3.13"
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
- name: Run isort
run: pre-commit run isort --all-files
- name: Run black/continue
# black returns non-zero error code after formatting, which is what we expect
continue-on-error: true
run: pre-commit run black --all-files
- name: Check for changes
id: changes
run: |
if git diff --quiet; then
echo "has_changes=false" >> "$GITHUB_OUTPUT"
else
echo "has_changes=true" >> "$GITHUB_OUTPUT"
fi
- name: Commit and push formatting changes
if: steps.changes.outputs.has_changes == 'true'
run: |
git config user.name "${GITHUB_ACTOR}"
git config user.email "${GITHUB_ACTOR_ID}+${GITHUB_ACTOR}@users.noreply.github.com"
git add -A
git commit -m "style: auto-format with black and isort"
git push

View File

@@ -28,11 +28,6 @@ jobs:
artifact_name: capa
asset_name: linux
python_version: '3.10'
# for Ghidra
java-version: '21'
ghidra-version: '12.0'
public-version: 'PUBLIC_20251205'
ghidra-sha256: 'af43e8cfb2fa4490cf6020c3a2bde25c159d83f45236a0542688a024e8fc1941'
- os: ubuntu-22.04-arm
artifact_name: capa
asset_name: linux-arm64
@@ -51,8 +46,8 @@ jobs:
# artifact_name: capa.exe
# asset_name: windows-arm64
# python_version: '3.12'
- os: macos-15-intel
# macos-15-intel is the lowest native intel build
- os: macos-13
# use older macOS for assumed better portability
artifact_name: capa
asset_name: macos
python_version: '3.10'
@@ -111,24 +106,6 @@ jobs:
run: |
7z e "tests/data/dynamic/cape/v2.2/d46900384c78863420fb3e297d0a2f743cd2b6b3f7f82bf64059a168e07aceb7.json.gz"
dist/capa -d "d46900384c78863420fb3e297d0a2f743cd2b6b3f7f82bf64059a168e07aceb7.json"
- name: Set up Java ${{ matrix.java-version }}
if: matrix.os == 'ubuntu-22.04' && matrix.python_version == '3.10'
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0
with:
distribution: 'temurin'
java-version: ${{ matrix.java-version }}
- name: Install Ghidra ${{ matrix.ghidra-version }}
if: matrix.os == 'ubuntu-22.04' && matrix.python_version == '3.10'
run: |
mkdir ./.github/ghidra
wget "https://github.com/NationalSecurityAgency/ghidra/releases/download/Ghidra_${{ matrix.ghidra-version }}_build/ghidra_${{ matrix.ghidra-version }}_${{ matrix.public-version }}.zip" -O ./.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC.zip
echo "${{ matrix.ghidra-sha256 }} ./.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC.zip" | sha256sum -c -
unzip .github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC.zip -d .github/ghidra/
- name: Does it run (Ghidra)?
if: matrix.os == 'ubuntu-22.04' && matrix.python_version == '3.10'
env:
GHIDRA_INSTALL_DIR: ${{ github.workspace }}/.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC
run: dist/capa -b ghidra -d "tests/data/Practical Malware Analysis Lab 01-01.dll_"
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with:
name: ${{ matrix.asset_name }}
@@ -167,7 +144,7 @@ jobs:
- name: Set zip name
run: echo "zip_name=capa-${GITHUB_REF#refs/tags/}-${{ matrix.asset_name }}.zip" >> $GITHUB_ENV
- name: Zip ${{ matrix.artifact_name }} into ${{ env.zip_name }}
run: zip ${ZIP_NAME} ${{ matrix.artifact_name }}
run: zip ${{ env.zip_name }} ${{ matrix.artifact_name }}
- name: Upload ${{ env.zip_name }} to GH Release
uses: svenstaro/upload-release-action@2728235f7dc9ff598bd86ce3c274b74f802d2208 # v2
with:

304
.github/workflows/gemini-cli.yml vendored Normal file
View File

@@ -0,0 +1,304 @@
name: '💬 Gemini CLI'
on:
pull_request_review_comment:
types:
- 'created'
pull_request_review:
types:
- 'submitted'
issue_comment:
types:
- 'created'
concurrency:
group: '${{ github.workflow }}-${{ github.event.issue.number }}'
cancel-in-progress: |-
${{ github.event.sender.type == 'User' && ( github.event.issue.author_association == 'OWNER' || github.event.issue.author_association == 'MEMBER' || github.event.issue.author_association == 'COLLABORATOR') }}
defaults:
run:
shell: 'bash'
permissions:
contents: 'write'
id-token: 'write'
pull-requests: 'write'
issues: 'write'
jobs:
gemini-cli:
# This condition is complex to ensure we only run when explicitly invoked.
if: |-
github.event_name == 'workflow_dispatch' ||
(
github.event_name == 'issues' && github.event.action == 'opened' &&
contains(github.event.issue.body, '@gemini-cli') &&
!contains(github.event.issue.body, '@gemini-cli /review') &&
!contains(github.event.issue.body, '@gemini-cli /triage') &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.issue.author_association)
) ||
(
(
github.event_name == 'issue_comment' ||
github.event_name == 'pull_request_review_comment'
) &&
contains(github.event.comment.body, '@gemini-cli') &&
!contains(github.event.comment.body, '@gemini-cli /review') &&
!contains(github.event.comment.body, '@gemini-cli /triage') &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
) ||
(
github.event_name == 'pull_request_review' &&
contains(github.event.review.body, '@gemini-cli') &&
!contains(github.event.review.body, '@gemini-cli /review') &&
!contains(github.event.review.body, '@gemini-cli /triage') &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.review.author_association)
)
timeout-minutes: 10
runs-on: 'ubuntu-latest'
steps:
- name: 'Generate GitHub App Token'
id: 'generate_token'
if: |-
${{ vars.APP_ID }}
uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ vars.APP_ID }}'
private-key: '${{ secrets.APP_PRIVATE_KEY }}'
- name: 'Get context from event'
id: 'get_context'
env:
EVENT_NAME: '${{ github.event_name }}'
EVENT_PAYLOAD: '${{ toJSON(github.event) }}'
run: |-
set -euo pipefail
USER_REQUEST=""
ISSUE_NUMBER=""
IS_PR="false"
if [[ "${EVENT_NAME}" == "issues" ]]; then
USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .issue.body)
ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .issue.number)
elif [[ "${EVENT_NAME}" == "issue_comment" ]]; then
USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .comment.body)
ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .issue.number)
if [[ $(echo "${EVENT_PAYLOAD}" | jq -r .issue.pull_request) != "null" ]]; then
IS_PR="true"
fi
elif [[ "${EVENT_NAME}" == "pull_request_review" ]]; then
USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .review.body)
ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .pull_request.number)
IS_PR="true"
elif [[ "${EVENT_NAME}" == "pull_request_review_comment" ]]; then
USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .comment.body)
ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .pull_request.number)
IS_PR="true"
fi
# Clean up user request
USER_REQUEST=$(echo "${USER_REQUEST}" | sed 's/.*@gemini-cli//' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
{
echo "user_request=${USER_REQUEST}"
echo "issue_number=${ISSUE_NUMBER}"
echo "is_pr=${IS_PR}"
} >> "${GITHUB_OUTPUT}"
- name: 'Set up git user for commits'
run: |-
git config --global user.name 'gemini-cli[bot]'
git config --global user.email 'gemini-cli[bot]@users.noreply.github.com'
- name: 'Checkout PR branch'
if: |-
${{ steps.get_context.outputs.is_pr == 'true' }}
uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
with:
token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
repository: '${{ github.repository }}'
ref: 'refs/pull/${{ steps.get_context.outputs.issue_number }}/head'
fetch-depth: 0
- name: 'Checkout main branch'
if: |-
${{ steps.get_context.outputs.is_pr == 'false' }}
uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
with:
token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
repository: '${{ github.repository }}'
fetch-depth: 0
- name: 'Acknowledge request'
env:
GITHUB_ACTOR: '${{ github.actor }}'
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
REPOSITORY: '${{ github.repository }}'
REQUEST_TYPE: '${{ steps.get_context.outputs.request_type }}'
run: |-
set -euo pipefail
MESSAGE="@${GITHUB_ACTOR} I've received your request and I'm working on it now! 🤖"
if [[ -n "${MESSAGE}" ]]; then
gh issue comment "${ISSUE_NUMBER}" \
--body "${MESSAGE}" \
--repo "${REPOSITORY}"
fi
- name: 'Get description'
id: 'get_description'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
IS_PR: '${{ steps.get_context.outputs.is_pr }}'
ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
run: |-
set -euo pipefail
if [[ "${IS_PR}" == "true" ]]; then
DESCRIPTION=$(gh pr view "${ISSUE_NUMBER}" --json body --template '{{.body}}')
else
DESCRIPTION=$(gh issue view "${ISSUE_NUMBER}" --json body --template '{{.body}}')
fi
{
echo "description<<EOF"
echo "${DESCRIPTION}"
echo "EOF"
} >> "${GITHUB_OUTPUT}"
- name: 'Get comments'
id: 'get_comments'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
IS_PR: '${{ steps.get_context.outputs.is_pr }}'
ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
run: |-
set -euo pipefail
if [[ "${IS_PR}" == "true" ]]; then
COMMENTS=$(gh pr view "${ISSUE_NUMBER}" --json comments --template '{{range .comments}}{{.author.login}}: {{.body}}{{"\n"}}{{end}}')
else
COMMENTS=$(gh issue view "${ISSUE_NUMBER}" --json comments --template '{{range .comments}}{{.author.login}}: {{.body}}{{"\n"}}{{end}}')
fi
{
echo "comments<<EOF"
echo "${COMMENTS}"
echo "EOF"
} >> "${GITHUB_OUTPUT}"
- name: 'Run Gemini'
id: 'run_gemini'
uses: 'google-github-actions/run-gemini-cli@v0'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
REPOSITORY: '${{ github.repository }}'
USER_REQUEST: '${{ steps.get_context.outputs.user_request }}'
ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
IS_PR: '${{ steps.get_context.outputs.is_pr }}'
with:
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"maxSessionTurns": 50,
"telemetry": {
"enabled": false,
"target": "gcp"
}
}
prompt: |-
## Role
You are a helpful AI assistant invoked via a CLI interface in a GitHub workflow. You have access to tools to interact with the repository and respond to the user.
## Context
- **Repository**: `${{ github.repository }}`
- **Triggering Event**: `${{ github.event_name }}`
- **Issue/PR Number**: `${{ steps.get_context.outputs.issue_number }}`
- **Is this a PR?**: `${{ steps.get_context.outputs.is_pr }}`
- **Issue/PR Description**:
`${{ steps.get_description.outputs.description }}`
- **Comments**:
`${{ steps.get_comments.outputs.comments }}`
## User Request
The user has sent the following request:
`${{ steps.get_context.outputs.user_request }}`
## How to Respond to Issues, PR Comments, and Questions
This workflow supports three main scenarios:
1. **Creating a Fix for an Issue**
- Carefully read the user request and the related issue or PR description.
- Use available tools to gather all relevant context (e.g., `gh issue view`, `gh pr view`, `gh pr diff`, `cat`, `head`, `tail`).
- Identify the root cause of the problem before proceeding.
- **Show and maintain a plan as a checklist**:
- At the very beginning, outline the steps needed to resolve the issue or address the request and post them as a checklist comment on the issue or PR (use GitHub markdown checkboxes: `- [ ] Task`).
- Example:
```
### Plan
- [ ] Investigate the root cause
- [ ] Implement the fix in `file.py`
- [ ] Add/modify tests
- [ ] Update documentation
- [ ] Verify the fix and close the issue
```
- Use: `gh pr comment "${ISSUE_NUMBER}" --body "<plan>"` or `gh issue comment "${ISSUE_NUMBER}" --body "<plan>"` to post the initial plan.
- As you make progress, keep the checklist visible and up to date by editing the same comment (check off completed tasks with `- [x]`).
- To update the checklist:
1. Find the comment ID for the checklist (use `gh pr comment list "${ISSUE_NUMBER}"` or `gh issue comment list "${ISSUE_NUMBER}"`).
2. Edit the comment with the updated checklist:
- For PRs: `gh pr comment --edit <comment-id> --body "<updated plan>"`
- For Issues: `gh issue comment --edit <comment-id> --body "<updated plan>"`
3. The checklist should only be maintained as a comment on the issue or PR. Do not track or update the checklist in code files.
- If the fix requires code changes, determine which files and lines are affected. If clarification is needed, note any questions for the user.
- Make the necessary code or documentation changes using the available tools (e.g., `write_file`). Ensure all changes follow project conventions and best practices. Reference all shell variables as `"${VAR}"` (with quotes and braces) to prevent errors.
- Run any relevant tests or checks to verify the fix works as intended. If possible, provide evidence (test output, screenshots, etc.) that the issue is resolved.
- **Branching and Committing**:
- **NEVER commit directly to the `main` branch.**
- If you are working on a **pull request** (`IS_PR` is `true`), the correct branch is already checked out. Simply commit and push to it.
- `git add .`
- `git commit -m "feat: <describe the change>"`
- `git push`
- If you are working on an **issue** (`IS_PR` is `false`), create a new branch for your changes. A good branch name would be `issue/${ISSUE_NUMBER}/<short-description>`.
- `git checkout -b issue/${ISSUE_NUMBER}/my-fix`
- `git add .`
- `git commit -m "feat: <describe the fix>"`
- `git push origin issue/${ISSUE_NUMBER}/my-fix`
- After pushing, you can create a pull request: `gh pr create --title "Fixes #${ISSUE_NUMBER}: <short title>" --body "This PR addresses issue #${ISSUE_NUMBER}."`
- Summarize what was changed and why in a markdown file: `write_file("response.md", "<your response here>")`
- Post the response as a comment:
- For PRs: `gh pr comment "${ISSUE_NUMBER}" --body-file response.md`
- For Issues: `gh issue comment "${ISSUE_NUMBER}" --body-file response.md`
2. **Addressing Comments on a Pull Request**
- Read the specific comment and the context of the PR.
- Use tools like `gh pr view`, `gh pr diff`, and `cat` to understand the code and discussion.
- If the comment requests a change or clarification, follow the same process as for fixing an issue: create a checklist plan, implement, test, and commit any required changes, updating the checklist as you go.
- **Committing Changes**: The correct PR branch is already checked out. Simply add, commit, and push your changes.
- `git add .`
- `git commit -m "fix: address review comments"`
- `git push`
- If the comment is a question, answer it directly and clearly, referencing code or documentation as needed.
- Document your response in `response.md` and post it as a PR comment: `gh pr comment "${ISSUE_NUMBER}" --body-file response.md`
3. **Answering Any Question on an Issue**
- Read the question and the full issue context using `gh issue view` and related tools.
- Research or analyze the codebase as needed to provide an accurate answer.
- If the question requires code or documentation changes, follow the fix process above, including creating and updating a checklist plan and **creating a new branch for your changes as described in section 1.**
- Write a clear, concise answer in `response.md` and post it as an issue comment: `gh issue comment "${ISSUE_NUMBER}" --body-file response.md`
## Guidelines
- **Be concise and actionable.** Focus on solving the user's problem efficiently.
- **Always commit and push your changes if you modify code or documentation.**
- **If you are unsure about the fix or answer, explain your reasoning and ask clarifying questions.**
- **Follow project conventions and best practices.**

View File

@@ -0,0 +1,130 @@
name: '🏷️ Gemini Automated Issue Triage'
on:
issues:
types:
- 'opened'
- 'reopened'
issue_comment:
types:
- 'created'
workflow_dispatch:
inputs:
issue_number:
description: 'issue number to triage'
required: true
type: 'number'
concurrency:
group: '${{ github.workflow }}-${{ github.event.issue.number }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
issues: 'write'
statuses: 'write'
jobs:
triage-issue:
if: |-
github.event_name == 'issues' ||
github.event_name == 'workflow_dispatch' ||
(
github.event_name == 'issue_comment' &&
contains(github.event.comment.body, '@gemini-cli /triage') &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
)
timeout-minutes: 5
runs-on: 'ubuntu-latest'
steps:
- name: 'Checkout repository'
uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
- name: 'Generate GitHub App Token'
id: 'generate_token'
if: |-
${{ vars.APP_ID }}
uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ vars.APP_ID }}'
private-key: '${{ secrets.APP_PRIVATE_KEY }}'
- name: 'Run Gemini Issue Triage'
uses: 'google-github-actions/run-gemini-cli@v0'
id: 'gemini_issue_triage'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
ISSUE_TITLE: '${{ github.event.issue.title }}'
ISSUE_BODY: '${{ github.event.issue.body }}'
ISSUE_NUMBER: '${{ github.event.issue.number }}'
REPOSITORY: '${{ github.repository }}'
with:
gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh label list)",
"run_shell_command(gh issue edit)"
],
"telemetry": {
"enabled": false,
"target": "gcp"
}
}
prompt: |-
## Role
You are an issue triage assistant. Analyze the current GitHub issue
and apply the most appropriate existing labels. Use the available
tools to gather information; do not ask for information to be
provided.
## Steps
1. Run: `gh label list` to get all available labels.
2. Review the issue title and body provided in the environment
variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}".
3. Select the most relevant labels from the existing labels. If
available, set labels that follow the `kind/*`, `area/*`, and
`priority/*` patterns.
4. Apply the selected labels to this issue using:
`gh issue edit "${ISSUE_NUMBER}" --add-label "label1,label2"`
5. If the "status/needs-triage" label is present, remove it using:
`gh issue edit "${ISSUE_NUMBER}" --remove-label "status/needs-triage"`
## Guidelines
- Only use labels that already exist in the repository
- Do not add comments or modify the issue content
- Triage only the current issue
- Assign all applicable labels based on the issue content
- Reference all shell variables as "${VAR}" (with quotes and braces)
- name: 'Post Issue Triage Failure Comment'
if: |-
${{ failure() && steps.gemini_issue_triage.outcome == 'failure' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
script: |-
github.rest.issues.createComment({
owner: '${{ github.repository }}'.split('/')[0],
repo: '${{ github.repository }}'.split('/')[1],
issue_number: '${{ github.event.issue.number }}',
body: 'There is a problem with the Gemini CLI issue triaging. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.'
})

View File

@@ -0,0 +1,123 @@
name: '📋 Gemini Scheduled Issue Triage'
on:
schedule:
- cron: '0 * * * *' # Runs every hour
workflow_dispatch:
concurrency:
group: '${{ github.workflow }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
issues: 'write'
statuses: 'write'
jobs:
triage-issues:
timeout-minutes: 5
runs-on: 'ubuntu-latest'
steps:
- name: 'Checkout repository'
uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
- name: 'Generate GitHub App Token'
id: 'generate_token'
if: |-
${{ vars.APP_ID }}
uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ vars.APP_ID }}'
private-key: '${{ secrets.APP_PRIVATE_KEY }}'
- name: 'Find untriaged issues'
id: 'find_issues'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
GITHUB_REPOSITORY: '${{ github.repository }}'
GITHUB_OUTPUT: '${{ github.output }}'
run: |-
set -euo pipefail
echo '🔍 Finding issues without labels...'
NO_LABEL_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \
--search 'is:open is:issue no:label' --json number,title,body)"
echo '🏷️ Finding issues that need triage...'
NEED_TRIAGE_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \
--search 'is:open is:issue label:"status/needs-triage"' --json number,title,body)"
echo '🔄 Merging and deduplicating issues...'
ISSUES="$(echo "${NO_LABEL_ISSUES}" "${NEED_TRIAGE_ISSUES}" | jq -c -s 'add | unique_by(.number)')"
echo '📝 Setting output for GitHub Actions...'
echo "issues_to_triage=${ISSUES}" >> "${GITHUB_OUTPUT}"
ISSUE_COUNT="$(echo "${ISSUES}" | jq 'length')"
echo "✅ Found ${ISSUE_COUNT} issues to triage! 🎯"
- name: 'Run Gemini Issue Triage'
if: |-
${{ steps.find_issues.outputs.issues_to_triage != '[]' }}
uses: 'google-github-actions/run-gemini-cli@v0'
id: 'gemini_issue_triage'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
ISSUES_TO_TRIAGE: '${{ steps.find_issues.outputs.issues_to_triage }}'
REPOSITORY: '${{ github.repository }}'
with:
gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh label list)",
"run_shell_command(gh issue edit)",
"run_shell_command(gh issue list)"
],
"telemetry": {
"enabled": false,
"target": "gcp"
}
}
prompt: |-
## Role
You are an issue triage assistant. Analyze issues and apply
appropriate labels. Use the available tools to gather information;
do not ask for information to be provided.
## Steps
1. Run: `gh label list`
2. Check environment variable: "${ISSUES_TO_TRIAGE}" (JSON array
of issues)
3. For each issue, apply labels:
`gh issue edit "${ISSUE_NUMBER}" --add-label "label1,label2"`.
If available, set labels that follow the `kind/*`, `area/*`,
and `priority/*` patterns.
4. For each issue, if the `status/needs-triage` label is present,
remove it using:
`gh issue edit "${ISSUE_NUMBER}" --remove-label "status/needs-triage"`
## Guidelines
- Only use existing repository labels
- Do not add comments
- Triage each issue independently
- Reference all shell variables as "${VAR}" (with quotes and braces)

456
.github/workflows/gemini-pr-review.yml vendored Normal file
View File

@@ -0,0 +1,456 @@
name: '🧐 Gemini Pull Request Review'
on:
pull_request:
types:
- 'opened'
- 'reopened'
issue_comment:
types:
- 'created'
pull_request_review_comment:
types:
- 'created'
pull_request_review:
types:
- 'submitted'
workflow_dispatch:
inputs:
pr_number:
description: 'PR number to review'
required: true
type: 'number'
concurrency:
group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
issues: 'write'
pull-requests: 'write'
statuses: 'write'
jobs:
review-pr:
if: |-
github.event_name == 'workflow_dispatch' ||
(
github.event_name == 'pull_request' &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.pull_request.author_association)
) ||
(
(
(
github.event_name == 'issue_comment' &&
github.event.issue.pull_request
) ||
github.event_name == 'pull_request_review_comment'
) &&
contains(github.event.comment.body, '@gemini-cli /review') &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
) ||
(
github.event_name == 'pull_request_review' &&
contains(github.event.review.body, '@gemini-cli /review') &&
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.review.author_association)
)
timeout-minutes: 5
runs-on: 'ubuntu-latest'
steps:
- name: 'Checkout PR code'
uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
- name: 'Generate GitHub App Token'
id: 'generate_token'
if: |-
${{ vars.APP_ID }}
uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ vars.APP_ID }}'
private-key: '${{ secrets.APP_PRIVATE_KEY }}'
- name: 'Get PR details (pull_request & workflow_dispatch)'
id: 'get_pr'
if: |-
${{ github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
EVENT_NAME: '${{ github.event_name }}'
WORKFLOW_PR_NUMBER: '${{ github.event.inputs.pr_number }}'
PULL_REQUEST_NUMBER: '${{ github.event.pull_request.number }}'
run: |-
set -euo pipefail
if [[ "${EVENT_NAME}" = "workflow_dispatch" ]]; then
PR_NUMBER="${WORKFLOW_PR_NUMBER}"
else
PR_NUMBER="${PULL_REQUEST_NUMBER}"
fi
echo "pr_number=${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
# Get PR details
PR_DATA="$(gh pr view "${PR_NUMBER}" --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)"
echo "pr_data=${PR_DATA}" >> "${GITHUB_OUTPUT}"
# Get file changes
CHANGED_FILES="$(gh pr diff "${PR_NUMBER}" --name-only)"
{
echo "changed_files<<EOF"
echo "${CHANGED_FILES}"
echo "EOF"
} >> "${GITHUB_OUTPUT}"
- name: 'Get PR details (issue_comment)'
id: 'get_pr_comment'
if: |-
${{ github.event_name == 'issue_comment' }}
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
COMMENT_BODY: '${{ github.event.comment.body }}'
PR_NUMBER: '${{ github.event.issue.number }}'
run: |-
set -euo pipefail
echo "pr_number=${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
# Extract additional instructions from comment
ADDITIONAL_INSTRUCTIONS="$(
echo "${COMMENT_BODY}" | sed 's/.*@gemini-cli \/review//' | xargs
)"
echo "additional_instructions=${ADDITIONAL_INSTRUCTIONS}" >> "${GITHUB_OUTPUT}"
# Get PR details
PR_DATA="$(gh pr view "${PR_NUMBER}" --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)"
echo "pr_data=${PR_DATA}" >> "${GITHUB_OUTPUT}"
# Get file changes
CHANGED_FILES="$(gh pr diff "${PR_NUMBER}" --name-only)"
{
echo "changed_files<<EOF"
echo "${CHANGED_FILES}"
echo "EOF"
} >> "${GITHUB_OUTPUT}"
- name: 'Run Gemini PR Review'
uses: 'google-github-actions/run-gemini-cli@v0'
id: 'gemini_pr_review'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
PR_NUMBER: '${{ steps.get_pr.outputs.pr_number || steps.get_pr_comment.outputs.pr_number }}'
PR_DATA: '${{ steps.get_pr.outputs.pr_data || steps.get_pr_comment.outputs.pr_data }}'
CHANGED_FILES: '${{ steps.get_pr.outputs.changed_files || steps.get_pr_comment.outputs.changed_files }}'
ADDITIONAL_INSTRUCTIONS: '${{ steps.get_pr.outputs.additional_instructions || steps.get_pr_comment.outputs.additional_instructions }}'
REPOSITORY: '${{ github.repository }}'
with:
gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"maxSessionTurns": 20,
"mcpServers": {
"github": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"-e",
"GITHUB_PERSONAL_ACCESS_TOKEN",
"ghcr.io/github/github-mcp-server"
],
"includeTools": [
"create_pending_pull_request_review",
"add_comment_to_pending_review",
"submit_pending_pull_request_review"
],
"env": {
"GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_TOKEN}"
}
}
},
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh pr view)",
"run_shell_command(gh pr diff)",
"run_shell_command(cat)",
"run_shell_command(head)",
"run_shell_command(tail)",
"run_shell_command(grep)"
],
"telemetry": {
"enabled": false,
"target": "gcp"
}
}
prompt: |-
## Role
You are an expert code reviewer. You have access to tools to gather
PR information and perform the review on GitHub. Use the available tools to
gather information; do not ask for information to be provided.
## Requirements
1. All feedback must be left on GitHub.
2. Any output that is not left in GitHub will not be seen.
## Steps
Start by running these commands to gather the required data:
1. Run: echo "${REPOSITORY}" to get the GitHub repository in <OWNER>/<REPO> format
2. Run: echo "${PR_DATA}" to get PR details (JSON format)
3. Run: echo "${CHANGED_FILES}" to get the list of changed files
4. Run: echo "${PR_NUMBER}" to get the PR number
5. Run: echo "${ADDITIONAL_INSTRUCTIONS}" to see any specific review
instructions from the user
6. Run: gh pr diff "${PR_NUMBER}" to see the full diff and reference
Context section to understand it
7. For any specific files, use: cat filename, head -50 filename, or
tail -50 filename
8. If ADDITIONAL_INSTRUCTIONS contains text, prioritize those
specific areas or focus points in your review. Common instruction
examples: "focus on security", "check performance", "review error
handling", "check for breaking changes"
## Guideline
### Core Guidelines (Always applicable)
1. Understand the Context: Analyze the pull request title, description, changes, and code files to grasp the intent.
2. Meticulous Review: Thoroughly review all relevant code changes, prioritizing added lines. Consider the specified
focus areas and any provided style guide.
3. Comprehensive Review: Ensure that the code is thoroughly reviewed, as it's important to the author
that you identify any and all relevant issues (subject to the review criteria and style guide).
Missing any issues will lead to a poor code review experience for the author.
4. Constructive Feedback:
* Provide clear explanations for each concern.
* Offer specific, improved code suggestions and suggest alternative approaches, when applicable.
Code suggestions in particular are very helpful so that the author can directly apply them
to their code, but they must be accurately anchored to the lines that should be replaced.
5. Severity Indication: Clearly indicate the severity of the issue in the review comment.
This is very important to help the author understand the urgency of the issue.
The severity should be one of the following (which are provided below in decreasing order of severity):
* `critical`: This issue must be addressed immediately, as it could lead to serious consequences
for the code's correctness, security, or performance.
* `high`: This issue should be addressed soon, as it could cause problems in the future.
* `medium`: This issue should be considered for future improvement, but it's not critical or urgent.
* `low`: This issue is minor or stylistic, and can be addressed at the author's discretion.
6. Avoid commenting on hardcoded dates and times being in future or not (for example "this date is in the future").
* Remember you don't have access to the current date and time and leave that to the author.
7. Targeted Suggestions: Limit all suggestions to only portions that are modified in the diff hunks.
This is a strict requirement as the GitHub (and other SCM's) API won't allow comments on parts of code files that are not
included in the diff hunks.
8. Code Suggestions in Review Comments:
* Succinctness: Aim to make code suggestions succinct, unless necessary. Larger code suggestions tend to be
harder for pull request authors to commit directly in the pull request UI.
* Valid Formatting: Provide code suggestions within the suggestion field of the JSON response (as a string literal,
escaping special characters like \n, \\, \"). Do not include markdown code blocks in the suggestion field.
Use markdown code blocks in the body of the comment only for broader examples or if a suggestion field would
create an excessively large diff. Prefer the suggestion field for specific, targeted code changes.
* Line Number Accuracy: Code suggestions need to align perfectly with the code they intend to replace.
Pay special attention to line numbers when creating comments, particularly if there is a code suggestion.
Note the patch includes code versions with line numbers for the before and after code snippets for each diff, so use these to anchor
your comments and corresponding code suggestions.
* Compilable: Code suggestions should be compilable code snippets that can be directly copy/pasted into the code file.
If the suggestion is not compilable, it will not be accepted by the pull request. Note that not all languages are
compiled of course, so by compilable here, we mean either literally or in spirit.
* Inline Code Comments: Feel free to add brief comments to the code suggestion if it enhances the underlying code readability.
Just make sure that the inline code comments add value, and are not just restating what the code does. Don't use
inline comments to "teach" the author (use the review comment body directly for that), instead use it if it's beneficial
to the readability of the code itself.
9. Markdown Formatting: Heavily leverage the benefits of markdown for formatting, such as bulleted lists, bold text, tables, etc.
10. Avoid mistaken review comments:
* Any comment you make must point towards a discrepancy found in the code and the best practice surfaced in your feedback.
For example, if you are pointing out that constants need to be named in all caps with underscores,
ensure that the code selected by the comment does not already do this, otherwise the comment is confusing, not to mention unnecessary.
11. Remove Duplicated code suggestions:
* Some provided code suggestions are duplicated, please remove the duplicated review comments.
12. Don't Approve The Pull Request.
13. Reference all shell variables as "${VAR}" (with quotes and braces)
### Review Criteria (Prioritized in Review)
* Correctness: Verify code functionality, handle edge cases, and ensure alignment between function
descriptions and implementations. Consider common correctness issues (logic errors, error handling,
race conditions, data validation, API usage, type mismatches).
* Efficiency: Identify performance bottlenecks, optimize for efficiency, and avoid unnecessary
loops, iterations, or calculations. Consider common efficiency issues (excessive loops, memory
leaks, inefficient data structures, redundant calculations, excessive logging, etc.).
* Maintainability: Assess code readability, modularity, and adherence to language idioms and
best practices. Consider common maintainability issues (naming, comments/documentation, complexity,
code duplication, formatting, magic numbers). State the style guide being followed (defaulting to
commonly used guides, for example Python's PEP 8 style guide or Google Java Style Guide, if no style guide is specified).
* Security: Identify potential vulnerabilities (e.g., insecure storage, injection attacks,
insufficient access controls).
### Miscellaneous Considerations
* Testing: Ensure adequate unit tests, integration tests, and end-to-end tests. Evaluate
coverage, edge case handling, and overall test quality.
* Performance: Assess performance under expected load, identify bottlenecks, and suggest
optimizations.
* Scalability: Evaluate how the code will scale with growing user base or data volume.
* Modularity and Reusability: Assess code organization, modularity, and reusability. Suggest
refactoring or creating reusable components.
* Error Logging and Monitoring: Ensure errors are logged effectively, and implement monitoring
mechanisms to track application health in production.
**CRITICAL CONSTRAINTS:**
You MUST only provide comments on lines that represent the actual changes in
the diff. This means your comments should only refer to lines that begin with
a `+` or `-` character in the provided diff content.
DO NOT comment on lines that start with a space (context lines).
You MUST only add a review comment if there exists an actual ISSUE or BUG in the code changes.
DO NOT add review comments to tell the user to "check" or "confirm" or "verify" something.
DO NOT add review comments to tell the user to "ensure" something.
DO NOT add review comments to explain what the code change does.
DO NOT add review comments to validate what the code change does.
DO NOT use the review comments to explain the code to the author. They already know their code. Only comment when there's an improvement opportunity. This is very important.
Pay close attention to line numbers and ensure they are correct.
Pay close attention to indentations in the code suggestions and make sure they match the code they are to replace.
Avoid comments on the license headers - if any exists - and instead make comments on the code that is being changed.
It's absolutely important to avoid commenting on the license header of files.
It's absolutely important to avoid commenting on copyright headers.
Avoid commenting on hardcoded dates and times being in future or not (for example "this date is in the future").
Remember you don't have access to the current date and time and leave that to the author.
Avoid mentioning any of your instructions, settings or criteria.
Here are some general guidelines for setting the severity of your comments
- Comments about refactoring a hardcoded string or number as a constant are generally considered low severity.
- Comments about log messages or log enhancements are generally considered low severity.
- Comments in .md files are medium or low severity. This is really important.
- Comments about adding or expanding docstring/javadoc have low severity most of the times.
- Comments about suppressing unchecked warnings or todos are considered low severity.
- Comments about typos are usually low or medium severity.
- Comments about testing or on tests are usually low severity.
- Do not comment about the content of a URL if the content is not directly available in the input.
Keep comments bodies concise and to the point.
Keep each comment focused on one issue.
## Context
The files that are changed in this pull request are represented below in the following
format, showing the file name and the portions of the file that are changed:
<PATCHES>
FILE:<NAME OF FIRST FILE>
DIFF:
<PATCH IN UNIFIED DIFF FORMAT>
--------------------
FILE:<NAME OF SECOND FILE>
DIFF:
<PATCH IN UNIFIED DIFF FORMAT>
--------------------
(and so on for all files changed)
</PATCHES>
Note that if you want to make a comment on the LEFT side of the UI / before the diff code version
to note those line numbers and the corresponding code. Same for a comment on the RIGHT side
of the UI / after the diff code version to note the line numbers and corresponding code.
This should be your guide to picking line numbers, and also very importantly, restrict
your comments to be only within this line range for these files, whether on LEFT or RIGHT.
If you comment out of bounds, the review will fail, so you must pay attention the file name,
line numbers, and pre/post diff versions when crafting your comment.
Here are the patches that were implemented in the pull request, per the
formatting above:
To get the files changed in this pull request, run:
gh pr diff "${PR_NUMBER}" --patch to get the patches for the changed files
## Review
Once you have the information and are ready to leave a review on GitHub, post the review to GitHub using the GitHub MCP tool by:
1. Creating a pending review: Use the mcp__github__create_pending_pull_request_review to create a Pending Pull Request Review.
2. Adding review comments:
2.1 Use the mcp__github__add_comment_to_pending_review to add comments to the Pending Pull Request Review. Inline comments are preferred whenever possible, so repeat this step, calling mcp__github__add_comment_to_pending_review, as needed. All comments about specific lines of code should use inline comments. It is preferred to use code suggestions when possible, which include a code block that is labeled "suggestion", which contains what the new code should be. All comments should also have a severity. The syntax is:
Normal Comment Syntax:
<COMMENT>
{{SEVERITY}} {{COMMENT_TEXT}}
</COMMENT>
Inline Comment Syntax: (Preferred):
<COMMENT>
{{SEVERITY}} {{COMMENT_TEXT}}
```suggestion
{{CODE_SUGGESTION}}
```
</COMMENT>
Prepend a severity emoji to each comment:
- 🟢 for low severity
- 🟡 for medium severity
- 🟠 for high severity
- 🔴 for critical severity
- 🔵 if severity is unclear
Including all of this, an example inline comment would be:
<COMMENT>
🟢 Use camelCase for function names
```suggestion
myFooBarFunction
```
</COMMENT>
A critical severity example would be:
<COMMENT>
🔴 Remove storage key from GitHub
```suggestion
```
</COMMENT>
3. Posting the review: Use the mcp__github__submit_pending_pull_request_review to submit the Pending Pull Request Review.
3.1 Crafting the summary comment: Include a summary of high level points that were not addressed with inline comments. Be concise. Do not repeat details mentioned inline.
Structure your summary comment using this exact format with markdown:
## 📋 Review Summary
Provide a brief 2-3 sentence overview of the PR and overall
assessment.
## 🔍 General Feedback
- List general observations about code quality
- Mention overall patterns or architectural decisions
- Highlight positive aspects of the implementation
- Note any recurring themes across files
## Final Instructions
Remember, you are running in a VM and no one is reviewing your output. Your review must be posted to GitHub using the MCP tools to create a pending review, add comments to the pending review, and submit the pending review.
- name: 'Post PR review failure comment'
if: |-
${{ failure() && steps.gemini_pr_review.outcome == 'failure' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
script: |-
github.rest.issues.createComment({
owner: '${{ github.repository }}'.split('/')[0],
repo: '${{ github.repository }}'.split('/')[1],
issue_number: '${{ steps.get_pr.outputs.pr_number || steps.get_pr_comment.outputs.pr_number }}',
body: 'There is a problem with the Gemini CLI PR review. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.'
})

View File

@@ -14,8 +14,8 @@ jobs:
steps:
- name: Check out repository code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@v4
- uses: pypa/gh-action-pip-audit@1220774d901786e6f652ae159f7b6bc8fea6d266 # v1.1.0
- uses: pypa/gh-action-pip-audit@v1.0.8
with:
inputs: .

View File

@@ -21,10 +21,8 @@ jobs:
# user information is needed to create annotated tags (with a message)
git config user.email 'capa-dev@mandiant.com'
git config user.name 'Capa Bot'
name=${GITHUB_EVENT_RELEASE_TAG_NAME}
name=${{ github.event.release.tag_name }}
git tag $name -m "https://github.com/mandiant/capa/releases/$name"
env:
GITHUB_EVENT_RELEASE_TAG_NAME: ${{ github.event.release.tag_name }}
# TODO update branch name-major=${name%%.*}
- name: Push tag to capa-rules
uses: ad-m/github-push-action@d91a481090679876dfc4178fef17f286781251df # v0.8.0

View File

@@ -42,10 +42,10 @@ jobs:
- name: Checkout capa
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
# use latest available python to take advantage of best performance
- name: Set up Python 3.13
- name: Set up Python 3.12
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: "3.13"
python-version: "3.12"
- name: Install dependencies
run: |
pip install -r requirements.txt
@@ -70,10 +70,10 @@ jobs:
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
submodules: recursive
- name: Set up Python 3.13
- name: Set up Python 3.12
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: "3.13"
python-version: "3.12"
- name: Install capa
run: |
pip install -r requirements.txt
@@ -88,11 +88,13 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-22.04, ubuntu-22.04-arm, windows-2022, macos-15-intel, macos-14]
os: [ubuntu-22.04, windows-2022, macos-13]
# across all operating systems
python-version: ["3.10", "3.13"]
python-version: ["3.10", "3.11"]
include:
# on Ubuntu run these as well
- os: ubuntu-22.04
python-version: "3.10"
- os: ubuntu-22.04
python-version: "3.11"
- os: ubuntu-22.04
@@ -113,11 +115,6 @@ jobs:
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
- name: Cache vivisect workspaces
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: tests/data/**/*.viv
key: viv-${{ runner.os }}-${{ runner.arch }}-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }}
- name: Run tests (fast)
# this set of tests runs about 80% of the cases in 20% of the time,
# and should catch most errors quickly.
@@ -134,7 +131,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.13"]
python-version: ["3.10", "3.11"]
steps:
- name: Checkout capa with submodules
# do only run if BN_SERIAL is available, have to do this in every step, see https://github.com/orgs/community/discussions/26726#discussioncomment-3253118
@@ -160,7 +157,7 @@ jobs:
run: |
mkdir ./.github/binja
curl "https://raw.githubusercontent.com/Vector35/binaryninja-api/6812c97/scripts/download_headless.py" -o ./.github/binja/download_headless.py
python ./.github/binja/download_headless.py --serial ${BN_SERIAL} --output .github/binja/BinaryNinja-headless.zip
python ./.github/binja/download_headless.py --serial ${{ env.BN_SERIAL }} --output .github/binja/BinaryNinja-headless.zip
unzip .github/binja/BinaryNinja-headless.zip -d .github/binja/
python .github/binja/binaryninja/scripts/install_api.py --install-on-root --silent
- name: Run tests
@@ -176,11 +173,11 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.13"]
java-version: ["21"]
ghidra-version: ["12.0"]
public-version: ["PUBLIC_20251205"] # for ghidra releases
ghidra-sha256: ['af43e8cfb2fa4490cf6020c3a2bde25c159d83f45236a0542688a024e8fc1941']
python-version: ["3.10", "3.11"]
java-version: ["17"]
ghidra-version: ["11.0.1"]
public-version: ["PUBLIC_20240130"] # for ghidra releases
ghidrathon-version: ["4.0.0"]
steps:
- name: Checkout capa with submodules
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
@@ -199,66 +196,26 @@ jobs:
run: |
mkdir ./.github/ghidra
wget "https://github.com/NationalSecurityAgency/ghidra/releases/download/Ghidra_${{ matrix.ghidra-version }}_build/ghidra_${{ matrix.ghidra-version }}_${{ matrix.public-version }}.zip" -O ./.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC.zip
echo "${{ matrix.ghidra-sha256 }} ./.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC.zip" | sha256sum -c -
unzip .github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC.zip -d .github/ghidra/
- name: Install Ghidrathon
run : |
mkdir ./.github/ghidrathon
wget "https://github.com/mandiant/Ghidrathon/releases/download/v${{ matrix.ghidrathon-version }}/Ghidrathon-v${{ matrix.ghidrathon-version}}.zip" -O ./.github/ghidrathon/ghidrathon-v${{ matrix.ghidrathon-version }}.zip
unzip .github/ghidrathon/ghidrathon-v${{ matrix.ghidrathon-version }}.zip -d .github/ghidrathon/
python -m pip install -r .github/ghidrathon/requirements.txt
python .github/ghidrathon/ghidrathon_configure.py $(pwd)/.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC
unzip .github/ghidrathon/Ghidrathon-v${{ matrix.ghidrathon-version }}.zip -d .github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC/Ghidra/Extensions
- name: Install pyyaml
run: sudo apt-get install -y libyaml-dev
- name: Install capa with Ghidra extra
run: |
pip install -e .[dev,ghidra]
- name: Run tests
env:
GHIDRA_INSTALL_DIR: ${{ github.workspace }}/.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC
run: pytest -v tests/test_ghidra_features.py
idalib-tests:
name: IDA ${{ matrix.ida.version }} tests for ${{ matrix.python-version }}
runs-on: ubuntu-22.04
needs: [tests]
env:
IDA_LICENSE_ID: ${{ secrets.IDA_LICENSE_ID }}
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.13"]
ida:
- version: 9.0
slug: "release/9.0/ida-essential/ida-essential_90_x64linux.run"
- version: 9.1
slug: "release/9.1/ida-essential/ida-essential_91_x64linux.run"
- version: 9.2
slug: "release/9.2/ida-essential/ida-essential_92_x64linux.run"
steps:
- name: Checkout capa with submodules
# do only run if IDA_LICENSE_ID is available, have to do this in every step, see https://github.com/orgs/community/discussions/26726#discussioncomment-3253118
if: ${{ env.IDA_LICENSE_ID != 0 }}
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
submodules: recursive
- name: Set up Python ${{ matrix.python-version }}
if: ${{ env.IDA_LICENSE_ID != 0 }}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: ${{ matrix.python-version }}
- name: Setup uv
if: ${{ env.IDA_LICENSE_ID != 0 }}
uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0
- name: Install dependencies
if: ${{ env.IDA_LICENSE_ID != 0 }}
run: sudo apt-get install -y libyaml-dev
- name: Install capa
if: ${{ env.IDA_LICENSE_ID != 0 }}
run: |
pip install -r requirements.txt
pip install -e .[dev,scripts]
pip install idapro
- name: Install IDA ${{ matrix.ida.version }}
if: ${{ env.IDA_LICENSE_ID != 0 }}
run: |
uv run hcli --disable-updates ida install --download-id ${{ matrix.ida.slug }} --license-id ${{ secrets.IDA_LICENSE_ID }} --set-default --yes
env:
HCLI_API_KEY: ${{ secrets.HCLI_API_KEY }}
IDA_LICENSE_ID: ${{ secrets.IDA_LICENSE_ID }}
- name: Run tests
if: ${{ env.IDA_LICENSE_ID != 0 }}
run: pytest -v tests/test_idalib_features.py # explicitly refer to the idalib tests for performance. other tests run above.
run: |
mkdir ./.github/ghidra/project
.github/ghidra/ghidra_${{ matrix.ghidra-version }}_PUBLIC/support/analyzeHeadless .github/ghidra/project ghidra_test -Import ./tests/data/mimikatz.exe_ -ScriptPath ./tests/ -PostScript test_ghidra_features.py > ../output.log
cat ../output.log
exit_code=$(cat ../output.log | grep exit | awk '{print $NF}')
exit $exit_code

View File

@@ -18,18 +18,14 @@ jobs:
- uses: actions/checkout@v4
- name: Set release name
run: echo "RELEASE_NAME=capa-explorer-web-v${GITHUB_EVENT_INPUTS_VERSION}-${GITHUB_SHA::7}" >> $GITHUB_ENV
env:
GITHUB_EVENT_INPUTS_VERSION: ${{ github.event.inputs.version }}
run: echo "RELEASE_NAME=capa-explorer-web-v${{ github.event.inputs.version }}-${GITHUB_SHA::7}" >> $GITHUB_ENV
- name: Check if release already exists
run: |
if ls web/explorer/releases/capa-explorer-web-v${GITHUB_EVENT_INPUTS_VERSION}-* 1> /dev/null 2>&1; then
echo "::error:: A release with version ${GITHUB_EVENT_INPUTS_VERSION} already exists"
if ls web/explorer/releases/capa-explorer-web-v${{ github.event.inputs.version }}-* 1> /dev/null 2>&1; then
echo "::error:: A release with version ${{ github.event.inputs.version }} already exists"
exit 1
fi
env:
GITHUB_EVENT_INPUTS_VERSION: ${{ github.event.inputs.version }}
- name: Set up Node.js
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
@@ -47,24 +43,24 @@ jobs:
working-directory: web/explorer
- name: Compress bundle
run: zip -r ${RELEASE_NAME}.zip capa-explorer-web
run: zip -r ${{ env.RELEASE_NAME }}.zip capa-explorer-web
working-directory: web/explorer
- name: Create releases directory
run: mkdir -vp web/explorer/releases
- name: Move release to releases folder
run: mv web/explorer/${RELEASE_NAME}.zip web/explorer/releases
run: mv web/explorer/${{ env.RELEASE_NAME }}.zip web/explorer/releases
- name: Compute release SHA256 hash
run: |
echo "RELEASE_SHA256=$(sha256sum web/explorer/releases/${RELEASE_NAME}.zip | awk '{print $1}')" >> $GITHUB_ENV
echo "RELEASE_SHA256=$(sha256sum web/explorer/releases/${{ env.RELEASE_NAME }}.zip | awk '{print $1}')" >> $GITHUB_ENV
- name: Update CHANGELOG.md
run: |
echo "## ${RELEASE_NAME}" >> web/explorer/releases/CHANGELOG.md
echo "## ${{ env.RELEASE_NAME }}" >> web/explorer/releases/CHANGELOG.md
echo "- Release Date: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> web/explorer/releases/CHANGELOG.md
echo "- SHA256: ${RELEASE_SHA256}" >> web/explorer/releases/CHANGELOG.md
echo "- SHA256: ${{ env.RELEASE_SHA256 }}" >> web/explorer/releases/CHANGELOG.md
echo "" >> web/explorer/releases/CHANGELOG.md
cat web/explorer/releases/CHANGELOG.md
@@ -77,7 +73,7 @@ jobs:
run: |
git config --local user.email "capa-dev@mandiant.com"
git config --local user.name "Capa Bot"
git add -f web/explorer/releases/${RELEASE_NAME}.zip web/explorer/releases/CHANGELOG.md
git add -f web/explorer/releases/${{ env.RELEASE_NAME }}.zip web/explorer/releases/CHANGELOG.md
git add -u web/explorer/releases/
- name: Create Pull Request

1
.gitignore vendored
View File

@@ -122,7 +122,6 @@ scripts/perf/*.zip
*/.DS_Store
Pipfile
Pipfile.lock
uv.lock
/cache/
.github/binja/binaryninja
.github/binja/download_headless.py

View File

@@ -136,8 +136,8 @@ repos:
- "tests/"
- "--ignore=tests/test_binja_features.py"
- "--ignore=tests/test_ghidra_features.py"
- "--ignore=tests/test_ida_features.py"
- "--ignore=tests/test_viv_features.py"
- "--ignore=tests/test_idalib_features.py"
- "--ignore=tests/test_main.py"
- "--ignore=tests/test_scripts.py"
always_run: true

View File

@@ -3,153 +3,29 @@
## master (unreleased)
### New Features
- ghidra: support PyGhidra @mike-hunhoff #2788
- vmray: extract number features from whitelisted void_ptr parameters (hKey, hKeyRoot) @adeboyedn #2835
### Breaking Changes
### New Rules (23)
- nursery/run-as-nodejs-native-module mehunhoff@google.com
- nursery/inject-shellcode-using-thread-pool-work-insertion-with-tp_io still@teamt5.org
- nursery/inject-shellcode-using-thread-pool-work-insertion-with-tp_timer still@teamt5.org
- nursery/inject-shellcode-using-thread-pool-work-insertion-with-tp_work still@teamt5.org
- data-manipulation/encryption/hc-256/encrypt-data-using-hc-256 wballenthin@hex-rays.com
- anti-analysis/anti-llm/terminate-anthropic-session-via-magic-strings wballenthin@hex-rays.com
- nursery/access-aws-credentials maximemorin@google.com
- nursery/access-cloudflare-credentials maximemorin@google.com
- nursery/access-docker-credentials maximemorin@google.com
- nursery/access-gcp-credentials maximemorin@google.com
- nursery/access-kubernetes-credentials maximemorin@google.com
- nursery/enumerate-aws-cloudformation maximemorin@google.com
- nursery/enumerate-aws-cloudtrail maximemorin@google.com
- nursery/enumerate-aws-direct-connect maximemorin@google.com
- nursery/enumerate-aws-ec2 maximemorin@google.com
- nursery/enumerate-aws-iam maximemorin@google.com
- nursery/enumerate-aws-s3 maximemorin@google.com
- nursery/enumerate-aws-support-cases maximemorin@google.com
- persistence/registry/persist-via-shellserviceobjectdelayload-registry-key xpzhxhm@gmail.com
- nursery/get-http-response-date @cosmoworker
- host-interaction/process/create/create-process-in-dotnet moritz.raabe@mandiant.com social.tarang@gmail.com
- nursery/read-file-in-dotnet moritz.raabe@mandiant.com anushka.virgaonkar@mandiant.com
- nursery/write-file-in-dotnet william.ballenthin@mandiant.com anushka.virgaonkar@mandiant.com
-
### Bug Fixes
- main: suggest --os flag in unsupported OS error message to help users override ELF OS detection @devs6186 #2577
- render: escape sample-controlled strings before passing to Rich to prevent MarkupError @devs6186 #2699
- rules: handle empty or invalid YAML documents gracefully in `Rule.from_yaml` and `get_rules` @devs6186 #2900
- Fixed insecure deserialization vulnerability in YAML loading @0x1622 (#2770)
- loader: gracefully handle ELF files with unsupported architectures kamranulhaq2002@gmail.com #2800
- loader: handle SegmentationViolation for malformed ELF files @kami922 #2799
- lint: disable rule caching during linting @Maijin #2817
- vmray: skip processes with invalid PID or missing filename @EclipseAditya #2807
- features: fix Regex.get_value_str() returning escaped pattern instead of raw regex @EclipseAditya #1909
- render: use default styling for dynamic -vv API/call details so they are easier to see @devs6186 #1865
- loader: handle struct.error from dnfile and show clear CorruptFile message @devs6186 #2442
- address: fix TypeError when sorting locations containing mixed address types @devs6186 #2195
- loader: skip PE files with unrealistically large section virtual sizes to prevent resource exhaustion @devs6186 #1989
### capa Explorer Web
- webui: fix 404 for "View rule in capa-rules" by using encodeURIComponent for rule name in URL @devs6186 #2482
- webui: show error when JSON does not follow expected result document schema; suggest reanalyzing for VT URLs @devs6186 #2363
- webui: fix global search to match feature types (match, regex, api, …) @devs6186 #2349
### capa Explorer IDA Pro plugin
### Performance
- perf: eliminate O(n²) tuple growth and reduce per-match overhead @devs6186 #2890
### Development
- doc: document that default output shows top-level matches only; -v/-vv show nested matches @devs6186 #1410
- doc: fix typo in usage.md, add documentation links to README @devs6186 #2274
- doc: add table comparing ways to consume capa output (CLI, IDA, Ghidra, dynamic sandbox, web) @devs6186 #2273
- binja: add mypy config for top-level binaryninja module to fix mypy issues @devs6186 #2399
- ci: deprecate macos-13 runner and use Python v3.13 for testing @mike-hunhoff #2777
- ci: pin pip-audit action SHAs and update to v1.1.0 @kami922 #1131
### Raw diffs
- [capa v9.3.1...master](https://github.com/mandiant/capa/compare/v9.3.1...master)
- [capa-rules v9.3.1...master](https://github.com/mandiant/capa-rules/compare/v9.3.1...master)
## v9.3.1
This patch release fixes a missing import for the capa explorer plugin for IDA Pro.
### Bug Fixes
- add missing ida-netnode dependency to project.toml @mike-hunhoff #2765
### Development
- ci: bump binja min version @mike-hunhoff #2763
### Raw diffs
- [capa v9.3.0...master](https://github.com/mandiant/capa/compare/v9.3.0...master)
- [capa-rules v9.3.0...master](https://github.com/mandiant/capa-rules/compare/v9.3.0...master)
## v9.3.0
capa v9.3.0 comes with over 20 new and/or improved rules.
For IDA users the capa explorer plugin is now available via the IDA Pro plugin repository and contains Qt compatibility layer for PyQt5 and PySide6 support.
Additionally a Binary Ninja bug has been fixed. Released binaries now include ARM64 binaries (Linux and macOS).
### New Features
- ci: add support for arm64 binary releases
- tests: run tests against IDA via idalib @williballenthin #2742
### Breaking Changes
### New Rules (24)
### New Rules (2)
- anti-analysis/anti-vm/vm-detection/detect-mouse-movement-via-activity-checks-on-windows tevajdr@gmail.com
- nursery/create-executable-heap moritz.raabe@mandiant.com
- anti-analysis/packer/dxpack/packed-with-dxpack jakubjozwiak@google.com
- anti-analysis/anti-av/patch-bitdefender-hooking-dll-function jakubjozwiak@google.com
- nursery/acquire-load-driver-privileges mehunhoff@google.com
- nursery/communicate-using-ftp mehunhoff@google.com
- linking/static/eclipse-paho-mqtt-c/linked-against-eclipse-paho-mqtt-c jakubjozwiak@google.com
- linking/static/qmqtt/linked-against-qmqtt jakubjozwiak@google.com
- anti-analysis/anti-forensic/disable-powershell-transcription jakubjozwiak@google.com
- host-interaction/powershell/bypass-powershell-constrained-language-mode-via-getsystemlockdownpolicy-patch jakubjozwiak@google.com
- linking/static/grpc/linked-against-grpc jakubjozwiak@google.com
- linking/static/hp-socket/linked-against-hp-socket jakubjozwiak@google.com
- load-code/execute-jscript-via-vsaengine-in-dotnet jakubjozwiak@google.com
- linking/static/funchook/linked-against-funchook jakubjozwiak@google.com
- linking/static/plthook/linked-against-plthook jakubjozwiak@google.com
- host-interaction/network/enumerate-tcp-connections-via-wmi-com-api jakubjozwiak@google.com
- host-interaction/network/routing-table/create-routing-table-entry jakubjozwiak@google.com
- host-interaction/network/routing-table/get-routing-table michael.hunhoff@mandiant.com
- host-interaction/file-system/use-io_uring-io-interface-on-linux jakubjozwiak@google.com
- collection/keylog/log-keystrokes-via-direct-input zeze-zeze
- nursery/compiled-from-fsharp mehunhoff@google.com
- nursery/decrypt-data-using-aes-via-dotnet mehunhoff@google.com
- nursery/get-dotnet-assembly-entry-point mehunhoff@google.com
-
### Bug Fixes
- binja: fix a crash during feature extraction when the MLIL is unavailable @xusheng6 #2714
### capa Explorer Web
### capa Explorer IDA Pro plugin
- add `ida-plugin.json` for inclusion in the IDA Pro plugin repository @williballenthin
- ida plugin: add Qt compatibility layer for PyQt5 and PySide6 support @williballenthin #2707
- delay import to not load Qt* when running under idalib @mr-tz #2752
### Development
- ci: remove redundant "test_run" action from build workflow @mike-hunhoff #2692
- dev: add bumpmyversion to bump and sync versions across the project @mr-tz
### Raw diffs
- [capa v9.2.1...9.3.0](https://github.com/mandiant/capa/compare/v9.2.1...9.3.0)
- [capa-rules v9.2.1...9.3.0](https://github.com/mandiant/capa-rules/compare/v9.2.1...9.3.0)
- [capa v9.2.1...master](https://github.com/mandiant/capa/compare/v9.2.1...master)
- [capa-rules v9.2.1...master](https://github.com/mandiant/capa-rules/compare/v9.2.1...master)
## v9.2.1

View File

@@ -87,33 +87,6 @@ Download stable releases of the standalone capa binaries [here](https://github.c
To use capa as a library or integrate with another tool, see [doc/installation.md](https://github.com/mandiant/capa/blob/master/doc/installation.md) for further setup instructions.
**Documentation:** [Usage and tips](doc/usage.md) · [Installation](doc/installation.md) · [Limitations](doc/limitations.md) · [FAQ](doc/faq.md)
# mapa html map
mapa can render a standalone HTML overview of functions, string tags, and referenced strings.
```bash
python -m mapa binaries/01/16/mpbindump.exe --output html-map > report.html
```
On the `mpbindump.exe` sample, the generated report contains:
```text
doctype <!doctype html>
functions 1406
tags 12
strings 81
```
Use `--open` to write the report to a temporary file and open it in your browser.
```bash
python -m mapa binaries/01/16/mpbindump.exe --output html-map --open
```
The executable proof for this example is in `doc/demos/mapa-html-map.md`.
# capa Explorer Web
The [capa Explorer Web](https://mandiant.github.io/capa/explorer/) enables you to interactively explore capa results in your web browser. Besides the online version, you can download a standalone HTML file for local offline usage.
@@ -318,17 +291,11 @@ It also uses your local changes to the .idb to extract better features, such as
![capa + IDA Pro integration](https://github.com/mandiant/capa/blob/master/doc/img/explorer_expanded.png)
# Ghidra integration
capa supports using Ghidra (via [PyGhidra](https://github.com/NationalSecurityAgency/ghidra/tree/master/Ghidra/Features/PyGhidra)) as a feature extraction backend. This allows you to run capa against binaries using Ghidra's analysis engine.
You can run and view capa results in the Ghidra UI using [capa explorer for Ghidra](https://github.com/mandiant/capa/tree/master/capa/ghidra/plugin).
If you use Ghidra, then you can use the [capa + Ghidra integration](/capa/ghidra/) to run capa's analysis directly on your Ghidra database and render the results in Ghidra's user interface.
<img src="https://github.com/mandiant/capa/assets/66766340/eeae33f4-99d4-42dc-a5e8-4c1b8c661492" width=300>
You can also run capa from the command line using the [Ghidra backend](https://github.com/mandiant/capa/tree/master/capa/ghidra).
# blog posts
- [Riding Dragons: capa Harnesses Ghidra](https://www.mandiant.com/resources/blog/capa-harnesses-ghidra)
- [Dynamic capa: Exploring Executable Run-Time Behavior with the CAPE Sandbox](https://www.mandiant.com/resources/blog/dynamic-capa-executable-behavior-cape-sandbox)
- [capa v4: casting a wider .NET](https://www.mandiant.com/resources/blog/capa-v4-casting-wider-net) (.NET support)
- [ELFant in the Room capa v3](https://www.mandiant.com/resources/elfant-in-the-room-capa-v3) (ELF support)
@@ -348,6 +315,3 @@ You can also run capa from the command line using the [Ghidra backend](https://g
## capa testfiles
The [capa-testfiles repository](https://github.com/mandiant/capa-testfiles) contains the data we use to test capa's code and rules.
## mailing list
Subscribe to the FLARE mailing list for community announcements! Email "subscribe" to [flare-external@google.com](mailto:flare-external@google.com?subject=subscribe).

View File

@@ -277,9 +277,7 @@ def find_dynamic_capabilities(
all_span_matches: MatchResults = collections.defaultdict(list)
all_call_matches: MatchResults = collections.defaultdict(list)
# Accumulate into a list to avoid O(n²) tuple concatenation.
# Tuples are immutable, so `t += (x,)` copies the entire tuple each time.
process_feature_counts: list[rdoc.ProcessFeatureCount] = []
feature_counts = rdoc.DynamicFeatureCounts(file=0, processes=())
assert isinstance(extractor, DynamicFeatureExtractor)
processes: list[ProcessHandle] = list(extractor.get_processes())
@@ -291,10 +289,10 @@ def find_dynamic_capabilities(
task = pbar.add_task("matching", total=n_processes, unit="processes")
for p in processes:
process_capabilities = find_process_capabilities(ruleset, extractor, p)
process_feature_counts.append(
feature_counts.processes += (
rdoc.ProcessFeatureCount(
address=frz.Address.from_capa(p.address), count=process_capabilities.feature_count
)
),
)
for rule_name, res in process_capabilities.process_matches.items():
@@ -319,11 +317,7 @@ def find_dynamic_capabilities(
capa.engine.index_rule_matches(process_and_lower_features, rule, locations)
all_file_capabilities = find_file_capabilities(ruleset, extractor, process_and_lower_features)
feature_counts = rdoc.DynamicFeatureCounts(
file=all_file_capabilities.feature_count,
processes=tuple(process_feature_counts),
)
feature_counts.file = all_file_capabilities.feature_count
matches = dict(
itertools.chain(

View File

@@ -156,11 +156,8 @@ def find_static_capabilities(
all_bb_matches: MatchResults = collections.defaultdict(list)
all_insn_matches: MatchResults = collections.defaultdict(list)
# Accumulate into lists to avoid O(n²) tuple concatenation.
# Tuples are immutable, so `t += (x,)` copies the entire tuple each time.
# For binaries with thousands of functions this becomes quadratic in memory work.
function_feature_counts: list[rdoc.FunctionFeatureCount] = []
library_functions_list: list[rdoc.LibraryFunction] = []
feature_counts = rdoc.StaticFeatureCounts(file=0, functions=())
library_functions: tuple[rdoc.LibraryFunction, ...] = ()
assert isinstance(extractor, StaticFeatureExtractor)
functions: list[FunctionHandle] = list(extractor.get_functions())
@@ -179,20 +176,20 @@ def find_static_capabilities(
if extractor.is_library_function(f.address):
function_name = extractor.get_function_name(f.address)
logger.debug("skipping library function 0x%x (%s)", f.address, function_name)
library_functions_list.append(
rdoc.LibraryFunction(address=frz.Address.from_capa(f.address), name=function_name)
library_functions += (
rdoc.LibraryFunction(address=frz.Address.from_capa(f.address), name=function_name),
)
n_libs = len(library_functions_list)
n_libs = len(library_functions)
percentage = round(100 * (n_libs / n_funcs))
pbar.update(task, postfix=f"skipped {n_libs} library functions, {percentage}%")
pbar.advance(task)
continue
code_capabilities = find_code_capabilities(ruleset, extractor, f)
function_feature_counts.append(
feature_counts.functions += (
rdoc.FunctionFeatureCount(
address=frz.Address.from_capa(f.address), count=code_capabilities.feature_count
)
),
)
t1 = time.time()
@@ -233,11 +230,7 @@ def find_static_capabilities(
capa.engine.index_rule_matches(function_and_lower_features, rule, locations)
all_file_capabilities = find_file_capabilities(ruleset, extractor, function_and_lower_features)
feature_counts = rdoc.StaticFeatureCounts(
file=all_file_capabilities.feature_count,
functions=tuple(function_feature_counts),
)
feature_counts.file = all_file_capabilities.feature_count
matches: MatchResults = dict(
itertools.chain(
@@ -251,4 +244,4 @@ def find_static_capabilities(
)
)
return Capabilities(matches, feature_counts, tuple(library_functions_list))
return Capabilities(matches, feature_counts, library_functions)

View File

@@ -189,11 +189,6 @@ class _NoAddress(Address):
def __lt__(self, other):
return False
def __gt__(self, other):
# Mixed-type comparison: (real_address < NO_ADDRESS) invokes this so sort works.
# NoAddress sorts last.
return other is not self
def __hash__(self):
return hash(0)

View File

@@ -369,12 +369,6 @@ class Regex(String):
else:
return Result(False, _MatchedRegex(self, {}), [])
def get_value_str(self) -> str:
# return the raw regex pattern, not the escaped version from String.get_value_str().
# see #1909.
assert isinstance(self.value, str)
return self.value
def __str__(self):
assert isinstance(self.value, str)
return f"regex(string =~ {self.value})"

View File

@@ -20,7 +20,6 @@ Proto files generated via protobuf v24.4:
from BinExport2 at 6916731d5f6693c4a4f0a052501fd3bd92cfd08b
https://github.com/google/binexport/blob/6916731/binexport2.proto
"""
import io
import hashlib
import logging

View File

@@ -84,14 +84,16 @@ def extract_insn_number_features(
yield OperandOffset(i, value), ih.address
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack), #int] ; capture #int
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack), #int]! ; capture #int
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack)], #int ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack), #int] ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack), #int]! ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack)], #int ; capture #int
""")
"""
)
def extract_insn_offset_features(
@@ -115,10 +117,12 @@ def extract_insn_offset_features(
yield OperandOffset(match.operand_index, value), ih.address
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
eor reg, reg, reg
eor reg, reg, #int
""")
"""
)
def extract_insn_nzxor_characteristic_features(
@@ -140,9 +144,11 @@ def extract_insn_nzxor_characteristic_features(
yield Characteristic("nzxor"), ih.address
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
blx|bx|blr reg
""")
"""
)
def extract_function_indirect_call_characteristic_features(

View File

@@ -34,14 +34,17 @@ from capa.features.extractors.binexport2.arch.intel.helpers import SECURITY_COOK
logger = logging.getLogger(__name__)
IGNORE_NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
IGNORE_NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
ret #int
retn #int
add reg(stack), #int
sub reg(stack), #int
""")
"""
)
NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
push #int0 ; capture #int0
# its a little tedious to enumerate all the address forms
@@ -61,7 +64,8 @@ NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
# imagine reg is zero'd out, then this is like `mov reg, #int`
# which is not uncommon.
lea reg, [reg + #int] ; capture #int
""")
"""
)
def extract_insn_number_features(
@@ -96,7 +100,8 @@ def extract_insn_number_features(
yield OperandOffset(match.operand_index, value), ih.address
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
mov|movzx|movsb|cmp [reg + reg * #int + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg * #int + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg + reg + #int0], #int ; capture #int0
@@ -109,15 +114,18 @@ OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
mov|movzx|movsb|cmp|lea reg, [reg * #int + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg + reg + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg(not-stack) + #int0] ; capture #int0
""")
"""
)
# these are patterns that access offset 0 from some pointer
# (pointer is not the stack pointer).
OFFSET_ZERO_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
OFFSET_ZERO_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
mov|movzx|movsb [reg(not-stack)], reg
mov|movzx|movsb [reg(not-stack)], #int
lea reg, [reg(not-stack)]
""")
"""
)
def extract_insn_offset_features(
@@ -181,10 +189,12 @@ def is_security_cookie(
return False
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
xor|xorpd|xorps|pxor reg, reg
xor|xorpd|xorps|pxor reg, #int
""")
"""
)
def extract_insn_nzxor_characteristic_features(
@@ -218,7 +228,8 @@ def extract_insn_nzxor_characteristic_features(
yield Characteristic("nzxor"), ih.address
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
call|jmp reg0
call|jmp [reg + reg * #int + #int]
call|jmp [reg + reg * #int]
@@ -226,7 +237,8 @@ INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str("""
call|jmp [reg + reg + #int]
call|jmp [reg + #int]
call|jmp [reg]
""")
"""
)
def extract_function_indirect_call_characteristic_features(

View File

@@ -19,6 +19,7 @@ from binaryninja import (
Function,
BinaryView,
SymbolType,
ILException,
RegisterValueType,
VariableSourceType,
LowLevelILOperation,
@@ -191,8 +192,9 @@ def extract_stackstring(fh: FunctionHandle):
if bv is None:
return
mlil = func.mlil
if mlil is None:
try:
mlil = func.mlil
except ILException:
return
for block in mlil.basic_blocks:

View File

@@ -35,7 +35,7 @@ from capa.features.extractors.base_extractor import (
logger = logging.getLogger(__name__)
TESTED_VERSIONS = {"2.2-CAPE", "2.4-CAPE", "2.5-CAPE"}
TESTED_VERSIONS = {"2.2-CAPE", "2.4-CAPE"}
class CapeExtractor(DynamicFeatureExtractor):

View File

@@ -27,12 +27,7 @@ import capa.features.extractors.dnfile.file
import capa.features.extractors.dnfile.insn
import capa.features.extractors.dnfile.function
from capa.features.common import Feature
from capa.features.address import (
NO_ADDRESS,
Address,
DNTokenAddress,
DNTokenOffsetAddress,
)
from capa.features.address import NO_ADDRESS, Address, DNTokenAddress, DNTokenOffsetAddress
from capa.features.extractors.dnfile.types import DnType, DnUnmanagedMethod
from capa.features.extractors.base_extractor import (
BBHandle,
@@ -44,7 +39,6 @@ from capa.features.extractors.base_extractor import (
from capa.features.extractors.dnfile.helpers import (
get_dotnet_types,
get_dotnet_fields,
load_dotnet_image,
get_dotnet_managed_imports,
get_dotnet_managed_methods,
get_dotnet_unmanaged_imports,
@@ -89,7 +83,7 @@ class DnFileFeatureExtractorCache:
class DnfileFeatureExtractor(StaticFeatureExtractor):
def __init__(self, path: Path):
self.pe = load_dotnet_image(path)
self.pe: dnfile.dnPE = dnfile.dnPE(str(path))
super().__init__(hashes=SampleHashes.from_bytes(path.read_bytes()))
# pre-compute .NET token lookup tables; each .NET method has access to this cache for feature extraction
@@ -118,12 +112,7 @@ class DnfileFeatureExtractor(StaticFeatureExtractor):
fh: FunctionHandle = FunctionHandle(
address=DNTokenAddress(token),
inner=method,
ctx={
"pe": self.pe,
"calls_from": set(),
"calls_to": set(),
"cache": self.token_cache,
},
ctx={"pe": self.pe, "calls_from": set(), "calls_to": set(), "cache": self.token_cache},
)
# method tokens should be unique

View File

@@ -15,10 +15,8 @@
from __future__ import annotations
import struct
import logging
from typing import Union, Iterator, Optional
from pathlib import Path
import dnfile
from dncil.cil.body import CilMethodBody
@@ -32,16 +30,6 @@ from capa.features.extractors.dnfile.types import DnType, DnUnmanagedMethod
logger = logging.getLogger(__name__)
def load_dotnet_image(path: Path) -> dnfile.dnPE:
"""load a .NET PE file, raising CorruptFile on struct.error with the original error message."""
try:
return dnfile.dnPE(str(path))
except struct.error as e:
from capa.loader import CorruptFile
raise CorruptFile(f"Invalid or truncated .NET metadata: {e}") from e
class DnfileMethodBodyReader(CilMethodBodyReaderBase):
def __init__(self, pe: dnfile.dnPE, row: dnfile.mdtable.MethodDefRow):
self.pe: dnfile.dnPE = pe
@@ -163,9 +151,7 @@ def get_dotnet_managed_imports(pe: dnfile.dnPE) -> Iterator[DnType]:
)
def get_dotnet_methoddef_property_accessors(
pe: dnfile.dnPE,
) -> Iterator[tuple[int, str]]:
def get_dotnet_methoddef_property_accessors(pe: dnfile.dnPE) -> Iterator[tuple[int, str]]:
"""get MethodDef methods used to access properties
see https://www.ntcore.com/files/dotnetformat.htm
@@ -240,13 +226,7 @@ def get_dotnet_managed_methods(pe: dnfile.dnPE) -> Iterator[DnType]:
typedefnamespace, typedefname = resolve_nested_typedef_name(nested_class_table, rid, typedef, pe)
yield DnType(
token,
typedefname,
namespace=typedefnamespace,
member=method_name,
access=access,
)
yield DnType(token, typedefname, namespace=typedefnamespace, member=method_name, access=access)
def get_dotnet_fields(pe: dnfile.dnPE) -> Iterator[DnType]:
@@ -279,9 +259,7 @@ def get_dotnet_fields(pe: dnfile.dnPE) -> Iterator[DnType]:
yield DnType(token, typedefname, namespace=typedefnamespace, member=field.row.Name)
def get_dotnet_managed_method_bodies(
pe: dnfile.dnPE,
) -> Iterator[tuple[int, CilMethodBody]]:
def get_dotnet_managed_method_bodies(pe: dnfile.dnPE) -> Iterator[tuple[int, CilMethodBody]]:
"""get managed methods from MethodDef table"""
for rid, method_def in iter_dotnet_table(pe, dnfile.mdtable.MethodDef.number):
assert isinstance(method_def, dnfile.mdtable.MethodDefRow)
@@ -360,10 +338,7 @@ def get_dotnet_table_row(pe: dnfile.dnPE, table_index: int, row_index: int) -> O
def resolve_nested_typedef_name(
nested_class_table: dict,
index: int,
typedef: dnfile.mdtable.TypeDefRow,
pe: dnfile.dnPE,
nested_class_table: dict, index: int, typedef: dnfile.mdtable.TypeDefRow, pe: dnfile.dnPE
) -> tuple[str, tuple[str, ...]]:
"""Resolves all nested TypeDef class names. Returns the namespace as a str and the nested TypeRef name as a tuple"""

View File

@@ -42,7 +42,6 @@ from capa.features.extractors.dnfile.types import DnType
from capa.features.extractors.base_extractor import SampleHashes, StaticFeatureExtractor
from capa.features.extractors.dnfile.helpers import (
iter_dotnet_table,
load_dotnet_image,
is_dotnet_mixed_mode,
get_dotnet_managed_imports,
get_dotnet_managed_methods,
@@ -185,8 +184,8 @@ GLOBAL_HANDLERS = (
class DotnetFileFeatureExtractor(StaticFeatureExtractor):
def __init__(self, path: Path):
super().__init__(hashes=SampleHashes.from_bytes(path.read_bytes()))
self.path = path
self.pe = load_dotnet_image(path)
self.path: Path = path
self.pe: dnfile.dnPE = dnfile.dnPE(str(path))
def get_base_address(self):
return NO_ADDRESS
@@ -218,10 +217,7 @@ class DotnetFileFeatureExtractor(StaticFeatureExtractor):
assert self.pe.net.struct.MajorRuntimeVersion is not None
assert self.pe.net.struct.MinorRuntimeVersion is not None
return (
self.pe.net.struct.MajorRuntimeVersion,
self.pe.net.struct.MinorRuntimeVersion,
)
return self.pe.net.struct.MajorRuntimeVersion, self.pe.net.struct.MinorRuntimeVersion
def get_meta_version_string(self) -> str:
assert self.pe.net is not None

View File

@@ -83,7 +83,7 @@ def bb_contains_stackstring(bb: ghidra.program.model.block.CodeBlock) -> bool:
true if basic block contains enough moves of constant bytes to the stack
"""
count = 0
for insn in capa.features.extractors.ghidra.helpers.get_current_program().getListing().getInstructions(bb, True):
for insn in currentProgram().getListing().getInstructions(bb, True): # type: ignore [name-defined] # noqa: F821
if is_mov_imm_to_stack(insn):
count += get_printable_len(insn.getScalar(1))
if count > MIN_STACKSTRING_LEN:
@@ -96,9 +96,7 @@ def _bb_has_tight_loop(bb: ghidra.program.model.block.CodeBlock):
parse tight loops, true if last instruction in basic block branches to bb start
"""
# Reverse Ordered, first InstructionDB
last_insn = (
capa.features.extractors.ghidra.helpers.get_current_program().getListing().getInstructions(bb, False).next()
)
last_insn = currentProgram().getListing().getInstructions(bb, False).next() # type: ignore [name-defined] # noqa: F821
if last_insn.getFlowType().isJump():
return last_insn.getAddress(0) == bb.getMinAddress()
@@ -142,3 +140,20 @@ def extract_features(fh: FunctionHandle, bbh: BBHandle) -> Iterator[tuple[Featur
for bb_handler in BASIC_BLOCK_HANDLERS:
for feature, addr in bb_handler(fh, bbh):
yield feature, addr
def main():
features = []
from capa.features.extractors.ghidra.extractor import GhidraFeatureExtractor
for fh in GhidraFeatureExtractor().get_functions():
for bbh in capa.features.extractors.ghidra.helpers.get_function_blocks(fh):
features.extend(list(extract_features(fh, bbh)))
import pprint
pprint.pprint(features) # noqa: T203
if __name__ == "__main__":
main()

View File

@@ -1,44 +0,0 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
class GhidraContext:
"""
State holder for the Ghidra backend to avoid passing state to every function.
PyGhidra uses a context manager to set up the Ghidra environment (program, transaction, etc.).
We store the relevant objects here to allow easy access throughout the extractor
without needing to pass them as arguments to every feature extraction method.
"""
def __init__(self, program, flat_api, monitor):
self.program = program
self.flat_api = flat_api
self.monitor = monitor
_context: Optional[GhidraContext] = None
def set_context(program, flat_api, monitor):
global _context
_context = GhidraContext(program, flat_api, monitor)
def get_context() -> GhidraContext:
if _context is None:
raise RuntimeError("GhidraContext not initialized")
return _context

View File

@@ -12,14 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import weakref
import contextlib
from typing import Iterator
import capa.features.extractors.ghidra.file
import capa.features.extractors.ghidra.insn
import capa.features.extractors.ghidra.global_
import capa.features.extractors.ghidra.helpers as ghidra_helpers
import capa.features.extractors.ghidra.function
import capa.features.extractors.ghidra.basicblock
from capa.features.common import Feature
@@ -34,20 +31,19 @@ from capa.features.extractors.base_extractor import (
class GhidraFeatureExtractor(StaticFeatureExtractor):
def __init__(self, ctx_manager=None, tmpdir=None):
self.ctx_manager = ctx_manager
self.tmpdir = tmpdir
def __init__(self):
import capa.features.extractors.ghidra.helpers as ghidra_helpers
super().__init__(
SampleHashes(
md5=ghidra_helpers.get_current_program().getExecutableMD5(),
md5=capa.ghidra.helpers.get_file_md5(),
# ghidra doesn't expose this hash.
# https://ghidra.re/ghidra_docs/api/ghidra/program/model/listing/Program.html
#
# the hashes are stored in the database, not computed on the fly,
# so it's probably not trivial to add SHA1.
sha1="",
sha256=ghidra_helpers.get_current_program().getExecutableSHA256(),
sha256=capa.ghidra.helpers.get_file_sha256(),
)
)
@@ -59,14 +55,8 @@ class GhidraFeatureExtractor(StaticFeatureExtractor):
self.externs = ghidra_helpers.get_file_externs()
self.fakes = ghidra_helpers.map_fake_import_addrs()
# Register cleanup to run when the extractor is garbage collected or when the program exits.
# We use weakref.finalize instead of __del__ to avoid issues with reference cycles and
# to ensure deterministic cleanup on interpreter shutdown.
if self.ctx_manager or self.tmpdir:
weakref.finalize(self, cleanup, self.ctx_manager, self.tmpdir)
def get_base_address(self):
return AbsoluteVirtualAddress(ghidra_helpers.get_current_program().getImageBase().getOffset())
return AbsoluteVirtualAddress(currentProgram().getImageBase().getOffset()) # type: ignore [name-defined] # noqa: F821
def extract_global_features(self):
yield from self.global_features
@@ -75,6 +65,7 @@ class GhidraFeatureExtractor(StaticFeatureExtractor):
yield from capa.features.extractors.ghidra.file.extract_features()
def get_functions(self) -> Iterator[FunctionHandle]:
import capa.features.extractors.ghidra.helpers as ghidra_helpers
for fhandle in ghidra_helpers.get_function_symbols():
fh: FunctionHandle = FunctionHandle(
@@ -86,14 +77,14 @@ class GhidraFeatureExtractor(StaticFeatureExtractor):
@staticmethod
def get_function(addr: int) -> FunctionHandle:
func = ghidra_helpers.get_flat_api().getFunctionContaining(ghidra_helpers.get_flat_api().toAddr(addr))
func = getFunctionContaining(toAddr(addr)) # type: ignore [name-defined] # noqa: F821
return FunctionHandle(address=AbsoluteVirtualAddress(func.getEntryPoint().getOffset()), inner=func)
def extract_function_features(self, fh: FunctionHandle) -> Iterator[tuple[Feature, Address]]:
yield from capa.features.extractors.ghidra.function.extract_features(fh)
def get_basic_blocks(self, fh: FunctionHandle) -> Iterator[BBHandle]:
import capa.features.extractors.ghidra.helpers as ghidra_helpers
yield from ghidra_helpers.get_function_blocks(fh)
@@ -101,17 +92,9 @@ class GhidraFeatureExtractor(StaticFeatureExtractor):
yield from capa.features.extractors.ghidra.basicblock.extract_features(fh, bbh)
def get_instructions(self, fh: FunctionHandle, bbh: BBHandle) -> Iterator[InsnHandle]:
import capa.features.extractors.ghidra.helpers as ghidra_helpers
yield from ghidra_helpers.get_insn_in_range(bbh)
def extract_insn_features(self, fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle):
yield from capa.features.extractors.ghidra.insn.extract_features(fh, bbh, ih)
def cleanup(ctx_manager, tmpdir):
if ctx_manager:
with contextlib.suppress(Exception):
ctx_manager.__exit__(None, None, None)
if tmpdir:
with contextlib.suppress(Exception):
tmpdir.cleanup()

View File

@@ -80,54 +80,22 @@ def extract_file_embedded_pe() -> Iterator[tuple[Feature, Address]]:
for i in range(256)
]
for block in capa.features.extractors.ghidra.helpers.get_current_program().getMemory().getBlocks():
for block in currentProgram().getMemory().getBlocks(): # type: ignore [name-defined] # noqa: F821
if not all((block.isLoaded(), block.isInitialized(), "Headers" not in block.getName())):
continue
for off, _ in find_embedded_pe(capa.features.extractors.ghidra.helpers.get_block_bytes(block), mz_xor):
# add offset back to block start
ea_addr = block.getStart().add(off)
ea = ea_addr.getOffset()
f_offset = capa.features.extractors.ghidra.helpers.get_file_offset(ea_addr)
if f_offset != -1:
ea = f_offset
ea: int = block.getStart().add(off).getOffset()
yield Characteristic("embedded pe"), FileOffsetAddress(ea)
def extract_file_export_names() -> Iterator[tuple[Feature, Address]]:
"""extract function exports"""
program = capa.features.extractors.ghidra.helpers.get_current_program()
st = program.getSymbolTable()
st = currentProgram().getSymbolTable() # type: ignore [name-defined] # noqa: F821
for addr in st.getExternalEntryPointIterator():
sym = st.getPrimarySymbol(addr)
name = sym.getName()
# Check for forwarded export
is_forwarded = False
refs = program.getReferenceManager().getReferencesFrom(addr)
for ref in refs:
if ref.getToAddress().isExternalAddress():
ext_sym = st.getPrimarySymbol(ref.getToAddress())
if ext_sym:
ext_loc = program.getExternalManager().getExternalLocation(ext_sym)
if ext_loc:
# It is a forwarded export
libname = ext_loc.getLibraryName()
if libname.lower().endswith(".dll"):
libname = libname[:-4]
forwarded_name = f"{libname}.{ext_loc.getLabel()}"
forwarded_name = capa.features.extractors.helpers.reformat_forwarded_export_name(forwarded_name)
yield Export(forwarded_name), AbsoluteVirtualAddress(addr.getOffset())
yield Characteristic("forwarded export"), AbsoluteVirtualAddress(addr.getOffset())
is_forwarded = True
break
if not is_forwarded:
yield Export(name), AbsoluteVirtualAddress(addr.getOffset())
yield Export(st.getPrimarySymbol(addr).getName()), AbsoluteVirtualAddress(addr.getOffset())
def extract_file_import_names() -> Iterator[tuple[Feature, Address]]:
@@ -142,7 +110,7 @@ def extract_file_import_names() -> Iterator[tuple[Feature, Address]]:
- importname
"""
for f in capa.features.extractors.ghidra.helpers.get_current_program().getFunctionManager().getExternalFunctions():
for f in currentProgram().getFunctionManager().getExternalFunctions(): # type: ignore [name-defined] # noqa: F821
for r in f.getSymbol().getReferences():
if r.getReferenceType().isData():
addr = r.getFromAddress().getOffset() # gets pointer to fake external addr
@@ -158,14 +126,14 @@ def extract_file_import_names() -> Iterator[tuple[Feature, Address]]:
def extract_file_section_names() -> Iterator[tuple[Feature, Address]]:
"""extract section names"""
for block in capa.features.extractors.ghidra.helpers.get_current_program().getMemory().getBlocks():
for block in currentProgram().getMemory().getBlocks(): # type: ignore [name-defined] # noqa: F821
yield Section(block.getName()), AbsoluteVirtualAddress(block.getStart().getOffset())
def extract_file_strings() -> Iterator[tuple[Feature, Address]]:
"""extract ASCII and UTF-16 LE strings"""
for block in capa.features.extractors.ghidra.helpers.get_current_program().getMemory().getBlocks():
for block in currentProgram().getMemory().getBlocks(): # type: ignore [name-defined] # noqa: F821
if not block.isInitialized():
continue
@@ -185,8 +153,7 @@ def extract_file_function_names() -> Iterator[tuple[Feature, Address]]:
extract the names of statically-linked library functions.
"""
for sym in capa.features.extractors.ghidra.helpers.get_current_program().getSymbolTable().getAllSymbols(True):
for sym in currentProgram().getSymbolTable().getAllSymbols(True): # type: ignore [name-defined] # noqa: F821
# .isExternal() misses more than this config for the function symbols
if sym.getSymbolType() == SymbolType.FUNCTION and sym.getSource() == SourceType.ANALYSIS and sym.isGlobal():
name = sym.getName() # starts to resolve names based on Ghidra's FidDB
@@ -203,7 +170,7 @@ def extract_file_function_names() -> Iterator[tuple[Feature, Address]]:
def extract_file_format() -> Iterator[tuple[Feature, Address]]:
ef = capa.features.extractors.ghidra.helpers.get_current_program().getExecutableFormat()
ef = currentProgram().getExecutableFormat() # type: ignore [name-defined] # noqa: F821
if "PE" in ef:
yield Format(FORMAT_PE), NO_ADDRESS
elif "ELF" in ef:
@@ -231,3 +198,14 @@ FILE_HANDLERS = (
extract_file_function_names,
extract_file_format,
)
def main():
""" """
import pprint
pprint.pprint(list(extract_features())) # noqa: T203
if __name__ == "__main__":
main()

View File

@@ -26,25 +26,21 @@ from capa.features.extractors.base_extractor import FunctionHandle
def extract_function_calls_to(fh: FunctionHandle):
"""extract callers to a function"""
f: "ghidra.program.database.function.FunctionDB" = fh.inner
f: ghidra.program.database.function.FunctionDB = fh.inner
for ref in f.getSymbol().getReferences():
if ref.getReferenceType().isCall():
yield Characteristic("calls to"), AbsoluteVirtualAddress(ref.getFromAddress().getOffset())
def extract_function_loop(fh: FunctionHandle):
f: "ghidra.program.database.function.FunctionDB" = fh.inner
f: ghidra.program.database.function.FunctionDB = fh.inner
edges = []
for block in SimpleBlockIterator(
BasicBlockModel(capa.features.extractors.ghidra.helpers.get_current_program()),
f.getBody(),
capa.features.extractors.ghidra.helpers.get_monitor(),
):
dests = block.getDestinations(capa.features.extractors.ghidra.helpers.get_monitor())
for block in SimpleBlockIterator(BasicBlockModel(currentProgram()), f.getBody(), monitor()): # type: ignore [name-defined] # noqa: F821
dests = block.getDestinations(monitor()) # type: ignore [name-defined] # noqa: F821
s_addrs = block.getStartAddresses()
while dests.hasNext():
while dests.hasNext(): # For loop throws Python TypeError
for addr in s_addrs:
edges.append((addr.getOffset(), dests.next().getDestinationAddress().getOffset()))
@@ -53,17 +49,32 @@ def extract_function_loop(fh: FunctionHandle):
def extract_recursive_call(fh: FunctionHandle):
f: "ghidra.program.database.function.FunctionDB" = fh.inner
f: ghidra.program.database.function.FunctionDB = fh.inner
for func in f.getCalledFunctions(capa.features.extractors.ghidra.helpers.get_monitor()):
for func in f.getCalledFunctions(monitor()): # type: ignore [name-defined] # noqa: F821
if func.getEntryPoint().getOffset() == f.getEntryPoint().getOffset():
yield Characteristic("recursive call"), AbsoluteVirtualAddress(f.getEntryPoint().getOffset())
def extract_features(fh: FunctionHandle) -> Iterator[tuple[Feature, Address]]:
for function_handler in FUNCTION_HANDLERS:
for feature, addr in function_handler(fh):
for func_handler in FUNCTION_HANDLERS:
for feature, addr in func_handler(fh):
yield feature, addr
FUNCTION_HANDLERS = (extract_function_calls_to, extract_function_loop, extract_recursive_call)
def main():
""" """
features = []
for fhandle in capa.features.extractors.ghidra.helpers.get_function_symbols():
features.extend(list(extract_features(fhandle)))
import pprint
pprint.pprint(features) # noqa: T203
if __name__ == "__main__":
main()

View File

@@ -26,7 +26,7 @@ logger = logging.getLogger(__name__)
def extract_os() -> Iterator[tuple[Feature, Address]]:
format_name: str = capa.features.extractors.ghidra.helpers.get_current_program().getExecutableFormat()
format_name: str = currentProgram().getExecutableFormat() # type: ignore [name-defined] # noqa: F821
if "PE" in format_name:
yield OS(OS_WINDOWS), NO_ADDRESS
@@ -53,7 +53,7 @@ def extract_os() -> Iterator[tuple[Feature, Address]]:
def extract_arch() -> Iterator[tuple[Feature, Address]]:
lang_id = capa.features.extractors.ghidra.helpers.get_current_program().getMetadata().get("Language ID")
lang_id = currentProgram().getMetadata().get("Language ID") # type: ignore [name-defined] # noqa: F821
if "x86" in lang_id and "64" in lang_id:
yield Arch(ARCH_AMD64), NO_ADDRESS

View File

@@ -22,22 +22,9 @@ from ghidra.program.model.symbol import SourceType, SymbolType
from ghidra.program.model.address import AddressSpace
import capa.features.extractors.helpers
import capa.features.extractors.ghidra.context as ghidra_context
from capa.features.common import THUNK_CHAIN_DEPTH_DELTA
from capa.features.address import AbsoluteVirtualAddress
from capa.features.extractors.base_extractor import BBHandle, InsnHandle
def get_current_program():
return ghidra_context.get_context().program
def get_monitor():
return ghidra_context.get_context().monitor
def get_flat_api():
return ghidra_context.get_context().flat_api
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle
def ints_to_bytes(bytez: list[int]) -> bytes:
@@ -49,7 +36,7 @@ def ints_to_bytes(bytez: list[int]) -> bytes:
return bytes([b & 0xFF for b in bytez])
def find_byte_sequence(addr: "ghidra.program.model.address.Address", seq: bytes) -> Iterator[int]:
def find_byte_sequence(addr: ghidra.program.model.address.Address, seq: bytes) -> Iterator[int]:
"""yield all ea of a given byte sequence
args:
@@ -57,25 +44,12 @@ def find_byte_sequence(addr: "ghidra.program.model.address.Address", seq: bytes)
seq: bytes to search e.g. b"\x01\x03"
"""
seqstr = "".join([f"\\x{b:02x}" for b in seq])
eas = get_flat_api().findBytes(addr, seqstr, java.lang.Integer.MAX_VALUE, 1)
eas = findBytes(addr, seqstr, java.lang.Integer.MAX_VALUE, 1) # type: ignore [name-defined] # noqa: F821
yield from eas
def get_file_offset(addr: "ghidra.program.model.address.Address") -> int:
"""get file offset for an address"""
block = get_current_program().getMemory().getBlock(addr)
if not block:
return -1
for info in block.getSourceInfos():
if info.contains(addr):
return info.getFileBytesOffset(addr)
return -1
def get_bytes(addr: "ghidra.program.model.address.Address", length: int) -> bytes:
def get_bytes(addr: ghidra.program.model.address.Address, length: int) -> bytes:
"""yield length bytes at addr
args:
@@ -83,12 +57,12 @@ def get_bytes(addr: "ghidra.program.model.address.Address", length: int) -> byte
length: length of bytes to pull
"""
try:
return ints_to_bytes(get_flat_api().getBytes(addr, int(length)))
except Exception:
return ints_to_bytes(getBytes(addr, length)) # type: ignore [name-defined] # noqa: F821
except RuntimeError:
return b""
def get_block_bytes(block: "ghidra.program.model.mem.MemoryBlock") -> bytes:
def get_block_bytes(block: ghidra.program.model.mem.MemoryBlock) -> bytes:
"""yield all bytes in a given block
args:
@@ -99,21 +73,20 @@ def get_block_bytes(block: "ghidra.program.model.mem.MemoryBlock") -> bytes:
def get_function_symbols():
"""yield all non-external function symbols"""
yield from get_current_program().getFunctionManager().getFunctionsNoStubs(True)
yield from currentProgram().getFunctionManager().getFunctionsNoStubs(True) # type: ignore [name-defined] # noqa: F821
def get_function_blocks(fh: "capa.features.extractors.base_extractor.FunctionHandle") -> Iterator[BBHandle]:
"""
yield the basic blocks of the function
"""
def get_function_blocks(fh: FunctionHandle) -> Iterator[BBHandle]:
"""yield BBHandle for each bb in a given function"""
for block in SimpleBlockIterator(BasicBlockModel(get_current_program()), fh.inner.getBody(), get_monitor()):
yield BBHandle(address=AbsoluteVirtualAddress(block.getMinAddress().getOffset()), inner=block)
func: ghidra.program.database.function.FunctionDB = fh.inner
for bb in SimpleBlockIterator(BasicBlockModel(currentProgram()), func.getBody(), monitor()): # type: ignore [name-defined] # noqa: F821
yield BBHandle(address=AbsoluteVirtualAddress(bb.getMinAddress().getOffset()), inner=bb)
def get_insn_in_range(bbh: BBHandle) -> Iterator[InsnHandle]:
"""yield InshHandle for each insn in a given basicblock"""
for insn in get_current_program().getListing().getInstructions(bbh.inner, True):
for insn in currentProgram().getListing().getInstructions(bbh.inner, True): # type: ignore [name-defined] # noqa: F821
yield InsnHandle(address=AbsoluteVirtualAddress(insn.getAddress().getOffset()), inner=insn)
@@ -122,7 +95,7 @@ def get_file_imports() -> dict[int, list[str]]:
import_dict: dict[int, list[str]] = {}
for f in get_current_program().getFunctionManager().getExternalFunctions():
for f in currentProgram().getFunctionManager().getExternalFunctions(): # type: ignore [name-defined] # noqa: F821
for r in f.getSymbol().getReferences():
if r.getReferenceType().isData():
addr = r.getFromAddress().getOffset() # gets pointer to fake external addr
@@ -160,7 +133,7 @@ def get_file_externs() -> dict[int, list[str]]:
extern_dict: dict[int, list[str]] = {}
for sym in get_current_program().getSymbolTable().getAllSymbols(True):
for sym in currentProgram().getSymbolTable().getAllSymbols(True): # type: ignore [name-defined] # noqa: F821
# .isExternal() misses more than this config for the function symbols
if sym.getSymbolType() == SymbolType.FUNCTION and sym.getSource() == SourceType.ANALYSIS and sym.isGlobal():
name = sym.getName() # starts to resolve names based on Ghidra's FidDB
@@ -198,7 +171,7 @@ def map_fake_import_addrs() -> dict[int, list[int]]:
"""
fake_dict: dict[int, list[int]] = {}
for f in get_current_program().getFunctionManager().getExternalFunctions():
for f in currentProgram().getFunctionManager().getExternalFunctions(): # type: ignore [name-defined] # noqa: F821
for r in f.getSymbol().getReferences():
if r.getReferenceType().isData():
fake_dict.setdefault(f.getEntryPoint().getOffset(), []).append(r.getFromAddress().getOffset())
@@ -207,7 +180,7 @@ def map_fake_import_addrs() -> dict[int, list[int]]:
def check_addr_for_api(
addr: "ghidra.program.model.address.Address",
addr: ghidra.program.model.address.Address,
fakes: dict[int, list[int]],
imports: dict[int, list[str]],
externs: dict[int, list[str]],
@@ -229,18 +202,18 @@ def check_addr_for_api(
return False
def is_call_or_jmp(insn: "ghidra.program.database.code.InstructionDB") -> bool:
def is_call_or_jmp(insn: ghidra.program.database.code.InstructionDB) -> bool:
return any(mnem in insn.getMnemonicString() for mnem in ["CALL", "J"]) # JMP, JNE, JNZ, etc
def is_sp_modified(insn: "ghidra.program.database.code.InstructionDB") -> bool:
def is_sp_modified(insn: ghidra.program.database.code.InstructionDB) -> bool:
for i in range(insn.getNumOperands()):
if insn.getOperandType(i) == OperandType.REGISTER:
return "SP" in insn.getRegister(i).getName() and insn.getOperandRefType(i).isWrite()
return False
def is_stack_referenced(insn: "ghidra.program.database.code.InstructionDB") -> bool:
def is_stack_referenced(insn: ghidra.program.database.code.InstructionDB) -> bool:
"""generic catch-all for stack references"""
for i in range(insn.getNumOperands()):
if insn.getOperandType(i) == OperandType.REGISTER:
@@ -252,7 +225,7 @@ def is_stack_referenced(insn: "ghidra.program.database.code.InstructionDB") -> b
return any(ref.isStackReference() for ref in insn.getReferencesFrom())
def is_zxor(insn: "ghidra.program.database.code.InstructionDB") -> bool:
def is_zxor(insn: ghidra.program.database.code.InstructionDB) -> bool:
# assume XOR insn
# XOR's against the same operand zero out
ops = []
@@ -268,29 +241,29 @@ def is_zxor(insn: "ghidra.program.database.code.InstructionDB") -> bool:
return all(n == operands[0] for n in operands)
def handle_thunk(addr: "ghidra.program.model.address.Address"):
def handle_thunk(addr: ghidra.program.model.address.Address):
"""Follow thunk chains down to a reasonable depth"""
ref = addr
for _ in range(THUNK_CHAIN_DEPTH_DELTA):
thunk_jmp = get_flat_api().getInstructionAt(ref)
thunk_jmp = getInstructionAt(ref) # type: ignore [name-defined] # noqa: F821
if thunk_jmp and is_call_or_jmp(thunk_jmp):
if OperandType.isAddress(thunk_jmp.getOperandType(0)):
ref = thunk_jmp.getAddress(0)
else:
thunk_dat = get_flat_api().getDataContaining(ref)
thunk_dat = getDataContaining(ref) # type: ignore [name-defined] # noqa: F821
if thunk_dat and thunk_dat.isDefined() and thunk_dat.isPointer():
ref = thunk_dat.getValue()
break # end of thunk chain reached
return ref
def dereference_ptr(insn: "ghidra.program.database.code.InstructionDB"):
def dereference_ptr(insn: ghidra.program.database.code.InstructionDB):
addr_code = OperandType.ADDRESS | OperandType.CODE
to_deref = insn.getAddress(0)
dat = get_flat_api().getDataContaining(to_deref)
dat = getDataContaining(to_deref) # type: ignore [name-defined] # noqa: F821
if insn.getOperandType(0) == addr_code:
thfunc = get_flat_api().getFunctionContaining(to_deref)
thfunc = getFunctionContaining(to_deref) # type: ignore [name-defined] # noqa: F821
if thfunc and thfunc.isThunk():
return handle_thunk(to_deref)
else:
@@ -321,7 +294,7 @@ def find_data_references_from_insn(insn, max_depth: int = 10):
to_addr = reference.getToAddress()
for _ in range(max_depth - 1):
data = get_flat_api().getDataAt(to_addr)
data = getDataAt(to_addr) # type: ignore [name-defined] # noqa: F821
if data and data.isPointer():
ptr_value = data.getValue()

View File

@@ -234,7 +234,7 @@ def extract_insn_bytes_features(fh: FunctionHandle, bb: BBHandle, ih: InsnHandle
push offset iid_004118d4_IShellLinkA ; riid
"""
for addr in capa.features.extractors.ghidra.helpers.find_data_references_from_insn(ih.inner):
data = capa.features.extractors.ghidra.helpers.get_flat_api().getDataAt(addr)
data = getDataAt(addr) # type: ignore [name-defined] # noqa: F821
if data and not data.hasStringValue():
extracted_bytes = capa.features.extractors.ghidra.helpers.get_bytes(addr, MAX_BYTES_FEATURE_SIZE)
if extracted_bytes and not capa.features.extractors.helpers.all_zeros(extracted_bytes):
@@ -249,9 +249,9 @@ def extract_insn_string_features(fh: FunctionHandle, bb: BBHandle, ih: InsnHandl
push offset aAcr ; "ACR > "
"""
for addr in capa.features.extractors.ghidra.helpers.find_data_references_from_insn(ih.inner):
data = capa.features.extractors.ghidra.helpers.get_flat_api().getDataAt(addr)
data = getDataAt(addr) # type: ignore [name-defined] # noqa: F821
if data and data.hasStringValue():
yield String(str(data.getValue())), ih.address
yield String(data.getValue()), ih.address
def extract_insn_mnemonic_features(
@@ -361,8 +361,8 @@ def extract_insn_cross_section_cflow(
if capa.features.extractors.ghidra.helpers.check_addr_for_api(ref, fakes, imports, externs):
return
this_mem_block = capa.features.extractors.ghidra.helpers.get_flat_api().getMemoryBlock(insn.getAddress())
ref_block = capa.features.extractors.ghidra.helpers.get_flat_api().getMemoryBlock(ref)
this_mem_block = getMemoryBlock(insn.getAddress()) # type: ignore [name-defined] # noqa: F821
ref_block = getMemoryBlock(ref) # type: ignore [name-defined] # noqa: F821
if ref_block != this_mem_block:
yield Characteristic("cross section flow"), ih.address
@@ -425,19 +425,19 @@ def check_nzxor_security_cookie_delta(
Check if insn within last addr of last bb - delta
"""
model = SimpleBlockModel(capa.features.extractors.ghidra.helpers.get_current_program())
model = SimpleBlockModel(currentProgram()) # type: ignore [name-defined] # noqa: F821
insn_addr = insn.getAddress()
func_asv = fh.getBody()
first_addr = func_asv.getMinAddress()
if insn_addr < first_addr.add(SECURITY_COOKIE_BYTES_DELTA):
first_bb = model.getFirstCodeBlockContaining(first_addr, capa.features.extractors.ghidra.helpers.get_monitor())
first_bb = model.getFirstCodeBlockContaining(first_addr, monitor()) # type: ignore [name-defined] # noqa: F821
if first_bb.contains(insn_addr):
return True
last_addr = func_asv.getMaxAddress()
if insn_addr > last_addr.add(SECURITY_COOKIE_BYTES_DELTA * -1):
last_bb = model.getFirstCodeBlockContaining(last_addr, capa.features.extractors.ghidra.helpers.get_monitor())
last_bb = model.getFirstCodeBlockContaining(last_addr, monitor()) # type: ignore [name-defined] # noqa: F821
if last_bb.contains(insn_addr):
return True
@@ -488,3 +488,22 @@ INSTRUCTION_HANDLERS = (
extract_function_calls_from,
extract_function_indirect_call_characteristic_features,
)
def main():
""" """
features = []
from capa.features.extractors.ghidra.extractor import GhidraFeatureExtractor
for fh in GhidraFeatureExtractor().get_functions():
for bb in capa.features.extractors.ghidra.helpers.get_function_blocks(fh):
for insn in capa.features.extractors.ghidra.helpers.get_insn_in_range(bb):
features.extend(list(extract_features(fh, bb, insn)))
import pprint
pprint.pprint(features) # noqa: T203
if __name__ == "__main__":
main()

View File

@@ -18,7 +18,6 @@ import idaapi
import idautils
import capa.features.extractors.ida.helpers
from capa.features.file import FunctionName
from capa.features.common import Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.extractors import loops
@@ -51,39 +50,10 @@ def extract_recursive_call(fh: FunctionHandle):
yield Characteristic("recursive call"), fh.address
def extract_function_name(fh: FunctionHandle) -> Iterator[tuple[Feature, Address]]:
ea = fh.inner.start_ea
name = idaapi.get_name(ea)
if name.startswith("sub_"):
# skip default names, like "sub_401000"
return
yield FunctionName(name), fh.address
if name.startswith("_"):
# some linkers may prefix linked routines with a `_` to avoid name collisions.
# extract features for both the mangled and un-mangled representations.
# e.g. `_fwrite` -> `fwrite`
# see: https://stackoverflow.com/a/2628384/87207
yield FunctionName(name[1:]), fh.address
def extract_function_alternative_names(fh: FunctionHandle):
"""Get all alternative names for an address."""
for aname in capa.features.extractors.ida.helpers.get_function_alternative_names(fh.inner.start_ea):
yield FunctionName(aname), fh.address
def extract_features(fh: FunctionHandle) -> Iterator[tuple[Feature, Address]]:
for func_handler in FUNCTION_HANDLERS:
for feature, addr in func_handler(fh):
yield feature, addr
FUNCTION_HANDLERS = (
extract_function_calls_to,
extract_function_loop,
extract_recursive_call,
extract_function_name,
extract_function_alternative_names,
)
FUNCTION_HANDLERS = (extract_function_calls_to, extract_function_loop, extract_recursive_call)

View File

@@ -20,7 +20,6 @@ import idaapi
import ida_nalt
import idautils
import ida_bytes
import ida_funcs
import ida_segment
from capa.features.address import AbsoluteVirtualAddress
@@ -437,16 +436,3 @@ def is_basic_block_return(bb: idaapi.BasicBlock) -> bool:
def has_sib(oper: idaapi.op_t) -> bool:
# via: https://reverseengineering.stackexchange.com/a/14300
return oper.specflag1 == 1
def find_alternative_names(cmt: str):
for line in cmt.split("\n"):
if line.startswith("Alternative name is '") and line.endswith("'"):
name = line[len("Alternative name is '") : -1] # Extract name between quotes
yield name
def get_function_alternative_names(fva: int):
"""Get all alternative names for an address."""
yield from find_alternative_names(ida_bytes.get_cmt(fva, False) or "")
yield from find_alternative_names(ida_funcs.get_func_cmt(idaapi.get_func(fva), False) or "")

View File

@@ -22,7 +22,6 @@ import idautils
import capa.features.extractors.helpers
import capa.features.extractors.ida.helpers
from capa.features.file import FunctionName
from capa.features.insn import API, MAX_STRUCTURE_SIZE, Number, Offset, Mnemonic, OperandNumber, OperandOffset
from capa.features.common import MAX_BYTES_FEATURE_SIZE, THUNK_CHAIN_DEPTH_DELTA, Bytes, String, Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
@@ -130,8 +129,8 @@ def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle)
# not a function (start)
return
name = idaapi.get_name(target_func.start_ea)
if target_func.flags & idaapi.FUNC_LIB or not name.startswith("sub_"):
if target_func.flags & idaapi.FUNC_LIB:
name = idaapi.get_name(target_func.start_ea)
yield API(name), ih.address
if name.startswith("_"):
# some linkers may prefix linked routines with a `_` to avoid name collisions.
@@ -140,10 +139,6 @@ def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle)
# see: https://stackoverflow.com/a/2628384/87207
yield API(name[1:]), ih.address
for altname in capa.features.extractors.ida.helpers.get_function_alternative_names(target_func.start_ea):
yield FunctionName(altname), ih.address
yield API(altname), ih.address
def extract_insn_number_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle

View File

@@ -56,7 +56,7 @@ def get_previous_instructions(vw: VivWorkspace, va: int) -> list[int]:
if ploc is not None:
# from vivisect.const:
# location: (L_VA, L_SIZE, L_LTYPE, L_TINFO)
pva, _, ptype, pinfo = ploc
(pva, _, ptype, pinfo) = ploc
if ptype == LOC_OP and not (pinfo & IF_NOFALL):
ret.append(pva)

View File

@@ -176,7 +176,7 @@ def extract_insn_api_features(fh: FunctionHandle, bb, ih: InsnHandle) -> Iterato
elif isinstance(insn.opers[0], envi.archs.i386.disasm.i386RegOper):
try:
_, target = resolve_indirect_call(f.vw, insn.va, insn=insn)
(_, target) = resolve_indirect_call(f.vw, insn.va, insn=insn)
except NotFoundError:
# not able to resolve the indirect call, sorry
return

View File

@@ -26,16 +26,6 @@ from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, Pr
logger = logging.getLogger(__name__)
VOID_PTR_NUMBER_PARAMS = frozenset(
{
"hKey",
"hKeyRoot",
"hkResult",
"samDesired",
}
)
def get_call_param_features(param: Param, ch: CallHandle) -> Iterator[tuple[Feature, Address]]:
if param.deref is not None:
# pointer types contain a special "deref" member that stores the deref'd value
@@ -49,31 +39,10 @@ def get_call_param_features(param: Param, ch: CallHandle) -> Iterator[tuple[Feat
# parsing the data up to here results in double-escaped backslashes, remove those here
yield String(param.deref.value.replace("\\\\", "\\")), ch.address
else:
if param.name in VOID_PTR_NUMBER_PARAMS:
try:
yield Number(hexint(param.deref.value)), ch.address
except (ValueError, TypeError) as e:
logger.debug(
"failed to parse whitelisted void_ptr param %s value %s: %s",
param.name,
param.deref.value,
e,
)
else:
logger.debug("skipping deref param type %s", param.deref.type_)
logger.debug("skipping deref param type %s", param.deref.type_)
elif param.value is not None:
if param.type_ in PARAM_TYPE_INT:
yield Number(hexint(param.value)), ch.address
elif param.type_ == "void_ptr" and param.name in VOID_PTR_NUMBER_PARAMS:
try:
yield Number(hexint(param.value)), ch.address
except (ValueError, TypeError) as e:
logger.debug(
"failed to parse whitelisted void_ptr param %s value %s: %s",
param.name,
param.value,
e,
)
def extract_call_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[tuple[Feature, Address]]:

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Iterator
from pathlib import Path
@@ -39,8 +39,6 @@ from capa.features.extractors.base_extractor import (
DynamicFeatureExtractor,
)
logger = logging.getLogger(__name__)
def get_formatted_params(params: ParamList) -> list[str]:
params_list: list[str] = []
@@ -89,16 +87,6 @@ class VMRayExtractor(DynamicFeatureExtractor):
def get_processes(self) -> Iterator[ProcessHandle]:
for monitor_process in self.analysis.monitor_processes.values():
# skip invalid/incomplete monitor process entries, see #2807
if monitor_process.pid == 0 or not monitor_process.filename:
logger.debug(
"skipping incomplete process entry: pid=%d, filename=%s, monitor_id=%d",
monitor_process.pid,
monitor_process.filename,
monitor_process.monitor_id,
)
continue
address: ProcessAddress = ProcessAddress(pid=monitor_process.pid, ppid=monitor_process.ppid)
yield ProcessHandle(address, inner=monitor_process)

View File

@@ -1,75 +1,107 @@
# capa analysis using Ghidra
<div align="center">
<img src="../../doc/img/ghidra_backend_logo.png" width=240 height=125>
</div>
capa supports using Ghidra (via [PyGhidra](https://github.com/NationalSecurityAgency/ghidra/tree/master/Ghidra/Features/PyGhidra)) as a feature extraction backend. This enables you to run capa against binaries using Ghidra's analysis engine.
# capa + Ghidra
[capa](https://github.com/mandiant/capa) is the FLARE team's open-source tool that detects capabilities in executable files. [Ghidra](https://github.com/NationalSecurityAgency/ghidra) is an open-source software reverse engineering framework created and maintained by the National Security Agency Research Directorate. capa + Ghidra brings capa's detection capabilities directly to Ghidra's user interface, helping speed up your reverse engineering tasks by identifying what parts of a program suggest interesting behavior, such as setting a registry value. You can execute the included Python 3 scripts [capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_explorer.py) or [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) to run capa's analysis and view the results in Ghidra. You may be asking yourself, "Python 3 scripts in Ghidra?". You read that correctly. This integration is written entirely in Python 3 and relies on [Ghidrathon]( https://github.com/mandiant/ghidrathon), an open source Ghidra extension that adds Python 3 scripting to Ghidra.
Check out our capa + Ghidra blog posts:
* [Riding Dragons: capa Harnesses Ghidra](https://www.mandiant.com/resources/blog/capa-harnesses-ghidra)
## UI Integration
[capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_explorer.py) renders capa results in Ghidra's UI to help you quickly navigate them. This includes adding matched functions to Ghidra's Symbol Tree and Bookmarks windows and adding comments to functions that indicate matched capabilities and features. You can execute this script using Ghidra's Script Manager window.
### Symbol Tree Window
Matched functions are added to Ghidra's Symbol Tree window under a custom namespace that maps to the capabilities' [capa namespace](https://github.com/mandiant/capa-rules/blob/master/doc/format.md#rule-namespace).
<div align="center">
<img src="https://github.com/mandiant/capa/assets/66766340/eeae33f4-99d4-42dc-a5e8-4c1b8c661492" width=300>
</div>
### Comments
Comments are added at the beginning of matched functions indicating matched capabilities, and inline comments are added to functions indicating matched features. You can view these comments in Ghidra's Disassembly Listing and Decompile windows.
<div align="center">
<img src="https://github.com/mandiant/capa/assets/66766340/bb2b4170-7fd4-45fc-8c7b-ff8f2e2f101b" width=1000>
</div>
### Bookmarks
Bookmarks are added to functions that matched a capability that is mapped to a MITRE ATT&CK and/or Malware Behavior Catalog (MBC) technique. You can view these bookmarks in Ghidra's Bookmarks window.
<div align="center">
<img src="https://github.com/mandiant/capa/assets/66766340/7f9a66a9-7be7-4223-91c6-4b8fc4651336" width=825>
</div>
## Text-based Integration
[capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) outputs text-based capa results that mirror the output of capa's standalone tool. You can execute this script using Ghidra's Script Manager and view its output in Ghidra's Console window.
<div align="center">
<img src="../../doc/img/ghidra_script_mngr_output.png" width=700>
</div>
You can also execute [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) using Ghidra's Headless Analyzer to view its output in a terminal window.
<div align="center">
<img src="../../doc/img/ghidra_headless_analyzer.png">
</div>
# Getting Started
## Requirements
| Tool | Version | Source |
|------------|---------|--------|
| capa | `>= 7.0.0` | https://github.com/mandiant/capa/releases |
| Ghidrathon | `>= 3.0.0` | https://github.com/mandiant/Ghidrathon/releases |
| Ghidra | `>= 10.3.2` | https://github.com/NationalSecurityAgency/ghidra/releases |
| Python | `>= 3.10.0` | https://www.python.org/downloads |
## Installation
**Note**: capa + Ghidra relies on [Ghidrathon]( https://github.com/mandiant/ghidrathon) to execute Python 3 code in Ghidra. You must first install and configure Ghidrathon using the [steps outlined in its README]( https://github.com/mandiant/ghidrathon?tab=readme-ov-file#installing-ghidrathon). Then, you must use the Python 3 interpreter that you configured with Ghidrathon to complete the following steps:
1. Install capa and its dependencies from PyPI using the following command:
```bash
$ capa -b ghidra Practical\ Malware\ Analysis\ Lab\ 01-01.exe_
┌──────────┬──────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ md5 │ bb7425b82141a1c0f7d60e5106676bb1 │
│ sha1 │ │
│ sha256 │ 58898bd42c5bd3bf9b1389f0eee5b39cd59180e8370eb9ea838a0b327bd6fe47 │
│ analysis │ static │
│ os │ windows │
│ format │ pe │
│ arch │ i386 │
│ path │ ~/Documents/capa/tests/data/Practical Malware Analysis Lab 01-01.exe_ │
└──────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────┘
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ ATT&CK Tactic ┃ ATT&CK Technique ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ DISCOVERY │ File and Directory Discovery [T1083]
└────────────────────────────────────┴─────────────────────────────────────────────────────────────┘
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ MBC Objective ┃ MBC Behavior ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ DISCOVERY │ File and Directory Discovery [E1083]
│ FILE SYSTEM │ Copy File [C0045]
│ │ Read File [C0051]
│ PROCESS │ Terminate Process [C0018]
└────────────────────────────────────┴─────────────────────────────────────────────────────────────┘
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Capability ┃ Namespace ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ copy file │ host-interaction/file-system/copy │
│ enumerate files recursively │ host-interaction/file-system/files/list │
read file via mapping (2 matches) │ host-interaction/file-system/read │
│ terminate process (2 matches) │ host-interaction/process/terminate │
│ resolve function by parsing PE exports │ load-code/pe │
└────────────────────────────────────────────────┴─────────────────────────────────────────────────┘
$ pip install flare-capa
```
## getting started
### requirements
- [Ghidra](https://github.com/NationalSecurityAgency/ghidra) >= 12.0 must be installed and available via the `GHIDRA_INSTALL_DIR` environment variable.
#### standalone binary (recommended)
The capa [standalone binary](https://github.com/mandiant/capa/releases) is the preferred way to run capa with the Ghidra backend.
Although the binary does not bundle the Java environment or Ghidra itself, it will dynamically load them at runtime.
#### python package
You can also use the Ghidra backend with the capa Python package by installing `flare-capa` with the `ghidra` extra.
2. Download and extract the [official capa rules](https://github.com/mandiant/capa-rules/releases) that match the capa version you have installed. You can use the following command to view the version of capa you have installed:
```bash
$ pip install "flare-capa[ghidra]"
$ pip show flare-capa
OR
$ capa --version
```
### usage
3. Copy [capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_explorer.py) and [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) to your `ghidra_scripts` directory or manually add the parent directory of each script using Ghidra's Script Manager.
To use the Ghidra backend, specify it with the `-b` or `--backend` flag:
## Usage
You can execute [capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_explorer.py) and [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) using Ghidra's Script Manager. [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) can also be executed using Ghidra's Headless Analyzer.
### Execution using Ghidra's Script Manager
You can execute [capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_explorer.py) and [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) using Ghidra's Script Manager as follows:
1. Navigate to `Window > Script Manager`
2. Expand the `Python 3 > capa` category
3. Double-click a script to execute it
Both scripts ask you to provide the path of your capa rules directory (see installation step 2). [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) also has you choose one of `default`, `verbose`, and `vverbose` output formats, which mirror the output formats of capa's standalone tool.
### Execution using Ghidra's Headless Analyzer
You can execute [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) using Ghidra's Headless Analyzer by invoking the `analyzeHeadless` script included with Ghidra in its `support` directory. The following arguments must be provided:
| Argument | Description |
|----|----|
|`<project_path>`| Path to Ghidra project|
| `<project_name>`| Name of Ghidra Project|
| `-Process <sample_name>` OR `-Import <sample_path>`| Name of sample `<sample_name>` already imported into `<project_name>` OR absolute path of sample `<sample_path>` to import into `<project_name>`|
| `-ScriptPath <script_path>`| OPTIONAL parent directory `<script_path>` of [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py)|
| `-PostScript capa_ghidra.py`| Execute [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) after Ghidra analysis|
| `"<script_args>"`| Quoted string `"<script_args>"` containing script arguments passed to [capa_ghidra.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_ghidra.py) that must specify a capa rules path and optionally the output format (`--verbose`, `--vverbose`, `--json`); you can specify `"help"` to view the script's help message |
The following is an example of combining these arguments into a single `analyzeHeadless` script command:
```bash
$ capa -b ghidra /path/to/sample
$ analyzeHeadless /home/wumbo/demo demo -Import /home/wumbo/capa/tests/data/Practical\ Malware\ Analysis\ Lab\ 01-01.dll_ -PostScript capa_ghidra.py "/home/wumbo/capa/rules --verbose"
```
capa will:
1. Initialize a headless Ghidra instance.
2. Create a temporary project.
3. Import and analyze the sample.
4. Extract features and match rules.
5. Clean up the temporary project.
**Note:** The first time you run this, it may take a few moments to initialize the Ghidra environment.

View File

@@ -1,3 +1,7 @@
# Run capa against loaded Ghidra database and render results in Ghidra UI
# @author Colton Gabertan (gabertan.colton@gmail.com)
# @category Python 3.capa
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,63 +16,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Run capa against loaded Ghidra database and render results in Ghidra UI
# @author Colton Gabertan (gabertan.colton@gmail.com)
# @category capa
# @runtime PyGhidra
import sys
import json
import logging
import pathlib
from typing import Any
from java.util import ArrayList
from ghidra.util import Msg
from ghidra.app.cmd.label import AddLabelCmd, CreateNamespacesCmd
from ghidra.util.exception import CancelledException
from ghidra.program.flatapi import FlatProgramAPI
from ghidra.program.model.symbol import Namespace, SourceType, SymbolType
import capa
import capa.main
import capa.rules
import capa.version
import capa.render.json
import capa.ghidra.helpers
import capa.capabilities.common
import capa.features.extractors.ghidra.context
import capa.features.extractors.ghidra.extractor
logger = logging.getLogger("capa_explorer")
def show_monitor_message(msg):
capa.ghidra.helpers.get_monitor().checkCanceled()
capa.ghidra.helpers.get_monitor().setMessage(msg)
def show_error(msg):
Msg.showError(None, None, "capa explorer", msg)
def show_warn(msg):
Msg.showWarn(None, None, "capa explorer", msg)
def show_info(msg):
Msg.showInfo(None, None, "capa explorer", msg)
def add_bookmark(addr, txt, category="CapaExplorer"):
"""create bookmark at addr"""
capa.ghidra.helpers.get_current_program().getBookmarkManager().setBookmark(addr, "Info", category, txt)
currentProgram().getBookmarkManager().setBookmark(addr, "Info", category, txt) # type: ignore [name-defined] # noqa: F821
def create_namespace(namespace_str):
"""create new Ghidra namespace for each capa namespace"""
cmd = CreateNamespacesCmd(namespace_str, SourceType.USER_DEFINED)
cmd.applyTo(capa.ghidra.helpers.get_current_program())
cmd.applyTo(currentProgram()) # type: ignore [name-defined] # noqa: F821
return cmd.getNamespace()
@@ -76,7 +53,7 @@ def create_label(ghidra_addr, name, capa_namespace):
"""custom label cmd to overlay symbols under capa-generated namespaces"""
# prevent duplicate labels under the same capa-generated namespace
symbol_table = capa.ghidra.helpers.get_current_program().getSymbolTable()
symbol_table = currentProgram().getSymbolTable() # type: ignore [name-defined] # noqa: F821
for sym in symbol_table.getSymbols(ghidra_addr):
if sym.getName(True) == capa_namespace.getName(True) + Namespace.DELIMITER + name:
return
@@ -84,7 +61,7 @@ def create_label(ghidra_addr, name, capa_namespace):
# create SymbolType.LABEL at addr
# prioritize capa-generated namespace (duplicate match @ new addr), else put under global Ghidra one (new match)
cmd = AddLabelCmd(ghidra_addr, name, True, SourceType.USER_DEFINED)
cmd.applyTo(capa.ghidra.helpers.get_current_program())
cmd.applyTo(currentProgram()) # type: ignore [name-defined] # noqa: F821
# assign new match overlay label to capa-generated namespace
cmd.getSymbol().setNamespace(capa_namespace)
@@ -115,8 +92,8 @@ class CapaMatchData:
return
for key in self.matches.keys():
addr = capa.ghidra.helpers.get_flat_api().toAddr(hex(key))
func = capa.ghidra.helpers.get_flat_api().getFunctionContaining(addr)
addr = toAddr(hex(key)) # type: ignore [name-defined] # noqa: F821
func = getFunctionContaining(addr) # type: ignore [name-defined] # noqa: F821
# bookmark & tag MITRE ATT&CK tactics & MBC @ function scope
if func is not None:
@@ -140,160 +117,140 @@ class CapaMatchData:
def set_plate_comment(self, ghidra_addr):
"""set plate comments at matched functions"""
comment = capa.ghidra.helpers.get_flat_api().getPlateComment(ghidra_addr)
comment = getPlateComment(ghidra_addr) # type: ignore [name-defined] # noqa: F821
rule_path = self.namespace.replace(Namespace.DELIMITER, "/")
# 2 calls to avoid duplicate comments via subsequent script runs
if comment is None:
# first comment @ function
comment = rule_path + "\n"
capa.ghidra.helpers.get_flat_api().setPlateComment(ghidra_addr, comment)
setPlateComment(ghidra_addr, comment) # type: ignore [name-defined] # noqa: F821
elif rule_path not in comment:
comment = comment + rule_path + "\n"
capa.ghidra.helpers.get_flat_api().setPlateComment(ghidra_addr, comment)
setPlateComment(ghidra_addr, comment) # type: ignore [name-defined] # noqa: F821
else:
return
def set_pre_comment(self, ghidra_addr, sub_type, description):
"""set pre comments at subscoped matches of main rules"""
comment = capa.ghidra.helpers.get_flat_api().getPreComment(ghidra_addr)
comment = getPreComment(ghidra_addr) # type: ignore [name-defined] # noqa: F821
if comment is None:
comment = "capa: " + sub_type + "(" + description + ")" + ' matched in "' + self.capability + '"\n'
capa.ghidra.helpers.get_flat_api().setPreComment(ghidra_addr, comment)
setPreComment(ghidra_addr, comment) # type: ignore [name-defined] # noqa: F821
elif self.capability not in comment:
comment = (
comment + "capa: " + sub_type + "(" + description + ")" + ' matched in "' + self.capability + '"\n'
)
capa.ghidra.helpers.get_flat_api().setPreComment(ghidra_addr, comment)
setPreComment(ghidra_addr, comment) # type: ignore [name-defined] # noqa: F821
else:
return
def label_matches(self, do_namespaces, do_comments):
def label_matches(self):
"""label findings at function scopes and comment on subscope matches"""
capa_namespace = None
if do_namespaces:
capa_namespace = create_namespace(self.namespace)
symbol_table = capa.ghidra.helpers.get_current_program().getSymbolTable()
capa_namespace = create_namespace(self.namespace)
symbol_table = currentProgram().getSymbolTable() # type: ignore [name-defined] # noqa: F821
# handle function main scope of matched rule
# these will typically contain further matches within
if self.scope == "function":
for addr in self.matches.keys():
ghidra_addr = capa.ghidra.helpers.get_flat_api().toAddr(hex(addr))
ghidra_addr = toAddr(hex(addr)) # type: ignore [name-defined] # noqa: F821
# classify new function label under capa-generated namespace
if do_namespaces:
sym = symbol_table.getPrimarySymbol(ghidra_addr)
if sym is not None:
if sym.getSymbolType() == SymbolType.FUNCTION:
create_label(ghidra_addr, sym.getName(), capa_namespace)
sym = symbol_table.getPrimarySymbol(ghidra_addr)
if sym is not None:
if sym.getSymbolType() == SymbolType.FUNCTION:
create_label(ghidra_addr, sym.getName(), capa_namespace)
self.set_plate_comment(ghidra_addr)
if do_comments:
self.set_plate_comment(ghidra_addr)
# parse the corresponding nodes, and pre-comment subscope matched features
# under the encompassing function(s)
for sub_match in self.matches.get(addr):
for loc, node in sub_match.items():
sub_ghidra_addr = toAddr(hex(loc)) # type: ignore [name-defined] # noqa: F821
if sub_ghidra_addr == ghidra_addr:
# skip duplicates
continue
# parse the corresponding nodes, and pre-comment subscope matched features
# under the encompassing function(s)
for sub_match in self.matches.get(addr):
for loc, node in sub_match.items():
sub_ghidra_addr = capa.ghidra.helpers.get_flat_api().toAddr(hex(loc))
if sub_ghidra_addr == ghidra_addr:
# skip duplicates
continue
# precomment subscope matches under the function
if node != {} and do_comments:
for sub_type, description in parse_node(node):
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
# precomment subscope matches under the function
if node != {}:
for sub_type, description in parse_node(node):
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
else:
# resolve the encompassing function for the capa namespace
# of non-function scoped main matches
for addr in self.matches.keys():
ghidra_addr = capa.ghidra.helpers.get_flat_api().toAddr(hex(addr))
ghidra_addr = toAddr(hex(addr)) # type: ignore [name-defined] # noqa: F821
# basic block / insn scoped main matches
# Ex. See "Create Process on Windows" Rule
func = capa.ghidra.helpers.get_flat_api().getFunctionContaining(ghidra_addr)
func = getFunctionContaining(ghidra_addr) # type: ignore [name-defined] # noqa: F821
if func is not None:
func_addr = func.getEntryPoint()
if do_namespaces:
create_label(func_addr, func.getName(), capa_namespace)
if do_comments:
self.set_plate_comment(func_addr)
create_label(func_addr, func.getName(), capa_namespace)
self.set_plate_comment(func_addr)
# create subscope match precomments
for sub_match in self.matches.get(addr):
for loc, node in sub_match.items():
sub_ghidra_addr = capa.ghidra.helpers.get_flat_api().toAddr(hex(loc))
sub_ghidra_addr = toAddr(hex(loc)) # type: ignore [name-defined] # noqa: F821
if node != {}:
if func is not None:
# basic block/ insn scope under resolved function
if do_comments:
for sub_type, description in parse_node(node):
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
for sub_type, description in parse_node(node):
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
else:
# this would be a global/file scoped main match
# try to resolve the encompassing function via the subscope match, instead
# Ex. "run as service" rule
sub_func = capa.ghidra.helpers.get_flat_api().getFunctionContaining(sub_ghidra_addr)
sub_func = getFunctionContaining(sub_ghidra_addr) # type: ignore [name-defined] # noqa: F821
if sub_func is not None:
sub_func_addr = sub_func.getEntryPoint()
# place function in capa namespace & create the subscope match label in Ghidra's global namespace
if do_namespaces:
create_label(sub_func_addr, sub_func.getName(), capa_namespace)
if do_comments:
self.set_plate_comment(sub_func_addr)
if do_comments:
for sub_type, description in parse_node(node):
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
create_label(sub_func_addr, sub_func.getName(), capa_namespace)
self.set_plate_comment(sub_func_addr)
for sub_type, description in parse_node(node):
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
else:
# addr is in some other file section like .data
# represent this location with a label symbol under the capa namespace
# Ex. See "Reference Base64 String" rule
if do_namespaces:
for _sub_type, _description in parse_node(node):
# in many cases, these will be ghidra-labeled data, so just add the existing
# label symbol to the capa namespace
for sym in symbol_table.getSymbols(sub_ghidra_addr):
if sym.getSymbolType() == SymbolType.LABEL:
sym.setNamespace(capa_namespace)
if do_comments:
for sub_type, description in parse_node(node):
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
for sub_type, description in parse_node(node):
# in many cases, these will be ghidra-labeled data, so just add the existing
# label symbol to the capa namespace
for sym in symbol_table.getSymbols(sub_ghidra_addr):
if sym.getSymbolType() == SymbolType.LABEL:
sym.setNamespace(capa_namespace)
self.set_pre_comment(sub_ghidra_addr, sub_type, description)
def get_capabilities():
rules_dir = ""
show_monitor_message(f"requesting capa {capa.version.__version__} rules directory")
selected_dir = askDirectory(f"choose capa {capa.version.__version__} rules directory", "Ok") # type: ignore [name-defined] # noqa: F821
if selected_dir:
rules_dir = selected_dir.getPath()
rules_dir: str = ""
try:
selected_dir = askDirectory("Choose capa rules directory", "Ok") # type: ignore [name-defined] # noqa: F821
if selected_dir:
rules_dir = selected_dir.getPath()
except RuntimeError:
# RuntimeError thrown when user selects "Cancel"
pass
if not rules_dir:
raise CancelledException
logger.info("You must choose a capa rules directory before running capa.")
return "" # return empty str to avoid handling both int and str types
rules_path: pathlib.Path = pathlib.Path(rules_dir)
logger.info("running capa using rules from %s", str(rules_path))
show_monitor_message(f"loading rules from {rules_path}")
rules = capa.rules.get_rules([rules_path])
show_monitor_message("collecting binary metadata")
meta = capa.ghidra.helpers.collect_metadata([rules_path])
show_monitor_message("running capa analysis")
extractor = capa.features.extractors.ghidra.extractor.GhidraFeatureExtractor()
capabilities = capa.capabilities.common.find_capabilities(rules, extractor, True)
show_monitor_message("checking for static limitations")
if capa.capabilities.common.has_static_limitation(rules, capabilities, is_standalone=False):
show_warn(
"capa explorer encountered warnings during analysis. Please check the console output for more information.",
)
popup("capa explorer encountered warnings during analysis. Please check the console output for more information.") # type: ignore [name-defined] # noqa: F821
logger.info("capa encountered warnings during analysis")
show_monitor_message("rendering results")
return capa.render.json.render(meta, rules, capabilities.matches)
@@ -371,12 +328,12 @@ def parse_json(capa_data):
# this requires the correct delimiter used by Ghidra
# Ex. 'communication/named-pipe/create/create pipe' -> capa::communication::named-pipe::create::create-pipe
namespace_str = Namespace.DELIMITER.join(meta["namespace"].split("/"))
namespace = "capa_explorer" + Namespace.DELIMITER + namespace_str + fmt_rule
namespace = "capa" + Namespace.DELIMITER + namespace_str + fmt_rule
else:
# lib rules via the official rules repo will not contain data
# for the "namespaces" key, so format using rule itself
# Ex. 'contain loop' -> capa::lib::contain-loop
namespace = "capa_explorer" + Namespace.DELIMITER + "lib" + fmt_rule
namespace = "capa" + Namespace.DELIMITER + "lib" + fmt_rule
yield CapaMatchData(namespace, scope, rule, rule_matches, attack, mbc)
@@ -385,79 +342,44 @@ def main():
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
choices = ["namespaces", "bookmarks", "comments"]
# use ArrayList to resolve ambiguous askChoices overloads (List vs List, List) in PyGhidra
choices_java = ArrayList()
for c in choices:
choices_java.add(c)
if isRunningHeadless(): # type: ignore [name-defined] # noqa: F821
logger.error("unsupported Ghidra execution mode")
return capa.main.E_UNSUPPORTED_GHIDRA_EXECUTION_MODE
choice_labels = [
'add "capa_explorer" namespace for matched functions',
"add bookmarks for matched functions",
"add comments to matched functions",
]
# use ArrayList to resolve ambiguous askChoices overloads (List vs List, List) in PyGhidra
choice_labels_java = ArrayList()
for c in choice_labels:
choice_labels_java.add(c)
selected = list(askChoices("capa explorer", "select actions:", choices_java, choice_labels_java)) # type: ignore [name-defined] # noqa: F821
do_namespaces = "namespaces" in selected
do_comments = "comments" in selected
do_bookmarks = "bookmarks" in selected
if not any((do_namespaces, do_comments, do_bookmarks)):
raise CancelledException("no actions selected")
# initialize the context for the extractor/helpers
capa.features.extractors.ghidra.context.set_context(
currentProgram, # type: ignore [name-defined] # noqa: F821
FlatProgramAPI(currentProgram), # type: ignore [name-defined] # noqa: F821
monitor, # type: ignore [name-defined] # noqa: F821
)
show_monitor_message("checking supported Ghidra version")
if not capa.ghidra.helpers.is_supported_ghidra_version():
show_error("unsupported Ghidra version")
logger.error("unsupported Ghidra version")
return capa.main.E_UNSUPPORTED_GHIDRA_VERSION
show_monitor_message("checking supported file type")
if not capa.ghidra.helpers.is_supported_file_type():
show_error("unsupported file type")
logger.error("unsupported file type")
return capa.main.E_INVALID_FILE_TYPE
show_monitor_message("checking supported file architecture")
if not capa.ghidra.helpers.is_supported_arch_type():
show_error("unsupported file architecture")
logger.error("unsupported file architecture")
return capa.main.E_INVALID_FILE_ARCH
# capa_data will always contain {'meta':..., 'rules':...}
# if the 'rules' key contains no values, then there were no matches
capa_data = json.loads(get_capabilities())
if capa_data.get("rules") is None:
show_info("capa explorer found no matches.")
logger.info("capa explorer found no matches")
popup("capa explorer found no matches.") # type: ignore [name-defined] # noqa: F821
return capa.main.E_EMPTY_REPORT
show_monitor_message("processing matches")
for item in parse_json(capa_data):
if do_bookmarks:
show_monitor_message("adding bookmarks")
item.bookmark_functions()
if do_namespaces or do_comments:
show_monitor_message("adding labels")
item.label_matches(do_namespaces, do_comments)
show_info("capa explorer analysis complete.")
item.bookmark_functions()
item.label_matches()
logger.info("capa explorer analysis complete")
popup("capa explorer analysis complete.\nPlease see results in the Bookmarks Window and Namespaces section of the Symbol Tree Window.") # type: ignore [name-defined] # noqa: F821
return 0
if __name__ == "__main__":
try:
if main() != 0:
show_error(
"capa explorer encountered errors during analysis. Please check the console output for more information.",
)
except CancelledException:
show_info("capa explorer analysis cancelled.")
if sys.version_info < (3, 10):
from capa.exceptions import UnsupportedRuntimeError
raise UnsupportedRuntimeError("This version of capa can only be used with Python 3.10+")
exit_code = main()
if exit_code != 0:
popup("capa explorer encountered errors during analysis. Please check the console output for more information.") # type: ignore [name-defined] # noqa: F821
sys.exit(exit_code)

174
capa/ghidra/capa_ghidra.py Normal file
View File

@@ -0,0 +1,174 @@
# Run capa against loaded Ghidra database and render results in Ghidra Console window
# @author Mike Hunhoff (mehunhoff@google.com)
# @category Python 3.capa
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import pathlib
import argparse

import capa
import capa.main
import capa.rules
import capa.loader
import capa.render.json
import capa.render.default
import capa.render.verbose
import capa.ghidra.helpers
import capa.render.vverbose
import capa.capabilities.common
import capa.features.extractors.ghidra.extractor
logger = logging.getLogger("capa_ghidra")
def run_headless():
parser = argparse.ArgumentParser(description="The FLARE team's open-source tool to integrate capa with Ghidra.")
parser.add_argument(
"rules",
type=str,
help="path to rule file or directory",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="enable verbose result document (no effect with --json)"
)
parser.add_argument(
"-vv", "--vverbose", action="store_true", help="enable very verbose result document (no effect with --json)"
)
parser.add_argument("-d", "--debug", action="store_true", help="enable debugging output on STDERR")
parser.add_argument("-q", "--quiet", action="store_true", help="disable all output but errors")
parser.add_argument("-j", "--json", action="store_true", help="emit JSON instead of text")
script_args = list(getScriptArgs()) # type: ignore [name-defined] # noqa: F821
if not script_args or len(script_args) > 1:
script_args = []
else:
script_args = script_args[0].split()
for idx, arg in enumerate(script_args):
if arg.lower() == "help":
script_args[idx] = "--help"
args = parser.parse_args(args=script_args)
if args.quiet:
logging.basicConfig(level=logging.WARNING)
logging.getLogger().setLevel(logging.WARNING)
elif args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
logger.debug("running in Ghidra headless mode")
rules_path = pathlib.Path(args.rules)
logger.debug("rule path: %s", rules_path)
rules = capa.rules.get_rules([rules_path])
meta = capa.ghidra.helpers.collect_metadata([rules_path])
extractor = capa.features.extractors.ghidra.extractor.GhidraFeatureExtractor()
capabilities = capa.capabilities.common.find_capabilities(rules, extractor, False)
meta.analysis.feature_counts = capabilities.feature_counts
meta.analysis.library_functions = capabilities.library_functions
meta.analysis.layout = capa.loader.compute_layout(rules, extractor, capabilities.matches)
if capa.capabilities.common.has_static_limitation(rules, capabilities, is_standalone=True):
logger.info("capa encountered warnings during analysis")
if args.json:
print(capa.render.json.render(meta, rules, capabilities.matches)) # noqa: T201
elif args.vverbose:
print(capa.render.vverbose.render(meta, rules, capabilities.matches)) # noqa: T201
elif args.verbose:
print(capa.render.verbose.render(meta, rules, capabilities.matches)) # noqa: T201
else:
print(capa.render.default.render(meta, rules, capabilities.matches)) # noqa: T201
return 0
def run_ui():
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
rules_dir: str = ""
try:
selected_dir = askDirectory("Choose capa rules directory", "Ok") # type: ignore [name-defined] # noqa: F821
if selected_dir:
rules_dir = selected_dir.getPath()
except RuntimeError:
# RuntimeError thrown when user selects "Cancel"
pass
if not rules_dir:
logger.info("You must choose a capa rules directory before running capa.")
return capa.main.E_MISSING_RULES
verbose = askChoice( # type: ignore [name-defined] # noqa: F821
"capa output verbosity", "Choose capa output verbosity", ["default", "verbose", "vverbose"], "default"
)
rules_path: pathlib.Path = pathlib.Path(rules_dir)
logger.info("running capa using rules from %s", str(rules_path))
rules = capa.rules.get_rules([rules_path])
meta = capa.ghidra.helpers.collect_metadata([rules_path])
extractor = capa.features.extractors.ghidra.extractor.GhidraFeatureExtractor()
capabilities = capa.capabilities.common.find_capabilities(rules, extractor, True)
meta.analysis.feature_counts = capabilities.feature_counts
meta.analysis.library_functions = capabilities.library_functions
meta.analysis.layout = capa.loader.compute_layout(rules, extractor, capabilities.matches)
if capa.capabilities.common.has_static_limitation(rules, capabilities, is_standalone=False):
logger.info("capa encountered warnings during analysis")
if verbose == "vverbose":
print(capa.render.vverbose.render(meta, rules, capabilities.matches)) # noqa: T201
elif verbose == "verbose":
print(capa.render.verbose.render(meta, rules, capabilities.matches)) # noqa: T201
else:
print(capa.render.default.render(meta, rules, capabilities.matches)) # noqa: T201
return 0
def main():
if not capa.ghidra.helpers.is_supported_ghidra_version():
return capa.main.E_UNSUPPORTED_GHIDRA_VERSION
if not capa.ghidra.helpers.is_supported_file_type():
return capa.main.E_INVALID_FILE_TYPE
if not capa.ghidra.helpers.is_supported_arch_type():
return capa.main.E_INVALID_FILE_ARCH
if isRunningHeadless(): # type: ignore [name-defined] # noqa: F821
return run_headless()
else:
return run_ui()
if __name__ == "__main__":
if sys.version_info < (3, 10):
from capa.exceptions import UnsupportedRuntimeError
raise UnsupportedRuntimeError("This version of capa can only be used with Python 3.10+")
sys.exit(main())

View File

@@ -22,7 +22,6 @@ import capa.version
import capa.features.common
import capa.features.freeze
import capa.render.result_document as rdoc
import capa.features.extractors.ghidra.context as ghidra_context
import capa.features.extractors.ghidra.helpers
from capa.features.address import AbsoluteVirtualAddress
@@ -32,18 +31,6 @@ logger = logging.getLogger("capa")
SUPPORTED_FILE_TYPES = ("Executable and Linking Format (ELF)", "Portable Executable (PE)", "Raw Binary")
def get_current_program():
return ghidra_context.get_context().program
def get_flat_api():
return ghidra_context.get_context().flat_api
def get_monitor():
return ghidra_context.get_context().monitor
class GHIDRAIO:
"""
An object that acts as a file-like object,
@@ -61,12 +48,7 @@ class GHIDRAIO:
self.offset = offset
def read(self, size):
logger.debug(
"reading 0x%x bytes at 0x%x (ea: 0x%x)",
size,
self.offset,
get_current_program().getImageBase().add(self.offset).getOffset(),
)
logger.debug("reading 0x%x bytes at 0x%x (ea: 0x%x)", size, self.offset, currentProgram().getImageBase().add(self.offset).getOffset()) # type: ignore [name-defined] # noqa: F821
if size > len(self.bytes_) - self.offset:
logger.debug("cannot read 0x%x bytes at 0x%x (ea: BADADDR)", size, self.offset)
@@ -78,7 +60,7 @@ class GHIDRAIO:
return
def get_bytes(self):
file_bytes = get_current_program().getMemory().getAllFileBytes()[0]
file_bytes = currentProgram().getMemory().getAllFileBytes()[0] # type: ignore [name-defined] # noqa: F821
# getOriginalByte() allows for raw file parsing on the Ghidra side
# other functions will fail as Ghidra will think that it's reading uninitialized memory
@@ -88,32 +70,21 @@ class GHIDRAIO:
def is_supported_ghidra_version():
import ghidra.framework
version = ghidra.framework.Application.getApplicationVersion()
try:
# version format example: "11.1.2" or "11.4"
major, minor = map(int, version.split(".")[:2])
if major < 12:
logger.error("-" * 80)
logger.error(" Ghidra version %s is not supported.", version)
logger.error(" ")
logger.error(" capa requires Ghidra 12.0 or higher.")
logger.error("-" * 80)
return False
except ValueError:
logger.warning("could not parse Ghidra version: %s", version)
version = float(getGhidraVersion()[:4]) # type: ignore [name-defined] # noqa: F821
if version < 10.2:
warning_msg = "capa does not support this Ghidra version"
logger.warning(warning_msg)
logger.warning("Your Ghidra version is: %s. Supported versions are: Ghidra >= 10.2", version)
return False
return True
def is_running_headless():
return True # PyGhidra is always headless in this context
return isRunningHeadless() # type: ignore [name-defined] # noqa: F821
def is_supported_file_type():
file_info = get_current_program().getExecutableFormat()
file_info = currentProgram().getExecutableFormat() # type: ignore [name-defined] # noqa: F821
if file_info not in SUPPORTED_FILE_TYPES:
logger.error("-" * 80)
logger.error(" Input file does not appear to be a supported file type.")
@@ -128,7 +99,7 @@ def is_supported_file_type():
def is_supported_arch_type():
lang_id = str(get_current_program().getLanguageID()).lower()
lang_id = str(currentProgram().getLanguageID()).lower() # type: ignore [name-defined] # noqa: F821
if not all((lang_id.startswith("x86"), any(arch in lang_id for arch in ("32", "64")))):
logger.error("-" * 80)
@@ -141,18 +112,18 @@ def is_supported_arch_type():
def get_file_md5():
return get_current_program().getExecutableMD5()
return currentProgram().getExecutableMD5() # type: ignore [name-defined] # noqa: F821
def get_file_sha256():
return get_current_program().getExecutableSHA256()
return currentProgram().getExecutableSHA256() # type: ignore [name-defined] # noqa: F821
def collect_metadata(rules: list[Path]):
md5 = get_file_md5()
sha256 = get_file_sha256()
info = get_current_program().getLanguageID().toString()
info = currentProgram().getLanguageID().toString() # type: ignore [name-defined] # noqa: F821
if "x86" in info and "64" in info:
arch = "x86_64"
elif "x86" in info and "32" in info:
@@ -160,11 +131,11 @@ def collect_metadata(rules: list[Path]):
else:
arch = "unknown arch"
format_name: str = get_current_program().getExecutableFormat()
format_name: str = currentProgram().getExecutableFormat() # type: ignore [name-defined] # noqa: F821
if "PE" in format_name:
os = "windows"
elif "ELF" in format_name:
with contextlib.closing(GHIDRAIO()) as f:
with contextlib.closing(capa.ghidra.helpers.GHIDRAIO()) as f:
os = capa.features.extractors.elf.detect_elf_os(f)
else:
os = "unknown os"
@@ -177,18 +148,16 @@ def collect_metadata(rules: list[Path]):
md5=md5,
sha1="",
sha256=sha256,
path=get_current_program().getExecutablePath(),
path=currentProgram().getExecutablePath(), # type: ignore [name-defined] # noqa: F821
),
flavor=rdoc.Flavor.STATIC,
analysis=rdoc.StaticAnalysis(
format=get_current_program().getExecutableFormat(),
format=currentProgram().getExecutableFormat(), # type: ignore [name-defined] # noqa: F821
arch=arch,
os=os,
extractor="ghidra",
rules=tuple(r.resolve().absolute().as_posix() for r in rules),
base_address=capa.features.freeze.Address.from_capa(
AbsoluteVirtualAddress(get_current_program().getImageBase().getOffset())
),
base_address=capa.features.freeze.Address.from_capa(AbsoluteVirtualAddress(currentProgram().getImageBase().getOffset())), # type: ignore [name-defined] # noqa: F821
layout=rdoc.StaticLayout(
functions=(),
),

View File

@@ -1,54 +0,0 @@
<div align="center">
<img src="https://github.com/mandiant/capa/blob/master/doc/img/ghidra_backend_logo.png" width=240 height=125>
</div>
# capa explorer for Ghidra
capa explorer for Ghidra brings capas detection capabilities directly to Ghidras user interface helping speed up your reverse engineering tasks by identifying what parts of a program suggest interesting behavior, such as setting a registry value. You can execute (via [PyGhidra](https://github.com/NationalSecurityAgency/ghidra/tree/master/Ghidra/Features/PyGhidra)) the script [capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/plugin/capa_explorer.py) using Ghidras Script Manager window to run capas analysis and view the results in Ghidra.
## ui integration
[capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/capa_explorer.py) renders capa results in Ghidra's UI to help you quickly navigate them. This includes adding matched functions to Ghidras Symbol Tree and Bookmarks windows and adding comments to functions that indicate matched capabilities and features. You can execute this script using Ghidras Script Manager window.
### symbol tree window
Matched functions are added to Ghidra's Symbol Tree window under a custom namespace that maps to the capabilities' [capa namespace](https://github.com/mandiant/capa-rules/blob/master/doc/format.md#rule-namespace).
<div align="center">
<img src="https://github.com/mandiant/capa/assets/66766340/eeae33f4-99d4-42dc-a5e8-4c1b8c661492" width=300>
</div>
### comments
Comments are added at the beginning of matched functions indicating matched capabilities and inline comments are added to functions indicating matched features. You can view these comments in Ghidras Disassembly Listing and Decompile windows.
<div align="center">
<img src="https://github.com/mandiant/capa/assets/66766340/bb2b4170-7fd4-45fc-8c7b-ff8f2e2f101b" width=1000>
</div>
### bookmarks
Bookmarks are added to functions that matched a capability that is mapped to a MITRE ATT&CK and/or Malware Behavior Catalog (MBC) technique. You can view these bookmarks in Ghidra's Bookmarks window.
<div align="center">
<img src="https://github.com/mandiant/capa/assets/66766340/7f9a66a9-7be7-4223-91c6-4b8fc4651336" width=825>
</div>
# getting started
## requirements
- [Ghidra](https://github.com/NationalSecurityAgency/ghidra) >= 12.0 must be installed.
- [flare-capa](https://pypi.org/project/flare-capa/) >= 10.0 must be installed (virtual environment recommended) with the `ghidra` extra (e.g., `pip install "flare-capa[ghidra]"`).
- [capa rules](https://github.com/mandiant/capa-rules) must be downloaded for the version of capa you are using.
## execution
### 1. run Ghidra with PyGhidra
You must start Ghidra using the `pyghidraRun` script provided in the support directory of your Ghidra installation to ensure the Python environment is correctly loaded. You should execute `pyghidraRun` from within the Python environment that you used to install capa.
```bash
<ghidra_install>/support/pyghidraRun
```
### 2. run capa_explorer.py
1. Open your Ghidra project and CodeBrowser.
2. Open the Script Manager.
3. Add [capa_explorer.py](https://raw.githubusercontent.com/mandiant/capa/master/capa/ghidra/plugin/capa_explorer.py) to the script directories.
4. Filter for capa and run the script.
5. When prompted, select the directory containing the downloaded capa rules.

View File

@@ -96,7 +96,11 @@ def is_runtime_ida():
def is_runtime_ghidra():
return importlib.util.find_spec("ghidra") is not None
try:
currentProgram # type: ignore [name-defined] # noqa: F821
except NameError:
return False
return True
def assert_never(value) -> NoReturn:
@@ -327,9 +331,6 @@ def log_unsupported_os_error():
logger.error(" ")
logger.error(" capa currently only analyzes executables for some operating systems")
logger.error(" (including Windows, Linux, and Android).")
logger.error(" ")
logger.error(" If you know the target OS, you can specify it explicitly, for example:")
logger.error(" capa --os linux <sample>")
logger.error("-" * 80)

View File

@@ -17,6 +17,7 @@ import logging
import idaapi
import ida_kernwin
from capa.ida.plugin.form import CapaExplorerForm
from capa.ida.plugin.icon import ICON
logger = logging.getLogger(__name__)
@@ -73,9 +74,6 @@ class CapaExplorerPlugin(idaapi.plugin_t):
arg (int): bitflag. Setting LSB enables automatic analysis upon
loading. The other bits are currently undefined. See `form.Options`.
"""
# delay import to not trigger load of Qt components when not running in idaq, i.e., in idalib
from capa.ida.plugin.form import CapaExplorerForm
if not self.form:
self.form = CapaExplorerForm(self.PLUGIN_NAME, arg)
else:

View File

@@ -14,9 +14,9 @@
import ida_kernwin
from PyQt5 import QtCore
from capa.ida.plugin.error import UserCancelledError
from capa.ida.plugin.qt_compat import QtCore, Signal
from capa.features.extractors.ida.extractor import IdaFeatureExtractor
from capa.features.extractors.base_extractor import FunctionHandle
@@ -24,7 +24,7 @@ from capa.features.extractors.base_extractor import FunctionHandle
class CapaExplorerProgressIndicator(QtCore.QObject):
"""implement progress signal, used during feature extraction"""
progress = Signal(str)
progress = QtCore.pyqtSignal(str)
def update(self, text):
"""emit progress update

View File

@@ -23,6 +23,7 @@ from pathlib import Path
import idaapi
import ida_kernwin
import ida_settings
from PyQt5 import QtGui, QtCore, QtWidgets
import capa.main
import capa.rules
@@ -50,7 +51,6 @@ from capa.ida.plugin.hooks import CapaExplorerIdaHooks
from capa.ida.plugin.model import CapaExplorerDataModel
from capa.ida.plugin.proxy import CapaExplorerRangeProxyModel, CapaExplorerSearchProxyModel
from capa.ida.plugin.extractor import CapaExplorerFeatureExtractor
from capa.ida.plugin.qt_compat import QtGui, QtCore, QtWidgets
from capa.features.extractors.base_extractor import FunctionHandle
logger = logging.getLogger(__name__)
@@ -1358,7 +1358,7 @@ class CapaExplorerForm(idaapi.PluginForm):
@param state: checked state
"""
if state:
if state == QtCore.Qt.Checked:
self.limit_results_to_function(idaapi.get_func(idaapi.get_screen_ea()))
else:
self.range_model_proxy.reset_address_range_filter()
@@ -1367,7 +1367,7 @@ class CapaExplorerForm(idaapi.PluginForm):
def slot_checkbox_limit_features_by_ea(self, state):
""" """
if state:
if state == QtCore.Qt.Checked:
self.view_rulegen_features.filter_items_by_ea(idaapi.get_screen_ea())
else:
self.view_rulegen_features.show_all_items()

View File

@@ -1,38 +0,0 @@
{
"IDAMetadataDescriptorVersion": 1,
"plugin": {
"name": "capa",
"entryPoint": "capa_explorer.py",
"version": "9.3.1",
"idaVersions": ">=7.4",
"description": "Identify capabilities in executable files using FLARE's capa framework",
"license": "Apache-2.0",
"categories": [
"malware-analysis",
"api-scripting-and-automation",
"ui-ux-and-visualization"
],
"pythonDependencies": ["flare-capa==9.3.1"],
"urls": {
"repository": "https://github.com/mandiant/capa"
},
"authors": [
{"name": "Willi Ballenthin", "email": "wballenthin@hex-rays.com"},
{"name": "Moritz Raabe", "email": "moritzraabe@google.com"},
{"name": "Mike Hunhoff", "email": "mike.hunhoff@gmail.com"},
{"name": "Yacine Elhamer", "email": "elhamer.yacine@gmail.com"}
],
"keywords": [
"capability-detection",
"malware-analysis",
"behavior-analysis",
"reverse-engineering",
"att&ck",
"rule-engine",
"feature-extraction",
"yara-like-rules",
"static-analysis",
"dynamic-analysis"
]
}
}

View File

@@ -18,10 +18,10 @@ from typing import Iterator, Optional
import idc
import idaapi
from PyQt5 import QtCore
import capa.ida.helpers
from capa.features.address import Address, FileOffsetAddress, AbsoluteVirtualAddress
from capa.ida.plugin.qt_compat import QtCore, qt_get_item_flag_tristate
def info_to_name(display):
@@ -55,7 +55,7 @@ class CapaExplorerDataItem:
self.flags = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
if self._can_check:
self.flags = self.flags | QtCore.Qt.ItemIsUserCheckable | qt_get_item_flag_tristate()
self.flags = self.flags | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsTristate
if self.pred:
self.pred.appendChild(self)

View File

@@ -18,6 +18,7 @@ from collections import deque
import idc
import idaapi
from PyQt5 import QtGui, QtCore
import capa.rules
import capa.ida.helpers
@@ -41,7 +42,6 @@ from capa.ida.plugin.item import (
CapaExplorerInstructionViewItem,
)
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.ida.plugin.qt_compat import QtGui, QtCore
# default highlight color used in IDA window
DEFAULT_HIGHLIGHT = 0xE6C700
@@ -269,7 +269,7 @@ class CapaExplorerDataModel(QtCore.QAbstractItemModel):
visited.add(child_index)
for idx in range(self.rowCount(child_index)):
stack.append(self.index(idx, 0, child_index))
stack.append(child_index.child(idx, 0))
def reset_ida_highlighting(self, item, checked):
"""reset IDA highlight for item

View File

@@ -12,8 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from capa.ida.plugin.model import CapaExplorerDataModel
from capa.ida.plugin.qt_compat import Qt, QtCore
class CapaExplorerRangeProxyModel(QtCore.QSortFilterProxyModel):

View File

@@ -1,79 +0,0 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Qt compatibility layer for capa IDA Pro plugin.
Handles PyQt5 (IDA < 9.2) vs PySide6 (IDA >= 9.2) differences.
This module provides a unified import interface for Qt modules and handles
API changes between Qt5 and Qt6.
"""
try:
# IDA 9.2+ uses PySide6
from PySide6 import QtGui, QtCore, QtWidgets
from PySide6.QtGui import QAction
QT_LIBRARY = "PySide6"
Signal = QtCore.Signal
except ImportError:
# Older IDA versions use PyQt5
try:
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtWidgets import QAction
QT_LIBRARY = "PyQt5"
Signal = QtCore.pyqtSignal
except ImportError:
raise ImportError("Neither PySide6 nor PyQt5 is available. Cannot initialize capa IDA plugin.")
Qt = QtCore.Qt
def qt_get_item_flag_tristate():
    """
    Return the tristate item flag appropriate for the active Qt binding.

    Qt5 (PyQt5) exposes Qt.ItemIsTristate; Qt6 (PySide6) removed it in
    favor of Qt.ItemIsAutoTristate, which automatically derives a parent
    checkbox's state from its children -- matching the behavior the
    original ItemIsTristate flag provided.

    Returns:
        int: the flag value suitable for the detected Qt library

    Raises:
        AttributeError: if the tristate flag cannot be located in PySide6
    """
    if QT_LIBRARY != "PySide6":
        # Qt5 keeps the original flag name.
        return Qt.ItemIsTristate

    # Qt6: the flag was renamed/moved; its exact location varies slightly
    # across PySide6 versions, so probe the known spots in order.
    flag = getattr(Qt, "ItemIsAutoTristate", None)
    if flag is not None:
        return flag
    item_flag_enum = getattr(Qt, "ItemFlag", None)
    if item_flag_enum is not None and hasattr(item_flag_enum, "ItemIsAutoTristate"):
        return item_flag_enum.ItemIsAutoTristate
    raise AttributeError(
        "Cannot find ItemIsAutoTristate in PySide6. "
        + "Your PySide6 version may be incompatible with capa. "
        + f"Available Qt attributes: {[attr for attr in dir(Qt) if 'Item' in attr]}"
    )
__all__ = ["qt_get_item_flag_tristate", "Signal", "QAction", "QtGui", "QtCore", "QtWidgets"]

View File

@@ -18,6 +18,7 @@ from collections import Counter
import idc
import idaapi
from PyQt5 import QtGui, QtCore, QtWidgets
import capa.rules
import capa.engine
@@ -27,7 +28,6 @@ import capa.features.basicblock
from capa.ida.plugin.item import CapaExplorerFunctionItem
from capa.features.address import AbsoluteVirtualAddress, _NoAddress
from capa.ida.plugin.model import CapaExplorerDataModel
from capa.ida.plugin.qt_compat import QtGui, QtCore, Signal, QAction, QtWidgets
MAX_SECTION_SIZE = 750
@@ -147,7 +147,7 @@ def calc_item_depth(o):
def build_action(o, display, data, slot):
""" """
action = QAction(display, o)
action = QtWidgets.QAction(display, o)
action.setData(data)
action.triggered.connect(lambda checked: slot(action))
@@ -312,7 +312,7 @@ class CapaExplorerRulegenPreview(QtWidgets.QTextEdit):
class CapaExplorerRulegenEditor(QtWidgets.QTreeWidget):
updated = Signal()
updated = QtCore.pyqtSignal()
def __init__(self, preview, parent=None):
""" """

View File

@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import logging
import datetime
@@ -22,13 +23,24 @@ from pathlib import Path
from rich.console import Console
from typing_extensions import assert_never
import capa.perf
import capa.rules
import capa.engine
import capa.helpers
import capa.version
import capa.render.json
import capa.rules.cache
import capa.render.default
import capa.render.verbose
import capa.features.common
import capa.features.freeze as frz
import capa.render.vverbose
import capa.features.extractors
import capa.render.result_document
import capa.render.result_document as rdoc
import capa.features.extractors.common
import capa.features.extractors.base_extractor
import capa.features.extractors.cape.extractor
from capa.rules import RuleSet
from capa.engine import MatchResults
from capa.exceptions import UnsupportedOSError, UnsupportedArchError, UnsupportedFormatError
@@ -67,7 +79,6 @@ BACKEND_VMRAY = "vmray"
BACKEND_FREEZE = "freeze"
BACKEND_BINEXPORT2 = "binexport2"
BACKEND_IDA = "ida"
BACKEND_GHIDRA = "ghidra"
class CorruptFile(ValueError):
@@ -126,57 +137,6 @@ def get_meta_str(vw):
return f"{', '.join(meta)}, number of functions: {len(vw.getFunctions())}"
def _is_probably_corrupt_pe(path: Path) -> bool:
    """
    Heuristic check for obviously malformed PE samples that provoke
    pathological behavior in vivisect (see GH-1989).

    A PE is treated as "probably corrupt" when any section declares an
    unrealistically large virtual size compared to the file size, e.g.
    hundreds of megabytes in a tiny file. Such cases lead vivisect to
    try to map enormous regions and can exhaust CPU/memory.
    """
    try:
        # best-effort: if pefile is unavailable, defer to existing behavior.
        import pefile
    except Exception:
        return False

    try:
        parsed = pefile.PE(str(path), fast_load=True)
    except Exception:
        # not a PE file, badly formed, or anything else unexpected:
        # let the existing format checks handle it.
        return False

    try:
        size_on_disk = path.stat().st_size
    except OSError:
        return False
    if size_on_disk <= 0:
        return False

    # flag sections whose declared virtual size is wildly disproportionate
    # to the file size (e.g. 900MB section in a ~400KB sample).
    ratio_limit = 128
    absolute_limit = 512 * 1024 * 1024  # 512 MB
    ceiling = max(size_on_disk * ratio_limit, absolute_limit)

    for section in getattr(parsed, "sections", []):
        declared = getattr(section, "Misc_VirtualSize", 0) or 0
        if declared > ceiling:
            logger.debug(
                "detected unrealistic PE section virtual size: 0x%x (file size: 0x%x), treating as corrupt",
                declared,
                size_on_disk,
            )
            return True

    return False
def get_workspace(path: Path, input_format: str, sigpaths: list[Path]):
"""
load the program at the given path into a vivisect workspace using the given format.
@@ -194,18 +154,11 @@ def get_workspace(path: Path, input_format: str, sigpaths: list[Path]):
"""
# lazy import enables us to not require viv if user wants another backend.
import envi.exc
import viv_utils
import viv_utils.flirt
logger.debug("generating vivisect workspace for: %s", path)
if input_format in (FORMAT_PE, FORMAT_AUTO) and _is_probably_corrupt_pe(path):
raise CorruptFile(
"PE file appears to contain unrealistically large sections and is likely corrupt"
+ " - skipping analysis to avoid excessive resource usage."
)
try:
if input_format == FORMAT_AUTO:
if not is_supported_format(path):
@@ -222,20 +175,11 @@ def get_workspace(path: Path, input_format: str, sigpaths: list[Path]):
vw = viv_utils.getShellcodeWorkspaceFromFile(str(path), arch="amd64", analyze=False)
else:
raise ValueError("unexpected format: " + input_format)
except envi.exc.SegmentationViolation as e:
raise CorruptFile(f"Invalid memory access during binary parsing: {e}") from e
except Exception as e:
# vivisect raises raw Exception instances, and we don't want
# to do a subclass check via isinstance.
if type(e) is Exception and e.args:
error_msg = str(e.args[0])
if "Couldn't convert rva" in error_msg:
raise CorruptFile(error_msg) from e
elif "Unsupported Architecture" in error_msg:
# Extract architecture number if available
arch_info = e.args[1] if len(e.args) > 1 else "unknown"
raise CorruptFile(f"Unsupported architecture: {arch_info}") from e
if type(e) is Exception and "Couldn't convert rva" in e.args[0]:
raise CorruptFile(e.args[0]) from e
raise
viv_utils.flirt.register_flirt_signature_analyzers(vw, [str(s) for s in sigpaths])
@@ -394,24 +338,12 @@ def get_extractor(
import capa.features.extractors.ida.extractor
logger.debug("idalib: opening database...")
idapro.enable_console_messages(False)
with console.status("analyzing program...", spinner="dots"):
# we set the primary and secondary Lumina servers to 0.0.0.0 to disable Lumina,
# which sometimes provides bad names, including overwriting names from debug info.
#
# use -R to load resources, which can help us embedded PE files.
#
# return values from open_database:
# 0 - Success
# 2 - User cancelled or 32-64 bit conversion failed
# 4 - Database initialization failed
# -1 - Generic errors (database already open, auto-analysis failed, etc.)
# -2 - User cancelled operation
ret = idapro.open_database(
str(input_path), run_auto_analysis=True, args="-Olumina:host=0.0.0.0 -Osecondary_lumina:host=0.0.0.0 -R"
)
if ret != 0:
raise RuntimeError("failed to analyze input file")
# idalib writes to stdout (ugh), so we have to capture that
# so as not to screw up structured output.
with capa.helpers.stdout_redirector(io.BytesIO()):
with console.status("analyzing program...", spinner="dots"):
if idapro.open_database(str(input_path), run_auto_analysis=True):
raise RuntimeError("failed to analyze input file")
logger.debug("idalib: waiting for analysis...")
ida_auto.auto_wait()
@@ -419,69 +351,6 @@ def get_extractor(
return capa.features.extractors.ida.extractor.IdaFeatureExtractor()
elif backend == BACKEND_GHIDRA:
import pyghidra
with console.status("analyzing program...", spinner="dots"):
if not pyghidra.started():
pyghidra.start()
import capa.ghidra.helpers
if not capa.ghidra.helpers.is_supported_ghidra_version():
raise RuntimeError("unsupported Ghidra version")
import tempfile
tmpdir = tempfile.TemporaryDirectory()
project_cm = pyghidra.open_project(tmpdir.name, "CapaProject", create=True)
project = project_cm.__enter__()
try:
from ghidra.util.task import TaskMonitor
monitor = TaskMonitor.DUMMY
# Import file
loader = pyghidra.program_loader().project(project).source(str(input_path)).name(input_path.name)
with loader.load() as load_results:
load_results.save(monitor)
# Open program
program, consumer = pyghidra.consume_program(project, "/" + input_path.name)
# Analyze
pyghidra.analyze(program, monitor)
from ghidra.program.flatapi import FlatProgramAPI
flat_api = FlatProgramAPI(program)
import capa.features.extractors.ghidra.context as ghidra_context
ghidra_context.set_context(program, flat_api, monitor)
# Wrapper to handle cleanup of program (consumer) and project
class GhidraContextWrapper:
def __init__(self, project_cm, program, consumer):
self.project_cm = project_cm
self.program = program
self.consumer = consumer
def __exit__(self, exc_type, exc_val, exc_tb):
self.program.release(self.consumer)
self.project_cm.__exit__(exc_type, exc_val, exc_tb)
cm = GhidraContextWrapper(project_cm, program, consumer)
except Exception:
project_cm.__exit__(None, None, None)
tmpdir.cleanup()
raise
import capa.features.extractors.ghidra.extractor
return capa.features.extractors.ghidra.extractor.GhidraFeatureExtractor(ctx_manager=cm, tmpdir=tmpdir)
else:
raise ValueError("unexpected backend: " + backend)

View File

@@ -55,7 +55,6 @@ from capa.loader import (
BACKEND_VMRAY,
BACKEND_DOTNET,
BACKEND_FREEZE,
BACKEND_GHIDRA,
BACKEND_PEFILE,
BACKEND_DRAKVUF,
BACKEND_BINEXPORT2,
@@ -299,7 +298,6 @@ def install_common_args(parser, wanted=None):
(BACKEND_BINJA, "Binary Ninja"),
(BACKEND_DOTNET, ".NET"),
(BACKEND_BINEXPORT2, "BinExport2"),
(BACKEND_GHIDRA, "Ghidra"),
(BACKEND_FREEZE, "capa freeze"),
(BACKEND_CAPE, "CAPE"),
(BACKEND_DRAKVUF, "DRAKVUF"),
@@ -394,7 +392,6 @@ class ShouldExitError(Exception):
"""raised when a main-related routine indicates the program should exit."""
def __init__(self, status_code: int):
super().__init__(status_code)
self.status_code = status_code
@@ -661,9 +658,7 @@ def get_rules_from_cli(args) -> RuleSet:
raises:
ShouldExitError: if the program is invoked incorrectly and should exit.
"""
enable_cache: bool = getattr(args, "enable_cache", True)
# this allows calling functions to easily disable rule caching, e.g., used by the rule linter to avoid
enable_cache: bool = True
try:
if capa.helpers.is_running_standalone() and args.is_default_rules:
cache_dir = get_default_root() / "cache"
@@ -945,7 +940,8 @@ def main(argv: Optional[list[str]] = None):
argv = sys.argv[1:]
desc = "The FLARE team's open-source tool to identify capabilities in executable files."
epilog = textwrap.dedent("""
epilog = textwrap.dedent(
"""
By default, capa uses a default set of embedded rules.
You can see the rule set here:
https://github.com/mandiant/capa-rules
@@ -972,7 +968,8 @@ def main(argv: Optional[list[str]] = None):
filter rules by meta fields, e.g. rule name or namespace
capa -t "create TCP socket" suspicious.exe
""")
"""
)
parser = argparse.ArgumentParser(
description=desc, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter
@@ -1107,26 +1104,14 @@ def ida_main():
def ghidra_main():
from ghidra.program.flatapi import FlatProgramAPI
import capa.rules
import capa.ghidra.helpers
import capa.render.default
import capa.features.extractors.ghidra.context
import capa.features.extractors.ghidra.extractor
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
# These are provided by the Ghidra scripting environment
# but are not available when running standard python
# so we have to ignore the linting errors
program = currentProgram # type: ignore [name-defined] # noqa: F821
monitor_ = monitor # type: ignore [name-defined] # noqa: F821
flat_api = FlatProgramAPI(program)
capa.features.extractors.ghidra.context.set_context(program, flat_api, monitor_)
logger.debug("-" * 80)
logger.debug(" Using default embedded rules.")
logger.debug(" ")

View File

@@ -31,7 +31,6 @@ $ protoc.exe --python_out=. --mypy_out=. <path_to_proto> (e.g. capa/render/proto
Alternatively, --pyi_out=. can be used to generate a Python Interface file that supports development
"""
import datetime
from typing import Any, Union

View File

@@ -17,7 +17,6 @@ import io
from typing import Union, Iterator, Optional
import rich.console
from rich.markup import escape
from rich.progress import Text
import capa.render.result_document as rd
@@ -25,21 +24,21 @@ import capa.render.result_document as rd
def bold(s: str) -> Text:
"""draw attention to the given string"""
return Text.from_markup(f"[cyan]{escape(s)}")
return Text.from_markup(f"[cyan]{s}")
def bold2(s: str) -> Text:
"""draw attention to the given string, within a `bold` section"""
return Text.from_markup(f"[green]{escape(s)}")
return Text.from_markup(f"[green]{s}")
def mute(s: str) -> Text:
"""draw attention away from the given string"""
return Text.from_markup(f"[dim]{escape(s)}")
return Text.from_markup(f"[dim]{s}")
def warn(s: str) -> Text:
return Text.from_markup(f"[yellow]{escape(s)}")
return Text.from_markup(f"[yellow]{s}")
def format_parts_id(data: Union[rd.AttackSpec, rd.MBCSpec]):

View File

@@ -159,8 +159,9 @@ def render_call(layout: rd.DynamicLayout, addr: frz.Address) -> str:
s.append(f"){rest}")
newline = "\n"
# Use default (non-dim) styling for API details so they remain readable in -vv output
return f"{pname}{{pid:{call.thread.process.pid},tid:{call.thread.tid},call:{call.id}}}\n{newline.join(s)}"
return (
f"{pname}{{pid:{call.thread.process.pid},tid:{call.thread.tid},call:{call.id}}}\n{rutils.mute(newline.join(s))}"
)
def render_short_call(layout: rd.DynamicLayout, addr: frz.Address) -> str:
@@ -179,8 +180,7 @@ def render_short_call(layout: rd.DynamicLayout, addr: frz.Address) -> str:
s.append(f"){rest}")
newline = "\n"
# Use default (non-dim) styling for API details so they remain readable in -vv output
return f"call:{call.id}\n{newline.join(s)}"
return f"call:{call.id}\n{rutils.mute(newline.join(s))}"
def render_static_meta(console: Console, meta: rd.StaticMetadata):

View File

@@ -274,8 +274,12 @@ SUPPORTED_FEATURES[Scope.FUNCTION].update(SUPPORTED_FEATURES[Scope.BASIC_BLOCK])
class InvalidRule(ValueError):
def __init__(self, msg):
super().__init__()
self.msg = msg
def __str__(self):
return f"invalid rule: {super().__str__()}"
return f"invalid rule: {self.msg}"
def __repr__(self):
return str(self)
@@ -285,15 +289,20 @@ class InvalidRuleWithPath(InvalidRule):
def __init__(self, path, msg):
super().__init__(msg)
self.path = path
self.msg = msg
self.__cause__ = None
def __str__(self):
return f"invalid rule: {self.path}: {super(InvalidRule, self).__str__()}"
return f"invalid rule: {self.path}: {self.msg}"
class InvalidRuleSet(ValueError):
def __init__(self, msg):
super().__init__()
self.msg = msg
def __str__(self):
return f"invalid rule set: {super().__str__()}"
return f"invalid rule set: {self.msg}"
def __repr__(self):
return str(self)
@@ -1093,15 +1102,15 @@ class Rule:
@lru_cache()
def _get_yaml_loader():
try:
# prefer to use CLoader to be fast, see #306 / CSafeLoader is the same as CLoader but with safe loading
# prefer to use CLoader to be fast, see #306
# on Linux, make sure you install libyaml-dev or similar
# on Windows, get WHLs from pyyaml.org/pypi
logger.debug("using libyaml CSafeLoader.")
return yaml.CSafeLoader
logger.debug("using libyaml CLoader.")
return yaml.CLoader
except Exception:
logger.debug("unable to import libyaml CSafeLoader, falling back to Python yaml parser.")
logger.debug("unable to import libyaml CLoader, falling back to Python yaml parser.")
logger.debug("this will be slower to load rules.")
return yaml.SafeLoader
return yaml.Loader
@staticmethod
def _get_ruamel_yaml_parser():
@@ -1143,8 +1152,6 @@ class Rule:
else:
# use pyyaml because it can be much faster than ruamel (pure python)
doc = yaml.load(s, Loader=cls._get_yaml_loader())
if doc is None or not isinstance(doc, dict) or "rule" not in doc:
raise InvalidRule("empty or invalid YAML document")
return cls.from_dict(doc, s)
@classmethod
@@ -1449,13 +1456,6 @@ class RuleSet:
scope: self._index_rules_by_feature(scope, self.rules_by_scope[scope], scores_by_rule) for scope in scopes
}
# Pre-compute the topological index mapping for each scope.
# This avoids rebuilding the dict on every call to _match (which runs once per
# instruction/basic-block/function/file scope, i.e. potentially millions of times).
self._rule_index_by_scope: dict[Scope, dict[str, int]] = {
scope: {rule.name: i for i, rule in enumerate(self.rules_by_scope[scope])} for scope in scopes
}
@property
def file_rules(self):
return self.rules_by_scope[Scope.FILE]
@@ -1885,13 +1885,11 @@ class RuleSet:
"""
done = []
# use a list as a stack: append new items and pop() from the end, both O(1).
# order doesn't matter here since every rule in the queue is processed eventually.
rules_stack = list(rules)
while rules_stack:
rule = rules_stack.pop()
# use a queue of rules, because we'll be modifying the list (appending new items) as we go.
while rules:
rule = rules.pop(0)
for subscope_rule in rule.extract_subscope_rules():
rules_stack.append(subscope_rule)
rules.append(subscope_rule)
done.append(rule)
return done
@@ -1940,11 +1938,11 @@ class RuleSet:
"""
feature_index: RuleSet._RuleFeatureIndex = self._feature_indexes_by_scopes[scope]
rules: list[Rule] = self.rules_by_scope[scope]
# Topologic location of rule given its name.
# That is, rules with a lower index should be evaluated first, since their dependencies
# will be evaluated later.
# Pre-computed in __init__ to avoid rebuilding on every _match call.
rule_index_by_rule_name = self._rule_index_by_scope[scope]
rule_index_by_rule_name = {rule.name: i for i, rule in enumerate(rules)}
# This algorithm is optimized to evaluate as few rules as possible,
# because the less work we do, the faster capa can run.
@@ -2040,9 +2038,7 @@ class RuleSet:
candidate_rules = [self.rules[name] for name in candidate_rule_names]
# Order rules topologically, so that rules with dependencies work correctly.
# Sort descending so pop() from the end yields the topologically-first rule in O(1).
RuleSet._sort_rules_by_index(rule_index_by_rule_name, candidate_rules)
candidate_rules.reverse()
#
# The following is derived from ceng.match
@@ -2057,7 +2053,7 @@ class RuleSet:
augmented_features = features
while candidate_rules:
rule = candidate_rules.pop()
rule = candidate_rules.pop(0)
res = rule.evaluate(augmented_features, short_circuit=True)
if res:
# we first matched the rule with short circuiting enabled.
@@ -2096,7 +2092,6 @@ class RuleSet:
candidate_rule_names.update(new_candidates)
candidate_rules.extend([self.rules[rule_name] for rule_name in new_candidates])
RuleSet._sort_rules_by_index(rule_index_by_rule_name, candidate_rules)
candidate_rules.reverse()
return (augmented_features, results)
@@ -2233,10 +2228,7 @@ def get_rules(
try:
rule = Rule.from_yaml(content.decode("utf-8"))
except InvalidRule as e:
if e.args and e.args[0] == "empty or invalid YAML document":
logger.warning("skipping %s: %s", path, e)
continue
except InvalidRule:
raise
else:
rule.meta["capa/path"] = path.as_posix()

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "9.3.1"
__version__ = "9.2.1"
def get_major_version():

View File

@@ -1,34 +0,0 @@
# mapa html-map demo
*2026-03-16T17:05:38Z by Showboat 0.6.1*
<!-- showboat-id: 1cf46a16-c3d7-459d-8593-c23080cb12f6 -->
Generate a standalone HTML report for a sample binary and summarize the report contents.
```bash
tmp=$(mktemp /tmp/mapa-html-map-XXXXXX.html)
PYTHONWARNINGS=ignore ./.venv/bin/python -m mapa binaries/01/16/mpbindump.exe --output html-map --quiet > "$tmp"
PYTHONWARNINGS=ignore /usr/bin/python3 - "$tmp" <<"PY"
import json
import re
import sys
from pathlib import Path
text = Path(sys.argv[1]).read_text()
match = re.search(r"<script type=\"application/json\" id=\"mapa-data\">(.*?)</script>", text, re.S)
data = json.loads(match.group(1))
print("doctype", text.splitlines()[0])
print("functions", len(data["functions"]))
print("tags", len(data["tags"]))
print("strings", len(data["strings"]))
PY
rm "$tmp"
```
```output
doctype <!doctype html>
functions 1406
tags 12
strings 81
```
To open the report directly in your browser, use `python -m mapa <sample> --output html-map --open`.

Binary file not shown.

After

Width:  |  Height:  |  Size: 210 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 108 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 79 KiB

View File

@@ -1,105 +0,0 @@
# Compilation Unit Boundary Evaluation
Ground truth data from assemblage: ~500K binaries with PDB-derived source file mappings, ~1000 functions each. This document describes how to turn that data into a repeatable evaluation pipeline for CU boundary detection.
## Ground truth definition
Each function in a binary has a source file path from debug info. Source files are either **impl files** (`.c`, `.cpp`, `.cc`, `.cxx`) or **headers** (`.h`, `.hpp`, extensionless STL headers like `vector`, `xmemory`, etc.). A compilation unit is defined by its impl file — header-sourced functions are template instantiations or inline expansions compiled into whichever CU includes them.
To assign each function to a CU:
1. Sort functions by start address.
2. For each function, if its source is an impl file, that's its CU.
3. Otherwise, forward-fill: inherit the CU of the most recent preceding impl function. If none exists (header functions at the start of the binary), backward-fill from the first impl file.
4. Ground truth boundaries = the set of gap indices where `CU[i] != CU[i-1]`.
This assignment was validated on 10 binaries (21K functions). Every ground truth boundary has an impl file on at least one side — there are zero boundaries where both flanking functions come from headers. Forward-fill produces CU fragmentation ratios of 1.0-1.2x (nearly perfectly contiguous CUs).
## What the data looks like
Source file paths fall into classifiable categories based on filesystem structure. User code lives under `c:\assemblage\builds\HASH\PROJECT\*.cpp` (not under `external/`). Vendored libraries are at `...\PROJECT\external\LIBNAME\*`. MSVC STL headers are under `C:\Program Files\...\MSVC\...\include\` (e.g., `vector`, `xmemory`). CRT sources come from `D:\a\_work\...\vctools\crt\...` or `d:\th\minkernel\crts\...`.
Library identity is derivable from the path prefix. The assemblage build system makes external libraries explicit via the `external/LIBNAME/` convention.
### Template/STL interleaving
Functions from STL headers appear heavily interspersed within CUs. In SmxDecompiler.exe, 72% of functions come from MSVC STL headers, appearing in runs of up to 258 functions within a single CU. These are per-CU template instantiation copies (the same `std::_Verify_range` appears in 4 different CUs), not independent entities.
This means "source file changed" is a terrible boundary signal because it fires on every header-to-impl or header-to-header transition within a CU. On 3DSIFT.exe (heavy Eigen template use), source-file-change achieves only 3% precision. The actual CU boundary rate is low: only 5.9% of all inter-function gaps are real CU boundaries.
### CRT fragmentation
CRT code (from the ucrt static library) shows heavy fragmentation: dozens of tiny 1-3 function CUs interleaved by the linker. These are the hardest boundaries to detect and arguably the least useful to distinguish (separating `delete_scalar.cpp` from `new_array.cpp` has limited practical value). Consider collapsing all CRT functions into a single "CRT" meta-CU for scoring, or reporting CRT and non-CRT scores separately.
### Multi-fragment CUs
Some impl files appear in non-contiguous blocks (e.g., `inflate.c` interrupted by `infback.c`/`inffast.c` in zlib). This is uncommon in application code but frequent in CRT. For boundary scoring, treat each contiguous fragment as its own segment — the metric measures where transitions occur regardless of whether distant segments share a label.
## Metrics
### Primary: Boundary F1
For a predicted set of boundary gap indices vs the ground truth set, compute precision (fraction of predicted boundaries that are real), recall (fraction of real boundaries that are predicted), and F1 (harmonic mean). This directly measures the "where are the seams?" question. It's strict: a boundary predicted one function off from the true position scores as one FP + one FN.
### Secondary: WindowDiff
Standard text segmentation metric, more forgiving of near-misses. Slides a window of size `k` across the sequence and counts positions where the number of boundaries within the window differs between prediction and ground truth. Lower is better. Use `k = avg_segment_length / 2`.
### Library-level metrics
| Problem | Ground truth | Metric |
|---------|-------------|--------|
| CU boundaries | Impl-file assignment (above) | Boundary F1 + WindowDiff |
| Library clustering | Path-derived category (user-code, external:zlib, crt, msvc-stdlib, ...) | Adjusted Rand Index |
| User vs library | Binary classification from category | Per-function accuracy |
These are separate evaluations that can be run independently and solved in order: CU boundaries first, then library grouping, then user/library classification.
## Baseline results
Tested on the 10-binary sample (21K functions, 1238 CU boundaries):
| Heuristic | Precision | Recall | F1 range |
|-----------|-----------|--------|----------|
| No boundaries | - | 0.00 | 0.00 |
| Every source-file change | 0.03-0.79 | 1.00 | 0.06-0.88 |
| Category change | 0.11-0.50 | 0.02-0.28 | 0.04-0.29 |
| Impl-file stream tracking | 1.00 | 1.00 | 1.00 |
The impl-file oracle (skip headers, flag when the .c/.cpp changes) achieves perfect F1. This is the ceiling; it confirms that CU boundaries are exactly the impl-file transitions in the source data. The gap between the naive baselines and this oracle is the space we're trying to close using binary-level features.
## Evaluation pipeline
### Step 1: Ground truth extraction
From debug info, produce a normalized table per binary:
```
binary_hash | func_index | func_start | func_end | cu_id | library_category
```
`cu_id` is the impl file path (or a hash of it). `library_category` is derived from the path prefix. Store as Parquet or SQLite since the CSV format won't scale to 500K binaries.
Pre-compute per-binary boundary sets as arrays of gap indices.
### Step 2: Predictor interface
A predictor takes a binary's function list (start addresses and sizes only, no source info, no debug info) and returns a set of predicted boundary indices. The predictor can use any features extractable from the raw binary: inter-function gap sizes and padding patterns, call graph edge density across each gap, data reference patterns, string attribution, function naming (if symbols present), alignment byte patterns, etc.
### Step 3: Scoring
For each binary, compute boundary F1 between prediction and ground truth. Aggregate across the dataset:
Report both macro-average (mean F1 across binaries, treating each binary equally) and micro-average (pool all gaps, compute F1 once, biased toward larger binaries). Add per-category breakdowns (user code only, CRT only, etc.) to understand where the predictor succeeds and fails.
### Step 4: Iterate
Try different feature combinations and scoring strategies, compare F1. Per mapa-cu-ideas.md, the boundary scorer is the spine of the pipeline and every algorithm becomes a feature generator for gap scoring.
## Design decisions
Boundary F1 was chosen over clustering metrics (ARI, NMI) because those conflate boundary detection with label assignment. We want to decouple the two: find boundaries first, then label segments.
Forward-fill for CU assignment matches how compilation works. The compiler processes the impl file's own functions first, then template instantiations from included headers. Header functions belong to the most recent impl file. The 1.0-1.2x fragmentation ratio validates this model.
Header-to-header transitions are never counted as boundaries because the data shows zero CU boundaries where both flanking functions come from headers. Every CU has at least one impl file as its root. Template instantiations from different headers within the same CU are not separate CUs.

View File

@@ -1,100 +0,0 @@
# mapa design
## Architecture
Seven layers, each in its own module under the `mapa/` package:
| Module | Responsibility |
|---|---|
| `mapa/model.py` | Backend-neutral dataclasses: `MapaReport`, `MapaMeta`, `MapaSection`, `MapaLibrary`, `MapaFunction`, `MapaCall`, `MapaCaller`, `MapaString`, `MapaProgramString`, `AssemblageRecord` |
| `mapa/assemblage.py` | Assemblage CSV loading, column validation, SHA-256 filtering, RVA-to-VA conversion, exact-row deduplication |
| `mapa/ida_db.py` | IDA database lifecycle: `resolve_database()`, `open_database_session()`, SHA-256 caching, flock-based concurrency guard |
| `mapa/collector.py` | Populates `MapaReport` from an open `ida_domain.Database`. All IDA queries live here. |
| `mapa/renderer.py` | Rich-based text rendering and plain single-function summary formatting from `MapaReport`. No IDA dependency. |
| `mapa/html_renderer.py` | Self-contained `html-map` rendering from `MapaReport`. No IDA dependency. |
| `mapa/cli.py` | Argument parsing, capa/assemblage loading, output-mode selection, `--open` temp-file/browser handling, orchestration |
`scripts/mapa.py` is a thin entry point that delegates to `mapa.cli.main()`.
The CLI validates output-mode combinations before analysis. For `--output html-map --open`, it renders the HTML once, writes it to a temporary `.html` file via `NamedTemporaryFile(delete=False)`, and opens the browser with `webbrowser.open(file://...)`.
## Database lifecycle
Modeled on `idals.py` from idawilli:
1. If input is `.i64`/`.idb`, use directly.
2. Otherwise, hash the file (MD5 + SHA-256), check `~/.cache/mandiant/mapa/<sha256>.i64`.
3. On cache miss: acquire advisory flock, create database via `Database.open()` with `IdaCommandOptions(auto_analysis=True, new_database=True, output_database=..., load_resources=True)`, wait for `ida_auto.auto_wait()`.
4. On cache hit or after creation: open read-only with `new_database=False, save_on_close=False`.
5. Concurrency guard: poll for `.nam` file disappearance + `fcntl.flock` on `<db>.lock` + TOCTOU re-check.
## Assemblage loading
Assemblage loading is deferred until after mapa opens the IDA database, because the effective sample SHA-256 may come from either the raw input file or the database metadata.
`mapa.assemblage.load_assemblage_records()`:
- reads the CSV with `csv.DictReader`
- requires `hash`, `name`, `start`, `end`, and `source_file`
- filters rows by sample SHA-256, case-insensitively
- converts `start` and `end` RVAs to VAs by adding `db.base_address`
- strips the trailing provenance suffix from `source_file` only at render time, via `AssemblageRecord.source_path`
- deduplicates exact duplicate rows while preserving CSV order for distinct ambiguous matches
The result is `dict[int, list[AssemblageRecord]]`, keyed by function start VA.
## Collector design
The collector builds several indexes before the main function loop:
- import_index: `dict[int, (module, name)]` from `db.imports.get_all_imports()`
- extern_addrs: `set[int]` from functions in XTRN segments
- thunk_targets: `dict[int, int]` via `_resolve_thunk_target()` — follows code refs then data refs, max depth 5, single-target chains only
- resolved_callers/callees: built by walking all non-thunk function flowcharts, resolving call targets through thunk chains, classifying as internal vs API
String extraction follows single data-reference chains from each instruction up to depth 10. The collector returns both the discovered string VA and the raw string value for each hit.
The collector stores string data in two shapes:
- `MapaFunction.strings` for the text report and tooltip summaries. These stay function-local and deduplicate by trimmed display value.
- `MapaReport.program_strings` for `html-map`. These are keyed by string VA, preserve duplicate display values at different addresses, merge tags across repeated references, and track the set of referencing function addresses.
Assemblage data is attached per function during collection. `MapaFunction.assemblage_records` carries zero or more `AssemblageRecord` values for the function start address. The collector does not use Assemblage to rename functions, callers, or callees.
## ida-domain API usage
Primary queries used:
- `db.functions` — iteration, `get_at()`, `get_name()`, `get_flags()`, `get_flowchart()`
- `db.segments.get_all()` — section enumeration
- `db.imports.get_all_modules()`, `get_all_imports()` — library/import enumeration
- `db.xrefs.code_refs_from_ea()`, `data_refs_from_ea()`, `calls_from_ea()` — call/thunk resolution
- `db.instructions.is_call_instruction()`, `get_mnemonic()` — instruction classification
- `db.heads.size()` — instruction byte size
- `FlowChart` with `FlowChartFlags.NOEXT | FlowChartFlags.PREDS` — CFG traversal
- `FunctionFlags.THUNK`, `FunctionFlags.LIB` — function classification
No legacy `ida_*` module calls are used. All queries go through `ida-domain`.
## Rendering
`mapa/renderer.py` prints the text report in function address order. For each function, it prints the IDA-derived header first and then any Assemblage annotations as `assemblage name:` and `assemblage file:` lines. When multiple distinct Assemblage rows map to one function start address, the renderer prints all of them in order.
The text renderer also exposes a plain single-function summary formatter used by `html-map` tooltips. The row order matches text mode: Assemblage lines, xrefs, CFG stats, capa matches, internal calls, APIs, and strings.
For source-file separators, mapa uses the first Assemblage record's normalized source path as the function's primary source path. The text renderer tracks the last seen non-empty primary path across the function list. Missing Assemblage data does not trigger a separator and does not reset that state. When a later function introduces a different primary path, the renderer prints a muted horizontal rule with `[ <path> ]` immediately before that function.
`mapa/html_renderer.py` renders a single self-contained HTML document. It emits a split view: a left function pane and a right string pane, both with independent scrolling. The panes are separated by a draggable vertical divider implemented with a small inline pointer-event handler. The renderer emits: one square per function in address order; one program-string row per string VA in address order; tag controls with visible function counts; direction and depth controls for neighborhood traversal; a small legend for heat/seed/dim semantics; right-aligned visible tags in each string row; inline JSON data covering function summaries, direct tag memberships, direct string memberships, and caller/callee adjacency; and a single floating tooltip plus a small inline script. That script resolves the active seed source from a hovered or locked function, tag, or string; chooses caller-only, callee-only, or undirected traversal; runs a bounded breadth-first search from each seed; sums geometric-decay contributions using per-seed shortest distance; and renders the result as a heat overlay with a distinct seed outline.
## String tagging
Vendored Quantum Strand string databases live under `mapa/string_tags/data/` in five families: OSS/CRT libraries (gzipped JSONL), expert rules (plain JSONL), Windows API names (gzipped text), global prevalence (gzipped JSONL + binary hash files), and junk-code strings (gzipped JSONL).
The `mapa/string_tags/` package has three modules:
- `model.py``StringTagMatch` and `StringTagResult` dataclasses
- `loaders.py` — file-format readers using `msgspec`, `gzip`, `hashlib`, and `importlib.resources`
- `tagger.py``StringTagger` class with `tag_string(raw) -> StringTagResult`, plus `load_default_tagger()` which lazily loads and caches all databases process-wide
The collector tags raw strings before `rstrip()` trimming. When two raw strings collapse to the same display value, their tags and match metadata are merged. `MapaString` carries `tags: tuple[str, ...]` and `tag_matches: tuple[StringTagMatch, ...]`.
The text renderer uses a Rich `Text`-based helper to right-align the visible tag column on `string:` rows. The HTML renderer reuses the same visible-tag policy, builds its top tag controls from those visible tags only, shows the distinct-function count for each visible tag, and renders the visible tags right-aligned in each program-string row. The visible tag policy suppresses `#common` when a more-specific tag is also present.

View File

@@ -1,256 +0,0 @@
> read problem-transcript.md and digest it, summarize it, restructure it and explain it.
>
> ok. now ultrathink. consider the problem deeply. try to give some insights, some novel perspective, ideas for techniques and algorithms. what strikes you?
I've been sitting with this for a while. Here's what strikes me, organized from the structural observations that feel most fundamental down to concrete algorithmic ideas.
------
## The Deep Structure: A Graph Pinned to a Line
The most mathematically distinctive thing about this problem is that you have a *graph* (the call graph, the data xref graph) whose nodes are *pinned to a one-dimensional manifold* (the address space). This is a very specific structure that most generic graph algorithms ignore entirely. It means you're not just doing community detection — you're doing community detection where the communities are *intervals*. That's a massive constraint that dramatically shrinks the search space.
Think about it: if you have 10,000 functions, the number of arbitrary partitions is astronomically large. But the number of ways to partition 10,000 linearly-ordered nodes into *contiguous groups* is only 2^9,999 in the worst case (one choice per gap), and if you're looking for, say, 20 boundaries, it's just C(9,999, 20) — large, but tractable with dynamic programming. The contiguity assumption turns a combinatorially explosive problem into something closer to 1D segmentation, which is a much better-understood and more tractable class of problems.
## The Call Edge Length Distribution — A Fingerprint of Structure
Here's an idea I find really compelling. For every edge in the call graph, compute the *address distance* between caller and callee: `|addr(A) - addr(B)|`. Now plot the histogram of these edge lengths across the whole binary.
I'd expect this histogram to be multimodal. There should be a peak near zero representing intra-compilation-unit calls (functions calling their neighbors). There should be a broader hump at medium range representing intra-library cross-CU calls. And there should be long-range outliers representing cross-boundary calls — where user code calls into a library at a distant address.
The valleys between these modes are telling you the *characteristic scales* of the hierarchy. The first valley gives you the typical size of a compilation unit. The second gives you the typical size of a library. This is essentially a multi-scale analysis of the binary's structure, and you get it almost for free before running any clustering algorithm. You could use those characteristic distances as thresholds for later steps.
## Min-Cut / Max-Flow for Boundary Detection
The transcript describes a "tension" along paths between known first-party anchors (like `main`) and known library anchors (like FLIRT-matched or string-anchored functions). This maps almost perfectly to a **minimum cut problem**.
Set it up like this: create a super-source *S* connected to all known first-party nodes, and a super-sink *T* connected to all known library nodes. The edges in the call graph get capacities. What should the capacity be? This is where you encode your priors — you could weight by *inverse address distance* (so cutting a long-range edge is cheap, cutting a short-range edge between neighbors is expensive), or by the number of shared data references, or both. Then the min-cut between S and T partitions the graph into first-party and third-party, and the *cut edges themselves are literally the API boundary* — the places where user code transitions into library code.
This is elegant because it naturally handles ambiguity. You don't have to hand-define a propagation rule; the flow algorithm figures out where the cheapest place to draw the line is. And you can add new anchors (from FLIRT, strings, human input) simply by connecting them to S or T, and the cut re-adjusts.
## The Genome Annotation Analogy
This problem is strikingly similar to **genome annotation** in bioinformatics. You have a long linear sequence (the binary). It contains functional regions (genes = functions). These regions cluster into operons and gene families (compilation units, libraries). You have some known markers (homolog matches ≈ FLIRT signatures). You want to segment the sequence into regions of different character, exploiting the fact that related elements are co-located.
The classic tool for this is the **Hidden Markov Model**. Imagine an HMM where the hidden states represent "this function belongs to first-party code," "this function belongs to library A, CU 1," etc. The observations at each state are the features of that function — its string references, its call graph characteristics, its instruction patterns. The transition probabilities encode the contiguity assumption: staying in the same CU is high probability, transitioning between CUs within a library is medium, jumping between libraries is low.
You'd train this in a semi-supervised way, anchoring some states with your ground truth, and letting Viterbi or forward-backward propagate through the rest. The HMM framework is natural here because the binary *is* a sequence, and the Markov property (what matters is what group you're currently in, not the full history) matches the linker's behavior well.
## Personalized PageRank as a Soft Classification
Here's a technique that handles the graph propagation cleanly. Run **personalized PageRank** (PPR) seeded from each anchor set. One PPR vector seeded from `main` gives you a "reachability-from-first-party" score for every node. Another seeded from known zlib functions gives a "zlib affinity" score. A third from OpenSSL anchors gives an "OpenSSL affinity" score.
Each function is then softly classified by which PPR vector dominates at that node. The beauty is that PPR naturally decays with graph distance and respects the connectivity structure — a tightly-knit library will "trap" the random walk, giving high PPR scores internally and low scores outside. The boundary between two regions is where the competing PPR scores cross over.
You could even visualize this as a heat map over the address space, and the boundaries would pop out visually.
## Exploiting the Bipartite Structure: Code ↔ Data
The transcript mentions that data locality mirrors code locality, but I think this signal is underexploited if you treat it as a separate, independent confirmation channel. Consider the *bipartite graph* between functions and the data items they reference. Two functions that reference overlapping or nearby data are likely from the same compilation unit. This gives you a similarity measure between functions that's *completely independent* of the call graph.
You could build a function-to-function similarity matrix where similarity = how much their referenced data overlaps or how close their referenced data is in the data section. Then cluster on this matrix. The resulting clusters should correspond to compilation units, and they give you a second opinion that you can reconcile with the call-graph-derived clusters.
When both signals agree, you have high confidence. Where they disagree, something interesting is happening — maybe a utility function shared across CUs, or a data section that got reordered.
## Directed Graph Asymmetry as a Library Detector
Here's a subtlety the transcript alludes to but doesn't fully develop. Call graphs are *directed*, and the directionality matters. Library code has a specific shape: it gets **called from far away** (user code invokes the library's public API) but **calls to nearby functions** (internal library routines). So a library entry point has high *in-degree from distant addresses* but low *out-degree to distant addresses*.
You could compute, for each function, two metrics: the average address distance of its callers, and the average address distance of its callees. Plot every function on this 2D plane. I'd expect clusters to emerge — first-party code has short-range callers and a mix of callees (some local, some long-range calls into libraries), while library internals have short-range everything. Library *entry points* are the distinctive ones: long-range callers, short-range callees. They're the "necks" between the communities.
## The Bisection Oracle Strategy
The "git bisect" idea from the transcript is really interesting and I think it deserves formal treatment. If you have an expensive oracle (heavy analysis, LLM query, human analyst) and you want to minimize queries, this is an **active learning** problem.
Concretely: you have a sequence of functions ordered by address, and you know that somewhere in this sequence there's a boundary between first-party and library code. You believe the sequence is monotonic (once you cross from first-party to library, you don't come back — at least within a region). Binary search finds the boundary in O(log n) queries. But real binaries might have multiple interleaved regions, so you'd need a slightly more sophisticated approach — maybe something like the **multi-armed bandit** formulation, where each candidate boundary location has an uncertainty estimate, and you query the point that maximally reduces global uncertainty.
## Strongly Connected Components as Ground Truth CU Detectors
One more concrete trick: compute the **strongly connected components** of the call graph. Mutual recursion (A calls B, B calls A) almost always happens within a single compilation unit. So SCCs give you "for free" sets of functions that definitely belong together. These become seeds for clustering — merge each SCC into a single super-node, then run your boundary detection on the reduced graph.
------
## Synthesis
If I were designing the pipeline, I'd layer these techniques:
Start with **cheap deterministic anchors** — FLIRT matches, string attribution, `main` identification. Then use the **edge-length histogram** to estimate the characteristic scales. Run **SCC detection** to collapse definite-same-CU functions. Build edge weights from address proximity and shared data references, then use **min-cut** from known-first-party to known-library to find the primary boundary. Within each side, use **agglomerative clustering** on the address axis, breaking only at points where the call graph edge density drops off, to find compilation unit boundaries. Use **personalized PageRank** from each library's anchors for soft assignment. Finally, reconcile against the **data xref bipartite clustering** as independent validation.
The result would be a hierarchical segmentation of the address space with confidence scores, and the boundaries would be exactly the API transition points that a reverse engineer cares about.
---
These are good, and the interesting thing is where they overlap with what I already proposed — and more importantly, where the *gaps between them* reveal something deeper. Let me think through what these trigger.
------
> here are some additional ideas. do they cause you to have any new insights or ideas?
>
> To transition MAPA from a good heuristic tool to a cutting-edge analytical engine, here are four algorithms that fit perfectly into their architecture: Algorithm 1: Spatial-Graph Label Propagation (Solving the "Tension") Instead of manually traversing graphs, use Label Propagation.
>
> 1. Build an adjacency matrix of the functions.
> 2. Weight the edges by both call frequency AND physical memory proximity. (e.g., Function A and B are strongly linked if A calls B and they are 100 bytes apart).
> 3. Lock the "Anchor" nodes (Ground Truth).
> 4. Run the propagation algorithm. It will mathematically resolve the "tension" the speaker mentioned, assigning a probability score to every single function (e.g., "Function 0x40100 is 88% likely to be zlib"). Algorithm 2: Spectral Clustering for "Compilation Units" The speaker wants to identify individual compilation units (source files). Because linkers lay out source files contiguously, compilation units form ultra-dense "cliques" in the graph.
>
> - Use Spectral Graph Partitioning. By calculating the Laplacian of the call graph, the algorithm will naturally shatter the binary into discrete chunks.
> - Because compilation units are sequential in memory, you can penalize the clustering algorithm for grouping functions that are physically far apart. The output will be distinct, colored blocks perfectly mapping to the original .c or .cpp files. Algorithm 3: Minimum Cut / Maximum Flow for API Boundaries To find the exact boundary between user code and library code, treat the graph like a pipe network carrying water from main to the library strings.
> - Run a Min-Cut/Max-Flow algorithm. The algorithm will identify the "bottleneck" edges where water struggles to pass from the malware to the library.
> - Those bottlenecks are, almost by definition, the public API functions of the statically linked library.
## The Three Algorithms Are Solving Three Different Sub-Problems
The most important realization is that these three aren't competing approaches — they're a **layered pipeline** attacking different levels of the hierarchy. Min-cut finds the macro boundary (first-party vs. library). Spectral clustering finds the meso-structure (compilation units). Label propagation assigns soft identities (which library, what confidence). That ordering matters, and each stage's output feeds the next.
But there's something missing from this pipeline: none of them address **how to discover the right number of groups**. Min-cut gives you a bisection, but a real binary might have first-party code plus six libraries. Spectral clustering needs you to pick *k*. Label propagation needs seed labels. So the first real question is: **how many libraries are we even looking for?**
## The Eigengap Tells You How Many Libraries Exist
This is where the spectral approach becomes more powerful than the proposal suggests. Don't just use spectral clustering to *partition* — first use the **eigenvalue spectrum of the graph Laplacian** to *discover* the structure. Specifically, compute the first, say, 50 eigenvalues of the address-weighted call graph Laplacian and look for the **eigengap** — the largest jump between consecutive eigenvalues. If the first three eigenvalues are near zero and then the fourth jumps up, that's telling you the graph has roughly three natural communities. You've learned the number of libraries (plus first-party code) before doing any clustering at all.
Even better: the **eigenvectors** themselves, when plotted against function address, should look like step functions. Within a compilation unit, all functions have similar eigenvector values (because they're tightly coupled). At a boundary, the values jump. So you can literally do **change-point detection on the Fiedler vector** (the eigenvector corresponding to the second-smallest eigenvalue) plotted over the address axis, and the change points are your compilation unit boundaries. This fuses the spectral and spatial approaches in a way that's more principled than penalizing distance in the clustering objective.
## The Weighting Scheme Deserves More Thought
Algorithm 1 proposes weighting edges by call frequency AND memory proximity. This sounds right but the *combination function* matters enormously. Additive weighting (`w = α·calls + β·proximity`) treats them as interchangeable signals. Multiplicative weighting (`w = calls · f(proximity)`) treats proximity as a **prior** that modulates graph evidence. I think the multiplicative form is more correct.
Here's why: a call edge between two functions 50 bytes apart is *qualitatively different* from a call edge between two functions 500KB apart. The first is almost certainly intra-CU. The second is almost certainly cross-boundary. The call exists in both cases, but its *meaning* changes based on distance. A multiplicative weight captures this — a long-range call gets a small weight, so cutting it is cheap (it's probably a cross-boundary call), while cutting a short-range call is expensive.
Concretely, something like `w(e) = calls(e) · exp(-distance(e) / λ)` where λ is the characteristic CU size (which you estimated from the edge-length histogram I described earlier). This exponential decay naturally encodes "nearby calls bind tightly, distant calls bind weakly."
## A Critique of Min-Cut That Leads Somewhere Interesting
The min-cut formulation as described has a subtle problem: it's **binary**. It partitions into first-party and library. But what if there are three libraries? You'd need to run it multiple times, or use multi-terminal cut, which gets expensive.
But here's what the min-cut framing *does* give you that's uniquely valuable: the **cut edges are the API boundary**, and the **capacity of the cut is a confidence measure**. A large min-cut value means the boundary is well-defined (lots of edges had to be severed). A small min-cut means the separation is tenuous — maybe you've drawn the line in the wrong place, or maybe the "library" is actually tightly integrated with user code.
This leads to a practical idea: **iterate**. Run min-cut to get a first-party/library partition. Then *within the library side*, run min-cut again between the two most distant anchor sets (say, zlib anchors vs. OpenSSL anchors). This recursive bisection via min-cut naturally produces the hierarchy. And at each level, the cut capacity tells you how confident you should be.
## A New Idea These Three Together Trigger: Diffusion Distance as a Universal Metric
All three algorithms are really measuring the same underlying thing from different angles: **how easily does information flow between two nodes?** Label propagation is a diffusion process. Spectral clustering uses the eigenvectors of the diffusion operator. Min-cut measures the bottleneck in flow.
This suggests a unifying abstraction: compute the **diffusion distance** between every pair of functions. Diffusion distance is the L2 distance between the rows of the matrix `exp(-t·L)` where L is the graph Laplacian and t is a time parameter. At small t, only very close (tightly connected, nearby) functions are similar. At large t, entire libraries blur into single points.
The beauty is that **different values of t reveal different levels of the hierarchy**. Small t resolves compilation units. Medium t resolves libraries. Large t resolves the first-party/library split. You get the entire hierarchical segmentation from a single mathematical object just by tuning a parameter.
You could visualize this as a dendrogram: at what "diffusion time" do two functions merge into the same cluster? Functions in the same CU merge almost immediately. Functions in the same library merge at medium time. Everything merges eventually. The dendrogram *is* the hierarchical classification you're after.
## One Final Practical Insight: Robustness to Bad Disassembly
All of these techniques assume the call graph is accurate, but in real binaries — especially malware — disassembly is imperfect. Indirect calls get missed. Tail calls look like jumps rather than calls. Obfuscated code deliberately breaks analysis.
This is where the **spatial signal acts as a safety net**. Even when the graph is incomplete or noisy, the address layout is always there and always reliable. If your spectral clustering says two functions belong in different groups but they're 20 bytes apart with no other functions between them, the spatial evidence should override. The address axis is your highest-confidence signal; the graph is the informative but noisy supplement. Any algorithm you build should be designed to degrade gracefully — if the graph were *completely* absent, you should still be able to do a passable job purely on address-range heuristics and data xref locality. The graph makes it better, but the 1D structure is the backbone.
---
> here are some further considerations. how should we integrate them into our list of strategies?
>
> 1. Features that seem underexploited.
>
> - 23a. Shared constant pools and tables.
> - 23b. Exception/unwind metadata neighborhoods.
> - 23c. Import-family fingerprints.
> - 23d. Repeated alignment and gap patterns between code blocks.
> - 23e. Data-section locality matching code-section locality.
> - 23f. Ego-network shape: not just who a function calls, but what its 1-hop and 2-hop neighborhood looks like.
> - 23g. Call-direction asymmetry across a candidate boundary.
>
> 1. A specific novel tactic: score gaps, not just nodes.
>
> - 24a. For each gap between adjacent functions in address order, compute “boundary likelihood.”
> - 24b. Features could include:
> - sudden change in seed evidence
> - reduced internal xrefs across the gap
> - padding/alignment pattern
> - change in referenced data neighborhood
> - change in import/API usage style
> - change in graph embedding neighborhood
> - 24c. Then solve for a sparse set of boundaries.
> - 24d. This is more direct than asking “what label is each function?”
>
> 1. Another promising tactic: recover “data islands” first.
>
> - 25a. Cluster strings/tables/rodata/data by address locality and xref neighborhoods.
> - 25b. Label those islands with higher confidence than code.
> - 25c. Then attach functions to the islands they reference.
> - 25d. Libraries often expose themselves through data long before their code is obvious.
>
> 1. A third tactic: active refinement near high-entropy frontiers.
>
> - 26a. Most of the binary may be easy.
> - 26b. The cost is concentrated near a few uncertain boundaries.
> - 26c. So only there should MAPA use expensive methods:
> - deeper semantic analysis
> - more complex embeddings
> - decompiler-derived features
> - analyst interaction
> - 26d. This is much better than spending equal effort on every function.
These four ideas aren't just additions to the list — they restructure how I think the whole pipeline should work. Let me explain why.
## The Paradigm Shift: Score Boundaries, Not Functions
Item 24 is the single most important idea in this entire conversation so far, and I think it should become the **primary framing** of the problem rather than a tactic alongside others.
Every algorithm we've discussed — label propagation, spectral clustering, min-cut, PPR, HMMs — has been asking "what label does each function get?" But that's the indirect question. The direct question is "where are the seams?" And those seams live *between* functions, in the gaps. This reframing matters practically because the number of gaps (N-1 for N functions) is fixed, the features per gap are rich and computable, and the output you actually want — a segmentation — is literally a sparse subset of those gaps marked as boundaries.
This turns the whole problem into **supervised or semi-supervised boundary classification on a 1D sequence of gaps**, which is vastly simpler and more tractable than graph community detection. You compute a feature vector for each gap, you have some gaps where you know a boundary exists (between a FLIRT-matched library function and an adjacent non-library function, for instance), and you train a classifier. Or you don't even need training — you define a scoring function and then solve for the *sparsest* set of boundaries that explains your evidence, which is essentially LASSO or L1-penalized segmentation.
The gap features listed in 24b are excellent, and I'd add a few more that fall out of the earlier discussion: the diffusion distance between the functions flanking the gap, the min-cut capacity if you sever the graph at that point, and the eigenvector discontinuity at that gap in the Fiedler vector. Every algorithm we previously discussed becomes a **feature generator for the gap scorer** rather than a standalone solution. That's the architectural insight — the boundary scorer is the spine, and everything else feeds into it.
## Data Islands as the Foundation Layer
Item 25 is the second most important idea, and it reorders the pipeline in a way I didn't previously consider. The argument is subtle and correct: data is *more reliably clusterable than code* for several reasons. Strings are human-readable and often uniquely attributable to a library. Data items don't have the ambiguity of indirect calls. The data section layout reflects compilation unit structure just as faithfully as the code section, but with less noise. And critically, data doesn't get obfuscated nearly as often as code does in malware — the strings still need to be there at runtime.
So the pipeline should be: cluster the data section first into "islands" using address locality and cross-reference structure. Label those islands using string content, constant patterns, and any other attributable features. Then project those labels back onto functions via data references. A function that references three strings in the "zlib island" is almost certainly zlib code, even if you've never seen its bytes before and FLIRT doesn't match it.
This is powerful because it provides anchors that are **denser and more confident** than what you get from FLIRT or code-side analysis alone. It also means the data-island clustering becomes the *first* stage of the pipeline, before any code-side graph analysis. You start from data, anchor the code, then refine.
## The Underexploited Features Suggest a Richer Feature Space for Gaps
Items 23a through 23g aren't just a wishlist — they fill specific blind spots in the gap scoring framework:
**23a (shared constant pools/tables)** and **23e (data-section locality)** feed directly into the data-island strategy. But they also provide a gap feature: if the functions on either side of a gap reference data from the *same* data island, the gap is unlikely to be a boundary. If they reference data from *different* islands, it probably is.
**23b (exception/unwind metadata)** is a particularly clever signal I hadn't considered. Exception handling tables are generated per-compilation-unit by most compilers, and the unwinder metadata groups functions together in a way that directly reflects CU boundaries. This is almost ground truth when it's present — you should parse `.pdata`/`.xdata` (on Windows) or `.eh_frame` (on Linux) and treat co-membership in the same unwind group as a hard constraint. Functions sharing an unwind info record are in the same CU, period.
**23c (import-family fingerprints)** is interesting because it captures *style*. First-party code that does file I/O will call a different set of APIs than zlib's compression internals. You're not just looking at individual API calls — you're looking at the *distribution* of API families a function touches. Two functions that both use memory allocation + string manipulation + crypto APIs probably belong to the same logical unit (maybe an OpenSSL subsystem), while a function that only touches file I/O is probably a different module. This becomes a gap feature: does the API usage style change across this gap?
**23d (alignment and padding patterns)** is a beautiful low-level signal. Compilers emit characteristic alignment between functions (16-byte alignment, `int3` padding on MSVC, `nop` sleds on GCC). But *linkers* insert different padding between compilation units — often larger gaps or different padding bytes. So the gap's physical characteristics (how many bytes of padding, what the padding looks like) are direct evidence of whether it's an intra-CU gap or an inter-CU gap. This is a nearly free feature that requires no graph analysis at all.
**23f (ego-network shape)** is where things get really interesting algorithmically. Two functions might both have degree 5 in the call graph, but one's ego network is a tree (it calls 5 leaf functions that call nothing else — probably a dispatcher or API facade) while the other's ego network is a dense clique (its 5 callees all call each other — probably deep library internals). The *shape* of the local neighborhood, not just its size, is a fingerprint of where you are in the program's structure. You could compute small graph motif counts or local clustering coefficients and use those as features.
**23g (call-direction asymmetry)** completes the picture for boundary detection specifically. At a real API boundary, you expect calls going *into* the library (from the user-code side) but not calls going *out* of the library back into user code (libraries don't call their callers). So at a candidate boundary gap, you count directed edges crossing it in each direction. A strong asymmetry — many calls going deeper, few or none coming back — is a strong boundary signal. A gap where calls flow freely in both directions is probably internal to a single module.
## Active Refinement as the Final Stage, Not a Separate Strategy
Item 26 should be understood as the **execution strategy** for the whole pipeline, not a separate algorithm. The core insight is that uncertainty is spatially concentrated. After running the cheap, fast stages (data island clustering, gap feature computation, label propagation from anchors), most of the binary is probably classified with high confidence. What remains is a handful of ambiguous frontier zones — maybe 5-10% of the address space where two competing labels are close in score.
This is where you focus your expensive techniques: run the decompiler only on the ambiguous functions. Compute embeddings only for the frontier. Query the analyst only about the uncertain gaps. And do it in the "bisect" order — query the gap in the middle of the largest remaining uncertain region first, because its answer splits the ambiguity in half.
This means the pipeline architecture should explicitly support a **confidence map** over the address space. Every stage produces not just labels but uncertainty estimates. The uncertainty map guides where to spend further effort. And there's a natural stopping criterion: stop when the maximum remaining uncertainty drops below a threshold, or when the analyst says "good enough."
## The Revised Pipeline
Integrating everything, here's how I'd now structure it:
**Stage 0 — Feature extraction.** Parse the binary. Extract functions, call graph, data xrefs, strings, exception metadata, alignment patterns, import calls. Cheap and mechanical.
**Stage 1 — Data island clustering.** Cluster the data section by address locality and xref structure. Label islands using string content and constant pool attribution. This gives you the densest, most confident set of anchors.
**Stage 2 — Anchor projection.** Project data-island labels onto functions via xrefs. Combine with FLIRT matches and known entry points (`main`, etc.). Every function now has either a confident label, a tentative label, or no label.
**Stage 3 — Gap scoring.** For every inter-function gap, compute the full feature vector: padding pattern, data-island change, API style change, call-graph edge density across the gap, call-direction asymmetry, ego-network similarity of flanking functions, exception metadata boundary, eigenvector discontinuity. Score each gap's boundary likelihood.
**Stage 4 — Segmentation.** Solve for the sparse set of boundaries that best explains the gap scores and the anchor constraints. This could be L1-penalized changepoint detection, or an HMM where the gap scores inform transition probabilities.
**Stage 5 — Label assignment.** Within each segment, assign a library/CU label using the anchors, propagated labels, and graph community structure. This is where label propagation or spectral methods work *within* the already-established boundaries rather than trying to find them.
**Stage 6 — Active refinement.** Identify the highest-uncertainty boundaries. Apply expensive techniques — decompilation, semantic analysis, analyst queries — specifically at those points. Update the segmentation. Repeat until confidence is sufficient.
The key design principle is that boundary detection (stages 3-4) and label assignment (stage 5) are **decoupled**. You find where the seams are first, then figure out what's on each side. And the most reliable signals (data, metadata, physical layout) come first, with the noisier signals (graph topology, semantic analysis) used for refinement rather than discovery.

View File

@@ -1,83 +0,0 @@
# mapa plan: html call-graph neighborhood view
This plan explores a larger-neighborhood view for the HTML map. Today the page shows only direct relationships. A tag marks functions that reference strings with that tag, and a string marks functions that reference that exact string. The proposed experiment is to push that signal backward through the call graph so the page can show the code region around those direct hits.
The real question is whether the larger neighborhood stays localized enough to be useful. If a `#zlib` string grows into one compact region when we include one, two, or three caller hops, that supports the idea that tagged strings can anchor a broader library region. If the overlay quickly spreads into generic wrappers and dispatchers, the direct-hit view is probably carrying most of the useful signal already.
## Current behavior
The collector already has most of the data needed for this experiment. `mapa/collector.py` resolves thunk targets, builds `resolved_callers` and `resolved_callees`, and stores those relationships on each `MapaFunction` as `callers` and `calls`. The report model therefore already contains a usable reverse call graph.
The HTML renderer does not use that graph today. `mapa/html_renderer.py` emits only the function tooltip text, a tag-to-function index, and string rows with their direct function indices. The browser script then applies three binary states: tag border on, string fill on, or dim. There is no traversal, no score, and no way to distinguish direct evidence from nearby code.
One detail matters for later weighting. `_collect_tag_entries()` collapses each tag to a set of functions. That is enough for the current binary view, but it drops multiplicity. If one function references several `#zlib` strings, the current page still treats that as one direct hit. That simplification is acceptable for a first neighborhood experiment, but the lost multiplicity becomes relevant if we later want repeated string evidence to count more strongly.
## Proposed model
The first experiment should use caller-depth propagation only. For a selected tag or string, define the directly matching functions as depth 0. Then walk backward through callers. Depth 1 is the callers of direct-match functions, depth 2 is the callers of depth-1 functions, and so on. A bounded depth of 0 to 3, or at most 4, is enough for the experiment.
The overlay should weaken with distance. Direct matches stay strongest. Indirect matches fade with depth. A simple additive model is enough. Each direct-match function contributes a seed weight of 1.0. A caller at depth `d` receives `seed_weight * decay(d)`. If several matching paths reach the same function, add the contributions together. That gives the effect we want. A function that sits above several tagged descendants should stand out more than a function that reaches only one.
Two decay families are worth trying. Harmonic decay follows the original intuition of 1.0, 1/2, 1/3, 1/4. Geometric decay uses 1.0, 1/2, 1/4, 1/8. Harmonic keeps more mass at larger depths. Geometric is more conservative and is less likely to smear across the whole map through generic caller chains. The page should probably expose both and default to geometric.
For the first pass, the seed should be per direct-match function rather than per direct-match string. That keeps the experiment focused on graph expansion instead of mixing graph expansion with direct evidence density. If the idea looks useful, then a second pass can try per-string seeds or rarity-weighted seeds.
## What the page should show
The current page already has a useful split between tag selection and string selection. Tag selection uses the border and string selection uses the fill. That can stay. The propagation engine can compute two separate score arrays, one for the border channel and one for the fill channel. If both a tag and a string are active, both overlays remain visible.
The main challenge is interpretation. A single cumulative heat map can look compelling while hiding the reason a function is lit up. The user should be able to tell the difference between a direct match, a one-hop caller, and a function that receives several weak contributions through different paths. For that reason, the page should support both cumulative and exact-hop views. In cumulative mode, the page shows the total score up to the chosen depth. In exact-hop mode, the page shows only the functions at a single selected depth (depth 0, 1, or 2). If there is room, small multiples would be even better. Four aligned copies of the same function grid for depths 0 through 3 would make locality much easier to judge.
Direct matches should remain visually distinct from indirect ones. A direct seed should not disappear into the same faint channel as a weak propagated score. Tooltips should also report the exact score for the active overlay. Otherwise the view will be hard to debug.
## Recommended first implementation
The narrow version of this feature can stay almost entirely inside `mapa/html_renderer.py`. The collector already records the caller relationships needed for bounded backward traversal.
The renderer should emit one more structure in its inline JSON: `callersByIndex`, a reverse adjacency list keyed by function index. It can keep the existing direct seed data for tags and strings. The browser script can then compute scores on demand for the active selection. The data volume is small enough that this should stay cheap on typical samples.
The computation can stay simple. Initialize the direct-match functions as the depth-0 frontier. For each depth from 1 to `maxDepth`, push the previous frontier to its callers, multiply the step by the chosen decay factor, and add the contributions into a total score array. This counts bounded walks rather than strictly simple paths. With shallow depths, that is a reasonable approximation.
The first UI pass only needs a few controls: maximum caller depth, decay family, and view mode. The existing function grid and string list can remain in place.
A practical first cut would keep these defaults:
- caller-only propagation
- default depth of 2
- geometric decay
- additive scoring
- cumulative view, with exact-hop available as a switch
## Rendering considerations
The current 10-pixel square is large enough for a binary on/off signal, but it may be too small for subtle border intensity changes. Border alpha alone may be hard to read. A better approach is to keep direct tag hits as a solid border and render propagated tag scores with either a stronger border color ramp or a small glow. For strings, direct matches can keep the current solid fill while propagated scores use a fill alpha ramp. The exact styling can stay simple, but direct and indirect states should be separable at a glance.
The page should also keep dimming non-matches when any overlay is active. Otherwise weak propagated scores will be visually lost in large samples.
## Risks
The main risk is graph bleed. Generic wrappers, initialization code, dispatchers, shared error handlers, and utility helpers often sit one or two caller hops above many unrelated regions. Those functions can make the map look more connected than the underlying library region really is.
Cycles are another risk. Recursive or mutually recursive groups can accumulate score in ways that are mathematically consistent under a bounded-walk model but visually misleading. Strongly connected component collapsing is a possible future refinement, but it should not be part of the first pass.
These risks argue for conservative defaults. Caller-only propagation is easier to reason about than a bidirectional neighborhood. Depth should stay shallow. Geometric decay is a safer default than harmonic. Exact-hop inspection should be available so the user can see whether the first one or two shells are still localized.
## Variations worth testing
If the first pass looks promising, there are several obvious follow-ons.
One variation is seed definition. Compare per-function seeds, per-string seeds, and rarity-weighted seeds. The last option is appealing because `StringTagMatch` already preserves `global_count`, and rare strings are usually more diagnostic than common ones.
Another variation is degree normalization. Raw additive scoring favors functions that sit above many matching descendants. That is partly what we want, but it also rewards generic coordinator functions. A normalized variant could divide contributions by a degree term and ask a different question: how concentrated is the evidence in this function's neighborhood?
A thresholded view is also worth trying. Instead of showing a continuous score ramp, let the user set a minimum score and mark only functions above that threshold. That could make contiguous regions easier to spot.
A comparison mode would be useful as well. Showing direct-only and propagated views side by side would make it easy to see whether the larger neighborhood adds a coherent region or just noise.
## How to evaluate the idea
The function grid is address-ordered, so this experiment is really about locality in address space. The first thing to look for is whether shallow propagation expands a direct-hit cluster into a still-coherent region. Good test cases are binaries where we already expect a compact static-library region, such as zlib, OpenSSL, or sqlite3.
It would also help to add a few quantitative summaries. The page could report the smallest contiguous address span containing most of the score mass, how many separate spans remain above a threshold, and how those numbers change as depth increases. That would make the result less subjective.
The concept is worth implementing as an experiment. The hard data is already present in the report model, and a conservative first pass can stay mostly inside the HTML renderer. If shallow caller propagation still yields compact regions for known libraries, then richer weighting models are worth exploring. If it smears immediately, that is still a useful result and tells us that the direct-hit view is already close to the limit of the available signal.

View File

@@ -1,502 +0,0 @@
# mapa IDA/IDALib port plan
Goal: preserve the current `scripts/mapa.py` report while replacing the Lancelot/BinExport2 backend with IDALib. Use the `ida-domain` API for normal analysis where it cleanly maps to the needed data. Reuse the existing capa IDA backend as the semantic reference for imports, thunks, string resolution, function naming, and database bootstrap.
This revision adds concrete guidance from capa's existing IDA backend.
## Sources used
Primary sources:
- `scripts/mapa.py`
- https://github.com/HexRaysSA/ida-claude-plugins/blob/main/plugins/ida-plugin-development/skills/ida-domain-api/SKILL.md
- `capa/features/extractors/ida/idalib.py`
- `capa/features/extractors/ida/extractor.py`
- `capa/features/extractors/ida/helpers.py`
- `capa/features/extractors/ida/file.py`
- `capa/features/extractors/ida/function.py`
- `capa/features/extractors/ida/insn.py`
- `capa/features/extractors/ida/basicblock.py`
- `capa/ida/helpers.py`
- `capa/loader.py`
- `tests/fixtures.py`
- `tests/test_idalib_features.py`
- `capa/features/common.py`
- `idals.py` from https://github.com/williballenthin/idawilli/tree/master/idals
Domain API references:
- Overview: https://ida-domain.docs.hex-rays.com/llms.txt
- Getting started: https://ida-domain.docs.hex-rays.com/getting_started/index.md
- Examples: https://ida-domain.docs.hex-rays.com/examples/index.md
- Database: https://ida-domain.docs.hex-rays.com/ref/database/index.md
- Functions: https://ida-domain.docs.hex-rays.com/ref/functions/index.md
- Flowchart: https://ida-domain.docs.hex-rays.com/ref/flowchart/index.md
- Instructions: https://ida-domain.docs.hex-rays.com/ref/instructions/index.md
- Xrefs: https://ida-domain.docs.hex-rays.com/ref/xrefs/index.md
- Strings: https://ida-domain.docs.hex-rays.com/ref/strings/index.md
- Segments: https://ida-domain.docs.hex-rays.com/ref/segments/index.md
- Names: https://ida-domain.docs.hex-rays.com/ref/names/index.md
- Entries: https://ida-domain.docs.hex-rays.com/ref/entries/index.md
## Key correction after reviewing capa
The current `capa/features/extractors/ida/` backend is IDALib-capable, but it is not written against `ida-domain` today. It uses the classic IDA Python surface: `idapro`, `idaapi`, `idautils`, `idc`, `ida_bytes`, `ida_funcs`, `ida_segment`, and related modules.
That means the correct migration strategy is not "invent a fresh IDA collector from scratch". The correct strategy is:
- use capa's existing IDA backend as the behavioral spec and a source of proven heuristics
- implement the new collector against `ida-domain` wherever the needed API exists cleanly
- treat the existing legacy helpers as reference material, not as the default implementation path
- only introduce lower-level `ida_*` calls if the implementer can point to a concrete `ida-domain` gap and document it
This is especially important for:
- IDALib database bootstrap
- import and extern enumeration
- thunk-chain resolution
- string/data-reference chasing
- alternative function names from comments
- known IDA version caveats
## Current mapa output that must remain stable
The current script renders these sections, in this order:
- `meta`
- `modules`
- `sections`
- `libraries`
- `functions`
Accepted intentional change for the port: remove `modules` entirely.
Inside `functions`, it currently:
- iterates functions in address order
- prints `thunk ...` for thunk functions
- prints `function ...` for normal functions
- inserts source-file separators when the primary Assemblage source path changes, ignoring gaps with missing Assemblage data
- annotates functions with Assemblage source name and file when available
- forwards callers through thunk targets so callers of a thunk appear on the real target
- prints `B/E/I` as basic blocks / CFG edges / instructions plus total instruction bytes
- prints capa rule names attached to the function
- prints `calls:` for internal non-library callees
- prints `api:` for import/external/library callees
- prints `string:` for referenced strings
That output contract should stay stable unless a deliberate change is accepted and documented.
## What capa already gives you
There are three reusable assets.
The first is a proven `ida-domain` database-resolution and session-opening path in `../idawilli/idals/idals.py`. That code already does the part mapa needs most: accept either a raw sample or an existing `.i64` / `.idb`, hash raw inputs, cache analyzed databases by SHA-256, and guard concurrent access.
The second is capa's proven IDALib bootstrap path:
- `capa/features/extractors/ida/idalib.py`
- `capa/loader.py`
- `tests/fixtures.py`
The third is capa's proven semantic definition of the data mapa cares about:
- function enumeration: `capa/features/extractors/ida/extractor.py`
- segments, imports, externs: `capa/features/extractors/ida/file.py`, `helpers.py`
- callers and function names: `capa/features/extractors/ida/function.py`
- API calls, call targets, strings, mnemonics, offsets: `capa/features/extractors/ida/insn.py`
- CFG/basic blocks: `capa/features/extractors/ida/basicblock.py`, `helpers.py`
- hashes, architecture, imagebase, file-type helpers: `capa/ida/helpers.py`
The practical split is simple. Use `idals.py` as the model for database resolution, caching, and guarded open/close. Use capa's IDA backend as the model for analysis semantics and parity behavior. Use `ida-domain` as the primary query surface inside the collector. Do not depend on Lancelot anywhere in the new implementation.
## Important behavioral facts from capa's backend
1. IDALib bootstrap in capa is not a bare `Database.open(...)` call.
- It uses `capa.features.extractors.ida.idalib.has_idalib()` and `load_idalib()`.
- It then calls `idapro.open_database(..., run_auto_analysis=True, args="-Olumina:host=0.0.0.0 -Osecondary_lumina:host=0.0.0.0 -R")`.
- It disables console chatter with `idapro.enable_console_messages(False)`.
- It waits for analysis completion with `ida_auto.auto_wait()`.
2. Capa explicitly disables Lumina during IDALib analysis.
- Reason documented in `capa/loader.py`: Lumina can inject bad names or overwrite debug-info names.
- mapa should do the same unless there is a deliberate decision to trust Lumina.
3. Capa requests resource loading with `-R`.
- This matters for some file-scope extraction.
- `tests/test_idalib_features.py` notes that IDA 9.0 had resource-loading limitations under IDALib.
4. The existing `IdaFeatureExtractor.get_functions()` is not a direct drop-in for mapa.
- It calls `helpers.get_functions(skip_thunks=True, skip_libs=True)`.
- mapa must render thunk functions, so mapa needs its own full function inventory.
5. Capa already encodes the thunk semantics mapa needs.
- `THUNK_CHAIN_DEPTH_DELTA` is defined in `capa/features/common.py` as `5`.
- `capa/features/extractors/ida/insn.py:check_for_api_call()` follows code refs, then data refs, through thunk chains to resolve imports/externs.
- `capa/features/extractors/binexport2/__init__.py:BinExport2Analysis._compute_thunks()` shows the intended "single-target thunk chain" rule: only resolve through chains with exactly one callee per thunk hop.
6. Capa already encodes mapa-relevant string semantics.
- `helpers.find_data_reference_from_insn(insn, max_depth=10)` follows single data-reference chains.
- `helpers.find_string_at(ea)` looks for C strings and works around an IDA Unicode-decoding quirk.
- `insn.extract_insn_string_features()` and `extract_insn_bytes_features()` use that behavior.
7. Capa already has the import and extern logic mapa needs.
- `helpers.get_file_imports()` enumerates import modules and normalizes names.
- `helpers.get_file_externs()` enumerates functions from `SEG_XTRN` segments.
- `file.extract_file_import_names()` shows how capa treats name-vs-ordinal imports.
8. Capa already has alternative-name logic.
- `helpers.get_function_alternative_names()` parses comments that look like `Alternative name is 'foo'`.
- `function.extract_function_alternative_names()` exposes them as `FunctionName` features.
9. Capa already has the CFG behavior mapa should match.
- `helpers.get_function_blocks()` uses `idaapi.FlowChart(f, flags=(idaapi.FC_PREDS | idaapi.FC_NOEXT))`.
- The `NOEXT` part matters: it avoids useless external blocks contaminating B/E/I counts.
10. The test suite documents real version caveats.
- IDA 9.0 and 9.1 had some ELF symbol issues.
- IDA 9.0 under IDALib had resource-loading limitations.
- mapa validation should account for those when comparing outputs.
## Database resolution and caching pattern to copy from idals
`../idawilli/idals/idals.py` is the best starting point for the "raw file or existing database" problem. It already solves the user-visible behavior mapa needs.
Its pattern is:
- if the input suffix is `.i64` or `.idb`, use that database directly
- otherwise compute hashes for the raw file with `compute_file_hashes()` and use the SHA-256 as the cache key
- store the generated database in a common cache directory, currently `~/.cache/hex-rays/idals/<sha256>.i64`
- serialize access with `database_access_guard()`
- detect an already-open or unpacked database by watching for the companion `.nam` file
- use an advisory `flock` on `<db>.lock` to avoid concurrent writers
- after acquiring the lock, re-check `.nam` to close the TOCTOU hole
- on a cache miss, analyze the raw sample with `Database.open(..., IdaCommandOptions(auto_analysis=True, new_database=True, output_database=..., load_resources=True), save_on_close=True)`
- after the cached database exists, open it read-only with `open_database_session(..., auto_analysis=False)` and `save_on_close=False`
mapa should adopt that pattern with only minor changes:
- use the same SHA-256-keyed cache strategy
- keep the same locking protocol
- put the cache in a mapa-specific directory, or intentionally share the idals directory if reuse is desired
- expose the cache location as a small helper or constant so it can be documented and tested
- reuse the computed SHA-256 for the `meta` section instead of hashing the sample twice
There is one deliberate integration check to make here. `idals.py` uses `ida-domain`'s `Database.open(...)`, while capa's bootstrap path uses `idapro.open_database(...)` and disables Lumina explicitly. For mapa, prefer the `idals.py` open-and-cache pattern because it already handles the database lifecycle correctly. Then verify whether the `ida-domain` open path offers an equivalent way to suppress Lumina. If it does, use it. If it does not, decide whether that matters for mapa output or whether database creation should fall back to capa's `idapro.open_database(...)` path while cached-session opens keep the `idals.py` pattern.
## Recommended architecture
Do not port `scripts/mapa.py` by replacing each Lancelot query inline. Split it into four layers:
- CLI and argument parsing
- IDA bootstrap and environment setup
- report collection
- rendering
Use backend-neutral dataclasses for the report model:
- `MapaReport`
- `MapaMeta`
- `MapaSection`
- `MapaLibrary`
- `MapaFunction`
- `MapaCall`
- `MapaString`
- `AssemblageRecord`
The collector should have one primary data-access layer: `ida-domain` for functions, flowcharts, instructions, strings, names, segments, xrefs, and database lifecycle. Existing capa helpers remain useful as semantic references and regression oracles.
## Best practical strategy
The implementation target is an IDALib-only collector with `ida-domain` as the primary API surface.
Concretely:
- use `ida-domain` for function inventory, instruction iteration, CFG stats, name lookup, segment listing, xref walking, and cached database open/create
- use the existing capa IDA code to understand the intended semantics for imports, externs, thunk resolution, data-reference chasing, and alternative names
- if the implementer discovers a real `ida-domain` gap, document the gap explicitly before introducing lower-level `ida_*` calls
That gives the next implementer a clear target: no Lancelot, no default hybrid backend, and no legacy helper dependency unless a concrete gap forces it.
## Concrete mapping from mapa fields to capa/backend logic
| mapa field/behavior | First source to consult | Recommended implementation |
|---|---|---|
| IDALib discovery | `capa/features/extractors/ida/idalib.py` | Reuse `has_idalib()` / `load_idalib()` logic if mapa needs to bootstrap `idapro` availability itself. |
| resolve/open DB | `../idawilli/idals/idals.py` | Use `resolve_database()` and `open_database_session()` as the primary pattern. |
| cache key and cache DB path | `../idawilli/idals/idals.py` | Hash raw inputs once and key cached databases by SHA-256. |
| Lumina suppression policy | `capa/loader.py`, `tests/fixtures.py` | Carry forward capa's disable-Lumina behavior if the chosen open path supports it. |
| sample hashes | `../idawilli/idals/idals.py`, `capa/ida/helpers.py`, `extractor.py` | Reuse the SHA-256 computed for cache lookup; prefer IDA-provided hashes when opening an existing database. |
| image base | `capa/ida/helpers.py` | Prefer IDA imagebase helper; use Domain API only if it exposes the same value clearly. |
| sections | `helpers.get_segments()`, `file.extract_file_section_names()` | Use `db.segments`; match capa's header-segment filtering rules if needed. |
| import modules/functions | `helpers.get_file_imports()` | Implement with `ida-domain` if the needed import data is exposed cleanly; otherwise use this helper as the semantic reference for normalization. |
| externs | `helpers.get_file_externs()` | Match this behavior with `ida-domain` if possible; if not, document the missing API and then fall back deliberately. |
| function inventory | `extractor.py`, `helpers.get_functions()` | Do not use extractor's default function list because it skips thunks/libs. Build a mapa-specific inventory with `ida-domain`. |
| callers | `function.extract_function_calls_to()` | Reproduce the same behavior with domain xrefs and compare against this helper during validation. |
| call targets | `insn.extract_function_calls_from()` | Reproduce the same behavior with domain xrefs and compare against this helper during validation. |
| API calls | `insn.extract_insn_api_features()` | Match the import/extern/thunk resolution semantics exposed by this function. |
| string refs | `helpers.find_data_reference_from_insn()`, `find_string_at()` | Match the same single-ref-chain behavior and max depth `10`. |
| function names | `function.extract_function_name()`, alternative-name helpers | Use normal name, demangled name, alternative names, and render Assemblage annotations separately without renaming the IDA function. |
| B/E/I stats | `helpers.get_function_blocks()` | Match `PREDS | NOEXT` semantics; use domain flowchart if possible. |
| function ordering | current `scripts/mapa.py` | Keep address order for deltas and rendering stability. |
## Step-by-step implementation plan
### 1. Freeze the current mapa output
Before editing code, save golden outputs from the current `scripts/mapa.py` for:
- a sample with normal internal calls and imports
- a sample with thunk-heavy call patterns
- a sample with capa and Assemblage overlays
These are the parity targets.
### 2. Add `resolve_database()` and `open_database_session()` helpers
Base these directly on `../idawilli/idals/idals.py`.
`resolve_database()` should:
- accept either a raw sample or an existing `.i64` / `.idb`
- return existing databases unchanged
- hash raw inputs once and use SHA-256 as the cache key
- place cached databases under the XDG cache root in `mandiant/mapa/`, i.e. `$XDG_CACHE_HOME/mandiant/mapa/` when set, else `~/.cache/mandiant/mapa/`
- guard cache creation with the same `.nam` + `flock` protocol from `database_access_guard()`
- analyze cache misses with `Database.open(..., IdaCommandOptions(auto_analysis=True, new_database=True, output_database=..., load_resources=True), save_on_close=True)`
- keep cache creation transparent in normal mode and only log cache details in verbose/debug mode
`open_database_session()` should:
- use the same guard before opening the database
- open cached or user-supplied databases with `new_database=False`
- default to `save_on_close=False`
- optionally run `ida_auto.auto_wait()` when `auto_analysis=True`
This should become mapa's primary database lifecycle.
Then add one capa-derived check on top: if the chosen open path can suppress Lumina, do so. If the `ida-domain` path cannot, verify whether that difference affects naming enough to justify a fallback to capa's `idapro.open_database(...)` path during cache creation.
### 3. Introduce a backend-neutral report model
Before touching the collector logic, split `scripts/mapa.py` into:
- CLI
- collector
- renderer
- input-overlay parsing for capa JSON and Assemblage CSV
Keep the renderer stable. The collector should return value objects only.
### 4. Build a mapa-specific function inventory
Do not use `IdaFeatureExtractor.get_functions()` as-is, because it skips thunks and library functions.
Instead:
- enumerate all functions in address order with `ida-domain` if possible
- keep flags for `is_thunk`, `is_library`, and `is_external`
- retain enough metadata to render thunks, skip imports from the function list, and compute deltas
For parity, compare your inventory against:
- `helpers.get_functions(skip_thunks=False, skip_libs=False)`
- IDA function flags such as `FUNC_THUNK` and `FUNC_LIB`
### 5. Recreate import and extern logic using capa's semantics
For the `libraries` section and for `api:` classification, start from the behavior encoded in:
- `helpers.get_file_imports()`
- `helpers.get_file_externs()`
That behavior already handles:
- PE imports with `__imp_` prefixes
- ELF imports with `@@version` suffixes
- ordinal imports
- extern functions in `SEG_XTRN`
The implementation target remains `ida-domain`. The next implementer should reproduce this behavior there if the API surface is available. If a real gap appears, document the gap before introducing any fallback.
### 6. Implement thunk resolution with capa's exact semantics
Build one cached helper, for example `resolve_thunk_target(ea)`, and use it everywhere.
Behavior should match capa's existing semantics:
- maximum thunk-chain depth: `THUNK_CHAIN_DEPTH_DELTA == 5`
- follow code refs first, then data refs if needed
- only resolve through single-target chains
- stop on cycles, zero-target, or multi-target cases
- allow the final resolved target to be an import or extern
Use two existing code paths as references:
- `capa/features/extractors/ida/insn.py:check_for_api_call()`
- `capa/features/extractors/binexport2/__init__.py:BinExport2Analysis._compute_thunks()`
This helper must drive:
- caller forwarding
- `calls:` lines
- `api:` lines
- capa match attachment when a match lands in a thunk
### 7. Use capa features as references, not as the collector
Do not build mapa by instantiating `IdaFeatureExtractor()` and aggregating capa features into the final report. That would create a hidden second backend and blur the migration target.
Instead, query IDA directly through `ida-domain` and use the capa feature-extraction code as a reference when the intended semantics are unclear. The implementer should compare specific results against:
- `Characteristic("calls to")`
- `Characteristic("calls from")`
- `API`
- `String`
- `FunctionName`
- `Mnemonic`
This keeps the delivered collector IDALib-only while still giving the implementer a precise oracle for parity checks.
### 8. Recreate callers and callees
Use a precomputed normalized call graph. Do not compute callers ad hoc during rendering.
For each non-import function:
- walk its instructions
- identify call or jump-to-import patterns using the same logic as `extract_insn_api_features()`
- resolve thunk chains
- classify the resolved target as internal or API/import/extern
- record caller and callee relationships on resolved targets
For parity, verify against these capa semantics:
- function callers: `function.extract_function_calls_to()`
- outgoing calls: `insn.extract_function_calls_from()`
- API calls: `insn.extract_insn_api_features()`
Important detail: the existing helper treats both `call` and `jmp` as API-bearing instructions in some thunk/import cases. Do not assume `call` only.
### 9. Recreate B/E/I with capa's CFG semantics
For each rendered function:
- basic blocks: count basic blocks using the equivalent of `helpers.get_function_blocks()`
- edges: sum successors across those blocks
- instructions: count instructions across those blocks
- bytes: sum instruction sizes
The important parity rule is the CFG construction mode:
- match `idaapi.FlowChart(f, flags=(idaapi.FC_PREDS | idaapi.FC_NOEXT))`
If the Domain API flowchart differs, use it only if it can match the no-external-block behavior. Otherwise use a tiny legacy helper for block enumeration and keep everything else in the Domain API.
### 10. Recreate string extraction with capa's data-ref chasing
Do not just test `db.strings.get_at(xref.to_ea)` and stop. That will miss the semantics capa already uses.
Start from capa's behavior:
- follow a single data-reference chain from the instruction, up to depth `10`
- if the final target is a string, emit it
- otherwise it may be bytes, not a string
For mapa specifically:
- only render strings, not raw bytes
- deduplicate by rendered string value, matching the current script
- trim trailing whitespace the same way the current script does
Reference implementation:
- `helpers.find_data_reference_from_insn()`
- `helpers.find_string_at()`
- `insn.extract_insn_string_features()`
### 11. Reuse capa's name and alternative-name semantics
For the function display name, use this order:
- demangled name
- IDA function name
- alternative names from comments if they help and the main name is poor
- final fallback such as `sub_{ea:x}`
Render Assemblage source name and source file as annotations beneath the function header. Do not mutate the database just to apply Assemblage data.
Reference points:
- `function.extract_function_name()`
- `helpers.get_function_alternative_names()`
### 12. Reattach capa matches by containing function
Keep the current capa JSON input format, but simplify the mapping logic.
Recommended algorithm:
- parse the capa JSON as today
- for each absolute match address, ask IDA for the containing function
- if that function is a thunk, resolve it through the thunk resolver
- attach the rule name to the resolved function start EA
- warn when no containing function exists
This is simpler than the current BinExport-specific mapping and aligns better with IDA's data model.
### 13. Rebuild top-level sections using capa-backed semantics
For `meta`:
- sample name: input path or IDA metadata
- hashes: prefer IDA-provided hash helpers in `capa/ida/helpers.py`
- architecture: reuse the logic in `capa/features/extractors/ida/global_.py`
- timestamp: define explicitly, because BinExport's old field is gone
For `sections`:
- use `ida-domain` segments if possible
- match capa's `skip_header_segments` behavior if needed
For `libraries`:
- use `helpers.get_file_imports()` and group/display import modules accordingly
For `modules`:
- remove the section entirely as an intentional interface change
- document the removal in the spec so future ports do not accidentally reintroduce BinExport-specific `module` semantics
### 14. Add tests using capa's existing IDALib pattern
Pure tests should cover:
- Assemblage parsing and RVA-to-VA mapping
- thunk-chain resolution
- import/extern normalization
- string de-duplication and trimming
- final rendering from a prebuilt `MapaReport`
Integration tests should reuse the same lifecycle mapa will use in production:
- resolve the input to an existing or cached database
- open it through the guarded session helper
- collect the mapa report
- compare key functions and sections against golden outputs
Use `tests/test_idalib_features.py` as the reference for version-specific skips and expectations, and use `../idawilli/idals/idals.py` as the reference for database resolution and guarded open/close behavior.
### 15. Validate parity and document deliberate differences
Compare the new output against the frozen Lancelot output on the supplied samples.
Verify specifically:
- function ordering
- thunk rendering
- thunk-forwarded callers
- internal vs API call classification
- libraries/imports section contents
- string extraction
- B/E/I counts
- Assemblage annotations and source-file separators
- capa attachment
Document every known delta. The likely ones are:
- function discovery differences between IDA and Lancelot
- the intentional removal of the `modules` section
- symbol differences across IDA versions, especially ELF on older 9.x
- resource-dependent differences on older IDALib versions
## Minimal implementation checklist
A good order of work is:
1. freeze current mapa outputs
2. add backend-neutral report dataclasses
3. add `resolve_database()` and `open_database_session()` helpers modeled on `idals.py`
4. implement the XDG cache path and quiet-by-default cache creation behavior
5. build a full mapa function inventory that includes thunks
6. port sections and metadata
7. implement import/extern classification to match capa semantics
8. implement the thunk resolver using capa's existing semantics
9. build normalized caller/callee/API indexes
10. port B/E/I using `PREDS | NOEXT`-equivalent CFG traversal
11. port string extraction using capa's data-ref-chain semantics
12. port Assemblage overlay handling
13. port capa JSON address-to-function attachment
14. remove the `modules` section and document the interface change
15. compare outputs against golden references
16. document any proven `ida-domain` gaps and any intentional differences in spec/design during implementation
## Resolved decisions for the implementation handoff
Record these in `spec.md` or `design.md` during implementation so the behavior stays stable.
- accepted inputs: raw binary and existing IDA databases
- cached databases live under the XDG cache root in `mandiant/mapa/`
- mapa may create and persist cached IDA databases automatically
- cache creation stays quiet in normal mode and only surfaces in verbose/debug logging
- Lumina stays disabled for now
- `meta.ts` becomes `datetime.now()`
- remove the `modules` section from the report
- the implementation target is IDALib only and all Lancelot dependencies should be removed
- assume `ida-domain` is sufficient unless the implementer can demonstrate a specific missing API; any lower-level fallback must be justified and documented

View File

@@ -1,169 +0,0 @@
# mapa plan: vendor Quantum Strand string tags
This plan describes how to extend mapa so every rendered `string:` line can carry right-aligned tags from Quantum Strand's string databases. The implementation target is broader than the earlier draft. It should include the full set of useful database-backed tags now: open-source library tags, CRT tags, expert tags, winapi tags, global-prevalence tags, and junk-code tags. The feature is still strictly limited to database matches. It must not import Quantum Strand or FLOSS as a runtime library, and it must not pull in Quantum Strand's whole-file layout analysis, structure tagging, file offsets, encoding columns, or any other non-database context.
The implementor should work only in `mapa/`, `tests/`, and packaging/docs as needed. Nothing belongs under `capa/`. The sibling checkout at `../quantumstrand/` is only a research source and a place to copy vendored resources from once.
## What mapa should do when this lands
When mapa emits a string referenced by a function, the left side should stay in the current mapa style and the right side should show database-derived tags. The renderer should use Rich width calculations so the tag column stays visible and the string text is clipped first if necessary. The output should continue to be function-centric and concise.
A representative result looks like this:
```text
string: "invalid distance code" #zlib
string: "GetProcAddress" #winapi
string: "!This program cannot be run in DOS mode." #common
string: "CurrencyDispenser1" #capa
string: "_initterm" #winapi #code-junk
```
The model should retain richer match metadata than the text renderer shows. The renderer only needs visible tags. The report model should still remember which database family matched and any associated metadata that might matter later.
## Non-goals
This feature is not a Quantum Strand port. Do not bring over its file-layout tree, structure labels like `import table`, section box rendering, code-vs-data analysis, duplicate-string tagging, relocation tagging, xor-decoding tags, or hidden-string filtering. Those features solve a different problem. mapa already knows which function references a string and only needs database-backed context for that string.
mapa should not start suppressing strings based on database matches. Even if an upstream expert rule has `action: hide`, mapa should still render the string. Store the action in metadata if it exists, but do not use it to drop rows.
## Upstream resources to copy
All of the following resources should be vendored into mapa under a new package such as `mapa/string_tags/data/`.
Library and CRT databases:
```bash
mkdir -p mapa/string_tags/data/oss mapa/string_tags/data/crt
cp ../quantumstrand/floss/qs/db/data/oss/*.jsonl.gz mapa/string_tags/data/oss/
cp ../quantumstrand/floss/qs/db/data/crt/msvc_v143.jsonl.gz mapa/string_tags/data/crt/
```
This copies these library databases:
`brotli.jsonl.gz`, `bzip2.jsonl.gz`, `cryptopp.jsonl.gz`, `curl.jsonl.gz`, `detours.jsonl.gz`, `jemalloc.jsonl.gz`, `jsoncpp.jsonl.gz`, `kcp.jsonl.gz`, `liblzma.jsonl.gz`, `libpcap.jsonl.gz`, `libsodium.jsonl.gz`, `mbedtls.jsonl.gz`, `openssl.jsonl.gz`, `sqlite3.jsonl.gz`, `tomcrypt.jsonl.gz`, `wolfssl.jsonl.gz`, `zlib.jsonl.gz`, plus the CRT database `msvc_v143.jsonl.gz`.
Expert, winapi, prevalence, and junk-code databases:
```bash
mkdir -p mapa/string_tags/data/expert mapa/string_tags/data/winapi mapa/string_tags/data/gp
cp ../quantumstrand/floss/qs/db/data/expert/capa.jsonl mapa/string_tags/data/expert/
cp ../quantumstrand/floss/qs/db/data/winapi/apis.txt.gz mapa/string_tags/data/winapi/
cp ../quantumstrand/floss/qs/db/data/winapi/dlls.txt.gz mapa/string_tags/data/winapi/
cp ../quantumstrand/floss/qs/db/data/gp/gp.jsonl.gz mapa/string_tags/data/gp/
cp ../quantumstrand/floss/qs/db/data/gp/cwindb-native.jsonl.gz mapa/string_tags/data/gp/
cp ../quantumstrand/floss/qs/db/data/gp/cwindb-dotnet.jsonl.gz mapa/string_tags/data/gp/
cp ../quantumstrand/floss/qs/db/data/gp/junk-code.jsonl.gz mapa/string_tags/data/gp/
cp ../quantumstrand/floss/qs/db/data/gp/xaa-hashes.bin mapa/string_tags/data/gp/
cp ../quantumstrand/floss/qs/db/data/gp/yaa-hashes.bin mapa/string_tags/data/gp/
```
The implementor should also create `mapa/string_tags/SOURCES.md` and record the upstream repo path, upstream commit, copied files, and any code copied or rewritten from upstream. The research for this plan used upstream commit `73eb1541e896c065fc694ba7b01067f56871631b`.
## Upstream code to read before implementing
The useful Quantum Strand code is small. Before writing anything, read `../quantumstrand/floss/qs/db/oss.py`, `expert.py`, `gp.py`, `winapi.py`, `../quantumstrand/floss/qs/main.py`, and the tests `../quantumstrand/tests/test_oss_db.py`, `test_winapi_db.py`, `test_gp_db.py`, `test_qs.py`, and `test_qs_pma0101.py`.
The only part of `floss/qs/main.py` that should influence mapa design is the small tagging and Rich rendering logic. Leave the rest of that file behind.
## Behavior that must be preserved from Quantum Strand
Quantum Strand's database lookups are simple and should be preserved exactly.
The OSS and CRT databases are gzip-compressed JSONL files. Each line contains one `OpenSourceString` record with fields such as `string`, `library_name`, `library_version`, `file_path`, `function_name`, and `line_number`. Lookup is exact by `string`. A match emits tag `#<library_name>`. The CRT file uses `library_name: "msvc"`, so it emits `#msvc`.
The expert database file is plain `capa.jsonl`, not gzip-compressed despite what the readme says. Each record is an `ExpertRule` with `type`, `value`, `tag`, `action`, and descriptive metadata. Matching behavior follows `floss/qs/db/expert.py`: exact string match for `type == "string"`, substring search for `type == "substring"`, and `re.compile(rule.value).search(...)` for `type == "regex"`. A match emits `rule.tag`, which in the current vendored file is typically `#capa`.
The winapi database is two gzip-compressed text files. `dlls.txt.gz` is loaded into a lowercase set and matched against `string.lower()`. `apis.txt.gz` is loaded into a case-sensitive set and matched against the string verbatim. A match from either source emits `#winapi`.
The global-prevalence JSONL databases are `gp.jsonl.gz`, `cwindb-native.jsonl.gz`, and `cwindb-dotnet.jsonl.gz`. Quantum Strand loads them as `StringGlobalPrevalenceDatabase` and does exact string lookup. Any hit in any of those databases emits `#common`.
The junk-code JSONL database is `junk-code.jsonl.gz`. It has the same file format as the prevalence JSONL databases, but Quantum Strand treats it separately. Any hit emits `#code-junk`.
The hash databases are `xaa-hashes.bin` and `yaa-hashes.bin`. Each file is a flat sequence of 8-byte truncated MD5 digests. Quantum Strand computes `md5(string.encode("utf-8")).digest()[:8]` and checks membership in the set. A hit emits `#common`.
These match rules are the core of the feature. They are much more important than matching Quantum Strand's internal class names.
## Recommended mapa package layout
Add a dedicated package under `mapa/`. A good layout is `mapa/string_tags/__init__.py`, `model.py`, `loaders.py`, `tagger.py`, a `data/` subtree, and `SOURCES.md`.
Do not copy upstream modules verbatim unless necessary. A mapa-local rewrite is cleaner because the code is short and mapa needs a narrower API than Quantum Strand.
`model.py` should define two small dataclasses. `StringTagMatch` should capture one concrete match with fields like `tag`, `source_family`, `source_name`, `kind`, and optional metadata such as `library_name`, `library_version`, `file_path`, `function_name`, `line_number`, `note`, `description`, `action`, `global_count`, `encoding`, and `location`. `StringTagResult` should hold the final sorted tag tuple plus the tuple of `StringTagMatch` entries.
`loaders.py` should own the file-format readers. It should use `gzip`, `hashlib`, `msgspec`, and `importlib.resources`. There is no reason to invent a new parser. This project already depends on `msgspec`, which is also what Quantum Strand uses for the JSONL formats.
`tagger.py` should own the process-wide cached tagger. A simple shape is `load_default_tagger()` plus an object with `tag_string(raw: str) -> StringTagResult`. The tagger should lazily load and cache the vendored databases once per process.
## Report-model changes
`mapa/model.py` should be extended so a rendered mapa string can carry tags and match metadata. The minimal change is to add `tags` and `tag_matches` to `MapaString`. The existing `value` field should remain the display string. If the implementor wants to preserve the exact raw string too, add a `raw_value` field. That is worthwhile because mapa currently trims trailing whitespace before storing the string, and exact-match databases should run against the untrimmed value.
The most important collector rule is this: match against the raw extracted string first, derive the display string second, and deduplicate on the display string only after the database matches have been computed. If two raw strings collapse to the same display value after `rstrip()`, their tags and metadata should be merged onto the single rendered `MapaString` entry.
## Collector guidance
The collector should keep its existing string-discovery behavior. This plan does not ask the implementor to revisit how mapa follows data references or how it discovers a string in IDA. Once `collect_report()` recovers a raw string, the new tagging pipeline begins.
A good implementation sequence inside `mapa/collector.py` is: recover `raw_value`, call the vendored tagger on `raw_value`, compute `display_value = raw_value.rstrip()`, skip empty display values, and then either create or update the `MapaString` entry for that display value. The update path should union tag names and append only unique `StringTagMatch` values. The final `MapaString.tags` should be sorted for stable rendering and stable tests.
This is the one place where the current mapa behavior is most likely to cause silent misses. If the implementor tags only the trimmed string, exact-match results from Quantum Strand can be lost.
## Tag aggregation rules
The model should preserve all concrete matches, even when multiple databases emit the same visible tag. This matters most for `#common`, because a string may hit several prevalence databases and one or both hash databases. The visible tag list should deduplicate tag names, but the metadata should preserve every source that contributed.
The tagger should produce tags in a deterministic order. A simple stable order is alphabetical order on the tag name after aggregation. The metadata order should also be deterministic, for example by `(tag, source_family, source_name, library_name, note, value)`.
## Rendering guidance
Replace the current plain markup string for `string:` rows with a dedicated Rich `Text` builder. The implementor should read `render_string()` and related helpers in `../quantumstrand/floss/qs/main.py` and copy only the layout idea. The left side is the existing `string: "..."` text. The right side is the space-joined visible tag list. Width should come from Rich's own measurement.
A helper such as `Renderer.render_string_line(value: str, tags: Sequence[str]) -> Text` is sufficient. It should use `self.console.size.width - (self.indent * 2)` as the available width for the line content, build a `Text` object for the left side and another for the right side, reserve at least one separating space, and then align or truncate the left side so the right side stays visible. If the terminal is too narrow for that layout, fall back to a single-column form that still shows the tags.
mapa should adopt one Quantum Strand display rule because it reduces noise without hiding information: when a string has `#common` plus one or more tags that are more specific, omit `#common` from the visible tag column but keep it in `tag_matches` and `MapaString.tags`. That is a rendering choice only. The underlying data should stay intact.
No string row should be hidden by tag policy. `#common` and `#code-junk` may be styled in a muted color. `#capa` may be highlighted. `#winapi` and library tags can use the default string-tag style unless the implementor finds a better minimal palette. The important behavior is visibility and stable alignment. Decorative styling is secondary.
## Recommended visible-tag policy
The rendered tag column should follow these rules.
Show all tags except `#common` when a more-specific tag is also present. Keep `#common` visible only if it is the only tag. Show `#code-junk` even when other tags are present because it communicates a different kind of context than `#common`. Show `#winapi`, `#capa`, and library tags directly. Do not invent mapa-specific aliases or rename the upstream tags.
This yields readable outputs such as `#winapi #code-junk`, `#capa`, `#zlib`, or `#common`. It avoids noisy combinations like `#common #winapi` on every common API name.
## Packaging guidance
If mapa needs to work from an installed package, `pyproject.toml` will need changes because it currently only packages `capa*`. The implementor should include `mapa*` packages and package data under `mapa/string_tags/data/`. The loader should use `importlib.resources.files()` so it works both from a source checkout and an installed wheel.
Even if packaging is deferred, the code should still use `importlib.resources` because it centralizes the resource lookup and avoids hard-coded repository-relative paths.
## Implementation steps for the handoff
The implementor should start by copying the resources, writing `mapa/string_tags/SOURCES.md`, and adding pure loader tests before touching mapa's collector or renderer. Then they should implement the small loader layer for the five upstream database families: OSS/CRT, expert, winapi, prevalence JSONL, and prevalence hash files. After that they should implement the aggregated tagger and add pure tagger tests using known literals from the vendored datasets.
Once the tagger is stable, they should extend `MapaString`, thread tagging through `mapa/collector.py`, and finally switch `mapa/renderer.py` to the Rich `Text`-based string-row helper. Only after all of that is working should they update packaging and installed-resource handling, because those changes are easier to verify when the core behavior already exists.
During implementation they should update `doc/plans/spec.md` and `doc/plans/design.md` to record the final user-visible behavior and the final module layout. The spec should say that `string:` rows may carry right-aligned database tags and should document the visible-tag policy. The design doc should say where the vendored databases live, how the loader is structured, and how the collector merges raw-string matches into deduplicated display strings.
## Concrete test plan
Most tests should avoid IDA. Start with pure loader and tagger tests. Known-good assertions from the upstream data include `"invalid distance code" -> #zlib`, `"IsolationAware function called after IsolationAwareCleanup" -> #msvc`, `"CurrencyDispenser1" -> #capa`, `"kernel32.dll" -> #winapi`, `"CreateFileA" -> #winapi`, and `"!This program cannot be run in DOS mode." -> #common`. `"_initterm"` is a useful mixed case because Quantum Strand's own tests show it as both `#winapi` and `#code-junk`.
Cover the expert database's three rule types: exact, substring, and regex. Add a hash-database test that emits `#common` even when the string is absent from the JSONL prevalence files. Add another case where several databases contribute the same visible tag and the metadata still records every contributing match.
Add renderer tests using a fixed-width Rich console. One test should show that an untagged string row still matches the old mapa format. Another should show that a tagged row keeps the tag column at the right edge. A narrow-width test should show that the string side is clipped first. Another should check that `#common` disappears from the visible tag list when a more-specific tag exists while remaining present in the underlying model.
Finally, add report tests in `tests/test_mapa.py` that build a small `MapaReport` directly. At least one string should carry a library tag, at least one should carry `#common`, and at least one should carry a multi-tag combination like `#winapi #code-junk`. None of these tests should require IDA.
## Performance and memory notes
Vendoring every requested database is still practical, but loading them all eagerly may have a noticeable startup cost. The tagger should therefore be cached process-wide and built lazily. Hash files should be read once into memory as sets of 8-byte digests. The string databases should be decoded once into in-memory maps. This is a good place to keep the code simple first and optimize only if startup becomes a measured problem.
The current compressed data footprint is modest for OSS, CRT, expert, and winapi. The prevalence family is the largest part of the set, especially the hash files. That is another reason to centralize loading and avoid repeated per-function or per-string initialization.
## Notes the implementor should not miss
`floss/qs/db/data/expert/readme.md` says the expert database is gzip-compressed, but the shipped file is plain `capa.jsonl`. Follow the code and the actual file on disk. `floss/qs/db/oss.py` includes the CRT file in `DEFAULT_PATHS`, so treat `#msvc` as part of the library tagging feature. Quantum Strand's `remove_false_positive_lib_strings()` should not be copied because its five-hit threshold is tuned for whole-file triage and fits mapa's per-function presentation poorly.
The main risk in this work is not the file formats. It is silent semantic drift during integration. The implementor should preserve Quantum Strand's exact query rules, tag against the raw string before trimming, keep all concrete matches in metadata, and only simplify at the renderer boundary.

File diff suppressed because one or more lines are too long

View File

@@ -1,92 +0,0 @@
# mapa specification
mapa renders either a structured text report or a self-contained HTML map of a binary's function map: metadata, sections, import libraries, and a per-function breakdown of callers, callees, API calls, strings, CFG stats, capa rule matches, and optional Assemblage ground truth.
## Invocation
```
python scripts/mapa.py <input_file> [--capa <capa.json>] [--assemblage <functions.csv>] [--output {text,html-map}] [--open] [--verbose] [--quiet]
```
`input_file` accepts raw binaries (PE, ELF), existing IDA databases (`.i64`, `.idb`), or any file IDA can analyze. For raw files, mapa automatically creates and caches an analyzed IDA database under the XDG cache root (`$XDG_CACHE_HOME/mandiant/mapa/` or `~/.cache/mandiant/mapa/`) keyed by the file's SHA-256 hash.
## Backend
IDALib only. All analysis uses `ida-domain` as the primary query API. The Lancelot/BinExport2 backend has been removed.
## Output modes
`text` is the default. It renders the existing structured terminal report to stdout.
`html-map` renders a single standalone HTML document to stdout. The page inlines all HTML, CSS, JavaScript, and data. It has a compact metadata summary, a tag control strip, and neighborhood controls at the top, then a split view below. The left pane contains the function grid and the right pane contains the program-string list.
The two panes scroll independently. A draggable vertical divider lets the user resize the panes horizontally. Function squares stay in function-address order and still use the naive left-to-right wrapping layout, but they now wrap within the current width of the left pane rather than the full page width.
Function squares are fixed small blocks laid out left-to-right and wrapped responsively within the left pane. Hovering a function square previews that function's neighborhood. Clicking a function square locks or unlocks that function as the active seed. Hovering a tag previews the neighborhood seeded by every function that directly references a string with that tag. Clicking a tag locks or unlocks that tag seed set. Hovering a string row previews the neighborhood seeded by every function that directly references that string. Clicking a string row locks or unlocks that string seed set. Only one active seed source is used at a time. Any locked function, tag, or string overrides hover previews from the other sources until it is cleared.
The neighborhood controls choose graph direction and traversal depth. Direction can be `callers`, `callees`, or `both`, with `both` as the default. Depth can be 1 through 4, with 3 as the default. For the active seed set, mapa runs a bounded breadth-first search from each seed over the selected direction mode, uses shortest distance from that seed, applies geometric decay `0.5 ** distance`, and adds the per-seed contributions together. Direct seed functions get an outline. Reached functions get a fill intensity scaled within the current view. Functions outside the active neighborhood dim.
The tag strip is sorted by descending distinct-function count, then tag name, and each control shows that count. The page also shows a small legend for heat, seed outline, and dim states. The string list shows each string's virtual address explicitly, preserves duplicate display values at different addresses, and shows visible tags right-aligned in each row. Function hover shows a tooltip containing the same single-function mapa summary content as text mode, plus the current heat, seed state, and shortest distance when a neighborhood is active. Top-level tag controls use only string tags. Capa rule names are not included there.
`--open` is only valid with `--output html-map`. In that mode, mapa writes the HTML to a temporary `.html` file, opens the user's local web browser on the corresponding `file://` URL, and does not write the HTML document to stdout.
The visible-tag policy is the same in both modes: hide `#common` when a more-specific tag is present, but keep it visible when it is the only tag.
## Report sections
The text report renders these sections in order:
1. meta — file name, SHA-256, architecture, timestamp
2. sections — memory segments with address, permissions (rwx), and size
3. libraries — import modules
4. functions — per-function detail in address order
### Functions section
Each function renders as either `thunk <name> @ <address>` or `function <name> @ <address>` followed by:
- source-file separator — a horizontal rule inserted before a function when its primary Assemblage source path differs from the last seen non-empty source path
- `assemblage name:` — source function name from Assemblage, when available
- `assemblage file:` — source file path from Assemblage, when available
- `xref:` — callers with direction arrow and function-order delta
- `B/E/I:` — basic blocks / CFG edges / instructions (total bytes)
- `capa:` — matched capa rule names
- `calls:` — internal non-library callees with direction and delta
- `api:` — import/external/library callees
- `string:` — referenced strings (deduplicated, whitespace-trimmed), with optional right-aligned database tags
Thunk functions show only the header plus any Assemblage lines.
### Assemblage overlay
When `--assemblage` is provided, mapa reads a CSV file and requires these columns: `hash`, `name`, `start`, `end`, and `source_file`.
Assemblage matching works like this:
- mapa resolves the sample SHA-256 from the input file or the opened IDA database.
- mapa keeps only CSV rows whose `hash` matches that SHA-256, case-insensitively.
- mapa treats `start` and `end` as RVAs and adds the IDA database base address to map them to function VAs.
- mapa does not rename functions, callers, or callees from Assemblage data. The displayed function header stays IDA-derived.
- mapa strips the trailing provenance suffix from `source_file` before rendering, for example `C:\src\foo.c (MD5: ...)` renders as `C:\src\foo.c`.
- Exact duplicate CSV rows are deduplicated. If multiple distinct Assemblage rows map to the same function address, mapa renders all of them in CSV order.
- For source-file separators, mapa uses the first Assemblage record's normalized `source_file` path as the function's primary source path.
- Missing Assemblage data does not start or end a source-file run. It does not trigger a separator and does not reset the last seen non-empty source path.
- When a later function has a different primary source path from the last seen non-empty source path, mapa inserts a separator immediately before that function.
## Deliberate interface changes from the Lancelot/BinExport2 version
- The `modules` section has been removed. BinExport2's module concept has no IDA equivalent.
## Decisions
- 2026-03-16: Lumina disabled during database creation via `IdaCommandOptions(plugin_options="lumina:host=0.0.0.0 -Osecondary_lumina:host=0.0.0.0")`, matching capa's `loader.py`. The `plugin_options` field maps to IDA's `-O` switch; embedding `-O` in the value for the second option works because `build_args()` concatenates it verbatim. Resource loading enabled via `load_resources=True` (maps to `-R`).
- 2026-03-16: Cache directory is `$XDG_CACHE_HOME/mandiant/mapa/` (or `~/.cache/mandiant/mapa/`). Separate from idals cache.
- 2026-03-16: `meta.ts` is `datetime.now(UTC).isoformat()` — no longer sourced from BinExport2.
- 2026-03-16: Thunk chain depth limit is 5 (matches capa's `THUNK_CHAIN_DEPTH_DELTA`).
- 2026-03-16: CFG stats use `FlowChartFlags.NOEXT | FlowChartFlags.PREDS` to match capa's block enumeration semantics.
- 2026-03-16: String extraction follows single data-reference chains up to depth 10, matching capa's `find_data_reference_from_insn`.
- 2026-03-16: String rows may carry right-aligned database tags derived from vendored Quantum Strand string databases. Tags include `#<library>` (e.g. `#zlib`, `#openssl`), `#msvc`, `#capa`, `#winapi`, `#common`, and `#code-junk`. Visible tag policy: `#common` is hidden when a more-specific tag is present; `#code-junk` is always shown. Tags are matched against the raw (untrimmed) string value. The underlying model preserves all match metadata even when the renderer suppresses a visible tag.
- 2026-03-16: Assemblage input is a CSV keyed by sample SHA-256. mapa matches rows by `hash`, converts `start`/`end` RVAs to VAs using the database base address, annotates functions with `assemblage name:` and `assemblage file:` lines, and does not override IDA-derived function names.
- 2026-03-16: `--output html-map` uses only string tags in the top control strip, sorts them by descending distinct-function count then name, shows those counts in the controls, and lists program strings by string VA with explicit addresses.
- 2026-03-16: `--output html-map` uses a split view with independently scrolling function and string panes, a draggable vertical divider, right-aligned visible tags in each string row, function/tag/string seed selection, caller/callee/both direction controls, depth controls from 1 to 4, and additive geometric-decay neighborhood heat based on bounded shortest-distance traversal.
- 2026-03-16: `--open` is valid only with `--output html-map`. It writes the HTML report to a temporary `.html` file and opens the local browser on that file instead of writing the HTML to stdout.

View File

@@ -7,7 +7,6 @@
- [ ] Review changes
- capa https://github.com/mandiant/capa/compare/\<last-release\>...master
- capa-rules https://github.com/mandiant/capa-rules/compare/\<last-release>\...master
- [ ] Run `$ bump-my-version bump {patch/minor/major} [--allow-dirty]` to update [capa/version.py](https://github.com/mandiant/capa/blob/master/capa/version.py) and other version files
- [ ] Update [CHANGELOG.md](https://github.com/mandiant/capa/blob/master/CHANGELOG.md)
- Do not forget to add a nice introduction thanking contributors
- Remember that we need a major release if we introduce breaking changes
@@ -37,6 +36,7 @@
- [capa <release>...master](https://github.com/mandiant/capa/compare/<release>...master)
- [capa-rules <release>...master](https://github.com/mandiant/capa-rules/compare/<release>...master)
```
- [ ] Update [capa/version.py](https://github.com/mandiant/capa/blob/master/capa/version.py)
- [ ] Create a PR with the updated [CHANGELOG.md](https://github.com/mandiant/capa/blob/master/CHANGELOG.md) and [capa/version.py](https://github.com/mandiant/capa/blob/master/capa/version.py). Copy this checklist in the PR description.
- [ ] Update the [homepage](https://github.com/mandiant/capa/blob/master/web/public/index.html) (i.e. What's New section)
- [ ] After PR review, merge the PR and [create the release in GH](https://github.com/mandiant/capa/releases/new) using text from the [CHANGELOG.md](https://github.com/mandiant/capa/blob/master/CHANGELOG.md).

View File

@@ -2,21 +2,6 @@
See `capa -h` for all supported arguments and usage examples.
## Ways to consume capa output
| Method | Output / interface | Typical use |
|--------|--------------------|-------------|
| **CLI** | Text (default, `-v`, `-vv`), JSON (`-j`), or other formats | Scripting, CI, one-off analysis |
| [**IDA Pro**](https://github.com/mandiant/capa/tree/master/capa/ida/plugin) | capa Explorer plugin inside IDA | Interactive analysis with jump-to-address |
| [**Ghidra**](https://github.com/mandiant/capa/tree/master/capa/ghidra/plugin) | capa Explorer plugin inside Ghidra | Interactive analysis with Ghidra integration |
| [**Binary Ninja**](https://github.com/mandiant/capa/tree/master/capa/features/extractors/binja) | capa run using Binary Ninja as the analysis backend | Interactive analysis with Binary Ninja integration |
| [**Dynamic (Sandbox)**](https://www.mandiant.com/resources/blog/dynamic-capa-executable-behavior-cape-sandbox) | capa run on dynamic sandbox report (CAPE, VMRay, etc.) | Dynamic analysis of sandbox output |
| [**Web (capa Explorer)**](https://mandiant.github.io/capa/explorer/) | Web UI (upload JSON or load from URL) | Sharing results, viewing from VirusTotal or similar |
## Default vs verbose output
By default, capa shows only *top-level* rule matches: capabilities that are not already implied by another displayed rule. For example, if a rule "persist via Run registry key" matches and it *contains* a match for "set registry value", the default output lists only "persist via Run registry key". This keeps the default output short while still reflecting all detected capabilities at the top level. Use **`-v`** to see all rule matches, including nested ones. Use **`-vv`** for an even more detailed view that shows how each rule matched.
## tips and tricks
### only run selected rules
@@ -26,7 +11,7 @@ For example, `capa -t william.ballenthin@mandiant.com` runs rules that reference
### only analyze selected functions
Use the `--restrict-to-functions` option to extract capabilities from only a selected set of functions. This is useful for analyzing
large functions and figuring out their capabilities and their address of occurrence; for example: PEB access, RC4 encryption, etc.
large functions and figuring out their capabilities and their address of occurance; for example: PEB access, RC4 encryption, etc.
To use this, you can copy the virtual addresses from your favorite disassembler and pass them to capa as follows:
`capa sample.exe --restrict-to-functions 0x4019C0,0x401CD0`. If you add the `-v` option then capa will extract the interesting parts of a function for you.

View File

View File

@@ -1,5 +0,0 @@
import sys
from mapa.cli import main
sys.exit(main())

View File

@@ -1,57 +0,0 @@
from __future__ import annotations
import csv
import logging
from collections import defaultdict
from pathlib import Path
from mapa.model import AssemblageRecord
logger = logging.getLogger(__name__)
# Columns every Assemblage CSV row must provide.
REQUIRED_COLUMNS = frozenset({"hash", "name", "start", "end", "source_file"})


def validate_assemblage_columns(fieldnames: list[str] | None) -> None:
    """Raise ValueError when the CSV header lacks any required column.

    A ``None`` header (empty file) is treated as "all columns missing".
    """
    present = set() if fieldnames is None else set(fieldnames)
    absent = sorted(column for column in REQUIRED_COLUMNS if column not in present)
    if not absent:
        return
    raise ValueError(
        f"assemblage CSV is missing required columns: {', '.join(absent)}"
    )
def load_assemblage_records(
    assemblage_path: Path,
    sample_sha256: str,
    base_address: int,
) -> dict[int, list[AssemblageRecord]]:
    """Load Assemblage rows for *sample_sha256*, grouped by function address.

    Rows are matched case-insensitively on the ``hash`` column, converted via
    ``AssemblageRecord.from_csv_row`` (rebased onto *base_address*), and
    de-duplicated per address while preserving CSV row order.

    Raises ValueError when *sample_sha256* is empty or the CSV header is
    missing required columns.
    """
    if not sample_sha256:
        raise ValueError("sample sha256 is required to load assemblage data")
    wanted_hash = sample_sha256.lower()
    grouped: defaultdict[int, list[AssemblageRecord]] = defaultdict(list)
    deduper: defaultdict[int, set[AssemblageRecord]] = defaultdict(set)
    with assemblage_path.open("rt", encoding="utf-8", newline="") as handle:
        reader = csv.DictReader(handle)
        validate_assemblage_columns(reader.fieldnames)
        for row in reader:
            candidate = (row.get("hash") or "").strip().lower()
            if candidate != wanted_hash:
                continue
            record = AssemblageRecord.from_csv_row(row, base_address=base_address)
            already_seen = deduper[record.address]
            if record in already_seen:
                continue
            already_seen.add(record)
            grouped[record.address].append(record)
    logger.debug(
        "loaded %d assemblage records for %s from %s",
        sum(len(records) for records in grouped.values()),
        wanted_hash,
        assemblage_path,
    )
    return dict(grouped)

View File

@@ -1,235 +0,0 @@
from __future__ import annotations
import argparse
import json
import logging
import sys
import time
import webbrowser
from collections import defaultdict
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Callable
from rich.console import Console
from rich.logging import RichHandler
from rich.theme import Theme
from mapa.assemblage import load_assemblage_records
from mapa.model import AssemblageRecord
logger = logging.getLogger("mapa")
def build_parser() -> argparse.ArgumentParser:
    """Construct the mapa command-line argument parser."""
    parser = argparse.ArgumentParser(description="mapa: binary function map")
    # the sole positional argument: the artifact to analyze
    parser.add_argument(
        "input_file", type=Path, help="path to input file (binary, .i64, or .idb)"
    )
    # optional flags, declared as (flag, kwargs) specs for uniformity
    optional_arguments = (
        ("--capa", {"type": Path, "help": "path to capa JSON results file"}),
        ("--assemblage", {"type": Path, "help": "path to Assemblage CSV file"}),
        (
            "--output",
            {
                "choices": ("text", "html-map"),
                "default": "text",
                "help": "output format",
            },
        ),
        (
            "--open",
            {
                "action": "store_true",
                "help": "when used with --output html-map, write to a temp file and open it in a browser",
            },
        ),
        ("--verbose", {"action": "store_true", "help": "enable verbose logging"}),
        ("--quiet", {"action": "store_true", "help": "disable all output but errors"}),
    )
    for flag, kwargs in optional_arguments:
        parser.add_argument(flag, **kwargs)
    return parser
def validate_output_options(output: str, open_report: bool) -> None:
    """Reject ``--open`` unless the ``html-map`` output format was selected."""
    if not open_report:
        return
    if output == "html-map":
        return
    raise ValueError("--open requires --output html-map")
def write_temp_html_report(content: str, directory: Path | None = None) -> Path:
    """Persist *content* to a new ``mapa-*.html`` temp file and return its path.

    The file is created with ``delete=False`` so it survives after the handle
    closes; the caller owns its lifetime.
    """
    target_dir = str(directory) if directory is not None else None
    handle = NamedTemporaryFile(
        mode="w",
        encoding="utf-8",
        suffix=".html",
        prefix="mapa-",
        delete=False,
        dir=target_dir,
    )
    with handle:
        handle.write(content)
    return Path(handle.name)


def open_html_report(
    content: str,
    opener: Callable[[str], bool] | None = None,
    directory: Path | None = None,
) -> Path:
    """Write *content* to a temp HTML file and open it in the browser.

    *opener* defaults to ``webbrowser.open`` and is injectable for testing.
    Raises RuntimeError when the opener reports failure.
    """
    report_path = write_temp_html_report(content, directory=directory)
    launch = webbrowser.open if opener is None else opener
    if not launch(report_path.as_uri()):
        raise RuntimeError(f"failed to open browser for {report_path}")
    return report_path
def _load_capa_matches(
    capa_path: Path,
    thunk_targets: dict[int, int],
) -> dict[int, set[str]]:
    """Load capa JSON and map matches to function addresses.

    Basic-block match locations are lifted to their containing function via
    the capa layout metadata; matches that land on a thunk are forwarded to
    the thunk's resolved target from *thunk_targets*.
    """
    doc = json.loads(capa_path.read_text())
    # basic-block address -> containing function address, from the capa layout
    block_to_function: dict[int, int] = {}
    for function in doc["meta"]["analysis"]["layout"]["functions"]:
        function_va = function["address"]["value"]
        for basic_block in function["matched_basic_blocks"]:
            block_to_function[basic_block["address"]["value"]] = function_va
    # rule names grouped by the raw (absolute) match address
    matches_at: defaultdict[int, set[str]] = defaultdict(set)
    for rule_name, results in doc["rules"].items():
        for location, _ in results["matches"]:
            if location["type"] == "absolute":
                matches_at[location["value"]].add(rule_name)
    # lift to function level, forwarding through thunks
    per_function: defaultdict[int, set[str]] = defaultdict(set)
    for address, rule_names in matches_at.items():
        function_va = block_to_function.get(address, address)
        if function_va in thunk_targets:
            logger.debug(
                "forwarding capa matches from thunk 0x%x to 0x%x",
                function_va,
                thunk_targets[function_va],
            )
            function_va = thunk_targets[function_va]
        per_function[function_va].update(rule_names)
        for rule_name in rule_names:
            logger.info("capa: 0x%x: %s", function_va, rule_name)
    return dict(per_function)
def main(argv: list[str] | None = None) -> int:
    """mapa CLI entry point.

    Parses arguments, opens the IDA database, optionally loads Assemblage and
    capa data, collects the report, and renders it as text or an HTML map.
    Returns the process exit code (0 on success).
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = build_parser()
    args = parser.parse_args(args=argv)
    try:
        validate_output_options(args.output, args.open)
    except ValueError as error:
        # surface option conflicts (--open without --output html-map) as a
        # standard argparse usage error (prints usage and exits 2)
        parser.error(str(error))
    # logs go to stderr so stdout stays clean for the html-map output
    stderr_console = Console(stderr=True)
    logging.basicConfig(
        level=logging.DEBUG
        if args.verbose
        else (logging.ERROR if args.quiet else logging.INFO),
        format="%(message)s",
        handlers=[
            RichHandler(
                console=stderr_console, show_path=False, rich_tracebacks=args.verbose
            )
        ],
    )
    # deferred imports: logging is configured first, and heavyweight analysis
    # modules are only loaded once argument validation has passed
    from mapa.collector import collect_report
    from mapa.html_renderer import render_html_map
    from mapa.ida_db import open_database_session, resolve_database
    from mapa.renderer import render_report
    t0 = time.time()
    db_path, md5, sha256 = resolve_database(args.input_file)
    logger.debug("perf: resolve_database: %0.2fs", time.time() - t0)
    theme = Theme(
        {
            "decoration": "grey54",
            "title": "yellow",
            "key": "blue",
            "value": "blue",
            "default": "blue",
        },
        inherit=False,
    )
    # markup/emoji disabled: report content (symbol names, strings) must not
    # be interpreted by rich
    console = Console(theme=theme, markup=False, emoji=False)
    t0 = time.time()
    with open_database_session(db_path) as db:
        logger.debug("perf: open_database: %0.2fs", time.time() - t0)
        base_address = db.base_address or 0
        # prefer the hash resolved from the input file; fall back to the db's
        effective_sha256 = sha256 or db.sha256 or ""
        assemblage_records_by_address: dict[int, list[AssemblageRecord]] = {}
        if args.assemblage:
            assemblage_records_by_address = load_assemblage_records(
                args.assemblage,
                sample_sha256=effective_sha256,
                base_address=base_address,
            )
        matches_by_function: dict[int, set[str]] = {}
        if args.capa:
            from ida_domain.functions import FunctionFlags
            from mapa.collector import (
                _build_extern_index,
                _build_import_index,
                _resolve_thunk_target,
            )
            # precompute thunk -> target so capa matches on thunks can be
            # forwarded to the real function they wrap
            import_index = _build_import_index(db)
            extern_addrs = _build_extern_index(db)
            thunk_targets: dict[int, int] = {}
            for func in db.functions:
                flags = db.functions.get_flags(func)
                if flags and FunctionFlags.THUNK in flags:
                    target = _resolve_thunk_target(
                        db, int(func.start_ea), import_index, extern_addrs
                    )
                    if target is not None:
                        thunk_targets[int(func.start_ea)] = target
            matches_by_function = _load_capa_matches(
                args.capa,
                thunk_targets,
            )
        t0 = time.time()
        report = collect_report(
            db,
            md5=md5,
            sha256=effective_sha256,
            matches_by_function=matches_by_function,
            assemblage_records_by_address=assemblage_records_by_address,
        )
        logger.debug("perf: collect_report: %0.2fs", time.time() - t0)
        t0 = time.time()
        if args.output == "html-map":
            html = render_html_map(report)
            if args.open:
                # --open: write to a temp file and launch the browser instead
                # of writing the HTML to stdout
                report_path = open_html_report(html)
                logger.info("opened html map: %s", report_path)
            else:
                sys.stdout.write(html)
        else:
            render_report(report, console)
        logger.debug("perf: render_report: %0.2fs", time.time() - t0)
        return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,492 +0,0 @@
from __future__ import annotations
import logging
from datetime import datetime, timezone
from ida_domain.database import Database
from ida_domain.flowchart import FlowChartFlags
from ida_domain.functions import FunctionFlags
from mapa.model import (
AssemblageRecord,
MapaCall,
MapaCaller,
MapaFunction,
MapaLibrary,
MapaMeta,
MapaProgramString,
MapaReport,
MapaSection,
MapaString,
)
from mapa.strings import (
MAX_STRING_READ,
extract_ascii_from_buf,
extract_utf16le_from_buf,
)
from mapa.string_tags.tagger import StringTagger, load_default_tagger
logger = logging.getLogger(__name__)
THUNK_CHAIN_DEPTH_DELTA = 5
def _get_permissions_string(perm: int) -> str:
return (
("r" if perm & 4 else "-")
+ ("w" if perm & 2 else "-")
+ ("x" if perm & 1 else "-")
)
def _collect_meta(db: Database, md5: str, sha256: str) -> MapaMeta:
    """Build report metadata, preferring caller-supplied hashes over the db's.

    The timestamp is the current UTC time in ISO-8601 form.
    """
    if not md5 and db.md5:
        md5 = db.md5
    if not sha256 and db.sha256:
        sha256 = db.sha256
    return MapaMeta(
        name=db.path or "",
        sha256=sha256,
        md5=md5,
        arch=db.architecture or "",
        timestamp=datetime.now(timezone.utc).isoformat(),
        base_address=db.base_address or 0,
    )
def _collect_sections(db: Database) -> list[MapaSection]:
    """List every segment as a MapaSection (address, size, perms, name)."""
    return [
        MapaSection(
            address=int(seg.start_ea),
            size=int(seg.end_ea) - int(seg.start_ea),
            perms=_get_permissions_string(int(seg.perm)),
            name=db.segments.get_name(seg) or "",
        )
        for seg in db.segments.get_all()
    ]
def _normalize_module_name(name: str) -> str:
"""Normalize an import module name to include extension.
IDA strips .dll from PE import module names (e.g. 'KERNEL32' instead of
'KERNEL32.dll'). Add it back when the name has no extension.
"""
if "." not in name:
return f"{name}.dll".lower()
return name.lower()
def _collect_libraries(db: Database) -> list[MapaLibrary]:
    """List imported modules as MapaLibrary entries with normalized names."""
    return [
        MapaLibrary(name=_normalize_module_name(module.name))
        for module in db.imports.get_all_modules()
    ]
def _build_import_index(db: Database) -> dict[int, tuple[str, str]]:
    """Build address -> (module, function_name) for all imports.

    Imports without a name fall back to ``ord<N>`` using their ordinal.
    """
    index: dict[int, tuple[str, str]] = {}
    for imp in db.imports.get_all_imports():
        func_name = imp.name or f"ord{imp.ordinal}"
        index[int(imp.address)] = (_normalize_module_name(imp.module_name), func_name)
    return index
def _build_extern_index(db: Database) -> set[int]:
    """Collect addresses in XTRN segments."""
    extern_addresses: set[int] = set()
    for seg in db.segments.get_all():
        seg_class = db.segments.get_class(seg)
        if not seg_class or seg_class.upper() != "XTRN":
            continue
        start, end = int(seg.start_ea), int(seg.end_ea)
        extern_addresses.update(
            int(func.start_ea) for func in db.functions.get_between(start, end)
        )
    return extern_addresses
def _resolve_thunk_target(
    db: Database,
    ea: int,
    import_index: dict[int, tuple[str, str]],
    extern_addrs: set[int],
) -> int | None:
    """Follow thunk chains up to THUNK_CHAIN_DEPTH_DELTA hops.

    Returns the final resolved address, or None if resolution fails.

    Each hop requires exactly one outgoing reference: a single non-flow code
    reference is tried first, then a single data reference. A target that is
    an import or an XTRN extern, or a non-thunk function, terminates the
    chain; a target that is itself a thunk continues the walk. Ambiguous hops
    (zero or multiple candidates in both reference kinds) abort with None.
    """
    current = ea
    for _ in range(THUNK_CHAIN_DEPTH_DELTA):
        # prefer an unambiguous (single) non-flow code reference
        code_refs = list(db.xrefs.code_refs_from_ea(current, flow=False))
        if len(code_refs) == 1:
            target = int(code_refs[0])
            if target in import_index or target in extern_addrs:
                # resolved: landed on an import or extern stub
                return target
            target_func = db.functions.get_at(target)
            if target_func:
                flags = db.functions.get_flags(target_func)
                if flags and FunctionFlags.THUNK in flags:
                    # target is itself a thunk: keep walking the chain
                    current = target
                    continue
            # non-thunk function (or no function): final target
            return target
        # fall back to an unambiguous (single) data reference
        data_refs = list(db.xrefs.data_refs_from_ea(current))
        if len(data_refs) == 1:
            target = int(data_refs[0])
            if target in import_index or target in extern_addrs:
                return target
            target_func = db.functions.get_at(target)
            if target_func:
                flags = db.functions.get_flags(target_func)
                if flags and FunctionFlags.THUNK in flags:
                    current = target
                    continue
            return target
        # neither reference kind yields exactly one candidate: give up
        break
    return None
def _find_string_at(db: Database, ea: int) -> str | None:
    """Read bytes at the given address and check for ASCII or UTF-16 LE string."""
    try:
        buf = db.bytes.get_bytes_at(ea, MAX_STRING_READ)
    except Exception:
        # unreadable address: treat as "no string here"
        return None
    if not buf:
        return None
    # ASCII takes precedence over UTF-16 LE, matching the extractor order
    for extractor in (extract_ascii_from_buf, extract_utf16le_from_buf):
        candidate = extractor(buf)
        if candidate is not None:
            return candidate
    return None
def _find_data_reference_string(
    db: Database, insn_ea: int, max_depth: int = 10
) -> tuple[int, str] | None:
    """Follow single data-reference chains from an instruction to find a string.

    Each step requires exactly one outgoing data reference to a valid address;
    returns ``(string_address, value)`` on success, else None.
    """
    cursor = insn_ea
    for _ in range(max_depth):
        try:
            refs = list(db.xrefs.data_refs_from_ea(cursor))
        except Exception:
            return None
        if len(refs) != 1:
            # ambiguous (or absent) reference: stop following the chain
            return None
        target = int(refs[0])
        if not db.is_valid_ea(target):
            return None
        found = _find_string_at(db, target)
        if found is not None:
            return target, found
        cursor = target
    return None
def _merge_string_metadata(
tags: tuple[str, ...],
tag_matches: tuple,
new_tags: tuple[str, ...],
new_tag_matches: tuple,
) -> tuple[tuple[str, ...], tuple]:
merged_tags = tuple(sorted(set(tags) | set(new_tags)))
seen_match_keys = {match.sort_key for match in tag_matches}
unique_new = tuple(
match for match in new_tag_matches if match.sort_key not in seen_match_keys
)
return merged_tags, tag_matches + unique_new
def collect_report(
    db: Database,
    md5: str = "",
    sha256: str = "",
    matches_by_function: dict[int, set[str]] | None = None,
    assemblage_records_by_address: dict[int, list[AssemblageRecord]] | None = None,
    tagger: StringTagger | None = None,
) -> MapaReport:
    """Collect a complete mapa report from an open IDA database.

    Gathers metadata, sections, libraries, the resolved call graph (with
    thunks collapsed onto their targets), per-function CFG statistics,
    referenced strings (tagged via *tagger*), capa matches, and Assemblage
    annotations. *matches_by_function* and *assemblage_records_by_address*
    default to empty; *tagger* defaults to the bundled string tagger.
    """
    if matches_by_function is None:
        matches_by_function = {}
    if assemblage_records_by_address is None:
        assemblage_records_by_address = {}
    if tagger is None:
        tagger = load_default_tagger()
    meta = _collect_meta(db, md5, sha256)
    sections = _collect_sections(db)
    libraries = _collect_libraries(db)
    import_index = _build_import_index(db)
    extern_addrs = _build_extern_index(db)
    # enumerate every function with its thunk/library flags, sorted by address
    all_functions: list[tuple[int, object, bool, bool]] = []
    for func in db.functions:
        ea = int(func.start_ea)
        flags = db.functions.get_flags(func)
        is_thunk = flags is not None and FunctionFlags.THUNK in flags
        is_lib = flags is not None and FunctionFlags.LIB in flags
        all_functions.append((ea, func, is_thunk, is_lib))
    all_functions.sort(key=lambda x: x[0])
    # positional order of each function within the address-sorted list;
    # used to compute signed deltas between callers/callees
    func_address_to_order: dict[int, int] = {}
    for i, (ea, _, _, _) in enumerate(all_functions):
        func_address_to_order[ea] = i
    # resolve each thunk to its final target so the call graph below can
    # collapse thunks onto what they wrap
    thunk_targets: dict[int, int] = {}
    for ea, func, is_thunk, _ in all_functions:
        if is_thunk:
            target = _resolve_thunk_target(db, ea, import_index, extern_addrs)
            if target is not None:
                thunk_targets[ea] = target
    # first pass: build the resolved call graph (callers and callees per
    # function), skipping thunks/imports/externs as sources
    resolved_callers: dict[int, set[int]] = {}
    resolved_callees: dict[int, list[tuple[int, bool]]] = {}
    for ea, func, is_thunk, is_lib in all_functions:
        if is_thunk or ea in import_index or ea in extern_addrs:
            continue
        fc = db.functions.get_flowchart(
            func, flags=FlowChartFlags.NOEXT | FlowChartFlags.PREDS
        )
        if fc is None:
            continue
        seen_callees: set[int] = set()
        callees: list[tuple[int, bool]] = []
        for block in fc:
            insns = block.get_instructions()
            if insns is None:
                continue
            for insn in insns:
                if not db.instructions.is_call_instruction(insn):
                    # also check for jumps to imports (thunk pattern)
                    mnem = db.instructions.get_mnemonic(insn)
                    if mnem and mnem.lower().startswith("jmp"):
                        call_targets = list(
                            db.xrefs.code_refs_from_ea(int(insn.ea), flow=False)
                        )
                    else:
                        continue
                else:
                    call_targets = list(db.xrefs.calls_from_ea(int(insn.ea)))
                    if not call_targets:
                        # fall back to non-flow code refs when the call has
                        # no direct call-xref (e.g. indirect calls)
                        call_targets = list(
                            db.xrefs.code_refs_from_ea(int(insn.ea), flow=False)
                        )
                for target_ea in call_targets:
                    target_ea = int(target_ea)
                    # collapse thunks onto their resolved target
                    resolved_target = target_ea
                    if target_ea in thunk_targets:
                        resolved_target = thunk_targets[target_ea]
                    if resolved_target in seen_callees:
                        continue
                    seen_callees.add(resolved_target)
                    is_api = (
                        resolved_target in import_index
                        or resolved_target in extern_addrs
                    )
                    callees.append((resolved_target, is_api))
                    if resolved_target not in resolved_callers:
                        resolved_callers[resolved_target] = set()
                    resolved_callers[resolved_target].add(ea)
        resolved_callees[ea] = callees
    # second pass: build the per-function report entries
    mapa_functions: list[MapaFunction] = []
    program_strings_by_address: dict[int, MapaProgramString] = {}
    for ea, func, is_thunk, is_lib in all_functions:
        if ea in import_index or ea in extern_addrs:
            continue
        name = db.functions.get_name(func) or f"sub_{ea:x}"
        order = func_address_to_order[ea]
        mf = MapaFunction(
            address=ea,
            name=name,
            is_thunk=is_thunk,
            is_library=is_lib,
            assemblage_records=list(assemblage_records_by_address.get(ea, [])),
        )
        if is_thunk:
            # thunks are listed but carry no CFG stats, calls, or strings
            mapa_functions.append(mf)
            continue
        fc = db.functions.get_flowchart(
            func, flags=FlowChartFlags.NOEXT | FlowChartFlags.PREDS
        )
        if fc is not None:
            # CFG statistics: blocks, edges, instructions, instruction bytes
            num_blocks = 0
            num_edges = 0
            num_insns = 0
            total_bytes = 0
            for block in fc:
                num_blocks += 1
                num_edges += block.count_successors()
                insns = block.get_instructions()
                if insns is None:
                    continue
                for insn in insns:
                    num_insns += 1
                    insn_size = db.heads.size(int(insn.ea))
                    total_bytes += insn_size
            mf.num_basic_blocks = num_blocks
            mf.num_edges = num_edges
            mf.num_instructions = num_insns
            mf.total_instruction_bytes = total_bytes
        for caller_ea in sorted(resolved_callers.get(ea, set())):
            if caller_ea not in func_address_to_order:
                continue
            caller_order = func_address_to_order[caller_ea]
            delta = caller_order - order
            # NOTE(review): both branches yield "" — the direction glyphs
            # (presumably up/down arrows) appear to have been lost; confirm
            # against the original source.
            direction = "" if delta < 0 else ""
            caller_func = db.functions.get_at(caller_ea)
            caller_name = (
                db.functions.get_name(caller_func)
                if caller_func
                else f"sub_{caller_ea:x}"
            )
            mf.callers.append(
                MapaCaller(
                    name=caller_name or f"sub_{caller_ea:x}",
                    address=caller_ea,
                    delta=delta,
                    direction=direction,
                )
            )
        for target_ea, is_api in resolved_callees.get(ea, []):
            if is_api:
                if target_ea in import_index:
                    # imported API: render as module!function
                    module_name, func_name = import_index[target_ea]
                    api_name = f"{module_name}!{func_name}"
                else:
                    # extern without import entry: fall back to its IDA name
                    target_func = db.functions.get_at(target_ea)
                    api_name = (
                        db.functions.get_name(target_func)
                        if target_func
                        else f"sub_{target_ea:x}"
                    )
                    api_name = api_name or f"sub_{target_ea:x}"
                mf.apis.append(
                    MapaCall(
                        name=api_name,
                        address=target_ea,
                        is_api=True,
                    )
                )
            else:
                if target_ea not in func_address_to_order:
                    continue
                target_order = func_address_to_order[target_ea]
                delta = target_order - order
                # NOTE(review): same lost-glyph issue as the callers loop.
                direction = "" if delta < 0 else ""
                target_func = db.functions.get_at(target_ea)
                target_name = (
                    db.functions.get_name(target_func)
                    if target_func
                    else f"sub_{target_ea:x}"
                )
                mf.calls.append(
                    MapaCall(
                        name=target_name or f"sub_{target_ea:x}",
                        address=target_ea,
                        is_api=False,
                        delta=delta,
                        direction=direction,
                    )
                )
        if fc is not None:
            # string pass: walk the CFG again and collect strings reached via
            # single data-reference chains; de-dupe per function by display
            # value while merging tags and keeping the lowest address
            seen_strings: dict[str, MapaString] = {}
            fc2 = db.functions.get_flowchart(
                func, flags=FlowChartFlags.NOEXT | FlowChartFlags.PREDS
            )
            if fc2 is not None:
                for block in fc2:
                    insns = block.get_instructions()
                    if insns is None:
                        continue
                    for insn in insns:
                        string_result = _find_data_reference_string(db, int(insn.ea))
                        if string_result is None:
                            continue
                        string_ea, raw = string_result
                        # tags are matched against the raw (untrimmed) value
                        tag_result = tagger.tag_string(raw)
                        display = raw.rstrip()
                        if not display:
                            continue
                        if display in seen_strings:
                            existing = seen_strings[display]
                            existing.tags, existing.tag_matches = _merge_string_metadata(
                                existing.tags,
                                existing.tag_matches,
                                tag_result.tags,
                                tag_result.matches,
                            )
                            existing.address = min(existing.address, string_ea)
                        else:
                            ms = MapaString(
                                value=display,
                                address=string_ea,
                                tags=tuple(sorted(set(tag_result.tags))),
                                tag_matches=tag_result.matches,
                            )
                            seen_strings[display] = ms
                            mf.strings.append(ms)
                        # program-wide string index keyed by string address,
                        # accumulating every referencing function
                        if string_ea in program_strings_by_address:
                            existing_program_string = program_strings_by_address[string_ea]
                            existing_program_string.tags, existing_program_string.tag_matches = _merge_string_metadata(
                                existing_program_string.tags,
                                existing_program_string.tag_matches,
                                tag_result.tags,
                                tag_result.matches,
                            )
                            existing_program_string.function_addresses = tuple(
                                sorted(
                                    set(existing_program_string.function_addresses)
                                    | {ea}
                                )
                            )
                        else:
                            program_strings_by_address[string_ea] = MapaProgramString(
                                value=display,
                                address=string_ea,
                                tags=tuple(sorted(set(tag_result.tags))),
                                tag_matches=tag_result.matches,
                                function_addresses=(ea,),
                            )
        mf.capa_matches = sorted(matches_by_function.get(ea, set()))
        mapa_functions.append(mf)
    return MapaReport(
        meta=meta,
        sections=sections,
        libraries=libraries,
        functions=mapa_functions,
        program_strings=sorted(
            program_strings_by_address.values(),
            key=lambda string: string.address,
        ),
    )

View File

@@ -1,553 +0,0 @@
from __future__ import annotations
import json
from html import escape
from mapa.model import MapaProgramString, MapaReport
from mapa.renderer import _visible_tags, render_function_summary_text
def _to_json(value: object) -> str:
return json.dumps(value, separators=(",", ":")).replace("</", "<\\/")
def _collect_tag_entries(report: MapaReport) -> list[tuple[str, list[int]]]:
    """Map each visible string tag to the sorted function indices it touches.

    Entries are ordered by descending distinct-function count, then tag name.
    Strings with no visible tags, or whose functions are not in the report,
    contribute nothing.
    """
    index_by_address = {
        function.address: index for index, function in enumerate(report.functions)
    }
    functions_for_tag: dict[str, set[int]] = {}
    for program_string in report.program_strings:
        shown_tags = _visible_tags(program_string.tags)
        if not shown_tags:
            continue
        touched = {
            index_by_address[address]
            for address in program_string.function_addresses
            if address in index_by_address
        }
        if not touched:
            continue
        for tag in shown_tags:
            functions_for_tag.setdefault(tag, set()).update(touched)
    entries = [(tag, sorted(indices)) for tag, indices in functions_for_tag.items()]
    entries.sort(key=lambda entry: (-len(entry[1]), entry[0]))
    return entries
def _collect_function_graph(
    report: MapaReport, function_index_by_address: dict[int, int]
) -> tuple[list[list[int]], list[list[int]]]:
    """Build per-function caller and callee index lists.

    Edges whose address is not in *function_index_by_address* are dropped;
    each list is de-duplicated and sorted.
    """

    def to_indices(edges) -> list[int]:
        # map edge addresses to function indices, dropping unknown addresses
        return sorted(
            {
                function_index_by_address[edge.address]
                for edge in edges
                if edge.address in function_index_by_address
            }
        )

    callers_by_index = [to_indices(function.callers) for function in report.functions]
    callees_by_index = [to_indices(function.calls) for function in report.functions]
    return callers_by_index, callees_by_index
def _render_string_row(program_string: MapaProgramString, index: int) -> str:
    """Render one clickable string row as a <button> element.

    Visible tags are joined with spaces and, when present, rendered in a
    trailing right-aligned span; all dynamic values are HTML-escaped.
    """
    shown = " ".join(_visible_tags(program_string.tags))
    address_hex = hex(program_string.address)
    tag_markup = f'<span class="string-tags">{escape(shown)}</span>' if shown else ""
    parts = [
        f'<button type="button" class="string-row" data-string-index="{index}" ',
        f'data-string-address="{escape(address_hex, quote=True)}" ',
        f'data-string-value="{escape(program_string.value, quote=True)}" ',
        f'data-string-tags="{escape(shown, quote=True)}">',
        f'<span class="string-address">{escape(address_hex)}</span>',
        f'<span class="string-value">{escape(program_string.value)}</span>',
        tag_markup,
        "</button>",
    ]
    return "".join(parts)
def render_html_map(report: MapaReport) -> str:
tag_entries = _collect_tag_entries(report)
function_index_by_address = {
function.address: index for index, function in enumerate(report.functions)
}
callers_by_index, callees_by_index = _collect_function_graph(
report, function_index_by_address
)
program_strings = sorted(report.program_strings, key=lambda string: string.address)
data = {
"functions": [
{
"name": function.name,
"address": hex(function.address),
"label": f"{function.name} @ {hex(function.address)}",
"summary": render_function_summary_text(function),
}
for function in report.functions
],
"callersByIndex": callers_by_index,
"calleesByIndex": callees_by_index,
"tags": {tag: function_indices for tag, function_indices in tag_entries},
"strings": [
{
"address": hex(program_string.address),
"value": program_string.value,
"functionIndices": [
function_index_by_address[address]
for address in program_string.function_addresses
if address in function_index_by_address
],
}
for program_string in program_strings
],
}
style = """
*,*::before,*::after{box-sizing:border-box;margin:0;padding:0}
html,body{height:100%}
:root{--bg:#fff;--fg:#111;--muted:#666;--line:#cfcfcf;--fill:#d9d9d9;--heat:#2563eb;--seed:#f59e0b;--square:10px}
body{height:100vh;overflow:hidden;background:var(--bg);color:var(--fg);font:13px/1.4 ui-monospace,SFMono-Regular,Menlo,Consolas,monospace;padding:16px}
body.is-resizing{cursor:col-resize}
body.is-resizing *{user-select:none}
main{height:100%;min-height:0;display:flex;flex-direction:column;gap:16px}
section{display:flex;flex-direction:column;gap:8px}
.meta,.legend,.pane-header{color:var(--muted)}
.controls{display:flex;flex-wrap:wrap;gap:6px}
.control-row{display:flex;flex-wrap:wrap;align-items:center;gap:8px}
.control-label{color:var(--muted)}
.control{border:1px solid var(--line);background:transparent;color:inherit;padding:2px 6px;font:inherit;cursor:pointer}
.control.is-active{border-color:var(--heat);color:var(--heat)}
.control-count{color:var(--muted)}
.split-view{flex:1;min-height:0;display:flex;align-items:stretch}
.pane{min-height:0;display:flex;flex-direction:column;overflow:auto;border:1px solid var(--line);background:var(--bg)}
.pane-functions{flex:0 0 50%;min-width:18rem}
.pane-strings{flex:1 1 auto;min-width:18rem}
.pane-header{position:sticky;top:0;z-index:1;background:var(--bg);border-bottom:1px solid var(--line);padding:8px 10px}
.pane-body{display:flex;flex-direction:column;gap:8px;padding:8px 10px}
.splitter{position:relative;flex:0 0 12px;cursor:col-resize;touch-action:none}
.splitter::before{content:'';position:absolute;top:0;bottom:0;left:50%;width:1px;background:var(--line);transform:translateX(-50%)}
.splitter::after{content:'';position:absolute;top:50%;left:50%;width:3px;height:40px;border-left:1px solid var(--line);border-right:1px solid var(--line);transform:translate(-50%,-50%)}
.function-grid{display:flex;flex-wrap:wrap;gap:1px;align-content:flex-start}
.function-box{position:relative;z-index:0;width:var(--square);height:var(--square);border:1px solid var(--line);background:var(--fill);overflow:visible}
.function-box::before{content:'';position:absolute;inset:0;background:var(--heat);opacity:var(--heat-opacity,0);pointer-events:none}
.function-box::after{content:'';position:absolute;inset:-2px;border:1px solid var(--seed);opacity:0;pointer-events:none}
.function-box.is-seed::after{opacity:1}
.function-box.is-dim{opacity:.28}
.string-list{display:flex;flex-direction:column;gap:2px}
.string-row{display:flex;align-items:flex-start;gap:8px;width:100%;border:1px solid transparent;background:transparent;color:inherit;padding:3px 4px;font:inherit;text-align:left;cursor:pointer}
.string-row:hover,.string-row.is-active{border-color:var(--line)}
.string-address{color:var(--muted);white-space:nowrap;flex:0 0 auto}
.string-value{min-width:0;flex:1 1 auto;white-space:pre-wrap;word-break:break-word}
.string-tags{margin-left:auto;flex:0 0 auto;padding-left:8px;color:var(--muted);white-space:nowrap}
.tooltip{position:fixed;z-index:10;display:none;width:min(42rem,calc(100vw - 24px));max-height:calc(100vh - 24px);overflow:auto;border:1px solid var(--line);background:#fff;padding:8px;pointer-events:none;white-space:pre-wrap;box-shadow:0 2px 8px rgba(0,0,0,.08)}
.tooltip.is-visible{display:block}
h1,h2{font-size:inherit}
""".strip()
script = """
const data=JSON.parse(document.getElementById('mapa-data').textContent);
const splitView=document.getElementById('split-view');
const functionsPane=document.getElementById('functions-pane');
const splitter=document.getElementById('splitter');
const functionBoxes=[...document.querySelectorAll('.function-box')];
const tagControls=[...document.querySelectorAll('.tag-control')];
const stringRows=[...document.querySelectorAll('.string-row')];
const directionControls=[...document.querySelectorAll('.direction-control')];
const depthControls=[...document.querySelectorAll('.depth-control')];
const neighborhoodStatus=document.getElementById('neighborhood-status');
const tooltip=document.getElementById('tooltip');
const bothByIndex=data.callersByIndex.map((callers,index)=>[...new Set([...callers,...data.calleesByIndex[index]])]);
const functionCount=data.functions.length;
let hoveredFunction=null;
let lockedFunction=null;
let hoveredTag=null;
let lockedTag=null;
let hoveredString=null;
let lockedString=null;
let directionMode='both';
let maxDepth=3;
let activePointerId=null;
let tooltipFunctionIndex=null;
let currentNeighborhood=null;
const getDecayScore=(distance)=>0.5**distance;
const hasLockedSeed=()=>lockedFunction!==null||lockedTag!==null||lockedString!==null;
const getUniqueSeedIndices=(seedIndices)=>[...new Set(seedIndices)];
const clearHoveredSeeds=()=>{
hoveredFunction=null;
hoveredTag=null;
hoveredString=null;
};
const clearLockedSeeds=()=>{
lockedFunction=null;
lockedTag=null;
lockedString=null;
};
const buildSeedSource=(kind,key,label,seedIndices)=>{
const uniqueSeedIndices=getUniqueSeedIndices(seedIndices);
if(uniqueSeedIndices.length===0){return null;}
return {kind,key,label,seedIndices:uniqueSeedIndices};
};
const getFunctionSeedSource=(index)=>buildSeedSource('function',String(index),data.functions[index].label,[index]);
const getTagSeedSource=(tag)=>buildSeedSource('tag',tag,tag,data.tags[tag]||[]);
const getStringSeedSource=(index)=>{
const stringData=data.strings[index];
return buildSeedSource('string',String(index),`${stringData.value} @ ${stringData.address}`,stringData.functionIndices);
};
const getLockedSeedSource=()=>{
if(lockedFunction!==null){return getFunctionSeedSource(lockedFunction);}
if(lockedTag!==null){return getTagSeedSource(lockedTag);}
if(lockedString!==null){return getStringSeedSource(lockedString);}
return null;
};
const getHoveredSeedSource=()=>{
if(hoveredFunction!==null){return getFunctionSeedSource(hoveredFunction);}
if(hoveredTag!==null){return getTagSeedSource(hoveredTag);}
if(hoveredString!==null){return getStringSeedSource(hoveredString);}
return null;
};
const getActiveSeedSource=()=>getLockedSeedSource()??getHoveredSeedSource();
const getAdjacency=()=>{
if(directionMode==='callers'){return data.callersByIndex;}
if(directionMode==='callees'){return data.calleesByIndex;}
return bothByIndex;
};
const computeNeighborhoodState=(seedIndices)=>{
const adjacency=getAdjacency();
const uniqueSeedIndices=getUniqueSeedIndices(seedIndices);
const scores=new Float32Array(functionCount);
const bestDistances=new Array(functionCount).fill(null);
for(const seedIndex of uniqueSeedIndices){
const distances=new Array(functionCount).fill(-1);
const queue=[seedIndex];
distances[seedIndex]=0;
for(let queueIndex=0;queueIndex<queue.length;queueIndex++){
const functionIndex=queue[queueIndex];
const distance=distances[functionIndex];
if(distance===maxDepth){continue;}
for(const neighborIndex of adjacency[functionIndex]){
if(distances[neighborIndex]!==-1){continue;}
distances[neighborIndex]=distance+1;
queue.push(neighborIndex);
}
}
distances.forEach((distance,functionIndex)=>{
if(distance===-1){return;}
scores[functionIndex]+=getDecayScore(distance);
const bestDistance=bestDistances[functionIndex];
if(bestDistance===null||distance<bestDistance){bestDistances[functionIndex]=distance;}
});
}
let maxScore=0;
scores.forEach((score)=>{
if(score>maxScore){maxScore=score;}
});
return {
scores:Array.from(scores),
bestDistances,
seedIndices:uniqueSeedIndices,
seedSet:new Set(uniqueSeedIndices),
maxScore,
};
};
const renderTooltipText=(index)=>{
const summary=data.functions[index].summary;
if(currentNeighborhood===null){return summary;}
const lines=[`heat: ${currentNeighborhood.scores[index].toFixed(2)}`,`seed: ${currentNeighborhood.seedSet.has(index)?'yes':'no'}`];
const distance=currentNeighborhood.bestDistances[index];
if(distance!==null){lines.push(`distance: ${distance}`);}
return `${lines.join('\\n')}\\n\\n${summary}`;
};
const updateTooltip=()=>{
if(tooltipFunctionIndex===null){return;}
tooltip.textContent=renderTooltipText(tooltipFunctionIndex);
};
const updateStatus=()=>{
const activeSeedSource=getActiveSeedSource();
if(activeSeedSource===null){
neighborhoodStatus.textContent='hover or click a function, tag, or string';
return;
}
const seedCount=currentNeighborhood===null?0:currentNeighborhood.seedIndices.length;
neighborhoodStatus.textContent=`${activeSeedSource.kind} ${activeSeedSource.label} · direction ${directionMode} · depth ${maxDepth} · ${seedCount} seed${seedCount===1?'':'s'}`;
};
const updateView=()=>{
const activeSeedSource=getActiveSeedSource();
currentNeighborhood=activeSeedSource===null?null:computeNeighborhoodState(activeSeedSource.seedIndices);
const hasActive=currentNeighborhood!==null;
functionBoxes.forEach((box,index)=>{
const score=currentNeighborhood===null?0:currentNeighborhood.scores[index];
const heatOpacity=currentNeighborhood===null||currentNeighborhood.maxScore===0?0:score/currentNeighborhood.maxScore;
box.style.setProperty('--heat-opacity',heatOpacity.toFixed(3));
box.classList.toggle('is-seed',currentNeighborhood!==null&&currentNeighborhood.seedSet.has(index));
box.classList.toggle('is-dim',hasActive&&score===0);
});
tagControls.forEach((control)=>{
control.classList.toggle('is-active',activeSeedSource!==null&&activeSeedSource.kind==='tag'&&control.dataset.tag===activeSeedSource.key);
});
stringRows.forEach((row)=>{
row.classList.toggle('is-active',activeSeedSource!==null&&activeSeedSource.kind==='string'&&row.dataset.stringIndex===activeSeedSource.key);
});
directionControls.forEach((control)=>{
control.classList.toggle('is-active',control.dataset.direction===directionMode);
});
depthControls.forEach((control)=>{
control.classList.toggle('is-active',Number(control.dataset.depth)===maxDepth);
});
updateStatus();
updateTooltip();
};
const placeTooltip=(event)=>{
const offset=12;
let left=event.clientX+offset;
let top=event.clientY+offset;
const rect=tooltip.getBoundingClientRect();
if(left+rect.width>window.innerWidth-8){left=Math.max(8,window.innerWidth-rect.width-8);}
if(top+rect.height>window.innerHeight-8){top=Math.max(8,window.innerHeight-rect.height-8);}
tooltip.style.left=`${left}px`;
tooltip.style.top=`${top}px`;
};
const getPaneMinWidth=()=>parseFloat(getComputedStyle(document.documentElement).fontSize)*18;
const resizePanes=(clientX)=>{
const rect=splitView.getBoundingClientRect();
const splitterWidth=splitter.getBoundingClientRect().width;
const paneMinWidth=getPaneMinWidth();
const minLeft=rect.left+paneMinWidth;
const maxLeft=rect.right-paneMinWidth-splitterWidth;
if(maxLeft<=minLeft){functionsPane.style.flexBasis='50%';return;}
const clampedLeft=Math.min(maxLeft,Math.max(minLeft,clientX));
functionsPane.style.flexBasis=`${clampedLeft-rect.left}px`;
};
const clampPaneSize=()=>{
const basis=parseFloat(functionsPane.style.flexBasis);
if(Number.isFinite(basis)){resizePanes(splitView.getBoundingClientRect().left+basis);}
};
const stopResizing=(event)=>{
if(activePointerId===null||event.pointerId!==activePointerId){return;}
if(splitter.hasPointerCapture(event.pointerId)){splitter.releasePointerCapture(event.pointerId);}
activePointerId=null;
document.body.classList.remove('is-resizing');
};
functionBoxes.forEach((box,index)=>{
box.addEventListener('mouseenter',(event)=>{
tooltipFunctionIndex=index;
tooltip.classList.add('is-visible');
if(!hasLockedSeed()){hoveredFunction=index;}
updateView();
placeTooltip(event);
});
box.addEventListener('mousemove',placeTooltip);
box.addEventListener('mouseleave',()=>{
tooltip.classList.remove('is-visible');
tooltipFunctionIndex=null;
if(!hasLockedSeed()&&hoveredFunction===index){
hoveredFunction=null;
updateView();
}
});
box.addEventListener('click',()=>{
const functionIndex=Number(box.dataset.functionIndex);
if(lockedFunction===functionIndex){
lockedFunction=null;
}else{
clearLockedSeeds();
lockedFunction=functionIndex;
}
clearHoveredSeeds();
updateView();
});
});
tagControls.forEach((control)=>{
control.addEventListener('mouseenter',()=>{
if(hasLockedSeed()){return;}
hoveredTag=control.dataset.tag;
updateView();
});
control.addEventListener('mouseleave',()=>{
if(hasLockedSeed()||hoveredTag!==control.dataset.tag){return;}
hoveredTag=null;
updateView();
});
control.addEventListener('click',()=>{
const tag=control.dataset.tag;
if(lockedTag===tag){
lockedTag=null;
}else{
clearLockedSeeds();
lockedTag=tag;
}
clearHoveredSeeds();
updateView();
});
});
stringRows.forEach((row)=>{
row.addEventListener('mouseenter',()=>{
if(hasLockedSeed()){return;}
hoveredString=Number(row.dataset.stringIndex);
updateView();
});
row.addEventListener('mouseleave',()=>{
if(hasLockedSeed()||hoveredString!==Number(row.dataset.stringIndex)){return;}
hoveredString=null;
updateView();
});
row.addEventListener('click',()=>{
const stringIndex=Number(row.dataset.stringIndex);
if(lockedString===stringIndex){
lockedString=null;
}else{
clearLockedSeeds();
lockedString=stringIndex;
}
clearHoveredSeeds();
updateView();
});
});
directionControls.forEach((control)=>{
control.addEventListener('click',()=>{
directionMode=control.dataset.direction;
updateView();
});
});
depthControls.forEach((control)=>{
control.addEventListener('click',()=>{
maxDepth=Number(control.dataset.depth);
updateView();
});
});
splitter.addEventListener('pointerdown',(event)=>{
activePointerId=event.pointerId;
splitter.setPointerCapture(event.pointerId);
document.body.classList.add('is-resizing');
resizePanes(event.clientX);
event.preventDefault();
});
splitter.addEventListener('pointermove',(event)=>{if(activePointerId===event.pointerId){resizePanes(event.clientX);}});
splitter.addEventListener('pointerup',stopResizing);
splitter.addEventListener('pointercancel',stopResizing);
window.addEventListener('resize',clampPaneSize);
updateView();
""".strip()
parts: list[str] = [
"<!doctype html>",
'<html lang="en">',
"<head>",
'<meta charset="utf-8">',
f"<title>{escape(report.meta.name)} - mapa html map</title>",
"<style>",
style,
"</style>",
"</head>",
"<body>",
"<main>",
"<section>",
f"<div>{escape(report.meta.name)}</div>",
(
f'<div class="meta">sha256 {escape(report.meta.sha256)} · arch {escape(report.meta.arch)}'
f" · functions {len(report.functions)} · strings {len(program_strings)} · tags {len(tag_entries)}</div>"
),
"<h1>tags</h1>",
'<div class="controls" id="tag-controls">',
]
for tag, function_indices in tag_entries:
parts.append(
(
f'<button type="button" class="control tag-control" data-tag="{escape(tag, quote=True)}" '
f'data-count="{len(function_indices)}">{escape(tag)} '
f'<span class="control-count">({len(function_indices)})</span></button>'
)
)
parts.extend(
[
"</div>",
'<div class="control-row">',
'<span class="control-label">direction</span>',
'<div class="controls" id="direction-controls">',
'<button type="button" class="control direction-control" data-direction="callers">callers</button>',
'<button type="button" class="control direction-control" data-direction="callees">callees</button>',
'<button type="button" class="control direction-control is-active" data-direction="both">both</button>',
"</div>",
'<span class="control-label">depth</span>',
'<div class="controls" id="depth-controls">',
'<button type="button" class="control depth-control" data-depth="1">1</button>',
'<button type="button" class="control depth-control" data-depth="2">2</button>',
'<button type="button" class="control depth-control is-active" data-depth="3">3</button>',
'<button type="button" class="control depth-control" data-depth="4">4</button>',
"</div>",
"</div>",
'<div class="meta" id="neighborhood-status"></div>',
'<div class="legend">fill = neighborhood heat · outline = seed · dim = outside neighborhood</div>',
"</section>",
'<div class="split-view" id="split-view">',
'<section class="pane pane-functions" id="functions-pane">',
f'<div class="pane-header">functions ({len(report.functions)})</div>',
'<div class="pane-body">',
'<div class="function-grid" id="function-grid">',
]
)
for index, function in enumerate(report.functions):
parts.append(
(
f'<div class="function-box" data-function-index="{index}" '
f'data-function-address="{escape(hex(function.address), quote=True)}" '
f'aria-label="{escape(function.name, quote=True)}"></div>'
)
)
parts.extend(
[
"</div>",
"</div>",
"</section>",
'<div class="splitter" id="splitter" role="separator" aria-orientation="vertical" aria-label="resize panes"></div>',
'<section class="pane pane-strings" id="strings-pane">',
f'<div class="pane-header">strings ({len(program_strings)})</div>',
'<div class="pane-body">',
'<div class="string-list" id="string-list">',
]
)
for index, program_string in enumerate(program_strings):
parts.append(_render_string_row(program_string, index))
parts.extend(
[
"</div>",
"</div>",
"</section>",
"</div>",
"</main>",
'<div class="tooltip" id="tooltip"></div>',
f'<script type="application/json" id="mapa-data">{_to_json(data)}</script>',
"<script>",
script,
"</script>",
"</body>",
"</html>",
]
)
return "\n".join(parts)

View File

@@ -1,165 +0,0 @@
from __future__ import annotations
import contextlib
import fcntl
import hashlib
import logging
import os
import time
from pathlib import Path
from typing import Iterator
import idapro # must be first: mutates sys.path so ida_auto and ida_domain are importable
import ida_auto
from ida_domain.database import Database, IdaCommandOptions
logger = logging.getLogger(__name__)
# seconds to wait for the advisory lock when opening an already-analyzed database
DATABASE_ACCESS_TIMEOUT = 5.0
# seconds to wait for initial auto-analysis to produce a cached database
DATABASE_ANALYSIS_TIMEOUT = 120.0
# seconds between polls of the .nam marker and between flock retry attempts
DATABASE_POLL_INTERVAL = 0.25
def get_cache_dir() -> Path:
    """Return the mapa cache directory, honoring ``XDG_CACHE_HOME`` when set."""
    xdg_home = os.environ.get("XDG_CACHE_HOME")
    base = Path(xdg_home) if xdg_home else Path.home() / ".cache"
    return base / "mandiant" / "mapa"
def compute_file_hashes(file_path: Path) -> tuple[str, str]:
    """Compute (md5, sha256) hex digests for a file, streaming in 64 KiB chunks.

    Raises:
        OSError: If the file cannot be read.
    """
    digests = (hashlib.md5(), hashlib.sha256())
    with file_path.open("rb") as stream:
        while chunk := stream.read(65536):
            for digest in digests:
                digest.update(chunk)
    return digests[0].hexdigest(), digests[1].hexdigest()
def _wait_for_repack(db_path: Path, timeout: float) -> None:
nam_path = db_path.with_suffix(".nam")
deadline = time.monotonic() + timeout
while nam_path.exists():
if time.monotonic() >= deadline:
raise RuntimeError(
f"Database {db_path} appears to be open in another program "
f"({nam_path} still exists after {timeout:.0f}s)."
)
time.sleep(DATABASE_POLL_INTERVAL)
@contextlib.contextmanager
def database_access_guard(db_path: Path, timeout: float) -> Iterator[None]:
    """Advisory guard that serialises access to an IDA database.

    Waits for the .nam repack marker to clear, takes an exclusive flock on
    ``<db>.lock``, then re-checks the marker to close the TOCTOU window.

    Raises:
        RuntimeError: On timeout waiting for the database.
    """
    _wait_for_repack(db_path, timeout)
    lock_handle = Path(f"{db_path}.lock").open("w")
    deadline = time.monotonic() + timeout
    try:
        acquired = False
        while not acquired:
            try:
                fcntl.flock(lock_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                acquired = True
            except OSError:
                if time.monotonic() >= deadline:
                    raise RuntimeError(
                        f"Timed out waiting for lock on {db_path} after {timeout:.0f}s."
                    )
                time.sleep(DATABASE_POLL_INTERVAL)
        # re-check: the .nam marker may have appeared while we waited for the lock
        _wait_for_repack(db_path, max(0, deadline - time.monotonic()))
        yield
    finally:
        fcntl.flock(lock_handle, fcntl.LOCK_UN)
        lock_handle.close()
def resolve_database(file_path: Path) -> tuple[Path, str, str]:
    """Resolve an input path to an .i64/.idb database path.

    Binaries are analyzed once and cached under the mapa cache directory,
    keyed by their sha256, so identical inputs share one analysis.

    Returns (db_path, md5, sha256). For existing databases, hashes are empty
    strings (they'll be read from IDA metadata instead).

    Raises:
        RuntimeError: If analysis or caching fails.
    """
    suffix = file_path.suffix.lower()
    if suffix in {".i64", ".idb"}:
        # already an IDA database: use it in place; no hashes of the original binary
        logger.debug("Using existing database: %s", file_path)
        return file_path, "", ""
    cache_dir = get_cache_dir()
    cache_dir.mkdir(parents=True, exist_ok=True)
    md5, sha256 = compute_file_hashes(file_path)
    cache_path = cache_dir / f"{sha256}.i64"
    if cache_path.exists():
        logger.debug("Cache hit for %s -> %s", file_path, cache_path)
        return cache_path, md5, sha256
    logger.debug("Cache miss for %s; analyzing to %s", file_path, cache_path)
    with database_access_guard(cache_path, timeout=DATABASE_ANALYSIS_TIMEOUT):
        # double-check under the lock: another process may have finished the
        # analysis while we were waiting to acquire it
        if cache_path.exists():
            logger.debug("Cache populated while waiting for lock: %s", cache_path)
            return cache_path, md5, sha256
        logger.info("Analyzing %s (this may take a moment)...", file_path.name)
        # keep IDA kernel messages off the console during headless analysis
        idapro.enable_console_messages(False)
        ida_options = IdaCommandOptions(
            auto_analysis=True,
            new_database=True,
            output_database=str(cache_path),
            load_resources=True,
            # NOTE(review): points Lumina hosts at 0.0.0.0 — presumably to
            # neutralize network lookups during analysis; confirm intent
            plugin_options="lumina:host=0.0.0.0 -Osecondary_lumina:host=0.0.0.0",
        )
        try:
            with Database.open(str(file_path), ida_options, save_on_close=True):
                ida_auto.auto_wait()  # block until auto-analysis completes
        except Exception as exc:
            raise RuntimeError(f"Analysis failed for {file_path}: {exc}") from exc
        if not cache_path.exists():
            raise RuntimeError(f"Analysis produced no database for {file_path}")
        logger.debug("Analysis completed: %s", cache_path)
    return cache_path, md5, sha256
@contextlib.contextmanager
def open_database_session(db_path: Path, auto_analysis: bool = False) -> Iterator[Database]:
    """Open an IDA database session guarded by the advisory file lock.

    Yields the open Database; it is closed (without saving) on exit.

    Raises:
        RuntimeError: If opening fails or the database is locked.
    """
    with database_access_guard(db_path, timeout=DATABASE_ACCESS_TIMEOUT):
        options = IdaCommandOptions(auto_analysis=auto_analysis, new_database=False)
        logger.debug("Opening database session: %s (auto_analysis=%s)", db_path, auto_analysis)
        # suppress IDA kernel console chatter while the session is open
        idapro.enable_console_messages(False)
        try:
            session = Database.open(str(db_path), options, save_on_close=False)
        except Exception as exc:
            raise RuntimeError(f"Failed to open {db_path}: {exc}") from exc
        with session:
            if auto_analysis:
                ida_auto.auto_wait()
            yield session
        logger.debug("Closed database session: %s", db_path)

View File

@@ -1,126 +0,0 @@
from __future__ import annotations
from collections.abc import Mapping
from dataclasses import dataclass, field
@dataclass(frozen=True)
class AssemblageRecord:
    """One Assemblage ground-truth function record, rebased onto an image base."""

    sha256: str
    name: str
    start_rva: int
    end_rva: int
    address: int
    end_address: int
    source_file: str

    @property
    def source_path(self) -> str:
        """Source file path with a trailing " (...)" suffix stripped, if present."""
        if self.source_file.endswith(")"):
            head, sep, _tail = self.source_file.rpartition(" (")
            if sep:
                return head
        return self.source_file

    @classmethod
    def from_csv_row(
        cls, row: Mapping[str, str], base_address: int
    ) -> AssemblageRecord:
        """Build a record from a CSV row, rebasing its RVAs onto *base_address*."""
        start = int(row["start"], 0)
        end = int(row["end"], 0)
        return cls(
            sha256=row["hash"].strip().lower(),
            name=row["name"].strip(),
            start_rva=start,
            end_rva=end,
            address=base_address + start,
            end_address=base_address + end,
            source_file=row["source_file"].strip(),
        )
@dataclass
class MapaString:
    """A string referenced from within a single function."""

    value: str  # decoded string contents
    address: int  # virtual address of the string data
    tags: tuple[str, ...] = ()  # tag names assigned by the taggers (e.g. "#common")
    tag_matches: tuple = ()  # raw matcher objects behind the tags — structure depends on the tagger; TODO confirm
@dataclass
class MapaProgramString:
    """A program-wide string, with back-references to the functions that use it."""

    value: str  # decoded string contents
    address: int  # virtual address of the string data
    tags: tuple[str, ...] = ()  # tag names assigned by the taggers (e.g. "#common")
    tag_matches: tuple = ()  # raw matcher objects behind the tags — structure depends on the tagger; TODO confirm
    function_addresses: tuple[int, ...] = ()  # addresses of functions referencing this string
@dataclass
class MapaCall:
    """An outgoing call edge from a function."""

    name: str  # callee name
    address: int  # callee address
    is_api: bool  # True when the callee is treated as an API — presumably an import; verify against extractor
    delta: int = 0  # signed offset rendered next to the call as "(+N)"; exact semantics not visible here
    direction: str = ""  # marker rendered before the callee name (see render.py); TODO confirm values
@dataclass
class MapaCaller:
    """An incoming xref: a function that calls this one."""

    name: str  # caller name
    address: int  # caller address
    delta: int = 0  # signed offset rendered next to the xref as "(+N)"; exact semantics not visible here
    direction: str = ""  # marker rendered before the caller name (see render.py); TODO confirm values
@dataclass
class MapaFunction:
    """One analyzed function plus everything rendered beneath its heading."""

    address: int  # function start address
    name: str
    is_thunk: bool = False  # thunks render with a "thunk" heading and only assemblage rows
    is_library: bool = False  # flagged as library code — not used by the renderer here; TODO confirm consumer
    num_basic_blocks: int = 0
    num_edges: int = 0
    num_instructions: int = 0
    total_instruction_bytes: int = 0  # sum of instruction lengths in bytes
    callers: list[MapaCaller] = field(default_factory=list)  # incoming xrefs
    calls: list[MapaCall] = field(default_factory=list)  # outgoing calls
    apis: list[MapaCall] = field(default_factory=list)  # calls rendered as "api:" rows
    strings: list[MapaString] = field(default_factory=list)  # strings referenced by this function
    capa_matches: list[str] = field(default_factory=list)  # names of matched capa rules
    assemblage_records: list[AssemblageRecord] = field(default_factory=list)  # ground-truth records covering this function
@dataclass
class MapaSection:
    """A memory section/segment of the analyzed image."""

    address: int  # section start address
    size: int  # section size in bytes
    perms: str  # permission string as rendered (e.g. "rwx"-style); TODO confirm format
    name: str = ""
@dataclass
class MapaLibrary:
    """A library the program depends on (statically linked or loaded)."""

    name: str
    is_static: bool = False  # True for statically-linked libraries (rendered as "(static)")
    load_address: int | None = None  # load address when known; None otherwise
@dataclass
class MapaMeta:
    """File-level metadata for a report."""

    name: str  # input file name
    sha256: str  # hex digest of the input
    md5: str = ""  # hex digest of the input; may be empty for pre-existing databases
    arch: str = ""  # architecture label
    timestamp: str = ""  # analysis timestamp, rendered as-is
    base_address: int = 0  # image base used to rebase RVAs
@dataclass
class MapaReport:
    """Top-level analysis result consumed by the renderers."""

    meta: MapaMeta
    sections: list[MapaSection] = field(default_factory=list)
    libraries: list[MapaLibrary] = field(default_factory=list)
    functions: list[MapaFunction] = field(default_factory=list)
    program_strings: list[MapaProgramString] = field(default_factory=list)

View File

@@ -1,273 +0,0 @@
from __future__ import annotations
import contextlib
import rich.padding
from rich.console import Console
from rich.markup import escape
from rich.text import Text
from mapa.model import MapaFunction, MapaReport, MapaString
class Renderer:
    """Indentation-aware console printer built on top of rich.

    Tracks a logical indent level and left-pads everything it prints by
    two columns per level.
    """

    def __init__(self, console: Console):
        self.console: Console = console
        self.indent: int = 0  # current logical indent level

    @contextlib.contextmanager
    def indenting(self):
        """Raise the indent level for the duration of the block."""
        self.indent += 1
        try:
            yield
        finally:
            self.indent -= 1

    @staticmethod
    def markup(s: str, **kwargs) -> Text:
        """Format *s* as rich markup, escaping every string argument."""
        safe_args = {
            key: (escape(value) if isinstance(value, str) else value)
            for key, value in kwargs.items()
        }
        return Text.from_markup(s.format(**safe_args))

    def print(self, renderable, **kwargs):
        """Print *renderable* at the current indent; kwargs trigger markup formatting."""
        if kwargs:
            assert isinstance(renderable, str)
            return self.print(self.markup(renderable, **kwargs))
        return self.console.print(
            rich.padding.Padding(renderable, (0, 0, 0, self.indent * 2))
        )

    def writeln(self, s: str):
        self.print(s)

    @contextlib.contextmanager
    def section(self, name):
        """Print a titled heading, then indent everything inside the block."""
        if isinstance(name, str):
            self.print("[title]{name}", name=name)
        elif isinstance(name, Text):
            heading = name.copy()
            heading.stylize_before(self.console.get_style("title"))
            self.print(heading)
        else:
            raise ValueError("unexpected section name")
        with self.indenting():
            yield
def _visible_tags(tags: tuple[str, ...]) -> list[str]:
tag_set = set(tags)
has_specific = any(t != "#common" for t in tag_set)
result = []
for t in tags:
if t == "#common" and has_specific:
continue
result.append(t)
return result
def _render_string_line(o: Renderer, value: str, tags: list[str]) -> Text:
    """Render a string row with its tags right-aligned to the console edge.

    The string goes on the left, the dimmed tag list on the right, padded
    apart so the tags end at the available width. When there is too little
    room the tags fall back to a single space after the string; an overlong
    string is truncated with an ellipsis to preserve the right alignment.
    """
    left = Text.from_markup(
        'string: [decoration]"[/]{string}[decoration]"[/]'.format(
            string=escape(value)
        )
    )
    right = Text(" ".join(tags), style="dim")
    # width remaining after the renderer's current indentation
    available = o.console.size.width - (o.indent * 2)
    min_gap = 1
    # cell_len counts terminal cells, which differs from len() for wide glyphs
    right_len = right.cell_len
    max_left = available - right_len - min_gap
    if max_left < 12:
        # not enough room to right-align: just separate with a single space
        combined = left.copy()
        combined.append(" ")
        combined.append(right)
        return combined
    if left.cell_len > max_left:
        # truncate mutates `left` in place, appending an ellipsis character
        left.truncate(max_left - 1, overflow="ellipsis")
    padding = available - left.cell_len - right_len
    combined = left.copy()
    combined.append(" " * padding)
    combined.append(right)
    return combined
def _get_primary_source_path(func) -> str | None:
if not func.assemblage_records:
return None
source_path = func.assemblage_records[0].source_path
if not source_path:
return None
return source_path
def _render_source_path_separator(o: Renderer, source_path: str) -> Text:
    """Render a dashed horizontal rule labelled with *source_path*, sized to the console."""
    label = f"[ {source_path} ]"
    available = max(0, o.console.size.width - (o.indent * 2))
    if available <= len(label) + 2:
        # no room for dashes: emit the label alone
        return Text(label, style="decoration")
    dash_total = available - len(label) - 2
    left_dashes = dash_total // 2
    rendered = Text("-" * left_dashes, style="decoration")
    rendered.append(" ")
    rendered.append(label, style="decoration")
    rendered.append(" ")
    rendered.append("-" * (dash_total - left_dashes), style="decoration")
    return rendered
def _format_function_heading(func: MapaFunction) -> str:
kind = "thunk" if func.is_thunk else "function"
return f"{kind} {func.name} @ {hex(func.address)}"
def _iter_function_rows(func: MapaFunction):
for record in func.assemblage_records:
yield "assemblage", record
if func.is_thunk:
return
for caller in func.callers:
yield "caller", caller
yield "metrics", None
for match in func.capa_matches:
yield "capa", match
for call in func.calls:
yield "call", call
for api in func.apis:
yield "api", api
for string in func.strings:
yield "string", string
def _render_plain_string_line(string: MapaString) -> str:
    """Plain-text rendering of a function string, with its visible tags appended."""
    tags = _visible_tags(string.tags)
    rendered = f'string: "{string.value}"'
    if tags:
        rendered = f"{rendered} {' '.join(tags)}"
    return rendered
def render_function_summary_text(func: MapaFunction) -> str:
    """Render *func* as a plain-text block, one display row per line."""
    out = [_format_function_heading(func)]
    for kind, value in _iter_function_rows(func):
        if kind == "assemblage":
            out.append(f"assemblage name: {value.name}")
            out.append(f"assemblage file: {value.source_path}")
        elif kind == "caller":
            out.append(f"xref: {value.direction} {value.name} ({value.delta:+})")
        elif kind == "metrics":
            out.append(
                f"B/E/I: {func.num_basic_blocks} / {func.num_edges} / {func.num_instructions}"
                f" ({func.total_instruction_bytes} bytes)"
            )
        elif kind == "capa":
            out.append(f"capa: {value}")
        elif kind == "call":
            out.append(f"calls: {value.direction} {value.name} ({value.delta:+})")
        elif kind == "api":
            out.append(f"api: {value.name}")
        elif kind == "string":
            out.append(_render_plain_string_line(value))
        else:
            raise ValueError(f"unexpected function row kind: {kind}")
    return "\n".join(out)
def render_report(report: MapaReport, console: Console) -> None:
    """Render a full report to *console*: meta, sections, libraries, then functions.

    Functions coming from the same assemblage source file are grouped by a
    labelled dashed separator printed whenever the source path changes.
    """
    o = Renderer(console)
    with o.section("meta"):
        o.writeln(f"name: {report.meta.name}")
        o.writeln(f"sha256: {report.meta.sha256}")
        o.writeln(f"arch: {report.meta.arch}")
        o.writeln(f"ts: {report.meta.timestamp}")
    with o.section("sections"):
        for section in report.sections:
            o.writeln(f"- {hex(section.address)} {section.perms} {hex(section.size)}")
    with o.section("libraries"):
        for lib in report.libraries:
            static = " (static)" if lib.is_static else ""
            addr = (
                f" at {hex(lib.load_address)}" if lib.load_address is not None else ""
            )
            o.writeln(f"- {lib.name:<12s}{static}{addr}")
        if not report.libraries:
            o.writeln("(none)")
    with o.section("functions"):
        # track the previous source path so a separator is only printed on change
        last_source_path: str | None = None
        for func in report.functions:
            source_path = _get_primary_source_path(func)
            if source_path is not None:
                if last_source_path is not None and source_path != last_source_path:
                    o.print(_render_source_path_separator(o, source_path))
                last_source_path = source_path
            with o.section(
                o.markup(
                    "{function_kind} [default]{function_name}[/] [decoration]@ {function_address}[/]",
                    function_kind="thunk" if func.is_thunk else "function",
                    function_name=func.name,
                    function_address=hex(func.address),
                )
            ):
                # each row kind maps to one styled line; kinds come from _iter_function_rows
                for kind, value in _iter_function_rows(func):
                    if kind == "assemblage":
                        o.writeln(f"assemblage name: {value.name}")
                        o.writeln(f"assemblage file: {value.source_path}")
                    elif kind == "caller":
                        o.print(
                            "xref: [decoration]{direction}[/] {name} [decoration]({delta:+})[/]",
                            direction=value.direction,
                            name=value.name,
                            delta=value.delta,
                        )
                    elif kind == "metrics":
                        o.writeln(
                            f"B/E/I: {func.num_basic_blocks} / {func.num_edges} / {func.num_instructions} ({func.total_instruction_bytes} bytes)"
                        )
                    elif kind == "capa":
                        o.writeln(f"capa: {value}")
                    elif kind == "call":
                        o.print(
                            "calls: [decoration]{direction}[/] {name} [decoration]({delta:+})[/]",
                            direction=value.direction,
                            name=value.name,
                            delta=value.delta,
                        )
                    elif kind == "api":
                        o.print(
                            "api: {name}",
                            name=value.name,
                        )
                    elif kind == "string":
                        visible_tags = _visible_tags(value.tags)
                        if visible_tags:
                            o.print(_render_string_line(o, value.value, visible_tags))
                        else:
                            # no tags: render directly without right-alignment machinery
                            o.print(
                                'string: [decoration]"[/]{string}[decoration]"[/]',
                                string=value.value,
                            )
                    else:
                        raise ValueError(f"unexpected function row kind: {kind}")
            if not func.is_thunk:
                # blank line between full functions; thunks stay compact
                o.print("")

View File

@@ -1,25 +0,0 @@
# Vendored Quantum Strand string databases
Upstream: `../quantumstrand/` (https://github.com/mandiant/flare-floss, Quantum Strand branch)
Upstream commit: `73eb1541e896c065fc694ba7b01067f56871631b`
## Copied data files
- `data/oss/*.jsonl.gz` — open-source library string databases
- `data/crt/msvc_v143.jsonl.gz` — MSVC CRT string database
- `data/expert/capa.jsonl` — expert tagging rules (plain JSONL, not gzipped)
- `data/winapi/apis.txt.gz` — Windows API function names
- `data/winapi/dlls.txt.gz` — Windows DLL names
- `data/gp/gp.jsonl.gz` — global prevalence strings
- `data/gp/cwindb-native.jsonl.gz` — CWinDB native prevalence strings
- `data/gp/cwindb-dotnet.jsonl.gz` — CWinDB .NET prevalence strings
- `data/gp/junk-code.jsonl.gz` — junk/compiler-generated code strings
- `data/gp/xaa-hashes.bin` — truncated MD5 hash set (8 bytes per entry)
- `data/gp/yaa-hashes.bin` — truncated MD5 hash set (8 bytes per entry)
## Code
The loader, tagger, and model code in this package are mapa-local rewrites
inspired by upstream modules `floss/qs/db/oss.py`, `expert.py`, `gp.py`,
`winapi.py`, and the tagging logic in `floss/qs/main.py`. No upstream code
was copied verbatim.

View File

@@ -1 +0,0 @@
from __future__ import annotations

File diff suppressed because it is too large Load Diff

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More