mirror of https://github.com/mandiant/capa.git
synced 2026-03-17 23:39:00 -07:00

Compare commits: ci/add-gem...add-codema (1 commit)
| Author | SHA1 | Date |
|---|---|---|
| | d0bafd6ab7 | |

3 .github/pyinstaller/pyinstaller.spec (vendored)
@@ -74,9 +74,6 @@ a = Analysis(
        # only be installed locally.
        "binaryninja",
        "ida",
        # remove once https://github.com/mandiant/capa/issues/2681 has
        # been addressed by PyInstaller
        "pkg_resources",
    ],
)
79 .github/workflows/build.yml (vendored)
@@ -9,7 +9,6 @@ on:
      - '**.md'
  release:
    types: [edited, published]
  workflow_dispatch: # manual trigger for testing

permissions:
  contents: write
@@ -23,38 +22,24 @@ jobs:
      fail-fast: true
      matrix:
        include:
          - os: ubuntu-22.04
          - os: ubuntu-20.04
            # use old linux so that the shared library versioning is more portable
            artifact_name: capa
            asset_name: linux
            python_version: '3.10'
          - os: ubuntu-22.04-arm
            artifact_name: capa
            asset_name: linux-arm64
            python_version: '3.10'
          - os: ubuntu-22.04
          - os: ubuntu-20.04
            artifact_name: capa
            asset_name: linux-py312
            python_version: '3.12'
          - os: windows-2022
          - os: windows-2019
            artifact_name: capa.exe
            asset_name: windows
            python_version: '3.10'
          # Windows 11 ARM64 complains of conflicting package version
          # Additionally, there is no ARM64 build of Python for Python 3.10 on Windows 11 ARM: https://raw.githubusercontent.com/actions/python-versions/main/versions-manifest.json
          #- os: windows-11-arm
          #  artifact_name: capa.exe
          #  asset_name: windows-arm64
          #  python_version: '3.12'
          - os: macos-13
            # use older macOS for assumed better portability
            artifact_name: capa
            asset_name: macos
            python_version: '3.10'
          - os: macos-14
            artifact_name: capa
            asset_name: macos-arm64
            python_version: '3.10'
    steps:
      - name: Checkout capa
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
@@ -64,7 +49,7 @@ jobs:
        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
        with:
          python-version: ${{ matrix.python_version }}
      - if: matrix.os == 'ubuntu-22.04' || matrix.os == 'ubuntu-22.04-arm'
      - if: matrix.os == 'ubuntu-20.04'
        run: sudo apt-get install -y libyaml-dev
      - name: Upgrade pip, setuptools
        run: python -m pip install --upgrade pip setuptools
@@ -74,28 +59,6 @@ jobs:
          pip install -e .[build]
      - name: Build standalone executable
        run: pyinstaller --log-level DEBUG .github/pyinstaller/pyinstaller.spec
      - name: Does it run without warnings or errors?
        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "windows-2022" ]] || [[ "${{ matrix.os }}" == "windows-11-arm" ]]; then
            EXECUTABLE=".\\dist\\capa"
          else
            EXECUTABLE="./dist/capa"
          fi

          output=$(${EXECUTABLE} --version 2>&1)
          exit_code=$?

          echo "${output}"
          echo "${exit_code}"

          if echo "${output}" | grep -iE 'error|warning'; then
            exit 1
          fi

          if [[ "${exit_code}" -ne 0 ]]; then
            exit 1
          fi
      - name: Does it run (PE)?
        run: dist/capa -d "tests/data/Practical Malware Analysis Lab 01-01.dll_"
      - name: Does it run (Shellcode)?
@@ -111,6 +74,34 @@ jobs:
          name: ${{ matrix.asset_name }}
          path: dist/${{ matrix.artifact_name }}

  test_run:
    name: Test run on ${{ matrix.os }} / ${{ matrix.asset_name }}
    runs-on: ${{ matrix.os }}
    needs: [build]
    strategy:
      matrix:
        include:
          # OSs not already tested above
          - os: ubuntu-22.04
            artifact_name: capa
            asset_name: linux
          - os: ubuntu-22.04
            artifact_name: capa
            asset_name: linux-py312
          - os: windows-2022
            artifact_name: capa.exe
            asset_name: windows
    steps:
      - name: Download ${{ matrix.asset_name }}
        uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2
        with:
          name: ${{ matrix.asset_name }}
      - name: Set executable flag
        if: matrix.os != 'windows-2022'
        run: chmod +x ${{ matrix.artifact_name }}
      - name: Run capa
        run: ./${{ matrix.artifact_name }} -h

  zip_and_upload:
    # upload zipped binaries to Release page
    if: github.event_name == 'release'
@@ -122,18 +113,12 @@ jobs:
        include:
          - asset_name: linux
            artifact_name: capa
          - asset_name: linux-arm64
            artifact_name: capa
          - asset_name: linux-py312
            artifact_name: capa
          - asset_name: windows
            artifact_name: capa.exe
          #- asset_name: windows-arm64
          #  artifact_name: capa.exe
          - asset_name: macos
            artifact_name: capa
          - asset_name: macos-arm64
            artifact_name: capa
    steps:
      - name: Download ${{ matrix.asset_name }}
        uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2
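The "Does it run without warnings or errors?" step above captures the freshly built binary's combined stdout/stderr and its exit code, then fails if either signals a problem. A minimal sketch of that smoke-test pattern follows; the step name and the `./dist/mytool` path are placeholders, not part of the diff:

```yaml
# Sketch of the smoke-test pattern used by the build job above: capture a
# binary's combined output and exit code, then fail the step on warnings
# or errors. "./dist/mytool" is a hypothetical artifact path.
- name: Smoke test the built binary
  shell: bash
  run: |
    set +e                                  # keep going so we can inspect $?
    output=$(./dist/mytool --version 2>&1)  # merge stderr into stdout
    exit_code=$?
    echo "${output}"
    # grep returns 0 on a match, which we treat as failure here
    if echo "${output}" | grep -iE 'error|warning'; then
      exit 1
    fi
    exit "${exit_code}"
```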
304 .github/workflows/gemini-cli.yml (vendored)
@@ -1,304 +0,0 @@
name: '💬 Gemini CLI'

on:
  pull_request_review_comment:
    types:
      - 'created'
  pull_request_review:
    types:
      - 'submitted'
  issue_comment:
    types:
      - 'created'

concurrency:
  group: '${{ github.workflow }}-${{ github.event.issue.number }}'
  cancel-in-progress: |-
    ${{ github.event.sender.type == 'User' && ( github.event.issue.author_association == 'OWNER' || github.event.issue.author_association == 'MEMBER' || github.event.issue.author_association == 'COLLABORATOR') }}

defaults:
  run:
    shell: 'bash'

permissions:
  contents: 'write'
  id-token: 'write'
  pull-requests: 'write'
  issues: 'write'

jobs:
  gemini-cli:
    # This condition is complex to ensure we only run when explicitly invoked.
    if: |-
      github.event_name == 'workflow_dispatch' ||
      (
        github.event_name == 'issues' && github.event.action == 'opened' &&
        contains(github.event.issue.body, '@gemini-cli') &&
        !contains(github.event.issue.body, '@gemini-cli /review') &&
        !contains(github.event.issue.body, '@gemini-cli /triage') &&
        contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.issue.author_association)
      ) ||
      (
        (
          github.event_name == 'issue_comment' ||
          github.event_name == 'pull_request_review_comment'
        ) &&
        contains(github.event.comment.body, '@gemini-cli') &&
        !contains(github.event.comment.body, '@gemini-cli /review') &&
        !contains(github.event.comment.body, '@gemini-cli /triage') &&
        contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
      ) ||
      (
        github.event_name == 'pull_request_review' &&
        contains(github.event.review.body, '@gemini-cli') &&
        !contains(github.event.review.body, '@gemini-cli /review') &&
        !contains(github.event.review.body, '@gemini-cli /triage') &&
        contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.review.author_association)
      )
    timeout-minutes: 10
    runs-on: 'ubuntu-latest'

    steps:
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        if: |-
          ${{ vars.APP_ID }}
        uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ vars.APP_ID }}'
          private-key: '${{ secrets.APP_PRIVATE_KEY }}'

      - name: 'Get context from event'
        id: 'get_context'
        env:
          EVENT_NAME: '${{ github.event_name }}'
          EVENT_PAYLOAD: '${{ toJSON(github.event) }}'
        run: |-
          set -euo pipefail

          USER_REQUEST=""
          ISSUE_NUMBER=""
          IS_PR="false"

          if [[ "${EVENT_NAME}" == "issues" ]]; then
            USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .issue.body)
            ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .issue.number)
          elif [[ "${EVENT_NAME}" == "issue_comment" ]]; then
            USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .comment.body)
            ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .issue.number)
            if [[ $(echo "${EVENT_PAYLOAD}" | jq -r .issue.pull_request) != "null" ]]; then
              IS_PR="true"
            fi
          elif [[ "${EVENT_NAME}" == "pull_request_review" ]]; then
            USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .review.body)
            ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .pull_request.number)
            IS_PR="true"
          elif [[ "${EVENT_NAME}" == "pull_request_review_comment" ]]; then
            USER_REQUEST=$(echo "${EVENT_PAYLOAD}" | jq -r .comment.body)
            ISSUE_NUMBER=$(echo "${EVENT_PAYLOAD}" | jq -r .pull_request.number)
            IS_PR="true"
          fi

          # Clean up user request
          USER_REQUEST=$(echo "${USER_REQUEST}" | sed 's/.*@gemini-cli//' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

          {
            echo "user_request=${USER_REQUEST}"
            echo "issue_number=${ISSUE_NUMBER}"
            echo "is_pr=${IS_PR}"
          } >> "${GITHUB_OUTPUT}"

      - name: 'Set up git user for commits'
        run: |-
          git config --global user.name 'gemini-cli[bot]'
          git config --global user.email 'gemini-cli[bot]@users.noreply.github.com'

      - name: 'Checkout PR branch'
        if: |-
          ${{ steps.get_context.outputs.is_pr == 'true' }}
        uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
        with:
          token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          repository: '${{ github.repository }}'
          ref: 'refs/pull/${{ steps.get_context.outputs.issue_number }}/head'
          fetch-depth: 0

      - name: 'Checkout main branch'
        if: |-
          ${{ steps.get_context.outputs.is_pr == 'false' }}
        uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
        with:
          token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          repository: '${{ github.repository }}'
          fetch-depth: 0

      - name: 'Acknowledge request'
        env:
          GITHUB_ACTOR: '${{ github.actor }}'
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
          REPOSITORY: '${{ github.repository }}'
          REQUEST_TYPE: '${{ steps.get_context.outputs.request_type }}'
        run: |-
          set -euo pipefail
          MESSAGE="@${GITHUB_ACTOR} I've received your request and I'm working on it now! 🤖"
          if [[ -n "${MESSAGE}" ]]; then
            gh issue comment "${ISSUE_NUMBER}" \
              --body "${MESSAGE}" \
              --repo "${REPOSITORY}"
          fi

      - name: 'Get description'
        id: 'get_description'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          IS_PR: '${{ steps.get_context.outputs.is_pr }}'
          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
        run: |-
          set -euo pipefail
          if [[ "${IS_PR}" == "true" ]]; then
            DESCRIPTION=$(gh pr view "${ISSUE_NUMBER}" --json body --template '{{.body}}')
          else
            DESCRIPTION=$(gh issue view "${ISSUE_NUMBER}" --json body --template '{{.body}}')
          fi
          {
            echo "description<<EOF"
            echo "${DESCRIPTION}"
            echo "EOF"
          } >> "${GITHUB_OUTPUT}"

      - name: 'Get comments'
        id: 'get_comments'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          IS_PR: '${{ steps.get_context.outputs.is_pr }}'
          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
        run: |-
          set -euo pipefail
          if [[ "${IS_PR}" == "true" ]]; then
            COMMENTS=$(gh pr view "${ISSUE_NUMBER}" --json comments --template '{{range .comments}}{{.author.login}}: {{.body}}{{"\n"}}{{end}}')
          else
            COMMENTS=$(gh issue view "${ISSUE_NUMBER}" --json comments --template '{{range .comments}}{{.author.login}}: {{.body}}{{"\n"}}{{end}}')
          fi
          {
            echo "comments<<EOF"
            echo "${COMMENTS}"
            echo "EOF"
          } >> "${GITHUB_OUTPUT}"

      - name: 'Run Gemini'
        id: 'run_gemini'
        uses: 'google-github-actions/run-gemini-cli@v0'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          REPOSITORY: '${{ github.repository }}'
          USER_REQUEST: '${{ steps.get_context.outputs.user_request }}'
          ISSUE_NUMBER: '${{ steps.get_context.outputs.issue_number }}'
          IS_PR: '${{ steps.get_context.outputs.is_pr }}'
        with:
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
          settings: |-
            {
              "maxSessionTurns": 50,
              "telemetry": {
                "enabled": false,
                "target": "gcp"
              }
            }
          prompt: |-
            ## Role

            You are a helpful AI assistant invoked via a CLI interface in a GitHub workflow. You have access to tools to interact with the repository and respond to the user.

            ## Context

            - **Repository**: `${{ github.repository }}`
            - **Triggering Event**: `${{ github.event_name }}`
            - **Issue/PR Number**: `${{ steps.get_context.outputs.issue_number }}`
            - **Is this a PR?**: `${{ steps.get_context.outputs.is_pr }}`
            - **Issue/PR Description**:
              `${{ steps.get_description.outputs.description }}`
            - **Comments**:
              `${{ steps.get_comments.outputs.comments }}`

            ## User Request

            The user has sent the following request:
            `${{ steps.get_context.outputs.user_request }}`

            ## How to Respond to Issues, PR Comments, and Questions

            This workflow supports three main scenarios:

            1. **Creating a Fix for an Issue**
               - Carefully read the user request and the related issue or PR description.
               - Use available tools to gather all relevant context (e.g., `gh issue view`, `gh pr view`, `gh pr diff`, `cat`, `head`, `tail`).
               - Identify the root cause of the problem before proceeding.
               - **Show and maintain a plan as a checklist**:
                 - At the very beginning, outline the steps needed to resolve the issue or address the request and post them as a checklist comment on the issue or PR (use GitHub markdown checkboxes: `- [ ] Task`).
                 - Example:
                   ```
                   ### Plan
                   - [ ] Investigate the root cause
                   - [ ] Implement the fix in `file.py`
                   - [ ] Add/modify tests
                   - [ ] Update documentation
                   - [ ] Verify the fix and close the issue
                   ```
                 - Use: `gh pr comment "${ISSUE_NUMBER}" --body "<plan>"` or `gh issue comment "${ISSUE_NUMBER}" --body "<plan>"` to post the initial plan.
                 - As you make progress, keep the checklist visible and up to date by editing the same comment (check off completed tasks with `- [x]`).
                 - To update the checklist:
                   1. Find the comment ID for the checklist (use `gh pr comment list "${ISSUE_NUMBER}"` or `gh issue comment list "${ISSUE_NUMBER}"`).
                   2. Edit the comment with the updated checklist:
                      - For PRs: `gh pr comment --edit <comment-id> --body "<updated plan>"`
                      - For Issues: `gh issue comment --edit <comment-id> --body "<updated plan>"`
                   3. The checklist should only be maintained as a comment on the issue or PR. Do not track or update the checklist in code files.
               - If the fix requires code changes, determine which files and lines are affected. If clarification is needed, note any questions for the user.
               - Make the necessary code or documentation changes using the available tools (e.g., `write_file`). Ensure all changes follow project conventions and best practices. Reference all shell variables as `"${VAR}"` (with quotes and braces) to prevent errors.
               - Run any relevant tests or checks to verify the fix works as intended. If possible, provide evidence (test output, screenshots, etc.) that the issue is resolved.
               - **Branching and Committing**:
                 - **NEVER commit directly to the `main` branch.**
                 - If you are working on a **pull request** (`IS_PR` is `true`), the correct branch is already checked out. Simply commit and push to it.
                   - `git add .`
                   - `git commit -m "feat: <describe the change>"`
                   - `git push`
                 - If you are working on an **issue** (`IS_PR` is `false`), create a new branch for your changes. A good branch name would be `issue/${ISSUE_NUMBER}/<short-description>`.
                   - `git checkout -b issue/${ISSUE_NUMBER}/my-fix`
                   - `git add .`
                   - `git commit -m "feat: <describe the fix>"`
                   - `git push origin issue/${ISSUE_NUMBER}/my-fix`
                 - After pushing, you can create a pull request: `gh pr create --title "Fixes #${ISSUE_NUMBER}: <short title>" --body "This PR addresses issue #${ISSUE_NUMBER}."`
               - Summarize what was changed and why in a markdown file: `write_file("response.md", "<your response here>")`
               - Post the response as a comment:
                 - For PRs: `gh pr comment "${ISSUE_NUMBER}" --body-file response.md`
                 - For Issues: `gh issue comment "${ISSUE_NUMBER}" --body-file response.md`

            2. **Addressing Comments on a Pull Request**
               - Read the specific comment and the context of the PR.
               - Use tools like `gh pr view`, `gh pr diff`, and `cat` to understand the code and discussion.
               - If the comment requests a change or clarification, follow the same process as for fixing an issue: create a checklist plan, implement, test, and commit any required changes, updating the checklist as you go.
               - **Committing Changes**: The correct PR branch is already checked out. Simply add, commit, and push your changes.
                 - `git add .`
                 - `git commit -m "fix: address review comments"`
                 - `git push`
               - If the comment is a question, answer it directly and clearly, referencing code or documentation as needed.
               - Document your response in `response.md` and post it as a PR comment: `gh pr comment "${ISSUE_NUMBER}" --body-file response.md`

            3. **Answering Any Question on an Issue**
               - Read the question and the full issue context using `gh issue view` and related tools.
               - Research or analyze the codebase as needed to provide an accurate answer.
               - If the question requires code or documentation changes, follow the fix process above, including creating and updating a checklist plan and **creating a new branch for your changes as described in section 1.**
               - Write a clear, concise answer in `response.md` and post it as an issue comment: `gh issue comment "${ISSUE_NUMBER}" --body-file response.md`

            ## Guidelines

            - **Be concise and actionable.** Focus on solving the user's problem efficiently.
            - **Always commit and push your changes if you modify code or documentation.**
            - **If you are unsure about the fix or answer, explain your reasoning and ask clarifying questions.**
            - **Follow project conventions and best practices.**
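Several steps above (`Get description`, `Get comments`) persist multiline strings between steps using the heredoc-style delimiter syntax that `$GITHUB_OUTPUT` requires; a plain `key=value` write would break on embedded newlines. A minimal standalone sketch of the pattern, with placeholder step ids and payload:

```yaml
# Sketch of the multiline-output pattern used by the Gemini CLI workflow
# above. The ids "produce"/"consume" and the NOTES value are illustrative.
- name: Produce a multiline output
  id: produce
  run: |
    NOTES=$(printf 'line one\nline two')
    {
      echo "notes<<EOF"        # open the heredoc-style delimiter
      echo "${NOTES}"          # the multiline payload
      echo "EOF"               # close the delimiter
    } >> "${GITHUB_OUTPUT}"

- name: Consume it in a later step
  run: echo "${{ steps.produce.outputs.notes }}"
```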
130 .github/workflows/gemini-issue-automated-triage.yml (vendored)
@@ -1,130 +0,0 @@
name: '🏷️ Gemini Automated Issue Triage'

on:
  issues:
    types:
      - 'opened'
      - 'reopened'
  issue_comment:
    types:
      - 'created'
  workflow_dispatch:
    inputs:
      issue_number:
        description: 'issue number to triage'
        required: true
        type: 'number'

concurrency:
  group: '${{ github.workflow }}-${{ github.event.issue.number }}'
  cancel-in-progress: true

defaults:
  run:
    shell: 'bash'

permissions:
  contents: 'read'
  id-token: 'write'
  issues: 'write'
  statuses: 'write'

jobs:
  triage-issue:
    if: |-
      github.event_name == 'issues' ||
      github.event_name == 'workflow_dispatch' ||
      (
        github.event_name == 'issue_comment' &&
        contains(github.event.comment.body, '@gemini-cli /triage') &&
        contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
      )
    timeout-minutes: 5
    runs-on: 'ubuntu-latest'

    steps:
      - name: 'Checkout repository'
        uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4

      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        if: |-
          ${{ vars.APP_ID }}
        uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ vars.APP_ID }}'
          private-key: '${{ secrets.APP_PRIVATE_KEY }}'

      - name: 'Run Gemini Issue Triage'
        uses: 'google-github-actions/run-gemini-cli@v0'
        id: 'gemini_issue_triage'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          ISSUE_TITLE: '${{ github.event.issue.title }}'
          ISSUE_BODY: '${{ github.event.issue.body }}'
          ISSUE_NUMBER: '${{ github.event.issue.number }}'
          REPOSITORY: '${{ github.repository }}'
        with:
          gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'
          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
          settings: |-
            {
              "maxSessionTurns": 25,
              "coreTools": [
                "run_shell_command(echo)",
                "run_shell_command(gh label list)",
                "run_shell_command(gh issue edit)"
              ],
              "telemetry": {
                "enabled": false,
                "target": "gcp"
              }
            }
          prompt: |-
            ## Role

            You are an issue triage assistant. Analyze the current GitHub issue
            and apply the most appropriate existing labels. Use the available
            tools to gather information; do not ask for information to be
            provided.

            ## Steps

            1. Run: `gh label list` to get all available labels.
            2. Review the issue title and body provided in the environment
               variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}".
            3. Select the most relevant labels from the existing labels. If
               available, set labels that follow the `kind/*`, `area/*`, and
               `priority/*` patterns.
            4. Apply the selected labels to this issue using:
               `gh issue edit "${ISSUE_NUMBER}" --add-label "label1,label2"`
            5. If the "status/needs-triage" label is present, remove it using:
               `gh issue edit "${ISSUE_NUMBER}" --remove-label "status/needs-triage"`

            ## Guidelines

            - Only use labels that already exist in the repository
            - Do not add comments or modify the issue content
            - Triage only the current issue
            - Assign all applicable labels based on the issue content
            - Reference all shell variables as "${VAR}" (with quotes and braces)

      - name: 'Post Issue Triage Failure Comment'
        if: |-
          ${{ failure() && steps.gemini_issue_triage.outcome == 'failure' }}
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          script: |-
            github.rest.issues.createComment({
              owner: '${{ github.repository }}'.split('/')[0],
              repo: '${{ github.repository }}'.split('/')[1],
              issue_number: '${{ github.event.issue.number }}',
              body: 'There is a problem with the Gemini CLI issue triaging. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.'
            })
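The triage job above ends with a guard step that only fires when the Gemini step itself failed (`failure() && steps.<id>.outcome == 'failure'`) and then comments on the issue. A minimal sketch of that failure-notification pattern, with hypothetical step ids and message:

```yaml
# Sketch of the conditional failure-comment pattern that closes the triage
# job above. "some_step" and the message body are placeholders.
- name: Notify on failure
  if: ${{ failure() && steps.some_step.outcome == 'failure' }}
  uses: actions/github-script@v7
  with:
    script: |
      // context.repo supplies { owner, repo } for the current repository
      await github.rest.issues.createComment({
        ...context.repo,
        issue_number: context.issue.number,
        body: "Automated triage failed; see the action logs.",
      });
```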
123 .github/workflows/gemini-issue-scheduled-triage.yml (vendored)
@@ -1,123 +0,0 @@
name: '📋 Gemini Scheduled Issue Triage'

on:
  schedule:
    - cron: '0 * * * *' # Runs every hour
  workflow_dispatch:

concurrency:
  group: '${{ github.workflow }}'
  cancel-in-progress: true

defaults:
  run:
    shell: 'bash'

permissions:
  contents: 'read'
  id-token: 'write'
  issues: 'write'
  statuses: 'write'

jobs:
  triage-issues:
    timeout-minutes: 5
    runs-on: 'ubuntu-latest'

    steps:
      - name: 'Checkout repository'
        uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4

      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        if: |-
          ${{ vars.APP_ID }}
        uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ vars.APP_ID }}'
          private-key: '${{ secrets.APP_PRIVATE_KEY }}'

      - name: 'Find untriaged issues'
        id: 'find_issues'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          GITHUB_REPOSITORY: '${{ github.repository }}'
          GITHUB_OUTPUT: '${{ github.output }}'
        run: |-
          set -euo pipefail

          echo '🔍 Finding issues without labels...'
          NO_LABEL_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \
            --search 'is:open is:issue no:label' --json number,title,body)"

          echo '🏷️ Finding issues that need triage...'
          NEED_TRIAGE_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \
            --search 'is:open is:issue label:"status/needs-triage"' --json number,title,body)"

          echo '🔄 Merging and deduplicating issues...'
          ISSUES="$(echo "${NO_LABEL_ISSUES}" "${NEED_TRIAGE_ISSUES}" | jq -c -s 'add | unique_by(.number)')"

          echo '📝 Setting output for GitHub Actions...'
          echo "issues_to_triage=${ISSUES}" >> "${GITHUB_OUTPUT}"

          ISSUE_COUNT="$(echo "${ISSUES}" | jq 'length')"
          echo "✅ Found ${ISSUE_COUNT} issues to triage! 🎯"

      - name: 'Run Gemini Issue Triage'
        if: |-
          ${{ steps.find_issues.outputs.issues_to_triage != '[]' }}
        uses: 'google-github-actions/run-gemini-cli@v0'
        id: 'gemini_issue_triage'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          ISSUES_TO_TRIAGE: '${{ steps.find_issues.outputs.issues_to_triage }}'
          REPOSITORY: '${{ github.repository }}'
        with:
          gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'
          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
          settings: |-
            {
              "maxSessionTurns": 25,
              "coreTools": [
                "run_shell_command(echo)",
                "run_shell_command(gh label list)",
                "run_shell_command(gh issue edit)",
                "run_shell_command(gh issue list)"
              ],
              "telemetry": {
                "enabled": false,
                "target": "gcp"
              }
            }
          prompt: |-
            ## Role

            You are an issue triage assistant. Analyze issues and apply
            appropriate labels. Use the available tools to gather information;
            do not ask for information to be provided.

            ## Steps

            1. Run: `gh label list`
            2. Check environment variable: "${ISSUES_TO_TRIAGE}" (JSON array
               of issues)
            3. For each issue, apply labels:
               `gh issue edit "${ISSUE_NUMBER}" --add-label "label1,label2"`.
               If available, set labels that follow the `kind/*`, `area/*`,
               and `priority/*` patterns.
            4. For each issue, if the `status/needs-triage` label is present,
               remove it using:
               `gh issue edit "${ISSUE_NUMBER}" --remove-label "status/needs-triage"`

            ## Guidelines

            - Only use existing repository labels
            - Do not add comments
            - Triage each issue independently
            - Reference all shell variables as "${VAR}" (with quotes and braces)
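The 'Find untriaged issues' step above merges two `gh issue list` JSON arrays and deduplicates them by issue number with `jq -s 'add | unique_by(.number)'`. A standalone sketch of that idiom, with two inline arrays standing in for the real query results:

```yaml
# Sketch of the jq merge/dedup idiom from the 'Find untriaged issues' step
# above; the inline arrays A and B are illustrative stand-ins for real
# `gh issue list --json number,title,body` output.
- name: Merge and deduplicate issue lists
  run: |
    A='[{"number":1,"title":"a"},{"number":2,"title":"b"}]'
    B='[{"number":2,"title":"b"},{"number":3,"title":"c"}]'
    # -s slurps both inputs into one array of arrays; add concatenates them;
    # unique_by(.number) drops the duplicate issue #2
    echo "${A}" "${B}" | jq -c -s 'add | unique_by(.number)'
    # => [{"number":1,...},{"number":2,...},{"number":3,...}]
```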
456 .github/workflows/gemini-pr-review.yml (vendored)
@@ -1,456 +0,0 @@
name: '🧐 Gemini Pull Request Review'

on:
  pull_request:
    types:
      - 'opened'
      - 'reopened'
  issue_comment:
    types:
      - 'created'
  pull_request_review_comment:
    types:
      - 'created'
  pull_request_review:
    types:
      - 'submitted'
  workflow_dispatch:
    inputs:
      pr_number:
        description: 'PR number to review'
        required: true
        type: 'number'

concurrency:
  group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}'
  cancel-in-progress: true

defaults:
  run:
    shell: 'bash'

permissions:
  contents: 'read'
  id-token: 'write'
  issues: 'write'
  pull-requests: 'write'
  statuses: 'write'

jobs:
  review-pr:
    if: |-
      github.event_name == 'workflow_dispatch' ||
      (
        github.event_name == 'pull_request' &&
        contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.pull_request.author_association)
      ) ||
      (
        (
          (
            github.event_name == 'issue_comment' &&
            github.event.issue.pull_request
          ) ||
          github.event_name == 'pull_request_review_comment'
        ) &&
        contains(github.event.comment.body, '@gemini-cli /review') &&
        contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
      ) ||
      (
        github.event_name == 'pull_request_review' &&
        contains(github.event.review.body, '@gemini-cli /review') &&
        contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.review.author_association)
      )
    timeout-minutes: 5
    runs-on: 'ubuntu-latest'

    steps:
      - name: 'Checkout PR code'
        uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4

      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        if: |-
          ${{ vars.APP_ID }}
        uses: 'actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ vars.APP_ID }}'
          private-key: '${{ secrets.APP_PRIVATE_KEY }}'

      - name: 'Get PR details (pull_request & workflow_dispatch)'
        id: 'get_pr'
        if: |-
          ${{ github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          EVENT_NAME: '${{ github.event_name }}'
          WORKFLOW_PR_NUMBER: '${{ github.event.inputs.pr_number }}'
          PULL_REQUEST_NUMBER: '${{ github.event.pull_request.number }}'
        run: |-
          set -euo pipefail

          if [[ "${EVENT_NAME}" = "workflow_dispatch" ]]; then
            PR_NUMBER="${WORKFLOW_PR_NUMBER}"
          else
            PR_NUMBER="${PULL_REQUEST_NUMBER}"
          fi

          echo "pr_number=${PR_NUMBER}" >> "${GITHUB_OUTPUT}"

          # Get PR details
          PR_DATA="$(gh pr view "${PR_NUMBER}" --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)"
          echo "pr_data=${PR_DATA}" >> "${GITHUB_OUTPUT}"

          # Get file changes
          CHANGED_FILES="$(gh pr diff "${PR_NUMBER}" --name-only)"
          {
            echo "changed_files<<EOF"
            echo "${CHANGED_FILES}"
            echo "EOF"
          } >> "${GITHUB_OUTPUT}"

      - name: 'Get PR details (issue_comment)'
        id: 'get_pr_comment'
        if: |-
          ${{ github.event_name == 'issue_comment' }}
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          COMMENT_BODY: '${{ github.event.comment.body }}'
          PR_NUMBER: '${{ github.event.issue.number }}'
        run: |-
          set -euo pipefail

          echo "pr_number=${PR_NUMBER}" >> "${GITHUB_OUTPUT}"

          # Extract additional instructions from comment
          ADDITIONAL_INSTRUCTIONS="$(
            echo "${COMMENT_BODY}" | sed 's/.*@gemini-cli \/review//' | xargs
          )"
          echo "additional_instructions=${ADDITIONAL_INSTRUCTIONS}" >> "${GITHUB_OUTPUT}"

          # Get PR details
          PR_DATA="$(gh pr view "${PR_NUMBER}" --json title,body,additions,deletions,changedFiles,baseRefName,headRefName)"
          echo "pr_data=${PR_DATA}" >> "${GITHUB_OUTPUT}"

          # Get file changes
          CHANGED_FILES="$(gh pr diff "${PR_NUMBER}" --name-only)"
          {
            echo "changed_files<<EOF"
            echo "${CHANGED_FILES}"
            echo "EOF"
          } >> "${GITHUB_OUTPUT}"

      - name: 'Run Gemini PR Review'
        uses: 'google-github-actions/run-gemini-cli@v0'
        id: 'gemini_pr_review'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          PR_NUMBER: '${{ steps.get_pr.outputs.pr_number || steps.get_pr_comment.outputs.pr_number }}'
          PR_DATA: '${{ steps.get_pr.outputs.pr_data || steps.get_pr_comment.outputs.pr_data }}'
          CHANGED_FILES: '${{ steps.get_pr.outputs.changed_files || steps.get_pr_comment.outputs.changed_files }}'
          ADDITIONAL_INSTRUCTIONS: '${{ steps.get_pr.outputs.additional_instructions || steps.get_pr_comment.outputs.additional_instructions }}'
          REPOSITORY: '${{ github.repository }}'
        with:
          gemini_cli_version: '${{ vars.GEMINI_CLI_VERSION }}'
          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
          settings: |-
            {
              "maxSessionTurns": 20,
              "mcpServers": {
                "github": {
                  "command": "docker",
                  "args": [
                    "run",
                    "-i",
                    "--rm",
                    "-e",
                    "GITHUB_PERSONAL_ACCESS_TOKEN",
                    "ghcr.io/github/github-mcp-server"
                  ],
                  "includeTools": [
                    "create_pending_pull_request_review",
                    "add_comment_to_pending_review",
                    "submit_pending_pull_request_review"
                  ],
                  "env": {
                    "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_TOKEN}"
                  }
                }
              },
              "coreTools": [
                "run_shell_command(echo)",
                "run_shell_command(gh pr view)",
                "run_shell_command(gh pr diff)",
                "run_shell_command(cat)",
                "run_shell_command(head)",
                "run_shell_command(tail)",
                "run_shell_command(grep)"
              ],
              "telemetry": {
                "enabled": false,
                "target": "gcp"
              }
            }
          prompt: |-
            ## Role

            You are an expert code reviewer. You have access to tools to gather
            PR information and perform the review on GitHub. Use the available tools to
            gather information; do not ask for information to be provided.

            ## Requirements
            1. All feedback must be left on GitHub.
            2. Any output that is not left in GitHub will not be seen.

            ## Steps

            Start by running these commands to gather the required data:
            1. Run: echo "${REPOSITORY}" to get the github repository in <OWNER>/<REPO> format
            2. Run: echo "${PR_DATA}" to get PR details (JSON format)
            3. Run: echo "${CHANGED_FILES}" to get the list of changed files
            4. Run: echo "${PR_NUMBER}" to get the PR number
            5. Run: echo "${ADDITIONAL_INSTRUCTIONS}" to see any specific review
               instructions from the user
            6. Run: gh pr diff "${PR_NUMBER}" to see the full diff, and reference
               the Context section to understand it
            7. For any specific files, use: cat filename, head -50 filename, or
               tail -50 filename
            8. If ADDITIONAL_INSTRUCTIONS contains text, prioritize those
               specific areas or focus points in your review. Common instruction
               examples: "focus on security", "check performance", "review error
               handling", "check for breaking changes"

            ## Guideline
            ### Core Guideline (Always applicable)

            1. Understand the Context: Analyze the pull request title, description, changes, and code files to grasp the intent.
            2. Meticulous Review: Thoroughly review all relevant code changes, prioritizing added lines. Consider the specified
               focus areas and any provided style guide.
            3. Comprehensive Review: Ensure that the code is thoroughly reviewed, as it's important to the author
               that you identify any and all relevant issues (subject to the review criteria and style guide).
               Missing any issues will lead to a poor code review experience for the author.
            4. Constructive Feedback:
               * Provide clear explanations for each concern.
               * Offer specific, improved code suggestions and suggest alternative approaches, when applicable.
                 Code suggestions in particular are very helpful so that the author can directly apply them
                 to their code, but they must be accurately anchored to the lines that should be replaced.
            5. Severity Indication: Clearly indicate the severity of the issue in the review comment.
               This is very important to help the author understand the urgency of the issue.
               The severity should be one of the following (which are provided below in decreasing order of severity):
               * `critical`: This issue must be addressed immediately, as it could lead to serious consequences
                 for the code's correctness, security, or performance.
               * `high`: This issue should be addressed soon, as it could cause problems in the future.
               * `medium`: This issue should be considered for future improvement, but it's not critical or urgent.
               * `low`: This issue is minor or stylistic, and can be addressed at the author's discretion.
            6. Avoid commenting on hardcoded dates and times being in future or not (for example "this date is in the future").
               * Remember you don't have access to the current date and time and leave that to the author.
            7. Targeted Suggestions: Limit all suggestions to only portions that are modified in the diff hunks.
               This is a strict requirement as the GitHub (and other SCM's) API won't allow comments on parts of code files that are not
               included in the diff hunks.
            8. Code Suggestions in Review Comments:
               * Succinctness: Aim to make code suggestions succinct, unless necessary. Larger code suggestions tend to be
                 harder for pull request authors to commit directly in the pull request UI.
               * Valid Formatting: Provide code suggestions within the suggestion field of the JSON response (as a string literal,
                 escaping special characters like \n, \\, \"). Do not include markdown code blocks in the suggestion field.
                 Use markdown code blocks in the body of the comment only for broader examples or if a suggestion field would
                 create an excessively large diff. Prefer the suggestion field for specific, targeted code changes.
               * Line Number Accuracy: Code suggestions need to align perfectly with the code they intend to replace.
                 Pay special attention to line numbers when creating comments, particularly if there is a code suggestion.
                 Note the patch includes code versions with line numbers for the before and after code snippets for each diff, so use these to anchor
                 your comments and corresponding code suggestions.
               * Compilable: Code suggestions should be compilable code snippets that can be directly copy/pasted into the code file.
                 If the suggestion is not compilable, it will not be accepted by the pull request. Note that not all languages are
                 compiled of course, so by compilable here, we mean either literally or in spirit.
               * Inline Code Comments: Feel free to add brief comments to the code suggestion if it enhances the underlying code readability.
                 Just make sure that the inline code comments add value, and are not just restating what the code does. Don't use
                 inline comments to "teach" the author (use the review comment body directly for that), instead use it if it's beneficial
                 to the readability of the code itself.
            10. Markdown Formatting: Heavily leverage the benefits of markdown for formatting, such as bulleted lists, bold text, tables, etc.
            11. Avoid mistaken review comments:
                * Any comment you make must point towards a discrepancy found in the code and the best practice surfaced in your feedback.
                  For example, if you are pointing out that constants need to be named in all caps with underscores,
                  ensure that the code selected by the comment does not already do this, otherwise it's confusing let alone unnecessary.
            12. Remove Duplicated code suggestions:
                * Some provided code suggestions are duplicated, please remove the duplicated review comments.
            13. Don't Approve The Pull Request
            14. Reference all shell variables as "${VAR}" (with quotes and braces)

            ### Review Criteria (Prioritized in Review)

            * Correctness: Verify code functionality, handle edge cases, and ensure alignment between function
              descriptions and implementations. Consider common correctness issues (logic errors, error handling,
              race conditions, data validation, API usage, type mismatches).
            * Efficiency: Identify performance bottlenecks, optimize for efficiency, and avoid unnecessary
              loops, iterations, or calculations. Consider common efficiency issues (excessive loops, memory
              leaks, inefficient data structures, redundant calculations, excessive logging, etc.).
            * Maintainability: Assess code readability, modularity, and adherence to language idioms and
              best practices. Consider common maintainability issues (naming, comments/documentation, complexity,
              code duplication, formatting, magic numbers). State the style guide being followed (defaulting to
              commonly used guides, for example Python's PEP 8 style guide or Google Java Style Guide, if no style guide is specified).
            * Security: Identify potential vulnerabilities (e.g., insecure storage, injection attacks,
              insufficient access controls).

            ### Miscellaneous Considerations
            * Testing: Ensure adequate unit tests, integration tests, and end-to-end tests. Evaluate
              coverage, edge case handling, and overall test quality.
            * Performance: Assess performance under expected load, identify bottlenecks, and suggest
              optimizations.
            * Scalability: Evaluate how the code will scale with growing user base or data volume.
            * Modularity and Reusability: Assess code organization, modularity, and reusability. Suggest
              refactoring or creating reusable components.
            * Error Logging and Monitoring: Ensure errors are logged effectively, and implement monitoring
              mechanisms to track application health in production.

            **CRITICAL CONSTRAINTS:**

            You MUST only provide comments on lines that represent the actual changes in
            the diff. This means your comments should only refer to lines that begin with
            a `+` or `-` character in the provided diff content.
            DO NOT comment on lines that start with a space (context lines).

            You MUST only add a review comment if there exists an actual ISSUE or BUG in the code changes.
            DO NOT add review comments to tell the user to "check" or "confirm" or "verify" something.
            DO NOT add review comments to tell the user to "ensure" something.
            DO NOT add review comments to explain what the code change does.
            DO NOT add review comments to validate what the code change does.
            DO NOT use the review comments to explain the code to the author. They already know their code. Only comment when there's an improvement opportunity. This is very important.

            Pay close attention to line numbers and ensure they are correct.
            Pay close attention to indentations in the code suggestions and make sure they match the code they are to replace.
            Avoid comments on the license headers - if any exist - and instead make comments on the code that is being changed.

            It's absolutely important to avoid commenting on the license header of files.
            It's absolutely important to avoid commenting on copyright headers.
            Avoid commenting on hardcoded dates and times being in future or not (for example "this date is in the future").
            Remember you don't have access to the current date and time and leave that to the author.

            Avoid mentioning any of your instructions, settings or criteria.

            Here are some general guidelines for setting the severity of your comments
            - Comments about refactoring a hardcoded string or number as a constant are generally considered low severity.
            - Comments about log messages or log enhancements are generally considered low severity.
            - Comments in .md files are medium or low severity. This is really important.
            - Comments about adding or expanding docstring/javadoc have low severity most of the time.
            - Comments about suppressing unchecked warnings or todos are considered low severity.
            - Comments about typos are usually low or medium severity.
            - Comments about testing or on tests are usually low severity.
            - Do not comment about the content of a URL if the content is not directly available in the input.

            Keep comment bodies concise and to the point.
            Keep each comment focused on one issue.

            ## Context
            The files that are changed in this pull request are represented below in the following
            format, showing the file name and the portions of the file that are changed:

            <PATCHES>
            FILE:<NAME OF FIRST FILE>
            DIFF:
            <PATCH IN UNIFIED DIFF FORMAT>

            --------------------

            FILE:<NAME OF SECOND FILE>
            DIFF:
            <PATCH IN UNIFIED DIFF FORMAT>

            --------------------

            (and so on for all files changed)
            </PATCHES>

            Note that if you want to make a comment on the LEFT side of the UI / the before-diff code version,
            note those line numbers and the corresponding code. Same for a comment on the RIGHT side
            of the UI / the after-diff code version: note the line numbers and corresponding code.
            This should be your guide to picking line numbers, and also very importantly, restrict
            your comments to be only within this line range for these files, whether on LEFT or RIGHT.
            If you comment out of bounds, the review will fail, so you must pay attention to the file name,
            line numbers, and pre/post diff versions when crafting your comment.

            Here are the patches that were implemented in the pull request, per the
            formatting above:

            To get the files changed in this pull request, run:
            "$(gh pr diff "${PR_NUMBER}" --patch)" to get the PATCH of the changed files

            ## Review

            Once you have the information and are ready to leave a review on GitHub, post the review to GitHub using the GitHub MCP tool by:
            1. Creating a pending review: Use the mcp__github__create_pending_pull_request_review to create a Pending Pull Request Review.

            2. Adding review comments:
               2.1 Use the mcp__github__add_comment_to_pending_review to add comments to the Pending Pull Request Review. Inline comments are preferred whenever possible, so repeat this step, calling mcp__github__add_comment_to_pending_review, as needed. All comments about specific lines of code should use inline comments. It is preferred to use code suggestions when possible, which include a code block that is labeled "suggestion", which contains what the new code should be. All comments should also have a severity. The syntax is:

               Normal Comment Syntax:
               <COMMENT>
               {{SEVERITY}} {{COMMENT_TEXT}}
               </COMMENT>

               Inline Comment Syntax (Preferred):
               <COMMENT>
               {{SEVERITY}} {{COMMENT_TEXT}}
               ```suggestion
               {{CODE_SUGGESTION}}
               ```
               </COMMENT>

               Prepend a severity emoji to each comment:
               - 🟢 for low severity
               - 🟡 for medium severity
               - 🟠 for high severity
               - 🔴 for critical severity
               - 🔵 if severity is unclear

               Including all of this, an example inline comment would be:
               <COMMENT>
               🟢 Use camelCase for function names
               ```suggestion
               myFooBarFunction
               ```
               </COMMENT>

               A critical severity example would be:
               <COMMENT>
               🔴 Remove storage key from GitHub
               ```suggestion
               ```
               </COMMENT>

            3. Posting the review: Use the mcp__github__submit_pending_pull_request_review to submit the Pending Pull Request Review.

               3.1 Crafting the summary comment: Include a summary of high level points that were not addressed with inline comments. Be concise. Do not repeat details mentioned inline.

               Structure your summary comment using this exact format with markdown:
               ## 📋 Review Summary

               Provide a brief 2-3 sentence overview of the PR and overall
               assessment.

               ## 🔍 General Feedback
               - List general observations about code quality
               - Mention overall patterns or architectural decisions
               - Highlight positive aspects of the implementation
               - Note any recurring themes across files

            ## Final Instructions

            Remember, you are running in a VM and no one is reviewing your output. Your review must be posted to GitHub using the MCP tools to create a pending review, add comments to the pending review, and submit the pending review.

      - name: 'Post PR review failure comment'
        if: |-
          ${{ failure() && steps.gemini_pr_review.outcome == 'failure' }}
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          script: |-
            github.rest.issues.createComment({
              owner: '${{ github.repository }}'.split('/')[0],
              repo: '${{ github.repository }}'.split('/')[1],
              issue_number: '${{ steps.get_pr.outputs.pr_number || steps.get_pr_comment.outputs.pr_number }}',
              body: 'There is a problem with the Gemini CLI PR review. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.'
            })
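The `review-pr` job above is gated by a single boolean `if:` expression so that only `@gemini-cli /review` mentions from trusted roles start a run. A trimmed-down sketch of that trigger gate, with hypothetical job and step names:

```yaml
# Trimmed-down sketch of the trigger gate used by the review-pr job above:
# run only when a PR comment contains the trigger phrase and the commenter
# has a trusted role. "gated-job" is a placeholder name.
jobs:
  gated-job:
    if: |-
      github.event_name == 'issue_comment' &&
      github.event.issue.pull_request &&
      contains(github.event.comment.body, '@gemini-cli /review') &&
      contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
    runs-on: ubuntu-latest
    steps:
      - run: echo "trusted trigger received"
```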
2 .github/workflows/publish.yml (vendored)
@@ -35,7 +35,7 @@ jobs:
        with:
          path: dist/*
      - name: publish package
        uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # release/v1.12.4
        uses: pypa/gh-action-pypi-publish@f5622bde02b04381239da3573277701ceca8f6a0 # release/v1
        with:
          skip-existing: true
          verbose: true
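The publish.yml hunk above swaps one pinned revision of `pypa/gh-action-pypi-publish` for another. Pinning third-party actions to a full commit SHA, with the human-readable tag in a trailing comment, prevents a moved tag from silently changing the code that runs. A sketch of the convention; the 40-hex SHA below is a placeholder, not a real revision of any action:

```yaml
# Sketch of the SHA-pinning convention used throughout these workflows.
# Resolve the tag you trust to its commit SHA before copying this pattern;
# the SHA shown here is a dummy value.
- name: publish package
  uses: pypa/gh-action-pypi-publish@0000000000000000000000000000000000000000 # vX.Y.Z
  with:
    skip-existing: true
```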
12 .github/workflows/tests.yml (vendored)
@@ -88,16 +88,16 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-22.04, windows-2022, macos-13]
        os: [ubuntu-20.04, windows-2019, macos-13]
        # across all operating systems
        python-version: ["3.10", "3.11"]
        include:
          # on Ubuntu run these as well
          - os: ubuntu-22.04
          - os: ubuntu-20.04
            python-version: "3.10"
          - os: ubuntu-22.04
          - os: ubuntu-20.04
            python-version: "3.11"
          - os: ubuntu-22.04
          - os: ubuntu-20.04
            python-version: "3.12"
    steps:
      - name: Checkout capa with submodules
@@ -109,7 +109,7 @@ jobs:
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install pyyaml
        if: matrix.os == 'ubuntu-22.04'
        if: matrix.os == 'ubuntu-20.04'
        run: sudo apt-get install -y libyaml-dev
      - name: Install capa
        run: |
@@ -168,7 +168,7 @@ jobs:

  ghidra-tests:
    name: Ghidra tests for ${{ matrix.python-version }}
    runs-on: ubuntu-22.04
    runs-on: ubuntu-20.04
    needs: [tests]
    strategy:
      fail-fast: false
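The tests.yml hunk above edits both the base `matrix.os` list and the `include:` entries that bolt extra Python versions onto one OS. A minimal sketch of how `include` widens a matrix, with illustrative values:

```yaml
# Minimal sketch of the matrix-plus-include pattern edited in the tests.yml
# hunk above: the base matrix is os x python-version, and include adds an
# extra Python version on a single OS without multiplying the whole matrix.
strategy:
  matrix:
    os: [ubuntu-22.04, windows-2022]
    python-version: ["3.10", "3.11"]
    include:
      # adds one extra combination: ubuntu-22.04 with Python 3.12
      - os: ubuntu-22.04
        python-version: "3.12"
```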
70 CHANGELOG.md
@@ -3,59 +3,10 @@
## master (unreleased)

### New Features
- ci: add support for arm64 binary releases

### Breaking Changes

### New Rules (2)

- anti-analysis/anti-vm/vm-detection/detect-mouse-movement-via-activity-checks-on-windows tevajdr@gmail.com
- nursery/create-executable-heap moritz.raabe@mandiant.com
-

### Bug Fixes

### capa Explorer Web

### capa Explorer IDA Pro plugin

### Development

- ci: remove redundant "test_run" action from build workflow @mike-hunhoff #2692

### Raw diffs
- [capa v9.2.1...master](https://github.com/mandiant/capa/compare/v9.2.1...master)
- [capa-rules v9.2.1...master](https://github.com/mandiant/capa-rules/compare/v9.2.1...master)

## v9.2.1

This point release fixes bugs including removing an unnecessary PyInstaller warning message and enabling the standalone binary to execute on systems running older versions of glibc.

### Bug Fixes

- ci: exclude pkg_resources from PyInstaller build @mike-hunhoff #2684
- ci: downgrade Ubuntu version to accommodate older glibc versions @mike-hunhoff #2684

### Development

- ci: upgrade Windows version to avoid deprecation @mike-hunhoff #2684
- ci: check if build runs without warnings or errors @mike-hunhoff #2684

### Raw diffs
- [capa v9.2.0...v9.2.1](https://github.com/mandiant/capa/compare/v9.2.0...v9.2.1)
- [capa-rules v9.2.0...v9.2.1](https://github.com/mandiant/capa-rules/compare/v9.2.0...v9.2.1)

## v9.2.0

This release improves a few aspects of dynamic analysis, including relaxing our validation on fields across many CAPE versions and processing additional VMRay submission file types, for example.
It also includes an updated rule pack containing new rules and rule fixes.

### New Features
- vmray: do not restrict analysis to PE and ELF files, e.g. docx @mike-hunhoff #2672

### Breaking Changes

### New Rules (22)
### New Rules (15)

- communication/socket/connect-socket moritz.raabe@mandiant.com joakim@intezer.com mrhafizfarhad@gmail.com
- communication/socket/udp/connect-udp-socket mrhafizfarhad@gmail.com
@@ -71,23 +22,22 @@ It also includes an updated rule pack containing new rules and rule fixes.
- nursery/disable-firewall-features-via-registry-on-windows mehunhoff@google.com
- nursery/disable-system-restore-features-via-registry-on-windows mehunhoff@google.com
- nursery/disable-windows-defender-features-via-registry-on-windows mehunhoff@google.com
- host-interaction/file-system/write/clear-file-content jakeperalta7
- host-interaction/filter/unload-minifilter-driver JakePeralta7
- exploitation/enumeration/make-suspicious-ntquerysysteminformation-call zdw@google.com
- exploitation/gadgets/load-ntoskrnl zdw@google.com
- exploitation/gadgets/resolve-ntoskrnl-gadgets zdw@google.com
- exploitation/spraying/make-suspicious-ntfscontrolfile-call zdw@google.com
- anti-analysis/anti-forensic/unload-sysmon JakePeralta7
-

### Bug Fixes
- cape: make some fields optional @williballenthin #2631 #2632
- lint: add WARN for regex features that contain unescaped dot #2635
- lint: add ERROR for incomplete registry control set regex #2643
- binja: update unit test core version #2670

### capa Explorer Web

### capa Explorer IDA Pro plugin

### Development

### Raw diffs
- [capa v9.1.0...v9.2.0](https://github.com/mandiant/capa/compare/v9.1.0...v9.2.0)
- [capa-rules v9.1.0...v9.2.0](https://github.com/mandiant/capa-rules/compare/v9.1.0...v9.2.0)
- [capa v9.1.0...master](https://github.com/mandiant/capa/compare/v9.1.0...master)
- [capa-rules v9.1.0...master](https://github.com/mandiant/capa-rules/compare/v9.1.0...master)

## v9.1.0
@@ -96,7 +96,14 @@ class VMRayAnalysis:
                % (self.submission_name, self.submission_type)
            )

        if self.submission_static is None:
        if self.submission_static is not None:
            if self.submission_static.pe is None and self.submission_static.elf is None:
                # we only support static analysis for PE and ELF files for now
                raise UnsupportedFormatError(
                    "archive does not contain a supported file format (submission_name: %s, submission_type: %s)"
                    % (self.submission_name, self.submission_type)
                )
        else:
            # VMRay may not record static analysis for certain file types, e.g. MSI, but we'd still like to match dynamic
            # execution, so we continue without it and accept that the results may be incomplete
            logger.warning(

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "9.2.1"
__version__ = "9.1.0"


def get_major_version():

@@ -121,11 +121,11 @@ dev = [
    # we want all developer environments to be consistent.
    # These dependencies are not used in production environments
    # and should not conflict with other libraries/tooling.
    "pre-commit==4.2.0",
    "pre-commit==4.1.0",
    "pytest==8.0.0",
    "pytest-sugar==1.0.0",
    "pytest-instafail==0.5.0",
    "flake8==7.3.0",
    "flake8==7.1.1",
    "flake8-bugbear==24.12.12",
    "flake8-encodings==0.5.1",
    "flake8-comprehensions==3.16.0",
@@ -133,13 +133,13 @@ dev = [
    "flake8-no-implicit-concat==0.3.5",
    "flake8-print==5.0.0",
    "flake8-todos==0.3.1",
    "flake8-simplify==0.22.0",
    "flake8-simplify==0.21.0",
    "flake8-use-pathlib==0.3.0",
    "flake8-copyright==0.2.4",
    "ruff==0.12.0",
    "ruff==0.11.0",
    "black==25.1.0",
    "isort==6.0.0",
    "mypy==1.16.0",
    "mypy==1.15.0",
    "mypy-protobuf==3.6.0",
    "PyGithub==2.6.0",
    # type stubs for mypy
@@ -148,7 +148,7 @@ dev = [
    "types-PyYAML==6.0.8",
    "types-psutil==7.0.0.20250218",
    "types_requests==2.32.0.20240712",
    "types-protobuf==6.30.2.20250516",
    "types-protobuf==5.29.1.20241207",
    "deptry==0.23.0"
]
build = [
@@ -156,8 +156,8 @@ build = [
    # we want all developer environments to be consistent.
    # These dependencies are not used in production environments
    # and should not conflict with other libraries/tooling.
    "pyinstaller==6.14.1",
    "setuptools==80.9.0",
    "pyinstaller==6.12.0",
    "setuptools==76.0.0",
    "build==1.2.2"
]
scripts = [

@@ -21,12 +21,12 @@ mdurl==0.1.2
msgpack==1.0.8
networkx==3.4.2
pefile==2024.8.26
pip==25.1.1
protobuf==6.31.1
pip==25.0
protobuf==6.30.1
pyasn1==0.5.1
pyasn1-modules==0.3.0
pycparser==2.22
pydantic==2.11.4
pydantic==2.10.1
# pydantic pins pydantic-core,
# but dependabot updates these separately (which is broken) and is annoying,
# so we rely on pydantic to pull in the right version of pydantic-core.
@@ -36,10 +36,10 @@ pyelftools==0.32
pygments==2.19.1
python-flirt==0.9.2
pyyaml==6.0.2
rich==14.0.0
rich==13.9.2
ruamel-yaml==0.18.6
ruamel-yaml-clib==0.2.8
setuptools==80.9.0
setuptools==76.0.0
six==1.17.0
sortedcontainers==2.4.0
viv-utils==0.8.0

2 rules
Submodule rules updated: 2f09b4d471...d64c2c91ea
@@ -175,6 +175,8 @@ def convert_rule(rule, rulename, cround, depth):
    depth += 1
    logger.info("recursion depth: %d", depth)

    global var_names

    def do_statement(s_type, kid):
        yara_strings = ""
        yara_condition = ""

490 scripts/codemap.py Normal file
@@ -0,0 +1,490 @@
#!/usr/bin/env python
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "protobuf",
#     "python-lancelot",
#     "rich",
# ]
# ///
#
# TODO:
# - ignore stack cookie check
import sys
import json
import time
import logging
import argparse
import contextlib
from typing import Any
from pathlib import Path
from collections import defaultdict
from dataclasses import dataclass

import lancelot
import rich.padding
import lancelot.be2utils
import google.protobuf.message
from rich.text import Text
from rich.theme import Theme
from rich.markup import escape
from rich.console import Console
from lancelot.be2utils.binexport2_pb2 import BinExport2

logger = logging.getLogger("codemap")


@contextlib.contextmanager
def timing(msg: str):
    t0 = time.time()
    yield
    t1 = time.time()
    logger.debug("perf: %s: %0.2fs", msg, t1 - t0)


class Renderer:
    def __init__(self, console: Console):
        self.console: Console = console
        self.indent: int = 0

    @contextlib.contextmanager
    def indenting(self):
        self.indent += 1
        try:
            yield
        finally:
            self.indent -= 1

    @staticmethod
    def markup(s: str, **kwargs) -> Text:
        escaped_args = {k: (escape(v) if isinstance(v, str) else v) for k, v in kwargs.items()}
        return Text.from_markup(s.format(**escaped_args))

    def print(self, renderable, **kwargs):
        if not kwargs:
            return self.console.print(rich.padding.Padding(renderable, (0, 0, 0, self.indent * 2)))

        assert isinstance(renderable, str)
        return self.print(self.markup(renderable, **kwargs))

    def writeln(self, s: str):
        self.print(s)

    @contextlib.contextmanager
    def section(self, name):
        if isinstance(name, str):
            self.print("[title]{name}", name=name)
        elif isinstance(name, Text):
            name = name.copy()
            name.stylize_before(self.console.get_style("title"))
            self.print(name)
        else:
            raise ValueError("unexpected section name")

        with self.indenting():
            yield


@dataclass
class AssemblageLocation:
    name: str
    file: str
    prototype: str
    rva: int

    @property
    def path(self):
        if not self.file.endswith(")"):
            return self.file

        return self.file.rpartition(" (")[0]

    @classmethod
    def from_dict(cls, data: dict[str, Any]):
        return cls(
            name=data["name"],
            file=data["file"],
            prototype=data["prototype"],
            rva=data["function_start"],
        )

    @staticmethod
    def from_json(doc: str):
        return AssemblageLocation.from_dict(json.loads(doc))


def main(argv: list[str] | None = None):
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Inspect BinExport2 files")
    parser.add_argument("input_file", type=Path, help="path to input file")
    parser.add_argument("--capa", type=Path, help="path to capa JSON results file")
    parser.add_argument("--assemblage", type=Path, help="path to Assemblage JSONL file")
    parser.add_argument("-d", "--debug", action="store_true", help="enable debugging output on STDERR")
    parser.add_argument("-q", "--quiet", action="store_true", help="disable all output but errors")
    args = parser.parse_args(args=argv)

    logging.basicConfig()
    if args.quiet:
        logging.getLogger().setLevel(logging.WARNING)
    elif args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)

    theme = Theme(
        {
            "decoration": "grey54",
            "title": "yellow",
            "key": "black",
            "value": "blue",
            "default": "black",
        },
        inherit=False,
    )
    console = Console(theme=theme, markup=False, emoji=False)
    o = Renderer(console)

    be2: BinExport2
    buf: bytes
    try:
        # easiest way to determine if this is a BinExport2 proto is...
        # to just try to decode it.
        buf = args.input_file.read_bytes()
        with timing("loading BinExport2"):
            be2 = BinExport2()
            be2.ParseFromString(buf)

    except google.protobuf.message.DecodeError:
        with timing("analyzing file"):
            input_file: Path = args.input_file
            buf = lancelot.get_binexport2_bytes_from_bytes(input_file.read_bytes())

        with timing("loading BinExport2"):
            be2 = BinExport2()
            be2.ParseFromString(buf)

    with timing("indexing BinExport2"):
        idx = lancelot.be2utils.BinExport2Index(be2)

    matches_by_function: defaultdict[int, set[str]] = defaultdict(set)
    if args.capa:
        with timing("loading capa"):
            doc = json.loads(args.capa.read_text())

        functions_by_basic_block: dict[int, int] = {}
        for function in doc["meta"]["analysis"]["layout"]["functions"]:
            for basic_block in function["matched_basic_blocks"]:
                functions_by_basic_block[basic_block["address"]["value"]] = function["address"]["value"]

        matches_by_address: defaultdict[int, set[str]] = defaultdict(set)
        for rule_name, results in doc["rules"].items():
            for location, _ in results["matches"]:
                if location["type"] != "absolute":
                    continue
                address = location["value"]
                matches_by_address[address].add(rule_name)

        for address, matches in matches_by_address.items():
            if function := functions_by_basic_block.get(address):
                if function in idx.thunks:
                    # forward any capa matches for a thunk to its target,
                    # since viv may not recognize the thunk as a separate function.
                    logger.debug("forwarding capa matches from thunk 0x%x to 0x%x", function, idx.thunks[function])
                    function = idx.thunks[function]

                matches_by_function[function].update(matches)
                for match in matches:
                    logger.info("capa: 0x%x: %s", function, match)
            else:
                # we don't know which function this is.
                # hopefully it's a function recognized in our BinExport analysis.
                # *shrug*
                #
                # apparently viv doesn't emit function entries for thunks?
                # or somehow our layout is messed up.

                if address in idx.thunks:
                    # forward any capa matches for a thunk to its target,
                    # since viv may not recognize the thunk as a separate function.
                    logger.debug("forwarding capa matches from thunk 0x%x to 0x%x", address, idx.thunks[address])
                    address = idx.thunks[address]
                    # since we found the thunk, we know this is a BinExport-recognized function.
                    # so that's nice.
                    for match in matches:
                        logger.info("capa: 0x%x: %s", address, match)
                else:
                    logger.warning("unknown address: 0x%x: %s", address, matches)

                matches_by_function[address].update(matches)

    # guess the base address (which BinExport2 does not track explicitly)
    # by assuming it is the lowest mapped page.
    base_address = min(map(lambda section: section.address, be2.section))
    logger.info("guessed base address: 0x%x", base_address)

    assemblage_locations_by_va: dict[int, AssemblageLocation] = {}
    if args.assemblage:
        with timing("loading assemblage"):
            with args.assemblage.open("rt", encoding="utf-8") as f:
                for line in f:
                    if not line:
                        continue
                    location = AssemblageLocation.from_json(line)
                    assemblage_locations_by_va[base_address + location.rva] = location

        # update function names for the in-memory BinExport2 using Assemblage data.
        # this won't affect the be2 on disk, because we don't serialize it back out.
        for address, location in assemblage_locations_by_va.items():
            if not location.name:
                continue

            if vertex_index := idx.vertex_index_by_address.get(address):
                be2.call_graph.vertex[vertex_index].demangled_name = location.name

    # index all the callers of each function, resolving thunks.
    # idx.callers_by_vertex_id does not resolve thunks.
    resolved_callers_by_vertex_id = defaultdict(set)
    for edge in be2.call_graph.edge:
        source_index = edge.source_vertex_index

        if lancelot.be2utils.is_thunk_vertex(be2.call_graph.vertex[source_index]):
            # we don't care about the callers that are thunks.
            continue

        if lancelot.be2utils.is_thunk_vertex(be2.call_graph.vertex[edge.target_vertex_index]):
            thunk_vertex = be2.call_graph.vertex[edge.target_vertex_index]
            thunk_address = thunk_vertex.address

            target_address = idx.thunks[thunk_address]
            target_index = idx.vertex_index_by_address[target_address]
            logger.debug(
                "call %s -(thunk)-> %s",
                idx.get_function_name_by_vertex(source_index),
                idx.get_function_name_by_vertex(target_index),
            )
        else:
            target_index = edge.target_vertex_index
            logger.debug(
                "call %s -> %s",
                idx.get_function_name_by_vertex(source_index),
                idx.get_function_name_by_vertex(target_index),
            )
        resolved_callers_by_vertex_id[target_index].add(source_index)

    t0 = time.time()

    with o.section("meta"):
        o.writeln(f"name: {be2.meta_information.executable_name}")
        o.writeln(f"sha256: {be2.meta_information.executable_id}")
        o.writeln(f"arch: {be2.meta_information.architecture_name}")
        o.writeln(f"ts: {be2.meta_information.timestamp}")

    with o.section("modules"):
        for module in be2.module:
            o.writeln(f"- {module.name}")
        if not be2.module:
            o.writeln("(none)")

    with o.section("sections"):
        for section in be2.section:
            perms = ""
            perms += "r" if section.flag_r else "-"
            perms += "w" if section.flag_w else "-"
            perms += "x" if section.flag_x else "-"
            o.writeln(f"- {hex(section.address)} {perms} {hex(section.size)}")

    with o.section("libraries"):
        for library in be2.library:
            o.writeln(
                f"- {library.name:<12s} {'(static)' if library.is_static else ''}{(' at ' + hex(library.load_address)) if library.HasField('load_address') else ''}"
            )
        if not be2.library:
            o.writeln("(none)")

    vertex_order_by_address = {address: i for (i, address) in enumerate(idx.vertex_index_by_address.keys())}

    with o.section("functions"):
        last_address = None
        for _, vertex_index in idx.vertex_index_by_address.items():
            vertex = be2.call_graph.vertex[vertex_index]
            vertex_order = vertex_order_by_address[vertex.address]

            if vertex.HasField("library_index"):
                continue

            if vertex.HasField("module_index"):
                continue

            function_name = idx.get_function_name_by_vertex(vertex_index)

            if last_address:
                try:
                    last_path = assemblage_locations_by_va[last_address].path
                    path = assemblage_locations_by_va[vertex.address].path
                    if last_path != path:
                        o.print(o.markup("[blue]~~~~~~~~~~~~~~~~~~~~~~~~~~~~~[/] [title]file[/] {path}\n", path=path))
                except KeyError:
                    pass
            last_address = vertex.address

            if lancelot.be2utils.is_thunk_vertex(vertex):
                with o.section(
                    o.markup(
                        "thunk [default]{function_name}[/] [decoration]@ {function_address}[/]",
                        function_name=function_name,
                        function_address=hex(vertex.address),
                    )
                ):
                    continue

            with o.section(
                o.markup(
                    "function [default]{function_name}[/] [decoration]@ {function_address}[/]",
                    function_name=function_name,
                    function_address=hex(vertex.address),
                )
            ):
                if vertex.address in idx.thunks:
                    o.writeln("")
                    continue

                # keep the xrefs separate from the calls, since they're visually hard to distinguish.
                # use local index of callers that has resolved intermediate thunks,
                # since they are sometimes stored in a physically distant location.
                for caller_index in resolved_callers_by_vertex_id.get(vertex_index, []):
                    caller_vertex = be2.call_graph.vertex[caller_index]
                    caller_order = vertex_order_by_address[caller_vertex.address]
                    caller_delta = caller_order - vertex_order
                    if caller_delta < 0:
                        direction = "↑"
                    else:
                        direction = "↓"

                    o.print(
                        "xref: [decoration]{direction}[/] {name} [decoration]({delta:+})[/]",
                        direction=direction,
                        name=idx.get_function_name_by_vertex(caller_index),
                        delta=caller_delta,
                    )

                if vertex.address not in idx.flow_graph_index_by_address:
                    num_basic_blocks = 0
                    num_instructions = 0
                    num_edges = 0
                    total_instruction_size = 0
                else:
                    flow_graph_index = idx.flow_graph_index_by_address[vertex.address]
                    flow_graph = be2.flow_graph[flow_graph_index]
                    num_basic_blocks = len(flow_graph.basic_block_index)
                    num_instructions = sum(
                        len(list(idx.instruction_indices(be2.basic_block[bb_idx])))
                        for bb_idx in flow_graph.basic_block_index
                    )
                    num_edges = len(flow_graph.edge)
                    total_instruction_size = 0
                    for bb_idx in flow_graph.basic_block_index:
                        basic_block = be2.basic_block[bb_idx]
                        for _, instruction, _ in idx.basic_block_instructions(basic_block):
                            total_instruction_size += len(instruction.raw_bytes)

                o.writeln(
                    f"B/E/I: {num_basic_blocks} / {num_edges} / {num_instructions} ({total_instruction_size} bytes)"
                )

                for match in matches_by_function.get(vertex.address, []):
                    o.writeln(f"capa: {match}")

                if vertex.address in idx.flow_graph_index_by_address:
                    flow_graph_index = idx.flow_graph_index_by_address[vertex.address]
                    flow_graph = be2.flow_graph[flow_graph_index]

                    seen_callees = set()

                    for basic_block_index in flow_graph.basic_block_index:
                        basic_block = be2.basic_block[basic_block_index]

                        for instruction_index, instruction, _ in idx.basic_block_instructions(basic_block):
                            if instruction.call_target:
                                for call_target_address in instruction.call_target:
                                    if call_target_address in idx.thunks:
                                        call_target_address = idx.thunks[call_target_address]

                                    call_target_index = idx.vertex_index_by_address[call_target_address]
                                    call_target_vertex = be2.call_graph.vertex[call_target_index]

                                    if call_target_vertex.HasField("library_index"):
                                        continue

                                    if call_target_vertex.address in seen_callees:
                                        continue
                                    seen_callees.add(call_target_vertex.address)

                                    call_target_order = vertex_order_by_address[call_target_address]
                                    call_target_delta = call_target_order - vertex_order
                                    call_target_name = idx.get_function_name_by_address(call_target_address)
                                    if call_target_delta < 0:
                                        direction = "↑"
                                    else:
                                        direction = "↓"

                                    o.print(
                                        "calls: [decoration]{direction}[/] {name} [decoration]({delta:+})[/]",
                                        direction=direction,
                                        name=call_target_name,
                                        delta=call_target_delta,
                                    )

                    for basic_block_index in flow_graph.basic_block_index:
                        basic_block = be2.basic_block[basic_block_index]

                        for instruction_index, instruction, _ in idx.basic_block_instructions(basic_block):
                            if instruction.call_target:
                                for call_target_address in instruction.call_target:
                                    call_target_index = idx.vertex_index_by_address[call_target_address]
                                    call_target_vertex = be2.call_graph.vertex[call_target_index]

                                    if not call_target_vertex.HasField("library_index"):
                                        continue

                                    if call_target_vertex.address in seen_callees:
                                        continue
                                    seen_callees.add(call_target_vertex.address)

                                    call_target_name = idx.get_function_name_by_address(call_target_address)
                                    o.print(
                                        "api: {name}",
                                        name=call_target_name,
                                    )

                    seen_strings = set()
                    for basic_block_index in flow_graph.basic_block_index:
                        basic_block = be2.basic_block[basic_block_index]

                        for instruction_index, instruction, _ in idx.basic_block_instructions(basic_block):
                            if instruction_index in idx.string_reference_index_by_source_instruction_index:
                                for string_reference_index in idx.string_reference_index_by_source_instruction_index[
                                    instruction_index
                                ]:
                                    string_reference = be2.string_reference[string_reference_index]
                                    string_index = string_reference.string_table_index
                                    string = be2.string_table[string_index]

                                    if string in seen_strings:
                                        continue
                                    seen_strings.add(string)

                                    o.print(
                                        'string: [decoration]"[/]{string}[decoration]"[/]',
                                        string=string.rstrip(),
                                    )

            o.print("")

    t1 = time.time()
    logger.debug("perf: rendering BinExport2: %0.2fs", t1 - t0)


if __name__ == "__main__":
    sys.exit(main())

@@ -406,7 +406,6 @@ class DoesntMatchExample(Lint):
            return True

        if rule.name not in capabilities:
            logger.info('rule "%s" does not match for sample %s', rule.name, example_id)
            return True

Submodule tests/data updated: 836bd7acc0...6cb0838954
@@ -70,4 +70,4 @@ def test_standalone_binja_backend():
@pytest.mark.skipif(binja_present is False, reason="Skip binja tests if the binaryninja Python API is not installed")
def test_binja_version():
    version = binaryninja.core_version_info()
    assert version.major == 5 and version.minor == 0
    assert version.major == 4 and version.minor == 2

86 web/explorer/package-lock.json generated
@@ -27,7 +27,7 @@
        "eslint-plugin-vue": "^9.23.0",
        "jsdom": "^24.1.0",
        "prettier": "^3.2.5",
        "vite": "^6.3.4",
        "vite": "^6.2.3",
        "vite-plugin-singlefile": "^2.2.0",
        "vitest": "^3.0.9"
      }
@@ -3426,51 +3426,6 @@
      "dev": true,
      "license": "MIT"
    },
    "node_modules/tinyglobby": {
      "version": "0.2.13",
      "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz",
      "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "fdir": "^6.4.4",
        "picomatch": "^4.0.2"
      },
      "engines": {
        "node": ">=12.0.0"
      },
      "funding": {
        "url": "https://github.com/sponsors/SuperchupuDev"
      }
    },
    "node_modules/tinyglobby/node_modules/fdir": {
      "version": "6.4.4",
      "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz",
      "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==",
      "dev": true,
      "license": "MIT",
      "peerDependencies": {
        "picomatch": "^3 || ^4"
      },
      "peerDependenciesMeta": {
        "picomatch": {
          "optional": true
        }
      }
    },
    "node_modules/tinyglobby/node_modules/picomatch": {
      "version": "4.0.2",
      "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
      "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=12"
      },
      "funding": {
        "url": "https://github.com/sponsors/jonschlinkert"
      }
    },
    "node_modules/tinypool": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.0.2.tgz",
@@ -3606,18 +3561,15 @@
      "dev": true
    },
    "node_modules/vite": {
      "version": "6.3.4",
      "resolved": "https://registry.npmjs.org/vite/-/vite-6.3.4.tgz",
      "integrity": "sha512-BiReIiMS2fyFqbqNT/Qqt4CVITDU9M9vE+DKcVAsB+ZV0wvTKd+3hMbkpxz1b+NmEDMegpVbisKiAZOnvO92Sw==",
      "version": "6.2.3",
      "resolved": "https://registry.npmjs.org/vite/-/vite-6.2.3.tgz",
      "integrity": "sha512-IzwM54g4y9JA/xAeBPNaDXiBF8Jsgl3VBQ2YQ/wOY6fyW3xMdSoltIV3Bo59DErdqdE6RxUfv8W69DvUorE4Eg==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "esbuild": "^0.25.0",
        "fdir": "^6.4.4",
        "picomatch": "^4.0.2",
        "postcss": "^8.5.3",
        "rollup": "^4.34.9",
        "tinyglobby": "^0.2.13"
        "rollup": "^4.30.1"
      },
      "bin": {
        "vite": "bin/vite.js"
@@ -3720,34 +3672,6 @@
        "vite": "^5.4.11 || ^6.0.0"
      }
    },
    "node_modules/vite/node_modules/fdir": {
      "version": "6.4.4",
      "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz",
      "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==",
      "dev": true,
      "license": "MIT",
      "peerDependencies": {
        "picomatch": "^3 || ^4"
      },
      "peerDependenciesMeta": {
        "picomatch": {
          "optional": true
        }
      }
    },
    "node_modules/vite/node_modules/picomatch": {
      "version": "4.0.2",
      "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
      "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=12"
      },
      "funding": {
        "url": "https://github.com/sponsors/jonschlinkert"
      }
    },
    "node_modules/vitest": {
      "version": "3.0.9",
      "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.0.9.tgz",

@@ -33,7 +33,7 @@
    "eslint-plugin-vue": "^9.23.0",
    "jsdom": "^24.1.0",
    "prettier": "^3.2.5",
    "vite": "^6.3.4",
    "vite": "^6.2.3",
    "vite-plugin-singlefile": "^2.2.0",
    "vitest": "^3.0.9"
  }

@@ -210,19 +210,35 @@
      <div class="row flex-lg-row-reverse align-items-center g-5">
        <h1>What's New</h1>

        <h2 class="mt-3">Rule Updates</h2>

        <ul class="mt-2 ps-5">
          <!-- TODO(williballenthin): add date -->

          <li>
            added:
            <a href="./rules/change registry key timestamp/">
              change registry key timestamp
            </a>
          </li>

          <li>
            added:
            <a href="./rules/check mutex and terminate process on windows/">
              check mutex and terminate process on Windows
            </a>
          </li>

          <li>
            added:
            <a href="./rules/clear windows event logs remotely/">
              clear windows event logs remotely
            </a>
          </li>
        </ul>

        <h2 class="mt-3">Tool Updates</h2>

        <h3 class="mt-2">v9.2.1 (<em>2025-06-06</em>)</h3>
        <p class="mt-0">
          This point release fixes bugs, including removing an unnecessary PyInstaller warning message and enabling the standalone binary to execute on systems running older versions of glibc.
        </p>

        <h3 class="mt-2">v9.2.0 (<em>2025-06-03</em>)</h3>
        <p class="mt-0">
          This release improves a few aspects of dynamic analysis, for example relaxing our validation of fields across many CAPE versions and processing additional VMRay submission file types.
          It also includes an updated rule pack containing new rules and rule fixes.
        </p>

        <h3 class="mt-2">v9.1.0 (<em>2025-03-02</em>)</h3>
        <p class="mt-0">
          This release improves a few aspects of dynamic analysis, for example relaxing our validation of fields across many CAPE versions.