Compare commits

1 commit

Author: Moritz
SHA1: 9df16d337a
Message: Fix typo in build:bundle
Date: 2024-08-12 17:28:27 +02:00
151 changed files with 5970 additions and 15088 deletions


@@ -4,6 +4,3 @@ updates:
directory: "/"
schedule:
interval: "weekly"
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-patch"]


@@ -1,5 +1,8 @@
[mypy]
[mypy-tqdm.*]
ignore_missing_imports = True
[mypy-ruamel.*]
ignore_missing_imports = True


@@ -2,6 +2,7 @@
# Copyright (C) 2020 Mandiant, Inc. All Rights Reserved.
import sys
import wcwidth
import capa.rules.cache
from pathlib import Path
@@ -28,6 +29,13 @@ a = Analysis(
("../../rules", "rules"),
("../../sigs", "sigs"),
("../../cache", "cache"),
# capa.render.default uses tabulate that depends on wcwidth.
# it seems wcwidth uses a json file `version.json`
# and this doesn't get picked up by pyinstaller automatically.
# so we manually embed the wcwidth resources here.
#
# ref: https://stackoverflow.com/a/62278462/87207
(Path(wcwidth.__file__).parent, "wcwidth"),
],
# when invoking pyinstaller from the project root,
# this gets run from the project root.
@@ -40,6 +48,11 @@ a = Analysis(
"tkinter",
"_tkinter",
"Tkinter",
# tqdm provides renderers for ipython,
# however, this drags in a lot of dependencies.
# since we don't spawn a notebook, we can safely remove these.
"IPython",
"ipywidgets",
# these are pulled in by networkx
# but we don't need to compute the strongly connected components.
"numpy",
@@ -57,10 +70,7 @@ a = Analysis(
"qt5",
"pyqtwebengine",
"pyasn1",
# don't pull in Binary Ninja/IDA bindings that should
# only be installed locally.
"binaryninja",
"ida",
],
)
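
Aside: the wcwidth comment above illustrates a common PyInstaller pattern: when a package loads non-Python resources at runtime (here wcwidth's `version.json`), PyInstaller's import analysis misses them, so the spec lists the package directory in `datas` as a (source, destination) pair. A minimal sketch of computing such a pair, assuming wcwidth is installed:

    # locate wcwidth's on-disk package directory so a .spec file can embed it
    # via Analysis(datas=[...]); each entry is (source path, folder in bundle)
    from pathlib import Path

    import wcwidth

    wcwidth_datas = (str(Path(wcwidth.__file__).parent), "wcwidth")
    print(wcwidth_datas)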


@@ -30,8 +30,8 @@ jobs:
python_version: 3.8
- os: ubuntu-20.04
artifact_name: capa
asset_name: linux-py312
python_version: 3.12
asset_name: linux-py311
python_version: 3.11
- os: windows-2019
artifact_name: capa.exe
asset_name: windows
@@ -88,7 +88,7 @@ jobs:
asset_name: linux
- os: ubuntu-22.04
artifact_name: capa
asset_name: linux-py312
asset_name: linux-py311
- os: windows-2022
artifact_name: capa.exe
asset_name: windows
@@ -114,7 +114,7 @@ jobs:
include:
- asset_name: linux
artifact_name: capa
- asset_name: linux-py312
- asset_name: linux-py311
artifact_name: capa
- asset_name: windows
artifact_name: capa.exe


@@ -2,7 +2,7 @@ name: deploy web to GitHub Pages
on:
push:
branches: [ master ]
branches: [ master, "wb/webui-actions-1" ]
paths:
- 'web/**'
@@ -22,7 +22,6 @@ concurrency:
jobs:
build-landing-page:
name: Build landing page
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -33,7 +32,6 @@ jobs:
path: './web/public'
build-explorer:
name: Build capa Explorer Web
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -43,7 +41,7 @@ jobs:
fetch-depth: 1
show-progress: true
- name: Set up Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
@@ -55,7 +53,7 @@ jobs:
run: npm run build:bundle
working-directory: ./web/explorer
- name: Zip release bundle
run: zip -r public/capa-explorer-web.zip capa-explorer-web
run: zip -r capa-explorer-web.zip capa-explorer-web
working-directory: ./web/explorer
- name: Build
run: npm run build
@@ -65,51 +63,12 @@ jobs:
name: explorer
path: './web/explorer/dist'
build-rules:
name: Build rules site
runs-on: ubuntu-latest
steps:
- name: Check out the repository
uses: actions/checkout@v4
with:
submodules: 'recursive'
# full depth so that capa-rules has a full history
# and we can construct a timeline of rule updates.
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: '3.12'
- uses: extractions/setup-just@v2
- name: Install pagefind
uses: supplypike/setup-bin@v4
with:
uri: "https://github.com/CloudCannon/pagefind/releases/download/v1.1.0/pagefind-v1.1.0-x86_64-unknown-linux-musl.tar.gz"
name: "pagefind"
version: "1.1.0"
- name: Install dependencies
working-directory: ./web/rules
run: pip install -r requirements.txt
- name: Build the website
working-directory: ./web/rules
run: just build
- name: Index the website
working-directory: ./web/rules
run: pagefind --site "public"
# upload the build website to artifacts
# so that we can download and inspect, if desired.
- uses: actions/upload-artifact@v4
with:
name: rules
path: './web/rules/public'
deploy:
name: Deploy site to GitHub Pages
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: [build-landing-page, build-explorer, build-rules]
needs: [build-landing-page, build-explorer]
steps:
- uses: actions/download-artifact@v4
with:
@@ -119,10 +78,6 @@ jobs:
with:
name: explorer
path: './public/explorer'
- uses: actions/download-artifact@v4
with:
name: rules
path: './public/rules'
- name: Setup Pages
uses: actions/configure-pages@v4
- name: Upload artifact


@@ -19,7 +19,7 @@ jobs:
show-progress: true
- name: Set up Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
uses: actions/setup-node@v3
with:
node-version: 20
cache: 'npm'

.gitignore

@@ -126,5 +126,3 @@ Pipfile.lock
.github/binja/binaryninja
.github/binja/download_headless.py
.github/binja/BinaryNinja-headless.zip
justfile
data/


@@ -1,25 +0,0 @@
@isort:
pre-commit run isort --show-diff-on-failure --all-files
@black:
pre-commit run black --show-diff-on-failure --all-files
@ruff:
pre-commit run ruff --all-files
@flake8:
pre-commit run flake8 --hook-stage manual --all-files
@mypy:
pre-commit run mypy --hook-stage manual --all-files
@deptry:
pre-commit run deptry --hook-stage manual --all-files
@lint:
-just isort
-just black
-just ruff
-just flake8
-just mypy
-just deptry
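
Aside: in the deleted justfile, the `@` prefix suppresses echoing a recipe's commands and the `-` prefix tells just to keep going when a recipe fails, so `just lint` runs every linter even if an early one errors. A rough Python equivalent under those assumptions, using only the pre-commit invocations shown above:

    # run each linter via pre-commit, continuing past failures
    # (mirrors just's `-` ignore-error recipe prefix)
    import subprocess

    CHECKS = [
        ["pre-commit", "run", "isort", "--show-diff-on-failure", "--all-files"],
        ["pre-commit", "run", "ruff", "--all-files"],
        ["pre-commit", "run", "mypy", "--hook-stage", "manual", "--all-files"],
    ]

    for cmd in CHECKS:
        subprocess.run(cmd, check=False)  # check=False: a failing linter does not stop the rest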


@@ -38,7 +38,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -56,7 +55,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -74,7 +72,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -89,11 +86,10 @@ repos:
- "--config"
- ".github/flake8.ini"
- "--extend-exclude"
- "capa/render/proto/capa_pb2.py,capa/features/extractors/binexport2/binexport2_pb2.py"
- "capa/render/proto/capa_pb2.py"
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false
@@ -111,7 +107,6 @@ repos:
- "capa/"
- "scripts/"
- "tests/"
- "web/rules/scripts/"
always_run: true
pass_filenames: false

File diff suppressed because it is too large


@@ -1,16 +1,4 @@
<br />
<div align="center">
<a href="https://mandiant.github.io/capa/" target="_blank">
<img src="https://github.com/mandiant/capa/blob/master/.github/logo.png">
</a>
<p align="center">
<a href="https://mandiant.github.io/capa/" target="_blank">Website</a>
|
<a href="https://github.com/mandiant/capa/releases/latest" target="_blank">Download</a>
|
<a href="https://mandiant.github.io/capa/explorer/" target="_blank">Web Interface</a>
</p>
<div align="center">
![capa](https://github.com/mandiant/capa/blob/master/.github/logo.png)
[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/flare-capa)](https://pypi.org/project/flare-capa)
[![Last release](https://img.shields.io/github/v/release/mandiant/capa)](https://github.com/mandiant/capa/releases)
@@ -19,16 +7,11 @@
[![Downloads](https://img.shields.io/github/downloads/mandiant/capa/total)](https://github.com/mandiant/capa/releases)
[![License](https://img.shields.io/badge/license-Apache--2.0-green.svg)](LICENSE.txt)
</div>
</div>
---
capa detects capabilities in executable files.
You run it against a PE, ELF, .NET module, shellcode file, or a sandbox report and it tells you what it thinks the program can do.
For example, it might suggest that the file is a backdoor, is capable of installing services, or relies on HTTP to communicate.
To interactively inspect capa results in your browser use the [capa Explorer Web](https://mandiant.github.io/capa/explorer/).
To interactively inspect capa results in your browser use the [capa web explorer](https://mandiant.github.io/capa/explorer/).
If you want to inspect or write capa rules, head on over to the [capa-rules repository](https://github.com/mandiant/capa-rules). Otherwise, keep reading.
@@ -89,12 +72,12 @@ Download stable releases of the standalone capa binaries [here](https://github.c
To use capa as a library or integrate with another tool, see [doc/installation.md](https://github.com/mandiant/capa/blob/master/doc/installation.md) for further setup instructions.
# capa Explorer Web
The [capa Explorer Web](https://mandiant.github.io/capa/explorer/) enables you to interactively explore capa results in your web browser. Besides the online version you can download a standalone HTML file for local offline usage.
# web explorer
The [capa web explorer](https://mandiant.github.io/capa/explorer/) enables you to interactively explore capa results in your web browser. Besides the online version you can download a standalone HTML file for local offline usage.
![capa Explorer Web screenshot](https://github.com/mandiant/capa/blob/master/doc/img/capa_web_explorer.png)
![capa web explorer screenshot](https://github.com/mandiant/capa/blob/master/doc/img/capa_web_explorer.png)
More details on the web UI is available in the [capa Explorer Web README](https://github.com/mandiant/capa/blob/master/web/explorer/README.md).
More details on the web UI is available in the [capa web explorer README](https://github.com/mandiant/capa/blob/master/web/explorer/README.md).
# example
@@ -150,15 +133,13 @@ function @ 0x4011C0
...
```
capa also supports dynamic capabilities detection for multiple sandboxes including:
* [CAPE](https://github.com/kevoreilly/CAPEv2) (supported report formats: `.json`, `.json_`, `.json.gz`)
* [DRAKVUF](https://github.com/CERT-Polska/drakvuf-sandbox/) (supported report formats: `.log`, `.log.gz`)
* [VMRay](https://www.vmray.com/) (supported report formats: analysis archive `.zip`)
## analyzing sandbox reports
Additionally, capa also supports analyzing sandbox reports for dynamic capability extraction.
In order to use this, you first submit your sample to one of supported sandboxes for analysis, and then run capa against the generated report file.
Currently, capa supports the [CAPE sandbox](https://github.com/kevoreilly/CAPEv2) and the [DRAKVUF sandbox](https://github.com/CERT-Polska/drakvuf-sandbox/). In order to use either, simply run capa against the generated file (JSON for CAPE or LOG for DRAKVUF sandbox) and it will automatically detect the sandbox and extract capabilities from it.
To use this feature, submit your file to a supported sandbox and then download and run capa against the generated report file. This feature enables capa to match capabilities against dynamic and static features that the sandbox captured during execution.
Here's an example of running capa against a packed file, and then running capa against the CAPE report generated for the same packed file:
Here's an example of running capa against a packed binary, and then running capa against the CAPE report of that binary:
```yaml
$ capa 05be49819139a3fdcdbddbdefd298398779521f3d68daa25275cc77508e42310.exe
@@ -288,7 +269,6 @@ Please learn to write rules and contribute new entries as you find interesting t
# IDA Pro plugin: capa explorer
If you use IDA Pro, then you can use the [capa explorer](https://github.com/mandiant/capa/tree/master/capa/ida/plugin) plugin.
capa explorer helps you identify interesting areas of a program and build new capa rules using features extracted directly from your IDA Pro database.
It also uses your local changes to the .idb to extract better features, such as when you rename a global variable that contains a dynamically resolved API address.
![capa + IDA Pro integration](https://github.com/mandiant/capa/blob/master/doc/img/explorer_expanded.png)


@@ -6,16 +6,20 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import sys
import logging
import itertools
import collections
from typing import Any, List, Tuple
from typing import Any, Tuple
import tqdm
import capa.perf
import capa.features.freeze as frz
import capa.render.result_document as rdoc
from capa.rules import Scope, RuleSet
from capa.engine import FeatureSet, MatchResults
from capa.helpers import redirecting_print_to_tqdm
from capa.capabilities.common import find_file_capabilities
from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, ProcessHandle, DynamicFeatureExtractor
@@ -135,30 +139,38 @@ def find_dynamic_capabilities(
feature_counts = rdoc.DynamicFeatureCounts(file=0, processes=())
assert isinstance(extractor, DynamicFeatureExtractor)
processes: List[ProcessHandle] = list(extractor.get_processes())
n_processes: int = len(processes)
with redirecting_print_to_tqdm(disable_progress):
with tqdm.contrib.logging.logging_redirect_tqdm():
pbar = tqdm.tqdm
if disable_progress:
# do not use tqdm to avoid unnecessary side effects when caller intends
# to disable progress completely
def pbar(s, *args, **kwargs):
return s
with capa.helpers.CapaProgressBar(
console=capa.helpers.log_console, transient=True, disable=disable_progress
) as pbar:
task = pbar.add_task("matching", total=n_processes, unit="processes")
for p in processes:
process_matches, thread_matches, call_matches, feature_count = find_process_capabilities(
ruleset, extractor, p
)
feature_counts.processes += (
rdoc.ProcessFeatureCount(address=frz.Address.from_capa(p.address), count=feature_count),
)
logger.debug("analyzed %s and extracted %d features", p.address, feature_count)
elif not sys.stderr.isatty():
# don't display progress bar when stderr is redirected to a file
def pbar(s, *args, **kwargs):
return s
for rule_name, res in process_matches.items():
all_process_matches[rule_name].extend(res)
for rule_name, res in thread_matches.items():
all_thread_matches[rule_name].extend(res)
for rule_name, res in call_matches.items():
all_call_matches[rule_name].extend(res)
processes = list(extractor.get_processes())
pbar.advance(task)
pb = pbar(processes, desc="matching", unit=" processes", leave=False)
for p in pb:
process_matches, thread_matches, call_matches, feature_count = find_process_capabilities(
ruleset, extractor, p
)
feature_counts.processes += (
rdoc.ProcessFeatureCount(address=frz.Address.from_capa(p.address), count=feature_count),
)
logger.debug("analyzed %s and extracted %d features", p.address, feature_count)
for rule_name, res in process_matches.items():
all_process_matches[rule_name].extend(res)
for rule_name, res in thread_matches.items():
all_thread_matches[rule_name].extend(res)
for rule_name, res in call_matches.items():
all_call_matches[rule_name].extend(res)
# collection of features that captures the rule matches within process and thread scopes.
# mapping from feature (matched rule) to set of addresses at which it matched.
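
Aside: the tqdm-based code restored here swaps in a pass-through stand-in for the progress bar when progress is disabled or stderr is not a terminal, so callers keep identical iteration semantics with no rendering side effects. A minimal, self-contained sketch of that pattern:

    import sys

    import tqdm

    def make_pbar(disable_progress: bool):
        if disable_progress or not sys.stderr.isatty():
            # identity wrapper: same call shape as tqdm.tqdm, renders nothing
            def pbar(iterable, *args, **kwargs):
                return iterable

            return pbar
        return tqdm.tqdm

    for p in make_pbar(disable_progress=False)(range(3), desc="matching", unit=" processes", leave=False):
        pass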


@@ -6,18 +6,21 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import sys
import time
import logging
import itertools
import collections
from typing import Any, List, Tuple
from typing import Any, Tuple
import tqdm.contrib.logging
import capa.perf
import capa.helpers
import capa.features.freeze as frz
import capa.render.result_document as rdoc
from capa.rules import Scope, RuleSet
from capa.engine import FeatureSet, MatchResults
from capa.helpers import redirecting_print_to_tqdm
from capa.capabilities.common import find_file_capabilities
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle, StaticFeatureExtractor
@@ -140,58 +143,75 @@ def find_static_capabilities(
library_functions: Tuple[rdoc.LibraryFunction, ...] = ()
assert isinstance(extractor, StaticFeatureExtractor)
functions: List[FunctionHandle] = list(extractor.get_functions())
n_funcs: int = len(functions)
n_libs: int = 0
percentage: float = 0
with redirecting_print_to_tqdm(disable_progress):
with tqdm.contrib.logging.logging_redirect_tqdm():
pbar = tqdm.tqdm
if capa.helpers.is_runtime_ghidra():
# Ghidrathon interpreter cannot properly handle
# the TMonitor thread that is created via a monitor_interval
# > 0
pbar.monitor_interval = 0
if disable_progress:
# do not use tqdm to avoid unnecessary side effects when caller intends
# to disable progress completely
def pbar(s, *args, **kwargs):
return s
with capa.helpers.CapaProgressBar(
console=capa.helpers.log_console, transient=True, disable=disable_progress
) as pbar:
task = pbar.add_task(
"matching", total=n_funcs, unit="functions", postfix=f"skipped {n_libs} library functions, {percentage}%"
)
for f in functions:
t0 = time.time()
if extractor.is_library_function(f.address):
function_name = extractor.get_function_name(f.address)
logger.debug("skipping library function 0x%x (%s)", f.address, function_name)
library_functions += (
rdoc.LibraryFunction(address=frz.Address.from_capa(f.address), name=function_name),
elif not sys.stderr.isatty():
# don't display progress bar when stderr is redirected to a file
def pbar(s, *args, **kwargs):
return s
functions = list(extractor.get_functions())
n_funcs = len(functions)
pb = pbar(functions, desc="matching", unit=" functions", postfix="skipped 0 library functions", leave=False)
for f in pb:
t0 = time.time()
if extractor.is_library_function(f.address):
function_name = extractor.get_function_name(f.address)
logger.debug("skipping library function 0x%x (%s)", f.address, function_name)
library_functions += (
rdoc.LibraryFunction(address=frz.Address.from_capa(f.address), name=function_name),
)
n_libs = len(library_functions)
percentage = round(100 * (n_libs / n_funcs))
if isinstance(pb, tqdm.tqdm):
pb.set_postfix_str(f"skipped {n_libs} library functions ({percentage}%)")
continue
function_matches, bb_matches, insn_matches, feature_count = find_code_capabilities(
ruleset, extractor, f
)
n_libs = len(library_functions)
percentage = round(100 * (n_libs / n_funcs))
pbar.update(task, postfix=f"skipped {n_libs} library functions, {percentage}%")
pbar.advance(task)
continue
feature_counts.functions += (
rdoc.FunctionFeatureCount(address=frz.Address.from_capa(f.address), count=feature_count),
)
t1 = time.time()
function_matches, bb_matches, insn_matches, feature_count = find_code_capabilities(ruleset, extractor, f)
feature_counts.functions += (
rdoc.FunctionFeatureCount(address=frz.Address.from_capa(f.address), count=feature_count),
)
t1 = time.time()
match_count = 0
for name, matches_ in itertools.chain(
function_matches.items(), bb_matches.items(), insn_matches.items()
):
# in practice, most matches are derived rules,
# like "check OS version/5bf4c7f39fd4492cbed0f6dc7d596d49"
# but when we log to the human, they really care about "real" rules.
if not ruleset.rules[name].is_subscope_rule():
match_count += len(matches_)
match_count = 0
for name, matches_ in itertools.chain(function_matches.items(), bb_matches.items(), insn_matches.items()):
if not ruleset.rules[name].is_subscope_rule():
match_count += len(matches_)
logger.debug(
"analyzed function 0x%x and extracted %d features, %d matches in %0.02fs",
f.address,
feature_count,
match_count,
t1 - t0,
)
logger.debug(
"analyzed function 0x%x and extracted %d features, %d matches in %0.02fs",
f.address,
feature_count,
match_count,
t1 - t0,
)
for rule_name, res in function_matches.items():
all_function_matches[rule_name].extend(res)
for rule_name, res in bb_matches.items():
all_bb_matches[rule_name].extend(res)
for rule_name, res in insn_matches.items():
all_insn_matches[rule_name].extend(res)
pbar.advance(task)
for rule_name, res in function_matches.items():
all_function_matches[rule_name].extend(res)
for rule_name, res in bb_matches.items():
all_bb_matches[rule_name].extend(res)
for rule_name, res in insn_matches.items():
all_insn_matches[rule_name].extend(res)
# collection of features that captures the rule matches within function, BB, and instruction scopes.
# mapping from feature (matched rule) to set of addresses at which it matched.
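
Aside: the static path additionally reports how many functions were skipped as library code through the bar's postfix. A short sketch of that bookkeeping; `is_library_function` here is a hypothetical stand-in for the extractor's real check:

    import tqdm

    functions = list(range(100))

    def is_library_function(f) -> bool:  # hypothetical stand-in predicate
        return f % 4 == 0

    n_libs = 0
    pb = tqdm.tqdm(functions, desc="matching", unit=" functions", postfix="skipped 0 library functions", leave=False)
    for f in pb:
        if is_library_function(f):
            n_libs += 1
            percentage = round(100 * (n_libs / len(functions)))
            pb.set_postfix_str(f"skipped {n_libs} library functions ({percentage}%)")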


@@ -23,15 +23,3 @@ class UnsupportedOSError(ValueError):
class EmptyReportError(ValueError):
pass
class InvalidArgument(ValueError):
pass
class NonExistantFunctionError(ValueError):
pass
class NonExistantProcessError(ValueError):
pass


@@ -424,11 +424,10 @@ class Arch(Feature):
OS_WINDOWS = "windows"
OS_LINUX = "linux"
OS_MACOS = "macos"
OS_ANDROID = "android"
# dotnet
OS_ANY = "any"
VALID_OS = {os.value for os in capa.features.extractors.elf.OS}
VALID_OS.update({OS_WINDOWS, OS_LINUX, OS_MACOS, OS_ANY, OS_ANDROID})
VALID_OS.update({OS_WINDOWS, OS_LINUX, OS_MACOS, OS_ANY})
# internal only, not to be used in rules
OS_AUTO = "auto"
@@ -463,8 +462,6 @@ FORMAT_SC32 = "sc32"
FORMAT_SC64 = "sc64"
FORMAT_CAPE = "cape"
FORMAT_DRAKVUF = "drakvuf"
FORMAT_VMRAY = "vmray"
FORMAT_BINEXPORT2 = "binexport2"
FORMAT_FREEZE = "freeze"
FORMAT_RESULT = "result"
STATIC_FORMATS = {
@@ -475,12 +472,10 @@ STATIC_FORMATS = {
FORMAT_DOTNET,
FORMAT_FREEZE,
FORMAT_RESULT,
FORMAT_BINEXPORT2,
}
DYNAMIC_FORMATS = {
FORMAT_CAPE,
FORMAT_DRAKVUF,
FORMAT_VMRAY,
FORMAT_FREEZE,
FORMAT_RESULT,
}


@@ -9,9 +9,7 @@
import abc
import hashlib
import dataclasses
from copy import copy
from types import MethodType
from typing import Any, Set, Dict, Tuple, Union, Iterator
from typing import Any, Dict, Tuple, Union, Iterator
from dataclasses import dataclass
# TODO(williballenthin): use typing.TypeAlias directly when Python 3.9 is deprecated
@@ -298,22 +296,6 @@ class StaticFeatureExtractor:
raise NotImplementedError()
def FunctionFilter(extractor: StaticFeatureExtractor, functions: Set) -> StaticFeatureExtractor:
original_get_functions = extractor.get_functions
def filtered_get_functions(self):
yield from (f for f in original_get_functions() if f.address in functions)
# we make a copy of the original extractor object and then update its get_functions() method with the decorated filter one.
# this is in order to preserve the original extractor object's get_functions() method, in case it is used elsewhere in the code.
# an example where this is important is in our testfiles where we may use the same extractor object with different tests,
# with some of these tests needing to install a functions filter on the extractor object.
new_extractor = copy(extractor)
new_extractor.get_functions = MethodType(filtered_get_functions, extractor) # type: ignore
return new_extractor
@dataclass
class ProcessHandle:
"""
@@ -485,20 +467,4 @@ class DynamicFeatureExtractor:
raise NotImplementedError()
def ProcessFilter(extractor: DynamicFeatureExtractor, processes: Set) -> DynamicFeatureExtractor:
original_get_processes = extractor.get_processes
def filtered_get_processes(self):
yield from (f for f in original_get_processes() if f.address.pid in processes)
# we make a copy of the original extractor object and then update its get_processes() method with the decorated filter one.
# this is in order to preserve the original extractor object's get_processes() method, in case it is used elsewhere in the code.
# an example where this is important is in our testfiles where we may use the same extractor object with different tests,
# with some of these tests needing to install a processes filter on the extractor object.
new_extractor = copy(extractor)
new_extractor.get_processes = MethodType(filtered_get_processes, extractor) # type: ignore
return new_extractor
FeatureExtractor: TypeAlias = Union[StaticFeatureExtractor, DynamicFeatureExtractor]


@@ -1,416 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Proto files generated via protobuf v24.4:
protoc --python_out=. --mypy_out=. binexport2.proto
from BinExport2 at 6916731d5f6693c4a4f0a052501fd3bd92cfd08b
https://github.com/google/binexport/blob/6916731/binexport2.proto
"""
import io
import hashlib
import logging
import contextlib
from typing import Set, Dict, List, Tuple, Iterator
from pathlib import Path
from collections import defaultdict
from dataclasses import dataclass
from pefile import PE
from elftools.elf.elffile import ELFFile
import capa.features.common
import capa.features.extractors.common
import capa.features.extractors.binexport2.helpers
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
def get_binexport2(sample: Path) -> BinExport2:
be2: BinExport2 = BinExport2()
be2.ParseFromString(sample.read_bytes())
return be2
def compute_common_prefix_length(m: str, n: str) -> int:
# ensure #m < #n
if len(n) < len(m):
m, n = n, m
for i, c in enumerate(m):
if n[i] != c:
return i
return len(m)
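# example (hypothetical names): for "sample.exe" and "sample.exe.BinExport2"
# the common prefix is "sample.exe", so this returns 10; used below to rank
# candidate sample files by filename similarity.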
def get_sample_from_binexport2(input_file: Path, be2: BinExport2, search_paths: List[Path]) -> Path:
"""attempt to find the sample file, given a BinExport2 file.
searches in the same directory as the BinExport2 file, and then in search_paths.
"""
def filename_similarity_key(p: Path) -> Tuple[int, str]:
# note closure over input_file.
# sort first by length of common prefix, then by name (for stability)
return (compute_common_prefix_length(p.name, input_file.name), p.name)
wanted_sha256: str = be2.meta_information.executable_id.lower()
input_directory: Path = input_file.parent
siblings: List[Path] = [p for p in input_directory.iterdir() if p.is_file()]
siblings.sort(key=filename_similarity_key, reverse=True)
for sibling in siblings:
# e.g. with open IDA files in the same directory on Windows
with contextlib.suppress(PermissionError):
if hashlib.sha256(sibling.read_bytes()).hexdigest().lower() == wanted_sha256:
return sibling
for search_path in search_paths:
candidates: List[Path] = [p for p in search_path.iterdir() if p.is_file()]
candidates.sort(key=filename_similarity_key, reverse=True)
for candidate in candidates:
with contextlib.suppress(PermissionError):
if hashlib.sha256(candidate.read_bytes()).hexdigest().lower() == wanted_sha256:
return candidate
raise ValueError("cannot find sample, you may specify the path using the CAPA_SAMPLES_DIR environment variable")
class BinExport2Index:
def __init__(self, be2: BinExport2):
self.be2: BinExport2 = be2
self.callers_by_vertex_index: Dict[int, List[int]] = defaultdict(list)
self.callees_by_vertex_index: Dict[int, List[int]] = defaultdict(list)
# note: flow graph != call graph (vertex)
self.flow_graph_index_by_address: Dict[int, int] = {}
self.flow_graph_address_by_index: Dict[int, int] = {}
# edges that come from the given basic block
self.source_edges_by_basic_block_index: Dict[int, List[BinExport2.FlowGraph.Edge]] = defaultdict(list)
# edges that end up at the given basic block
self.target_edges_by_basic_block_index: Dict[int, List[BinExport2.FlowGraph.Edge]] = defaultdict(list)
self.vertex_index_by_address: Dict[int, int] = {}
self.data_reference_index_by_source_instruction_index: Dict[int, List[int]] = defaultdict(list)
self.data_reference_index_by_target_address: Dict[int, List[int]] = defaultdict(list)
self.string_reference_index_by_source_instruction_index: Dict[int, List[int]] = defaultdict(list)
self.insn_address_by_index: Dict[int, int] = {}
self.insn_index_by_address: Dict[int, int] = {}
self.insn_by_address: Dict[int, BinExport2.Instruction] = {}
# must index instructions first
self._index_insn_addresses()
self._index_vertex_edges()
self._index_flow_graph_nodes()
self._index_flow_graph_edges()
self._index_call_graph_vertices()
self._index_data_references()
self._index_string_references()
def get_insn_address(self, insn_index: int) -> int:
assert insn_index in self.insn_address_by_index, f"insn must be indexed, missing {insn_index}"
return self.insn_address_by_index[insn_index]
def get_basic_block_address(self, basic_block_index: int) -> int:
basic_block: BinExport2.BasicBlock = self.be2.basic_block[basic_block_index]
first_instruction_index: int = next(self.instruction_indices(basic_block))
return self.get_insn_address(first_instruction_index)
def _index_vertex_edges(self):
for edge in self.be2.call_graph.edge:
if not edge.source_vertex_index:
continue
if not edge.target_vertex_index:
continue
self.callers_by_vertex_index[edge.target_vertex_index].append(edge.source_vertex_index)
self.callees_by_vertex_index[edge.source_vertex_index].append(edge.target_vertex_index)
def _index_flow_graph_nodes(self):
for flow_graph_index, flow_graph in enumerate(self.be2.flow_graph):
function_address: int = self.get_basic_block_address(flow_graph.entry_basic_block_index)
self.flow_graph_index_by_address[function_address] = flow_graph_index
self.flow_graph_address_by_index[flow_graph_index] = function_address
def _index_flow_graph_edges(self):
for flow_graph in self.be2.flow_graph:
for edge in flow_graph.edge:
if not edge.HasField("source_basic_block_index") or not edge.HasField("target_basic_block_index"):
continue
self.source_edges_by_basic_block_index[edge.source_basic_block_index].append(edge)
self.target_edges_by_basic_block_index[edge.target_basic_block_index].append(edge)
def _index_call_graph_vertices(self):
for vertex_index, vertex in enumerate(self.be2.call_graph.vertex):
if not vertex.HasField("address"):
continue
vertex_address: int = vertex.address
self.vertex_index_by_address[vertex_address] = vertex_index
def _index_data_references(self):
for data_reference_index, data_reference in enumerate(self.be2.data_reference):
self.data_reference_index_by_source_instruction_index[data_reference.instruction_index].append(
data_reference_index
)
self.data_reference_index_by_target_address[data_reference.address].append(data_reference_index)
def _index_string_references(self):
for string_reference_index, string_reference in enumerate(self.be2.string_reference):
self.string_reference_index_by_source_instruction_index[string_reference.instruction_index].append(
string_reference_index
)
def _index_insn_addresses(self):
# see https://github.com/google/binexport/blob/39f6445c232bb5caf5c4a2a996de91dfa20c48e8/binexport.cc#L45
if len(self.be2.instruction) == 0:
return
assert self.be2.instruction[0].HasField("address"), "first insn must have explicit address"
addr: int = 0
next_addr: int = 0
for idx, insn in enumerate(self.be2.instruction):
if insn.HasField("address"):
addr = insn.address
next_addr = addr + len(insn.raw_bytes)
else:
addr = next_addr
next_addr += len(insn.raw_bytes)
self.insn_address_by_index[idx] = addr
self.insn_index_by_address[addr] = idx
self.insn_by_address[addr] = insn
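# worked example (hypothetical): for instructions with raw_bytes lengths
# [4, 4, 2] where only the first carries address=0x1000, this loop assigns
# addresses 0x1000, 0x1004, 0x1008: the encoding is run-length decoded.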
@staticmethod
def instruction_indices(basic_block: BinExport2.BasicBlock) -> Iterator[int]:
"""
For a given basic block, enumerate the instruction indices.
"""
for index_range in basic_block.instruction_index:
if not index_range.HasField("end_index"):
yield index_range.begin_index
continue
else:
yield from range(index_range.begin_index, index_range.end_index)
def basic_block_instructions(
self, basic_block: BinExport2.BasicBlock
) -> Iterator[Tuple[int, BinExport2.Instruction, int]]:
"""
For a given basic block, enumerate the instruction indices,
the instruction instances, and their addresses.
"""
for instruction_index in self.instruction_indices(basic_block):
instruction: BinExport2.Instruction = self.be2.instruction[instruction_index]
instruction_address: int = self.get_insn_address(instruction_index)
yield instruction_index, instruction, instruction_address
def get_function_name_by_vertex(self, vertex_index: int) -> str:
vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[vertex_index]
name: str = f"sub_{vertex.address:x}"
if vertex.HasField("mangled_name"):
name = vertex.mangled_name
if vertex.HasField("demangled_name"):
name = vertex.demangled_name
if vertex.HasField("library_index"):
library: BinExport2.Library = self.be2.library[vertex.library_index]
if library.HasField("name"):
name = f"{library.name}!{name}"
return name
def get_function_name_by_address(self, address: int) -> str:
if address not in self.vertex_index_by_address:
return ""
vertex_index: int = self.vertex_index_by_address[address]
return self.get_function_name_by_vertex(vertex_index)
def get_instruction_by_address(self, address: int) -> BinExport2.Instruction:
assert address in self.insn_by_address, f"address must be indexed, missing {address:x}"
return self.insn_by_address[address]
class BinExport2Analysis:
def __init__(self, be2: BinExport2, idx: BinExport2Index, buf: bytes):
self.be2: BinExport2 = be2
self.idx: BinExport2Index = idx
self.buf: bytes = buf
self.base_address: int = 0
self.thunks: Dict[int, int] = {}
self._find_base_address()
self._compute_thunks()
def _find_base_address(self):
sections_with_perms: Iterator[BinExport2.Section] = filter(
lambda s: s.flag_r or s.flag_w or s.flag_x, self.be2.section
)
# assume the lowest address is the base address.
# this works as long as BinExport doesn't record other
# libraries mapped into memory.
self.base_address = min(s.address for s in sections_with_perms)
logger.debug("found base address: %x", self.base_address)
def _compute_thunks(self):
for addr, idx in self.idx.vertex_index_by_address.items():
vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[idx]
if not capa.features.extractors.binexport2.helpers.is_vertex_type(
vertex, BinExport2.CallGraph.Vertex.Type.THUNK
):
continue
curr_idx: int = idx
for _ in range(capa.features.common.THUNK_CHAIN_DEPTH_DELTA):
thunk_callees: List[int] = self.idx.callees_by_vertex_index[curr_idx]
# if this doesn't hold, then it doesn't seem like this is a thunk,
# because either, len is:
# 0 and the thunk doesn't point to anything, or
# >1 and the thunk may end up at many functions.
assert len(thunk_callees) == 1, f"thunk @ {hex(addr)} failed"
thunked_idx: int = thunk_callees[0]
thunked_vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[thunked_idx]
if not capa.features.extractors.binexport2.helpers.is_vertex_type(
thunked_vertex, BinExport2.CallGraph.Vertex.Type.THUNK
):
assert thunked_vertex.HasField("address")
self.thunks[addr] = thunked_vertex.address
break
curr_idx = thunked_idx
@dataclass
class MemoryRegion:
# location of the bytes, potentially relative to a base address
address: int
buf: bytes
@property
def end(self) -> int:
return self.address + len(self.buf)
def contains(self, address: int) -> bool:
# note: address must be relative to any base address
return self.address <= address < self.end
class ReadMemoryError(ValueError): ...
class AddressNotMappedError(ReadMemoryError): ...
@dataclass
class AddressSpace:
base_address: int
memory_regions: Tuple[MemoryRegion, ...]
def read_memory(self, address: int, length: int) -> bytes:
rva: int = address - self.base_address
for region in self.memory_regions:
if region.contains(rva):
offset: int = rva - region.address
return region.buf[offset : offset + length]
raise AddressNotMappedError(address)
@classmethod
def from_pe(cls, pe: PE, base_address: int):
regions: List[MemoryRegion] = []
for section in pe.sections:
address: int = section.VirtualAddress
size: int = section.Misc_VirtualSize
buf: bytes = section.get_data()
if len(buf) != size:
# pad the section with NULLs
# assume page alignment is already handled.
# might need more hardening here.
buf += b"\x00" * (size - len(buf))
regions.append(MemoryRegion(address, buf))
return cls(base_address, tuple(regions))
@classmethod
def from_elf(cls, elf: ELFFile, base_address: int):
regions: List[MemoryRegion] = []
# ELF segments are for runtime data,
# ELF sections are for link-time data.
for segment in elf.iter_segments():
# assume p_align is consistent with addresses here.
# otherwise, should harden this loader.
segment_rva: int = segment.header.p_vaddr
segment_size: int = segment.header.p_memsz
segment_data: bytes = segment.data()
if len(segment_data) < segment_size:
# pad the section with NULLs
# assume page alignment is already handled.
# might need more hardening here.
segment_data += b"\x00" * (segment_size - len(segment_data))
regions.append(MemoryRegion(segment_rva, segment_data))
return cls(base_address, tuple(regions))
@classmethod
def from_buf(cls, buf: bytes, base_address: int):
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: PE = PE(data=buf)
return cls.from_pe(pe, base_address)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
return cls.from_elf(elf, base_address)
else:
raise NotImplementedError("file format address space")
@dataclass
class AnalysisContext:
sample_bytes: bytes
be2: BinExport2
idx: BinExport2Index
analysis: BinExport2Analysis
address_space: AddressSpace
@dataclass
class FunctionContext:
ctx: AnalysisContext
flow_graph_index: int
format: Set[str]
os: Set[str]
arch: Set[str]
@dataclass
class BasicBlockContext:
basic_block_index: int
@dataclass
class InstructionContext:
instruction_index: int
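
Aside: the deleted AddressSpace above backs capa's byte reads for BinExport2 inputs: a virtual address is rebased to an RVA and served from whichever padded section or segment contains it. A self-contained distillation of that lookup:

    from dataclasses import dataclass
    from typing import Tuple

    @dataclass
    class Region:
        address: int  # RVA at which the region starts
        buf: bytes

        def contains(self, rva: int) -> bool:
            return self.address <= rva < self.address + len(self.buf)

    @dataclass
    class Space:
        base_address: int
        regions: Tuple[Region, ...]

        def read_memory(self, address: int, length: int) -> bytes:
            rva = address - self.base_address  # rebase the virtual address
            for region in self.regions:
                if region.contains(rva):
                    offset = rva - region.address
                    return region.buf[offset : offset + length]
            raise ValueError(f"address not mapped: {address:#x}")

    space = Space(0x400000, (Region(0x1000, b"\x90" * 16),))
    assert space.read_memory(0x401004, 4) == b"\x90" * 4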


@@ -1,15 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
def is_stack_register_expression(be2: BinExport2, expression: BinExport2.Expression) -> bool:
return bool(
expression and expression.type == BinExport2.Expression.REGISTER and expression.symbol.lower().endswith("sp")
)


@@ -1,155 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import List, Tuple, Iterator, Optional
import capa.features.extractors.binexport2.helpers
from capa.features.insn import MAX_STRUCTURE_SIZE, Number, Offset, OperandNumber, OperandOffset
from capa.features.common import Feature, Characteristic
from capa.features.address import Address
from capa.features.extractors.binexport2 import FunctionContext, InstructionContext
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle
from capa.features.extractors.binexport2.helpers import (
BinExport2InstructionPatternMatcher,
mask_immediate,
is_address_mapped,
get_instruction_mnemonic,
get_operand_register_expression,
get_operand_immediate_expression,
)
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
from capa.features.extractors.binexport2.arch.arm.helpers import is_stack_register_expression
logger = logging.getLogger(__name__)
def extract_insn_number_features(
fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
instruction_index: int = ii.instruction_index
instruction: BinExport2.Instruction = be2.instruction[instruction_index]
if len(instruction.operand_index) == 0:
# skip things like:
# .text:0040116e leave
return
mnemonic: str = get_instruction_mnemonic(be2, instruction)
if mnemonic in ("add", "sub"):
assert len(instruction.operand_index) == 3
operand1_expression: Optional[BinExport2.Expression] = get_operand_register_expression(
be2, be2.operand[instruction.operand_index[1]]
)
if operand1_expression and is_stack_register_expression(be2, operand1_expression):
# skip things like:
# add x0,sp,#0x8
return
for i, operand_index in enumerate(instruction.operand_index):
operand: BinExport2.Operand = be2.operand[operand_index]
immediate_expression: Optional[BinExport2.Expression] = get_operand_immediate_expression(be2, operand)
if not immediate_expression:
continue
value: int = mask_immediate(fhi.arch, immediate_expression.immediate)
if is_address_mapped(be2, value):
continue
yield Number(value), ih.address
yield OperandNumber(i, value), ih.address
if mnemonic == "add" and i == 2:
if 0 < value < MAX_STRUCTURE_SIZE:
yield Offset(value), ih.address
yield OperandOffset(i, value), ih.address
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack), #int] ; capture #int
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack), #int]! ; capture #int
ldr|ldrb|ldrh|ldrsb|ldrsh|ldrex|ldrd|str|strb|strh|strex|strd reg, [reg(not-stack)], #int ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack), #int] ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack), #int]! ; capture #int
ldp|ldpd|stp|stpd reg, reg, [reg(not-stack)], #int ; capture #int
"""
)
def extract_insn_offset_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
match = OFFSET_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
return
value = match.expression.immediate
value = mask_immediate(fhi.arch, value)
if not is_address_mapped(be2, value):
value = capa.features.extractors.binexport2.helpers.twos_complement(fhi.arch, value)
yield Offset(value), ih.address
yield OperandOffset(match.operand_index, value), ih.address
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
eor reg, reg, reg
eor reg, reg, #int
"""
)
def extract_insn_nzxor_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
if NZXOR_PATTERNS.match_with_be2(be2, ii.instruction_index) is None:
return
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
# guaranteed to be simple int/reg operands
# so we don't have to realize the tree/list.
operands: List[BinExport2.Operand] = [be2.operand[operand_index] for operand_index in instruction.operand_index]
if operands[1] != operands[2]:
yield Characteristic("nzxor"), ih.address
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
blx|bx|blr reg
"""
)
def extract_function_indirect_call_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
if INDIRECT_CALL_PATTERNS.match_with_be2(be2, ii.instruction_index) is not None:
yield Characteristic("indirect call"), ih.address
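
Aside: the offset extraction above masks immediates to the operand width and reinterprets them as signed values via capa's arch-aware `twos_complement` helper. A minimal sketch of that reinterpretation, fixed at 32 bits for illustration:

    def twos_complement(value: int, bits: int = 32) -> int:
        # if the sign bit is set, subtract 2**bits to recover the negative value
        if value & (1 << (bits - 1)):
            return value - (1 << bits)
        return value

    assert twos_complement(0xFFFFFFFC) == -4
    assert twos_complement(0x10) == 16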


@@ -1,135 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import List, Optional
from dataclasses import dataclass
from capa.features.extractors.binexport2.helpers import get_operand_expressions
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
# security cookie checks may perform non-zeroing XORs, these are expected within a certain
# byte range within the first and returning basic blocks, this helps to reduce FP features
SECURITY_COOKIE_BYTES_DELTA: int = 0x40
@dataclass
class OperandPhraseInfo:
scale: Optional[BinExport2.Expression] = None
index: Optional[BinExport2.Expression] = None
base: Optional[BinExport2.Expression] = None
displacement: Optional[BinExport2.Expression] = None
def get_operand_phrase_info(be2: BinExport2, operand: BinExport2.Operand) -> Optional[OperandPhraseInfo]:
# assume the following (see https://blog.yossarian.net/2020/06/13/How-x86_64-addresses-memory):
#
# Scale: A 2-bit constant factor
# Index: Any general purpose register
# Base: Any general purpose register
# Displacement: An integral offset
expressions: List[BinExport2.Expression] = get_operand_expressions(be2, operand)
# skip expression up to and including BinExport2.Expression.DEREFERENCE, assume caller
# has checked for BinExport2.Expression.DEREFERENCE
for i, expression in enumerate(expressions):
if expression.type == BinExport2.Expression.DEREFERENCE:
expressions = expressions[i + 1 :]
break
expression0: BinExport2.Expression
expression1: BinExport2.Expression
expression2: BinExport2.Expression
expression3: BinExport2.Expression
expression4: BinExport2.Expression
if len(expressions) == 1:
expression0 = expressions[0]
assert (
expression0.type == BinExport2.Expression.IMMEDIATE_INT
or expression0.type == BinExport2.Expression.REGISTER
)
if expression0.type == BinExport2.Expression.IMMEDIATE_INT:
# Displacement
return OperandPhraseInfo(displacement=expression0)
elif expression0.type == BinExport2.Expression.REGISTER:
# Base
return OperandPhraseInfo(base=expression0)
elif len(expressions) == 3:
expression0 = expressions[0]
expression1 = expressions[1]
expression2 = expressions[2]
assert expression0.type == BinExport2.Expression.REGISTER
assert expression1.type == BinExport2.Expression.OPERATOR
assert (
expression2.type == BinExport2.Expression.IMMEDIATE_INT
or expression2.type == BinExport2.Expression.REGISTER
)
if expression2.type == BinExport2.Expression.REGISTER:
# Base + Index
return OperandPhraseInfo(base=expression0, index=expression2)
elif expression2.type == BinExport2.Expression.IMMEDIATE_INT:
# Base + Displacement
return OperandPhraseInfo(base=expression0, displacement=expression2)
elif len(expressions) == 5:
expression0 = expressions[0]
expression1 = expressions[1]
expression2 = expressions[2]
expression3 = expressions[3]
expression4 = expressions[4]
assert expression0.type == BinExport2.Expression.REGISTER
assert expression1.type == BinExport2.Expression.OPERATOR
assert (
expression2.type == BinExport2.Expression.REGISTER
or expression2.type == BinExport2.Expression.IMMEDIATE_INT
)
assert expression3.type == BinExport2.Expression.OPERATOR
assert expression4.type == BinExport2.Expression.IMMEDIATE_INT
if expression1.symbol == "+" and expression3.symbol == "+":
# Base + Index + Displacement
return OperandPhraseInfo(base=expression0, index=expression2, displacement=expression4)
elif expression1.symbol == "+" and expression3.symbol == "*":
# Base + (Index * Scale)
return OperandPhraseInfo(base=expression0, index=expression2, scale=expression3)
elif expression1.symbol == "*" and expression3.symbol == "+":
# (Index * Scale) + Displacement
return OperandPhraseInfo(index=expression0, scale=expression2, displacement=expression3)
else:
raise NotImplementedError(expression1.symbol, expression3.symbol)
elif len(expressions) == 7:
expression0 = expressions[0]
expression1 = expressions[1]
expression2 = expressions[2]
expression3 = expressions[3]
expression4 = expressions[4]
expression5 = expressions[5]
expression6 = expressions[6]
assert expression0.type == BinExport2.Expression.REGISTER
assert expression1.type == BinExport2.Expression.OPERATOR
assert expression2.type == BinExport2.Expression.REGISTER
assert expression3.type == BinExport2.Expression.OPERATOR
assert expression4.type == BinExport2.Expression.IMMEDIATE_INT
assert expression5.type == BinExport2.Expression.OPERATOR
assert expression6.type == BinExport2.Expression.IMMEDIATE_INT
# Base + (Index * Scale) + Displacement
return OperandPhraseInfo(base=expression0, index=expression2, scale=expression4, displacement=expression6)
else:
raise NotImplementedError(len(expressions))
return None
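# worked example (hypothetical operand): [rbx + rcx*4 + 0x10] arrives as the
# seven expressions (reg, '+', reg, '*', int, '+', int) above and classifies
# as base=rbx, index=rcx, scale=4, displacement=0x10.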


@@ -1,248 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import List, Tuple, Iterator
import capa.features.extractors.strings
import capa.features.extractors.binexport2.helpers
from capa.features.insn import MAX_STRUCTURE_SIZE, Number, Offset, OperandNumber, OperandOffset
from capa.features.common import Feature, Characteristic
from capa.features.address import Address
from capa.features.extractors.binexport2 import BinExport2Index, FunctionContext, BasicBlockContext, InstructionContext
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle
from capa.features.extractors.binexport2.helpers import (
BinExport2InstructionPatternMatcher,
mask_immediate,
is_address_mapped,
get_instruction_mnemonic,
)
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
from capa.features.extractors.binexport2.arch.intel.helpers import SECURITY_COOKIE_BYTES_DELTA
logger = logging.getLogger(__name__)
IGNORE_NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
ret #int
retn #int
add reg(stack), #int
sub reg(stack), #int
"""
)
NUMBER_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
push #int0 ; capture #int0
# its a little tedious to enumerate all the address forms
# but at least we are explicit
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar reg, #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [#int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + #int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + reg + #int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + reg * #int], #int0 ; capture #int0
cmp|and|or|test|mov|add|adc|sub|shl|shr|sal|sar [reg + reg * #int + #int], #int0 ; capture #int0
imul reg, reg, #int ; capture #int
# note that int is first
cmp|test #int0, reg ; capture #int0
# imagine reg is zero'd out, then this is like `mov reg, #int`
# which is not uncommon.
lea reg, [reg + #int] ; capture #int
"""
)
def extract_insn_number_features(
fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
if IGNORE_NUMBER_PATTERNS.match_with_be2(be2, ii.instruction_index):
return
match = NUMBER_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
return
value: int = mask_immediate(fhi.arch, match.expression.immediate)
if is_address_mapped(be2, value):
return
yield Number(value), ih.address
yield OperandNumber(match.operand_index, value), ih.address
instruction_index: int = ii.instruction_index
instruction: BinExport2.Instruction = be2.instruction[instruction_index]
mnemonic: str = get_instruction_mnemonic(be2, instruction)
if mnemonic.startswith("add"):
if 0 < value < MAX_STRUCTURE_SIZE:
yield Offset(value), ih.address
yield OperandOffset(match.operand_index, value), ih.address
OFFSET_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
mov|movzx|movsb|cmp [reg + reg * #int + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg * #int + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg + reg + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg(not-stack) + #int0], #int ; capture #int0
mov|movzx|movsb|cmp [reg + reg * #int + #int0], reg ; capture #int0
mov|movzx|movsb|cmp [reg * #int + #int0], reg ; capture #int0
mov|movzx|movsb|cmp [reg + reg + #int0], reg ; capture #int0
mov|movzx|movsb|cmp [reg(not-stack) + #int0], reg ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg + reg * #int + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg * #int + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg + reg + #int0] ; capture #int0
mov|movzx|movsb|cmp|lea reg, [reg(not-stack) + #int0] ; capture #int0
"""
)
# these are patterns that access offset 0 from some pointer
# (pointer is not the stack pointer).
OFFSET_ZERO_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
mov|movzx|movsb [reg(not-stack)], reg
mov|movzx|movsb [reg(not-stack)], #int
lea reg, [reg(not-stack)]
"""
)
def extract_insn_offset_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
match = OFFSET_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
match = OFFSET_ZERO_PATTERNS.match_with_be2(be2, ii.instruction_index)
if not match:
return
yield Offset(0), ih.address
yield OperandOffset(match.operand_index, 0), ih.address
value = mask_immediate(fhi.arch, match.expression.immediate)
if is_address_mapped(be2, value):
return
value = capa.features.extractors.binexport2.helpers.twos_complement(fhi.arch, value, 32)
yield Offset(value), ih.address
yield OperandOffset(match.operand_index, value), ih.address
def is_security_cookie(
fhi: FunctionContext,
bbi: BasicBlockContext,
instruction_address: int,
instruction: BinExport2.Instruction,
) -> bool:
"""
check if an instruction is related to security cookie checks.
"""
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
# security cookie check should use SP or BP
op1: BinExport2.Operand = be2.operand[instruction.operand_index[1]]
op1_exprs: List[BinExport2.Expression] = [be2.expression[expr_i] for expr_i in op1.expression_index]
if all(expr.symbol.lower() not in ("bp", "esp", "ebp", "rbp", "rsp") for expr in op1_exprs):
return False
# check_nzxor_security_cookie_delta
# if insn falls at the start of first entry block of the parent function.
flow_graph: BinExport2.FlowGraph = be2.flow_graph[fhi.flow_graph_index]
basic_block_index: int = bbi.basic_block_index
bb: BinExport2.BasicBlock = be2.basic_block[basic_block_index]
if flow_graph.entry_basic_block_index == basic_block_index:
first_addr: int = min((idx.insn_address_by_index[ir.begin_index] for ir in bb.instruction_index))
if instruction_address < first_addr + SECURITY_COOKIE_BYTES_DELTA:
return True
# or insn falls at the end before return in a terminal basic block.
if basic_block_index not in (e.source_basic_block_index for e in flow_graph.edge):
last_addr: int = max((idx.insn_address_by_index[ir.end_index - 1] for ir in bb.instruction_index))
if instruction_address > last_addr - SECURITY_COOKIE_BYTES_DELTA:
return True
return False
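# example (hypothetical addresses): with SECURITY_COOKIE_BYTES_DELTA = 0x40,
# an xor at 0x401010 inside an entry block starting at 0x401000 falls within
# the first 0x40 bytes, so it is treated as a cookie check and not "nzxor".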
NZXOR_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
xor|xorpd|xorps|pxor reg, reg
xor|xorpd|xorps|pxor reg, #int
"""
)
def extract_insn_nzxor_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
"""
parse non-zeroing XOR instruction from the given instruction.
ignore expected non-zeroing XORs, e.g. security cookies.
"""
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
if NZXOR_PATTERNS.match_with_be2(be2, ii.instruction_index) is None:
return
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
# guaranteed to be simple int/reg operands
# so we don't have to realize the tree/list.
operands: List[BinExport2.Operand] = [be2.operand[operand_index] for operand_index in instruction.operand_index]
if operands[0] == operands[1]:
return
instruction_address: int = idx.insn_address_by_index[ii.instruction_index]
if is_security_cookie(fhi, bbh.inner, instruction_address, instruction):
return
yield Characteristic("nzxor"), ih.address
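# standalone sketch (not part of this module) of the intent behind the operand
# check above: `xor eax, eax` is the common zeroing idiom and carries no
# data-mixing signal, while `xor eax, 0x5a` does.
def _is_nonzeroing_xor(mnemonic: str, op0: str, op1: str) -> bool:
    return mnemonic in ("xor", "xorpd", "xorps", "pxor") and op0 != op1
assert not _is_nonzeroing_xor("xor", "eax", "eax")
assert _is_nonzeroing_xor("xor", "eax", "0x5a")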
INDIRECT_CALL_PATTERNS = BinExport2InstructionPatternMatcher.from_str(
"""
call|jmp reg0
call|jmp [reg + reg * #int + #int]
call|jmp [reg + reg * #int]
call|jmp [reg * #int + #int]
call|jmp [reg + reg + #int]
call|jmp [reg + #int]
call|jmp [reg]
"""
)
def extract_function_indirect_call_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
match = INDIRECT_CALL_PATTERNS.match_with_be2(be2, ii.instruction_index)
if match is None:
return
yield Characteristic("indirect call"), ih.address
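# for orientation (examples, not exhaustive): `call eax`, `jmp [ebx+0x10]`, and
# `call [esi]` match the patterns above, while direct transfers such as
# `call 0x401000` do not, since they target an immediate rather than a register
# or computed address.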

View File

@@ -1,40 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import List, Tuple, Iterator
from capa.features.common import Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.basicblock import BasicBlock
from capa.features.extractors.binexport2 import FunctionContext, BasicBlockContext
from capa.features.extractors.base_extractor import BBHandle, FunctionHandle
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
def extract_bb_tight_loop(fh: FunctionHandle, bbh: BBHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
bbi: BasicBlockContext = bbh.inner
idx = fhi.ctx.idx
basic_block_index: int = bbi.basic_block_index
target_edges: List[BinExport2.FlowGraph.Edge] = idx.target_edges_by_basic_block_index[basic_block_index]
if basic_block_index in (e.source_basic_block_index for e in target_edges):
basic_block_address: int = idx.get_basic_block_address(basic_block_index)
yield Characteristic("tight loop"), AbsoluteVirtualAddress(basic_block_address)
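# minimal sketch (hypothetical indices): a "tight loop" is a basic block with a
# flow-graph edge back to itself, as checked above.
edges = [(0, 1), (1, 1), (1, 2)]  # block 1 branches to itself
assert any(source == target for source, target in edges)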
def extract_features(fh: FunctionHandle, bbh: BBHandle) -> Iterator[Tuple[Feature, Address]]:
"""extract basic block features"""
for bb_handler in BASIC_BLOCK_HANDLERS:
for feature, addr in bb_handler(fh, bbh):
yield feature, addr
yield BasicBlock(), bbh.address
BASIC_BLOCK_HANDLERS = (extract_bb_tight_loop,)

File diff suppressed because one or more lines are too long

View File

@@ -1,784 +0,0 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
The representation is generic to accommodate various source architectures.
In particular 32 and 64 bit versions of x86, ARM, PowerPC and MIPS have been
tested.
Multiple levels of deduping have been applied to make the format more compact
and avoid redundant data duplication. Some of this is due to hard-earned
experience trying to cope with intentionally obfuscated malicious binaries.
Note in particular that the same instruction may occur in multiple basic
blocks and the same basic block in multiple functions (instruction and basic
block sharing). Implemented naively, malware can use this to cause
combinatorial explosion in memory usage, DOSing the analyst. This format
should store every unique expression, mnemonic, operand, instruction and
basic block only once instead of duplicating the information for every
instance of it.
This format does _not_ try to be 100% backwards compatible with the old
version. In particular, we do not store IDA's comment types, making lossless
porting of IDA comments impossible. We do, however, store comments and
expression substitutions, so porting the actual data is possible, just not
the exact IDA type.
While it would be more natural to use addresses when defining call graph and
flow graph edges and other such references, it is more efficient to employ
one more level of indirection and use indices into the basic block or
function arrays instead. This is because addresses will usually use most of
the available 64 bit space while indices will be much smaller and compress
much better (less randomly distributed).
We omit all fields that are set to their default value anyway. Note that
this has two side effects:
- changing the defaults in this proto file will, in effect, change what's
read from disk
- the generated code has_* methods are somewhat less useful
WARNING: We omit the defaults manually in the code writing the data. Do not
change the defaults here without changing the code!
TODO(cblichmann): Link flow graphs to call graph nodes. The connection is
there via the address, but tricky to extract.
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class BinExport2(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class Meta(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
EXECUTABLE_NAME_FIELD_NUMBER: builtins.int
EXECUTABLE_ID_FIELD_NUMBER: builtins.int
ARCHITECTURE_NAME_FIELD_NUMBER: builtins.int
TIMESTAMP_FIELD_NUMBER: builtins.int
executable_name: builtins.str
"""Input binary filename including file extension but excluding file path.
example: "insider_gcc.exe"
"""
executable_id: builtins.str
"""Application defined executable id. Often the SHA256 hash of the input
binary.
"""
architecture_name: builtins.str
"""Input architecture name, e.g. x86-32."""
timestamp: builtins.int
"""When did this file get created? Unix time. This may be used for some
primitive versioning in case the file format ever changes.
"""
def __init__(
self,
*,
executable_name: builtins.str | None = ...,
executable_id: builtins.str | None = ...,
architecture_name: builtins.str | None = ...,
timestamp: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["architecture_name", b"architecture_name", "executable_id", b"executable_id", "executable_name", b"executable_name", "timestamp", b"timestamp"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["architecture_name", b"architecture_name", "executable_id", b"executable_id", "executable_name", b"executable_name", "timestamp", b"timestamp"]) -> None: ...
@typing_extensions.final
class CallGraph(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class Vertex(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.CallGraph.Vertex._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
NORMAL: BinExport2.CallGraph.Vertex._Type.ValueType # 0
"""Regular function with full disassembly."""
LIBRARY: BinExport2.CallGraph.Vertex._Type.ValueType # 1
"""This function is a well known library function."""
IMPORTED: BinExport2.CallGraph.Vertex._Type.ValueType # 2
"""Imported from a dynamic link library (e.g. dll)."""
THUNK: BinExport2.CallGraph.Vertex._Type.ValueType # 3
"""A thunk function, forwarding its work via an unconditional jump."""
INVALID: BinExport2.CallGraph.Vertex._Type.ValueType # 4
"""An invalid function (a function that contained invalid code or was
considered invalid by some heuristics).
"""
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
NORMAL: BinExport2.CallGraph.Vertex.Type.ValueType # 0
"""Regular function with full disassembly."""
LIBRARY: BinExport2.CallGraph.Vertex.Type.ValueType # 1
"""This function is a well known library function."""
IMPORTED: BinExport2.CallGraph.Vertex.Type.ValueType # 2
"""Imported from a dynamic link library (e.g. dll)."""
THUNK: BinExport2.CallGraph.Vertex.Type.ValueType # 3
"""A thunk function, forwarding its work via an unconditional jump."""
INVALID: BinExport2.CallGraph.Vertex.Type.ValueType # 4
"""An invalid function (a function that contained invalid code or was
considered invalid by some heuristics).
"""
ADDRESS_FIELD_NUMBER: builtins.int
TYPE_FIELD_NUMBER: builtins.int
MANGLED_NAME_FIELD_NUMBER: builtins.int
DEMANGLED_NAME_FIELD_NUMBER: builtins.int
LIBRARY_INDEX_FIELD_NUMBER: builtins.int
MODULE_INDEX_FIELD_NUMBER: builtins.int
address: builtins.int
"""The function's entry point address. Messages need to be sorted, see
comment below on `vertex`.
"""
type: global___BinExport2.CallGraph.Vertex.Type.ValueType
mangled_name: builtins.str
"""If the function has a user defined, real name it will be given here.
            main() is a proper name; sub_BAADF00D is not (an auto-generated dummy
name).
"""
demangled_name: builtins.str
"""Demangled name if the function is a mangled C++ function and we could
demangle it.
"""
library_index: builtins.int
"""If this is a library function, what is its index in library arrays."""
module_index: builtins.int
"""If module name, such as class name for DEX files, is present - index in
module table.
"""
def __init__(
self,
*,
address: builtins.int | None = ...,
type: global___BinExport2.CallGraph.Vertex.Type.ValueType | None = ...,
mangled_name: builtins.str | None = ...,
demangled_name: builtins.str | None = ...,
library_index: builtins.int | None = ...,
module_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "demangled_name", b"demangled_name", "library_index", b"library_index", "mangled_name", b"mangled_name", "module_index", b"module_index", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "demangled_name", b"demangled_name", "library_index", b"library_index", "mangled_name", b"mangled_name", "module_index", b"module_index", "type", b"type"]) -> None: ...
@typing_extensions.final
class Edge(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
SOURCE_VERTEX_INDEX_FIELD_NUMBER: builtins.int
TARGET_VERTEX_INDEX_FIELD_NUMBER: builtins.int
source_vertex_index: builtins.int
"""source and target index into the vertex repeated field."""
target_vertex_index: builtins.int
def __init__(
self,
*,
source_vertex_index: builtins.int | None = ...,
target_vertex_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["source_vertex_index", b"source_vertex_index", "target_vertex_index", b"target_vertex_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["source_vertex_index", b"source_vertex_index", "target_vertex_index", b"target_vertex_index"]) -> None: ...
VERTEX_FIELD_NUMBER: builtins.int
EDGE_FIELD_NUMBER: builtins.int
@property
def vertex(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.CallGraph.Vertex]:
"""vertices == functions in the call graph.
Important: Most downstream tooling (notably BinDiff), need these to be
sorted by `Vertex::address` (ascending). For C++, the
`BinExport2Writer` class enforces this invariant.
"""
@property
def edge(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.CallGraph.Edge]:
"""edges == calls in the call graph."""
def __init__(
self,
*,
vertex: collections.abc.Iterable[global___BinExport2.CallGraph.Vertex] | None = ...,
edge: collections.abc.Iterable[global___BinExport2.CallGraph.Edge] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["edge", b"edge", "vertex", b"vertex"]) -> None: ...
@typing_extensions.final
class Expression(google.protobuf.message.Message):
"""An operand consists of 1 or more expressions, linked together as a tree."""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.Expression._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
SYMBOL: BinExport2.Expression._Type.ValueType # 1
IMMEDIATE_INT: BinExport2.Expression._Type.ValueType # 2
IMMEDIATE_FLOAT: BinExport2.Expression._Type.ValueType # 3
OPERATOR: BinExport2.Expression._Type.ValueType # 4
REGISTER: BinExport2.Expression._Type.ValueType # 5
SIZE_PREFIX: BinExport2.Expression._Type.ValueType # 6
DEREFERENCE: BinExport2.Expression._Type.ValueType # 7
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
SYMBOL: BinExport2.Expression.Type.ValueType # 1
IMMEDIATE_INT: BinExport2.Expression.Type.ValueType # 2
IMMEDIATE_FLOAT: BinExport2.Expression.Type.ValueType # 3
OPERATOR: BinExport2.Expression.Type.ValueType # 4
REGISTER: BinExport2.Expression.Type.ValueType # 5
SIZE_PREFIX: BinExport2.Expression.Type.ValueType # 6
DEREFERENCE: BinExport2.Expression.Type.ValueType # 7
TYPE_FIELD_NUMBER: builtins.int
SYMBOL_FIELD_NUMBER: builtins.int
IMMEDIATE_FIELD_NUMBER: builtins.int
PARENT_INDEX_FIELD_NUMBER: builtins.int
IS_RELOCATION_FIELD_NUMBER: builtins.int
type: global___BinExport2.Expression.Type.ValueType
"""IMMEDIATE_INT is by far the most common type and thus we can save some
space by omitting it as the default.
"""
symbol: builtins.str
"""Symbol for this expression. Interpretation depends on type. Examples
include: "eax", "[", "+"
"""
immediate: builtins.int
"""If the expression can be interpreted as an integer value (IMMEDIATE_INT)
the value is given here.
"""
parent_index: builtins.int
"""The parent expression. Example expression tree for the second operand of:
mov eax, b4 [ebx + 12]
"b4" --- "[" --- "+" --- "ebx"
\\ "12"
"""
is_relocation: builtins.bool
"""true if the expression has entry in relocation table"""
def __init__(
self,
*,
type: global___BinExport2.Expression.Type.ValueType | None = ...,
symbol: builtins.str | None = ...,
immediate: builtins.int | None = ...,
parent_index: builtins.int | None = ...,
is_relocation: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["immediate", b"immediate", "is_relocation", b"is_relocation", "parent_index", b"parent_index", "symbol", b"symbol", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["immediate", b"immediate", "is_relocation", b"is_relocation", "parent_index", b"parent_index", "symbol", b"symbol", "type", b"type"]) -> None: ...
@typing_extensions.final
class Operand(google.protobuf.message.Message):
"""An instruction may have 0 or more operands."""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
EXPRESSION_INDEX_FIELD_NUMBER: builtins.int
@property
def expression_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Contains all expressions constituting this operand. All expressions
should be linked into a single tree, i.e. there should only be one
expression in this list with parent_index == NULL and all others should
descend from that. Rendering order for expressions on the same tree level
(siblings) is implicitly given by the order they are referenced in this
repeated field.
Implicit: expression sequence
"""
def __init__(
self,
*,
expression_index: collections.abc.Iterable[builtins.int] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["expression_index", b"expression_index"]) -> None: ...
@typing_extensions.final
class Mnemonic(google.protobuf.message.Message):
"""An instruction has exactly 1 mnemonic."""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
NAME_FIELD_NUMBER: builtins.int
name: builtins.str
"""Literal representation of the mnemonic, e.g.: "mov"."""
def __init__(
self,
*,
name: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["name", b"name"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["name", b"name"]) -> None: ...
@typing_extensions.final
class Instruction(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
ADDRESS_FIELD_NUMBER: builtins.int
CALL_TARGET_FIELD_NUMBER: builtins.int
MNEMONIC_INDEX_FIELD_NUMBER: builtins.int
OPERAND_INDEX_FIELD_NUMBER: builtins.int
RAW_BYTES_FIELD_NUMBER: builtins.int
COMMENT_INDEX_FIELD_NUMBER: builtins.int
address: builtins.int
"""This will only be filled for instructions that do not just flow from the
immediately preceding instruction. Regular instructions will have to
calculate their own address by adding raw_bytes.size() to the previous
instruction's address.
"""
@property
def call_target(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""If this is a call instruction and call targets could be determined
they'll be given here. Note that we may or may not have a flow graph for
the target and thus cannot use an index into the flow graph table here.
We could potentially use call graph nodes, but linking instructions to
the call graph directly does not seem a good choice.
"""
mnemonic_index: builtins.int
"""Index into the mnemonic array of strings. Used for de-duping the data.
The default value is used for the most common mnemonic in the executable.
"""
@property
def operand_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Indices into the operand tree. On X86 this can be 0, 1 or 2 elements
long, 3 elements with VEX/EVEX.
Implicit: operand sequence
"""
raw_bytes: builtins.bytes
"""The unmodified input bytes corresponding to this instruction."""
@property
def comment_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Implicit: comment sequence"""
def __init__(
self,
*,
address: builtins.int | None = ...,
call_target: collections.abc.Iterable[builtins.int] | None = ...,
mnemonic_index: builtins.int | None = ...,
operand_index: collections.abc.Iterable[builtins.int] | None = ...,
raw_bytes: builtins.bytes | None = ...,
comment_index: collections.abc.Iterable[builtins.int] | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "mnemonic_index", b"mnemonic_index", "raw_bytes", b"raw_bytes"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "call_target", b"call_target", "comment_index", b"comment_index", "mnemonic_index", b"mnemonic_index", "operand_index", b"operand_index", "raw_bytes", b"raw_bytes"]) -> None: ...
@typing_extensions.final
class BasicBlock(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class IndexRange(google.protobuf.message.Message):
"""This is a space optimization. The instructions for an individual basic
block will usually be in a continuous index range. Thus it is more
efficient to store the range instead of individual indices. However, this
does not hold true for all basic blocks, so we need to be able to store
multiple index ranges per block.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
BEGIN_INDEX_FIELD_NUMBER: builtins.int
END_INDEX_FIELD_NUMBER: builtins.int
begin_index: builtins.int
"""These work like begin and end iterators, i.e. the sequence is
[begin_index, end_index). If the sequence only contains a single
element end_index will be omitted.
"""
end_index: builtins.int
def __init__(
self,
*,
begin_index: builtins.int | None = ...,
end_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["begin_index", b"begin_index", "end_index", b"end_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["begin_index", b"begin_index", "end_index", b"end_index"]) -> None: ...
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
@property
def instruction_index(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.BasicBlock.IndexRange]:
"""Implicit: instruction sequence"""
def __init__(
self,
*,
instruction_index: collections.abc.Iterable[global___BinExport2.BasicBlock.IndexRange] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index"]) -> None: ...
@typing_extensions.final
class FlowGraph(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class Edge(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.FlowGraph.Edge._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
CONDITION_TRUE: BinExport2.FlowGraph.Edge._Type.ValueType # 1
CONDITION_FALSE: BinExport2.FlowGraph.Edge._Type.ValueType # 2
UNCONDITIONAL: BinExport2.FlowGraph.Edge._Type.ValueType # 3
SWITCH: BinExport2.FlowGraph.Edge._Type.ValueType # 4
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
CONDITION_TRUE: BinExport2.FlowGraph.Edge.Type.ValueType # 1
CONDITION_FALSE: BinExport2.FlowGraph.Edge.Type.ValueType # 2
UNCONDITIONAL: BinExport2.FlowGraph.Edge.Type.ValueType # 3
SWITCH: BinExport2.FlowGraph.Edge.Type.ValueType # 4
SOURCE_BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
TARGET_BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
TYPE_FIELD_NUMBER: builtins.int
IS_BACK_EDGE_FIELD_NUMBER: builtins.int
source_basic_block_index: builtins.int
"""Source instruction will always be the last instruction of the source
basic block, target instruction the first instruction of the target
basic block.
"""
target_basic_block_index: builtins.int
type: global___BinExport2.FlowGraph.Edge.Type.ValueType
is_back_edge: builtins.bool
"""Indicates whether this is a loop edge as determined by Lengauer-Tarjan."""
def __init__(
self,
*,
source_basic_block_index: builtins.int | None = ...,
target_basic_block_index: builtins.int | None = ...,
type: global___BinExport2.FlowGraph.Edge.Type.ValueType | None = ...,
is_back_edge: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["is_back_edge", b"is_back_edge", "source_basic_block_index", b"source_basic_block_index", "target_basic_block_index", b"target_basic_block_index", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["is_back_edge", b"is_back_edge", "source_basic_block_index", b"source_basic_block_index", "target_basic_block_index", b"target_basic_block_index", "type", b"type"]) -> None: ...
BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
ENTRY_BASIC_BLOCK_INDEX_FIELD_NUMBER: builtins.int
EDGE_FIELD_NUMBER: builtins.int
@property
def basic_block_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Basic blocks are sorted by address."""
entry_basic_block_index: builtins.int
"""The flow graph's entry point address is the first instruction of the
entry_basic_block.
"""
@property
def edge(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.FlowGraph.Edge]: ...
def __init__(
self,
*,
basic_block_index: collections.abc.Iterable[builtins.int] | None = ...,
entry_basic_block_index: builtins.int | None = ...,
edge: collections.abc.Iterable[global___BinExport2.FlowGraph.Edge] | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["entry_basic_block_index", b"entry_basic_block_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["basic_block_index", b"basic_block_index", "edge", b"edge", "entry_basic_block_index", b"entry_basic_block_index"]) -> None: ...
@typing_extensions.final
class Reference(google.protobuf.message.Message):
"""Generic reference class used for address comments (deprecated), string
references and expression substitutions. It allows referencing from an
instruction, operand, expression subtree tuple to a de-duped string in the
string table.
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
INSTRUCTION_OPERAND_INDEX_FIELD_NUMBER: builtins.int
OPERAND_EXPRESSION_INDEX_FIELD_NUMBER: builtins.int
STRING_TABLE_INDEX_FIELD_NUMBER: builtins.int
instruction_index: builtins.int
"""Index into the global instruction table."""
instruction_operand_index: builtins.int
"""Index into the operand array local to an instruction."""
operand_expression_index: builtins.int
"""Index into the expression array local to an operand."""
string_table_index: builtins.int
"""Index into the global string table."""
def __init__(
self,
*,
instruction_index: builtins.int | None = ...,
instruction_operand_index: builtins.int | None = ...,
operand_expression_index: builtins.int | None = ...,
string_table_index: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "string_table_index", b"string_table_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "string_table_index", b"string_table_index"]) -> None: ...
@typing_extensions.final
class DataReference(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
ADDRESS_FIELD_NUMBER: builtins.int
instruction_index: builtins.int
"""Index into the global instruction table."""
address: builtins.int
"""Address being referred."""
def __init__(
self,
*,
instruction_index: builtins.int | None = ...,
address: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "instruction_index", b"instruction_index"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "instruction_index", b"instruction_index"]) -> None: ...
@typing_extensions.final
class Comment(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
class _Type:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BinExport2.Comment._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT: BinExport2.Comment._Type.ValueType # 0
"""A regular instruction comment. Typically displayed next to the
instruction disassembly.
"""
ANTERIOR: BinExport2.Comment._Type.ValueType # 1
"""A comment line that is typically displayed before (above) the
instruction it refers to.
"""
POSTERIOR: BinExport2.Comment._Type.ValueType # 2
"""Like ANTERIOR, but a typically displayed after (below)."""
FUNCTION: BinExport2.Comment._Type.ValueType # 3
"""Similar to an ANTERIOR comment, but applies to the beginning of an
identified function. Programs displaying the proto may choose to render
these differently (e.g. above an inferred function signature).
"""
ENUM: BinExport2.Comment._Type.ValueType # 4
"""Named constants, bitfields and similar."""
LOCATION: BinExport2.Comment._Type.ValueType # 5
"""Named locations, usually the target of a jump."""
GLOBAL_REFERENCE: BinExport2.Comment._Type.ValueType # 6
"""Data cross references."""
LOCAL_REFERENCE: BinExport2.Comment._Type.ValueType # 7
"""Local/stack variables."""
class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
DEFAULT: BinExport2.Comment.Type.ValueType # 0
"""A regular instruction comment. Typically displayed next to the
instruction disassembly.
"""
ANTERIOR: BinExport2.Comment.Type.ValueType # 1
"""A comment line that is typically displayed before (above) the
instruction it refers to.
"""
POSTERIOR: BinExport2.Comment.Type.ValueType # 2
"""Like ANTERIOR, but a typically displayed after (below)."""
FUNCTION: BinExport2.Comment.Type.ValueType # 3
"""Similar to an ANTERIOR comment, but applies to the beginning of an
identified function. Programs displaying the proto may choose to render
these differently (e.g. above an inferred function signature).
"""
ENUM: BinExport2.Comment.Type.ValueType # 4
"""Named constants, bitfields and similar."""
LOCATION: BinExport2.Comment.Type.ValueType # 5
"""Named locations, usually the target of a jump."""
GLOBAL_REFERENCE: BinExport2.Comment.Type.ValueType # 6
"""Data cross references."""
LOCAL_REFERENCE: BinExport2.Comment.Type.ValueType # 7
"""Local/stack variables."""
INSTRUCTION_INDEX_FIELD_NUMBER: builtins.int
INSTRUCTION_OPERAND_INDEX_FIELD_NUMBER: builtins.int
OPERAND_EXPRESSION_INDEX_FIELD_NUMBER: builtins.int
STRING_TABLE_INDEX_FIELD_NUMBER: builtins.int
REPEATABLE_FIELD_NUMBER: builtins.int
TYPE_FIELD_NUMBER: builtins.int
instruction_index: builtins.int
"""Index into the global instruction table. This is here to enable
comment processing without having to iterate over all instructions.
There is an N:M mapping of instructions to comments.
"""
instruction_operand_index: builtins.int
"""Index into the operand array local to an instruction."""
operand_expression_index: builtins.int
"""Index into the expression array local to an operand, like in Reference.
        This is not currently used, but would allow implementing expression
substitutions.
"""
string_table_index: builtins.int
"""Index into the global string table."""
repeatable: builtins.bool
"""Comment is propagated to all locations that reference the original
location.
"""
type: global___BinExport2.Comment.Type.ValueType
def __init__(
self,
*,
instruction_index: builtins.int | None = ...,
instruction_operand_index: builtins.int | None = ...,
operand_expression_index: builtins.int | None = ...,
string_table_index: builtins.int | None = ...,
repeatable: builtins.bool | None = ...,
type: global___BinExport2.Comment.Type.ValueType | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "repeatable", b"repeatable", "string_table_index", b"string_table_index", "type", b"type"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["instruction_index", b"instruction_index", "instruction_operand_index", b"instruction_operand_index", "operand_expression_index", b"operand_expression_index", "repeatable", b"repeatable", "string_table_index", b"string_table_index", "type", b"type"]) -> None: ...
@typing_extensions.final
class Section(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
ADDRESS_FIELD_NUMBER: builtins.int
SIZE_FIELD_NUMBER: builtins.int
FLAG_R_FIELD_NUMBER: builtins.int
FLAG_W_FIELD_NUMBER: builtins.int
FLAG_X_FIELD_NUMBER: builtins.int
address: builtins.int
"""Section start address."""
size: builtins.int
"""Section size."""
flag_r: builtins.bool
"""Read flag of the section, True when section is readable."""
flag_w: builtins.bool
"""Write flag of the section, True when section is writable."""
flag_x: builtins.bool
"""Execute flag of the section, True when section is executable."""
def __init__(
self,
*,
address: builtins.int | None = ...,
size: builtins.int | None = ...,
flag_r: builtins.bool | None = ...,
flag_w: builtins.bool | None = ...,
flag_x: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["address", b"address", "flag_r", b"flag_r", "flag_w", b"flag_w", "flag_x", b"flag_x", "size", b"size"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address", b"address", "flag_r", b"flag_r", "flag_w", b"flag_w", "flag_x", b"flag_x", "size", b"size"]) -> None: ...
@typing_extensions.final
class Library(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
IS_STATIC_FIELD_NUMBER: builtins.int
LOAD_ADDRESS_FIELD_NUMBER: builtins.int
NAME_FIELD_NUMBER: builtins.int
is_static: builtins.bool
"""If this library is statically linked."""
load_address: builtins.int
"""Address where this library was loaded, 0 if unknown."""
name: builtins.str
"""Name of the library (format is platform-dependent)."""
def __init__(
self,
*,
is_static: builtins.bool | None = ...,
load_address: builtins.int | None = ...,
name: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["is_static", b"is_static", "load_address", b"load_address", "name", b"name"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["is_static", b"is_static", "load_address", b"load_address", "name", b"name"]) -> None: ...
@typing_extensions.final
class Module(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
NAME_FIELD_NUMBER: builtins.int
name: builtins.str
"""Name, such as Java class name. Platform-dependent."""
def __init__(
self,
*,
name: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["name", b"name"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["name", b"name"]) -> None: ...
META_INFORMATION_FIELD_NUMBER: builtins.int
EXPRESSION_FIELD_NUMBER: builtins.int
OPERAND_FIELD_NUMBER: builtins.int
MNEMONIC_FIELD_NUMBER: builtins.int
INSTRUCTION_FIELD_NUMBER: builtins.int
BASIC_BLOCK_FIELD_NUMBER: builtins.int
FLOW_GRAPH_FIELD_NUMBER: builtins.int
CALL_GRAPH_FIELD_NUMBER: builtins.int
STRING_TABLE_FIELD_NUMBER: builtins.int
ADDRESS_COMMENT_FIELD_NUMBER: builtins.int
COMMENT_FIELD_NUMBER: builtins.int
STRING_REFERENCE_FIELD_NUMBER: builtins.int
EXPRESSION_SUBSTITUTION_FIELD_NUMBER: builtins.int
SECTION_FIELD_NUMBER: builtins.int
LIBRARY_FIELD_NUMBER: builtins.int
DATA_REFERENCE_FIELD_NUMBER: builtins.int
MODULE_FIELD_NUMBER: builtins.int
@property
def meta_information(self) -> global___BinExport2.Meta: ...
@property
def expression(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Expression]: ...
@property
def operand(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Operand]: ...
@property
def mnemonic(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Mnemonic]: ...
@property
def instruction(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Instruction]: ...
@property
def basic_block(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.BasicBlock]: ...
@property
def flow_graph(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.FlowGraph]: ...
@property
def call_graph(self) -> global___BinExport2.CallGraph: ...
@property
def string_table(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
@property
def address_comment(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Reference]:
"""No longer written. This is here so that BinDiff can work with older
BinExport files.
"""
@property
def comment(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Comment]:
"""Rich comment index used for BinDiff's comment porting."""
@property
def string_reference(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Reference]: ...
@property
def expression_substitution(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Reference]: ...
@property
def section(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Section]: ...
@property
def library(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Library]: ...
@property
def data_reference(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.DataReference]: ...
@property
def module(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinExport2.Module]: ...
def __init__(
self,
*,
meta_information: global___BinExport2.Meta | None = ...,
expression: collections.abc.Iterable[global___BinExport2.Expression] | None = ...,
operand: collections.abc.Iterable[global___BinExport2.Operand] | None = ...,
mnemonic: collections.abc.Iterable[global___BinExport2.Mnemonic] | None = ...,
instruction: collections.abc.Iterable[global___BinExport2.Instruction] | None = ...,
basic_block: collections.abc.Iterable[global___BinExport2.BasicBlock] | None = ...,
flow_graph: collections.abc.Iterable[global___BinExport2.FlowGraph] | None = ...,
call_graph: global___BinExport2.CallGraph | None = ...,
string_table: collections.abc.Iterable[builtins.str] | None = ...,
address_comment: collections.abc.Iterable[global___BinExport2.Reference] | None = ...,
comment: collections.abc.Iterable[global___BinExport2.Comment] | None = ...,
string_reference: collections.abc.Iterable[global___BinExport2.Reference] | None = ...,
expression_substitution: collections.abc.Iterable[global___BinExport2.Reference] | None = ...,
section: collections.abc.Iterable[global___BinExport2.Section] | None = ...,
library: collections.abc.Iterable[global___BinExport2.Library] | None = ...,
data_reference: collections.abc.Iterable[global___BinExport2.DataReference] | None = ...,
module: collections.abc.Iterable[global___BinExport2.Module] | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["call_graph", b"call_graph", "meta_information", b"meta_information"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["address_comment", b"address_comment", "basic_block", b"basic_block", "call_graph", b"call_graph", "comment", b"comment", "data_reference", b"data_reference", "expression", b"expression", "expression_substitution", b"expression_substitution", "flow_graph", b"flow_graph", "instruction", b"instruction", "library", b"library", "meta_information", b"meta_information", "mnemonic", b"mnemonic", "module", b"module", "operand", b"operand", "section", b"section", "string_reference", b"string_reference", "string_table", b"string_table"]) -> None: ...
global___BinExport2 = BinExport2

View File

@@ -1,130 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Set, List, Tuple, Iterator
import capa.features.extractors.elf
import capa.features.extractors.common
import capa.features.extractors.binexport2.file
import capa.features.extractors.binexport2.insn
import capa.features.extractors.binexport2.helpers
import capa.features.extractors.binexport2.function
import capa.features.extractors.binexport2.basicblock
from capa.features.common import OS, Arch, Format, Feature
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.extractors.binexport2 import (
AddressSpace,
AnalysisContext,
BinExport2Index,
FunctionContext,
BasicBlockContext,
BinExport2Analysis,
InstructionContext,
)
from capa.features.extractors.base_extractor import (
BBHandle,
InsnHandle,
SampleHashes,
FunctionHandle,
StaticFeatureExtractor,
)
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
class BinExport2FeatureExtractor(StaticFeatureExtractor):
def __init__(self, be2: BinExport2, buf: bytes):
super().__init__(hashes=SampleHashes.from_bytes(buf))
self.be2: BinExport2 = be2
self.buf: bytes = buf
self.idx: BinExport2Index = BinExport2Index(self.be2)
self.analysis: BinExport2Analysis = BinExport2Analysis(self.be2, self.idx, self.buf)
address_space: AddressSpace = AddressSpace.from_buf(buf, self.analysis.base_address)
self.ctx: AnalysisContext = AnalysisContext(self.buf, self.be2, self.idx, self.analysis, address_space)
self.global_features: List[Tuple[Feature, Address]] = []
self.global_features.extend(list(capa.features.extractors.common.extract_format(self.buf)))
self.global_features.extend(list(capa.features.extractors.common.extract_os(self.buf)))
self.global_features.extend(list(capa.features.extractors.common.extract_arch(self.buf)))
self.format: Set[str] = set()
self.os: Set[str] = set()
self.arch: Set[str] = set()
for feature, _ in self.global_features:
assert isinstance(feature.value, str)
if isinstance(feature, Format):
self.format.add(feature.value)
elif isinstance(feature, OS):
self.os.add(feature.value)
elif isinstance(feature, Arch):
self.arch.add(feature.value)
else:
raise ValueError("unexpected global feature: %s", feature)
def get_base_address(self) -> AbsoluteVirtualAddress:
return AbsoluteVirtualAddress(self.analysis.base_address)
def extract_global_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from self.global_features
def extract_file_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.file.extract_features(self.be2, self.buf)
def get_functions(self) -> Iterator[FunctionHandle]:
for flow_graph_index, flow_graph in enumerate(self.be2.flow_graph):
entry_basic_block_index: int = flow_graph.entry_basic_block_index
flow_graph_address: int = self.idx.get_basic_block_address(entry_basic_block_index)
vertex_idx: int = self.idx.vertex_index_by_address[flow_graph_address]
be2_vertex: BinExport2.CallGraph.Vertex = self.be2.call_graph.vertex[vertex_idx]
# skip thunks
if capa.features.extractors.binexport2.helpers.is_vertex_type(
be2_vertex, BinExport2.CallGraph.Vertex.Type.THUNK
):
continue
yield FunctionHandle(
AbsoluteVirtualAddress(flow_graph_address),
inner=FunctionContext(self.ctx, flow_graph_index, self.format, self.os, self.arch),
)
def extract_function_features(self, fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.function.extract_features(fh)
def get_basic_blocks(self, fh: FunctionHandle) -> Iterator[BBHandle]:
fhi: FunctionContext = fh.inner
flow_graph_index: int = fhi.flow_graph_index
flow_graph: BinExport2.FlowGraph = self.be2.flow_graph[flow_graph_index]
for basic_block_index in flow_graph.basic_block_index:
basic_block_address: int = self.idx.get_basic_block_address(basic_block_index)
yield BBHandle(
address=AbsoluteVirtualAddress(basic_block_address),
inner=BasicBlockContext(basic_block_index),
)
def extract_basic_block_features(self, fh: FunctionHandle, bbh: BBHandle) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.basicblock.extract_features(fh, bbh)
def get_instructions(self, fh: FunctionHandle, bbh: BBHandle) -> Iterator[InsnHandle]:
bbi: BasicBlockContext = bbh.inner
basic_block: BinExport2.BasicBlock = self.be2.basic_block[bbi.basic_block_index]
for instruction_index, _, instruction_address in self.idx.basic_block_instructions(basic_block):
yield InsnHandle(
address=AbsoluteVirtualAddress(instruction_address),
inner=InstructionContext(instruction_index),
)
def extract_insn_features(
self, fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.binexport2.insn.extract_features(fh, bbh, ih)
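# hypothetical usage sketch; the loader step is an assumption, not part of
# this file:
#
#   be2 = <parse a .BinExport file into a BinExport2 message>
#   buf = Path("sample.exe").read_bytes()
#   extractor = BinExport2FeatureExtractor(be2, buf)
#   for fh in extractor.get_functions():
#       for feature, addr in extractor.extract_function_features(fh):
#           ...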

View File

@@ -1,80 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import io
import logging
from typing import Tuple, Iterator
import pefile
from elftools.elf.elffile import ELFFile
import capa.features.common
import capa.features.extractors.common
import capa.features.extractors.pefile
import capa.features.extractors.elffile
from capa.features.common import Feature
from capa.features.address import Address
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
def extract_file_export_names(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: pefile.PE = pefile.PE(data=buf)
yield from capa.features.extractors.pefile.extract_file_export_names(pe)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
yield from capa.features.extractors.elffile.extract_file_export_names(elf)
else:
logger.warning("unsupported format")
def extract_file_import_names(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: pefile.PE = pefile.PE(data=buf)
yield from capa.features.extractors.pefile.extract_file_import_names(pe)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
yield from capa.features.extractors.elffile.extract_file_import_names(elf)
else:
logger.warning("unsupported format")
def extract_file_section_names(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
if buf.startswith(capa.features.extractors.common.MATCH_PE):
pe: pefile.PE = pefile.PE(data=buf)
yield from capa.features.extractors.pefile.extract_file_section_names(pe)
elif buf.startswith(capa.features.extractors.common.MATCH_ELF):
elf: ELFFile = ELFFile(io.BytesIO(buf))
yield from capa.features.extractors.elffile.extract_file_section_names(elf)
else:
logger.warning("unsupported format")
def extract_file_strings(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.common.extract_file_strings(buf)
def extract_file_format(_be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.common.extract_format(buf)
def extract_features(be2: BinExport2, buf: bytes) -> Iterator[Tuple[Feature, Address]]:
"""extract file features"""
for file_handler in FILE_HANDLERS:
for feature, addr in file_handler(be2, buf):
yield feature, addr
FILE_HANDLERS = (
extract_file_export_names,
extract_file_import_names,
extract_file_strings,
extract_file_section_names,
extract_file_format,
)
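# note: the handlers above that need PE/ELF parsing dispatch on the input file
# magic (capa.features.extractors.common.MATCH_PE / MATCH_ELF) and log
# "unsupported format" for anything else; string and format extraction run
# regardless of format.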

View File

@@ -1,72 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import List, Tuple, Iterator
from capa.features.file import FunctionName
from capa.features.common import Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.extractors import loops
from capa.features.extractors.binexport2 import BinExport2Index, FunctionContext
from capa.features.extractors.base_extractor import FunctionHandle
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
def extract_function_calls_to(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
flow_graph_index: int = fhi.flow_graph_index
flow_graph_address: int = idx.flow_graph_address_by_index[flow_graph_index]
vertex_index: int = idx.vertex_index_by_address[flow_graph_address]
for caller_index in idx.callers_by_vertex_index[vertex_index]:
caller: BinExport2.CallGraph.Vertex = be2.call_graph.vertex[caller_index]
caller_address: int = caller.address
yield Characteristic("calls to"), AbsoluteVirtualAddress(caller_address)
def extract_function_loop(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
be2: BinExport2 = fhi.ctx.be2
flow_graph_index: int = fhi.flow_graph_index
flow_graph: BinExport2.FlowGraph = be2.flow_graph[flow_graph_index]
edges: List[Tuple[int, int]] = []
for edge in flow_graph.edge:
edges.append((edge.source_basic_block_index, edge.target_basic_block_index))
if loops.has_loop(edges):
yield Characteristic("loop"), fh.address
def extract_function_name(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
flow_graph_index: int = fhi.flow_graph_index
flow_graph_address: int = idx.flow_graph_address_by_index[flow_graph_index]
vertex_index: int = idx.vertex_index_by_address[flow_graph_address]
vertex: BinExport2.CallGraph.Vertex = be2.call_graph.vertex[vertex_index]
if vertex.HasField("mangled_name"):
yield FunctionName(vertex.mangled_name), fh.address
def extract_features(fh: FunctionHandle) -> Iterator[Tuple[Feature, Address]]:
for func_handler in FUNCTION_HANDLERS:
for feature, addr in func_handler(fh):
yield feature, addr
FUNCTION_HANDLERS = (extract_function_calls_to, extract_function_loop, extract_function_name)
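# orientation sketch (hypothetical indices): extract_function_loop feeds
# (source, target) basic-block pairs to loops.has_loop, which reports whether
# the flow graph contains a cycle, e.g. 1 -> 2 -> 1:
#
#   edges = [(0, 1), (1, 2), (2, 1)]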

View File

@@ -1,650 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import re
from typing import Set, Dict, List, Tuple, Union, Iterator, Optional
from collections import defaultdict
from dataclasses import dataclass
import capa.features.extractors.helpers
import capa.features.extractors.binexport2.helpers
from capa.features.common import ARCH_I386, ARCH_AMD64, ARCH_AARCH64
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
HAS_ARCH32 = {ARCH_I386}
HAS_ARCH64 = {ARCH_AARCH64, ARCH_AMD64}
HAS_ARCH_INTEL = {ARCH_I386, ARCH_AMD64}
HAS_ARCH_ARM = {ARCH_AARCH64}
def mask_immediate(arch: Set[str], immediate: int) -> int:
if arch & HAS_ARCH64:
immediate &= 0xFFFFFFFFFFFFFFFF
elif arch & HAS_ARCH32:
immediate &= 0xFFFFFFFF
return immediate
def twos_complement(arch: Set[str], immediate: int, default: Optional[int] = None) -> int:
if default is not None:
return capa.features.extractors.helpers.twos_complement(immediate, default)
elif arch & HAS_ARCH64:
return capa.features.extractors.helpers.twos_complement(immediate, 64)
elif arch & HAS_ARCH32:
return capa.features.extractors.helpers.twos_complement(immediate, 32)
return immediate
def is_address_mapped(be2: BinExport2, address: int) -> bool:
"""return True if the given address is mapped"""
sections_with_perms: Iterator[BinExport2.Section] = filter(lambda s: s.flag_r or s.flag_w or s.flag_x, be2.section)
return any(section.address <= address < section.address + section.size for section in sections_with_perms)
def is_vertex_type(vertex: BinExport2.CallGraph.Vertex, type_: BinExport2.CallGraph.Vertex.Type.ValueType) -> bool:
return vertex.HasField("type") and vertex.type == type_
# internal to `build_expression_tree`
# this is unstable: it is subject to change, so don't rely on it!
def _prune_expression_tree_empty_shifts(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
tree_index: int,
):
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
if expression.type == BinExport2.Expression.OPERATOR:
if len(children_tree_indexes) == 0 and expression.symbol in ("lsl", "lsr"):
# Ghidra may emit superfluous lsl nodes with no children.
# https://github.com/mandiant/capa/pull/2340/files#r1750003919
# Which is maybe: https://github.com/NationalSecurityAgency/ghidra/issues/6821#issuecomment-2295394697
#
            # The behavior seems to be as if the shift wasn't there (a shift by #0),
            # so we want to remove references to this node from any parent nodes.
for tree_node in expression_tree:
if tree_index in tree_node:
tree_node.remove(tree_index)
return
for child_tree_index in children_tree_indexes:
_prune_expression_tree_empty_shifts(be2, operand, expression_tree, child_tree_index)
# internal to `build_expression_tree`
# this is unstable: it is subject to change, so don't rely on it!
def _prune_expression_tree_empty_commas(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
tree_index: int,
):
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
if expression.type == BinExport2.Expression.OPERATOR:
if len(children_tree_indexes) == 1 and expression.symbol == ",":
# Due to the above pruning of empty LSL or LSR expressions,
# the parents might need to be fixed up.
#
# Specifically, if the pruned node was part of a comma list with two children,
# now there's only a single child, which renders as an extra comma,
# so we replace references to the comma node with the immediate child.
#
# A more correct way of doing this might be to walk up the parents and do fixups,
# but I'm not quite sure how to do this yet. Just do two passes right now.
child = children_tree_indexes[0]
for tree_node in expression_tree:
if tree_index in tree_node:
tree_node[tree_node.index(tree_index)] = child
return
for child_tree_index in children_tree_indexes:
_prune_expression_tree_empty_commas(be2, operand, expression_tree, child_tree_index)
# internal to `build_expression_tree`
# this is unstable: it is subject to change, so don't rely on it!
def _prune_expression_tree(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
):
_prune_expression_tree_empty_shifts(be2, operand, expression_tree, 0)
_prune_expression_tree_empty_commas(be2, operand, expression_tree, 0)
# this is unstable: it is subject to change, so don't rely on it!
def _build_expression_tree(
be2: BinExport2,
operand: BinExport2.Operand,
) -> List[List[int]]:
# The reconstructed expression tree layout, linking parent nodes to their children.
#
# There is one list of integers for each expression in the operand.
# These integers are indexes of other expressions in the same operand,
# which are the children of that expression.
#
# So:
#
# [ [1, 3], [2], [], [4], [5], []]
#
# means the first expression has two children, at index 1 and 3,
# and the tree looks like:
#
# 0
# / \
# 1 3
# | |
# 2 4
# |
# 5
#
# Remember, these are the indices into the entries in operand.expression_index.
if len(operand.expression_index) == 0:
# Ghidra bug where empty operands (no expressions) may
# exist (see https://github.com/NationalSecurityAgency/ghidra/issues/6817)
return []
tree: List[List[int]] = []
for i, expression_index in enumerate(operand.expression_index):
children = []
# scan all subsequent expressions, looking for those that have parent_index == current.expression_index
for j, candidate_index in enumerate(operand.expression_index[i + 1 :]):
candidate = be2.expression[candidate_index]
if candidate.parent_index == expression_index:
children.append(i + j + 1)
tree.append(children)
_prune_expression_tree(be2, operand, tree)
_prune_expression_tree(be2, operand, tree)
return tree
def _fill_operand_expression_list(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
tree_index: int,
expression_list: List[BinExport2.Expression],
):
"""
Walk the given expression tree and collect the expression nodes in-order.
"""
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
if expression.type == BinExport2.Expression.REGISTER:
assert len(children_tree_indexes) == 0
expression_list.append(expression)
return
elif expression.type == BinExport2.Expression.SYMBOL:
assert len(children_tree_indexes) <= 1
expression_list.append(expression)
if len(children_tree_indexes) == 0:
return
elif len(children_tree_indexes) == 1:
# like: v
# from: mov v0.D[0x1], x9
#       |
#       0
#       .
#       |
#       D
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
else:
raise NotImplementedError(len(children_tree_indexes))
elif expression.type == BinExport2.Expression.IMMEDIATE_INT:
assert len(children_tree_indexes) == 0
expression_list.append(expression)
return
elif expression.type == BinExport2.Expression.SIZE_PREFIX:
# like: b4
#
# We might want to use this occasionally, such as to disambiguate the
# size of MOVs into/out of memory. But I'm not sure when/where we need that yet.
#
# IDA spams this size prefix hint *everywhere*, so we can't rely on the exporter
# to provide it only when necessary.
assert len(children_tree_indexes) == 1
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
elif expression.type == BinExport2.Expression.OPERATOR:
if len(children_tree_indexes) == 1:
# prefix operator, like "ds:"
expression_list.append(expression)
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
elif len(children_tree_indexes) == 2:
# infix operator: like "+" in "ebp+10"
child_a = children_tree_indexes[0]
child_b = children_tree_indexes[1]
_fill_operand_expression_list(be2, operand, expression_tree, child_a, expression_list)
expression_list.append(expression)
_fill_operand_expression_list(be2, operand, expression_tree, child_b, expression_list)
return
elif len(children_tree_indexes) == 3:
# infix operator: like "+" in "ebp+ecx+10"
child_a = children_tree_indexes[0]
child_b = children_tree_indexes[1]
child_c = children_tree_indexes[2]
_fill_operand_expression_list(be2, operand, expression_tree, child_a, expression_list)
expression_list.append(expression)
_fill_operand_expression_list(be2, operand, expression_tree, child_b, expression_list)
expression_list.append(expression)
_fill_operand_expression_list(be2, operand, expression_tree, child_c, expression_list)
return
else:
raise NotImplementedError(len(children_tree_indexes))
elif expression.type == BinExport2.Expression.DEREFERENCE:
assert len(children_tree_indexes) == 1
expression_list.append(expression)
child_index = children_tree_indexes[0]
_fill_operand_expression_list(be2, operand, expression_tree, child_index, expression_list)
return
elif expression.type == BinExport2.Expression.IMMEDIATE_FLOAT:
raise NotImplementedError(expression.type)
else:
raise NotImplementedError(expression.type)
def get_operand_expressions(be2: BinExport2, op: BinExport2.Operand) -> List[BinExport2.Expression]:
tree = _build_expression_tree(be2, op)
expressions: List[BinExport2.Expression] = []
_fill_operand_expression_list(be2, op, tree, 0, expressions)
return expressions
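# An illustrative sketch (not part of capa): for a hypothetical operand
# meaning "x0 + 16", the flattened list interleaves the infix operator
# between its children, in order.
_demo_be2 = BinExport2()
_demo_be2.expression.add(type=BinExport2.Expression.OPERATOR, symbol="+")                         # 0
_demo_be2.expression.add(type=BinExport2.Expression.REGISTER, symbol="x0", parent_index=0)        # 1
_demo_be2.expression.add(type=BinExport2.Expression.IMMEDIATE_INT, immediate=16, parent_index=0)  # 2
_demo_operand = _demo_be2.operand.add(expression_index=[0, 1, 2])
_demo_exprs = get_operand_expressions(_demo_be2, _demo_operand)
assert [e.symbol or e.immediate for e in _demo_exprs] == ["x0", "+", 16]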
def get_operand_register_expression(be2: BinExport2, operand: BinExport2.Operand) -> Optional[BinExport2.Expression]:
if len(operand.expression_index) == 1:
expression: BinExport2.Expression = be2.expression[operand.expression_index[0]]
if expression.type == BinExport2.Expression.REGISTER:
return expression
return None
def get_operand_immediate_expression(be2: BinExport2, operand: BinExport2.Operand) -> Optional[BinExport2.Expression]:
if len(operand.expression_index) == 1:
# - type: IMMEDIATE_INT
# immediate: 20588728364
# parent_index: 0
expression: BinExport2.Expression = be2.expression[operand.expression_index[0]]
if expression.type == BinExport2.Expression.IMMEDIATE_INT:
return expression
elif len(operand.expression_index) == 2:
# from IDA, which provides a size hint for every operand,
# we get the following pattern for immediate constants:
#
# - type: SIZE_PREFIX
# symbol: "b8"
# - type: IMMEDIATE_INT
# immediate: 20588728364
# parent_index: 0
expression0: BinExport2.Expression = be2.expression[operand.expression_index[0]]
expression1: BinExport2.Expression = be2.expression[operand.expression_index[1]]
if expression0.type == BinExport2.Expression.SIZE_PREFIX:
if expression1.type == BinExport2.Expression.IMMEDIATE_INT:
return expression1
return None
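# An illustrative sketch (not part of capa): IDA-style operands carry a size
# prefix ahead of the immediate, and both shapes resolve to the same value.
_demo_be2 = BinExport2()
_demo_be2.expression.add(type=BinExport2.Expression.SIZE_PREFIX, symbol="b8")                         # 0
_demo_be2.expression.add(type=BinExport2.Expression.IMMEDIATE_INT, immediate=0x1234, parent_index=0)  # 1
_demo_operand = _demo_be2.operand.add(expression_index=[0, 1])
_demo_expr = get_operand_immediate_expression(_demo_be2, _demo_operand)
assert _demo_expr is not None and _demo_expr.immediate == 0x1234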
def get_instruction_mnemonic(be2: BinExport2, instruction: BinExport2.Instruction) -> str:
return be2.mnemonic[instruction.mnemonic_index].name.lower()
def get_instruction_operands(be2: BinExport2, instruction: BinExport2.Instruction) -> List[BinExport2.Operand]:
return [be2.operand[operand_index] for operand_index in instruction.operand_index]
def split_with_delimiters(s: str, delimiters: Tuple[str, ...]) -> Iterator[str]:
"""
Splits a string by any of the provided delimiter characters,
including the delimiters in the results.
Args:
s: The string to split.
delimiters: The characters to treat as delimiters.
"""
start = 0
for i, char in enumerate(s):
if char in delimiters:
yield s[start:i]
yield char
start = i + 1
if start < len(s):
yield s[start:]
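# A quick illustrative check: the delimiters themselves are kept in the
# output stream, which lets the pattern parser below round-trip operator symbols.
assert list(split_with_delimiters("x0,x1+8", (",", "+"))) == ["x0", ",", "x1", "+", "8"]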
BinExport2OperandPattern = Union[str, Tuple[str, ...]]
@dataclass
class BinExport2InstructionPattern:
"""
This describes a way to match disassembled instructions, with mnemonics and operands.
You can specify constraints on the instruction, via:
- the mnemonics, like "mov",
- number of operands, and
- format of each operand, "[reg, reg, #int]".
During matching, you can also capture a single element, to see its concrete value.
For example, given the pattern:
mov reg0, #int0 ; capture #int0
and the instruction:
mov eax, 1
Then the capture will contain the immediate integer 1.
This matcher uses the BinExport2 data layout under the hood.
"""
mnemonics: Tuple[str, ...]
operands: Tuple[Union[str, BinExport2OperandPattern], ...]
capture: Optional[str]
@classmethod
def from_str(cls, query: str):
"""
Parse a pattern string into a Pattern instance.
The supported syntax is like this:
br reg
br reg ; capture reg
br reg(stack) ; capture reg
br reg(not-stack) ; capture reg
mov reg0, reg1 ; capture reg0
adrp reg, #int ; capture #int
add reg, reg, #int ; capture #int
ldr reg0, [reg1] ; capture reg1
ldr|str reg, [reg, #int] ; capture #int
ldr|str reg, [reg(stack), #int] ; capture #int
ldr|str reg, [reg(not-stack), #int] ; capture #int
ldr|str reg, [reg, #int]! ; capture #int
ldr|str reg, [reg], #int ; capture #int
ldp|stp reg, reg, [reg, #int] ; capture #int
ldp|stp reg, reg, [reg, #int]! ; capture #int
ldp|stp reg, reg, [reg], #int ; capture #int
"""
#
# The implementation of the parser here is obviously ugly.
# It's handwritten and probably fragile. But since we don't
# expect this to be widely used, it's probably ok.
# Don't hesitate to rewrite this if it becomes more important.
#
# Note that this doesn't have to be very performant.
# We expect these patterns to be parsed once upfront and then reused
# (globally at the module level?) rather than within any loop.
#
pattern, _, comment = query.strip().partition(";")
# we don't support fs: yet
assert ":" not in pattern
# from "capture #int" to "#int"
if comment:
comment = comment.strip()
assert comment.startswith("capture ")
capture = comment[len("capture ") :]
else:
capture = None
# from "ldr|str ..." to ["ldr", "str"]
pattern = pattern.strip()
mnemonic, _, rest = pattern.partition(" ")
mnemonics = mnemonic.split("|")
operands: List[Union[str, Tuple[str, ...]]] = []
while rest:
rest = rest.strip()
if not rest.startswith("["):
# If it's not a dereference, which looks like `[op, op, op, ...]`,
# then it's a simple operand, which we can split at the next comma.
operand, _, rest = rest.partition(", ")
rest = rest.strip()
operands.append(operand)
else:
# This looks like a dereference, something like `[op, op, op, ...]`.
# Since these can't be nested, look for the next ] and then parse backwards.
deref_end = rest.index("]")
try:
deref_end = rest.index(", ", deref_end)
deref_end += len(", ")
except ValueError:
deref = rest
rest = ""
else:
deref = rest[:deref_end]
rest = rest[deref_end:]
rest = rest.strip()
deref = deref.rstrip(" ")
deref = deref.rstrip(",")
# like: [reg, #int]!
has_postindex_writeback = deref.endswith("!")
deref = deref.rstrip("!")
deref = deref.rstrip("]")
deref = deref.lstrip("[")
parts = tuple(split_with_delimiters(deref, (",", "+", "*")))
parts = tuple(s.strip() for s in parts)
# emit operands in this order to match
# how BinExport2 expressions are flattened
# by get_operand_expressions
if has_postindex_writeback:
operands.append(("!", "[") + parts)
else:
operands.append(("[",) + parts)
for operand in operands: # type: ignore
# Try to ensure we've parsed the operands correctly.
# This is just sanity checking.
for o in (operand,) if isinstance(operand, str) else operand:
# operands can look like:
# - reg
# - reg0
# - reg(stack)
# - reg0(stack)
# - reg(not-stack)
# - reg0(not-stack)
# - #int
# - #int0
# and a limited set of supported operators.
# use an inline regex so that it's easy to read. not perf critical.
assert re.match(r"^(reg|#int)[0-9]?(\(stack\)|\(not-stack\))?$", o) or o in ("[", ",", "!", "+", "*")
return cls(tuple(mnemonics), tuple(operands), capture)
@dataclass
class MatchResult:
operand_index: int
expression_index: int
expression: BinExport2.Expression
def match(
self, mnemonic: str, operand_expressions: List[List[BinExport2.Expression]]
) -> Optional["BinExport2InstructionPattern.MatchResult"]:
"""
Match the given BinExport2 data against this pattern.
The BinExport2 expression tree must have been flattened, such as with
capa.features.extractors.binexport2.helpers.get_operand_expressions.
If there's a match, the captured Expression instance is returned.
Otherwise, you get None back.
"""
if mnemonic not in self.mnemonics:
return None
if len(self.operands) != len(operand_expressions):
return None
captured = None
for operand_index, found_expressions in enumerate(operand_expressions):
wanted_expressions = self.operands[operand_index]
# from `"reg"` to `("reg", )`
if isinstance(wanted_expressions, str):
wanted_expressions = (wanted_expressions,)
assert isinstance(wanted_expressions, tuple)
if len(wanted_expressions) != len(found_expressions):
return None
for expression_index, (wanted_expression, found_expression) in enumerate(
zip(wanted_expressions, found_expressions)
):
if wanted_expression.startswith("reg"):
if found_expression.type != BinExport2.Expression.REGISTER:
return None
if wanted_expression.endswith(")"):
if wanted_expression.endswith("(not-stack)"):
# stack registers, by architecture:
#   intel 64: rsp, rbp
#   intel 32: esp, ebp
#   intel 16: sp, bp
#   arm: sp
register_name = found_expression.symbol.lower()
if register_name in ("rsp", "esp", "sp", "rbp", "ebp", "bp"):
return None
elif wanted_expression.endswith("(stack)"):
register_name = found_expression.symbol.lower()
if register_name not in ("rsp", "esp", "sp", "rbp", "ebp", "bp"):
return None
else:
raise ValueError("unexpected expression suffix", wanted_expression)
if self.capture == wanted_expression:
captured = BinExport2InstructionPattern.MatchResult(
operand_index, expression_index, found_expression
)
elif wanted_expression.startswith("#int"):
if found_expression.type != BinExport2.Expression.IMMEDIATE_INT:
return None
if self.capture == wanted_expression:
captured = BinExport2InstructionPattern.MatchResult(
operand_index, expression_index, found_expression
)
elif wanted_expression == "[":
if found_expression.type != BinExport2.Expression.DEREFERENCE:
return None
elif wanted_expression in (",", "!", "+", "*"):
if found_expression.type != BinExport2.Expression.OPERATOR:
return None
if found_expression.symbol != wanted_expression:
return None
else:
raise ValueError(found_expression)
if captured:
return captured
else:
# There were no captures, so
# return an arbitrary non-None expression.
return BinExport2InstructionPattern.MatchResult(operand_index, expression_index, found_expression)
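# An illustrative sketch (not part of capa) of how a pattern parses: a
# dereference operand flattens to the same token stream that
# get_operand_expressions produces for the real instruction.
_demo_pattern = BinExport2InstructionPattern.from_str("ldr|str reg, [reg, #int] ; capture #int")
assert _demo_pattern.mnemonics == ("ldr", "str")
assert _demo_pattern.operands == ("reg", ("[", "reg", ",", "#int"))
assert _demo_pattern.capture == "#int"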
class BinExport2InstructionPatternMatcher:
"""Index and match a collection of instruction patterns."""
def __init__(self, queries: List[BinExport2InstructionPattern]):
self.queries = queries
# shard the patterns by (mnemonic, #operands)
self._index: Dict[Tuple[str, int], List[BinExport2InstructionPattern]] = defaultdict(list)
for query in queries:
for mnemonic in query.mnemonics:
self._index[(mnemonic.lower(), len(query.operands))].append(query)
@classmethod
def from_str(cls, patterns: str):
return cls(
[
BinExport2InstructionPattern.from_str(line)
for line in filter(
lambda line: not line.startswith("#"), (line.strip() for line in patterns.split("\n"))
)
]
)
def match(
self, mnemonic: str, operand_expressions: List[List[BinExport2.Expression]]
) -> Optional[BinExport2InstructionPattern.MatchResult]:
queries = self._index.get((mnemonic.lower(), len(operand_expressions)), [])
for query in queries:
captured = query.match(mnemonic.lower(), operand_expressions)
if captured:
return captured
return None
def match_with_be2(
self, be2: BinExport2, instruction_index: int
) -> Optional[BinExport2InstructionPattern.MatchResult]:
instruction: BinExport2.Instruction = be2.instruction[instruction_index]
mnemonic: str = get_instruction_mnemonic(be2, instruction)
if (mnemonic.lower(), len(instruction.operand_index)) not in self._index:
# verify that we might have a hit before we realize the operand expression list
return None
operands = []
for operand_index in instruction.operand_index:
operands.append(get_operand_expressions(be2, be2.operand[operand_index]))
return self.match(mnemonic, operands)
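# An illustrative sketch (hypothetical names): patterns are parsed once,
# ideally at module scope, then matched per instruction.
_DEMO_PATTERNS = BinExport2InstructionPatternMatcher.from_str("add reg, reg, #int ; capture #int")
# given `be2` and an `instruction_index` from the surrounding analysis context:
#     result = _DEMO_PATTERNS.match_with_be2(be2, instruction_index)
#     if result is not None:
#         immediate = result.expression.immediate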

View File

@@ -1,254 +0,0 @@
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import List, Tuple, Iterator
import capa.features.extractors.helpers
import capa.features.extractors.strings
import capa.features.extractors.binexport2.helpers
import capa.features.extractors.binexport2.arch.arm.insn
import capa.features.extractors.binexport2.arch.intel.insn
from capa.features.insn import API, Mnemonic
from capa.features.common import Bytes, String, Feature, Characteristic
from capa.features.address import Address, AbsoluteVirtualAddress
from capa.features.extractors.binexport2 import (
AddressSpace,
AnalysisContext,
BinExport2Index,
FunctionContext,
ReadMemoryError,
BinExport2Analysis,
InstructionContext,
)
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle
from capa.features.extractors.binexport2.helpers import HAS_ARCH_ARM, HAS_ARCH_INTEL
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger(__name__)
def extract_insn_api_features(fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
be2_index: BinExport2Index = fhi.ctx.idx
be2_analysis: BinExport2Analysis = fhi.ctx.analysis
insn: BinExport2.Instruction = be2.instruction[ii.instruction_index]
for addr in insn.call_target:
addr = be2_analysis.thunks.get(addr, addr)
if addr not in be2_index.vertex_index_by_address:
# disassembler did not define function at address
logger.debug("0x%x is not a vertex", addr)
continue
vertex_idx: int = be2_index.vertex_index_by_address[addr]
vertex: BinExport2.CallGraph.Vertex = be2.call_graph.vertex[vertex_idx]
if not capa.features.extractors.binexport2.helpers.is_vertex_type(
vertex, BinExport2.CallGraph.Vertex.Type.IMPORTED
):
continue
if not vertex.HasField("mangled_name"):
logger.debug("vertex %d does not have mangled_name", vertex_idx)
continue
api_name: str = vertex.mangled_name
for name in capa.features.extractors.helpers.generate_symbols("", api_name):
yield API(name), ih.address
def extract_insn_number_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_insn_number_features(fh, bbh, ih)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_insn_number_features(fh, bbh, ih)
def extract_insn_bytes_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
ctx: AnalysisContext = fhi.ctx
be2: BinExport2 = ctx.be2
idx: BinExport2Index = ctx.idx
address_space: AddressSpace = ctx.address_space
instruction_index: int = ii.instruction_index
if instruction_index in idx.string_reference_index_by_source_instruction_index:
# disassembler already identified string reference from instruction
return
reference_addresses: List[int] = []
if instruction_index in idx.data_reference_index_by_source_instruction_index:
for data_reference_index in idx.data_reference_index_by_source_instruction_index[instruction_index]:
data_reference: BinExport2.DataReference = be2.data_reference[data_reference_index]
data_reference_address: int = data_reference.address
if data_reference_address in idx.insn_address_by_index:
# appears to be code
continue
reference_addresses.append(data_reference_address)
for reference_address in reference_addresses:
try:
# if at end of segment then there might be an overrun here.
buf: bytes = address_space.read_memory(reference_address, 0x100)
except ReadMemoryError:
logger.debug("failed to read memory: 0x%x", reference_address)
continue
if capa.features.extractors.helpers.all_zeros(buf):
continue
is_string: bool = False
# note: we *always* break after the first iteration
for s in capa.features.extractors.strings.extract_ascii_strings(buf):
if s.offset != 0:
break
yield String(s.s), ih.address
is_string = True
break
# note: we *always* break after the first iteration
for s in capa.features.extractors.strings.extract_unicode_strings(buf):
if s.offset != 0:
break
yield String(s.s), ih.address
is_string = True
break
if not is_string:
yield Bytes(buf), ih.address
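# An illustrative sketch (not part of capa) of the decision above: a string
# feature is only taken when the string starts at offset 0 of the referenced
# data; anything else falls through to a Bytes feature.
_demo_buf = b"hello world\x00" + b"\x00" * 16
_demo_strings = list(capa.features.extractors.strings.extract_ascii_strings(_demo_buf))
assert _demo_strings and _demo_strings[0].offset == 0 and _demo_strings[0].s == "hello world"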
def extract_insn_string_features(
fh: FunctionHandle, _bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
idx: BinExport2Index = fhi.ctx.idx
instruction_index: int = ii.instruction_index
if instruction_index in idx.string_reference_index_by_source_instruction_index:
for string_reference_index in idx.string_reference_index_by_source_instruction_index[instruction_index]:
string_reference: BinExport2.Reference = be2.string_reference[string_reference_index]
string_index: int = string_reference.string_table_index
string: str = be2.string_table[string_index]
yield String(string), ih.address
def extract_insn_offset_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_insn_offset_features(fh, bbh, ih)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_insn_offset_features(fh, bbh, ih)
def extract_insn_nzxor_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_insn_nzxor_characteristic_features(
fh, bbh, ih
)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_insn_nzxor_characteristic_features(
fh, bbh, ih
)
def extract_insn_mnemonic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
mnemonic: BinExport2.Mnemonic = be2.mnemonic[instruction.mnemonic_index]
mnemonic_name: str = mnemonic.name.lower()
yield Mnemonic(mnemonic_name), ih.address
def extract_function_calls_from(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
"""extract functions calls from features
most relevant at the function scope;
however, its most efficient to extract at the instruction scope.
"""
fhi: FunctionContext = fh.inner
ii: InstructionContext = ih.inner
be2: BinExport2 = fhi.ctx.be2
instruction: BinExport2.Instruction = be2.instruction[ii.instruction_index]
for call_target_address in instruction.call_target:
addr: AbsoluteVirtualAddress = AbsoluteVirtualAddress(call_target_address)
yield Characteristic("calls from"), addr
if fh.address == addr:
yield Characteristic("recursive call"), addr
def extract_function_indirect_call_characteristic_features(
fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle
) -> Iterator[Tuple[Feature, Address]]:
fhi: FunctionContext = fh.inner
if fhi.arch & HAS_ARCH_INTEL:
yield from capa.features.extractors.binexport2.arch.intel.insn.extract_function_indirect_call_characteristic_features(
fh, bbh, ih
)
elif fhi.arch & HAS_ARCH_ARM:
yield from capa.features.extractors.binexport2.arch.arm.insn.extract_function_indirect_call_characteristic_features(
fh, bbh, ih
)
def extract_features(f: FunctionHandle, bbh: BBHandle, insn: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
"""extract instruction features"""
for inst_handler in INSTRUCTION_HANDLERS:
for feature, ea in inst_handler(f, bbh, insn):
yield feature, ea
INSTRUCTION_HANDLERS = (
extract_insn_api_features,
extract_insn_number_features,
extract_insn_bytes_features,
extract_insn_string_features,
extract_insn_offset_features,
extract_insn_nzxor_characteristic_features,
extract_insn_mnemonic_features,
extract_function_calls_from,
extract_function_indirect_call_characteristic_features,
)

View File

@@ -5,6 +5,8 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import struct
from typing import Tuple, Iterator
from binaryninja import Segment, BinaryView, SymbolType, SymbolBinding
@@ -18,24 +20,56 @@ from capa.features.address import NO_ADDRESS, Address, FileOffsetAddress, Absolu
from capa.features.extractors.binja.helpers import read_c_string, unmangle_c_name
def check_segment_for_pe(bv: BinaryView, seg: Segment) -> Iterator[Tuple[Feature, Address]]:
"""check segment for embedded PE"""
start = 0
if bv.view_type == "PE" and seg.start == bv.start:
# If this is the first segment of the binary, skip the first byte.
# Otherwise, there will always be a matched PE at the start of the binaryview.
def check_segment_for_pe(bv: BinaryView, seg: Segment) -> Iterator[Tuple[int, int]]:
"""check segment for embedded PE
adapted for binja from:
https://github.com/vivisect/vivisect/blob/7be4037b1cecc4551b397f840405a1fc606f9b53/PE/carve.py#L19
"""
mz_xor = [
(
capa.features.extractors.helpers.xor_static(b"MZ", i),
capa.features.extractors.helpers.xor_static(b"PE", i),
i,
)
for i in range(256)
]
todo = []
# If this is the first segment of the binary, skip the first byte. Otherwise, there will always be a matched
# PE at the start of the binaryview.
start = seg.start
if bv.view_type == "PE" and start == bv.start:
start += 1
buf = bv.read(seg.start, seg.length)
for mzx, pex, i in mz_xor:
for off, _ in bv.find_all_data(start, seg.end, mzx):
todo.append((off, mzx, pex, i))
for offset, _ in capa.features.extractors.helpers.carve_pe(buf, start):
yield Characteristic("embedded pe"), FileOffsetAddress(seg.start + offset)
while len(todo):
off, mzx, pex, i = todo.pop()
# The MZ header has one field we will check: e_lfanew, at offset 0x3C
e_lfanew = off + 0x3C
if seg.end < (e_lfanew + 4):
continue
newoff = struct.unpack("<I", capa.features.extractors.helpers.xor_static(bv.read(e_lfanew, 4), i))[0]
peoff = off + newoff
if seg.end < (peoff + 2):
continue
if bv.read(peoff, 2) == pex:
yield off, i
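# An illustrative sketch (not part of capa) of the header walk above: the MZ
# header stores e_lfanew at offset 0x3C, which points at the "PE" signature.
_demo_hdr = bytearray(0x100)
_demo_hdr[0:2] = b"MZ"
_demo_hdr[0x3C:0x40] = struct.pack("<I", 0x80)
_demo_hdr[0x80:0x82] = b"PE"
_demo_e_lfanew = struct.unpack_from("<I", _demo_hdr, 0x3C)[0]
assert bytes(_demo_hdr[_demo_e_lfanew : _demo_e_lfanew + 2]) == b"PE"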
def extract_file_embedded_pe(bv: BinaryView) -> Iterator[Tuple[Feature, Address]]:
"""extract embedded PE features"""
for seg in bv.segments:
yield from check_segment_for_pe(bv, seg)
for ea, _ in check_segment_for_pe(bv, seg):
yield Characteristic("embedded pe"), FileOffsetAddress(ea)
def extract_file_export_names(bv: BinaryView) -> Iterator[Tuple[Feature, Address]]:

View File

@@ -5,175 +5,31 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import os
import sys
import logging
import subprocess
import importlib.util
from typing import Optional
from pathlib import Path
logger = logging.getLogger(__name__)
# When the script gets executed as a standalone executable (via PyInstaller), `import binaryninja` does not work because
# we have excluded the binaryninja module in `pyinstaller.spec`. The trick here is to call the system Python and try
# to find out the path of the binaryninja module that has been installed.
# Note, including the binaryninja module in the `pyinstaller.spec` would not work, since the binaryninja module
# tries to find the binaryninja core, e.g., `libbinaryninjacore.dylib`, using a relative path, and this does not
# work when the binaryninja module is extracted by PyInstaller.
CODE = r"""
code = r"""
from pathlib import Path
from importlib import util
spec = util.find_spec('binaryninja')
if spec is not None:
if len(spec.submodule_search_locations) > 0:
path = Path(spec.submodule_search_locations[0])
# encode the path with utf8 then convert to hex, make sure it can be read and restored properly
print(str(path.parent).encode('utf8').hex())
path = Path(spec.submodule_search_locations[0])
# encode the path with utf8 then convert to hex, make sure it can be read and restored properly
print(str(path.parent).encode('utf8').hex())
"""
def find_binaryninja_path_via_subprocess() -> Optional[Path]:
raw_output = subprocess.check_output(["python", "-c", CODE]).decode("ascii").strip()
output = bytes.fromhex(raw_output).decode("utf8")
if not output.strip():
return None
return Path(output)
def get_desktop_entry(name: str) -> Optional[Path]:
"""
Find the path for the given XDG Desktop Entry name.
Like:
>> get_desktop_entry("com.vector35.binaryninja.desktop")
Path("~/.local/share/applications/com.vector35.binaryninja.desktop")
"""
assert sys.platform in ("linux", "linux2")
assert name.endswith(".desktop")
data_dirs = os.environ.get("XDG_DATA_DIRS", "/usr/share") + f":{Path.home()}/.local/share"
for data_dir in data_dirs.split(":"):
applications = Path(data_dir) / "applications"
for application in applications.glob("*.desktop"):
if application.name == name:
return application
return None
def get_binaryninja_path(desktop_entry: Path) -> Optional[Path]:
# from: Exec=/home/wballenthin/software/binaryninja/binaryninja %u
# to: /home/wballenthin/software/binaryninja/
for line in desktop_entry.read_text(encoding="utf-8").splitlines():
if not line.startswith("Exec="):
continue
if not line.endswith("binaryninja %u"):
continue
binaryninja_path = Path(line[len("Exec=") : -len("binaryninja %u")])
if not binaryninja_path.exists():
return None
return binaryninja_path
return None
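# A quick illustrative check of the Exec= slicing above (hypothetical entry):
_demo_line = "Exec=/home/user/software/binaryninja/binaryninja %u"
assert _demo_line[len("Exec=") : -len("binaryninja %u")] == "/home/user/software/binaryninja/"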
def validate_binaryninja_path(binaryninja_path: Path) -> bool:
if not binaryninja_path:
return False
module_path = binaryninja_path / "python"
if not module_path.is_dir():
return False
if not (module_path / "binaryninja" / "__init__.py").is_file():
return False
return True
def find_binaryninja() -> Optional[Path]:
binaryninja_path = find_binaryninja_path_via_subprocess()
if not binaryninja_path or not validate_binaryninja_path(binaryninja_path):
if sys.platform == "linux" or sys.platform == "linux2":
# ok
logger.debug("detected OS: linux")
elif sys.platform == "darwin":
logger.warning("unsupported platform to find Binary Ninja: %s", sys.platform)
return None
elif sys.platform == "win32":
logger.warning("unsupported platform to find Binary Ninja: %s", sys.platform)
return None
else:
logger.warning("unsupported platform to find Binary Ninja: %s", sys.platform)
return None
desktop_entry = get_desktop_entry("com.vector35.binaryninja.desktop")
if not desktop_entry:
logger.debug("failed to find Binary Ninja application")
return None
logger.debug("found Binary Ninja application: %s", desktop_entry)
binaryninja_path = get_binaryninja_path(desktop_entry)
if not binaryninja_path:
logger.debug("failed to determine Binary Ninja installation path")
return None
if not validate_binaryninja_path(binaryninja_path):
logger.debug("failed to validate Binary Ninja installation")
return None
logger.debug("found Binary Ninja installation: %s", binaryninja_path)
return binaryninja_path / "python"
def is_binaryninja_installed() -> bool:
"""Is the binaryninja module ready to import?"""
try:
return importlib.util.find_spec("binaryninja") is not None
except ModuleNotFoundError:
return False
def has_binaryninja() -> bool:
if is_binaryninja_installed():
logger.debug("found installed Binary Ninja API")
return True
logger.debug("Binary Ninja API not installed, searching...")
binaryninja_path = find_binaryninja()
if not binaryninja_path:
logger.debug("failed to find Binary Ninja installation")
return False
logger.debug("found Binary Ninja API: %s", binaryninja_path)
return True
def load_binaryninja() -> bool:
try:
import binaryninja
return True
except ImportError:
binaryninja_path = find_binaryninja()
if not binaryninja_path:
return False
sys.path.append(binaryninja_path.absolute().as_posix())
try:
import binaryninja # noqa: F401 unused import
return True
except ImportError:
return False
def find_binja_path() -> Path:
raw_output = subprocess.check_output(["python", "-c", code]).decode("ascii").strip()
return Path(bytes.fromhex(raw_output).decode("utf8"))
if __name__ == "__main__":
print(find_binaryninja_path_via_subprocess())
print(find_binja_path())

View File

@@ -9,7 +9,6 @@
import logging
from typing import Tuple, Iterator
import capa.features.extractors.helpers
from capa.helpers import assert_never
from capa.features.insn import API, Number
from capa.features.common import String, Feature
@@ -51,8 +50,7 @@ def extract_call_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -
else:
assert_never(value)
for name in capa.features.extractors.helpers.generate_symbols("", call.api):
yield API(name), ch.address
yield API(call.api), ch.address
def extract_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:

View File

@@ -75,7 +75,7 @@ def extract_format(buf: bytes) -> Iterator[Tuple[Feature, Address]]:
# 1. handling a file format (e.g. macho)
#
# for (1), this logic will need to be updated as the format is implemented.
logger.debug("unknown file format: %s", buf[:4].hex())
logger.debug("unsupported file format: %s", binascii.hexlify(buf[:4]).decode("ascii"))
return

View File

@@ -9,7 +9,6 @@
import logging
from typing import Tuple, Iterator
import capa.features.extractors.helpers
from capa.features.insn import API, Number
from capa.features.common import String, Feature
from capa.features.address import Address
@@ -45,8 +44,7 @@ def extract_call_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -
# but yielding the entire string would be helpful for an analyst looking at the verbose output
yield String(arg_value), ch.address
for name in capa.features.extractors.helpers.generate_symbols("", call.name):
yield API(name), ch.address
yield API(call.name), ch.address
def extract_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:

View File

@@ -310,9 +310,6 @@ class ELF:
98: "TPC",
99: "SNP1K",
100: "ST200",
# https://www.sco.com/developers/gabi/latest/ch4.eheader.html
183: "aarch64",
243: "riscv",
}
@property

View File

@@ -158,10 +158,6 @@ def extract_file_arch(elf: ELFFile, **kwargs):
yield Arch("i386"), NO_ADDRESS
elif arch == "x64":
yield Arch("amd64"), NO_ADDRESS
elif arch == "ARM":
yield Arch("arm"), NO_ADDRESS
elif arch == "AArch64":
yield Arch("aarch64"), NO_ADDRESS
else:
logger.warning("unsupported architecture: %s", arch)

View File

@@ -63,7 +63,6 @@ def generate_symbols(dll: str, symbol: str, include_dll=False) -> Iterator[str]:
# trim extensions observed in dynamic traces
dll = dll[0:-4] if dll.endswith(".dll") else dll
dll = dll[0:-4] if dll.endswith(".drv") else dll
dll = dll[0:-3] if dll.endswith(".so") else dll
if include_dll or is_ordinal(symbol):
# ws2_32.#1

View File

@@ -8,6 +8,7 @@
from typing import List, Tuple, Iterator
import idaapi
import ida_nalt
import capa.ida.helpers
import capa.features.extractors.elf
@@ -31,9 +32,7 @@ class IdaFeatureExtractor(StaticFeatureExtractor):
def __init__(self):
super().__init__(
hashes=SampleHashes(
md5=capa.ida.helpers.retrieve_input_file_md5(),
sha1="(unknown)",
sha256=capa.ida.helpers.retrieve_input_file_sha256(),
md5=ida_nalt.retrieve_input_file_md5(), sha1="(unknown)", sha256=ida_nalt.retrieve_input_file_sha256()
)
)
self.global_features: List[Tuple[Feature, Address]] = []

View File

@@ -14,7 +14,6 @@ import idaapi
import idautils
import ida_entry
import capa.ida.helpers
import capa.features.extractors.common
import capa.features.extractors.helpers
import capa.features.extractors.strings
@@ -178,17 +177,17 @@ def extract_file_function_names() -> Iterator[Tuple[Feature, Address]]:
def extract_file_format() -> Iterator[Tuple[Feature, Address]]:
filetype = capa.ida.helpers.get_filetype()
file_info = idaapi.get_inf_structure()
if filetype in (idaapi.f_PE, idaapi.f_COFF):
if file_info.filetype in (idaapi.f_PE, idaapi.f_COFF):
yield Format(FORMAT_PE), NO_ADDRESS
elif filetype == idaapi.f_ELF:
elif file_info.filetype == idaapi.f_ELF:
yield Format(FORMAT_ELF), NO_ADDRESS
elif filetype == idaapi.f_BIN:
elif file_info.filetype == idaapi.f_BIN:
# no file type to return when processing a binary file, but we want to continue processing
return
else:
raise NotImplementedError(f"unexpected file format: {filetype}")
raise NotImplementedError(f"unexpected file format: {file_info.filetype}")
def extract_features() -> Iterator[Tuple[Feature, Address]]:

View File

@@ -9,6 +9,7 @@ import logging
import contextlib
from typing import Tuple, Iterator
import idaapi
import ida_loader
import capa.ida.helpers
@@ -47,12 +48,12 @@ def extract_os() -> Iterator[Tuple[Feature, Address]]:
def extract_arch() -> Iterator[Tuple[Feature, Address]]:
procname = capa.ida.helpers.get_processor_name()
if procname == "metapc" and capa.ida.helpers.is_64bit():
info: idaapi.idainfo = idaapi.get_inf_structure()
if info.procname == "metapc" and info.is_64bit():
yield Arch(ARCH_AMD64), NO_ADDRESS
elif procname == "metapc" and capa.ida.helpers.is_32bit():
elif info.procname == "metapc" and info.is_32bit():
yield Arch(ARCH_I386), NO_ADDRESS
elif procname == "metapc":
elif info.procname == "metapc":
logger.debug("unsupported architecture: non-32-bit nor non-64-bit intel")
return
else:
@@ -60,5 +61,5 @@ def extract_arch() -> Iterator[Tuple[Feature, Address]]:
# 1. handling a new architecture (e.g. aarch64)
#
# for (1), this logic will need to be updated as the format is implemented.
logger.debug("unsupported architecture: %s", procname)
logger.debug("unsupported architecture: %s", info.procname)
return

View File

@@ -21,49 +21,28 @@ from capa.features.extractors.base_extractor import FunctionHandle
IDA_NALT_ENCODING = ida_nalt.get_default_encoding_idx(ida_nalt.BPU_1B) # use one byte-per-character encoding
if hasattr(ida_bytes, "parse_binpat_str"):
# TODO (mr): use find_bytes
# https://github.com/mandiant/capa/issues/2339
def find_byte_sequence(start: int, end: int, seq: bytes) -> Iterator[int]:
"""yield all ea of a given byte sequence
def find_byte_sequence(start: int, end: int, seq: bytes) -> Iterator[int]:
"""yield all ea of a given byte sequence
args:
start: min virtual address
end: max virtual address
seq: bytes to search e.g. b"\x01\x03"
"""
patterns = ida_bytes.compiled_binpat_vec_t()
args:
start: min virtual address
end: max virtual address
seq: bytes to search e.g. b"\x01\x03"
"""
patterns = ida_bytes.compiled_binpat_vec_t()
seqstr = " ".join([f"{b:02x}" for b in seq])
err = ida_bytes.parse_binpat_str(patterns, 0, seqstr, 16, IDA_NALT_ENCODING)
seqstr = " ".join([f"{b:02x}" for b in seq])
err = ida_bytes.parse_binpat_str(patterns, 0, seqstr, 16, IDA_NALT_ENCODING)
if err:
return
if err:
return
while True:
ea, _ = ida_bytes.bin_search(start, end, patterns, ida_bytes.BIN_SEARCH_FORWARD)
if ea == idaapi.BADADDR:
break
start = ea + 1
yield ea
else:
# for IDA 7.5 and older; using deprecated find_binary instead of bin_search
def find_byte_sequence(start: int, end: int, seq: bytes) -> Iterator[int]:
"""yield all ea of a given byte sequence
args:
start: min virtual address
end: max virtual address
seq: bytes to search e.g. b"\x01\x03"
"""
seqstr = " ".join([f"{b:02x}" for b in seq])
while True:
ea = idaapi.find_binary(start, end, seqstr, 0, idaapi.SEARCH_DOWN)
if ea == idaapi.BADADDR:
break
start = ea + 1
yield ea
while True:
ea = ida_bytes.bin_search(start, end, patterns, ida_bytes.BIN_SEARCH_FORWARD)
if ea == idaapi.BADADDR:
break
start = ea + 1
yield ea
def get_functions(

View File

@@ -1,117 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import os
import sys
import json
import logging
import importlib.util
from typing import Optional
from pathlib import Path
logger = logging.getLogger(__name__)
def is_idalib_installed() -> bool:
try:
return importlib.util.find_spec("idapro") is not None
except ModuleNotFoundError:
return False
def get_idalib_user_config_path() -> Optional[Path]:
"""Get the path to the user's config file based on platform following IDA's user directories."""
# derived from `py-activate-idalib.py` from IDA v9.0 Beta 4
if sys.platform == "win32":
# On Windows, use the %APPDATA%\Hex-Rays\IDA Pro directory
config_dir = Path(os.getenv("APPDATA")) / "Hex-Rays" / "IDA Pro"
else:
# On macOS and Linux, use ~/.idapro
config_dir = Path.home() / ".idapro"
# Return the full path to the config file (now in JSON format)
user_config_path = config_dir / "ida-config.json"
if not user_config_path.exists():
return None
return user_config_path
def find_idalib() -> Optional[Path]:
config_path = get_idalib_user_config_path()
if not config_path:
logger.error("IDA Pro user configuration does not exist, please make sure you've installed idalib properly.")
return None
config = json.loads(config_path.read_text(encoding="utf-8"))
try:
ida_install_dir = Path(config["Paths"]["ida-install-dir"])
except KeyError:
logger.error(
"IDA Pro user configuration does not contain location of IDA Pro installation, please make sure you've installed idalib properly."
)
return None
if not ida_install_dir.exists():
return None
libname = {
"win32": "idalib.dll",
"linux": "libidalib.so",
"linux2": "libidalib.so",
"darwin": "libidalib.dylib",
}[sys.platform]
if not (ida_install_dir / "ida.hlp").is_file():
return None
if not (ida_install_dir / libname).is_file():
return None
idalib_path = ida_install_dir / "idalib" / "python"
if not idalib_path.exists():
return None
if not (idalib_path / "idapro" / "__init__.py").is_file():
return None
return idalib_path
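# For reference, the ida-config.json parsed above looks roughly like this
# (hypothetical install path):
#
#     {
#         "Paths": {
#             "ida-install-dir": "C:\\Program Files\\IDA Professional 9.0"
#         }
#     }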
def has_idalib() -> bool:
if is_idalib_installed():
logger.debug("found installed IDA idalib API")
return True
logger.debug("IDA idalib API not installed, searching...")
idalib_path = find_idalib()
if not idalib_path:
logger.debug("failed to find IDA idalib installation")
return False
logger.debug("found IDA idalib API: %s", idalib_path)
return True
def load_idalib() -> bool:
try:
import idapro
return True
except ImportError:
idalib_path = find_idalib()
if not idalib_path:
return False
sys.path.append(idalib_path.absolute().as_posix())
try:
import idapro # noqa: F401 unused import
return True
except ImportError:
return False
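# An illustrative sketch (hypothetical caller): ensure idalib is importable
# before using it, falling back to the configured IDA installation.
#     if not load_idalib():
#         raise RuntimeError("cannot find IDA idalib")
#     import idapro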

View File

@@ -5,11 +5,9 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import re
from typing import Any, Dict, Tuple, Iterator, Optional
from typing import Any, Dict, Tuple, Iterator
import idc
import ida_ua
import idaapi
import idautils
@@ -37,9 +35,9 @@ def get_externs(ctx: Dict[str, Any]) -> Dict[int, Any]:
return ctx["externs_cache"]
def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Optional[Tuple[str, str]]:
def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Iterator[Any]:
"""check instruction for API call"""
info = None
info = ()
ref = insn.ea
# attempt to resolve API calls by following chained thunks to a reasonable depth
@@ -54,7 +52,7 @@ def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Optional[T
except IndexError:
break
info = funcs.get(ref)
info = funcs.get(ref, ())
if info:
break
@@ -62,7 +60,8 @@ def check_for_api_call(insn: idaapi.insn_t, funcs: Dict[int, Any]) -> Optional[T
if not f or not (f.flags & idaapi.FUNC_THUNK):
break
return info
if info:
yield info
def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle) -> Iterator[Tuple[Feature, Address]]:
@@ -77,39 +76,16 @@ def extract_insn_api_features(fh: FunctionHandle, bbh: BBHandle, ih: InsnHandle)
if insn.get_canon_mnem() not in ("call", "jmp"):
return
# check call to imported functions
api = check_for_api_call(insn, get_imports(fh.ctx))
if api:
# check calls to imported functions
for api in check_for_api_call(insn, get_imports(fh.ctx)):
# tuple (<module>, <function>, <ordinal>)
for name in capa.features.extractors.helpers.generate_symbols(api[0], api[1]):
yield API(name), ih.address
# a call instruction should only call one function, stop if a call to an import is extracted
return
# check call to extern functions
api = check_for_api_call(insn, get_externs(fh.ctx))
if api:
# check calls to extern functions
for api in check_for_api_call(insn, get_externs(fh.ctx)):
# tuple (<module>, <function>, <ordinal>)
yield API(api[1]), ih.address
# a call instruction should only call one function, stop if a call to an extern is extracted
return
# extract dynamically resolved APIs stored in renamed globals (renamed for example using `renimp.idc`)
# examples: `CreateProcessA`, `HttpSendRequestA`
if insn.Op1.type == ida_ua.o_mem:
op_addr = insn.Op1.addr
op_name = idaapi.get_name(op_addr)
# when renaming a global using an API name, IDA assigns it the function type
# ensure we do not extract something wrong by checking that the address has a name and a type
# we could check that the type is a function definition, but that complicates the code
if (not op_name.startswith("off_")) and idc.get_type(op_addr):
# Remove suffix used in repeated names, for example _0 in VirtualFree_0
match = re.match(r"(.+)_\d+", op_name)
if match:
op_name = match.group(1)
# the global name does not include the DLL name, so we can't extract it
for name in capa.features.extractors.helpers.generate_symbols("", op_name):
yield API(name), ih.address
# extract IDA/FLIRT recognized API functions
targets = tuple(idautils.CodeRefsFrom(insn.ea, False))

View File

@@ -130,13 +130,7 @@ def extract_file_arch(pe, **kwargs):
elif pe.FILE_HEADER.Machine == pefile.MACHINE_TYPE["IMAGE_FILE_MACHINE_AMD64"]:
yield Arch(ARCH_AMD64), NO_ADDRESS
else:
try:
logger.warning(
"unsupported architecture: %s",
pefile.MACHINE_TYPE[pe.FILE_HEADER.Machine],
)
except KeyError:
logger.warning("unknown architecture: %s", pe.FILE_HEADER.Machine)
logger.warning("unsupported architecture: %s", pefile.MACHINE_TYPE[pe.FILE_HEADER.Machine])
def extract_file_features(pe, buf):

View File

@@ -1,198 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Dict, List, Tuple, Optional
from pathlib import Path
from zipfile import ZipFile
from collections import defaultdict
from dataclasses import dataclass
from capa.exceptions import UnsupportedFormatError
from capa.features.extractors.vmray.models import File, Flog, SummaryV2, StaticData, FunctionCall, xml_to_dict
logger = logging.getLogger(__name__)
DEFAULT_ARCHIVE_PASSWORD = b"infected"
SUPPORTED_FLOG_VERSIONS = ("2",)
@dataclass
class VMRayMonitorThread:
tid: int # thread ID assigned by OS
monitor_id: int # unique ID assigned to thread by VMRay
process_monitor_id: int # unique ID assigned to containing process by VMRay
@dataclass
class VMRayMonitorProcess:
pid: int # process ID assigned by OS
ppid: int # parent process ID assigned by OS
monitor_id: int # unique ID assigned to process by VMRay
image_name: str
class VMRayAnalysis:
def __init__(self, zipfile_path: Path):
self.zipfile = ZipFile(zipfile_path, "r")
# summary_v2.json is the entry point to the entire VMRay archive and
# we use its data to find everything else that we need for capa
self.sv2 = SummaryV2.model_validate_json(
self.zipfile.read("logs/summary_v2.json", pwd=DEFAULT_ARCHIVE_PASSWORD)
)
self.file_type: str = self.sv2.analysis_metadata.sample_type
# flog.xml contains all of the call information that VMRay captured during execution
flog_xml = self.zipfile.read("logs/flog.xml", pwd=DEFAULT_ARCHIVE_PASSWORD)
flog_dict = xml_to_dict(flog_xml)
self.flog = Flog.model_validate(flog_dict)
if self.flog.analysis.log_version not in SUPPORTED_FLOG_VERSIONS:
raise UnsupportedFormatError(
"VMRay feature extractor does not support flog version %s" % self.flog.analysis.log_version
)
self.exports: Dict[int, str] = {}
self.imports: Dict[int, Tuple[str, str]] = {}
self.sections: Dict[int, str] = {}
self.monitor_processes: Dict[int, VMRayMonitorProcess] = {}
self.monitor_threads: Dict[int, VMRayMonitorThread] = {}
# map monitor thread IDs to their associated monitor process ID
self.monitor_threads_by_monitor_process: Dict[int, List[int]] = defaultdict(list)
# map function calls to their associated monitor thread ID mapped to its associated monitor process ID
self.monitor_process_calls: Dict[int, Dict[int, List[FunctionCall]]] = defaultdict(lambda: defaultdict(list))
self.base_address: int
self.sample_file_name: Optional[str] = None
self.sample_file_analysis: Optional[File] = None
self.sample_file_static_data: Optional[StaticData] = None
self._find_sample_file()
# VMRay analysis archives come in various shapes and sizes, and the file type does not definitively tell us what
# data we can expect to find in the archive, so to be explicit we check for the various pieces that we need at
# minimum to run capa analysis
if self.sample_file_name is None or self.sample_file_analysis is None:
raise UnsupportedFormatError("VMRay archive does not contain sample file (file_type: %s)" % self.file_type)
if not self.sample_file_static_data:
raise UnsupportedFormatError("VMRay archive does not contain static data (file_type: %s)" % self.file_type)
if not self.sample_file_static_data.pe and not self.sample_file_static_data.elf:
raise UnsupportedFormatError(
"VMRay feature extractor only supports PE and ELF at this time (file_type: %s)" % self.file_type
)
# VMRay does not store static strings for the sample file so we must use the source file
# stored in the archive
sample_sha256: str = self.sample_file_analysis.hash_values.sha256.lower()
sample_file_path: str = f"internal/static_analyses/{sample_sha256}/objects/files/{sample_sha256}"
logger.debug("file_type: %s, file_path: %s", self.file_type, sample_file_path)
self.sample_file_buf: bytes = self.zipfile.read(sample_file_path, pwd=DEFAULT_ARCHIVE_PASSWORD)
# do not change order, it matters
self._compute_base_address()
self._compute_imports()
self._compute_exports()
self._compute_sections()
self._compute_monitor_processes()
self._compute_monitor_threads()
self._compute_monitor_process_calls()
def _find_sample_file(self):
for file_name, file_analysis in self.sv2.files.items():
if file_analysis.is_sample:
# target the sample submitted for analysis
self.sample_file_name = file_name
self.sample_file_analysis = file_analysis
if file_analysis.ref_static_data:
# like "path": ["static_data","static_data_0"] where "static_data_0" is the summary_v2 static data
# key for the file's static data
self.sample_file_static_data = self.sv2.static_data[file_analysis.ref_static_data.path[1]]
break
def _compute_base_address(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
self.base_address = self.sample_file_static_data.pe.basic_info.image_base
def _compute_exports(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
for export in self.sample_file_static_data.pe.exports:
self.exports[export.address] = export.api.name
def _compute_imports(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
for module in self.sample_file_static_data.pe.imports:
for api in module.apis:
self.imports[api.address] = (module.dll, api.api.name)
def _compute_sections(self):
assert self.sample_file_static_data is not None
if self.sample_file_static_data.pe:
for pefile_section in self.sample_file_static_data.pe.sections:
self.sections[pefile_section.virtual_address] = pefile_section.name
elif self.sample_file_static_data.elf:
for elffile_section in self.sample_file_static_data.elf.sections:
self.sections[elffile_section.header.sh_addr] = elffile_section.header.sh_name
def _compute_monitor_processes(self):
for process in self.sv2.processes.values():
# we expect monitor IDs to be unique
assert process.monitor_id not in self.monitor_processes
ppid: int = (
self.sv2.processes[process.ref_parent_process.path[1]].os_pid if process.ref_parent_process else 0
)
self.monitor_processes[process.monitor_id] = VMRayMonitorProcess(
process.os_pid, ppid, process.monitor_id, process.image_name
)
# not all processes are recorded in SummaryV2.json, get missing data from flog.xml, see #2394
for monitor_process in self.flog.analysis.monitor_processes:
vmray_monitor_process: VMRayMonitorProcess = VMRayMonitorProcess(
monitor_process.os_pid,
monitor_process.os_parent_pid,
monitor_process.process_id,
monitor_process.image_name,
)
if monitor_process.process_id not in self.monitor_processes:
self.monitor_processes[monitor_process.process_id] = vmray_monitor_process
else:
# we expect monitor processes recorded in both SummaryV2.json and flog.xml to equal
assert self.monitor_processes[monitor_process.process_id] == vmray_monitor_process
def _compute_monitor_threads(self):
for monitor_thread in self.flog.analysis.monitor_threads:
# we expect monitor IDs to be unique
assert monitor_thread.thread_id not in self.monitor_threads
self.monitor_threads[monitor_thread.thread_id] = VMRayMonitorThread(
monitor_thread.os_tid, monitor_thread.thread_id, monitor_thread.process_id
)
# we expect each monitor thread ID to be unique for its associated monitor process ID e.g. monitor
# thread ID 10 should not be captured twice for monitor process ID 1
assert monitor_thread.thread_id not in self.monitor_threads_by_monitor_process[monitor_thread.process_id]
self.monitor_threads_by_monitor_process[monitor_thread.process_id].append(monitor_thread.thread_id)
def _compute_monitor_process_calls(self):
for function_call in self.flog.analysis.function_calls:
self.monitor_process_calls[function_call.process_id][function_call.thread_id].append(function_call)
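# An illustrative sketch (hypothetical path): load an archive and walk the
# calls recorded per monitor process and thread.
#     analysis = VMRayAnalysis(Path("sample.zip"))
#     for monitor_process_id, threads in analysis.monitor_process_calls.items():
#         for monitor_thread_id, calls in threads.items():
#             print(monitor_process_id, monitor_thread_id, len(calls))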

View File

@@ -1,59 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Tuple, Iterator
import capa.features.extractors.helpers
from capa.features.insn import API, Number
from capa.features.common import String, Feature
from capa.features.address import Address
from capa.features.extractors.vmray.models import PARAM_TYPE_INT, PARAM_TYPE_STR, Param, FunctionCall, hexint
from capa.features.extractors.base_extractor import CallHandle, ThreadHandle, ProcessHandle
logger = logging.getLogger(__name__)
def get_call_param_features(param: Param, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:
if param.deref is not None:
# pointer types contain a special "deref" member that stores the deref'd value
# so we check for this first and ignore Param.value as this always contains the
# deref'd pointer value
if param.deref.value is not None:
if param.deref.type_ in PARAM_TYPE_INT:
yield Number(hexint(param.deref.value)), ch.address
elif param.deref.type_ in PARAM_TYPE_STR:
# TODO(mr-tz): remove FPs like " \\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\..."
# https://github.com/mandiant/capa/issues/2432
# parsing the data up to here results in double-escaped backslashes, remove those here
yield String(param.deref.value.replace("\\\\", "\\")), ch.address
else:
logger.debug("skipping deref param type %s", param.deref.type_)
elif param.value is not None:
if param.type_ in PARAM_TYPE_INT:
yield Number(hexint(param.value)), ch.address
def extract_call_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:
call: FunctionCall = ch.inner
if call.params_in:
for param in call.params_in.params:
yield from get_call_param_features(param, ch)
for name in capa.features.extractors.helpers.generate_symbols("", call.name):
yield API(name), ch.address
def extract_features(ph: ProcessHandle, th: ThreadHandle, ch: CallHandle) -> Iterator[Tuple[Feature, Address]]:
for handler in CALL_HANDLERS:
for feature, addr in handler(ph, th, ch):
yield feature, addr
CALL_HANDLERS = (extract_call_features,)

View File

@@ -1,133 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import List, Tuple, Iterator
from pathlib import Path
import capa.helpers
import capa.features.extractors.vmray.call
import capa.features.extractors.vmray.file
import capa.features.extractors.vmray.global_
from capa.features.common import Feature, Characteristic
from capa.features.address import (
NO_ADDRESS,
Address,
ThreadAddress,
ProcessAddress,
DynamicCallAddress,
AbsoluteVirtualAddress,
)
from capa.features.extractors.vmray import VMRayAnalysis, VMRayMonitorThread, VMRayMonitorProcess
from capa.features.extractors.vmray.models import PARAM_TYPE_STR, ParamList, FunctionCall
from capa.features.extractors.base_extractor import (
CallHandle,
SampleHashes,
ThreadHandle,
ProcessHandle,
DynamicFeatureExtractor,
)
def get_formatted_params(params: ParamList) -> List[str]:
params_list: List[str] = []
for param in params:
if param.deref and param.deref.value is not None:
deref_value: str = f'"{param.deref.value}"' if param.deref.type_ in PARAM_TYPE_STR else param.deref.value
params_list.append(f"{param.name}: {deref_value}")
else:
value: str = "" if param.value is None else param.value
params_list.append(f"{param.name}: {value}")
return params_list
class VMRayExtractor(DynamicFeatureExtractor):
def __init__(self, analysis: VMRayAnalysis):
assert analysis.sample_file_analysis is not None
super().__init__(
hashes=SampleHashes(
md5=analysis.sample_file_analysis.hash_values.md5.lower(),
sha1=analysis.sample_file_analysis.hash_values.sha1.lower(),
sha256=analysis.sample_file_analysis.hash_values.sha256.lower(),
)
)
self.analysis = analysis
# pre-compute these because we'll yield them at *every* scope.
self.global_features = list(capa.features.extractors.vmray.global_.extract_features(self.analysis))
def get_base_address(self) -> Address:
# value according to the PE header; the actual trace may use a different image base
return AbsoluteVirtualAddress(self.analysis.base_address)
def extract_file_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.vmray.file.extract_features(self.analysis)
def extract_global_features(self) -> Iterator[Tuple[Feature, Address]]:
yield from self.global_features
def get_processes(self) -> Iterator[ProcessHandle]:
for monitor_process in self.analysis.monitor_processes.values():
address: ProcessAddress = ProcessAddress(pid=monitor_process.pid, ppid=monitor_process.ppid)
yield ProcessHandle(address, inner=monitor_process)
def extract_process_features(self, ph: ProcessHandle) -> Iterator[Tuple[Feature, Address]]:
# we have not identified process-specific features for VMRay yet
yield from []
def get_process_name(self, ph) -> str:
monitor_process: VMRayMonitorProcess = ph.inner
return monitor_process.image_name
def get_threads(self, ph: ProcessHandle) -> Iterator[ThreadHandle]:
for monitor_thread_id in self.analysis.monitor_threads_by_monitor_process[ph.inner.monitor_id]:
monitor_thread: VMRayMonitorThread = self.analysis.monitor_threads[monitor_thread_id]
address: ThreadAddress = ThreadAddress(process=ph.address, tid=monitor_thread.tid)
yield ThreadHandle(address=address, inner=monitor_thread)
def extract_thread_features(self, ph: ProcessHandle, th: ThreadHandle) -> Iterator[Tuple[Feature, Address]]:
if False:
# force this routine to be a generator,
# but we don't actually have any elements to generate.
yield Characteristic("never"), NO_ADDRESS
return
def get_calls(self, ph: ProcessHandle, th: ThreadHandle) -> Iterator[CallHandle]:
for function_call in self.analysis.monitor_process_calls[ph.inner.monitor_id][th.inner.monitor_id]:
addr = DynamicCallAddress(thread=th.address, id=function_call.fncall_id)
yield CallHandle(address=addr, inner=function_call)
def extract_call_features(
self, ph: ProcessHandle, th: ThreadHandle, ch: CallHandle
) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.vmray.call.extract_features(ph, th, ch)
def get_call_name(self, ph, th, ch) -> str:
call: FunctionCall = ch.inner
call_formatted: str = call.name
# format input parameters
if call.params_in:
call_formatted += f"({', '.join(get_formatted_params(call.params_in.params))})"
else:
call_formatted += "()"
# format output parameters
if call.params_out:
call_formatted += f" -> {', '.join(get_formatted_params(call.params_out.params))}"
return call_formatted
@classmethod
def from_zipfile(cls, zipfile_path: Path):
return cls(VMRayAnalysis(zipfile_path))
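A worked example of the call rendering implemented by get_formatted_params and get_call_name above, with hypothetical parameter names and values:

params_in = [("lpFileName", '"C:\\temp\\a.txt"'), ("dwDesiredAccess", "0x80000000")]
params_out = [("return_value", "0x44")]

formatted = "CreateFileW"
formatted += "(" + ", ".join(f"{name}: {value}" for name, value in params_in) + ")"
formatted += " -> " + ", ".join(f"{name}: {value}" for name, value in params_out)

assert formatted == 'CreateFileW(lpFileName: "C:\\temp\\a.txt", dwDesiredAccess: 0x80000000) -> return_value: 0x44'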

View File

@@ -1,82 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Tuple, Iterator
import capa.features.extractors.common
from capa.features.file import Export, Import, Section
from capa.features.common import String, Feature
from capa.features.address import NO_ADDRESS, Address, AbsoluteVirtualAddress
from capa.features.extractors.vmray import VMRayAnalysis
from capa.features.extractors.helpers import generate_symbols
logger = logging.getLogger(__name__)
def extract_export_names(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for addr, name in analysis.exports.items():
yield Export(name), AbsoluteVirtualAddress(addr)
def extract_import_names(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for addr, (module, api) in analysis.imports.items():
for symbol in generate_symbols(module, api, include_dll=True):
yield Import(symbol), AbsoluteVirtualAddress(addr)
def extract_section_names(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for addr, name in analysis.sections.items():
yield Section(name), AbsoluteVirtualAddress(addr)
def extract_referenced_filenames(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for filename in analysis.sv2.filenames.values():
yield String(filename.filename), NO_ADDRESS
def extract_referenced_mutex_names(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for mutex in analysis.sv2.mutexes.values():
yield String(mutex.name), NO_ADDRESS
def extract_referenced_domain_names(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for domain in analysis.sv2.domains.values():
yield String(domain.domain), NO_ADDRESS
def extract_referenced_ip_addresses(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for ip_address in analysis.sv2.ip_addresses.values():
yield String(ip_address.ip_address), NO_ADDRESS
def extract_referenced_registry_key_names(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for registry_record in analysis.sv2.registry_records.values():
yield String(registry_record.reg_key_name), NO_ADDRESS
def extract_file_strings(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.common.extract_file_strings(analysis.sample_file_buf)
def extract_features(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for handler in FILE_HANDLERS:
for feature, addr in handler(analysis):
yield feature, addr
FILE_HANDLERS = (
extract_import_names,
extract_export_names,
extract_section_names,
extract_referenced_filenames,
extract_referenced_mutex_names,
extract_referenced_domain_names,
extract_referenced_ip_addresses,
extract_referenced_registry_key_names,
extract_file_strings,
)

View File

@@ -1,72 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
from typing import Tuple, Iterator
from capa.features.common import (
OS,
OS_LINUX,
ARCH_I386,
FORMAT_PE,
ARCH_AMD64,
FORMAT_ELF,
OS_WINDOWS,
Arch,
Format,
Feature,
)
from capa.features.address import NO_ADDRESS, Address
from capa.features.extractors.vmray import VMRayAnalysis
logger = logging.getLogger(__name__)
def extract_arch(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
file_type: str = analysis.file_type
if "x86-32" in file_type:
yield Arch(ARCH_I386), NO_ADDRESS
elif "x86-64" in file_type:
yield Arch(ARCH_AMD64), NO_ADDRESS
else:
raise ValueError("unrecognized arch from the VMRay report: %s" % file_type)
def extract_format(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
assert analysis.sample_file_static_data is not None
if analysis.sample_file_static_data.pe:
yield Format(FORMAT_PE), NO_ADDRESS
elif analysis.sample_file_static_data.elf:
yield Format(FORMAT_ELF), NO_ADDRESS
else:
raise ValueError("unrecognized file format from the VMRay report: %s" % analysis.file_type)
def extract_os(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
file_type: str = analysis.file_type
if "windows" in file_type.lower():
yield OS(OS_WINDOWS), NO_ADDRESS
elif "linux" in file_type.lower():
yield OS(OS_LINUX), NO_ADDRESS
else:
raise ValueError("unrecognized OS from the VMRay report: %s" % file_type)
def extract_features(analysis: VMRayAnalysis) -> Iterator[Tuple[Feature, Address]]:
for global_handler in GLOBAL_HANDLER:
for feature, addr in global_handler(analysis):
yield feature, addr
GLOBAL_HANDLER = (
extract_format,
extract_os,
extract_arch,
)
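All three handlers above key off VMRay's file type string; for instance, with an illustrative (not exhaustive) value:

file_type = "Windows Exe (x86-64)"     # illustrative VMRay file type string

assert "x86-64" in file_type           # -> Arch(ARCH_AMD64)
assert "windows" in file_type.lower()  # -> OS(OS_WINDOWS)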

View File

@@ -1,368 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from typing import Dict, List, Union, Optional
import xmltodict
from pydantic import Field, BaseModel
from typing_extensions import Annotated
from pydantic.functional_validators import BeforeValidator
"""
# possible param types, included for documentation
PARAM_TYPE = (
"signed_8bit",
"unsigned_8bit",
"signed_16bit",
"unsigned_16bit",
"signed_32bit",
"unsigned_32bit",
"signed_64bit",
"unsigned_64bit",
"double",
"void_ptr",
"bool",
"unknown",
"ptr",
"void",
"str",
"array",
"container",
"bindata",
"undefined_type",
)
"""
PARAM_TYPE_PTR = ("void_ptr", "ptr")
PARAM_TYPE_STR = ("str",)
PARAM_TYPE_INT = (
"signed_8bit",
"unsigned_8bit",
"signed_16bit",
"unsigned_16bit",
"signed_32bit",
"unsigned_32bit",
"signed_64bit",
"unsigned_64bit",
"double",
"bool",
"unknown",
)
def xml_to_dict(xml):
return xmltodict.parse(xml, attr_prefix="")
def hexint(value: Union[str, int]) -> int:
if isinstance(value, str):
return int(value, 16) if value.startswith("0x") else int(value, 10)
else:
return value
def validate_hex_int(value: Union[str, int]) -> int:
return hexint(value)
# convert the input value to a Python int type before inner validation (int) is called
HexInt = Annotated[int, BeforeValidator(validate_hex_int)]
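A minimal, self-contained sketch of how the HexInt annotation behaves once attached to a pydantic model (the Record model and its values are hypothetical):

from typing import Union
from pydantic import BaseModel
from typing_extensions import Annotated
from pydantic.functional_validators import BeforeValidator

def hexint(value: Union[str, int]) -> int:
    if isinstance(value, str):
        return int(value, 16) if value.startswith("0x") else int(value, 10)
    return value

HexInt = Annotated[int, BeforeValidator(hexint)]

class Record(BaseModel):
    addr: HexInt

assert Record(addr="0x401000").addr == 0x401000
assert Record(addr="4096").addr == 4096
assert Record(addr=7).addr == 7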
# models flog.xml file, certain fields left as comments for documentation purposes
class ParamDeref(BaseModel):
type_: str = Field(alias="type")
value: Optional[str] = None
class Param(BaseModel):
name: str
type_: str = Field(alias="type")
value: Optional[str] = None
deref: Optional[ParamDeref] = None
def validate_ensure_is_list(value: Union[List[Param], Param]) -> List[Param]:
if isinstance(value, list):
return value
else:
return [value]
# params may be stored as a list of Param or a single Param so we convert
# the input value to Python list type before the inner validation (List[Param])
# is called
ParamList = Annotated[List[Param], BeforeValidator(validate_ensure_is_list)]
class Params(BaseModel):
params: ParamList = Field(alias="param")
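The list coercion exists because xmltodict collapses a single repeated element into a dict but returns a list for multiple elements, as this small sketch (with throwaway XML) shows:

import xmltodict

one = xmltodict.parse("<in><param name='a'/></in>", attr_prefix="")
many = xmltodict.parse("<in><param name='a'/><param name='b'/></in>", attr_prefix="")

assert isinstance(one["in"]["param"], dict)   # single element: plain dict
assert isinstance(many["in"]["param"], list)  # repeated element: list of dicts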
def validate_call_name(value: str) -> str:
if value.startswith("sys_"):
# VMRay appears to log Linux kernel function calls with a "sys_" prefix,
# so we strip that prefix here to enable capa rule matching
return value[4:]
else:
return value
# function call names may need to be reformatted to remove data, etc. so we reformat
# before calling the inner validation (str)
CallName = Annotated[str, BeforeValidator(validate_call_name)]
class FunctionCall(BaseModel):
# ts: HexInt
fncall_id: HexInt
process_id: HexInt
thread_id: HexInt
name: CallName
# addr: HexInt
# from_addr: HexInt = Field(alias="from")
params_in: Optional[Params] = Field(alias="in", default=None)
params_out: Optional[Params] = Field(alias="out", default=None)
class FunctionReturn(BaseModel):
ts: HexInt
fncall_id: HexInt
addr: HexInt
from_addr: HexInt = Field(alias="from")
class MonitorProcess(BaseModel):
ts: HexInt
process_id: int
image_name: str
filename: str
# page_root: HexInt
os_pid: HexInt
# os_integrity_level: HexInt
# os_privileges: HexInt
monitor_reason: str
parent_id: int
os_parent_pid: HexInt
# cmd_line: str
# cur_dir: str
# os_username: str
# bitness: int
# os_groups: str
class MonitorThread(BaseModel):
ts: HexInt
thread_id: int
process_id: int
os_tid: HexInt
# handle the case where there is only a single entry, but the model expects a list
MonitorProcessList = Annotated[List[MonitorProcess], BeforeValidator(validate_ensure_is_list)]
MonitorThreadList = Annotated[List[MonitorThread], BeforeValidator(validate_ensure_is_list)]
FunctionCallList = Annotated[List[FunctionCall], BeforeValidator(validate_ensure_is_list)]
class Analysis(BaseModel):
log_version: str # tested 2
analyzer_version: str # tested 2024.2.1
# analysis_date: str
monitor_processes: MonitorProcessList = Field(alias="monitor_process", default=[])
monitor_threads: MonitorThreadList = Field(alias="monitor_thread", default=[])
function_calls: FunctionCallList = Field(alias="fncall", default=[])
# function_returns: List[FunctionReturn] = Field(alias="fnret", default=[])
class Flog(BaseModel):
analysis: Analysis
# models for summary_v2.json file, certain fields left as comments for documentation purposes
class GenericReference(BaseModel):
path: List[str]
source: str
class StaticDataReference(GenericReference): ...
class PEFileBasicInfo(BaseModel):
# compile_time: str
# file_type: str
image_base: int
# machine_type: str
# size_of_code: int
# size_of_initialized_data: int
# size_of_uninitialized_data: int
# subsystem: str
# entry_point: int
# imphash: Optional[str] = None
class API(BaseModel):
name: str
ordinal: Optional[int] = None
class PEFileExport(BaseModel):
address: int
api: API
class PEFileImport(BaseModel):
address: int
api: API
# thunk_offset: int
# hint: Optional[int] = None
# thunk_rva: int
class PEFileImportModule(BaseModel):
dll: str
apis: List[PEFileImport]
class PEFileSection(BaseModel):
# entropy: float
# flags: List[str] = []
name: str
# raw_data_offset: int
# raw_data_size: int
virtual_address: int
# virtual_size: int
class PEFile(BaseModel):
basic_info: PEFileBasicInfo
exports: List[PEFileExport] = []
imports: List[PEFileImportModule] = []
sections: List[PEFileSection] = []
class ElfFileSectionHeader(BaseModel):
sh_name: str
sh_addr: int
class ElfFileSection(BaseModel):
header: ElfFileSectionHeader
"""
class ElfFileHeader(BaseModel):
file_class: str
endianness: str
file_type: str
architecture: str
architecture_human_str: str
entry_point: int
"""
class ElfFile(BaseModel):
# file_header: ElfFileHeader
sections: List[ElfFileSection]
class StaticData(BaseModel):
pe: Optional[PEFile] = None
elf: Optional[ElfFile] = None
class FileHashes(BaseModel):
md5: str
sha1: str
sha256: str
# ssdeep: str
class File(BaseModel):
# categories: List[str]
hash_values: FileHashes
# is_artifact: bool
# is_ioc: bool
is_sample: bool
# size: int
# is_truncated: bool
# mime_type: Optional[str] = None
# operations: List[str] = []
# ref_filenames: List[GenericReference] = []
# ref_gfncalls: List[GenericReference] = []
ref_static_data: Optional[StaticDataReference] = None
# ref_vti_matches: List[GenericReference] = []
# verdict: str
class Process(BaseModel):
# bitness: int
# is_artifact: bool
# is_ioc: bool
monitor_id: int
# monitor_reason: str
os_pid: int
filename: str
image_name: str
ref_parent_process: Optional[GenericReference] = None
class Filename(BaseModel):
filename: str
# is_artifact: bool
# is_ioc: bool
# verdict: str
class Mutex(BaseModel):
name: str
# is_artifact: bool
# is_ioc: bool
# verdict: str
class Registry(BaseModel):
reg_key_name: str
# reg_key_value_type: Optional[str] = None
# is_artifact: bool
# is_ioc: bool
# verdict: str
class Domain(BaseModel):
domain: str
# is_artifact: bool
# is_ioc: bool
# verdict: str
class IPAddress(BaseModel):
ip_address: str
# is_artifact: bool
# is_ioc: bool
# verdict: str
class AnalysisMetadata(BaseModel):
sample_type: str
submission_filename: str
class SummaryV2(BaseModel):
analysis_metadata: AnalysisMetadata
static_data: Dict[str, StaticData] = {}
# recorded artifacts
files: Dict[str, File] = {}
processes: Dict[str, Process] = {}
filenames: Dict[str, Filename] = {}
mutexes: Dict[str, Mutex] = {}
domains: Dict[str, Domain] = {}
ip_addresses: Dict[str, IPAddress] = {}
registry_records: Dict[str, Registry] = {}

View File

@@ -100,10 +100,9 @@ class Mnemonic(Feature):
# max number of operands to consider for a given instruction.
# for Intel and .NET, this is 3
# since we only support Intel and .NET, we can assume this is 3
# which covers cases up to e.g. "vinserti128 ymm0,ymm0,ymm5,1"
# for ARM/aarch64, we assume 4
MAX_OPERAND_COUNT = 5
MAX_OPERAND_COUNT = 4
MAX_OPERAND_INDEX = MAX_OPERAND_COUNT - 1

View File

@@ -372,10 +372,6 @@ if __name__ == "__main__":
from capa.exceptions import UnsupportedRuntimeError
raise UnsupportedRuntimeError("This version of capa can only be used with Python 3.8+")
elif sys.version_info < (3, 10):
from warnings import warn
warn("This is the last capa version supporting Python 3.8 and 3.9.", DeprecationWarning, stacklevel=2)
exit_code = main()
if exit_code != 0:
popup("capa explorer encountered errors during analysis. Please check the console output for more information.") # type: ignore [name-defined] # noqa: F821

View File

@@ -164,8 +164,4 @@ if __name__ == "__main__":
from capa.exceptions import UnsupportedRuntimeError
raise UnsupportedRuntimeError("This version of capa can only be used with Python 3.8+")
elif sys.version_info < (3, 10):
from warnings import warn
warn("This is the last capa version supporting Python 3.8 and 3.9.", DeprecationWarning, stacklevel=2)
sys.exit(main())

View File

@@ -5,34 +5,17 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import io
import os
import sys
import gzip
import ctypes
import inspect
import logging
import tempfile
import contextlib
import importlib.util
from typing import Dict, List, Union, BinaryIO, Iterator, NoReturn
from typing import Dict, Union, BinaryIO, Iterator, NoReturn
from pathlib import Path
from zipfile import ZipFile
from datetime import datetime
from rich.console import Console
from rich.progress import (
Task,
Text,
Progress,
BarColumn,
TextColumn,
SpinnerColumn,
ProgressColumn,
TimeElapsedColumn,
MofNCompleteColumn,
TaskProgressColumn,
TimeRemainingColumn,
)
import tqdm
import msgspec.json
from capa.exceptions import UnsupportedFormatError
from capa.features.common import (
@@ -40,32 +23,24 @@ from capa.features.common import (
FORMAT_CAPE,
FORMAT_SC32,
FORMAT_SC64,
FORMAT_VMRAY,
FORMAT_DOTNET,
FORMAT_FREEZE,
FORMAT_DRAKVUF,
FORMAT_UNKNOWN,
FORMAT_BINEXPORT2,
Format,
)
EXTENSIONS_SHELLCODE_32 = ("sc32", "raw32")
EXTENSIONS_SHELLCODE_64 = ("sc64", "raw64")
# CAPE (.json, .json_, .json.gz)
# DRAKVUF (.log, .log.gz)
# VMRay (.zip)
EXTENSIONS_DYNAMIC = ("json", "json_", "json.gz", "log", ".log.gz", ".zip")
EXTENSIONS_BINEXPORT2 = ("BinExport", "BinExport2")
# CAPE extensions: .json, .json_, .json.gz
# DRAKVUF Sandbox extensions: .log, .log.gz
EXTENSIONS_DYNAMIC = ("json", "json_", "json.gz", "log", ".log.gz")
EXTENSIONS_ELF = "elf_"
EXTENSIONS_FREEZE = "frz"
logger = logging.getLogger("capa")
# shared console used to redirect logging to stderr
log_console: Console = Console(stderr=True)
def hex(n: int) -> str:
"""render the given number using upper case hex, like: 0x123ABC"""
if n < 0:
@@ -99,61 +74,7 @@ def assert_never(value) -> NoReturn:
assert False, f"Unhandled value: {value} ({type(value).__name__})" # noqa: B011
@contextlib.contextmanager
def stdout_redirector(stream):
"""
Redirect stdout at the C runtime level,
which lets us handle native libraries that spam stdout.
*But*, this only works on Linux! Otherwise, it will silently keep writing to stdout.
So, prefer upstreaming a fix to the offending library when possible.
Via: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
"""
if sys.platform not in ("linux", "linux2"):
logger.warning("Unable to capture STDOUT on non-Linux (begin)")
yield
logger.warning("Unable to capture STDOUT on non-Linux (end)")
return
# libc is only on Linux
LIBC = ctypes.CDLL(None)
C_STDOUT = ctypes.c_void_p.in_dll(LIBC, "stdout")
# The original fd stdout points to. Usually 1 on POSIX systems.
original_stdout_fd = sys.stdout.fileno()
def _redirect_stdout(to_fd):
"""Redirect stdout to the given file descriptor."""
# Flush the C-level buffer stdout
LIBC.fflush(C_STDOUT)
# Flush and close sys.stdout - also closes the file descriptor (fd)
sys.stdout.close()
# Make original_stdout_fd point to the same file as to_fd
os.dup2(to_fd, original_stdout_fd)
# Create a new sys.stdout that points to the redirected fd
sys.stdout = io.TextIOWrapper(os.fdopen(original_stdout_fd, "wb"))
# Save a copy of the original stdout fd in saved_stdout_fd
saved_stdout_fd = os.dup(original_stdout_fd)
try:
# Create a temporary file and redirect stdout to it
tfile = tempfile.TemporaryFile(mode="w+b")
_redirect_stdout(tfile.fileno())
# Yield to caller, then redirect stdout back to the saved fd
yield
_redirect_stdout(saved_stdout_fd)
# Copy contents of temporary file to the given stream
tfile.flush()
tfile.seek(0, io.SEEK_SET)
stream.write(tfile.read())
finally:
tfile.close()
os.close(saved_stdout_fd)
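A usage sketch for the context manager above; on Linux, anything written to the process-level stdout while the block is active ends up in the given stream:

import io

buf = io.BytesIO()
with stdout_redirector(buf):
    print("noisy native output")             # captured, not shown on the console
print("captured:", buf.getvalue().decode())  # -> captured: noisy native output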
def load_json_from_path(json_path: Path):
import msgspec.json
with gzip.open(json_path, "r") as compressed_report:
try:
report_json = compressed_report.read()
@@ -165,7 +86,6 @@ def load_json_from_path(json_path: Path):
def decode_json_lines(fd: Union[BinaryIO, gzip.GzipFile]):
import msgspec.json
for line in fd:
try:
line_s = line.strip().decode()
@@ -187,7 +107,6 @@ def load_jsonl_from_path(jsonl_path: Path) -> Iterator[Dict]:
def load_one_jsonl_from_path(jsonl_path: Path):
# this loads one json line to avoid the overhead of loading the entire file
import msgspec.json
try:
with gzip.open(jsonl_path, "rb") as f:
line = next(iter(f))
@@ -204,20 +123,16 @@ def get_format_from_report(sample: Path) -> str:
line = load_one_jsonl_from_path(sample)
if "Plugin" in line:
return FORMAT_DRAKVUF
elif sample.name.endswith(".zip"):
with ZipFile(sample, "r") as zipfile:
namelist: List[str] = zipfile.namelist()
if "logs/summary_v2.json" in namelist and "logs/flog.xml" in namelist:
# assume a VMRay analysis archive contains at least these files
return FORMAT_VMRAY
elif sample.name.endswith(("json", "json_", "json.gz")):
report = load_json_from_path(sample)
if "CAPE" in report:
return FORMAT_CAPE
if "target" in report and "info" in report and "behavior" in report:
# CAPE report that's missing the "CAPE" key,
# which is not going to be much use, but it's correct.
return FORMAT_CAPE
return FORMAT_UNKNOWN
report = load_json_from_path(sample)
if "CAPE" in report:
return FORMAT_CAPE
if "target" in report and "info" in report and "behavior" in report:
# CAPE report that's missing the "CAPE" key,
# which is not going to be much use, but it's correct.
return FORMAT_CAPE
return FORMAT_UNKNOWN
@@ -232,8 +147,6 @@ def get_format_from_extension(sample: Path) -> str:
format_ = get_format_from_report(sample)
elif sample.name.endswith(EXTENSIONS_FREEZE):
format_ = FORMAT_FREEZE
elif sample.name.endswith(EXTENSIONS_BINEXPORT2):
format_ = FORMAT_BINEXPORT2
return format_
@@ -265,13 +178,45 @@ def get_format(sample: Path) -> str:
return FORMAT_UNKNOWN
@contextlib.contextmanager
def redirecting_print_to_tqdm(disable_progress):
"""
tqdm (progress bar) expects to have fairly tight control over console output.
so calls to `print()` will break the progress bar and make things look bad.
so, this context manager temporarily replaces the `print` implementation
with one that is compatible with tqdm.
via: https://stackoverflow.com/a/42424890/87207
"""
old_print = print # noqa: T202 [reserved word print used]
def new_print(*args, **kwargs):
# If tqdm.tqdm.write raises an error, fall back to the builtin print
if disable_progress:
old_print(*args, **kwargs)
else:
try:
tqdm.tqdm.write(*args, **kwargs)
except Exception:
old_print(*args, **kwargs)
try:
# Globally replace print with new_print.
# Verified this works manually on Python 3.11:
# >>> import inspect
# >>> inspect.builtins
# <module 'builtins' (built-in)>
inspect.builtins.print = new_print # type: ignore
yield
finally:
inspect.builtins.print = old_print # type: ignore
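A usage sketch: wrapping a loop in the context manager above routes stray print() calls through tqdm.write() so the progress bar stays intact (the loop body is hypothetical):

with redirecting_print_to_tqdm(disable_progress=False):
    for _ in tqdm.tqdm(range(3), desc="analyzing"):
        print("this goes through tqdm.tqdm.write()")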
def log_unsupported_format_error():
logger.error("-" * 80)
logger.error(" Input file does not appear to be a supported file.")
logger.error(" ")
logger.error(" See all supported file formats via capa's help output (-h).")
logger.error(" If you don't know the input file type,")
logger.error(" you can try using the `file` utility to guess it.")
logger.error(" If you don't know the input file type, you can try using the `file` utility to guess it.")
logger.error("-" * 80)
@@ -297,17 +242,6 @@ def log_unsupported_drakvuf_report_error(error: str):
logger.error("-" * 80)
def log_unsupported_vmray_report_error(error: str):
logger.error("-" * 80)
logger.error(" Input file is not a valid VMRay analysis archive: %s", error)
logger.error(" ")
logger.error(
" capa only supports analyzing VMRay dynamic analysis archives containing summary_v2.json and flog.xml log files."
)
logger.error(" Please make sure you have downloaded a dynamic analysis archive from VMRay.")
logger.error("-" * 80)
def log_empty_sandbox_report_error(error: str, sandbox_name: str):
logger.error("-" * 80)
logger.error(" %s report is empty or only contains little useful data: %s", sandbox_name, error)
@@ -320,8 +254,9 @@ def log_unsupported_os_error():
logger.error("-" * 80)
logger.error(" Input file does not appear to target a supported OS.")
logger.error(" ")
logger.error(" capa currently only analyzes executables for some operating systems")
logger.error(" (including Windows, Linux, and Android).")
logger.error(
" capa currently only supports analyzing executables for some operating systems (including Windows and Linux)."
)
logger.error("-" * 80)
@@ -339,8 +274,9 @@ def log_unsupported_runtime_error():
logger.error(" ")
logger.error(" capa supports running under Python 3.8 and higher.")
logger.error(" ")
logger.error(" If you're seeing this message on the command line,")
logger.error(" please ensure you're running a supported Python version.")
logger.error(
" If you're seeing this message on the command line, please ensure you're running a supported Python version."
)
logger.error("-" * 80)
@@ -355,110 +291,3 @@ def is_running_standalone() -> bool:
# so we keep this in a common area.
# generally, other library code should not use this function.
return hasattr(sys, "frozen") and hasattr(sys, "_MEIPASS")
def is_dev_environment() -> bool:
if is_running_standalone():
return False
if "site-packages" in __file__:
# running from a site-packages installation
return False
capa_root = Path(__file__).resolve().parent.parent
git_dir = capa_root / ".git"
if not git_dir.is_dir():
# .git directory doesn't exist
return False
return True
def is_cache_newer_than_rule_code(cache_dir: Path) -> bool:
"""
basic check to prevent issues if the rules cache is older than the relevant rules code
args:
cache_dir: the cache directory containing cache files
returns:
True if the latest cache file is newer than the relevant rules code
"""
# retrieve the latest modified cache file
cache_files = list(cache_dir.glob("*.cache"))
if not cache_files:
logger.debug("no rule cache files found")
return False
latest_cache_file = max(cache_files, key=os.path.getmtime)
cache_timestamp = os.path.getmtime(latest_cache_file)
# these are the relevant rules code files that could conflict with using an outdated cache
# delayed import due to circular dependencies
import capa.rules
import capa.rules.cache
latest_rule_code_file = max([Path(capa.rules.__file__), Path(capa.rules.cache.__file__)], key=os.path.getmtime)
rule_code_timestamp = os.path.getmtime(latest_rule_code_file)
if rule_code_timestamp > cache_timestamp:
def ts_to_str(ts):
return datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
logger.warning(
"latest rule code file %s (%s) is newer than the latest rule cache file %s (%s)",
latest_rule_code_file,
ts_to_str(rule_code_timestamp),
latest_cache_file,
ts_to_str(cache_timestamp),
)
return False
return True
class RateColumn(ProgressColumn):
"""Renders speed column in progress bar."""
def render(self, task: "Task") -> Text:
speed = f"{task.speed:>.1f}" if task.speed else "00.0"
unit = task.fields.get("unit", "it")
return Text.from_markup(f"[progress.data.speed]{speed} {unit}/s")
class PostfixColumn(ProgressColumn):
"""Renders a postfix column in progress bar."""
def render(self, task: "Task") -> Text:
return Text(task.fields.get("postfix", ""))
class MofNCompleteColumnWithUnit(MofNCompleteColumn):
"""Renders completed/total count column with a unit."""
def render(self, task: "Task") -> Text:
ret = super().render(task)
unit = task.fields.get("unit")
return ret.append(f" {unit}") if unit else ret
class CapaProgressBar(Progress):
@classmethod
def get_default_columns(cls):
return (
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
TaskProgressColumn(),
BarColumn(),
MofNCompleteColumnWithUnit(),
"",
TimeElapsedColumn(),
"<",
TimeRemainingColumn(),
"",
RateColumn(),
PostfixColumn(),
)
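A usage sketch for the progress bar defined above; the extra add_task() keyword arguments become task fields that feed the custom unit and postfix columns:

with CapaProgressBar(console=log_console, transient=True) as pbar:
    task = pbar.add_task("matching", total=100, unit="functions", postfix="")
    for _ in range(100):
        pbar.update(task, advance=1)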

View File

@@ -13,8 +13,6 @@ from pathlib import Path
import idc
import idaapi
import ida_ida
import ida_nalt
import idautils
import ida_bytes
import ida_loader
@@ -47,51 +45,6 @@ NETNODE_RESULTS = "results"
NETNODE_RULES_CACHE_ID = "rules-cache-id"
# wrappers for IDA Pro (IDAPython) 7, 8 and 9 compatibility
version = float(idaapi.get_kernel_version())
if version < 9.0:
def get_filetype() -> "ida_ida.filetype_t":
return idaapi.get_inf_structure().filetype
def get_processor_name() -> str:
return idaapi.get_inf_structure().procname
def is_32bit() -> bool:
info: idaapi.idainfo = idaapi.get_inf_structure()
return info.is_32bit()
def is_64bit() -> bool:
info: idaapi.idainfo = idaapi.get_inf_structure()
return info.is_64bit()
def retrieve_input_file_md5() -> str:
return ida_nalt.retrieve_input_file_md5()
def retrieve_input_file_sha256() -> str:
return ida_nalt.retrieve_input_file_sha256()
else:
def get_filetype() -> "ida_ida.filetype_t":
return ida_ida.inf_get_filetype()
def get_processor_name() -> str:
return idc.get_processor_name()
def is_32bit() -> bool:
return idaapi.inf_is_32bit_exactly()
def is_64bit() -> bool:
return idaapi.inf_is_64bit()
def retrieve_input_file_md5() -> str:
return ida_nalt.retrieve_input_file_md5().hex()
def retrieve_input_file_sha256() -> str:
return ida_nalt.retrieve_input_file_sha256().hex()
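A usage sketch, only meaningful inside IDA Pro where the IDAPython modules are importable; the wrappers above give callers one call site regardless of IDA version:

logger.debug("filetype:  %s", get_filetype())
logger.debug("processor: %s", get_processor_name())
logger.debug("bitness:   %s", "64-bit" if is_64bit() else "32-bit" if is_32bit() else "unknown")
logger.debug("md5:       %s", retrieve_input_file_md5())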
def inform_user_ida_ui(message):
# this isn't a logger, this is IDA's logging facility
idaapi.info(f"{message}. Please refer to IDA Output window for more information.") # noqa: G004
@@ -99,16 +52,17 @@ def inform_user_ida_ui(message):
def is_supported_ida_version():
version = float(idaapi.get_kernel_version())
if version < 7.4 or version >= 10:
if version < 7.4 or version >= 9:
warning_msg = "This plugin does not support your IDA Pro version"
logger.warning(warning_msg)
logger.warning("Your IDA Pro version is: %s. Supported versions are: IDA >= 7.4 and IDA < 10.0.", version)
logger.warning("Your IDA Pro version is: %s. Supported versions are: IDA >= 7.4 and IDA < 9.0.", version)
return False
return True
def is_supported_file_type():
if get_filetype() not in SUPPORTED_FILE_TYPES:
file_info = idaapi.get_inf_structure()
if file_info.filetype not in SUPPORTED_FILE_TYPES:
logger.error("-" * 80)
logger.error(" Input file does not appear to be a supported file type.")
logger.error(" ")
@@ -122,7 +76,8 @@ def is_supported_file_type():
def is_supported_arch_type():
if get_processor_name() not in SUPPORTED_ARCH_TYPES or not any((is_32bit(), is_64bit())):
file_info = idaapi.get_inf_structure()
if file_info.procname not in SUPPORTED_ARCH_TYPES or not any((file_info.is_32bit(), file_info.is_64bit())):
logger.error("-" * 80)
logger.error(" Input file does not appear to target a supported architecture.")
logger.error(" ")
@@ -170,10 +125,10 @@ def collect_metadata(rules: List[Path]):
md5 = get_file_md5()
sha256 = get_file_sha256()
procname = get_processor_name()
if procname == "metapc" and is_64bit():
info: idaapi.idainfo = idaapi.get_inf_structure()
if info.procname == "metapc" and info.is_64bit():
arch = "x86_64"
elif procname == "metapc" and is_32bit():
elif info.procname == "metapc" and info.is_32bit():
arch = "x86"
else:
arch = "unknown arch"

View File

@@ -81,7 +81,6 @@ can update using the `Settings` button.
* Double-click the `Address` column to navigate your Disassembly view to the address of the associated feature
* Double-click a result in the `Rule Information` column to expand its children
* Select a checkbox in the `Rule Information` column to highlight the address of the associated feature in your Disassembly view
* Reanalyze if you renamed global variables that store dynamically resolved APIs. capa will use these to improve its analysis.
#### Tips for Rule Generator

View File

@@ -5,8 +5,7 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import io
import os
import sys
import logging
import datetime
import contextlib
@@ -45,10 +44,8 @@ from capa.features.common import (
FORMAT_CAPE,
FORMAT_SC32,
FORMAT_SC64,
FORMAT_VMRAY,
FORMAT_DOTNET,
FORMAT_DRAKVUF,
FORMAT_BINEXPORT2,
)
from capa.features.address import Address
from capa.features.extractors.base_extractor import (
@@ -66,10 +63,7 @@ BACKEND_BINJA = "binja"
BACKEND_PEFILE = "pefile"
BACKEND_CAPE = "cape"
BACKEND_DRAKVUF = "drakvuf"
BACKEND_VMRAY = "vmray"
BACKEND_FREEZE = "freeze"
BACKEND_BINEXPORT2 = "binexport2"
BACKEND_IDA = "ida"
class CorruptFile(ValueError):
@@ -171,7 +165,6 @@ def get_workspace(path: Path, input_format: str, sigpaths: List[Path]):
# to do a subclass check via isinstance.
if type(e) is Exception and "Couldn't convert rva" in e.args[0]:
raise CorruptFile(e.args[0]) from e
raise
viv_utils.flirt.register_flirt_signature_analyzers(vw, [str(s) for s in sigpaths])
@@ -225,11 +218,6 @@ def get_extractor(
report = capa.helpers.load_jsonl_from_path(input_path)
return capa.features.extractors.drakvuf.extractor.DrakvufExtractor.from_report(report)
elif backend == BACKEND_VMRAY:
import capa.features.extractors.vmray.extractor
return capa.features.extractors.vmray.extractor.VMRayExtractor.from_zipfile(input_path)
elif backend == BACKEND_DOTNET:
import capa.features.extractors.dnfile.extractor
@@ -239,15 +227,24 @@ def get_extractor(
return capa.features.extractors.dnfile.extractor.DnfileFeatureExtractor(input_path)
elif backend == BACKEND_BINJA:
import capa.features.extractors.binja.find_binja_api as finder
import capa.helpers
from capa.features.extractors.binja.find_binja_api import find_binja_path
if not finder.has_binaryninja():
raise RuntimeError("cannot find Binary Ninja API module.")
# When we are running as a standalone executable, we cannot directly import binaryninja
# We need to first find the binja API installation path and add it into sys.path
if capa.helpers.is_running_standalone():
bn_api = find_binja_path()
if bn_api.exists():
sys.path.append(str(bn_api))
if not finder.load_binaryninja():
raise RuntimeError("failed to load Binary Ninja API module.")
import binaryninja
try:
import binaryninja
from binaryninja import BinaryView
except ImportError:
raise RuntimeError(
"Cannot import binaryninja module. Please install the Binary Ninja Python API first: "
+ "https://docs.binary.ninja/dev/batch.html#install-the-api)."
)
import capa.features.extractors.binja.extractor
@@ -262,7 +259,7 @@ def get_extractor(
raise UnsupportedOSError()
with console.status("analyzing program...", spinner="dots"):
bv: binaryninja.BinaryView = binaryninja.load(str(input_path))
bv: BinaryView = binaryninja.load(str(input_path))
if bv is None:
raise RuntimeError(f"Binary Ninja cannot open file {input_path}")
@@ -304,70 +301,10 @@ def get_extractor(
elif backend == BACKEND_FREEZE:
return frz.load(input_path.read_bytes())
elif backend == BACKEND_BINEXPORT2:
import capa.features.extractors.binexport2
import capa.features.extractors.binexport2.extractor
be2 = capa.features.extractors.binexport2.get_binexport2(input_path)
assert sample_path is not None
buf = sample_path.read_bytes()
return capa.features.extractors.binexport2.extractor.BinExport2FeatureExtractor(be2, buf)
elif backend == BACKEND_IDA:
import capa.features.extractors.ida.idalib as idalib
if not idalib.has_idalib():
raise RuntimeError("cannot find IDA idalib module.")
if not idalib.load_idalib():
raise RuntimeError("failed to load IDA idalib module.")
import idapro
import ida_auto
import capa.features.extractors.ida.extractor
logger.debug("idalib: opening database...")
# idalib writes to stdout (ugh), so we have to capture that
# so as not to screw up structured output.
with capa.helpers.stdout_redirector(io.BytesIO()):
with console.status("analyzing program...", spinner="dots"):
if idapro.open_database(str(input_path), run_auto_analysis=True):
raise RuntimeError("failed to analyze input file")
logger.debug("idalib: waiting for analysis...")
ida_auto.auto_wait()
logger.debug("idalib: opened database.")
return capa.features.extractors.ida.extractor.IdaFeatureExtractor()
else:
raise ValueError("unexpected backend: " + backend)
def _get_binexport2_file_extractors(input_file: Path) -> List[FeatureExtractor]:
# I'm not sure this is where this logic should live, but it works for now.
# we'll keep this a "private" routine until we're sure.
import capa.features.extractors.binexport2
be2 = capa.features.extractors.binexport2.get_binexport2(input_file)
sample_path = capa.features.extractors.binexport2.get_sample_from_binexport2(
input_file, be2, [Path(os.environ.get("CAPA_SAMPLES_DIR", "."))]
)
with sample_path.open("rb") as f:
taste = f.read()
if taste.startswith(capa.features.extractors.common.MATCH_PE):
return get_file_extractors(sample_path, FORMAT_PE)
elif taste.startswith(capa.features.extractors.common.MATCH_ELF):
return get_file_extractors(sample_path, FORMAT_ELF)
else:
logger.warning("unsupported format")
return []
def get_file_extractors(input_file: Path, input_format: str) -> List[FeatureExtractor]:
file_extractors: List[FeatureExtractor] = []
@@ -405,14 +342,6 @@ def get_file_extractors(input_file: Path, input_format: str) -> List[FeatureExtr
report = capa.helpers.load_jsonl_from_path(input_file)
file_extractors.append(capa.features.extractors.drakvuf.extractor.DrakvufExtractor.from_report(report))
elif input_format == FORMAT_VMRAY:
import capa.features.extractors.vmray.extractor
file_extractors.append(capa.features.extractors.vmray.extractor.VMRayExtractor.from_zipfile(input_file))
elif input_format == FORMAT_BINEXPORT2:
file_extractors = _get_binexport2_file_extractors(input_file)
return file_extractors

View File

@@ -17,12 +17,11 @@ import argparse
import textwrap
import contextlib
from types import TracebackType
from typing import Any, Set, Dict, List, Optional, TypedDict
from typing import Any, Dict, List, Optional
from pathlib import Path
import colorama
from pefile import PEFormatError
from rich.logging import RichHandler
from elftools.common.exceptions import ELFError
import capa.perf
@@ -44,16 +43,13 @@ import capa.features.extractors.common
from capa.rules import RuleSet
from capa.engine import MatchResults
from capa.loader import (
BACKEND_IDA,
BACKEND_VIV,
BACKEND_CAPE,
BACKEND_BINJA,
BACKEND_VMRAY,
BACKEND_DOTNET,
BACKEND_FREEZE,
BACKEND_PEFILE,
BACKEND_DRAKVUF,
BACKEND_BINEXPORT2,
)
from capa.helpers import (
get_file_taste,
@@ -63,11 +59,9 @@ from capa.helpers import (
log_unsupported_format_error,
log_empty_sandbox_report_error,
log_unsupported_cape_report_error,
log_unsupported_vmray_report_error,
log_unsupported_drakvuf_report_error,
)
from capa.exceptions import (
InvalidArgument,
EmptyReportError,
UnsupportedOSError,
UnsupportedArchError,
@@ -85,23 +79,13 @@ from capa.features.common import (
FORMAT_CAPE,
FORMAT_SC32,
FORMAT_SC64,
FORMAT_VMRAY,
FORMAT_DOTNET,
FORMAT_FREEZE,
FORMAT_RESULT,
FORMAT_DRAKVUF,
STATIC_FORMATS,
DYNAMIC_FORMATS,
FORMAT_BINEXPORT2,
)
from capa.capabilities.common import find_capabilities, has_file_limitation, find_file_capabilities
from capa.features.extractors.base_extractor import (
ProcessFilter,
FunctionFilter,
FeatureExtractor,
StaticFeatureExtractor,
DynamicFeatureExtractor,
)
from capa.features.extractors.base_extractor import FeatureExtractor, StaticFeatureExtractor, DynamicFeatureExtractor
RULES_PATH_DEFAULT_STRING = "(embedded rules)"
SIGNATURES_PATH_DEFAULT_STRING = "(embedded signatures)"
@@ -122,17 +106,10 @@ E_MISSING_CAPE_STATIC_ANALYSIS = 21
E_MISSING_CAPE_DYNAMIC_ANALYSIS = 22
E_EMPTY_REPORT = 23
E_UNSUPPORTED_GHIDRA_EXECUTION_MODE = 24
E_INVALID_INPUT_FORMAT = 25
E_INVALID_FEATURE_EXTRACTOR = 26
logger = logging.getLogger("capa")
class FilterConfig(TypedDict, total=False):
processes: Set[int]
functions: Set[int]
@contextlib.contextmanager
def timing(msg: str):
t0 = time.time()
@@ -197,13 +174,12 @@ def simple_message_exception_handler(exctype, value: BaseException, traceback: T
"""
if exctype is KeyboardInterrupt:
print("KeyboardInterrupt detected, program terminated", file=sys.stderr)
print("KeyboardInterrupt detected, program terminated")
else:
print(
f"Unexpected exception raised: {exctype}. Please run capa in debug mode (-d/--debug) "
+ "to see the stack trace. Please also report your issue on the capa GitHub page so we "
+ "can improve the code! (https://github.com/mandiant/capa/issues)",
file=sys.stderr,
+ "can improve the code! (https://github.com/mandiant/capa/issues)"
)
@@ -267,9 +243,7 @@ def install_common_args(parser, wanted=None):
(FORMAT_SC64, "64-bit shellcode"),
(FORMAT_CAPE, "CAPE sandbox report"),
(FORMAT_DRAKVUF, "DRAKVUF sandbox report"),
(FORMAT_VMRAY, "VMRay sandbox report"),
(FORMAT_FREEZE, "features previously frozen by capa"),
(FORMAT_BINEXPORT2, "BinExport2"),
]
format_help = ", ".join([f"{f[0]}: {f[1]}" for f in formats])
@@ -285,15 +259,12 @@ def install_common_args(parser, wanted=None):
backends = [
(BACKEND_AUTO, "(default) detect appropriate backend automatically"),
(BACKEND_VIV, "vivisect"),
(BACKEND_IDA, "IDA via idalib"),
(BACKEND_PEFILE, "pefile (file features only)"),
(BACKEND_BINJA, "Binary Ninja"),
(BACKEND_DOTNET, ".NET"),
(BACKEND_BINEXPORT2, "BinExport2"),
(BACKEND_FREEZE, "capa freeze"),
(BACKEND_CAPE, "CAPE"),
(BACKEND_DRAKVUF, "DRAKVUF"),
(BACKEND_VMRAY, "VMRay"),
]
backend_help = ", ".join([f"{f[0]}: {f[1]}" for f in backends])
parser.add_argument(
@@ -305,22 +276,6 @@ def install_common_args(parser, wanted=None):
help=f"select backend, {backend_help}",
)
if "restrict-to-functions" in wanted:
parser.add_argument(
"--restrict-to-functions",
type=lambda s: s.replace(" ", "").split(","),
default=[],
help="provide a list of comma-separated function virtual addresses to analyze (static analysis).",
)
if "restrict-to-processes" in wanted:
parser.add_argument(
"--restrict-to-processes",
type=lambda s: s.replace(" ", "").split(","),
default=[],
help="provide a list of comma-separated process IDs to analyze (dynamic analysis).",
)
if "os" in wanted:
oses = [
(OS_AUTO, "detect OS automatically - default"),
@@ -406,23 +361,15 @@ def handle_common_args(args):
ShouldExitError: if the program is invoked incorrectly and should exit.
"""
if args.quiet:
logging.basicConfig(level=logging.WARNING)
logging.getLogger().setLevel(logging.WARNING)
elif args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
# use [/] after the logger name to reset any styling,
# and prevent the color from carrying over to the message
logformat = "[dim]%(name)s[/]: %(message)s"
# set markup=True to allow the use of Rich's markup syntax in log messages
rich_handler = RichHandler(markup=True, show_time=False, show_path=True, console=capa.helpers.log_console)
rich_handler.setFormatter(logging.Formatter(logformat))
# use RichHandler for root logger
logging.getLogger().addHandler(rich_handler)
# disable vivisect-related logging, it's verbose and not relevant for capa users
set_vivisect_log_level(logging.CRITICAL)
@@ -466,12 +413,8 @@ def handle_common_args(args):
if args.rules == [RULES_PATH_DEFAULT_STRING]:
logger.debug("-" * 80)
logger.debug(" Using default embedded rules.")
logger.debug(" To provide your own rules, use the form:")
logger.debug("")
logger.debug(" `capa.exe -r ./path/to/rules/ /path/to/mal.exe`.")
logger.debug("")
logger.debug(" To provide your own rules, use the form `capa.exe -r ./path/to/rules/ /path/to/mal.exe`.")
logger.debug(" You can see the current default rule set here:")
logger.debug("")
logger.debug(" https://github.com/mandiant/capa-rules")
logger.debug("-" * 80)
@@ -577,18 +520,12 @@ def get_backend_from_cli(args, input_format: str) -> str:
if input_format == FORMAT_DRAKVUF:
return BACKEND_DRAKVUF
elif input_format == FORMAT_VMRAY:
return BACKEND_VMRAY
elif input_format == FORMAT_DOTNET:
return BACKEND_DOTNET
elif input_format == FORMAT_FREEZE:
return BACKEND_FREEZE
elif input_format == FORMAT_BINEXPORT2:
return BACKEND_BINEXPORT2
else:
return BACKEND_VIV
@@ -607,15 +544,8 @@ def get_sample_path_from_cli(args, backend: str) -> Optional[Path]:
raises:
ShouldExitError: if the program is invoked incorrectly and should exit.
"""
if backend in (BACKEND_CAPE, BACKEND_DRAKVUF, BACKEND_VMRAY):
if backend in (BACKEND_CAPE, BACKEND_DRAKVUF):
return None
elif backend == BACKEND_BINEXPORT2:
import capa.features.extractors.binexport2
be2 = capa.features.extractors.binexport2.get_binexport2(args.input_file)
return capa.features.extractors.binexport2.get_sample_from_binexport2(
args.input_file, be2, [Path(os.environ.get("CAPA_SAMPLES_DIR", "."))]
)
else:
return args.input_file
@@ -650,22 +580,13 @@ def get_rules_from_cli(args) -> RuleSet:
raises:
ShouldExitError: if the program is invoked incorrectly and should exit.
"""
enable_cache: bool = True
try:
if capa.helpers.is_running_standalone() and args.is_default_rules:
cache_dir = get_default_root() / "cache"
else:
cache_dir = capa.rules.cache.get_default_cache_directory()
if capa.helpers.is_dev_environment():
# using the rules cache during development may result in unexpected errors, see #1898
enable_cache = capa.helpers.is_cache_newer_than_rule_code(cache_dir)
if not enable_cache:
logger.debug("not using cache. delete the cache file manually to use rule caching again")
else:
logger.debug("cache can be used, no potentially outdated cache files found")
rules = capa.rules.get_rules(args.rules, cache_dir=cache_dir, enable_cache=enable_cache)
rules = capa.rules.get_rules(args.rules, cache_dir=cache_dir)
except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
logger.error("%s", str(e))
logger.error(
@@ -728,8 +649,6 @@ def get_file_extractors_from_cli(args, input_format: str) -> List[FeatureExtract
log_unsupported_cape_report_error(str(e))
elif input_format == FORMAT_DRAKVUF:
log_unsupported_drakvuf_report_error(str(e))
elif input_format == FORMAT_VMRAY:
log_unsupported_vmray_report_error(str(e))
else:
log_unsupported_format_error()
raise ShouldExitError(E_INVALID_FILE_TYPE) from e
@@ -830,13 +749,9 @@ def get_extractor_from_cli(args, input_format: str, backend: str) -> FeatureExtr
os_ = get_os_from_cli(args, backend)
sample_path = get_sample_path_from_cli(args, backend)
extractor_filters = get_extractor_filters_from_cli(args, input_format)
logger.debug("format: %s", input_format)
logger.debug("backend: %s", backend)
try:
extractor = capa.loader.get_extractor(
return capa.loader.get_extractor(
args.input_file,
input_format,
os_,
@@ -846,14 +761,11 @@ def get_extractor_from_cli(args, input_format: str, backend: str) -> FeatureExtr
disable_progress=args.quiet or args.debug,
sample_path=sample_path,
)
return apply_extractor_filters(extractor, extractor_filters)
except UnsupportedFormatError as e:
if input_format == FORMAT_CAPE:
log_unsupported_cape_report_error(str(e))
elif input_format == FORMAT_DRAKVUF:
log_unsupported_drakvuf_report_error(str(e))
elif input_format == FORMAT_VMRAY:
log_unsupported_vmray_report_error(str(e))
else:
log_unsupported_format_error()
raise ShouldExitError(E_INVALID_FILE_TYPE) from e
@@ -868,45 +780,9 @@ def get_extractor_from_cli(args, input_format: str, backend: str) -> FeatureExtr
raise ShouldExitError(E_CORRUPT_FILE) from e
def get_extractor_filters_from_cli(args, input_format) -> FilterConfig:
if not hasattr(args, "restrict_to_processes") and not hasattr(args, "restrict_to_functions"):
# no processes or function filters were installed in the args
return {}
if input_format in STATIC_FORMATS:
if args.restrict_to_processes:
raise InvalidArgument("Cannot filter processes with static analysis.")
return {"functions": {int(addr, 0) for addr in args.restrict_to_functions}}
elif input_format in DYNAMIC_FORMATS:
if args.restrict_to_functions:
raise InvalidArgument("Cannot filter functions with dynamic analysis.")
return {"processes": {int(pid, 0) for pid in args.restrict_to_processes}}
else:
raise ShouldExitError(E_INVALID_INPUT_FORMAT)
def apply_extractor_filters(extractor: FeatureExtractor, extractor_filters: FilterConfig):
if not any(extractor_filters.values()):
return extractor
# if the user specified extractor filters, then apply them here
if isinstance(extractor, StaticFeatureExtractor):
assert extractor_filters["functions"]
return FunctionFilter(extractor, extractor_filters["functions"])
elif isinstance(extractor, DynamicFeatureExtractor):
assert extractor_filters["processes"]
return ProcessFilter(extractor, extractor_filters["processes"])
else:
raise ShouldExitError(E_INVALID_FEATURE_EXTRACTOR)
def main(argv: Optional[List[str]] = None):
if sys.version_info < (3, 8):
raise UnsupportedRuntimeError("This version of capa can only be used with Python 3.8+")
elif sys.version_info < (3, 10):
from warnings import warn
warn("This is the last capa version supporting Python 3.8 and 3.9.", DeprecationWarning, stacklevel=2)
if argv is None:
argv = sys.argv[1:]
@@ -918,9 +794,6 @@ def main(argv: Optional[List[str]] = None):
You can see the rule set here:
https://github.com/mandiant/capa-rules
You can load capa JSON output into capa Explorer Web:
https://github.com/mandiant/capa/explorer
To provide your own rule set, use the `-r` flag:
capa --rules /path/to/rules suspicious.exe
capa -r /path/to/rules suspicious.exe
@@ -946,20 +819,7 @@ def main(argv: Optional[List[str]] = None):
parser = argparse.ArgumentParser(
description=desc, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter
)
install_common_args(
parser,
{
"input_file",
"format",
"backend",
"os",
"signatures",
"rules",
"tag",
"restrict-to-functions",
"restrict-to-processes",
},
)
install_common_args(parser, {"input_file", "format", "backend", "os", "signatures", "rules", "tag"})
parser.add_argument("-j", "--json", action="store_true", help="emit JSON instead of text")
args = parser.parse_args(args=argv)

View File

@@ -6,43 +6,18 @@
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import io
import collections
import urllib.parse
import rich
import rich.table
import rich.console
from rich.console import Console
import tabulate
import capa.render.utils as rutils
import capa.render.result_document as rd
import capa.features.freeze.features as frzf
from capa.rules import RuleSet
from capa.engine import MatchResults
from capa.render.utils import StringIO
def bold_markup(s) -> str:
"""
Generate Rich markup in a bold style.
The resulting string should be passed to a Rich renderable
and/or printed via Rich or the markup will be visible to the user.
"""
return f"[cyan]{s}[/cyan]"
def link_markup(s: str, href: str) -> str:
"""
Generate Rich markup for a clickable hyperlink.
This works in many modern terminals.
When it doesn't work, the fallback is just to show the link name (s),
as if it were not a link.
The resulting string should be passed to a Rich renderable
and/or printed via Rich or the markup will be visible to the user.
"""
return f"[link={href}]{s}[/link]"
tabulate.PRESERVE_WHITESPACE = True
def width(s: str, character_count: int) -> str:
@@ -53,16 +28,11 @@ def width(s: str, character_count: int) -> str:
return s
def render_sample_link(hash: str) -> str:
url = "https://www.virustotal.com/gui/file/" + hash
return link_markup(hash, url)
def render_meta(doc: rd.ResultDocument, console: Console):
def render_meta(doc: rd.ResultDocument, ostream: StringIO):
rows = [
("md5", render_sample_link(doc.meta.sample.md5)),
("sha1", render_sample_link(doc.meta.sample.sha1)),
("sha256", render_sample_link(doc.meta.sample.sha256)),
(width("md5", 22), width(doc.meta.sample.md5, 82)),
("sha1", doc.meta.sample.sha1),
("sha256", doc.meta.sample.sha256),
("analysis", doc.meta.flavor.value),
("os", doc.meta.analysis.os),
("format", doc.meta.analysis.format),
@@ -70,14 +40,8 @@ def render_meta(doc: rd.ResultDocument, console: Console):
("path", doc.meta.sample.path),
]
table = rich.table.Table(show_header=False, min_width=100)
table.add_column()
table.add_column()
for row in rows:
table.add_row(*row)
console.print(table)
ostream.write(tabulate.tabulate(rows, tablefmt="mixed_outline"))
ostream.write("\n")
def find_subrule_matches(doc: rd.ResultDocument):
@@ -107,12 +71,7 @@ def find_subrule_matches(doc: rd.ResultDocument):
return matches
def render_rule_name(name: str) -> str:
url = f"https://mandiant.github.io/capa/rules/{urllib.parse.quote(name)}/"
return bold_markup(link_markup(name, url))
def render_capabilities(doc: rd.ResultDocument, console: Console):
def render_capabilities(doc: rd.ResultDocument, ostream: StringIO):
"""
example::
@@ -136,30 +95,25 @@ def render_capabilities(doc: rd.ResultDocument, console: Console):
count = len(rule.matches)
if count == 1:
capability = render_rule_name(rule.meta.name)
capability = rutils.bold(rule.meta.name)
else:
capability = render_rule_name(rule.meta.name) + f" ({count} matches)"
capability = f"{rutils.bold(rule.meta.name)} ({count} matches)"
rows.append((capability, rule.meta.namespace))
if rows:
table = rich.table.Table(min_width=100)
table.add_column(width("Capability", 20))
table.add_column("Namespace")
for row in rows:
table.add_row(*row)
console.print(table)
ostream.write(
tabulate.tabulate(
rows,
headers=[width("Capability", 50), width("Namespace", 50)],
tablefmt="mixed_outline",
)
)
ostream.write("\n")
else:
console.print(bold_markup("no capabilities found"))
ostream.writeln(rutils.bold("no capabilities found"))
def render_attack_link(id: str) -> str:
url = f"https://attack.mitre.org/techniques/{id.replace('.', '/')}/"
return rf"\[{link_markup(id, url)}]"
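For example, a sub-technique ID maps onto its ATT&CK URL like so:

id = "T1059.001"
url = f"https://attack.mitre.org/techniques/{id.replace('.', '/')}/"
assert url == "https://attack.mitre.org/techniques/T1059/001/"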
def render_attack(doc: rd.ResultDocument, console: Console):
def render_attack(doc: rd.ResultDocument, ostream: StringIO):
"""
example::
@@ -178,36 +132,35 @@ def render_attack(doc: rd.ResultDocument, console: Console):
tactics = collections.defaultdict(set)
for rule in rutils.capability_rules(doc):
for attack in rule.meta.attack:
tactics[attack.tactic].add((attack.technique, attack.subtechnique, attack.id.strip("[").strip("]")))
tactics[attack.tactic].add((attack.technique, attack.subtechnique, attack.id))
rows = []
for tactic, techniques in sorted(tactics.items()):
inner_rows = []
for technique, subtechnique, id in sorted(techniques):
if not subtechnique:
# example: File and Directory Discovery [T1083]
inner_rows.append(f"{bold_markup(technique)} {render_attack_link(id)}")
inner_rows.append(f"{rutils.bold(technique)} {id}")
else:
# example: Code Discovery::Enumerate PE Sections [T1084.001]
inner_rows.append(f"{bold_markup(technique)}::{subtechnique} {render_attack_link(id)}")
tactic = bold_markup(tactic.upper())
technique = "\n".join(inner_rows)
rows.append((tactic, technique))
inner_rows.append(f"{rutils.bold(technique)}::{subtechnique} {id}")
rows.append(
(
rutils.bold(tactic.upper()),
"\n".join(inner_rows),
)
)
if rows:
table = rich.table.Table(min_width=100)
table.add_column(width("ATT&CK Tactic", 20))
table.add_column("ATT&CK Technique")
for row in rows:
table.add_row(*row)
console.print(table)
ostream.write(
tabulate.tabulate(
rows,
headers=[width("ATT&CK Tactic", 20), width("ATT&CK Technique", 80)],
tablefmt="mixed_grid",
)
)
ostream.write("\n")
def render_maec(doc: rd.ResultDocument, console: Console):
def render_maec(doc: rd.ResultDocument, ostream: StringIO):
"""
example::
@@ -240,37 +193,20 @@ def render_maec(doc: rd.ResultDocument, console: Console):
for category in sorted(maec_categories):
values = maec_table.get(category, set())
if values:
rows.append((bold_markup(category.replace("_", "-")), "\n".join(sorted(values))))
rows.append((rutils.bold(category.replace("_", "-")), "\n".join(sorted(values))))
if rows:
table = rich.table.Table(min_width=100)
table.add_column(width("MAEC Category", 20))
table.add_column("MAEC Value")
for row in rows:
table.add_row(*row)
console.print(table)
ostream.write(
tabulate.tabulate(
rows,
headers=[width("MAEC Category", 25), width("MAEC Value", 75)],
tablefmt="mixed_grid",
)
)
ostream.write("\n")
def render_mbc_link(id: str, objective: str, behavior: str) -> str:
if id[0] in {"B", "T", "E", "F"}:
# behavior
base_url = "https://github.com/MBCProject/mbc-markdown/blob/main"
elif id[0] == "C":
# micro-behavior
base_url = "https://github.com/MBCProject/mbc-markdown/blob/main/micro-behaviors"
else:
raise ValueError("unexpected MBC prefix")
objective_fragment = objective.lower().replace(" ", "-")
behavior_fragment = behavior.lower().replace(" ", "-")
url = f"{base_url}/{objective_fragment}/{behavior_fragment}.md"
return rf"\[{link_markup(id, url)}]"
def render_mbc(doc: rd.ResultDocument, console: Console):
def render_mbc(doc: rd.ResultDocument, ostream: StringIO):
"""
example::
@@ -287,48 +223,48 @@ def render_mbc(doc: rd.ResultDocument, console: Console):
objectives = collections.defaultdict(set)
for rule in rutils.capability_rules(doc):
for mbc in rule.meta.mbc:
objectives[mbc.objective].add((mbc.behavior, mbc.method, mbc.id.strip("[").strip("]")))
objectives[mbc.objective].add((mbc.behavior, mbc.method, mbc.id))
rows = []
for objective, behaviors in sorted(objectives.items()):
inner_rows = []
for technique, subtechnique, id in sorted(behaviors):
if not subtechnique:
# example: File and Directory Discovery [T1083]
inner_rows.append(f"{bold_markup(technique)} {render_mbc_link(id, objective, technique)}")
for behavior, method, id in sorted(behaviors):
if not method:
inner_rows.append(f"{rutils.bold(behavior)} [{id}]")
else:
# example: Code Discovery::Enumerate PE Sections [T1084.001]
inner_rows.append(
f"{bold_markup(technique)}::{subtechnique} {render_mbc_link(id, objective, technique)}"
)
objective = bold_markup(objective.upper())
technique = "\n".join(inner_rows)
rows.append((objective, technique))
inner_rows.append(f"{rutils.bold(behavior)}::{method} [{id}]")
rows.append(
(
rutils.bold(objective.upper()),
"\n".join(inner_rows),
)
)
if rows:
table = rich.table.Table(min_width=100)
table.add_column(width("MBC Objective", 20))
table.add_column("MBC Behavior")
for row in rows:
table.add_row(*row)
console.print(table)
ostream.write(
tabulate.tabulate(
rows,
headers=[width("MBC Objective", 25), width("MBC Behavior", 75)],
tablefmt="mixed_grid",
)
)
ostream.write("\n")
def render_default(doc: rd.ResultDocument):
f = io.StringIO()
console = rich.console.Console()
ostream = rutils.StringIO()
render_meta(doc, console)
render_attack(doc, console)
render_maec(doc, console)
render_mbc(doc, console)
render_capabilities(doc, console)
render_meta(doc, ostream)
ostream.write("\n")
render_attack(doc, ostream)
ostream.write("\n")
render_maec(doc, ostream)
ostream.write("\n")
render_mbc(doc, ostream)
ostream.write("\n")
render_capabilities(doc, ostream)
return f.getvalue()
return ostream.getvalue()
def render(meta, rules: RuleSet, capabilities: MatchResults) -> str:

View File

@@ -22,7 +22,7 @@ import capa.features.address
import capa.features.freeze.features as frzf
from capa.rules import RuleSet
from capa.engine import MatchResults
from capa.helpers import assert_never, load_json_from_path
from capa.helpers import assert_never
class FrozenModel(BaseModel):
@@ -668,5 +668,4 @@ class ResultDocument(FrozenModel):
@classmethod
def from_file(cls, path: Path) -> "ResultDocument":
report = load_json_from_path(path)
return cls.model_validate(report)
return cls.model_validate_json(path.read_text(encoding="utf-8"))
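# A minimal usage sketch (illustrative): loading a report produced via
# `capa --json` back into a typed document:
#
#   doc = ResultDocument.from_file(Path("report.json"))
#   print(doc.meta.sample.sha256)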

View File

@@ -9,29 +9,28 @@
import io
from typing import Dict, List, Tuple, Union, Iterator, Optional
import rich.console
from rich.progress import Text
import termcolor
import capa.render.result_document as rd
def bold(s: str) -> Text:
def bold(s: str) -> str:
"""draw attention to the given string"""
return Text.from_markup(f"[cyan]{s}")
return termcolor.colored(s, "cyan")
def bold2(s: str) -> Text:
def bold2(s: str) -> str:
"""draw attention to the given string, within a `bold` section"""
return Text.from_markup(f"[green]{s}")
return termcolor.colored(s, "green")
def mute(s: str) -> Text:
def mute(s: str) -> str:
"""draw attention away from the given string"""
return Text.from_markup(f"[dim]{s}")
return termcolor.colored(s, "dark_grey")
def warn(s: str) -> Text:
return Text.from_markup(f"[yellow]{s}")
def warn(s: str) -> str:
return termcolor.colored(s, "yellow")
def format_parts_id(data: Union[rd.AttackSpec, rd.MBCSpec]):
@@ -86,17 +85,3 @@ class StringIO(io.StringIO):
def writeln(self, s):
self.write(s)
self.write("\n")
class Console(rich.console.Console):
def writeln(self, *args, **kwargs) -> None:
"""
prints the text with a new line at the end.
"""
return self.print(*args, **kwargs)
def write(self, *args, **kwargs) -> None:
"""
prints the text without a new line at the end.
"""
return self.print(*args, **kwargs, end="")
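# A minimal usage sketch (illustrative), mirroring how the verbose
# renderers drive this class: capture styled output into a string
# rather than writing it directly to the terminal.
#
#   console = Console(highlight=False)
#   with console.capture() as capture:
#       console.write("label: ")
#       console.writeln("value")
#   rendered = capture.get()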

View File

@@ -25,8 +25,7 @@ See the License for the specific language governing permissions and limitations
from typing import cast
from rich.text import Text
from rich.table import Table
import tabulate
import capa.rules
import capa.helpers
@@ -35,7 +34,6 @@ import capa.features.freeze as frz
import capa.render.result_document as rd
from capa.rules import RuleSet
from capa.engine import MatchResults
from capa.render.utils import Console
def format_address(address: frz.Address) -> str:
@@ -142,7 +140,7 @@ def render_call(layout: rd.DynamicLayout, addr: frz.Address) -> str:
)
def render_static_meta(console: Console, meta: rd.StaticMetadata):
def render_static_meta(ostream, meta: rd.StaticMetadata):
"""
like:
@@ -163,16 +161,12 @@ def render_static_meta(console: Console, meta: rd.StaticMetadata):
total feature count 1918
"""
grid = Table.grid(padding=(0, 2))
grid.add_column(style="dim")
grid.add_column()
rows = [
("md5", meta.sample.md5),
("sha1", meta.sample.sha1),
("sha256", meta.sample.sha256),
("path", meta.sample.path),
("timestamp", str(meta.timestamp)),
("timestamp", meta.timestamp),
("capa version", meta.version),
("os", meta.analysis.os),
("format", meta.analysis.format),
@@ -181,21 +175,18 @@ def render_static_meta(console: Console, meta: rd.StaticMetadata):
("extractor", meta.analysis.extractor),
("base address", format_address(meta.analysis.base_address)),
("rules", "\n".join(meta.analysis.rules)),
("function count", str(len(meta.analysis.feature_counts.functions))),
("library function count", str(len(meta.analysis.library_functions))),
("function count", len(meta.analysis.feature_counts.functions)),
("library function count", len(meta.analysis.library_functions)),
(
"total feature count",
str(meta.analysis.feature_counts.file + sum(f.count for f in meta.analysis.feature_counts.functions)),
meta.analysis.feature_counts.file + sum(f.count for f in meta.analysis.feature_counts.functions),
),
]
for row in rows:
grid.add_row(*row)
console.print(grid)
ostream.writeln(tabulate.tabulate(rows, tablefmt="plain"))
def render_dynamic_meta(console: Console, meta: rd.DynamicMetadata):
def render_dynamic_meta(ostream, meta: rd.DynamicMetadata):
"""
like:
@@ -214,16 +205,12 @@ def render_dynamic_meta(console: Console, meta: rd.DynamicMetadata):
total feature count 1918
"""
table = Table.grid(padding=(0, 2))
table.add_column(style="dim")
table.add_column()
rows = [
("md5", meta.sample.md5),
("sha1", meta.sample.sha1),
("sha256", meta.sample.sha256),
("path", meta.sample.path),
("timestamp", str(meta.timestamp)),
("timestamp", meta.timestamp),
("capa version", meta.version),
("os", meta.analysis.os),
("format", meta.analysis.format),
@@ -231,29 +218,26 @@ def render_dynamic_meta(console: Console, meta: rd.DynamicMetadata):
("analysis", meta.flavor.value),
("extractor", meta.analysis.extractor),
("rules", "\n".join(meta.analysis.rules)),
("process count", str(len(meta.analysis.feature_counts.processes))),
("process count", len(meta.analysis.feature_counts.processes)),
(
"total feature count",
str(meta.analysis.feature_counts.file + sum(p.count for p in meta.analysis.feature_counts.processes)),
meta.analysis.feature_counts.file + sum(p.count for p in meta.analysis.feature_counts.processes),
),
]
for row in rows:
table.add_row(*row)
console.print(table)
ostream.writeln(tabulate.tabulate(rows, tablefmt="plain"))
def render_meta(console: Console, doc: rd.ResultDocument):
def render_meta(ostream, doc: rd.ResultDocument):
if doc.meta.flavor == rd.Flavor.STATIC:
render_static_meta(console, cast(rd.StaticMetadata, doc.meta))
render_static_meta(ostream, cast(rd.StaticMetadata, doc.meta))
elif doc.meta.flavor == rd.Flavor.DYNAMIC:
render_dynamic_meta(console, cast(rd.DynamicMetadata, doc.meta))
render_dynamic_meta(ostream, cast(rd.DynamicMetadata, doc.meta))
else:
raise ValueError("invalid meta analysis")
def render_rules(console: Console, doc: rd.ResultDocument):
def render_rules(ostream, doc: rd.ResultDocument):
"""
like:
@@ -270,15 +254,11 @@ def render_rules(console: Console, doc: rd.ResultDocument):
if count == 1:
capability = rutils.bold(rule.meta.name)
else:
capability = Text.assemble(rutils.bold(rule.meta.name), f" ({count} matches)")
capability = f"{rutils.bold(rule.meta.name)} ({count} matches)"
console.print(capability)
ostream.writeln(capability)
had_match = True
table = Table.grid(padding=(0, 2))
table.add_column(style="dim")
table.add_column()
rows = []
ns = rule.meta.namespace
@@ -330,26 +310,23 @@ def render_rules(console: Console, doc: rd.ResultDocument):
rows.append(("matches", "\n".join(lines)))
for row in rows:
table.add_row(*row)
console.print(table)
console.print()
ostream.writeln(tabulate.tabulate(rows, tablefmt="plain"))
ostream.write("\n")
if not had_match:
console.print(rutils.bold("no capabilities found"))
ostream.writeln(rutils.bold("no capabilities found"))
def render_verbose(doc: rd.ResultDocument):
console = Console(highlight=False)
ostream = rutils.StringIO()
with console.capture() as capture:
render_meta(console, doc)
console.print()
render_rules(console, doc)
console.print()
render_meta(ostream, doc)
ostream.write("\n")
return capture.get()
render_rules(ostream, doc)
ostream.write("\n")
return ostream.getvalue()
def render(meta, rules: RuleSet, capabilities: MatchResults) -> str:

View File

@@ -9,8 +9,7 @@ import logging
import textwrap
from typing import Dict, Iterable, Optional
from rich.text import Text
from rich.table import Table
import tabulate
import capa.rules
import capa.helpers
@@ -23,7 +22,6 @@ import capa.render.result_document as rd
import capa.features.freeze.features as frzf
from capa.rules import RuleSet
from capa.engine import MatchResults
from capa.render.utils import Console
logger = logging.getLogger(__name__)
@@ -47,7 +45,7 @@ def hanging_indent(s: str, indent: int) -> str:
return textwrap.indent(s, prefix=prefix)[len(prefix) :]
def render_locations(console: Console, layout: rd.Layout, locations: Iterable[frz.Address], indent: int):
def render_locations(ostream, layout: rd.Layout, locations: Iterable[frz.Address], indent: int):
import capa.render.verbose as v
# it's possible to have an empty locations array here,
@@ -58,7 +56,7 @@ def render_locations(console: Console, layout: rd.Layout, locations: Iterable[frz.Address], indent: int):
if len(locations) == 0:
return
console.write(" @ ")
ostream.write(" @ ")
location0 = locations[0]
if len(locations) == 1:
@@ -66,58 +64,58 @@ def render_locations(console: Console, layout: rd.Layout, locations: Iterable[frz.Address], indent: int):
if location.type == frz.AddressType.CALL:
assert isinstance(layout, rd.DynamicLayout)
console.write(hanging_indent(v.render_call(layout, location), indent + 1))
ostream.write(hanging_indent(v.render_call(layout, location), indent + 1))
else:
console.write(v.format_address(locations[0]))
ostream.write(v.format_address(locations[0]))
elif location0.type == frz.AddressType.CALL and len(locations) > 1:
location = locations[0]
assert isinstance(layout, rd.DynamicLayout)
s = f"{v.render_call(layout, location)}\nand {(len(locations) - 1)} more..."
console.write(hanging_indent(s, indent + 1))
ostream.write(hanging_indent(s, indent + 1))
elif len(locations) > 4:
# don't display too many locations, because it becomes very noisy.
# probably only the first handful of locations will be useful for inspection.
console.write(", ".join(map(v.format_address, locations[0:4])))
console.write(f", and {(len(locations) - 4)} more...")
ostream.write(", ".join(map(v.format_address, locations[0:4])))
ostream.write(f", and {(len(locations) - 4)} more...")
elif len(locations) > 1:
console.write(", ".join(map(v.format_address, locations)))
ostream.write(", ".join(map(v.format_address, locations)))
else:
raise RuntimeError("unreachable")
def render_statement(console: Console, layout: rd.Layout, match: rd.Match, statement: rd.Statement, indent: int):
console.write(" " * indent)
def render_statement(ostream, layout: rd.Layout, match: rd.Match, statement: rd.Statement, indent: int):
ostream.write(" " * indent)
if isinstance(statement, rd.SubscopeStatement):
# emit `basic block:`
# rather than `subscope:`
console.write(statement.scope)
ostream.write(statement.scope)
console.write(":")
ostream.write(":")
if statement.description:
console.write(f" = {statement.description}")
console.writeln()
ostream.write(f" = {statement.description}")
ostream.writeln("")
elif isinstance(statement, (rd.CompoundStatement)):
# emit `and:` `or:` `optional:` `not:`
console.write(statement.type)
ostream.write(statement.type)
console.write(":")
ostream.write(":")
if statement.description:
console.write(f" = {statement.description}")
console.writeln()
ostream.write(f" = {statement.description}")
ostream.writeln("")
elif isinstance(statement, rd.SomeStatement):
console.write(f"{statement.count} or more:")
ostream.write(f"{statement.count} or more:")
if statement.description:
console.write(f" = {statement.description}")
console.writeln()
ostream.write(f" = {statement.description}")
ostream.writeln("")
elif isinstance(statement, rd.RangeStatement):
# `range` is a weird node, it's almost a hybrid of statement+feature.
@@ -135,25 +133,25 @@ def render_statement(console: Console, layout: rd.Layout, match: rd.Match, state
value = rutils.bold2(value)
if child.description:
console.write(f"count({child.type}({value} = {child.description})): ")
ostream.write(f"count({child.type}({value} = {child.description})): ")
else:
console.write(f"count({child.type}({value})): ")
ostream.write(f"count({child.type}({value})): ")
else:
console.write(f"count({child.type}): ")
ostream.write(f"count({child.type}): ")
if statement.max == statement.min:
console.write(f"{statement.min}")
ostream.write(f"{statement.min}")
elif statement.min == 0:
console.write(f"{statement.max} or fewer")
ostream.write(f"{statement.max} or fewer")
elif statement.max == (1 << 64 - 1):
console.write(f"{statement.min} or more")
ostream.write(f"{statement.min} or more")
else:
console.write(f"between {statement.min} and {statement.max}")
ostream.write(f"between {statement.min} and {statement.max}")
if statement.description:
console.write(f" = {statement.description}")
render_locations(console, layout, match.locations, indent)
console.writeln()
ostream.write(f" = {statement.description}")
render_locations(ostream, layout, match.locations, indent)
ostream.writeln("")
else:
raise RuntimeError("unexpected match statement type: " + str(statement))
@@ -164,9 +162,9 @@ def render_string_value(s: str) -> str:
def render_feature(
console: Console, layout: rd.Layout, rule: rd.RuleMatches, match: rd.Match, feature: frzf.Feature, indent: int
ostream, layout: rd.Layout, rule: rd.RuleMatches, match: rd.Match, feature: frzf.Feature, indent: int
):
console.write(" " * indent)
ostream.write(" " * indent)
key = feature.type
value: Optional[str]
@@ -207,14 +205,14 @@ def render_feature(
elif isinstance(feature, frzf.OperandOffsetFeature):
key = f"operand[{feature.index}].offset"
console.write(f"{key}: ")
ostream.write(f"{key}: ")
if value:
console.write(rutils.bold2(value))
ostream.write(rutils.bold2(value))
if feature.description:
console.write(capa.rules.DESCRIPTION_SEPARATOR)
console.write(feature.description)
ostream.write(capa.rules.DESCRIPTION_SEPARATOR)
ostream.write(feature.description)
if isinstance(feature, (frzf.OSFeature, frzf.ArchFeature, frzf.FormatFeature)):
# don't show the location of these global features
@@ -226,32 +224,35 @@ def render_feature(
elif isinstance(feature, (frzf.OSFeature, frzf.ArchFeature, frzf.FormatFeature)):
pass
else:
render_locations(console, layout, match.locations, indent)
console.writeln()
render_locations(ostream, layout, match.locations, indent)
ostream.write("\n")
else:
# like:
# regex: /blah/ = SOME_CONSTANT
# - "foo blah baz" @ 0x401000
# - "aaa blah bbb" @ 0x402000, 0x403400
console.writeln(f"{key}: {value}")
ostream.write(key)
ostream.write(": ")
ostream.write(value)
ostream.write("\n")
for capture, locations in sorted(match.captures.items()):
console.write(" " * (indent + 1))
console.write("- ")
console.write(rutils.bold2(render_string_value(capture)))
ostream.write(" " * (indent + 1))
ostream.write("- ")
ostream.write(rutils.bold2(render_string_value(capture)))
if isinstance(layout, rd.DynamicLayout) and rule.meta.scopes.dynamic == capa.rules.Scope.CALL:
# like above, don't re-render calls when in call scope.
pass
else:
render_locations(console, layout, locations, indent=indent)
console.writeln()
render_locations(ostream, layout, locations, indent=indent)
ostream.write("\n")
def render_node(console: Console, layout: rd.Layout, rule: rd.RuleMatches, match: rd.Match, node: rd.Node, indent: int):
def render_node(ostream, layout: rd.Layout, rule: rd.RuleMatches, match: rd.Match, node: rd.Node, indent: int):
if isinstance(node, rd.StatementNode):
render_statement(console, layout, match, node.statement, indent=indent)
render_statement(ostream, layout, match, node.statement, indent=indent)
elif isinstance(node, rd.FeatureNode):
render_feature(console, layout, rule, match, node.feature, indent=indent)
render_feature(ostream, layout, rule, match, node.feature, indent=indent)
else:
raise RuntimeError("unexpected node type: " + str(node))
@@ -264,9 +265,7 @@ MODE_SUCCESS = "success"
MODE_FAILURE = "failure"
def render_match(
console: Console, layout: rd.Layout, rule: rd.RuleMatches, match: rd.Match, indent=0, mode=MODE_SUCCESS
):
def render_match(ostream, layout: rd.Layout, rule: rd.RuleMatches, match: rd.Match, indent=0, mode=MODE_SUCCESS):
child_mode = mode
if mode == MODE_SUCCESS:
# display only nodes that evaluated successfully.
@@ -298,13 +297,13 @@ def render_match(
else:
raise RuntimeError("unexpected mode: " + mode)
render_node(console, layout, rule, match, match.node, indent=indent)
render_node(ostream, layout, rule, match, match.node, indent=indent)
for child in match.children:
render_match(console, layout, rule, child, indent=indent + 1, mode=child_mode)
render_match(ostream, layout, rule, child, indent=indent + 1, mode=child_mode)
def render_rules(console: Console, doc: rd.ResultDocument):
def render_rules(ostream, doc: rd.ResultDocument):
"""
like:
@@ -351,13 +350,13 @@ def render_rules(console: Console, doc: rd.ResultDocument):
if count == 1:
if rule.meta.lib:
lib_info = " (library rule)"
capability = Text.assemble(rutils.bold(rule.meta.name), f"{lib_info}")
capability = f"{rutils.bold(rule.meta.name)}{lib_info}"
else:
if rule.meta.lib:
lib_info = ", only showing first match of library rule"
capability = Text.assemble(rutils.bold(rule.meta.name), f" ({count} matches{lib_info})")
capability = f"{rutils.bold(rule.meta.name)} ({count} matches{lib_info})"
console.writeln(capability)
ostream.writeln(capability)
had_match = True
rows = []
@@ -403,14 +402,7 @@ def render_rules(console: Console, doc: rd.ResultDocument):
if rule.meta.description:
rows.append(("description", rule.meta.description))
grid = Table.grid(padding=(0, 2))
grid.add_column(style="dim")
grid.add_column()
for row in rows:
grid.add_row(*row)
console.writeln(grid)
ostream.writeln(tabulate.tabulate(rows, tablefmt="plain"))
if capa.rules.Scope.FILE in rule.meta.scopes:
matches = doc.rules[rule.meta.name].matches
@@ -421,58 +413,61 @@ def render_rules(console: Console, doc: rd.ResultDocument):
# so, lets be explicit about our assumptions and raise an exception if they fail.
raise RuntimeError(f"unexpected file scope match count: {len(matches)}")
_, first_match = matches[0]
render_match(console, doc.meta.analysis.layout, rule, first_match, indent=0)
render_match(ostream, doc.meta.analysis.layout, rule, first_match, indent=0)
else:
for location, match in sorted(doc.rules[rule.meta.name].matches):
if doc.meta.flavor == rd.Flavor.STATIC:
assert rule.meta.scopes.static is not None
console.write(rule.meta.scopes.static.value + " @ ")
console.write(capa.render.verbose.format_address(location))
ostream.write(rule.meta.scopes.static.value)
ostream.write(" @ ")
ostream.write(capa.render.verbose.format_address(location))
if rule.meta.scopes.static == capa.rules.Scope.BASIC_BLOCK:
func = frz.Address.from_capa(functions_by_bb[location.to_capa()])
console.write(f" in function {capa.render.verbose.format_address(func)}")
ostream.write(f" in function {capa.render.verbose.format_address(func)}")
elif doc.meta.flavor == rd.Flavor.DYNAMIC:
assert rule.meta.scopes.dynamic is not None
assert isinstance(doc.meta.analysis.layout, rd.DynamicLayout)
console.write(rule.meta.scopes.dynamic.value + " @ ")
ostream.write(rule.meta.scopes.dynamic.value)
ostream.write(" @ ")
if rule.meta.scopes.dynamic == capa.rules.Scope.PROCESS:
console.write(v.render_process(doc.meta.analysis.layout, location))
ostream.write(v.render_process(doc.meta.analysis.layout, location))
elif rule.meta.scopes.dynamic == capa.rules.Scope.THREAD:
console.write(v.render_thread(doc.meta.analysis.layout, location))
ostream.write(v.render_thread(doc.meta.analysis.layout, location))
elif rule.meta.scopes.dynamic == capa.rules.Scope.CALL:
console.write(hanging_indent(v.render_call(doc.meta.analysis.layout, location), indent=1))
ostream.write(hanging_indent(v.render_call(doc.meta.analysis.layout, location), indent=1))
else:
capa.helpers.assert_never(rule.meta.scopes.dynamic)
else:
capa.helpers.assert_never(doc.meta.flavor)
console.writeln()
render_match(console, doc.meta.analysis.layout, rule, match, indent=1)
ostream.write("\n")
render_match(ostream, doc.meta.analysis.layout, rule, match, indent=1)
if rule.meta.lib:
# only show first match
break
console.writeln()
ostream.write("\n")
if not had_match:
console.writeln(rutils.bold("no capabilities found"))
ostream.writeln(rutils.bold("no capabilities found"))
def render_vverbose(doc: rd.ResultDocument):
console = Console(highlight=False)
ostream = rutils.StringIO()
with console.capture() as capture:
capa.render.verbose.render_meta(console, doc)
console.writeln()
render_rules(console, doc)
console.writeln()
capa.render.verbose.render_meta(ostream, doc)
ostream.write("\n")
return capture.get()
render_rules(ostream, doc)
ostream.write("\n")
return ostream.getvalue()
def render(meta, rules: RuleSet, capabilities: MatchResults) -> str:

View File

@@ -575,15 +575,6 @@ def trim_dll_part(api: str) -> str:
return api
def unique(sequence):
"""deduplicate the items in the given sequence, returning a list with the same order.
via: https://stackoverflow.com/a/58666031
"""
seen = set()
return [x for x in sequence if not (x in seen or seen.add(x))] # type: ignore [func-returns-value]
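# e.g. unique([3, 1, 3, 2]) == [3, 1, 2]: the first occurrence of each
# item wins and relative order is preserved.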
def build_statements(d, scopes: Scopes):
if len(d.keys()) > 2:
raise InvalidRule("too many statements")
@@ -591,21 +582,21 @@ def build_statements(d, scopes: Scopes):
key = list(d.keys())[0]
description = pop_statement_description_entry(d[key])
if key == "and":
return ceng.And(unique(build_statements(dd, scopes) for dd in d[key]), description=description)
return ceng.And([build_statements(dd, scopes) for dd in d[key]], description=description)
elif key == "or":
return ceng.Or(unique(build_statements(dd, scopes) for dd in d[key]), description=description)
return ceng.Or([build_statements(dd, scopes) for dd in d[key]], description=description)
elif key == "not":
if len(d[key]) != 1:
raise InvalidRule("not statement must have exactly one child statement")
return ceng.Not(build_statements(d[key][0], scopes), description=description)
elif key.endswith(" or more"):
count = int(key[: -len("or more")])
return ceng.Some(count, unique(build_statements(dd, scopes) for dd in d[key]), description=description)
return ceng.Some(count, [build_statements(dd, scopes) for dd in d[key]], description=description)
elif key == "optional":
# `optional` is an alias for `0 or more`
# which is useful for documenting behaviors,
# like with `write file`, we might say that `WriteFile` is optionally found alongside `CreateFileA`.
return ceng.Some(0, unique(build_statements(dd, scopes) for dd in d[key]), description=description)
return ceng.Some(0, [build_statements(dd, scopes) for dd in d[key]], description=description)
elif key == "process":
if Scope.FILE not in scopes:
@@ -681,7 +672,7 @@ def build_statements(d, scopes: Scopes):
# - arch: i386
# - mnemonic: cmp
#
statements = ceng.And(unique(build_statements(dd, Scopes(static=Scope.INSTRUCTION)) for dd in d[key]))
statements = ceng.And([build_statements(dd, Scopes(static=Scope.INSTRUCTION)) for dd in d[key]])
return ceng.Subscope(Scope.INSTRUCTION, statements, description=description)
@@ -2130,14 +2121,12 @@ def get_rules(
rule_paths: List[RulePath],
cache_dir=None,
on_load_rule: Callable[[RulePath, int, int], None] = on_load_rule_default,
enable_cache: bool = True,
) -> RuleSet:
"""
args:
rule_paths: list of paths to rules files or directories containing rules files
cache_dir: directory to use for caching rules, or will use the default detected cache directory if None
on_load_rule: callback to invoke before a rule is loaded, use for progress or cancellation
enable_cache: enable loading of a cached ruleset (default: True)
"""
if cache_dir is None:
cache_dir = capa.rules.cache.get_default_cache_directory()
@@ -2149,10 +2138,9 @@ def get_rules(
# rule_file_paths[i] corresponds to rule_contents[i].
rule_contents = [file_path.read_bytes() for file_path in rule_file_paths]
if enable_cache:
ruleset = capa.rules.cache.load_cached_ruleset(cache_dir, rule_contents)
if ruleset is not None:
return ruleset
ruleset = capa.rules.cache.load_cached_ruleset(cache_dir, rule_contents)
if ruleset is not None:
return ruleset
rules: List[Rule] = []

View File

@@ -5,7 +5,7 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
__version__ = "7.4.0"
__version__ = "7.1.0"
def get_major_version():

View File

@@ -26,9 +26,7 @@
### Bug Fixes
### capa Explorer Web
### capa Explorer IDA Pro plugin
### capa explorer IDA Pro plugin
### Development
@@ -44,6 +42,5 @@
- [ ] [publish to PyPI](https://pypi.org/project/flare-capa)
- [ ] [create tag in capa rules](https://github.com/mandiant/capa-rules/tags)
- [ ] [create release in capa rules](https://github.com/mandiant/capa-rules/releases)
- [ ] Update [homepage](https://github.com/mandiant/capa/blob/master/web/public/index.html)
- [ ] [Spread the word](https://twitter.com)
- [ ] Update internal service

View File

@@ -9,22 +9,6 @@ Use the `-t` option to run rules with the given metadata value (see the rule fie
For example, `capa -t william.ballenthin@mandiant.com` runs rules that reference Willi's email address (probably as the author), or
`capa -t communication` runs rules with the namespace `communication`.
### only analyze selected functions
Use the `--restrict-to-functions` option to extract capabilities from only a selected set of functions. This is useful for analyzing
large functions and figuring out their capabilities and their addresses of occurrence; for example: PEB access, RC4 encryption, etc.
To use this, you can copy the virtual addresses from your favorite disassembler and pass them to capa as follows:
`capa sample.exe --restrict-to-functions 0x4019C0,0x401CD0`. If you add the `-v` option then capa will extract the interesting parts of a function for you.
### only analyze selected processes
Use the `--restrict-to-processes` option to extract capabilities from only a selected set of processes. This is useful for filtering the noise
generated from analyzing non-malicious processes that some sandboxes report, as well as for reducing the execution time
by not analyzing such processes in the first place.
To use this, you can pick the PIDs of the processes you are interested in from the sandbox-generated process tree (or from the sandbox-reported malware PID)
and pass that to capa as follows: `capa report.log --restrict-to-processes 3888,3214,4299`. If you add the `-v` option then capa will tell you
which threads perform what actions (encrypt/decrypt data, initiate a connection, etc.).
### IDA Pro plugin: capa explorer
Please check out the [capa explorer documentation](/capa/ida/plugin/README.md).
@@ -32,4 +16,4 @@ Please check out the [capa explorer documentation](/capa/ida/plugin/README.md).
Set the environment variable `CAPA_SAVE_WORKSPACE` to instruct the underlying analysis engine to
cache its intermediate results to the file system. For example, vivisect will create `.viv` files.
Subsequently, capa may run faster when reprocessing the same input file.
This is particularly useful during rule development as you repeatedly test a rule against a known sample.
This is particularly useful during rule development as you repeatedly test a rule against a known sample.
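For example (assuming any non-empty value enables this): `CAPA_SAVE_WORKSPACE=1 capa suspicious.exe_`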

View File

@@ -20,7 +20,7 @@ authors = [
description = "The FLARE team's open-source tool to identify capabilities in executable files."
readme = {file = "README.md", content-type = "text/markdown"}
license = {file = "LICENSE.txt"}
requires-python = ">=3.8.1"
requires-python = ">=3.8"
keywords = ["malware analysis", "reverse engineering", "capability detection", "software behaviors", "capa", "FLARE"]
classifiers = [
"Development Status :: 5 - Production/Stable",
@@ -65,8 +65,12 @@ dependencies = [
# or minor otherwise).
# As specific constraints are identified, please provide
# comments and context.
"tqdm>=4",
"pyyaml>=6",
"tabulate>=0.9",
"colorama>=0.4",
"termcolor>=2",
"wcwidth>=0.2",
"ida-settings>=2",
"ruamel.yaml>=0.18",
"pefile>=2023.2.7",
@@ -76,7 +80,6 @@ dependencies = [
"humanize>=4",
"protobuf>=5",
"msgspec>=0.18.6",
"xmltodict>=0.13.0",
# ---------------------------------------
# Dependencies that we develop
@@ -121,8 +124,8 @@ dev = [
"pytest-sugar==1.0.0",
"pytest-instafail==0.5.0",
"pytest-cov==5.0.0",
"flake8==7.1.1",
"flake8-bugbear==24.8.19",
"flake8==7.1.0",
"flake8-bugbear==24.4.26",
"flake8-encodings==0.5.1",
"flake8-comprehensions==3.15.0",
"flake8-logging-format==0.9.0",
@@ -132,29 +135,31 @@ dev = [
"flake8-simplify==0.21.0",
"flake8-use-pathlib==0.3.0",
"flake8-copyright==0.2.4",
"ruff==0.6.4",
"black==24.8.0",
"ruff==0.5.6",
"black==24.4.2",
"isort==5.13.2",
"mypy==1.11.2",
"mypy==1.11.1",
"mypy-protobuf==3.6.0",
"PyGithub==2.4.0",
"PyGithub==2.3.0",
# type stubs for mypy
"types-backports==0.1.3",
"types-colorama==0.4.15.11",
"types-PyYAML==6.0.8",
"types-psutil==6.0.0.20240901",
"types_requests==2.32.0.20240712",
"types-protobuf==5.28.0.20240924",
"deptry==0.20.0"
"types-tabulate==0.9.0.20240106",
"types-termcolor==1.1.4",
"types-psutil==5.8.23",
"types_requests==2.32.0.20240602",
"types-protobuf==5.27.0.20240626",
"deptry==0.17.0"
]
build = [
# Dev and build dependencies are not relaxed because
# we want all developer environments to be consistent.
# These dependencies are not used in production environments
# and should not conflict with other libraries/tooling.
"pyinstaller==6.10.0",
"setuptools==75.1.0",
"build==1.2.2"
"pyinstaller==6.9.0",
"setuptools==70.0.0",
"build==1.2.1"
]
scripts = [
"jschema_to_python==1.2.3",
@@ -167,8 +172,7 @@ scripts = [
[tool.deptry]
extend_exclude = [
"sigs",
"tests",
"web",
"tests"
]
# dependencies marked as first party, to inform deptry that they are local
@@ -177,9 +181,6 @@ known_first_party = [
"binaryninja",
"flirt",
"ghidra",
"idapro",
"ida_ida",
"ida_auto",
"ida_bytes",
"ida_entry",
"ida_funcs",
@@ -187,7 +188,6 @@ known_first_party = [
"ida_loader",
"ida_nalt",
"ida_segment",
"ida_ua",
"idaapi",
"idautils",
"idc",
@@ -230,7 +230,10 @@ DEP002 = [
"types-protobuf",
"types-psutil",
"types-PyYAML",
"types-tabulate",
"types-termcolor",
"types_requests",
"wcwidth"
]
# dependencies imported but missing from definitions

View File

@@ -8,11 +8,11 @@
# Kept up to date by dependabot.
annotated-types==0.7.0
colorama==0.4.6
cxxfilt==0.3.0
cxxfilt==0.2.2
dncil==1.0.2
dnfile==0.15.0
funcy==2.0
humanize==4.10.0
humanize==4.9.0
ida-netnode==3.0
ida-settings==2.1.0
intervaltree==3.1.0
@@ -20,28 +20,27 @@ markdown-it-py==3.0.0
mdurl==0.1.2
msgpack==1.0.8
networkx==3.1
pefile==2024.8.26
pip==24.2
protobuf==5.28.2
pyasn1==0.5.1
pyasn1-modules==0.3.0
pefile==2023.2.7
pip==24.1.2
protobuf==5.27.3
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.22
pydantic==2.9.2
# pydantic pins pydantic-core,
# but dependabot updates these separately (which is broken) and is annoying,
# so we rely on pydantic to pull in the right version of pydantic-core.
# pydantic-core==2.23.4
xmltodict==0.13.0
pydantic==2.7.3
pydantic-core==2.18.4
pyelftools==0.31
pygments==2.18.0
python-flirt==0.8.10
pyyaml==6.0.2
rich==13.9.2
pyyaml==6.0.1
rich==13.7.1
ruamel-yaml==0.18.6
ruamel-yaml-clib==0.2.8
setuptools==75.1.0
setuptools==70.0.0
six==1.16.0
sortedcontainers==2.4.0
tabulate==0.9.0
termcolor==2.4.0
tqdm==4.66.5
viv-utils==0.7.11
vivisect==1.2.1
msgspec==0.18.6
vivisect==1.1.1
wcwidth==0.2.13

rules

Submodule rules updated: 64b174e502...0e2500fa8a

View File

@@ -84,7 +84,8 @@ def main() -> int:
args = _parse_args()
try:
json_data = json.loads(Path(args.capa_output).read_text(encoding="utf-8"))
with Path(args.capa_output).open() as capa_output:
json_data = json.load(capa_output)
except ValueError:
logger.error("Input data was not valid JSON, input should be a capa json output file.")
return -1

View File

@@ -1,316 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import sys
import json
import time
import logging
import argparse
import contextlib
import statistics
import subprocess
import multiprocessing
from typing import Set, Dict, List, Optional
from pathlib import Path
from collections import Counter
from dataclasses import dataclass
from multiprocessing import Pool
import rich
import rich.box
import rich.table
import capa.main
logger = logging.getLogger("capa.compare-backends")
BACKENDS = ("vivisect", "ida", "binja")
@dataclass
class CapaInvocation:
path: Path
backend: str
duration: float
returncode: int
stdout: Optional[str]
stderr: Optional[str]
err: Optional[str]
def invoke_capa(file: Path, backend: str) -> CapaInvocation:
stdout = None
stderr = None
err = None
returncode: int
try:
logger.debug("run capa: %s: %s", backend, file.name)
t1 = time.time()
child = subprocess.run(
["python", "-m", "capa.main", "--json", "--backend=" + backend, str(file)],
capture_output=True,
check=True,
text=True,
encoding="utf-8",
)
returncode = child.returncode
stdout = child.stdout
stderr = child.stderr
except subprocess.CalledProcessError as e:
returncode = e.returncode
stdout = e.stdout
stderr = e.stderr
logger.debug("%s:%s: error", backend, file.name)
err = str(e)
else:
pass
finally:
t2 = time.time()
return CapaInvocation(
path=file,
backend=backend,
duration=t2 - t1,
returncode=returncode,
stdout=stdout,
stderr=stderr,
err=err,
)
def wrapper_invoke_capa(args):
file, backend = args
return invoke_capa(file, backend)
def collect(args):
results_path = args.results_path
if not results_path.is_file():
default_doc = {backend: {} for backend in BACKENDS} # type: ignore
results_path.write_text(json.dumps(default_doc), encoding="utf-8")
testfiles = Path(__file__).parent.parent / "tests" / "data"
for file in sorted(p for p in testfiles.glob("*")):
# remove leftover analysis files
# because IDA doesn't cleanup after itself, currently.
if file.suffix in (".til", ".id0", ".id1", ".id2", ".nam", ".viv"):
logger.debug("removing: %s", file)
with contextlib.suppress(IOError):
file.unlink()
doc = json.loads(results_path.read_text(encoding="utf-8"))
plan = []
for file in sorted(p for p in testfiles.glob("*")):
if not file.is_file():
continue
if file.is_dir():
continue
if file.name.startswith("."):
continue
if file.suffix not in (".exe_", ".dll_", ".elf_", ""):
continue
logger.debug("%s", file.name)
key = str(file)
for backend in BACKENDS:
if (backend, file.name) in {
("binja", "0953cc3b77ed2974b09e3a00708f88de931d681e2d0cb64afbaf714610beabe6.exe_")
}:
# this file takes 38GB+ and 20hrs+
# https://github.com/Vector35/binaryninja-api/issues/5951
continue
if key in doc[backend]:
if not args.retry_failures:
continue
if not doc[backend][key]["err"]:
# didn't previously fail, don't repeat work
continue
else:
# want to retry this previous failure
pass
plan.append((file, backend))
pool_size = multiprocessing.cpu_count() // 2
logger.info("work pool size: %d", pool_size)
with Pool(processes=pool_size) as pool:
for i, result in enumerate(pool.imap_unordered(wrapper_invoke_capa, plan)):
doc[result.backend][str(result.path)] = {
"path": str(result.path),
"returncode": result.returncode,
"stdout": result.stdout,
"stderr": result.stderr,
"err": result.err,
"duration": result.duration,
}
if i % 8 == 0:
logger.info("syncing output database")
results_path.write_text(json.dumps(doc))
logger.info(
"%.1f\t%s %s %s",
result.duration,
"(err)" if result.err else " ",
result.backend.ljust(8),
result.path.name,
)
results_path.write_text(json.dumps(doc))
return
def report(args):
doc = json.loads(args.results_path.read_text(encoding="utf-8"))
samples = set()
for backend in BACKENDS:
samples.update(doc[backend].keys())
failures_by_backend: Dict[str, Set[str]] = {backend: set() for backend in BACKENDS}
durations_by_backend: Dict[str, List[float]] = {backend: [] for backend in BACKENDS}
console = rich.get_console()
for key in sorted(samples):
sample = Path(key).name
console.print(sample, style="bold")
seen_rules: Counter[str] = Counter()
rules_by_backend: Dict[str, Set[str]] = {backend: set() for backend in BACKENDS}
for backend in BACKENDS:
if key not in doc[backend]:
continue
entry = doc[backend][key]
duration = entry["duration"]
if not entry["err"]:
matches = json.loads(entry["stdout"])["rules"].keys()
seen_rules.update(matches)
rules_by_backend[backend].update(matches)
durations_by_backend[backend].append(duration)
console.print(f" {backend: >8}: {duration: >6.1f}s {len(matches): >3d} matches")
else:
failures_by_backend[backend].add(sample)
console.print(f" {backend: >8}: {duration: >6.1f}s (error)")
if not seen_rules:
console.print()
continue
t = rich.table.Table(box=rich.box.SIMPLE, header_style="default")
t.add_column("viv")
t.add_column("ida")
t.add_column("bn")
t.add_column("rule")
for rule, _ in seen_rules.most_common():
t.add_row(
"x" if rule in rules_by_backend["vivisect"] else " ",
"x" if rule in rules_by_backend["ida"] else " ",
"x" if rule in rules_by_backend["binja"] else " ",
rule,
)
console.print(t)
for backend in BACKENDS:
console.print(f"failures for {backend}:", style="bold")
for failure in sorted(failures_by_backend[backend]):
console.print(f" - {failure}")
if not failures_by_backend[backend]:
console.print(" (none)", style="green")
console.print()
console.print("durations:", style="bold")
console.print(" (10-quantiles, in seconds)", style="grey37")
for backend in BACKENDS:
q = statistics.quantiles(durations_by_backend[backend], n=10)
console.print(f" {backend: <8}: ", end="")
for i in range(9):
if i in (4, 8):
style = "bold"
else:
style = "default"
console.print(f"{q[i]: >6.1f}", style=style, end=" ")
console.print()
console.print(" ^-- 10% of samples took less than this ^", style="grey37")
console.print(" 10% of samples took more than this -----------------+", style="grey37")
console.print()
for backend in BACKENDS:
total = sum(durations_by_backend[backend])
successes = len(durations_by_backend[backend])
avg = statistics.mean(durations_by_backend[backend])
console.print(
f" {backend: <8}: {total: >7.0f} seconds across {successes: >4d} successful runs, {avg: >4.1f} average"
)
console.print()
console.print("slowest samples:", style="bold")
for backend in BACKENDS:
console.print(backend)
for duration, path in sorted(
((d["duration"], Path(d["path"]).name) for d in doc[backend].values()), reverse=True
)[:5]:
console.print(f" - {duration: >6.1f} {path}")
return
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
default_samples_path = Path(__file__).resolve().parent.parent / "tests" / "data"
parser = argparse.ArgumentParser(description="Compare analysis backends.")
capa.main.install_common_args(
parser,
wanted=set(),
)
subparsers = parser.add_subparsers()
collect_parser = subparsers.add_parser("collect")
collect_parser.add_argument("results_path", type=Path, help="Path to output JSON file")
collect_parser.add_argument("--samples", type=Path, default=default_samples_path, help="Path to samples")
collect_parser.add_argument("--retry-failures", action="store_true", help="Retry previous failures")
collect_parser.set_defaults(func=collect)
report_parser = subparsers.add_parser("report")
report_parser.add_argument("results_path", type=Path, help="Path to JSON file")
report_parser.set_defaults(func=report)
args = parser.parse_args(args=argv)
try:
capa.main.handle_common_args(args)
except capa.main.ShouldExitError as e:
return e.status_code
args.func(args)
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,106 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import sys
import logging
import argparse
import importlib.util
import rich
import rich.table
import capa.main
from capa.features.extractors.ida.idalib import find_idalib, load_idalib, is_idalib_installed
from capa.features.extractors.binja.find_binja_api import find_binaryninja, load_binaryninja, is_binaryninja_installed
logger = logging.getLogger(__name__)
def is_vivisect_installed() -> bool:
try:
return importlib.util.find_spec("vivisect") is not None
except ModuleNotFoundError:
return False
def load_vivisect() -> bool:
try:
import vivisect # noqa: F401 unused import
return True
except ImportError:
return False
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Detect analysis backends.")
capa.main.install_common_args(parser, wanted=set())
args = parser.parse_args(args=argv)
try:
capa.main.handle_common_args(args)
except capa.main.ShouldExitError as e:
return e.status_code
if args.debug:
logging.getLogger("capa").setLevel(logging.DEBUG)
logging.getLogger("viv_utils").setLevel(logging.DEBUG)
else:
logging.getLogger("capa").setLevel(logging.ERROR)
logging.getLogger("viv_utils").setLevel(logging.ERROR)
table = rich.table.Table()
table.add_column("backend")
table.add_column("already installed?")
table.add_column("found?")
table.add_column("loads?")
if True:
row = ["vivisect"]
if is_vivisect_installed():
row.append("True")
row.append("-")
else:
row.append("False")
row.append("False")
row.append(str(load_vivisect()))
table.add_row(*row)
if True:
row = ["Binary Ninja"]
if is_binaryninja_installed():
row.append("True")
row.append("-")
else:
row.append("False")
row.append(str(find_binaryninja() is not None))
row.append(str(load_binaryninja()))
table.add_row(*row)
if True:
row = ["IDA idalib"]
if is_idalib_installed():
row.append("True")
row.append("-")
else:
row.append("False")
row.append(str(find_idalib() is not None))
row.append(str(load_idalib()))
table.add_row(*row)
rich.print(table)
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,112 +0,0 @@
#!/usr/bin/env python3
"""
Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at: [package root]/LICENSE.txt
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
detect-binexport2-capabilities.py
Detect capabilities in a BinExport2 file and write the results into the protobuf format.
Example:
$ python detect-binexport2-capabilities.py suspicious.BinExport2 | xxd | head
┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐
│00000000│ 0a d4 05 0a 1a 32 30 32 ┊ 33 2d 30 32 2d 31 30 20 │_.•_•202┊3-02-10 │
│00000010│ 31 31 3a 34 39 3a 35 32 ┊ 2e 36 39 33 34 30 30 12 │11:49:52┊.693400•│
│00000020│ 05 35 2e 30 2e 30 1a 34 ┊ 74 65 73 74 73 2f 64 61 │•5.0.0•4┊tests/da│
│00000030│ 74 61 2f 50 72 61 63 74 ┊ 69 63 61 6c 20 4d 61 6c │ta/Pract┊ical Mal│
│00000040│ 77 61 72 65 20 41 6e 61 ┊ 6c 79 73 69 73 20 4c 61 │ware Ana┊lysis La│
│00000050│ 62 20 30 31 2d 30 31 2e ┊ 64 6c 6c 5f 1a 02 2d 6a │b 01-01.┊dll_••-j│
│00000060│ 22 c4 01 0a 20 32 39 30 ┊ 39 33 34 63 36 31 64 65 │".•_ 290┊934c61de│
│00000070│ 39 31 37 36 61 64 36 38 ┊ 32 66 66 64 64 36 35 66 │9176ad68┊2ffdd65f│
│00000080│ 30 61 36 36 39 12 28 61 ┊ 34 62 33 35 64 65 37 31 │0a669•(a┊4b35de71│
"""
import sys
import logging
import argparse
import capa.main
import capa.rules
import capa.engine
import capa.loader
import capa.helpers
import capa.features
import capa.exceptions
import capa.render.proto
import capa.render.verbose
import capa.features.freeze
import capa.capabilities.common
import capa.render.result_document as rd
from capa.loader import FORMAT_BINEXPORT2, BACKEND_BINEXPORT2
logger = logging.getLogger("capa.detect-binexport2-capabilities")
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="detect capabilities in programs.")
capa.main.install_common_args(
parser,
wanted={"format", "os", "backend", "input_file", "signatures", "rules", "tag"},
)
args = parser.parse_args(args=argv)
try:
capa.main.handle_common_args(args)
capa.main.ensure_input_exists_from_cli(args)
input_format = capa.main.get_input_format_from_cli(args)
assert input_format == FORMAT_BINEXPORT2
backend = capa.main.get_backend_from_cli(args, input_format)
assert backend == BACKEND_BINEXPORT2
sample_path = capa.main.get_sample_path_from_cli(args, backend)
assert sample_path is not None
os_ = capa.loader.get_os(sample_path)
rules = capa.main.get_rules_from_cli(args)
extractor = capa.main.get_extractor_from_cli(args, input_format, backend)
# alternatively, if you have all this handy in your library code:
#
# extractor = capa.loader.get_extractor(
# args.input_file,
# FORMAT_BINEXPORT2,
# os_,
# BACKEND_BINEXPORT2,
# sig_paths=[],
# sample_path=sample_path,
# )
#
# or even more concisely:
#
# be2 = capa.features.extractors.binexport2.get_binexport2(input_path)
# buf = sample_path.read_bytes()
# extractor = capa.features.extractors.binexport2.extractor.BinExport2FeatureExtractor(be2, buf)
except capa.main.ShouldExitError as e:
return e.status_code
capabilities, counts = capa.capabilities.common.find_capabilities(rules, extractor)
meta = capa.loader.collect_metadata(argv, args.input_file, input_format, os_, args.rules, extractor, counts)
meta.analysis.layout = capa.loader.compute_layout(rules, extractor, capabilities)
doc = rd.ResultDocument.from_capa(meta, rules, capabilities)
pb = capa.render.proto.doc_to_pb2(doc)
sys.stdout.buffer.write(pb.SerializeToString(deterministic=True))
sys.stdout.flush()
return 0
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,463 +0,0 @@
#!/usr/bin/env python
"""
Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at: [package root]/LICENSE.txt
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import io
import sys
import time
import logging
import argparse
import contextlib
from typing import Dict, List, Optional
import capa.main
import capa.features.extractors.binexport2
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
logger = logging.getLogger("inspect-binexport2")
@contextlib.contextmanager
def timing(msg: str):
t0 = time.time()
yield
t1 = time.time()
logger.debug("perf: %s: %0.2fs", msg, t1 - t0)
class Renderer:
def __init__(self, o: io.StringIO):
self.o = o
self.indent = 0
@contextlib.contextmanager
def indenting(self):
self.indent += 1
try:
yield
finally:
self.indent -= 1
def write(self, s):
self.o.write(s)
def writeln(self, s):
self.o.write(" " * self.indent)
self.o.write(s)
self.o.write("\n")
@contextlib.contextmanager
def section(self, name):
self.writeln(name)
with self.indenting():
try:
yield
finally:
pass
self.writeln("/" + name)
self.writeln("")
def getvalue(self):
return self.o.getvalue()
# internal to `render_operand`
def _render_expression_tree(
be2: BinExport2,
operand: BinExport2.Operand,
expression_tree: List[List[int]],
tree_index: int,
o: io.StringIO,
):
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
if expression.type == BinExport2.Expression.REGISTER:
o.write(expression.symbol)
assert len(children_tree_indexes) == 0
return
elif expression.type == BinExport2.Expression.SYMBOL:
o.write(expression.symbol)
assert len(children_tree_indexes) <= 1
if len(children_tree_indexes) == 0:
return
elif len(children_tree_indexes) == 1:
# like: v
# from: mov v0.D[0x1], x9
# |
# 0
# .
# |
# D
child_index = children_tree_indexes[0]
_render_expression_tree(be2, operand, expression_tree, child_index, o)
return
else:
raise NotImplementedError(len(children_tree_indexes))
elif expression.type == BinExport2.Expression.IMMEDIATE_INT:
o.write(f"0x{expression.immediate:X}")
assert len(children_tree_indexes) == 0
return
elif expression.type == BinExport2.Expression.SIZE_PREFIX:
# like: b4
#
# We might want to use this occasionally, such as to disambiguate the
# size of MOVs into/out of memory. But I'm not sure when/where we need that yet.
#
# IDA spams this size prefix hint *everywhere*, so we can't rely on the exporter
# to provide it only when necessary.
assert len(children_tree_indexes) == 1
child_index = children_tree_indexes[0]
_render_expression_tree(be2, operand, expression_tree, child_index, o)
return
elif expression.type == BinExport2.Expression.OPERATOR:
if len(children_tree_indexes) == 1:
# prefix operator, like "ds:"
if expression.symbol != "!":
o.write(expression.symbol)
child_index = children_tree_indexes[0]
_render_expression_tree(be2, operand, expression_tree, child_index, o)
# postfix operator, like "!" in aarch operand "[x1, 8]!"
if expression.symbol == "!":
o.write(expression.symbol)
return
elif len(children_tree_indexes) == 2:
# infix operator: like "+" in "ebp+10"
child_a = children_tree_indexes[0]
child_b = children_tree_indexes[1]
_render_expression_tree(be2, operand, expression_tree, child_a, o)
o.write(expression.symbol)
_render_expression_tree(be2, operand, expression_tree, child_b, o)
return
elif len(children_tree_indexes) == 3:
# infix operator: like "+" in "ebp+ecx+10"
child_a = children_tree_indexes[0]
child_b = children_tree_indexes[1]
child_c = children_tree_indexes[2]
_render_expression_tree(be2, operand, expression_tree, child_a, o)
o.write(expression.symbol)
_render_expression_tree(be2, operand, expression_tree, child_b, o)
o.write(expression.symbol)
_render_expression_tree(be2, operand, expression_tree, child_c, o)
return
else:
raise NotImplementedError(len(children_tree_indexes))
elif expression.type == BinExport2.Expression.DEREFERENCE:
o.write("[")
assert len(children_tree_indexes) == 1
child_index = children_tree_indexes[0]
_render_expression_tree(be2, operand, expression_tree, child_index, o)
o.write("]")
return
elif expression.type == BinExport2.Expression.IMMEDIATE_FLOAT:
raise NotImplementedError(expression.type)
else:
raise NotImplementedError(expression.type)
_OPERAND_CACHE: Dict[int, str] = {}
def render_operand(be2: BinExport2, operand: BinExport2.Operand, index: Optional[int] = None) -> str:
# For the mimikatz example file, there are 138k distinct operands.
# Of those, only 11k are unique, which is less than 10% of the total.
# The most common operands are seen 37k, 24k, 17k, 15k, 11k, ... times.
# In other words, the most common five operands account for 100k instances,
# which is around 75% of operand instances.
# Therefore, we expect caching to be fruitful, trading memory for CPU time.
#
# No caching: 6.045 s ± 0.164 s [User: 5.916 s, System: 0.129 s]
# With caching: 4.259 s ± 0.161 s [User: 4.141 s, System: 0.117 s]
#
# So we can save 30% of CPU time by caching operand rendering.
#
# Other measurements:
#
# perf: loading BinExport2: 0.06s
# perf: indexing BinExport2: 0.34s
# perf: rendering BinExport2: 1.96s
# perf: writing BinExport2: 1.13s
# ________________________________________________________
# Executed in 4.40 secs fish external
# usr time 4.22 secs 0.00 micros 4.22 secs
# sys time 0.18 secs 842.00 micros 0.18 secs
if index and index in _OPERAND_CACHE:
return _OPERAND_CACHE[index]
o = io.StringIO()
tree = capa.features.extractors.binexport2.helpers._build_expression_tree(be2, operand)
_render_expression_tree(be2, operand, tree, 0, o)
s = o.getvalue()
if index:
_OPERAND_CACHE[index] = s
return s
def inspect_operand(be2: BinExport2, operand: BinExport2.Operand):
expression_tree = capa.features.extractors.binexport2.helpers._build_expression_tree(be2, operand)
def rec(tree_index, indent=0):
expression_index = operand.expression_index[tree_index]
expression = be2.expression[expression_index]
children_tree_indexes: List[int] = expression_tree[tree_index]
NEWLINE = "\n"
print(f" {' ' * indent}expression: {str(expression).replace(NEWLINE, ', ')}")
for child_index in children_tree_indexes:
rec(child_index, indent + 1)
rec(0)
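# for an operand with a single REGISTER expression such as "x1",
# the dump above might look like (illustrative):
#
#   expression: type: REGISTER, symbol: "x1"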
def inspect_instruction(be2: BinExport2, instruction: BinExport2.Instruction, address: int):
mnemonic = be2.mnemonic[instruction.mnemonic_index]
print("instruction:")
print(f" address: {hex(address)}")
print(f" mnemonic: {mnemonic.name}")
print(" operands:")
for i, operand_index in enumerate(instruction.operand_index):
print(f" - operand {i}: [{operand_index}]")
operand = be2.operand[operand_index]
        # Ghidra bug: empty operands (no expressions) may exist,
        # so we skip those for now (see https://github.com/NationalSecurityAgency/ghidra/issues/6817)
if len(operand.expression_index) > 0:
inspect_operand(be2, operand)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Inspect BinExport2 files")
capa.main.install_common_args(parser, wanted={"input_file"})
parser.add_argument("--instruction", type=lambda v: int(v, 0))
args = parser.parse_args(args=argv)
try:
capa.main.handle_common_args(args)
except capa.main.ShouldExitError as e:
return e.status_code
o = Renderer(io.StringIO())
with timing("loading BinExport2"):
be2: BinExport2 = capa.features.extractors.binexport2.get_binexport2(args.input_file)
with timing("indexing BinExport2"):
idx = capa.features.extractors.binexport2.BinExport2Index(be2)
t0 = time.time()
with o.section("meta"):
o.writeln(f"name: {be2.meta_information.executable_name}")
o.writeln(f"sha256: {be2.meta_information.executable_id}")
o.writeln(f"arch: {be2.meta_information.architecture_name}")
o.writeln(f"ts: {be2.meta_information.timestamp}")
with o.section("modules"):
for module in be2.module:
o.writeln(f"- {module.name}")
if not be2.module:
o.writeln("(none)")
with o.section("sections"):
for section in be2.section:
perms = ""
perms += "r" if section.flag_r else "-"
perms += "w" if section.flag_w else "-"
perms += "x" if section.flag_x else "-"
o.writeln(f"- {hex(section.address)} {perms} {hex(section.size)}")
with o.section("libraries"):
for library in be2.library:
o.writeln(
f"- {library.name:<12s} {'(static)' if library.is_static else ''}{(' at ' + hex(library.load_address)) if library.HasField('load_address') else ''}"
)
if not be2.library:
o.writeln("(none)")
with o.section("functions"):
for vertex_index, vertex in enumerate(be2.call_graph.vertex):
if not vertex.HasField("address"):
continue
with o.section(f"function {idx.get_function_name_by_vertex(vertex_index)} @ {hex(vertex.address)}"):
o.writeln(f"type: {vertex.Type.Name(vertex.type)}")
if vertex.HasField("mangled_name"):
o.writeln(f"name: {vertex.mangled_name}")
if vertex.HasField("demangled_name"):
o.writeln(f"demangled: {vertex.demangled_name}")
if vertex.HasField("library_index"):
# TODO(williballenthin): this seems to be incorrect for Ghidra exporter
# https://github.com/mandiant/capa/issues/1755
library = be2.library[vertex.library_index]
o.writeln(f"library: [{vertex.library_index}] {library.name}")
if vertex.HasField("module_index"):
module = be2.module[vertex.module_index]
o.writeln(f"module: [{vertex.module_index}] {module.name}")
if idx.callees_by_vertex_index[vertex_index] or idx.callers_by_vertex_index[vertex_index]:
o.writeln("xrefs:")
for caller_index in idx.callers_by_vertex_index[vertex_index]:
o.writeln(f"{idx.get_function_name_by_vertex(caller_index)}")
for callee_index in idx.callees_by_vertex_index[vertex_index]:
o.writeln(f"{idx.get_function_name_by_vertex(callee_index)}")
if vertex.address not in idx.flow_graph_index_by_address:
o.writeln("(no flow graph)")
else:
flow_graph_index = idx.flow_graph_index_by_address[vertex.address]
flow_graph = be2.flow_graph[flow_graph_index]
o.writeln("")
for basic_block_index in flow_graph.basic_block_index:
basic_block = be2.basic_block[basic_block_index]
basic_block_address = idx.get_basic_block_address(basic_block_index)
with o.section(f"basic block {hex(basic_block_address)}"):
for edge in idx.target_edges_by_basic_block_index[basic_block_index]:
if edge.type == BinExport2.FlowGraph.Edge.Type.CONDITION_FALSE:
continue
source_basic_block_index = edge.source_basic_block_index
source_basic_block_address = idx.get_basic_block_address(source_basic_block_index)
o.writeln(
f"{BinExport2.FlowGraph.Edge.Type.Name(edge.type)} basic block {hex(source_basic_block_address)}"
)
for instruction_index, instruction, instruction_address in idx.basic_block_instructions(
basic_block
):
mnemonic = be2.mnemonic[instruction.mnemonic_index]
operands = []
for operand_index in instruction.operand_index:
operand = be2.operand[operand_index]
                                    # Ghidra bug: empty operands (no expressions) may exist,
                                    # so we skip those for now (see https://github.com/NationalSecurityAgency/ghidra/issues/6817)
if len(operand.expression_index) > 0:
operands.append(render_operand(be2, operand, index=operand_index))
call_targets = ""
if instruction.call_target:
call_targets = " "
for call_target_address in instruction.call_target:
call_target_name = idx.get_function_name_by_address(call_target_address)
call_targets += f"→ function {call_target_name} @ {hex(call_target_address)} "
data_references = ""
if instruction_index in idx.data_reference_index_by_source_instruction_index:
data_references = " "
for data_reference_index in idx.data_reference_index_by_source_instruction_index[
instruction_index
]:
data_reference = be2.data_reference[data_reference_index]
data_reference_address = data_reference.address
data_references += f"⇥ data {hex(data_reference_address)} "
string_references = ""
if instruction_index in idx.string_reference_index_by_source_instruction_index:
string_references = " "
for (
string_reference_index
) in idx.string_reference_index_by_source_instruction_index[instruction_index]:
string_reference = be2.string_reference[string_reference_index]
string_index = string_reference.string_table_index
string = be2.string_table[string_index]
string_references += f'⇥ string "{string.rstrip()}" '
comments = ""
if instruction.comment_index:
comments = " "
for comment_index in instruction.comment_index:
comment = be2.comment[comment_index]
comment_string = be2.string_table[comment.string_table_index]
comments += f"; {BinExport2.Comment.Type.Name(comment.type)} {comment_string} "
o.writeln(
f"{hex(instruction_address)} {mnemonic.name:<12s}{', '.join(operands):<14s}{call_targets}{data_references}{string_references}{comments}"
)
does_fallthrough = False
for edge in idx.source_edges_by_basic_block_index[basic_block_index]:
if edge.type == BinExport2.FlowGraph.Edge.Type.CONDITION_FALSE:
does_fallthrough = True
continue
back_edge = ""
if edge.HasField("is_back_edge") and edge.is_back_edge:
back_edge = ""
target_basic_block_index = edge.target_basic_block_index
target_basic_block_address = idx.get_basic_block_address(target_basic_block_index)
o.writeln(
f"{BinExport2.FlowGraph.Edge.Type.Name(edge.type)} basic block {hex(target_basic_block_address)} {back_edge}"
)
if does_fallthrough:
o.writeln("↓ CONDITION_FALSE")
with o.section("data"):
for data_address in sorted(idx.data_reference_index_by_target_address.keys()):
if data_address in idx.insn_address_by_index:
# appears to be code
continue
data_xrefs: List[int] = []
for data_reference_index in idx.data_reference_index_by_target_address[data_address]:
data_reference = be2.data_reference[data_reference_index]
instruction_address = idx.get_insn_address(data_reference.instruction_index)
data_xrefs.append(instruction_address)
if not data_xrefs:
continue
o.writeln(f"{hex(data_address)}{hex(data_xrefs[0])}")
for data_xref in data_xrefs[1:]:
o.writeln(f"{' ' * len(hex(data_address))}{hex(data_xref)}")
t1 = time.time()
logger.debug("perf: rendering BinExport2: %0.2fs", t1 - t0)
with timing("writing to STDOUT"):
print(o.getvalue())
if args.instruction:
insn = idx.insn_by_address[args.instruction]
inspect_instruction(be2, insn, args.instruction)
if __name__ == "__main__":
sys.exit(main())
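# usage sketch (hypothetical invocation; the script and sample names are assumed):
#
#   python scripts/inspect-binexport2.py suspicious.BinExport
#   python scripts/inspect-binexport2.py --instruction 0x401000 suspicious.BinExport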

View File

@@ -31,9 +31,11 @@ from typing import Set, Dict, List
from pathlib import Path
from dataclasses import field, dataclass
import tqdm
import pydantic
import termcolor
import ruamel.yaml
from rich import print
import tqdm.contrib.logging
import capa.main
import capa.rules
@@ -49,6 +51,18 @@ from capa.render.result_document import RuleMetadata
logger = logging.getLogger("lint")
def red(s):
return termcolor.colored(s, "red")
def orange(s):
return termcolor.colored(s, "yellow")
def green(s):
return termcolor.colored(s, "green")
@dataclass
class Context:
"""
@@ -66,8 +80,8 @@ class Context:
class Lint:
WARN = "[yellow]WARN[/yellow]"
FAIL = "[red]FAIL[/red]"
WARN = orange("WARN")
FAIL = red("FAIL")
name = "lint"
level = FAIL
@@ -882,7 +896,7 @@ def lint_rule(ctx: Context, rule: Rule):
if (not lints_failed) and (not lints_warned) and has_examples:
print("")
print(f'{" (nursery) " if is_nursery_rule(rule) else ""} {rule.name}')
print(f" {Lint.WARN}: '[green]no lint failures[/green]': Graduate the rule")
print(f" {Lint.WARN}: {green('no lint failures')}: Graduate the rule")
print("")
else:
lints_failed = len(tuple(filter(lambda v: v.level == Lint.FAIL, violations)))
@@ -907,15 +921,12 @@ def lint(ctx: Context):
ret = {}
source_rules = [rule for rule in ctx.rules.rules.values() if not rule.is_subscope_rule()]
n_rules: int = len(source_rules)
with capa.helpers.CapaProgressBar(transient=True, console=capa.helpers.log_console) as pbar:
task = pbar.add_task(description="linting", total=n_rules, unit="rule")
for rule in source_rules:
name = rule.name
pbar.update(task, description=width(f"linting rule: {name}", 48))
ret[name] = lint_rule(ctx, rule)
pbar.advance(task)
with tqdm.contrib.logging.tqdm_logging_redirect(source_rules, unit="rule", leave=False) as pbar:
with capa.helpers.redirecting_print_to_tqdm(False):
for rule in pbar:
name = rule.name
pbar.set_description(width(f"linting rule: {name}", 48))
ret[name] = lint_rule(ctx, rule)
return ret
@@ -1009,18 +1020,18 @@ def main(argv=None):
logger.debug("lints ran for ~ %02d:%02dm", min, sec)
if warned_rules:
print("[yellow]rules with WARN:[/yellow]")
print(orange("rules with WARN:"))
for warned_rule in sorted(warned_rules):
print(" - " + warned_rule)
print()
if failed_rules:
print("[red]rules with FAIL:[/red]")
print(red("rules with FAIL:"))
for failed_rule in sorted(failed_rules):
print(" - " + failed_rule)
return 1
else:
logger.info("[green]no lints failed, nice![/green]")
logger.info(green("no lints failed, nice!"))
return 0

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env python
"""
Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at: [package root]/LICENSE.txt
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Extract files relevant to capa analysis from VMRay Analysis Archive and create a new ZIP file.
"""
import sys
import logging
import zipfile
import argparse
from pathlib import Path
from capa.features.extractors.vmray import DEFAULT_ARCHIVE_PASSWORD, VMRayAnalysis
logger = logging.getLogger(__name__)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description="Minimize VMRay Analysis Archive to ZIP file only containing relevant files"
)
parser.add_argument(
"analysis_archive",
type=Path,
help="path to VMRay Analysis Archive downloaded from Dynamic Analysis Report page",
)
parser.add_argument(
"-p", "--password", type=str, default="infected", help="password used to unzip and zip protected archives"
)
args = parser.parse_args(args=argv)
analysis_archive = args.analysis_archive
vmra = VMRayAnalysis(analysis_archive)
sv2_json = vmra.zipfile.read("logs/summary_v2.json", pwd=DEFAULT_ARCHIVE_PASSWORD)
flog_xml = vmra.zipfile.read("logs/flog.xml", pwd=DEFAULT_ARCHIVE_PASSWORD)
sample_file_buf = vmra.sample_file_buf
assert vmra.sample_file_analysis is not None
sample_sha256: str = vmra.sample_file_analysis.hash_values.sha256.lower()
new_zip_name = f"{analysis_archive.parent / analysis_archive.stem}_min.zip"
with zipfile.ZipFile(new_zip_name, "w") as new_zip:
new_zip.writestr("logs/summary_v2.json", sv2_json)
new_zip.writestr("logs/flog.xml", flog_xml)
new_zip.writestr(f"internal/static_analyses/{sample_sha256}/objects/files/{sample_sha256}", sample_file_buf)
new_zip.setpassword(args.password.encode("ascii"))
# ensure capa loads the minimized archive
assert isinstance(VMRayAnalysis(Path(new_zip_name)), VMRayAnalysis)
print(f"Created minimized VMRay archive '{new_zip_name}' with password '{args.password}'.")
if __name__ == "__main__":
sys.exit(main())

View File

@@ -42,10 +42,9 @@ import logging
import argparse
import subprocess
import tqdm
import humanize
from rich import box
from rich.table import Table
from rich.console import Console
import tabulate
import capa.main
import capa.perf
@@ -93,60 +92,50 @@ def main(argv=None):
except capa.main.ShouldExitError as e:
return e.status_code
with capa.helpers.CapaProgressBar(console=capa.helpers.log_console) as progress:
total_iterations = args.number * args.repeat
task = progress.add_task("profiling", total=total_iterations)
with tqdm.tqdm(total=args.number * args.repeat, leave=False) as pbar:
def do_iteration():
capa.perf.reset()
capa.capabilities.common.find_capabilities(rules, extractor, disable_progress=True)
progress.advance(task)
pbar.update(1)
samples = timeit.repeat(do_iteration, number=args.number, repeat=args.repeat)
logger.debug("perf: find capabilities: min: %0.2fs", (min(samples) / float(args.number)))
logger.debug(
"perf: find capabilities: avg: %0.2fs",
(sum(samples) / float(args.repeat) / float(args.number)),
)
logger.debug("perf: find capabilities: avg: %0.2fs", (sum(samples) / float(args.repeat) / float(args.number)))
logger.debug("perf: find capabilities: max: %0.2fs", (max(samples) / float(args.number)))
for counter, count in capa.perf.counters.most_common():
logger.debug("perf: counter: %s: %s", counter, count)
console = Console()
table1 = Table(box=box.MARKDOWN)
table1.add_column("feature class")
table1.add_column("evaluation count")
for counter, count in capa.perf.counters.most_common():
table1.add_row(counter, humanize.intcomma(count))
console.print(table1)
console.print()
table2 = Table(box=box.MARKDOWN)
table2.add_column("label")
table2.add_column("count(evaluations)", style="magenta")
table2.add_column("min(time)", style="green")
table2.add_column("avg(time)", style="yellow")
table2.add_column("max(time)", style="red")
table2.add_row(
args.label,
# python documentation indicates that min(samples) should be preferred,
# so lets put that first.
#
# https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat
"{:,}".format(capa.perf.counters["evaluate.feature"]),
f"{(min(samples) / float(args.number)):.2f}s",
f"{(sum(samples) / float(args.repeat) / float(args.number)):.2f}s",
f"{(max(samples) / float(args.number)):.2f}s",
print(
tabulate.tabulate(
[(counter, humanize.intcomma(count)) for counter, count in capa.perf.counters.most_common()],
headers=["feature class", "evaluation count"],
tablefmt="github",
)
)
print()
console.print(table2)
print(
tabulate.tabulate(
[
(
args.label,
"{:,}".format(capa.perf.counters["evaluate.feature"]),
# python documentation indicates that min(samples) should be preferred,
# so lets put that first.
#
# https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat
f"{(min(samples) / float(args.number)):.2f}s",
f"{(sum(samples) / float(args.repeat) / float(args.number)):.2f}s",
f"{(max(samples) / float(args.number)):.2f}s",
)
],
headers=["label", "count(evaluations)", "min(time)", "avg(time)", "max(time)"],
tablefmt="github",
)
)
return 0

View File

@@ -229,37 +229,40 @@ def print_dynamic_features(processes, extractor: DynamicFeatureExtractor):
for p in processes:
print(f"proc: {extractor.get_process_name(p)} (ppid={p.address.ppid}, pid={p.address.pid})")
for feature, _ in extractor.extract_process_features(p):
for feature, addr in extractor.extract_process_features(p):
if is_global_feature(feature):
continue
print(f" proc: {extractor.get_process_name(p)}: {feature}")
for t in extractor.get_threads(p):
print(f" thread: {t.address.tid}")
for feature, addr in extractor.extract_thread_features(p, t):
if is_global_feature(feature):
continue
if feature != Feature(0):
print(f" {format_address(addr)}: {feature}")
for call in extractor.get_calls(p, t):
apis = []
arguments = []
for feature, addr in extractor.extract_call_features(p, t, call):
for t in extractor.get_threads(p):
print(f" thread: {t.address.tid}")
for feature, addr in extractor.extract_thread_features(p, t):
if is_global_feature(feature):
continue
if isinstance(feature, API):
assert isinstance(addr, capa.features.address.DynamicCallAddress)
apis.append((addr.id, str(feature.value)))
if feature != Feature(0):
print(f" {format_address(addr)}: {feature}")
if isinstance(feature, (Number, String)):
arguments.append(str(feature.value))
for call in extractor.get_calls(p, t):
apis = []
arguments = []
for feature, addr in extractor.extract_call_features(p, t, call):
if is_global_feature(feature):
continue
for cid, api in apis:
print(f" call {cid}: {api}({', '.join(arguments)})")
if isinstance(feature, API):
assert isinstance(addr, capa.features.address.DynamicCallAddress)
apis.append((addr.id, str(feature.value)))
if isinstance(feature, (Number, String)):
arguments.append(str(feature.value))
if not apis:
print(f" arguments=[{', '.join(arguments)}]")
for cid, api in apis:
print(f" call {cid}: {api}({', '.join(arguments)})")
def ida_main():

View File

@@ -12,12 +12,11 @@ import sys
import typing
import logging
import argparse
from typing import Set, List, Tuple
from typing import Set, Tuple
from collections import Counter
from rich import print
from rich.text import Text
from rich.table import Table
import tabulate
from termcolor import colored
import capa.main
import capa.rules
@@ -78,30 +77,23 @@ def get_file_features(
return feature_map
def get_colored(s: str) -> Text:
def get_colored(s: str):
if "(" in s and ")" in s:
s_split = s.split("(", 1)
return Text.assemble(s_split[0], "(", (s_split[1][:-1], "cyan"), ")")
s_color = colored(s_split[1][:-1], "cyan")
return f"{s_split[0]}({s_color})"
else:
return Text(s, style="cyan")
return colored(s, "cyan")
def print_unused_features(feature_map: typing.Counter[Feature], rules_feature_set: Set[Feature]):
unused_features: List[Tuple[str, Text]] = []
unused_features = []
for feature, count in reversed(feature_map.most_common()):
if feature in rules_feature_set:
continue
unused_features.append((str(count), get_colored(str(feature))))
table = Table(title="Unused Features", box=None)
table.add_column("Count", style="dim")
table.add_column("Feature")
for count_str, feature_text in unused_features:
table.add_row(count_str, feature_text)
print("\n")
print(table)
print(tabulate.tabulate(unused_features, headers=["Count", "Feature"], tablefmt="plain"))
print("\n")

View File

@@ -209,13 +209,6 @@ def get_drakvuf_extractor(path):
return DrakvufExtractor.from_report(report)
@lru_cache(maxsize=1)
def get_vmray_extractor(path):
from capa.features.extractors.vmray.extractor import VMRayExtractor
return VMRayExtractor.from_zipfile(path)
@lru_cache(maxsize=1)
def get_ghidra_extractor(path: Path):
import capa.features.extractors.ghidra.extractor
@@ -226,19 +219,6 @@ def get_ghidra_extractor(path: Path):
return extractor
@lru_cache(maxsize=1)
def get_binexport_extractor(path):
import capa.features.extractors.binexport2
import capa.features.extractors.binexport2.extractor
be2 = capa.features.extractors.binexport2.get_binexport2(path)
search_paths = [CD / "data", CD / "data" / "aarch64"]
path = capa.features.extractors.binexport2.get_sample_from_binexport2(path, be2, search_paths)
buf = path.read_bytes()
return capa.features.extractors.binexport2.extractor.BinExport2FeatureExtractor(be2, buf)
def extract_global_features(extractor):
features = collections.defaultdict(set)
for feature, va in extractor.extract_global_features():
@@ -415,7 +395,7 @@ def get_data_path_by_name(name) -> Path:
/ "v2.2"
/ "d46900384c78863420fb3e297d0a2f743cd2b6b3f7f82bf64059a168e07aceb7.json.gz"
)
elif name.startswith("93b2d1-drakvuf"):
elif name.startswith("93b2d1"):
return (
CD
/ "data"
@@ -423,22 +403,6 @@ def get_data_path_by_name(name) -> Path:
/ "drakvuf"
/ "93b2d1840566f45fab674ebc79a9d19c88993bcb645e0357f3cb584d16e7c795.log.gz"
)
elif name.startswith("93b2d1-vmray"):
return (
CD
/ "data"
/ "dynamic"
/ "vmray"
/ "93b2d1840566f45fab674ebc79a9d19c88993bcb645e0357f3cb584d16e7c795_min_archive.zip"
)
elif name.startswith("2f8a79-vmray"):
return (
CD
/ "data"
/ "dynamic"
/ "vmray"
/ "2f8a79b12a7a989ac7e5f6ec65050036588a92e65aeb6841e08dc228ff0e21b4_min_archive.zip"
)
elif name.startswith("ea2876"):
return CD / "data" / "ea2876e9175410b6f6719f80ee44b9553960758c7d0f7bed73c0fe9a78d8e669.dll_"
elif name.startswith("1038a2"):
@@ -449,20 +413,6 @@ def get_data_path_by_name(name) -> Path:
return CD / "data" / "dotnet" / "dd9098ff91717f4906afe9dafdfa2f52.exe_"
elif name.startswith("nested_typeref"):
return CD / "data" / "dotnet" / "2c7d60f77812607dec5085973ff76cea.dll_"
elif name.startswith("687e79.ghidra.be2"):
return (
CD
/ "data"
/ "binexport2"
/ "687e79cde5b0ced75ac229465835054931f9ec438816f2827a8be5f3bd474929.elf_.ghidra.BinExport"
)
elif name.startswith("d1e650.ghidra.be2"):
return (
CD
/ "data"
/ "binexport2"
/ "d1e6506964edbfffb08c0dd32e1486b11fbced7a4bd870ffe79f110298f0efb8.elf_.ghidra.BinExport"
)
else:
raise ValueError(f"unexpected sample fixture: {name}")
@@ -826,9 +776,7 @@ FEATURE_PRESENCE_TESTS = sorted(
("mimikatz", "function=0x40105D", capa.features.insn.Offset(0x8), False),
("mimikatz", "function=0x40105D", capa.features.insn.Offset(0x10), False),
# insn/offset: negative
# 0x4012b4 MOVZX ECX, [EAX+0xFFFFFFFFFFFFFFFF]
("mimikatz", "function=0x4011FB", capa.features.insn.Offset(-0x1), True),
# 0x4012b8 MOVZX EAX, [EAX+0xFFFFFFFFFFFFFFFE]
("mimikatz", "function=0x4011FB", capa.features.insn.Offset(-0x2), True),
#
# insn/offset from mnemonic: add
@@ -851,7 +799,7 @@ FEATURE_PRESENCE_TESTS = sorted(
# should not be considered, lea operand invalid encoding
# .text:004717B1 8D 4C 31 D0 lea ecx, [ecx+esi-30h]
("mimikatz", "function=0x47153B,bb=0x4717AB,insn=0x4717B1", capa.features.insn.Number(-0x30), False),
# yes, this is also a number (imagine ebx is zero):
# yes, this is also a number (imagine edx is zero):
# .text:004018C0 8D 4B 02 lea ecx, [ebx+2]
("mimikatz", "function=0x401873,bb=0x4018B2,insn=0x4018C0", capa.features.insn.Number(0x2), True),
# insn/api
@@ -1405,9 +1353,9 @@ FEATURE_COUNT_TESTS_DOTNET = [
FEATURE_COUNT_TESTS_GHIDRA = [
# Ghidra may render functions as labels, as well as provide differing amounts of call references
# (Colton) TODO: Add more test cases
("mimikatz", "function=0x4702FD", capa.features.common.Characteristic("calls from"), 0),
("mimikatz", "function=0x401bf1", capa.features.common.Characteristic("calls to"), 2),
("mimikatz", "function=0x401000", capa.features.basicblock.BasicBlock(), 3),
("mimikatz", "function=0x4556E5", capa.features.common.Characteristic("calls to"), 0),
]
@@ -1589,7 +1537,4 @@ def a076114_rd():
@pytest.fixture
def dynamic_a0000a6_rd():
# python -m capa.main tests/data/dynamic/cape/v2.2/0000a65749f5902c4d82ffa701198038f0b4870b00a27cfca109f8f933476d82.json --json > tests/data/rd/0000a65749f5902c4d82ffa701198038f0b4870b00a27cfca109f8f933476d82.json
# gzip tests/data/rd/0000a65749f5902c4d82ffa701198038f0b4870b00a27cfca109f8f933476d82.json
return get_result_doc(
CD / "data" / "rd" / "0000a65749f5902c4d82ffa701198038f0b4870b00a27cfca109f8f933476d82.json.gz"
)
return get_result_doc(CD / "data" / "rd" / "0000a65749f5902c4d82ffa701198038f0b4870b00a27cfca109f8f933476d82.json")

View File

@@ -1,602 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import re
import logging
from typing import Any, Dict
from pathlib import Path
import pytest
import fixtures
from google.protobuf.json_format import ParseDict
import capa.features.extractors.binexport2.helpers
from capa.features.extractors.binexport2.helpers import (
BinExport2InstructionPattern,
BinExport2InstructionPatternMatcher,
split_with_delimiters,
get_operand_expressions,
get_instruction_mnemonic,
get_instruction_operands,
get_operand_register_expression,
get_operand_immediate_expression,
)
from capa.features.extractors.binexport2.extractor import BinExport2FeatureExtractor
from capa.features.extractors.binexport2.binexport2_pb2 import BinExport2
from capa.features.extractors.binexport2.arch.arm.helpers import is_stack_register_expression
logger = logging.getLogger(__name__)
CD = Path(__file__).resolve().parent
# found via https://www.virustotal.com/gui/search/type%253Aelf%2520and%2520size%253A1.2kb%252B%2520and%2520size%253A1.4kb-%2520and%2520tag%253Aarm%2520and%2520not%2520tag%253Arelocatable%2520and%2520tag%253A64bits/files
# Ghidra disassembly of c7f38027552a3eca84e2bfc846ac1307fbf98657545426bb93a2d63555cbb486
GHIDRA_DISASSEMBLY = """
//
// segment_1
// Loadable segment [0x200000 - 0x200157]
// ram:00200000-ram:00200157
//
00200000 7f 45 4c Elf64_Ehdr
...
//
// .text
// SHT_PROGBITS [0x210158 - 0x2101c7]
// ram:00210158-ram:002101c7
//
**************************************************************
* FUNCTION *
**************************************************************
undefined entry()
undefined w0:1 <RETURN>
_start XREF[4]: Entry Point(*), 00200018(*),
entry 002000c0(*),
_elfSectionHeaders::00000050(*)
00210158 20 00 80 d2 mov x0,#0x1
0021015c a1 02 00 58 ldr x1=>helloWorldStr,DAT_002101b0 = "Hello World!\n"
= 00000000002201C8h
00210160 c2 02 00 58 ldr x2,DAT_002101b8 = 000000000000000Eh
00210164 08 08 80 d2 mov x8,#0x40
00210168 01 00 00 d4 svc 0x0
0021016c a0 02 00 58 ldr x0=>$stringWith_Weird_Name,DAT_002101c0 = "This string has a very strang
= 00000000002201D6h
00210170 04 00 00 94 bl printString undefined printString()
00210174 60 0f 80 d2 mov x0,#0x7b
00210178 a8 0b 80 d2 mov x8,#0x5d
0021017c 01 00 00 d4 svc 0x0
**************************************************************
* FUNCTION *
**************************************************************
undefined printString()
undefined w0:1 <RETURN>
printString XREF[1]: entry:00210170(c)
00210180 01 00 80 d2 mov x1,#0x0
strlenLoop XREF[1]: 00210194(j)
00210184 02 68 61 38 ldrb w2,[x0, x1, LSL ]
00210188 5f 00 00 71 cmp w2,#0x0
0021018c 60 00 00 54 b.eq strlenDone
00210190 21 04 00 91 add x1,x1,#0x1
00210194 fc ff ff 17 b strlenLoop
strlenDone XREF[1]: 0021018c(j)
00210198 e2 03 01 aa mov x2,x1
0021019c e1 03 00 aa mov x1,x0
002101a0 20 00 80 d2 mov x0,#0x1
002101a4 08 08 80 d2 mov x8,#0x40
002101a8 01 00 00 d4 svc 0x0
002101ac c0 03 5f d6 ret
DAT_002101b0 XREF[1]: entry:0021015c(R)
002101b0 c8 01 22 undefined8 00000000002201C8h ? -> 002201c8
00 00 00
00 00
DAT_002101b8 XREF[1]: entry:00210160(R)
002101b8 0e 00 00 undefined8 000000000000000Eh
00 00 00
00 00
DAT_002101c0 XREF[1]: entry:0021016c(R)
002101c0 d6 01 22 undefined8 00000000002201D6h ? -> 002201d6
00 00 00
00 00
//
// .data
// SHT_PROGBITS [0x2201c8 - 0x2201fb]
// ram:002201c8-ram:002201fb
//
helloWorldStr XREF[3]: 002000f8(*), entry:0021015c(*),
_elfSectionHeaders::00000090(*)
002201c8 48 65 6c ds "Hello World!\n"
6c 6f 20
57 6f 72
$stringWith_Weird_Name XREF[1]: entry:0021016c(*)
002201d6 54 68 69 ds "This string has a very strange label\n"
73 20 73
74 72 69
...
"""
def _parse_ghidra_disassembly(disasm: str) -> dict:
dd = {}
# 00210158 20 00 80 d2 mov x0,#0x1
# ^^^^^^^^ ^^^^^^^^^^^ ^^^ ^^ ^^^^
# address bytes mnemonic o1,o2 (,o3)
pattern = re.compile(
r"^( ){8}(?P<address>[0-9a-f]+) "
+ r"(?P<bytes>([0-9a-f]{2}[ ]){4})\s+"
+ r"(?P<mnemonic>[\w\.]+)\s*"
+ r"(?P<operand1>[\w#$=>]+)?,?"
+ r"((?P<operand2>[\w#$=>]+))?,?"
+ r"((?P<operand3>[\w#$=>]+))?"
)
for line in disasm.splitlines()[20:]:
m = pattern.match(line)
if m:
logger.debug("Match found\t%s\n\t\t\t\t%s", line, m.groupdict())
dd[int(m["address"], 0x10)] = {
"bytes": m["bytes"].strip(),
"mnemonic": m["mnemonic"],
"operands": [e for e in [m["operand1"], m["operand2"], m["operand3"]] if e is not None],
}
else:
logger.debug("No match\t%s", line)
return dd
BE2_EXTRACTOR = fixtures.get_binexport_extractor(
CD
/ "data"
/ "binexport2"
/ "c7f38027552a3eca84e2bfc846ac1307fbf98657545426bb93a2d63555cbb486.elf_.ghidra.BinExport"
)
PARSED_DISASM = _parse_ghidra_disassembly(GHIDRA_DISASSEMBLY)
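# given the regex above, the first matched line yields an entry like (illustrative):
#
#   PARSED_DISASM[0x210158] == {
#       "bytes": "20 00 80 d2",
#       "mnemonic": "mov",
#       "operands": ["x0", "#0x1"],
#   }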
def test_instruction_bytes():
# more a data sanity check here as we don't test our code
for addr, de in PARSED_DISASM.items():
insn = BE2_EXTRACTOR.idx.get_instruction_by_address(addr)
assert insn.raw_bytes == bytes.fromhex(de["bytes"])
def test_get_instruction_mnemonic():
for addr, de in PARSED_DISASM.items():
insn = BE2_EXTRACTOR.idx.get_instruction_by_address(addr)
assert get_instruction_mnemonic(BE2_EXTRACTOR.be2, insn) == de["mnemonic"]
def test_get_instruction_operands_count():
for addr, de in PARSED_DISASM.items():
insn = BE2_EXTRACTOR.idx.get_instruction_by_address(addr)
ops = get_instruction_operands(BE2_EXTRACTOR.be2, insn)
# this line is not properly parsed from the Ghidra disassembly using the current regex
# 00210184 02 68 61 38 ldrb w2,[x0, x1, LSL ]
if addr == 0x210184:
assert len(ops) == 2
else:
assert len(ops) == len(de["operands"])
@pytest.mark.parametrize(
"addr,expressions",
[
# 00210158 20 00 80 d2 mov x0,#0x1
(
0x210158,
(
BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="x0"),
BinExport2.Expression(type=BinExport2.Expression.IMMEDIATE_INT, immediate=0x1),
),
),
# 0021015c a1 02 00 58 ldr x1=>helloWorldStr,DAT_002101b0
(
0x21015C,
(
BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="x1"),
BinExport2.Expression(
type=BinExport2.Expression.IMMEDIATE_INT, symbol="PTR_helloWorldStr_002101b0", immediate=0x2101B0
),
),
),
# 00210184 02 68 61 38 ldrb w2,[x0, x1, LSL ]
# ^^^ issue in Ghidra?
# IDA gives LDRB W2, [X0,X1]
(
0x210184,
(
BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="w2"),
(
BinExport2.Expression(type=BinExport2.Expression.DEREFERENCE, symbol="["),
BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="x0"),
BinExport2.Expression(type=BinExport2.Expression.OPERATOR, symbol=","),
BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="x1"),
BinExport2.Expression(type=BinExport2.Expression.DEREFERENCE, symbol="]"),
),
),
),
# 00210190 21 04 00 91 add x1,x1,#0x1
(
0x210190,
(
BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="x1"),
BinExport2.Expression(type=BinExport2.Expression.REGISTER, symbol="x1"),
BinExport2.Expression(type=BinExport2.Expression.IMMEDIATE_INT, immediate=0x1),
),
),
],
)
def test_get_operand_expressions(addr, expressions):
insn = BE2_EXTRACTOR.idx.get_instruction_by_address(addr)
ops = get_instruction_operands(BE2_EXTRACTOR.be2, insn)
for i, op in enumerate(ops):
op_expression = expressions[i]
exps = get_operand_expressions(BE2_EXTRACTOR.be2, op)
if len(exps) > 1:
for j, exp in enumerate(exps):
assert exp.type == op_expression[j].type
assert exp.symbol == op_expression[j].symbol
else:
assert len(exps) == 1
assert exps[0] == op_expression
@pytest.mark.parametrize(
"addr,expressions",
[
# 00210158 20 00 80 d2 mov x0,#0x1
(0x210158, ("x0", None)),
# 0021015c a1 02 00 58 ldr x1=>helloWorldStr,DAT_002101b0
(0x21015C, ("x1", None)),
# 0021019c e1 03 00 aa mov x1,x0
(0x21019C, ("x1", "x0")),
# 00210190 21 04 00 91 add x1,x1,#0x1
(0x210190, ("x1", "x1", None)),
],
)
def test_get_operand_register_expression(addr, expressions):
insn = BE2_EXTRACTOR.idx.get_instruction_by_address(addr)
ops = get_instruction_operands(BE2_EXTRACTOR.be2, insn)
for i, op in enumerate(ops):
reg_exp = get_operand_register_expression(BE2_EXTRACTOR.be2, op)
if reg_exp is None:
assert reg_exp == expressions[i]
else:
assert reg_exp.symbol == expressions[i]
@pytest.mark.parametrize(
"addr,expressions",
[
# 00210158 20 00 80 d2 mov x0,#0x1
(0x210158, (None, 0x1)),
# 0021015c a1 02 00 58 ldr x1=>helloWorldStr,DAT_002101b0
(0x21015C, (None, 0x2101B0)),
# 002101a8 01 00 00 d4 svc 0x0
(0x2101A8, (0x0,)),
# 00210190 21 04 00 91 add x1,x1,#0x1
(0x210190, (None, None, 0x1)),
],
)
def test_get_operand_immediate_expression(addr, expressions):
insn = BE2_EXTRACTOR.idx.get_instruction_by_address(addr)
ops = get_instruction_operands(BE2_EXTRACTOR.be2, insn)
for i, op in enumerate(ops):
reg_exp = get_operand_immediate_expression(BE2_EXTRACTOR.be2, op)
if reg_exp is None:
assert reg_exp == expressions[i]
else:
assert reg_exp.immediate == expressions[i]
"""
mov x0, 0x20
bl 0x100
add x0, sp, 0x10
"""
BE2_DICT: Dict[str, Any] = {
"expression": [
{"type": BinExport2.Expression.REGISTER, "symbol": "x0"},
{"type": BinExport2.Expression.IMMEDIATE_INT, "immediate": 0x20},
{"type": BinExport2.Expression.IMMEDIATE_INT, "immediate": 0x100},
{"type": BinExport2.Expression.REGISTER, "symbol": "sp"},
{"type": BinExport2.Expression.IMMEDIATE_INT, "immediate": 0x10},
],
# operand consists of 1 or more expressions, linked together as a tree
"operand": [
{"expression_index": [0]},
{"expression_index": [1]},
{"expression_index": [2]},
{"expression_index": [3]},
{"expression_index": [4]},
],
"mnemonic": [
{"name": "mov"}, # mnem 0
{"name": "bl"}, # mnem 1
{"name": "add"}, # mnem 2
],
# instruction may have 0 or more operands
"instruction": [
{"mnemonic_index": 0, "operand_index": [0, 1]},
{"mnemonic_index": 1, "operand_index": [2]},
{"mnemonic_index": 2, "operand_index": [0, 3, 4]},
],
}
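# worked example (derived from the dict above): instruction 2 encodes
# "add x0, sp, 0x10" as mnemonic_index=2 ("add") with operand_index=[0, 3, 4],
# i.e. the expressions REGISTER "x0", REGISTER "sp", and IMMEDIATE_INT 0x10.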
BE2 = ParseDict(
BE2_DICT,
BinExport2(),
)
def test_is_stack_register_expression():
mov = ParseDict(BE2_DICT["instruction"][0], BinExport2.Instruction())
add = ParseDict(BE2_DICT["instruction"][2], BinExport2.Instruction())
mov_op0, mov_op1 = get_instruction_operands(BE2, mov)
op0_exp0 = get_operand_expressions(BE2, mov_op0)[0]
assert is_stack_register_expression(BE2, op0_exp0) is False
op0_exp1 = get_operand_expressions(BE2, mov_op1)[0]
assert is_stack_register_expression(BE2, op0_exp1) is False
add_op0, add_op1, add_op2 = get_instruction_operands(BE2, add)
op0_exp0 = get_operand_expressions(BE2, add_op0)[0]
assert is_stack_register_expression(BE2, op0_exp0) is False
op1_exp0 = get_operand_expressions(BE2, add_op1)[0]
assert is_stack_register_expression(BE2, op1_exp0) is True
op2_exp0 = get_operand_expressions(BE2, add_op2)[0]
assert is_stack_register_expression(BE2, op2_exp0) is False
def test_split_with_delimiters():
assert tuple(split_with_delimiters("abc|def", ("|",))) == ("abc", "|", "def")
assert tuple(split_with_delimiters("abc|def|", ("|",))) == ("abc", "|", "def", "|")
assert tuple(split_with_delimiters("abc||def", ("|",))) == ("abc", "|", "", "|", "def")
assert tuple(split_with_delimiters("abc|def-ghi", ("|", "-"))) == ("abc", "|", "def", "-", "ghi")
def test_pattern_parsing():
assert BinExport2InstructionPattern.from_str(
"br reg ; capture reg"
) == BinExport2InstructionPattern(mnemonics=("br",), operands=("reg",), capture="reg")
assert BinExport2InstructionPattern.from_str(
"mov reg0, reg1 ; capture reg0"
) == BinExport2InstructionPattern(mnemonics=("mov",), operands=("reg0", "reg1"), capture="reg0")
assert BinExport2InstructionPattern.from_str(
"adrp reg, #int ; capture #int"
) == BinExport2InstructionPattern(mnemonics=("adrp",), operands=("reg", "#int"), capture="#int")
assert BinExport2InstructionPattern.from_str(
"add reg, reg, #int ; capture #int"
) == BinExport2InstructionPattern(mnemonics=("add",), operands=("reg", "reg", "#int"), capture="#int")
assert BinExport2InstructionPattern.from_str(
"ldr reg0, [reg1] ; capture reg1"
) == BinExport2InstructionPattern(mnemonics=("ldr",), operands=("reg0", ("[", "reg1")), capture="reg1")
assert BinExport2InstructionPattern.from_str(
"ldr|str reg, [reg, #int] ; capture #int"
) == BinExport2InstructionPattern(
mnemonics=(
"ldr",
"str",
),
operands=("reg", ("[", "reg", ",", "#int")),
capture="#int",
)
assert BinExport2InstructionPattern.from_str(
"ldr|str reg, [reg, #int]! ; capture #int"
) == BinExport2InstructionPattern(
mnemonics=(
"ldr",
"str",
),
operands=("reg", ("!", "[", "reg", ",", "#int")),
capture="#int",
)
assert BinExport2InstructionPattern.from_str(
"ldr|str reg, [reg], #int ; capture #int"
) == BinExport2InstructionPattern(
mnemonics=(
"ldr",
"str",
),
operands=(
"reg",
(
"[",
"reg",
),
"#int",
),
capture="#int",
)
assert BinExport2InstructionPattern.from_str(
"ldp|stp reg, reg, [reg, #int] ; capture #int"
) == BinExport2InstructionPattern(
mnemonics=(
"ldp",
"stp",
),
operands=("reg", "reg", ("[", "reg", ",", "#int")),
capture="#int",
)
assert BinExport2InstructionPattern.from_str(
"ldp|stp reg, reg, [reg, #int]! ; capture #int"
) == BinExport2InstructionPattern(
mnemonics=(
"ldp",
"stp",
),
operands=("reg", "reg", ("!", "[", "reg", ",", "#int")),
capture="#int",
)
assert BinExport2InstructionPattern.from_str(
"ldp|stp reg, reg, [reg], #int ; capture #int"
) == BinExport2InstructionPattern(
mnemonics=(
"ldp",
"stp",
),
operands=("reg", "reg", ("[", "reg"), "#int"),
capture="#int",
)
assert (
BinExport2InstructionPatternMatcher.from_str(
"""
# comment
br reg
br reg(not-stack)
br reg ; capture reg
mov reg0, reg1 ; capture reg0
adrp reg, #int ; capture #int
add reg, reg, #int ; capture #int
ldr reg0, [reg1] ; capture reg1
ldr|str reg, [reg, #int] ; capture #int
ldr|str reg, [reg, #int]! ; capture #int
ldr|str reg, [reg], #int ; capture #int
ldp|stp reg, reg, [reg, #int] ; capture #int
ldp|stp reg, reg, [reg, #int]! ; capture #int
ldp|stp reg, reg, [reg], #int ; capture #int
ldrb reg0, [reg1, reg2] ; capture reg2
call [reg + reg * #int + #int]
call [reg + reg * #int]
call [reg * #int + #int]
call [reg + reg + #int]
call [reg + #int]
"""
).queries
is not None
)
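# as the cases above suggest, each pattern line has the shape
#   mnemonic[|mnemonic...] operand, ... [; capture term]
# where "reg"/"#int" are wildcards for register/immediate expressions, digits
# (reg0, reg1, #int0) distinguish capture targets, and a "(stack)"/"(not-stack)"
# suffix constrains which registers may match (a reading of the test data above,
# not a formal grammar).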
def match_address(extractor: BinExport2FeatureExtractor, queries: BinExport2InstructionPatternMatcher, address: int):
instruction = extractor.idx.insn_by_address[address]
mnemonic: str = get_instruction_mnemonic(extractor.be2, instruction)
operands = []
for operand_index in instruction.operand_index:
operand = extractor.be2.operand[operand_index]
operands.append(capa.features.extractors.binexport2.helpers.get_operand_expressions(extractor.be2, operand))
return queries.match(mnemonic, operands)
def match_address_with_be2(
extractor: BinExport2FeatureExtractor, queries: BinExport2InstructionPatternMatcher, address: int
):
instruction_index = extractor.idx.insn_index_by_address[address]
return queries.match_with_be2(extractor.be2, instruction_index)
def test_pattern_matching():
queries = BinExport2InstructionPatternMatcher.from_str(
"""
br reg(stack) ; capture reg
br reg(not-stack) ; capture reg
mov reg0, reg1 ; capture reg0
adrp reg, #int ; capture #int
add reg, reg, #int ; capture #int
ldr reg0, [reg1] ; capture reg1
ldr|str reg, [reg, #int] ; capture #int
ldr|str reg, [reg, #int]! ; capture #int
ldr|str reg, [reg], #int ; capture #int
ldp|stp reg, reg, [reg, #int] ; capture #int
ldp|stp reg, reg, [reg, #int]! ; capture #int
ldp|stp reg, reg, [reg], #int ; capture #int
ldrb reg0, [reg1(not-stack), reg2] ; capture reg2
"""
)
# 0x210184: ldrb w2, [x0, x1]
# query: ldrb reg0, [reg1(not-stack), reg2] ; capture reg2"
assert match_address(BE2_EXTRACTOR, queries, 0x210184).expression.symbol == "x1"
assert match_address_with_be2(BE2_EXTRACTOR, queries, 0x210184).expression.symbol == "x1"
# 0x210198: mov x2, x1
# query: mov reg0, reg1 ; capture reg0"),
assert match_address(BE2_EXTRACTOR, queries, 0x210198).expression.symbol == "x2"
assert match_address_with_be2(BE2_EXTRACTOR, queries, 0x210198).expression.symbol == "x2"
# 0x210190: add x1, x1, 0x1
# query: add reg, reg, #int ; capture #int
assert match_address(BE2_EXTRACTOR, queries, 0x210190).expression.immediate == 1
assert match_address_with_be2(BE2_EXTRACTOR, queries, 0x210190).expression.immediate == 1
BE2_EXTRACTOR_687 = fixtures.get_binexport_extractor(
CD
/ "data"
/ "binexport2"
/ "687e79cde5b0ced75ac229465835054931f9ec438816f2827a8be5f3bd474929.elf_.ghidra.BinExport"
)
def test_pattern_matching_exclamation():
queries = BinExport2InstructionPatternMatcher.from_str(
"""
stp reg, reg, [reg, #int]! ; capture #int
"""
)
# note this captures the sp
# 0x107918: stp x20, x19, [sp,0xFFFFFFFFFFFFFFE0]!
# query: stp reg, reg, [reg, #int]! ; capture #int
assert match_address(BE2_EXTRACTOR_687, queries, 0x107918).expression.immediate == 0xFFFFFFFFFFFFFFE0
assert match_address_with_be2(BE2_EXTRACTOR_687, queries, 0x107918).expression.immediate == 0xFFFFFFFFFFFFFFE0
def test_pattern_matching_stack():
queries = BinExport2InstructionPatternMatcher.from_str(
"""
stp reg, reg, [reg(stack), #int]! ; capture #int
"""
)
# note this does capture the sp
# compare this with the test above (exclamation)
# 0x107918: stp x20, x19, [sp, 0xFFFFFFFFFFFFFFE0]!
# query: stp reg, reg, [reg(stack), #int]! ; capture #int
assert match_address(BE2_EXTRACTOR_687, queries, 0x107918).expression.immediate == 0xFFFFFFFFFFFFFFE0
assert match_address_with_be2(BE2_EXTRACTOR_687, queries, 0x107918).expression.immediate == 0xFFFFFFFFFFFFFFE0
def test_pattern_matching_not_stack():
queries = BinExport2InstructionPatternMatcher.from_str(
"""
stp reg, reg, [reg(not-stack), #int]! ; capture #int
"""
)
# note this does not capture the sp
# compare this with the test above (exclamation)
# 0x107918: stp x20, x19, [sp, 0xFFFFFFFFFFFFFFE0]!
# query: stp reg, reg, [reg(not-stack), #int]! ; capture #int
assert match_address(BE2_EXTRACTOR_687, queries, 0x107918) is None
assert match_address_with_be2(BE2_EXTRACTOR_687, queries, 0x107918) is None
BE2_EXTRACTOR_MIMI = fixtures.get_binexport_extractor(CD / "data" / "binexport2" / "mimikatz.exe_.ghidra.BinExport")
def test_pattern_matching_x86():
queries = BinExport2InstructionPatternMatcher.from_str(
"""
cmp|lea reg, [reg(not-stack) + #int0] ; capture #int0
"""
)
# 0x4018c0: LEA ECX, [EBX+0x2]
# query: cmp|lea reg, [reg(not-stack) + #int0] ; capture #int0
assert match_address(BE2_EXTRACTOR_MIMI, queries, 0x4018C0).expression.immediate == 2
assert match_address_with_be2(BE2_EXTRACTOR_MIMI, queries, 0x4018C0).expression.immediate == 2

View File

@@ -1,442 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import binascii
from typing import cast
import pytest
import fixtures
import capa.features.file
import capa.features.insn
import capa.features.common
import capa.features.basicblock
from capa.features.common import (
OS,
OS_LINUX,
ARCH_I386,
FORMAT_PE,
ARCH_AMD64,
FORMAT_ELF,
OS_ANDROID,
OS_WINDOWS,
ARCH_AARCH64,
Arch,
Format,
)
FEATURE_PRESENCE_TESTS_BE2_ELF_AARCH64 = sorted(
[
# file/string
(
"687e79.ghidra.be2",
"file",
capa.features.common.String("AppDataService start"),
True,
),
("687e79.ghidra.be2", "file", capa.features.common.String("nope"), False),
# file/sections
("687e79.ghidra.be2", "file", capa.features.file.Section(".text"), True),
("687e79.ghidra.be2", "file", capa.features.file.Section(".nope"), False),
# file/exports
(
"687e79.ghidra.be2",
"file",
capa.features.file.Export("android::clearDir"),
"xfail: name demangling is not implemented",
),
("687e79.ghidra.be2", "file", capa.features.file.Export("nope"), False),
# file/imports
("687e79.ghidra.be2", "file", capa.features.file.Import("fopen"), True),
("687e79.ghidra.be2", "file", capa.features.file.Import("exit"), True),
(
"687e79.ghidra.be2",
"file",
capa.features.file.Import("_ZN7android10IInterfaceD0Ev"),
True,
),
("687e79.ghidra.be2", "file", capa.features.file.Import("nope"), False),
# function/characteristic(loop)
(
"687e79.ghidra.be2",
"function=0x1056c0",
capa.features.common.Characteristic("loop"),
True,
),
(
"687e79.ghidra.be2",
"function=0x1075c0",
capa.features.common.Characteristic("loop"),
False,
),
# bb/characteristic(tight loop)
(
"d1e650.ghidra.be2",
"function=0x114af4",
capa.features.common.Characteristic("tight loop"),
True,
),
(
"d1e650.ghidra.be2",
"function=0x118F1C",
capa.features.common.Characteristic("tight loop"),
True,
),
(
"d1e650.ghidra.be2",
"function=0x11464c",
capa.features.common.Characteristic("tight loop"),
False,
),
# bb/characteristic(stack string)
(
"687e79.ghidra.be2",
"function=0x0",
capa.features.common.Characteristic("stack string"),
"xfail: not implemented yet",
),
# insn/mnemonic
("687e79.ghidra.be2", "function=0x107588", capa.features.insn.Mnemonic("stp"), True),
("687e79.ghidra.be2", "function=0x107588", capa.features.insn.Mnemonic("adrp"), True),
("687e79.ghidra.be2", "function=0x107588", capa.features.insn.Mnemonic("bl"), True),
("687e79.ghidra.be2", "function=0x107588", capa.features.insn.Mnemonic("in"), False),
("687e79.ghidra.be2", "function=0x107588", capa.features.insn.Mnemonic("adrl"), False),
# insn/number
# 00114524 add x29,sp,#0x10
(
"d1e650.ghidra.be2",
"function=0x11451c",
capa.features.insn.Number(0x10),
False,
),
# 00105128 sub sp,sp,#0xE0
(
"687e79.ghidra.be2",
"function=0x105128",
capa.features.insn.Number(0xE0),
False,
),
# insn/operand.number
(
"687e79.ghidra.be2",
"function=0x105128,bb=0x1051e4",
capa.features.insn.OperandNumber(1, 0xFFFFFFFF),
True,
),
(
"687e79.ghidra.be2",
"function=0x107588,bb=0x107588",
capa.features.insn.OperandNumber(1, 0x8),
True,
),
(
"687e79.ghidra.be2",
"function=0x107588,bb=0x107588,insn=0x1075a4",
capa.features.insn.OperandNumber(1, 0x8),
True,
),
# insn/operand.offset
(
"687e79.ghidra.be2",
"function=0x105128,bb=0x105450",
capa.features.insn.OperandOffset(2, 0x10),
True,
),
(
"d1e650.ghidra.be2",
"function=0x124854,bb=0x1248AC,insn=0x1248B4",
capa.features.insn.OperandOffset(2, -0x48),
True,
),
(
"d1e650.ghidra.be2",
"function=0x13347c,bb=0x133548,insn=0x133554",
capa.features.insn.OperandOffset(2, 0x20),
False,
),
("687e79.ghidra.be2", "function=0x105C88", capa.features.insn.Number(0xF000), True),
# insn/number: negative
(
"687e79.ghidra.be2",
"function=0x1057f8,bb=0x1057f8",
capa.features.insn.Number(0xFFFFFFFFFFFFFFFF),
True,
),
(
"687e79.ghidra.be2",
"function=0x1066e0,bb=0x1068c4",
capa.features.insn.Number(0xFFFFFFFF),
True,
),
# insn/offset
(
"687e79.ghidra.be2",
"function=0x105128,bb=0x105450",
capa.features.insn.Offset(0x10),
True,
),
# ldp x29,x30,[sp, #0x20]
(
"d1e650.ghidra.be2",
"function=0x13347c,bb=0x133548,insn=0x133554",
capa.features.insn.Offset(0x20),
False,
),
# stp x20,x0,[x19, #0x8]
(
"d1e650.ghidra.be2",
"function=0x1183e0,bb=0x11849c,insn=0x1184b0",
capa.features.insn.Offset(0x8),
True,
),
# str xzr,[x8, #0x8]!
(
"d1e650.ghidra.be2",
"function=0x138688,bb=0x138994,insn=0x1389a8",
capa.features.insn.Offset(0x8),
True,
),
# ldr x9,[x8, #0x8]!
(
"d1e650.ghidra.be2",
"function=0x138688,bb=0x138978,insn=0x138984",
capa.features.insn.Offset(0x8),
True,
),
# ldr x19,[sp], #0x20
(
"d1e650.ghidra.be2",
"function=0x11451c",
capa.features.insn.Offset(0x20),
False,
),
# ldrb w9,[x8, #0x1]
(
"d1e650.ghidra.be2",
"function=0x138a9c,bb=0x138b00,insn=0x138b00",
capa.features.insn.Offset(0x1),
True,
),
# insn/offset: negative
(
"d1e650.ghidra.be2",
"function=0x124854,bb=0x1248AC,insn=0x1248B4",
capa.features.insn.Offset(-0x48),
True,
),
# insn/offset from mnemonic: add
# 0010514c add x23,param_1,#0x8
(
"687e79.ghidra.be2",
"function=0x105128,bb=0x105128,insn=0x10514c",
capa.features.insn.Offset(0x8),
True,
),
# insn/api
# not extracting dll name
("687e79.ghidra.be2", "function=0x105c88", capa.features.insn.API("memset"), True),
("687e79.ghidra.be2", "function=0x105c88", capa.features.insn.API("Nope"), False),
# insn/string
(
"687e79.ghidra.be2",
"function=0x107588",
capa.features.common.String("AppDataService start"),
True,
),
(
"687e79.ghidra.be2",
"function=0x1075c0",
capa.features.common.String("AppDataService"),
True,
),
("687e79.ghidra.be2", "function=0x107588", capa.features.common.String("nope"), False),
(
"687e79.ghidra.be2",
"function=0x106d58",
capa.features.common.String("/data/misc/wifi/wpa_supplicant.conf"),
True,
),
# insn/regex
(
"687e79.ghidra.be2",
"function=0x105c88",
capa.features.common.Regex("innerRename"),
True,
),
(
"687e79.ghidra.be2",
"function=0x106d58",
capa.features.common.Regex("/data/misc"),
True,
),
(
"687e79.ghidra.be2",
"function=0x106d58",
capa.features.common.Substring("/data/misc"),
True,
),
# insn/bytes
(
"d1e650.ghidra.be2",
"function=0x1165a4",
capa.features.common.Bytes(binascii.unhexlify("E405B89370BA6B419CD7925275BF6FCC1E8360CC")),
True,
),
        # don't extract byte features for obvious strings
(
"687e79.ghidra.be2",
"function=0x1057f8",
capa.features.common.Bytes("/system/xbin/busybox".encode("utf-16le")),
False,
),
# insn/characteristic(nzxor)
(
"d1e650.ghidra.be2",
"function=0x114af4",
capa.features.common.Characteristic("nzxor"),
True,
),
(
"d1e650.ghidra.be2",
"function=0x117988",
capa.features.common.Characteristic("nzxor"),
True,
),
# # insn/characteristic(cross section flow)
# ("a1982...", "function=0x4014D0", capa.features.common.Characteristic("cross section flow"), True),
# # insn/characteristic(cross section flow): imports don't count
# ("mimikatz", "function=0x4556E5", capa.features.common.Characteristic("cross section flow"), False),
# insn/characteristic(recursive call)
(
"687e79.ghidra.be2",
"function=0x105b38",
capa.features.common.Characteristic("recursive call"),
True,
),
(
"687e79.ghidra.be2",
"function=0x106530",
capa.features.common.Characteristic("recursive call"),
True,
),
# insn/characteristic(indirect call)
("d1e650.ghidra.be2", "function=0x118620", capa.features.common.Characteristic("indirect call"), True),
(
"d1e650.ghidra.be2",
"function=0x118500",
capa.features.common.Characteristic("indirect call"),
False,
),
("d1e650.ghidra.be2", "function=0x118620", capa.features.common.Characteristic("indirect call"), True),
(
"d1e650.ghidra.be2",
"function=0x11451c",
capa.features.common.Characteristic("indirect call"),
True,
),
# insn/characteristic(calls from)
(
"687e79.ghidra.be2",
"function=0x105080",
capa.features.common.Characteristic("calls from"),
True,
),
(
"687e79.ghidra.be2",
"function=0x1070e8",
capa.features.common.Characteristic("calls from"),
False,
),
# function/characteristic(calls to)
(
"687e79.ghidra.be2",
"function=0x1075c0",
capa.features.common.Characteristic("calls to"),
True,
),
# file/function-name
(
"687e79.ghidra.be2",
"file",
capa.features.file.FunctionName("__libc_init"),
"xfail: TODO should this be a function-name?",
),
# os & format & arch
("687e79.ghidra.be2", "file", OS(OS_ANDROID), True),
("687e79.ghidra.be2", "file", OS(OS_LINUX), False),
("687e79.ghidra.be2", "file", OS(OS_WINDOWS), False),
# os & format & arch are also global features
("687e79.ghidra.be2", "function=0x107588", OS(OS_ANDROID), True),
("687e79.ghidra.be2", "function=0x1075c0,bb=0x1076c0", OS(OS_ANDROID), True),
("687e79.ghidra.be2", "file", Arch(ARCH_I386), False),
("687e79.ghidra.be2", "file", Arch(ARCH_AMD64), False),
("687e79.ghidra.be2", "file", Arch(ARCH_AARCH64), True),
("687e79.ghidra.be2", "function=0x107588", Arch(ARCH_AARCH64), True),
("687e79.ghidra.be2", "function=0x1075c0,bb=0x1076c0", Arch(ARCH_AARCH64), True),
("687e79.ghidra.be2", "file", Format(FORMAT_ELF), True),
("687e79.ghidra.be2", "file", Format(FORMAT_PE), False),
("687e79.ghidra.be2", "function=0x107588", Format(FORMAT_ELF), True),
("687e79.ghidra.be2", "function=0x107588", Format(FORMAT_PE), False),
],
# order tests by (file, item)
# so that our LRU cache is most effective.
key=lambda t: (t[0], t[1]),
)
@fixtures.parametrize(
"sample,scope,feature,expected",
FEATURE_PRESENCE_TESTS_BE2_ELF_AARCH64,
indirect=["sample", "scope"],
)
def test_binexport_features_elf_aarch64(sample, scope, feature, expected):
if not isinstance(expected, bool):
# (for now) xfails indicates using string like: "xfail: not implemented yet"
pytest.xfail(expected)
fixtures.do_test_feature_presence(fixtures.get_binexport_extractor, sample, scope, feature, expected)
@fixtures.parametrize(
"sample,scope,feature,expected",
fixtures.FEATURE_PRESENCE_TESTS,
indirect=["sample", "scope"],
)
def test_binexport_features_pe_x86(sample, scope, feature, expected):
if "mimikatz.exe_" not in sample.name:
pytest.skip("for now only testing mimikatz.exe_ Ghidra BinExport file")
if isinstance(feature, capa.features.common.Characteristic) and "stack string" in cast(str, feature.value):
pytest.skip("for now only testing basic features")
sample = sample.parent / "binexport2" / (sample.name + ".ghidra.BinExport")
assert sample.exists()
fixtures.do_test_feature_presence(fixtures.get_binexport_extractor, sample, scope, feature, expected)
@fixtures.parametrize(
"sample,scope,feature,expected",
fixtures.FEATURE_COUNT_TESTS_GHIDRA,
indirect=["sample", "scope"],
)
def test_binexport_feature_counts_ghidra(sample, scope, feature, expected):
if "mimikatz.exe_" not in sample.name:
pytest.skip("for now only testing mimikatz.exe_ Ghidra BinExport file")
sample = sample.parent / "binexport2" / (sample.name + ".ghidra.BinExport")
assert sample.exists()
fixtures.do_test_feature_count(fixtures.get_binexport_extractor, sample, scope, feature, expected)

View File

@@ -9,7 +9,6 @@
import textwrap
import capa.capabilities.common
from capa.features.extractors.base_extractor import FunctionFilter
def test_match_across_scopes_file_function(z9324d_extractor):
@@ -175,37 +174,6 @@ def test_subscope_bb_rules(z9324d_extractor):
assert "test rule" in capabilities
def test_match_specific_functions(z9324d_extractor):
rules = capa.rules.RuleSet(
[
capa.rules.Rule.from_yaml(
textwrap.dedent(
"""
rule:
meta:
name: receive data
scopes:
static: function
dynamic: call
examples:
- 9324d1a8ae37a36ae560c37448c9705a:0x401CD0
features:
- or:
- api: recv
"""
)
)
]
)
extractor = FunctionFilter(z9324d_extractor, {0x4019C0})
capabilities, meta = capa.capabilities.common.find_capabilities(rules, extractor)
matches = capabilities["receive data"]
# test that we received only one match
assert len(matches) == 1
# and that this match is from the specified function
assert matches[0][0] == 0x4019C0
def test_byte_matching(z9324d_extractor):
rules = capa.rules.RuleSet(
[

View File

@@ -37,8 +37,6 @@ DYNAMIC_CAPE_FEATURE_PRESENCE_TESTS = sorted(
),
("0000a657", "process=(1180:3052)", capa.features.common.String("nope"), False),
# thread/api calls
("0000a657", "process=(2900:2852),thread=2904", capa.features.insn.API("RegQueryValueExA"), True),
("0000a657", "process=(2900:2852),thread=2904", capa.features.insn.API("RegQueryValueEx"), True),
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("NtQueryValueKey"), True),
("0000a657", "process=(2852:3052),thread=2804", capa.features.insn.API("GetActiveWindow"), False),
# thread/number call argument

View File

@@ -15,33 +15,26 @@ import capa.features.common
DYNAMIC_DRAKVUF_FEATURE_PRESENCE_TESTS = sorted(
[
("93b2d1-drakvuf", "file", capa.features.common.String("\\Program Files\\WindowsApps\\does_not_exist"), False),
("93b2d1", "file", capa.features.common.String("\\Program Files\\WindowsApps\\does_not_exist"), False),
# file/imports
("93b2d1-drakvuf", "file", capa.features.file.Import("SetUnhandledExceptionFilter"), True),
("93b2d1", "file", capa.features.file.Import("SetUnhandledExceptionFilter"), True),
# thread/api calls
("93b2d1-drakvuf", "process=(3564:4852),thread=6592", capa.features.insn.API("LdrLoadDll"), True),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592", capa.features.insn.API("DoesNotExist"), False),
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("LdrLoadDll"), True),
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("DoesNotExist"), False),
# call/api
("93b2d1-drakvuf", "process=(3564:4852),thread=4716,call=17", capa.features.insn.API("CreateWindowExW"), True),
("93b2d1-drakvuf", "process=(3564:4852),thread=4716,call=17", capa.features.insn.API("CreateWindowEx"), True),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("LdrLoadDll"), True),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("DoesNotExist"), False),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("LdrLoadDll"), True),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("DoesNotExist"), False),
# call/string argument
(
"93b2d1-drakvuf",
"93b2d1",
"process=(3564:4852),thread=6592,call=1",
capa.features.common.String('0x667e2beb40:"api-ms-win-core-fibers-l1-1-1"'),
True,
),
(
"93b2d1-drakvuf",
"process=(3564:4852),thread=6592,call=1",
capa.features.common.String("non_existant"),
False,
),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.common.String("non_existant"), False),
# call/number argument
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x801), True),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x010101010101), False),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x801), True),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x010101010101), False),
],
# order tests by (file, item)
# so that our LRU cache is most effective.
@@ -50,26 +43,26 @@ DYNAMIC_DRAKVUF_FEATURE_PRESENCE_TESTS = sorted(
DYNAMIC_DRAKVUF_FEATURE_COUNT_TESTS = sorted(
[
("93b2d1-drakvuf", "file", capa.features.common.String("\\Program Files\\WindowsApps\\does_not_exist"), False),
("93b2d1", "file", capa.features.common.String("\\Program Files\\WindowsApps\\does_not_exist"), False),
# file/imports
("93b2d1-drakvuf", "file", capa.features.file.Import("SetUnhandledExceptionFilter"), 1),
("93b2d1", "file", capa.features.file.Import("SetUnhandledExceptionFilter"), 1),
# thread/api calls
("93b2d1-drakvuf", "process=(3564:4852),thread=6592", capa.features.insn.API("LdrLoadDll"), 9),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592", capa.features.insn.API("DoesNotExist"), False),
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("LdrLoadDll"), 9),
("93b2d1", "process=(3564:4852),thread=6592", capa.features.insn.API("DoesNotExist"), False),
# call/api
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("LdrLoadDll"), 1),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("DoesNotExist"), 0),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("LdrLoadDll"), 1),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.API("DoesNotExist"), 0),
# call/string argument
(
"93b2d1-drakvuf",
"93b2d1",
"process=(3564:4852),thread=6592,call=1",
capa.features.common.String('0x667e2beb40:"api-ms-win-core-fibers-l1-1-1"'),
1,
),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.common.String("non_existant"), 0),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.common.String("non_existant"), 0),
# call/number argument
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x801), 1),
("93b2d1-drakvuf", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x010101010101), 0),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x801), 1),
("93b2d1", "process=(3564:4852),thread=6592,call=1", capa.features.insn.Number(0x010101010101), 0),
],
# order tests by (file, item)
# so that our LRU cache is most effective.

View File

@@ -8,7 +8,6 @@
import codecs
import capa.helpers
from capa.features.extractors import helpers
@@ -65,8 +64,3 @@ def test_generate_symbols():
symbols = list(helpers.generate_symbols("ws2_32", "#1", include_dll=False))
assert len(symbols) == 1
assert "ws2_32.#1" in symbols
def test_is_dev_environment():
# testing environment should be a dev environment
assert capa.helpers.is_dev_environment() is True
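As a quick reference, the symbol-generation helper exercised above can also be driven directly; a minimal sketch mirroring the assertions in the test:

```python
# a minimal sketch of generate_symbols, mirroring the assertions above.
from capa.features.extractors import helpers

# with include_dll=False, an ordinal yields only the qualified form
symbols = list(helpers.generate_symbols("ws2_32", "#1", include_dll=False))
assert symbols == ["ws2_32.#1"]

# named exports typically expand to more variants (dll.name, name, ...);
# the exact output depends on the helper's rules for the given name.
print(list(helpers.generate_symbols("ws2_32", "connect", include_dll=True)))
```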

View File

@@ -5,7 +5,6 @@
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import io
import textwrap
from unittest.mock import Mock
@@ -23,7 +22,6 @@ import capa.features.basicblock
import capa.render.result_document
import capa.render.result_document as rd
import capa.features.freeze.features
from capa.render.utils import Console
def test_render_number():
@@ -153,10 +151,9 @@ def test_render_meta_maec():
mock_rd.rules = {"test rule": rm}
# capture the output of render_maec
f = io.StringIO()
console = Console(file=f)
capa.render.default.render_maec(mock_rd, console)
output = f.getvalue()
output_stream = capa.render.utils.StringIO()
capa.render.default.render_maec(mock_rd, output_stream)
output = output_stream.getvalue()
assert "analysis-conclusion" in output
assert analysis_conclusion in output
@@ -198,7 +195,7 @@ def test_render_meta_maec():
],
)
def test_render_vverbose_feature(feature, expected):
console = Console(highlight=False)
ostream = capa.render.utils.StringIO()
addr = capa.features.freeze.Address.from_capa(capa.features.address.AbsoluteVirtualAddress(0x401000))
feature = capa.features.freeze.features.feature_from_capa(feature)
@@ -240,8 +237,6 @@ def test_render_vverbose_feature(feature, expected):
matches=(),
)
with console.capture() as capture:
capa.render.vverbose.render_feature(console, layout, rm, matches, feature, indent=0)
capa.render.vverbose.render_feature(ostream, layout, rm, matches, feature, indent=0)
output = capture.get().strip()
assert output == expected
assert ostream.getvalue().strip() == expected
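These render tests migrate from a raw `StringIO` ostream to a `Console`; a hedged sketch of both capture idioms, assuming `capa.render.utils.Console` behaves like rich's `Console`:

```python
# a minimal sketch, assuming capa.render.utils.Console mirrors rich's Console.
import io

from capa.render.utils import Console

# idiom 1: write to an explicit file object and read it back
f = io.StringIO()
console = Console(file=f)
console.print("analysis-conclusion")
print("analysis-conclusion" in f.getvalue())

# idiom 2: capture output without a backing file
console = Console(highlight=False)
with console.capture() as capture:
    console.print("some feature line")
print(capture.get().strip())
```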

View File

@@ -6,13 +6,10 @@
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import os
import textwrap
import contextlib
from pathlib import Path
import capa.rules
import capa.helpers
import capa.rules.cache
R1 = capa.rules.Rule.from_yaml(
@@ -116,40 +113,3 @@ def test_ruleset_cache_invalid():
assert capa.rules.cache.load_cached_ruleset(cache_dir, content) is None
# the invalid cache should be deleted
assert not path.exists()
def test_rule_cache_dev_environment():
# generate rules cache
rs = capa.rules.RuleSet([R2])
content = capa.rules.cache.get_ruleset_content(rs)
id = capa.rules.cache.compute_cache_identifier(content)
cache_dir = capa.rules.cache.get_default_cache_directory()
cache_path = capa.rules.cache.get_cache_path(cache_dir, id)
# clear existing cache files
for f in cache_dir.glob("*.cache"):
f.unlink()
capa.rules.cache.cache_ruleset(cache_dir, rs)
assert cache_path.exists()
assert capa.helpers.is_cache_newer_than_rule_code(cache_dir) is True
capa_root = Path(__file__).resolve().parent.parent
cachepy = capa_root / "capa" / "rules" / "cache.py" # alternative: capa_root / "capa" / "rules" / "__init__.py"
# set cache's last modified time prior to code file's modified time
os.utime(cache_path, (cache_path.stat().st_atime, cachepy.stat().st_mtime - 600000))
# debug
def ts_to_str(ts):
from datetime import datetime
return datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
for g in ((capa_root / "capa" / "rules").glob("*.py"), cache_dir.glob("*.cache")):
for p in g:
print(p, "\t", ts_to_str(p.stat().st_mtime)) # noqa: T201
assert capa.helpers.is_dev_environment() is True
assert capa.helpers.is_cache_newer_than_rule_code(cache_dir) is False
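A hedged sketch of the cache round-trip these tests rely on, using only calls visible above; the rule YAML is a hypothetical minimal rule, not one shipped with capa:

```python
# a minimal sketch, assuming a dev checkout of capa; the rule is hypothetical.
import textwrap

import capa.rules
import capa.rules.cache

rule = capa.rules.Rule.from_yaml(
    textwrap.dedent(
        """
        rule:
          meta:
            name: test rule
            scopes:
              static: function
              dynamic: call
          features:
            - or:
              - api: CreateFileW
        """
    )
)

rs = capa.rules.RuleSet([rule])
content = capa.rules.cache.get_ruleset_content(rs)
ruleset_id = capa.rules.cache.compute_cache_identifier(content)

cache_dir = capa.rules.cache.get_default_cache_directory()
capa.rules.cache.cache_ruleset(cache_dir, rs)

# the cache path derives from the identifier; a later load round-trips the ruleset
assert capa.rules.cache.get_cache_path(cache_dir, ruleset_id).exists()
assert capa.rules.cache.load_cached_ruleset(cache_dir, content) is not None
```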

View File

@@ -6,7 +6,6 @@
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import os
import sys
import logging
import textwrap
@@ -28,7 +27,7 @@ def get_binary_file_path():
return str(CD / "data" / "9324d1a8ae37a36ae560c37448c9705a.exe_")
def get_cape_report_file_path():
def get_report_file_path():
return str(
CD
/ "data"
@@ -39,10 +38,6 @@ def get_cape_report_file_path():
)
def get_binexport2_file_path():
return str(CD / "data" / "binexport2" / "mimikatz.exe_.ghidra.BinExport")
def get_rules_path():
return str(CD / ".." / "rules")
@@ -68,10 +63,9 @@ def get_rule_path():
pytest.param("show-capabilities-by-function.py", [get_binary_file_path()]),
pytest.param("show-features.py", [get_binary_file_path()]),
pytest.param("show-features.py", ["-F", "0x407970", get_binary_file_path()]),
pytest.param("show-features.py", ["-P", "MicrosoftEdgeUpdate.exe", get_cape_report_file_path()]),
pytest.param("show-features.py", ["-P", "MicrosoftEdgeUpdate.exe", get_report_file_path()]),
pytest.param("show-unused-features.py", [get_binary_file_path()]),
pytest.param("capa-as-library.py", [get_binary_file_path()]),
# not testing "minimize-vmray-results.py" as we don't currently upload full VMRay analysis archives
pytest.param("capa_as_library.py", [get_binary_file_path()]),
],
)
def test_scripts(script, args):
@@ -80,22 +74,6 @@ def test_scripts(script, args):
assert p.returncode == 0
@pytest.mark.parametrize(
"script,args",
[
pytest.param("inspect-binexport2.py", [get_binexport2_file_path()]),
pytest.param("detect-binexport2-capabilities.py", [get_binexport2_file_path()]),
],
)
def test_binexport_scripts(script, args):
# define sample bytes location
os.environ["CAPA_SAMPLES_DIR"] = str(Path(CD / "data"))
script_path = get_script_path(script)
p = run_program(script_path, args)
assert p.returncode == 0
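A hedged sketch of driving one example script the way these tests do; `run_program` below is an assumed stand-in for the module-local helper, which is not shown in full here, and the `scripts/` layout is assumed from the repository structure:

```python
# a minimal sketch; run_program is an assumed stand-in for the test helper.
import sys
import subprocess
from pathlib import Path

CD = Path(__file__).resolve().parent

def run_program(script_path, args):
    # run the script with the current interpreter, capturing output
    return subprocess.run([sys.executable, str(script_path)] + list(args), capture_output=True)

script = CD / ".." / "scripts" / "show-features.py"  # assumed scripts/ layout
p = run_program(script, [str(CD / "data" / "9324d1a8ae37a36ae560c37448c9705a.exe_")])
assert p.returncode == 0
```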
def test_bulk_process(tmp_path):
# create test directory to recursively analyze
t = tmp_path / "test"

View File

@@ -1,125 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import fixtures
import capa.main
import capa.features.file
import capa.features.insn
import capa.features.common
DYNAMIC_VMRAY_FEATURE_PRESENCE_TESTS = sorted(
[
("93b2d1-vmray", "file", capa.features.common.String("api.%x%x.%s"), True),
("93b2d1-vmray", "file", capa.features.common.String("\\Program Files\\WindowsApps\\does_not_exist"), False),
# file/imports
("93b2d1-vmray", "file", capa.features.file.Import("GetAddrInfoW"), True),
("93b2d1-vmray", "file", capa.features.file.Import("GetAddrInfo"), True),
# thread/api calls
("93b2d1-vmray", "process=(2176:0),thread=2180", capa.features.insn.API("LoadLibraryExA"), True),
("93b2d1-vmray", "process=(2176:0),thread=2180", capa.features.insn.API("LoadLibraryEx"), True),
("93b2d1-vmray", "process=(2176:0),thread=2420", capa.features.insn.API("GetAddrInfoW"), True),
("93b2d1-vmray", "process=(2176:0),thread=2420", capa.features.insn.API("GetAddrInfo"), True),
("93b2d1-vmray", "process=(2176:0),thread=2420", capa.features.insn.API("DoesNotExist"), False),
# call/api
("93b2d1-vmray", "process=(2176:0),thread=2420,call=2361", capa.features.insn.API("GetAddrInfoW"), True),
# call/string argument
(
"93b2d1-vmray",
"process=(2176:0),thread=2420,call=10323",
capa.features.common.String("raw.githubusercontent.com"),
True,
),
# backslashes in paths; see #2428
(
"93b2d1-vmray",
"process=(2176:0),thread=2180,call=267",
capa.features.common.String("C:\\Users\\WhuOXYsD\\Desktop\\filename.exe"),
True,
),
(
"93b2d1-vmray",
"process=(2176:0),thread=2180,call=267",
capa.features.common.String("C:\\\\Users\\\\WhuOXYsD\\\\Desktop\\\\filename.exe"),
False,
),
(
"93b2d1-vmray",
"process=(2176:0),thread=2204,call=2395",
capa.features.common.String("Software\\Microsoft\\Windows\\CurrentVersion\\Policies\\System"),
True,
),
(
"93b2d1-vmray",
"process=(2176:0),thread=2204,call=2395",
capa.features.common.String("Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Policies\\\\System"),
False,
),
# call/number argument
# VirtualAlloc(4096, 4)
("93b2d1-vmray", "process=(2176:0),thread=2420,call=2358", capa.features.insn.Number(4096), True),
("93b2d1-vmray", "process=(2176:0),thread=2420,call=2358", capa.features.insn.Number(4), True),
],
# order tests by (file, item)
# so that our LRU cache is most effective.
key=lambda t: (t[0], t[1]),
)
DYNAMIC_VMRAY_FEATURE_COUNT_TESTS = sorted(
[
# file/imports
("93b2d1-vmray", "file", capa.features.file.Import("GetAddrInfoW"), 1),
# thread/api calls
("93b2d1-vmray", "process=(2176:0),thread=2420", capa.features.insn.API("free"), 1),
("93b2d1-vmray", "process=(2176:0),thread=2420", capa.features.insn.API("GetAddrInfoW"), 5),
# call/api
("93b2d1-vmray", "process=(2176:0),thread=2420,call=2345", capa.features.insn.API("free"), 1),
("93b2d1-vmray", "process=(2176:0),thread=2420,call=2345", capa.features.insn.API("GetAddrInfoW"), 0),
("93b2d1-vmray", "process=(2176:0),thread=2420,call=2361", capa.features.insn.API("GetAddrInfoW"), 1),
# call/string argument
(
"93b2d1-vmray",
"process=(2176:0),thread=2420,call=10323",
capa.features.common.String("raw.githubusercontent.com"),
1,
),
("93b2d1-vmray", "process=(2176:0),thread=2420,call=10323", capa.features.common.String("non_existant"), 0),
# call/number argument
("93b2d1-vmray", "process=(2176:0),thread=2420,call=10315", capa.features.insn.Number(4096), 1),
("93b2d1-vmray", "process=(2176:0),thread=2420,call=10315", capa.features.insn.Number(4), 1),
("93b2d1-vmray", "process=(2176:0),thread=2420,call=10315", capa.features.insn.Number(404), 0),
],
# order tests by (file, item)
# so that our LRU cache is most effective.
key=lambda t: (t[0], t[1]),
)
@fixtures.parametrize(
"sample,scope,feature,expected",
DYNAMIC_VMRAY_FEATURE_PRESENCE_TESTS,
indirect=["sample", "scope"],
)
def test_vmray_features(sample, scope, feature, expected):
fixtures.do_test_feature_presence(fixtures.get_vmray_extractor, sample, scope, feature, expected)
@fixtures.parametrize(
"sample,scope,feature,expected",
DYNAMIC_VMRAY_FEATURE_COUNT_TESTS,
indirect=["sample", "scope"],
)
def test_vmray_feature_counts(sample, scope, feature, expected):
fixtures.do_test_feature_count(fixtures.get_vmray_extractor, sample, scope, feature, expected)
def test_vmray_processes():
# see #2394
path = fixtures.get_data_path_by_name("2f8a79-vmray")
vmre = fixtures.get_vmray_extractor(path)
assert len(vmre.analysis.monitor_processes) == 9
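A hedged sketch of inspecting a VMRay analysis archive directly, using only the fixtures API shown above and assuming the tests/data submodule provides the archive:

```python
# a minimal sketch, assuming the tests/data submodule is checked out.
import fixtures  # tests/fixtures.py

path = fixtures.get_data_path_by_name("2f8a79-vmray")
vmre = fixtures.get_vmray_extractor(path)

# the extractor exposes the parsed analysis, including monitored processes
for process in vmre.analysis.monitor_processes:
    print(process)
assert len(vmre.analysis.monitor_processes) == 9
```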

View File

@@ -1,160 +0,0 @@
# Copyright (C) 2024 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import textwrap
from capa.features.extractors.vmray.models import (
Param,
PEFile,
ElfFile,
FunctionCall,
AnalysisMetadata,
hexint,
xml_to_dict,
)
def test_vmray_model_param():
param_str = textwrap.dedent(
"""
<param name="addrlen" type="signed_32bit" value="16"/>
"""
)
param: Param = Param.model_validate(xml_to_dict(param_str)["param"])
assert param.value is not None
assert hexint(param.value) == 16
def test_vmray_model_param_deref():
param_str = textwrap.dedent(
"""
<param name="buf" type="ptr" value="0xaaaaaaaa">
<deref type="str" value="Hello world"/>
</param>
"""
)
param: Param = Param.model_validate(xml_to_dict(param_str)["param"])
assert param.deref is not None
assert param.deref.value == "Hello world"
def test_vmray_model_function_call():
function_call_str = textwrap.dedent(
"""
<fncall fncall_id="18" process_id="1" thread_id="1" name="sys_time">
<in>
<param name="tloc" type="unknown" value="0x0"/>
</in>
<out>
<param name="ret_val" type="unknown" value="0xaaaaaaaa"/>
</out>
</fncall>
"""
)
function_call: FunctionCall = FunctionCall.model_validate(xml_to_dict(function_call_str)["fncall"])
assert function_call.fncall_id == 18
assert function_call.process_id == 1
assert function_call.thread_id == 1
assert function_call.name == "time"
assert function_call.params_in is not None
assert function_call.params_in.params[0].value is not None
assert hexint(function_call.params_in.params[0].value) == 0
assert function_call.params_out is not None
assert function_call.params_out.params[0].value is not None
assert hexint(function_call.params_out.params[0].value) == 2863311530
def test_vmray_model_analysis_metadata():
analysis_metadata: AnalysisMetadata = AnalysisMetadata.model_validate_json(
"""
{
"sample_type": "Linux ELF Executable (x86-64)",
"submission_filename": "abcd1234"
}
"""
)
assert analysis_metadata.sample_type == "Linux ELF Executable (x86-64)"
assert analysis_metadata.submission_filename == "abcd1234"
def test_vmray_model_elffile():
elffile: ElfFile = ElfFile.model_validate_json(
"""
{
"sections": [
{
"header": {
"sh_name": "abcd1234",
"sh_addr": 2863311530
}
}
]
}
"""
)
assert elffile.sections[0].header.sh_name == "abcd1234"
assert elffile.sections[0].header.sh_addr == 2863311530
def test_vmray_model_pefile():
pefile: PEFile = PEFile.model_validate_json(
"""
{
"basic_info": {
"image_base": 2863311530
},
"imports": [
{
"apis": [
{
"address": 2863311530,
"api": {
"name": "Sleep"
}
}
],
"dll": "KERNEL32.dll"
}
],
"sections": [
{
"name": ".text",
"virtual_address": 2863311530
}
],
"exports": [
{
"api": {
"name": "HelloWorld",
"ordinal": 10
},
"address": 2863311530
}
]
}
"""
)
assert pefile.basic_info.image_base == 2863311530
assert pefile.imports[0].dll == "KERNEL32.dll"
assert pefile.imports[0].apis[0].address == 2863311530
assert pefile.imports[0].apis[0].api.name == "Sleep"
assert pefile.sections[0].name == ".text"
assert pefile.sections[0].virtual_address == 2863311530
assert pefile.exports[0].address == 2863311530
assert pefile.exports[0].api.name == "HelloWorld"
assert pefile.exports[0].api.ordinal == 10
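These models validate dict-converted VMRay XML with pydantic; a minimal sketch of the parse-and-dereference pattern, grounded in the tests above:

```python
# a minimal sketch of parsing a VMRay <param> element, as the tests above do.
import textwrap

from capa.features.extractors.vmray.models import Param, hexint, xml_to_dict

param_xml = textwrap.dedent(
    """
    <param name="buf" type="ptr" value="0xaaaaaaaa">
        <deref type="str" value="Hello world"/>
    </param>
    """
)

param = Param.model_validate(xml_to_dict(param_xml)["param"])
assert param.value is not None
assert hexint(param.value) == 0xAAAAAAAA  # hexint handles "0x..." strings
assert param.deref is not None and param.deref.value == "Hello world"
```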

View File

@@ -4,6 +4,5 @@
"tabWidth": 4,
"singleQuote": false,
"printWidth": 120,
"trailingComma": "none",
"htmlWhitespaceSensitivity": "ignore"
"trailingComma": "none"
}

View File

@@ -1,6 +1,6 @@
# Development Guide for capa Explorer Web
# Development Guide for Capa Explorer Web
This guide will help you set up the capa Explorer Web project for local development.
This guide will help you set up the Capa Explorer Web project for local development.
## Prerequisites
@@ -31,7 +31,7 @@ Before you begin, ensure you have the following installed:
npm run dev
```
This will start the Vite development server. The application should now be running at `http://localhost:<port>`.
This will start the Vite development server. The application should now be running at `http://localhost:<port>`
## Project Structure
@@ -80,7 +80,7 @@ Or, you can build a standalone bundle application that can be used offline:
npm run build:bundle
```
This will generate an offline HTML bundle file in the `capa-explorer-web/` directory.
This will generate an offline HTML bundle file in the `dist/` directory.
## Testing
@@ -98,13 +98,12 @@ We use ESLint for linting and Prettier for code formatting. Run the linter with:
```
npm run lint
npm run format:check
npm run format
```
## Working with PrimeVue Components
capa Explorer Web uses the PrimeVue UI component library. When adding new features or modifying existing ones, refer to the [PrimeVue documentation](https://primevue.org/vite) for available components and their usage.
Capa Explorer Web uses the PrimeVue UI component library. When adding new features or modifying existing ones, refer to the [PrimeVue documentation](https://primevue.org/vite) for available components and their usage.
## Best Practices

View File

@@ -1,6 +1,6 @@
# capa Explorer Web
# Capa Explorer Web
capa Explorer Web is a browser-based user interface for exploring program capabilities identified by capa. It provides an intuitive and interactive way to analyze and visualize the results of capa analysis.
Capa Explorer Web is a browser-based user interface for exploring program capabilities identified by capa. It provides an intuitive and interactive way to analyze and visualize the results of capa analysis.
## Features
@@ -11,34 +11,31 @@ capa Explorer Web is a browser-based user interface for exploring program capabi
## Getting Started
1. **Access the application**: Open capa Explorer Web in your web browser.
You can start using capa Explorer Web by accessing [https://mandiant.github.io/capa](https://mandiant.github.io/capa/explorer) or running it locally by downloading the offline release from the top right-hand corner and opening it in your web browser.
1. **Access the Application**: Open Capa Explorer Web in your web browser.
You can start using Capa Explorer Web by accessing [https://mandiant.github.io/capa](https://mandiant.github.io/capa/) or running it locally by downloading the offline release in the [releases](https://github.com/mandiant/capa/releases) section and loading it in your browser.
2. **Import capa results**:
2. **Import capa Results**:
- Click on "Upload from local" to select a capa analysis document file from your computer (with a version higher than 7.0.0).
- You can generate the analysis document by running `capa.exe -j results.json sample.exe_`
- Or, paste a URL to a capa JSON file and click the arrow button to load it.
- As with the other import mechanisms, both plain (`.json`) and GZIP-compressed JSON (`.json.gz`) files are supported.
- Alternatively, use the "Preview Static" or "Preview Dynamic" for sample data.
3. **Explore the results**:
3. **Explore the Results**:
- Use the tree view to navigate through the identified capabilities.
- Toggle between different views using the checkboxes in the settings panel:
- "Show capabilities by function/process" for grouped analysis.
- "Show distinct library rule matches" to include or exclude library rules.
- "Show columns filters" to show per-column search filters.
- "Show library rule matches" to include or exclude library rules.
4. **Interact with the results**:
- Expand/collapse nodes in the table to see more details by clicking rows or clicking arrow icons.
4. **Interact with the Data**:
- Expand/collapse nodes in the table to see more details.
- Use the search and filter options to find specific features, functions or capabilities (rules).
- Right click on rule names (and `match` nodes) to view their source code or additional information.
- Right click on rule names to view their source code or additional information.
## Feedback and Contributions
We welcome your feedback and contributions to improve the web-based capa explorer. Please report any issues or suggest enhancements through the `capa` GitHub repository.
We welcome your feedback and contributions to improve the web-based Capa Explorer. Please report any issues or suggest enhancements through the `capa` GitHub repository.
---
For developers interested in building or contributing to capa Explorer Web, please refer to our [Development Guide](DEVELOPMENT.md).
For developers interested in building or contributing to Capa Explorer WebUI, please refer to our [Development Guide](DEVELOPMENT.md).

Some files were not shown because too many files have changed in this diff.