mirror of
https://github.com/rosenpass/rosenpass.git
synced 2025-12-06 04:40:55 -08:00
Compare commits
78 Commits
dev/karo/k
...
analyze_py
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6af6fb6b2a | ||
|
|
e3b43a59bf | ||
|
|
3942bfa65e | ||
|
|
7b1a62b6bb | ||
|
|
d1a33981b1 | ||
|
|
f20fd1acc3 | ||
|
|
3ce0d262d9 | ||
|
|
a389e3c222 | ||
|
|
cb16bd44bb | ||
|
|
3f4c7c2786 | ||
|
|
475f4593f9 | ||
|
|
13c5edbe44 | ||
|
|
b1ac5d9244 | ||
|
|
a4ff3d4eb5 | ||
|
|
19ebce79f1 | ||
|
|
3f2a9bb96b | ||
|
|
8dfa67a2dd | ||
|
|
f31d635df8 | ||
|
|
75702dfc03 | ||
|
|
3af479a27e | ||
|
|
e76e5b253f | ||
|
|
0d944afbd8 | ||
|
|
8d81be56f3 | ||
|
|
16b3914c46 | ||
|
|
ae060f7cfb | ||
|
|
afa6212264 | ||
|
|
3c744c253b | ||
|
|
53e6553c8b | ||
|
|
4cd2cdfcff | ||
|
|
a5ae83e726 | ||
|
|
9327c2c4f3 | ||
|
|
b140c56359 | ||
|
|
3e03e47935 | ||
|
|
7003671cde | ||
|
|
91fc50c1e1 | ||
|
|
d20bb137c9 | ||
|
|
c259be76c8 | ||
|
|
b1a7d94295 | ||
|
|
48b7bb2f14 | ||
|
|
77e3682820 | ||
|
|
8bad02bcda | ||
|
|
864407f90b | ||
|
|
4deee59e90 | ||
|
|
c82ed332f6 | ||
|
|
5ced547a07 | ||
|
|
bdaedc4e2a | ||
|
|
4e77e67f10 | ||
|
|
f33c3a6928 | ||
|
|
348650d507 | ||
|
|
c318cf7bac | ||
|
|
d9a6430472 | ||
|
|
9656fa7025 | ||
|
|
53ddad30f1 | ||
|
|
7e8e502bca | ||
|
|
d81649c1d1 | ||
|
|
da642186f2 | ||
|
|
ad6d053015 | ||
|
|
73d180c4cf | ||
|
|
d44a96e6b6 | ||
|
|
ff20fbbe3a | ||
|
|
5232ab3a8e | ||
|
|
2e27753f4a | ||
|
|
2628adbac8 | ||
|
|
744cf6fb50 | ||
|
|
4bdc464b5b | ||
|
|
eb64f50d99 | ||
|
|
73b04cdc12 | ||
|
|
437c591b2d | ||
|
|
7cbd6576d4 | ||
|
|
ac5a5cf76d | ||
|
|
18359ef3f4 | ||
|
|
3e161d8c8d | ||
|
|
56db757de3 | ||
|
|
5ff3bc944e | ||
|
|
fb93258fcc | ||
|
|
9ab120843a | ||
|
|
25f2abac80 | ||
|
|
c7ec12be9a |
@@ -3,11 +3,6 @@ secret_key = "rp-a-secret-key"
|
||||
listen = ["127.0.0.1:9999"]
|
||||
verbosity = "Verbose"
|
||||
|
||||
[api]
|
||||
listen_path = []
|
||||
listen_fd = []
|
||||
stream_fd = []
|
||||
|
||||
[[peers]]
|
||||
public_key = "rp-b-public-key"
|
||||
endpoint = "127.0.0.1:9998"
|
||||
|
||||
@@ -3,11 +3,6 @@ secret_key = "rp-b-secret-key"
|
||||
listen = ["127.0.0.1:9998"]
|
||||
verbosity = "Verbose"
|
||||
|
||||
[api]
|
||||
listen_path = []
|
||||
listen_fd = []
|
||||
stream_fd = []
|
||||
|
||||
[[peers]]
|
||||
public_key = "rp-a-public-key"
|
||||
endpoint = "127.0.0.1:9999"
|
||||
|
||||
2
.github/workflows/bench-primitives.yml
vendored
2
.github/workflows/bench-primitives.yml
vendored
@@ -4,7 +4,7 @@ permissions:
|
||||
contents: write
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
#pull_request:
|
||||
push:
|
||||
|
||||
env:
|
||||
|
||||
2
.github/workflows/bench-protocol.yml
vendored
2
.github/workflows/bench-protocol.yml
vendored
@@ -4,7 +4,7 @@ permissions:
|
||||
contents: write
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
#pull_request:
|
||||
push:
|
||||
|
||||
env:
|
||||
|
||||
128
.github/workflows/supply-chain.yml
vendored
128
.github/workflows/supply-chain.yml
vendored
@@ -28,10 +28,10 @@ jobs:
|
||||
~/.cargo/registry/cache/
|
||||
~/.cache/cargo-supply-chain/
|
||||
key: cargo-supply-chain-cache
|
||||
- name: Install stable toolchain # Cargo-supply-chain is incompatible with older versions
|
||||
- name: Install nightly toolchain
|
||||
run: |
|
||||
rustup toolchain install stable
|
||||
rustup default stable
|
||||
rustup toolchain install nightly
|
||||
rustup override set nightly
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.tool_cache }}/cargo-supply-chain
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
- name: Add the tool cache directory to the search path
|
||||
run: echo "${{ runner.tool_cache }}/cargo-supply-chain/bin" >> $GITHUB_PATH
|
||||
- name: Ensure that the tool cache is populated with the cargo-supply-chain binary
|
||||
run: cargo +stable install --root ${{ runner.tool_cache }}/cargo-supply-chain cargo-supply-chain
|
||||
run: cargo install --root ${{ runner.tool_cache }}/cargo-supply-chain cargo-supply-chain
|
||||
- name: Update data for cargo-supply-chain
|
||||
run: cargo supply-chain update
|
||||
- name: Generate cargo-supply-chain report about publishers
|
||||
@@ -54,6 +54,8 @@ jobs:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
@@ -61,10 +63,10 @@ jobs:
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
key: cargo-vet-cache
|
||||
- name: Install stable toolchain # Since we are running/compiling cargo-vet, we should rely on the stable toolchain.
|
||||
- name: Install nightly toolchain
|
||||
run: |
|
||||
rustup toolchain install stable
|
||||
rustup default stable
|
||||
rustup toolchain install nightly
|
||||
rustup override set nightly
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.tool_cache }}/cargo-vet
|
||||
@@ -72,24 +74,104 @@ jobs:
|
||||
- name: Add the tool cache directory to the search path
|
||||
run: echo "${{ runner.tool_cache }}/cargo-vet/bin" >> $GITHUB_PATH
|
||||
- name: Ensure that the tool cache is populated with the cargo-vet binary
|
||||
run: cargo +stable install --root ${{ runner.tool_cache }}/cargo-vet cargo-vet
|
||||
- name: Regenerate vet exemptions for dependabot PRs
|
||||
if: github.actor == 'dependabot[bot]' # Run only for Dependabot PRs
|
||||
run: cargo vet regenerate exemptions
|
||||
- name: Check for changes in case of dependabot PR
|
||||
if: github.actor == 'dependabot[bot]' # Run only for Dependabot PRs
|
||||
run: git diff --exit-code || echo "Changes detected, committing..."
|
||||
- name: Commit and push changes for dependabot PRs
|
||||
if: success() && github.actor == 'dependabot[bot]'
|
||||
run: cargo install --root ${{ runner.tool_cache }}/cargo-vet cargo-vet
|
||||
- name: Check which event triggered this CI run, a push or a pull request.
|
||||
run: |
|
||||
git fetch origin ${{ github.head_ref }}
|
||||
git switch ${{ github.head_ref }}
|
||||
git config --global user.name "github-actions[bot]"
|
||||
git config --global user.email "github-actions@github.com"
|
||||
git add supply-chain/*
|
||||
git commit -m "Regenerate cargo vet exemptions"
|
||||
git push origin ${{ github.head_ref }}
|
||||
EVENT_NAME="${{ github.event_name }}"
|
||||
IS_PR="false"
|
||||
IS_PUSH="false"
|
||||
if [[ "$EVENT_NAME" == "pull_request" ]]; then
|
||||
echo "This CI run was triggered in the context of a pull request."
|
||||
IS_PR="true"
|
||||
elif [[ "$EVENT_NAME" == "push" ]]; then
|
||||
echo "This CI run was triggered in the context of a push."
|
||||
IS_PUSH="true"
|
||||
else
|
||||
echo "ERROR: This CI run was not triggered in the context of a pull request or a push. Exiting with error."
|
||||
exit 1
|
||||
fi
|
||||
echo "IS_PR=$IS_PR" >> $GITHUB_ENV
|
||||
echo "IS_PUSH=$IS_PUSH" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
- name: Check if last commit was by Dependabot
|
||||
run: |
|
||||
# Depending on the trigger for, the relevant commit has to be deduced differently.
|
||||
if [[ "$IS_PR" == true ]]; then
|
||||
# This is the commit ID for the last commit to the head branch of the pull request.
|
||||
# If we used github.sha here instead, it would point to a merge commit between the PR and the main branch, which is only created for the CI run.
|
||||
SHA="${{ github.event.pull_request.head.sha }}"
|
||||
REF="${{ github.head_ref }}"
|
||||
elif [[ "$IS_PUSH" == "true" ]]; then
|
||||
SHA="${{ github.sha }}" # This is the last commit to the branch.
|
||||
REF=${GITHUB_REF#refs/heads/}
|
||||
else
|
||||
echo "ERROR: This action only supports pull requests and push events as triggers. Exiting with error."
|
||||
exit 1
|
||||
fi
|
||||
echo "Commit SHA is $SHA"
|
||||
echo "Branch is $REF"
|
||||
echo "REF=$REF" >> $GITHUB_ENV
|
||||
|
||||
COMMIT_AUTHOR=$(gh api repos/${{ github.repository }}/commits/$SHA --jq .author.login) # .author.login might be null, but for dependabot it will always be there and cannot be spoofed in contrast to .commit.author.name
|
||||
echo "The author of the last commit is $COMMIT_AUTHOR"
|
||||
if [[ "$COMMIT_AUTHOR" == "dependabot[bot]" ]]; then
|
||||
echo "The last commit was made by dependabot"
|
||||
LAST_COMMIT_IS_BY_DEPENDABOT=true
|
||||
else
|
||||
echo "The last commit was made by $COMMIT_AUTHOR not by dependabot"
|
||||
LAST_COMMIT_IS_BY_DEPENDABOT=false
|
||||
fi
|
||||
echo "LAST_COMMIT_IS_BY_DEPENDABOT=$LAST_COMMIT_IS_BY_DEPENDABOT" >> $GITHUB_ENV
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
shell: bash
|
||||
- name: Check if the last commit's message ends in "--regenerate-exemptions"
|
||||
run: |
|
||||
# Get commit message
|
||||
COMMIT_MESSAGE=$(git log -1 --pretty=format:"%s")
|
||||
if [[ "$COMMIT_MESSAGE" == *"--regenerate-exemptions" ]]; then
|
||||
echo "The last commit message ends in --regenerate-exemptions"
|
||||
REGEN_EXEMP=true
|
||||
else
|
||||
echo "The last commit message does not end in --regenerate-exemptions"
|
||||
REGEN_EXEMP=false
|
||||
fi
|
||||
echo "REGEN_EXEMP=$REGEN_EXEMP" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
- name: Check if the CI run happens in the context of a dependabot PR # Even if a PR is created by dependabot, the last commit can, and often should be, the regeneration of the cargo vet exemptions. It could also be from an individual making manual changes.
|
||||
run: |
|
||||
IN_DEPENDABOT_PR_CONTEXT="false"
|
||||
if [[ $IS_PR == "true" && "${{ github.event.pull_request.user.login }}" == "dependabot[bot]" ]]; then
|
||||
IN_DEPENDABOT_PR_CONTEXT="true"
|
||||
echo "This CI run is in the context of PR by dependabot."
|
||||
else
|
||||
echo "This CI run is NOT in the context of PR by dependabot."
|
||||
IN_DEPENDABOT_PR_CONTEXT="false"
|
||||
fi
|
||||
echo "IN_DEPENDABOT_PR_CONTEXT=$IN_DEPENDABOT_PR_CONTEXT" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
- uses: actions/checkout@v4
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true'
|
||||
with:
|
||||
token: ${{ secrets.CI_BOT_PAT }}
|
||||
- name: In case of a dependabot PR, ensure that we are not in a detached HEAD state
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true'
|
||||
run: |
|
||||
git fetch origin $REF # ensure that we are up to date.
|
||||
git switch $REF # ensure that we are NOT in a detached HEAD state. This is important for the commit action in the end
|
||||
shell: bash
|
||||
- name: Regenerate cargo vet exemptions if we are in the context of a PR created by dependabot and the last commit is by dependabot or a regeneration of cargo vet exemptions was explicitly requested.
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true' && (env.LAST_COMMIT_IS_BY_DEPENDABOT == 'true' || env.REGEN_EXEMP=='true') # Run only for Dependabot PRs or if specifically requested
|
||||
run: cargo vet regenerate exemptions
|
||||
- name: Commit and push changes if we are in the context of a PR created by dependabot and the last commit is by dependabot or a regeneration of cargo vet exemptions was explicitly requested.
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true' && (env.LAST_COMMIT_IS_BY_DEPENDABOT == 'true' || env.REGEN_EXEMP=='true')
|
||||
uses: stefanzweifel/git-auto-commit-action@v6
|
||||
with:
|
||||
commit_message: Regenerate cargo vet exemptions
|
||||
commit_user_name: rosenpass-ci-bot[bot]
|
||||
commit_user_email: noreply@rosenpass.eu
|
||||
commit_author: Rosenpass CI Bot <noreply@rosenpass.eu>
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.CI_BOT_PAT }}
|
||||
- name: Invoke cargo-vet
|
||||
run: cargo vet --locked
|
||||
|
||||
35
Cargo.lock
generated
35
Cargo.lock
generated
@@ -408,9 +408,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
|
||||
|
||||
[[package]]
|
||||
name = "clap_mangen"
|
||||
version = "0.2.24"
|
||||
version = "0.2.29"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fbae9cbfdc5d4fa8711c09bd7b83f644cb48281ac35bf97af3e47b0675864bdf"
|
||||
checksum = "27b4c3c54b30f0d9adcb47f25f61fcce35c4dd8916638c6b82fbd5f4fb4179e2"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"roff",
|
||||
@@ -1153,6 +1153,17 @@ dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "io-uring"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
|
||||
dependencies = [
|
||||
"bitflags 2.8.0",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ipc-channel"
|
||||
version = "0.18.3"
|
||||
@@ -1246,9 +1257,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.169"
|
||||
version = "0.2.174"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
|
||||
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
|
||||
|
||||
[[package]]
|
||||
name = "libcrux"
|
||||
@@ -1408,7 +1419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-targets 0.48.5",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2461,12 +2472,12 @@ checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.8"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8"
|
||||
checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2630,20 +2641,22 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.44.2"
|
||||
version = "1.47.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48"
|
||||
checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"bytes",
|
||||
"io-uring",
|
||||
"libc",
|
||||
"mio",
|
||||
"parking_lot",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"slab",
|
||||
"socket2",
|
||||
"tokio-macros",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -48,7 +48,7 @@ rand = "0.8.5"
|
||||
typenum = "1.17.0"
|
||||
log = { version = "0.4.22" }
|
||||
clap = { version = "4.5.23", features = ["derive"] }
|
||||
clap_mangen = "0.2.24"
|
||||
clap_mangen = "0.2.29"
|
||||
clap_complete = "4.5.40"
|
||||
serde = { version = "1.0.217", features = ["derive"] }
|
||||
arbitrary = { version = "1.4.1", features = ["derive"] }
|
||||
@@ -67,7 +67,7 @@ chacha20poly1305 = { version = "0.10.1", default-features = false, features = [
|
||||
zerocopy = { version = "0.7.35", features = ["derive"] }
|
||||
home = "=0.5.9" # 5.11 requires rustc 1.81
|
||||
derive_builder = "0.20.1"
|
||||
tokio = { version = "1.42", features = ["macros", "rt-multi-thread"] }
|
||||
tokio = { version = "1.46", features = ["macros", "rt-multi-thread"] }
|
||||
postcard = { version = "1.1.1", features = ["alloc"] }
|
||||
libcrux = { version = "0.0.2-pre.2" }
|
||||
libcrux-chacha20poly1305 = { version = "0.0.2-beta.3" }
|
||||
|
||||
@@ -83,6 +83,33 @@ impl HashDomain {
|
||||
Ok(Self(new_key, self.1))
|
||||
}
|
||||
|
||||
/// Version of [Self::mix] that accepts an iterator and mixes all values from the iterator into
|
||||
/// this hash domain.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use rosenpass_ciphers::{hash_domain::HashDomain, KeyedHash};
|
||||
///
|
||||
/// let hasher = HashDomain::zero(KeyedHash::keyed_shake256());
|
||||
/// assert_eq!(
|
||||
/// hasher.clone().mix(b"Hello")?.mix(b"World")?.into_value(),
|
||||
/// hasher.clone().mix_many([b"Hello", b"World"])?.into_value()
|
||||
/// );
|
||||
///
|
||||
/// Ok::<(), anyhow::Error>(())
|
||||
/// ```
|
||||
pub fn mix_many<I, T>(mut self, it: I) -> Result<Self>
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
for e in it {
|
||||
self = self.mix(e.as_ref())?;
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Creates a new [SecretHashDomain] by mixing in a new key `v`
|
||||
/// by calling [SecretHashDomain::invoke_primitive] with this
|
||||
/// [HashDomain]'s key as `k` and `v` as `d`.
|
||||
@@ -161,6 +188,46 @@ impl SecretHashDomain {
|
||||
Self::invoke_primitive(self.0.secret(), v, self.1)
|
||||
}
|
||||
|
||||
/// Version of [Self::mix] that accepts an iterator and mixes all values from the iterator into
|
||||
/// this hash domain.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use rosenpass_ciphers::{hash_domain::HashDomain, KeyedHash};
|
||||
///
|
||||
/// rosenpass_secret_memory::secret_policy_use_only_malloc_secrets();
|
||||
///
|
||||
/// let hasher = HashDomain::zero(KeyedHash::keyed_shake256());
|
||||
/// assert_eq!(
|
||||
/// hasher
|
||||
/// .clone()
|
||||
/// .turn_secret()
|
||||
/// .mix(b"Hello")?
|
||||
/// .mix(b"World")?
|
||||
/// .into_secret()
|
||||
/// .secret(),
|
||||
/// hasher
|
||||
/// .clone()
|
||||
/// .turn_secret()
|
||||
/// .mix_many([b"Hello", b"World"])?
|
||||
/// .into_secret()
|
||||
/// .secret(),
|
||||
/// );
|
||||
|
||||
/// Ok::<(), anyhow::Error>(())
|
||||
/// ```
|
||||
pub fn mix_many<I, T>(mut self, it: I) -> Result<Self>
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
for e in it {
|
||||
self = self.mix(e.as_ref())?;
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Creates a new [SecretHashDomain] by mixing in a new key `v`
|
||||
/// by calling [SecretHashDomain::invoke_primitive] with the key of this
|
||||
/// [HashDomainNamespace] as `k` and `v` as `d`.
|
||||
|
||||
8
marzipan/README.md
Normal file
8
marzipan/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Rewriting analyze.sh in Python
|
||||
|
||||
* `../analyze.sh` is the old script
|
||||
* `src/__init__.py` is the new script
|
||||
|
||||
* call the old script from the Rosenpass repository's root directory with `./analyze.sh`
|
||||
* call the new script from the marzipan directory:
|
||||
* `nix run .# -- analyze $repo` where `$repo` is the absolute(?) path to the root directory of the Rosenpass repository.
|
||||
64
marzipan/TODO.md
Normal file
64
marzipan/TODO.md
Normal file
@@ -0,0 +1,64 @@
|
||||
# TODO for the project of rewriting Marzipan
|
||||
|
||||
## Done
|
||||
|
||||
* ~~figure out why ProVerif is started on the non-processed mpv file~~
|
||||
* ~~rework rebound warnings (`clean_warnings` Bash function)~~
|
||||
```bash
|
||||
rosenpass$ rosenpass-marzipan run-proverif target/proverif/03_identity_hiding_responder.entry.o.pv target/proverif/03_identity_hiding_responder.entry.log
|
||||
```
|
||||
* ~~provide log parameter to `rosenpass-marzipan`-call~~ (no, it was intentionally not used)
|
||||
* ~~cpp pre-processing stuff~~
|
||||
* ~~awk pre-processing stuff~~
|
||||
* ~~`pretty_output` Bash function~~
|
||||
* ~~pretty_output_line~~
|
||||
* ~~click function intervention weirdness~~
|
||||
* ~~why is everything red in the pretty output? (see line 96 in __init__.py)~~
|
||||
* ~~awk RESULT flush in marzipan()~~
|
||||
* ~~move the whole metaverif function to Python~~
|
||||
* ~move the whole analyze function to Python~
|
||||
* ~find the files~
|
||||
* ~start subprocesses in parallel~
|
||||
* ~wait for them to finish~
|
||||
* ~~rebase from main~~
|
||||
* ~~see if we still need the `extra_args is None` check in `_run_proverif`~`
|
||||
* ~~set colors differently to prevent injection attack~~
|
||||
* ~~by calling a function~~
|
||||
* ~~by prepared statements~~
|
||||
* ~~standalone function parse_result_line is no longer necessary~~
|
||||
* ~~is the clean function still necessary?~~
|
||||
* ~~implement better main function for click~~
|
||||
* ~~why does analyze fail when the target/proverif directory is not empty?~~
|
||||
* ~~return an exit status that is meaningful for CI~~
|
||||
* ~~exception handling in analyze() and in run_proverif()~~
|
||||
* ~~refactor filtering in run_proverif (see karo's comment)~~
|
||||
* ~configurable target directory~
|
||||
* ~lark parser: multiline comments, how???~
|
||||
|
||||
## Next Steps
|
||||
|
||||
* integrate marzipan.awk into Python, somehow
|
||||
* options term special cases (c.f. manual page 133, starting with "fun" term)
|
||||
* complete with CryptoVerif options
|
||||
* error when trying with: `nix run .# -- parse ../target/proverif/01_secrecy.entry.i.pv`
|
||||
* `in(C, Cinit_conf(Ssskm, Spsk, Sspkt, ic));`
|
||||
* ^
|
||||
* rewrite marzipan.awk into Python/LARK
|
||||
* define a LARK grammar for marzipan.awk rules
|
||||
* write python code for processing marzipan rules, e.g. alias replacement (step: i.pv->o.pv)
|
||||
* do not assume that the repo path has subdir marzipan
|
||||
* do not assume that the repo path has subdir analysis
|
||||
* rewrite cpp into Python/LARK (step: mpv->i.pv)
|
||||
* integrate the Nix flake into the main Nix flake
|
||||
* pull the gawk dependency into the Nix flake
|
||||
* think about next steps
|
||||
* integrate this upstream, into the CI?
|
||||
* “make it beautiful” steps? more resiliency to working directory?
|
||||
* rewrite our awk usages into Python/…?
|
||||
* yes, possibly as extension to the LARK grammar
|
||||
* and rewrite the AST within Python
|
||||
* reconstruct ProVerif input file for ProVerif
|
||||
* rewrite our CPP usages into Python/…?
|
||||
* low priority: nested comments in ProVerif code
|
||||
|
||||
“it replaces the Bash script and is idiomatic Python code”
|
||||
190
marzipan/flake.lock
generated
Normal file
190
marzipan/flake.lock
generated
Normal file
@@ -0,0 +1,190 @@
|
||||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1726560853,
|
||||
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-github-actions": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"poetry2nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1729742964,
|
||||
"narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-github-actions",
|
||||
"rev": "e04df33f62cdcf93d73e9a04142464753a16db67",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-github-actions",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1736166416,
|
||||
"narHash": "sha256-U47xeACNBpkSO6IcCm0XvahsVXpJXzjPIQG7TZlOToU=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "b30f97d8c32d804d2d832ee837d0f1ca0695faa5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1730157240,
|
||||
"narHash": "sha256-P8wF4ag6Srmpb/gwskYpnIsnspbjZlRvu47iN527ABQ=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "75e28c029ef2605f9841e0baa335d70065fe7ae2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"poetry2nix": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nix-github-actions": "nix-github-actions",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"systems": "systems_3",
|
||||
"treefmt-nix": "treefmt-nix"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1736280331,
|
||||
"narHash": "sha256-mkVHnky9h/s2EA+t9eEC8qxgcNTE3V+vb/9XgG4fCig=",
|
||||
"owner": "nix-community",
|
||||
"repo": "poetry2nix",
|
||||
"rev": "4d260d908f3d95fa4b3ef6a98781ff64e1eede22",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "poetry2nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"poetry2nix": "poetry2nix"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"treefmt-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"poetry2nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1730120726,
|
||||
"narHash": "sha256-LqHYIxMrl/1p3/kvm2ir925tZ8DkI0KA10djk8wecSk=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "9ef337e492a5555d8e17a51c911ff1f02635be15",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
18
marzipan/flake.nix
Normal file
18
marzipan/flake.nix
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
inputs.poetry2nix.url = "github:nix-community/poetry2nix";
|
||||
inputs.flake-utils.url = "github:numtide/flake-utils";
|
||||
|
||||
outputs = (inputs:
|
||||
let scoped = (scope: scope.result);
|
||||
in scoped rec {
|
||||
inherit (builtins) removeAttrs;
|
||||
|
||||
result = (import ./nix/init.nix) {
|
||||
scoped = scoped;
|
||||
flake.self = inputs.self;
|
||||
flake.inputs = removeAttrs inputs ["self"];
|
||||
};
|
||||
}
|
||||
);
|
||||
}
|
||||
1220
marzipan/nix/hyuga/poetry.lock
generated
Normal file
1220
marzipan/nix/hyuga/poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
16
marzipan/nix/hyuga/pyproject.toml
Normal file
16
marzipan/nix/hyuga/pyproject.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[tool.poetry]
|
||||
name = "hyuga-language-server-installer"
|
||||
version = "0.1.0"
|
||||
description = ""
|
||||
authors = []
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.12,<3.13"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
hyuga = "^1.0.0"
|
||||
poetry = "^2.0.0"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
32
marzipan/nix/init.nix
Normal file
32
marzipan/nix/init.nix
Normal file
@@ -0,0 +1,32 @@
|
||||
outer_ctx: outer_ctx.scoped rec {
|
||||
inherit (builtins) trace;
|
||||
|
||||
ctx = outer_ctx // { inherit config; };
|
||||
|
||||
inherit (ctx) scoped;
|
||||
|
||||
inherit (ctx.flake.inputs) nixpkgs flake-utils;
|
||||
inherit (nixpkgs.lib) genAttrs zipAttrsWith;
|
||||
inherit (nixpkgs.lib.debug) traceVal;
|
||||
inherit (flake-utils.lib) allSystems eachSystem;
|
||||
|
||||
result = {
|
||||
devShells = eachSupportedSystem (system: (setupSystem system).devShells);
|
||||
packages = eachSupportedSystem (system: (setupSystem system).packages);
|
||||
apps = eachSupportedSystem (system: (setupSystem system).apps);
|
||||
};
|
||||
|
||||
setupSystem = (system_name: scoped rec {
|
||||
result = (import ./system.nix) (ctx // {
|
||||
system.name = system_name;
|
||||
system.pkgs = nixpkgs.legacyPackages.${system_name};
|
||||
});
|
||||
});
|
||||
|
||||
config = {
|
||||
supportedSystems = allSystems;
|
||||
poetry.projectDir = ctx.flake.self;
|
||||
};
|
||||
|
||||
eachSupportedSystem = genAttrs config.supportedSystems;
|
||||
}
|
||||
47
marzipan/nix/system.nix
Normal file
47
marzipan/nix/system.nix
Normal file
@@ -0,0 +1,47 @@
|
||||
ctx: ctx.scoped rec {
|
||||
inherit (ctx.system) pkgs;
|
||||
inherit (ctx.flake.inputs) poetry2nix flake-utils;
|
||||
inherit (pkgs) mkShellNoCC writeShellApplication;
|
||||
inherit (flake-utils.lib) mkApp;
|
||||
|
||||
poetryCtx = poetry2nix.lib.mkPoetry2Nix { inherit pkgs; };
|
||||
inherit (poetryCtx) mkPoetryEnv mkPoetryApplication;
|
||||
|
||||
deps = [poetryEnv];
|
||||
dev-deps = []
|
||||
++ deps
|
||||
++ [poetryHyugaEnv]
|
||||
++ (with pkgs; [poetry]);
|
||||
|
||||
poetryCfg = ctx.config.poetry // { overrides = poetryOverrides; };
|
||||
poetryEnv = mkPoetryEnv poetryCfg;
|
||||
|
||||
poetryHyugaCfg = poetryCfg // { projectDir = ./hyuga; };
|
||||
poetryHyugaEnv = mkPoetryEnv poetryHyugaCfg;
|
||||
|
||||
poetryOverrides = poetryCtx.defaultPoetryOverrides.extend (final: prev: {
|
||||
hyuga = prev.hyuga.overridePythonAttrs (old: {
|
||||
buildInputs = []
|
||||
++ (old.buildInputs or [ ])
|
||||
++ [ final.poetry-core ];
|
||||
preferWheel = true;
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
result.packages.default = mkPoetryApplication poetryCfg;
|
||||
result.devShells.default = mkShellNoCC {
|
||||
packages = dev-deps;
|
||||
};
|
||||
|
||||
result.apps.replPython = mkShellApp "python-repl" ''python'';
|
||||
result.apps.replHy = mkShellApp "hy-repl" ''hy'';
|
||||
|
||||
mkShellApp = (name: script: mkApp {
|
||||
drv = writeShellApplication {
|
||||
inherit name;
|
||||
text = script;
|
||||
runtimeInputs = dev-deps;
|
||||
};
|
||||
});
|
||||
}
|
||||
1415
marzipan/poetry.lock
generated
Normal file
1415
marzipan/poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
31
marzipan/pyproject.toml
Normal file
31
marzipan/pyproject.toml
Normal file
@@ -0,0 +1,31 @@
|
||||
[tool.poetry]
|
||||
name = "rosenpass-marzipan"
|
||||
version = "0.1.0"
|
||||
description = ""
|
||||
authors = ["Author Name <author@example.com>"]
|
||||
# readme = "README.md"
|
||||
# license = "BSD"
|
||||
packages = [
|
||||
{ include = "**/*.[hp]y", from = "src", to = "rosenpass_marzipan" },
|
||||
{ include = "**/*.sh", from = "src", to = "rosenpass_marzipan" },
|
||||
#{ include = "**/*.lark", from = "src", to = "rosenpass_marzipan" },
|
||||
]
|
||||
|
||||
[tool.poetry.scripts]
|
||||
rosenpass-marzipan = 'rosenpass_marzipan:main'
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.12,<3.13"
|
||||
hy = "^1.0.0"
|
||||
lark = "^1.2.2"
|
||||
hyrule = "^0.8.0"
|
||||
ipython = "^8.32.0"
|
||||
click = "^8.1.8"
|
||||
rich = "^13.9.4"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
poetry = "^2.0.0"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
281
marzipan/src/__init__.py
Normal file
281
marzipan/src/__init__.py
Normal file
@@ -0,0 +1,281 @@
|
||||
from .util import pkgs, setup_exports, export, rename
|
||||
from .parser import *
|
||||
|
||||
# from rich.console import Console
|
||||
import click
|
||||
|
||||
target_subdir = "target/proverif"
|
||||
|
||||
(__all__, export) = setup_exports()
|
||||
export(setup_exports)
|
||||
|
||||
|
||||
console = pkgs.rich.console.Console()
|
||||
logger = pkgs.logging.getLogger(__name__)
|
||||
|
||||
|
||||
@click.group()
|
||||
def main():
|
||||
pkgs.logging.basicConfig(level=pkgs.logging.DEBUG)
|
||||
|
||||
|
||||
def eprint(*args, **kwargs):
|
||||
print(*args, **{"file": pkgs.sys.stderr, **kwargs})
|
||||
|
||||
|
||||
def exc(argv, **kwargs):
|
||||
eprint("$", *argv)
|
||||
command = pkgs.subprocess.run(argv, **kwargs)
|
||||
|
||||
if command.returncode != 0:
|
||||
logger.error("subprocess with terminated with non-zero return code.")
|
||||
eprint("", *argv)
|
||||
exit(command.returncode)
|
||||
|
||||
if command.stdout is not None:
|
||||
return command.stdout.decode("utf-8")
|
||||
|
||||
return ""
|
||||
|
||||
|
||||
def exc_piped(argv, **kwargs):
|
||||
eprint("$", *argv)
|
||||
return pkgs.subprocess.Popen(argv, **kwargs)
|
||||
|
||||
|
||||
def clean_line(prev_line, line):
|
||||
line = line.rstrip()
|
||||
if pkgs.re.match(r"^Warning: identifier \w+ rebound.$", line) or prev_line is None:
|
||||
return None
|
||||
return prev_line
|
||||
|
||||
|
||||
def run_proverif(file, extra_args=[]):
|
||||
params = ["proverif", "-test", *extra_args, file]
|
||||
logger.debug(params)
|
||||
|
||||
process = exc_piped(
|
||||
params,
|
||||
stderr=pkgs.subprocess.PIPE,
|
||||
stdout=pkgs.subprocess.PIPE,
|
||||
text=True,
|
||||
bufsize=1,
|
||||
)
|
||||
try:
|
||||
prev_line = None
|
||||
for line in process.stdout:
|
||||
cleaned_line = clean_line(prev_line, line)
|
||||
prev_line = line
|
||||
if cleaned_line is not None:
|
||||
yield cleaned_line
|
||||
if prev_line is not None:
|
||||
yield prev_line
|
||||
|
||||
except Exception as e:
|
||||
# When does this happen? Should the error even be ignored? Metaverif should probably just abort here, right? --karo
|
||||
logger.error(f"Proverif generated an exception with {params}: {e}")
|
||||
exit(1)
|
||||
finally:
|
||||
process.stdout.close()
|
||||
return_code = process.wait()
|
||||
|
||||
if return_code != 0:
|
||||
logger.error(
|
||||
f"Proverif exited with a non-zero error code {params}: {return_code}"
|
||||
)
|
||||
exit(return_code)
|
||||
|
||||
|
||||
def cpp(file, cpp_prep):
|
||||
logger.debug(f"_cpp: {file}, {cpp_prep}")
|
||||
file_path = pkgs.pathlib.Path(file)
|
||||
|
||||
dirname = file_path.parent
|
||||
cwd = pkgs.pathlib.Path.cwd()
|
||||
|
||||
params = ["cpp", "-P", f"-I{dirname}", file, "-o", cpp_prep]
|
||||
return exc(params, stderr=pkgs.sys.stderr)
|
||||
|
||||
|
||||
def awk(repo_path, cpp_prep, awk_prep):
|
||||
params = [
|
||||
"awk",
|
||||
"-f",
|
||||
str(pkgs.os.path.join(repo_path, "marzipan/marzipan.awk")),
|
||||
cpp_prep,
|
||||
]
|
||||
with open(awk_prep, "w") as file:
|
||||
exc(params, stderr=pkgs.sys.stderr, stdout=file)
|
||||
file.write("\nprocess main")
|
||||
|
||||
|
||||
def pretty_output_line(prefix, mark, color, text):
|
||||
content = f"{mark} {text}"
|
||||
console.print(prefix, style="grey42", end="", no_wrap=True)
|
||||
console.print(content, style=color)
|
||||
|
||||
|
||||
def pretty_output_init(file_path):
|
||||
expected = []
|
||||
descs = []
|
||||
|
||||
with open(file_path, "r") as file:
|
||||
content = file.read()
|
||||
|
||||
# Process lemmas first
|
||||
result = pkgs.re.findall(r"@(lemma)(?=\s+\"([^\"]*)\")", content)
|
||||
if result:
|
||||
# The regex only returns lemmas. For lemmas, we always expect the result 'true' from ProVerif.
|
||||
expected.extend([True for _ in range(len(result))])
|
||||
descs.extend([e[1] for e in result])
|
||||
|
||||
# Then process regular queries
|
||||
result = pkgs.re.findall(r'@(query|reachable)(?=\s+"[^\"]*")', content)
|
||||
if result:
|
||||
# For queries, we expect 'true' from ProVerif, for reachable, we expect 'false'.
|
||||
expected.extend([e == "@query" for e in result])
|
||||
reachable_result = pkgs.re.findall(
|
||||
r'@(query|reachable)\s+"([^\"]*)"', content
|
||||
)
|
||||
descs.extend([e[1] for e in reachable_result])
|
||||
|
||||
ta = pkgs.time.time()
|
||||
res = 0
|
||||
ctr = 0
|
||||
return (ta, res, ctr, expected, descs)
|
||||
|
||||
|
||||
def pretty_output_step(file_path, line, expected, descs, res, ctr, ta):
|
||||
tz = pkgs.time.time()
|
||||
|
||||
# Output from ProVerif contains a trailing newline, which we do not have in the expected output. Remove it for meaningful matching.
|
||||
outp_clean_raw = line.rstrip()
|
||||
if outp_clean_raw == "true":
|
||||
outp_clean = True
|
||||
elif outp_clean_raw == "false":
|
||||
outp_clean = False
|
||||
else:
|
||||
outp_clean = outp_clean_raw
|
||||
|
||||
if outp_clean == expected[ctr]:
|
||||
pretty_output_line(f"{int(tz - ta)}s ", "✔", "green", descs[ctr])
|
||||
else:
|
||||
res = 1
|
||||
pretty_output_line(f"{int(tz - ta)}s ", "✖", "red", descs[ctr])
|
||||
|
||||
ctr += 1
|
||||
ta = tz
|
||||
|
||||
return (res, ctr, ta)
|
||||
|
||||
|
||||
def pretty_output(file_path):
|
||||
(ta, res, ctr, expected, descs) = pretty_output_init(file_path)
|
||||
for line in pkgs.sys.stdin:
|
||||
(res, ctr, ta) = pretty_output_step(
|
||||
file_path, line, expected, descs, res, ctr, ta
|
||||
)
|
||||
|
||||
|
||||
def get_target_dir(path, output):
|
||||
if output is not None and not output == "":
|
||||
return pkgs.pathlib.Path(output)
|
||||
else:
|
||||
return pkgs.os.path.join(path, target_subdir)
|
||||
|
||||
|
||||
@main.command()
|
||||
@click.option("--output", "output", required=False)
|
||||
@click.argument("repo_path")
|
||||
def analyze(repo_path, output):
|
||||
target_dir = get_target_dir(repo_path, output)
|
||||
pkgs.os.makedirs(target_dir, exist_ok=True)
|
||||
|
||||
entries = []
|
||||
analysis_dir = pkgs.os.path.join(repo_path, "analysis")
|
||||
entries.extend(sorted(pkgs.glob.glob(str(analysis_dir) + "/*.entry.mpv")))
|
||||
|
||||
with pkgs.concurrent.futures.ProcessPoolExecutor() as executor:
|
||||
futures = {
|
||||
executor.submit(metaverif, repo_path, target_dir, entry): entry
|
||||
for entry in entries
|
||||
}
|
||||
for future in pkgs.concurrent.futures.as_completed(futures):
|
||||
cmd = futures[future]
|
||||
logger.info(f"Metaverif {cmd} finished.")
|
||||
|
||||
print("all processes finished.")
|
||||
|
||||
|
||||
@main.command()
|
||||
@click.option("--output", "output", required=False)
|
||||
@click.argument("repo_path")
|
||||
def clean(repo_path, output):
|
||||
cleans_failed = 0
|
||||
target_dir = get_target_dir(repo_path, output)
|
||||
if pkgs.os.path.isdir(target_dir):
|
||||
for filename in pkgs.os.listdir(target_dir):
|
||||
file_path = pkgs.os.path.join(target_dir, filename)
|
||||
if pkgs.os.path.isfile(file_path) and pkgs.os.path.splitext(file_path)[
|
||||
1
|
||||
] in [".pv", ".log"]:
|
||||
try:
|
||||
pkgs.os.remove(file_path)
|
||||
except Exception as e:
|
||||
print(f"Error deleting {file_path}: {str(e)}")
|
||||
cleans_failed += 1
|
||||
|
||||
if cleans_failed > 0:
|
||||
print(f"{cleans_failed} could not be deleted.")
|
||||
exit(1)
|
||||
|
||||
|
||||
def metaverif(repo_path, tmpdir, file):
|
||||
print(f"Start metaverif on {file}")
|
||||
# Extract the name using regex
|
||||
name_match = pkgs.re.search(r"([^/]*)(?=\.mpv)", file)
|
||||
if name_match:
|
||||
name = name_match.group(0) # Get the matched name
|
||||
|
||||
# Create the file paths
|
||||
cpp_prep = pkgs.os.path.join(tmpdir, f"{name}.i.pv")
|
||||
awk_prep = pkgs.os.path.join(tmpdir, f"{name}.o.pv")
|
||||
|
||||
# Output the results
|
||||
print(f"Name: {name}")
|
||||
print(f"CPP Prep Path: {cpp_prep}")
|
||||
print(f"AWK Prep Path: {awk_prep}")
|
||||
|
||||
cpp(file, cpp_prep)
|
||||
awk(repo_path, cpp_prep, awk_prep)
|
||||
|
||||
log_file = pkgs.os.path.join(tmpdir, f"{name}.log")
|
||||
|
||||
ta, res, ctr, expected, descs = pretty_output_init(cpp_prep)
|
||||
with open(log_file, "a") as log:
|
||||
generator = run_proverif(awk_prep)
|
||||
for line in generator:
|
||||
log.write(line)
|
||||
# parse-result-line:
|
||||
match = pkgs.re.search(r"^RESULT .* \b(true|false)\b\.$", line)
|
||||
if match:
|
||||
result = match.group(1)
|
||||
# pretty-output:
|
||||
res, ctr, ta = pretty_output_step(
|
||||
cpp_prep, result, expected, descs, res, ctr, ta
|
||||
)
|
||||
else:
|
||||
logger.error(
|
||||
f"No match found for the filename {file}: extension should be .mpv"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
|
||||
@main.command()
|
||||
@click.argument("file_path")
|
||||
def parse(file_path):
|
||||
parse_main(file_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
104
marzipan/src/analyze.sh
Executable file
104
marzipan/src/analyze.sh
Executable file
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
exc() {
|
||||
echo >&2 "\$" "$@"
|
||||
"$@"
|
||||
}
|
||||
|
||||
run_proverif() {
|
||||
local file; file="$1"; shift
|
||||
local log; log="$1"; shift # intentionally unused
|
||||
|
||||
exc rosenpass-marzipan run-proverif "${file}" "${@}"
|
||||
}
|
||||
|
||||
clean_warnings() {
|
||||
exc rosenpass-marzipan clean-warnings
|
||||
}
|
||||
|
||||
color_red='red'
|
||||
color_green='green'
|
||||
color_gray='gray'
|
||||
color_clear=''
|
||||
|
||||
checkmark="✔"
|
||||
cross="❌"
|
||||
|
||||
pretty_output() {
|
||||
exc rosenpass-marzipan pretty-output "${@}"
|
||||
}
|
||||
|
||||
metaverif() {
|
||||
local file; file="$1"; shift
|
||||
local name; name="$(echo "${file}" | grep -Po '[^/]*(?=\.mpv)')"
|
||||
|
||||
local cpp_prep; cpp_prep="${tmpdir}/${name}.i.pv"
|
||||
local awk_prep; awk_prep="${tmpdir}/${name}.o.pv"
|
||||
|
||||
exc rosenpass-marzipan cpp ${file} ${cpp_prep}
|
||||
exc rosenpass-marzipan awk-prep ${cpp_prep} ${awk_prep}
|
||||
|
||||
local log; log="${tmpdir}/${name}.log"
|
||||
{
|
||||
run_proverif "${awk_prep}" "$@" \
|
||||
| clean_warnings \
|
||||
| tee "${log}" \
|
||||
| exc rosenpass-marzipan parse-result-line \
|
||||
| pretty_output "${cpp_prep}"
|
||||
} || {
|
||||
echo "TODO: Commented out some debug output"
|
||||
#if ! grep -q "^Verification summary" "${log}"; then
|
||||
# echo -ne "\033[0\r"
|
||||
# cat "${log}"
|
||||
#fi
|
||||
}
|
||||
}
|
||||
|
||||
analyze() {
|
||||
mkdir -p "${tmpdir}"
|
||||
|
||||
entries=()
|
||||
readarray -t -O "${#entries[@]}" entries < <(
|
||||
find analysis -iname '*.entry.mpv' | sort)
|
||||
|
||||
local entry
|
||||
local procs; procs=()
|
||||
for entry in "${entries[@]}"; do
|
||||
echo "call metaverif"
|
||||
# TODO: commented out for testing
|
||||
#exc rosenpass-marzipan metaverif "${tmpdir}" "${entry}" >&2 & procs+=("$!")
|
||||
exc rosenpass-marzipan metaverif "${tmpdir}" "${entry}" >&2
|
||||
done
|
||||
|
||||
# TODO: commented out for testing
|
||||
# for entry in "${procs[@]}"; do
|
||||
# exc wait -f "${entry}"
|
||||
# done
|
||||
}
|
||||
|
||||
err_usage() {
|
||||
echo >&1 "USAGE: ${0} analyze PATH"
|
||||
echo >&1 "The script will cd into PATH and continue there."
|
||||
exit 1
|
||||
}
|
||||
|
||||
main() {
|
||||
set -e -o pipefail
|
||||
|
||||
local cmd="$1"; shift || err_usage
|
||||
local dir="$1"; shift || err_usage
|
||||
|
||||
cd -- "${dir}"
|
||||
tmpdir="target/proverif"
|
||||
|
||||
echo "call main"
|
||||
|
||||
case "${cmd}" in
|
||||
analyze) analyze ;;
|
||||
clean_warnings) clean_warnings ;;
|
||||
*) err_usage
|
||||
esac
|
||||
}
|
||||
|
||||
# Do not execute main if sourced
|
||||
(return 0 2>/dev/null) || main "$@"
|
||||
467
marzipan/src/parser.py
Normal file
467
marzipan/src/parser.py
Normal file
@@ -0,0 +1,467 @@
|
||||
import sys
|
||||
|
||||
from lark import Lark, Token, Transformer, exceptions, tree
|
||||
|
||||
# taken from Page 17 in the ProVerif manual
|
||||
# At the moment, we do not reject a ProVerif model that uses reserved words as identifier,
|
||||
# because this caused problems with the LARK grammar. We plan to check this in a later
|
||||
# processing step.
|
||||
reserved_words = [
|
||||
"among",
|
||||
"axiom",
|
||||
"channel",
|
||||
"choice",
|
||||
"clauses",
|
||||
"const",
|
||||
"def",
|
||||
"diff",
|
||||
"do",
|
||||
"elimtrue",
|
||||
"else",
|
||||
"equation",
|
||||
"equivalence", # no rule yet (this is CryptoVerif-specific)
|
||||
"event",
|
||||
"expand",
|
||||
"fail",
|
||||
"for",
|
||||
"forall",
|
||||
"foreach",
|
||||
"free",
|
||||
"fun",
|
||||
"get",
|
||||
"if",
|
||||
"implementation", # no rule yet (this is CryptoVerif-specific)
|
||||
"in",
|
||||
"inj-event",
|
||||
"insert",
|
||||
"lemma",
|
||||
"let",
|
||||
"letfun",
|
||||
"letproba",
|
||||
"new",
|
||||
"noninterf",
|
||||
"noselect",
|
||||
"not",
|
||||
"nounif",
|
||||
"or",
|
||||
"otherwise",
|
||||
"out",
|
||||
"param",
|
||||
"phase",
|
||||
"pred",
|
||||
"proba",
|
||||
"process",
|
||||
"proof",
|
||||
"public_vars",
|
||||
"putbegin",
|
||||
"query",
|
||||
"reduc",
|
||||
"restriction",
|
||||
"secret",
|
||||
"select",
|
||||
"set",
|
||||
"suchthat",
|
||||
"sync",
|
||||
"table",
|
||||
"then",
|
||||
"type",
|
||||
"weaksecret",
|
||||
"yield",
|
||||
]
|
||||
|
||||
ident_regex = (
|
||||
"/^" + "".join(f"(?!{w}$)" for w in reserved_words) + "[a-zA-Z][a-zA-Z0-9À-ÿ'_]*/"
|
||||
)
|
||||
|
||||
proverif_grammar = Lark(
|
||||
grammar="""
|
||||
PROCESS: "process"
|
||||
start: decl* PROCESS process
|
||||
YIELD: "yield"
|
||||
channel: CHANNEL
|
||||
CHANNEL: "channel"
|
||||
"""
|
||||
+ "IDENT: /[a-zA-Z][a-zA-Z0-9À-ÿ'_]*/"
|
||||
+ """
|
||||
ZERO: "0"
|
||||
INFIX: "||"
|
||||
| "&&"
|
||||
| "="
|
||||
| "<>"
|
||||
| "<="
|
||||
| ">="
|
||||
| "<"
|
||||
| ">"
|
||||
typeid: channel
|
||||
| IDENT
|
||||
_non_empty_seq{x}: x ("," x)*
|
||||
_maybe_empty_seq{x}: [ _non_empty_seq{x} ]
|
||||
|
||||
OPTIONS_FUN_CONST: "data" | "private" | "typeConverter"
|
||||
OPTIONS_FUN: OPTIONS_FUN_CONST
|
||||
OPTIONS_CONST: OPTIONS_FUN_CONST
|
||||
OPTIONS_FREE_REDUC: "private"
|
||||
OPTIONS_PRED: "memberOptim" | "block"
|
||||
OPTIONS_PROCESS: "precise"
|
||||
OPTIONS_QUERY_LEMMA_AXIOM: "noneSat" | "discardSat" | "instantiateSat" | "fullSat" | "noneVerif" | "discardVerif" | "instantiateVerif" | "fullVerif"
|
||||
OPTIONS_AXIOM: OPTIONS_QUERY_LEMMA_AXIOM
|
||||
OPTIONS_QUERY_LEMMA: OPTIONS_QUERY_LEMMA_AXIOM | "induction" | "noInduction"
|
||||
OPTIONS_LEMMA: OPTIONS_QUERY_LEMMA_AXIOM | "maxSubset"
|
||||
OPTIONS_QUERY: OPTIONS_QUERY_LEMMA_AXIOM | "proveAll"
|
||||
OPTIONS_QUERY_SECRET: "reachability" | "pv_reachability" | "real_or_random" | "pv_real_or_random" | "/cv_[a-zA-Z0-9À-ÿ'_]*/"
|
||||
OPTIONS_RESTRICTION: "removeEvents" | "keepEvents" | "keep" # transl_option_lemma_query in pitsyntax.ml
|
||||
OPTIONS_EQUATION: "convergent" | "linear" # check_equations in pitsyntax.ml
|
||||
OPTIONS_TYPE: "fixed" | "bounded" # TODO(blipp): complete this. These are only for compatibility with CryptoVerif and are ignored
|
||||
options{idents}: [ "[" _non_empty_seq{idents} "]" ]
|
||||
process: ZERO
|
||||
| YIELD
|
||||
| IDENT [ "(" _maybe_empty_seq{pterm} ")" ]
|
||||
| bracketed_process
|
||||
| piped_process
|
||||
| replicated_process
|
||||
| replicated_process_bounds
|
||||
| sample_process
|
||||
| if_process
|
||||
| in_process
|
||||
| out_process
|
||||
| let_process
|
||||
| insert_process
|
||||
| get_process
|
||||
| event_process
|
||||
| phase
|
||||
| sync
|
||||
bracketed_process: "(" process ")"
|
||||
piped_process: process "|" process
|
||||
replicated_process: "!" process
|
||||
replicated_process_bounds: "!" IDENT "<=" IDENT process
|
||||
| "foreach" IDENT "<=" IDENT "do" process
|
||||
sample_process: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" process]
|
||||
| IDENT "<-R" typeid [";" process]
|
||||
let_process: "let" pattern "=" pterm ["in" process [ "else" process ]]
|
||||
| IDENT [":" typeid] "<-" pterm [";" process]
|
||||
| "let" typedecl "suchthat" pterm options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
if_process: "if" pterm "then" process [ "else" process ]
|
||||
in_process: "in" "(" pterm "," pattern ")" options{OPTIONS_PROCESS} [ ";" process ]
|
||||
get_process: IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
out_process: "out" "(" pterm "," pterm ")" [ ";" process ]
|
||||
insert_process: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" [ ";" process ]
|
||||
event_process: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] [ ";" process ]
|
||||
term: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{term} ")"
|
||||
| IDENT "(" _maybe_empty_seq{term} ")"
|
||||
| term ( "+" | "-" ) NAT
|
||||
| NAT "+" term
|
||||
| term INFIX term
|
||||
| "not" "(" term ")"
|
||||
|
||||
query: gterm ["public_vars" _non_empty_seq{IDENT}] [";" query]
|
||||
| "secret" IDENT ["public_vars" _non_empty_seq{IDENT}] options{OPTIONS_QUERY_SECRET} [";" query]
|
||||
| "putbegin" "event" ":" _non_empty_seq{IDENT} [";" query] // Opportunistically left a space between "event" and ":", ProVerif might not accept it with spaces.
|
||||
| "putbegin" "inj-event" ":" _non_empty_seq{IDENT} [";" query]
|
||||
lemma: gterm [";" lemma]
|
||||
| gterm "for" "{" "public_vars" _non_empty_seq{IDENT} "}" [";" lemma]
|
||||
| gterm "for" "{" "secret" IDENT [ "public_vars" _non_empty_seq{IDENT}] "[real_or_random]" "}" [";" lemma]
|
||||
gterm: ident_gterm
|
||||
| fun_gterm
|
||||
| choice_gterm
|
||||
| infix_gterm
|
||||
| arith_gterm
|
||||
| arith2_gterm
|
||||
| event_gterm
|
||||
| injevent_gterm
|
||||
| implies_gterm
|
||||
| paren_gterm
|
||||
| sample_gterm
|
||||
| let_gterm
|
||||
ident_gterm: IDENT
|
||||
fun_gterm: IDENT "(" _maybe_empty_seq{gterm} ")" ["phase" NAT] ["@" IDENT]
|
||||
choice_gterm: "choice" "[" gterm "," gterm "]"
|
||||
infix_gterm: gterm INFIX gterm
|
||||
arith_gterm: gterm ( "+" | "-" ) NAT
|
||||
arith2_gterm: NAT "+" gterm
|
||||
event_gterm: "event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
injevent_gterm: "inj-event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
implies_gterm: gterm "==>" gterm
|
||||
paren_gterm: "(" _maybe_empty_seq{gterm} ")"
|
||||
sample_gterm: "new" IDENT [ "[" [ gbinding ] "]" ]
|
||||
let_gterm: "let" IDENT "=" gterm "in" gterm
|
||||
|
||||
gbinding: "!" NAT "=" gterm [";" gbinding]
|
||||
| IDENT "=" gterm [";" gbinding]
|
||||
|
||||
nounifdecl: "let" IDENT "=" gformat "in" nounifdecl
|
||||
| IDENT ["(" _maybe_empty_seq{gformat} ")" ["phase" NAT]]
|
||||
gformat: IDENT
|
||||
| "*" IDENT
|
||||
| IDENT "(" _maybe_empty_seq{gformat} ")"
|
||||
| "choice" "[" gformat "," gformat "]"
|
||||
| "not" "(" _maybe_empty_seq{gformat} ")"
|
||||
| "new" IDENT [ "[" [ fbinding ] "]" ]
|
||||
| "let" IDENT "=" gformat "in" gformat
|
||||
fbinding: "!" NAT "=" gformat [";" fbinding]
|
||||
| IDENT "=" gformat [";" fbinding]
|
||||
nounifoption: "hypothesis"
|
||||
| "conclusion"
|
||||
| "ignoreAFewTimes"
|
||||
| "inductionOn" "=" IDENT
|
||||
| "inductionOn" "=" "{" _non_empty_seq{IDENT} "}"
|
||||
|
||||
pterm: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{pterm} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pterm} ")"
|
||||
| choice_pterm
|
||||
| pterm ("+" | "-") NAT
|
||||
| NAT "+" pterm
|
||||
| pterm INFIX pterm
|
||||
| not_pterm
|
||||
| sample_pterm
|
||||
| if_pterm
|
||||
| let_pterm
|
||||
| insert_pterm
|
||||
| get_pterm
|
||||
| event_pterm
|
||||
choice_pterm: "choice[" pterm "," pterm "]"
|
||||
if_pterm: "if" pterm "then" pterm [ "else" pterm ]
|
||||
not_pterm: "not" "(" pterm ")"
|
||||
let_pterm: "let" pattern "=" pterm "in" pterm [ "else" pterm ]
|
||||
| IDENT [":" typeid] "<-" pterm ";" pterm
|
||||
| "let" typedecl "suchthat" pterm "in" pterm [ "else" pterm ]
|
||||
sample_pterm: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" pterm]
|
||||
| IDENT "<-R" typeid [";" pterm]
|
||||
insert_pterm: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" ";" pterm
|
||||
event_pterm: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] ";" pterm
|
||||
get_pterm: IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" pterm [ "else" pterm ] ]
|
||||
pattern: IDENT [":" typeid]
|
||||
| "_" [ ":" typeid ]
|
||||
| NAT
|
||||
| pattern "+" NAT
|
||||
| NAT "+" pattern
|
||||
| "(" _maybe_empty_seq{pattern} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pattern} ")"
|
||||
| "=" pterm
|
||||
mayfailterm: term
|
||||
| "fail"
|
||||
mayfailterm_seq: "(" _non_empty_seq{mayfailterm} ")"
|
||||
typedecl: _non_empty_seq{IDENT} ":" typeid [ "," typedecl ]
|
||||
failtypedecl: _non_empty_seq{IDENT} ":" typeid [ "or fail" ] [ "," failtypedecl ]
|
||||
|
||||
decl: type_decl
|
||||
| channel_decl
|
||||
| free_decl
|
||||
| const_decl
|
||||
| fun_decl
|
||||
| letfun_decl
|
||||
| reduc_decl
|
||||
| fun_reduc_decl
|
||||
| equation_decl
|
||||
| pred_decl
|
||||
| table_decl
|
||||
| let_decl
|
||||
| set_settings_decl
|
||||
| event_decl
|
||||
| query_decl
|
||||
| axiom_decl
|
||||
| restriction_decl
|
||||
| lemma_decl
|
||||
| noninterf_decl
|
||||
| weaksecret_decl
|
||||
| not_decl
|
||||
| select_decl
|
||||
| noselect_decl
|
||||
| nounif_decl
|
||||
| elimtrue_decl
|
||||
| clauses_decl
|
||||
| module_decl
|
||||
#| param_decl
|
||||
#| proba_decl
|
||||
#| letproba_decl
|
||||
#| proof_decl
|
||||
#| def_decl
|
||||
#| expand_decl
|
||||
|
||||
type_decl: "type" IDENT options{OPTIONS_TYPE} "."
|
||||
channel_decl: "channel" _non_empty_seq{IDENT} "."
|
||||
free_decl: "free" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FREE_REDUC} "."
|
||||
const_decl: "const" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
fun_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
letfun_decl: "letfun" IDENT [ "(" [ typedecl ] ")" ] "=" pterm "."
|
||||
reduc_decl: "reduc" eqlist options{OPTIONS_FREE_REDUC} "."
|
||||
fun_reduc_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid "reduc" mayfailreduc options{OPTIONS_FUN_CONST} "."
|
||||
equation_decl: "equation" eqlist options{OPTIONS_EQUATION} "."
|
||||
pred_decl: "pred" IDENT [ "(" [ _maybe_empty_seq{typeid} ] ")" ] options{OPTIONS_PRED} "."
|
||||
table_decl: IDENT "(" _maybe_empty_seq{typeid} ")" "."
|
||||
let_decl: "let" IDENT [ "(" [ typedecl ] ")" ] "=" process "."
|
||||
|
||||
BOOL : "true" | "false"
|
||||
NONE: "none"
|
||||
FULL: "full"
|
||||
ALL: "all"
|
||||
FUNC: IDENT
|
||||
ignoretype_options: BOOL | ALL | NONE | "attacker"
|
||||
boolean_settings_names: "privateCommOnPublicTerms"
|
||||
| "rejectChoiceTrueFalse"
|
||||
| "rejectNoSimplif"
|
||||
| "allowDiffPatterns"
|
||||
| "inductionQueries"
|
||||
| "inductionLemmas"
|
||||
| "movenew"
|
||||
| "movelet"
|
||||
| "stopTerm"
|
||||
| "removeEventsForLemma"
|
||||
| "simpEqAll"
|
||||
| "eqInNames"
|
||||
| "preciseLetExpand"
|
||||
| "expandSimplifyIfCst"
|
||||
| "featureFuns"
|
||||
| "featureNames"
|
||||
| "featurePredicates"
|
||||
| "featureEvents"
|
||||
| "featureTables"
|
||||
| "featureDepth"
|
||||
| "featureWidth"
|
||||
| "simplifyDerivation"
|
||||
| "abbreviateDerivation"
|
||||
| "explainDerivation"
|
||||
| "unifyDerivation"
|
||||
| "reconstructDerivation"
|
||||
| "displayDerivation"
|
||||
| "traceBacktracking"
|
||||
| "interactiveSwapping"
|
||||
| "color"
|
||||
| "verboseLemmas"
|
||||
| "abbreviateClauses"
|
||||
| "removeUselessClausesBeforeDisplay"
|
||||
| "verboseEq"
|
||||
| "verboseDestructors"
|
||||
| "verboseTerm"
|
||||
| "verboseStatistics"
|
||||
| "verboseRules"
|
||||
| "verboseBase"
|
||||
| "verboseRedundant"
|
||||
| "verboseCompleted"
|
||||
| "verboseGoalReachable"
|
||||
|
||||
_decl_pair{name, value}: "set" name "=" value "."
|
||||
|
||||
set_settings_boolean_decl: _decl_pair{boolean_settings_names, BOOL}
|
||||
|
||||
ignore_types_values: BOOL | "all" | "none" | "attacker"
|
||||
simplify_process_values: BOOL | "interactive"
|
||||
precise_actions_values: BOOL | "trueWithoutArgsInNames"
|
||||
redundant_hyp_elim_values: BOOL | "beginOnly"
|
||||
reconstruct_trace_values: BOOL | "n"
|
||||
attacker_values: "active" | "passive"
|
||||
key_compromise_values: "none" | "approx" | "strict"
|
||||
predicates_implementable: "check" | "nocheck"
|
||||
application_values: "instantiate" | "full" | "none" | "discard"
|
||||
max_values: "none" | "n"
|
||||
sel_fun_values: "TermMaxsize" | "Term"| "NounifsetMaxsize" | "Nounifset"
|
||||
redundancy_elim_values: "best" | "simple" | "no"
|
||||
nounif_ignore_a_few_times_values: "none" | "auto" | "all"
|
||||
nounif_ignore_ntimes_values: "n"
|
||||
trace_display_values: "short" | "long" | "none"
|
||||
verbose_clauses_values: "none" | "explained" | "short"
|
||||
set_settings_decl: set_settings_boolean_decl
|
||||
| _decl_pair{"ignoreTypes", ignore_types_values}
|
||||
| _decl_pair{"simplifyProcess", simplify_process_values}
|
||||
| _decl_pair{"preciseActions", precise_actions_values}
|
||||
| _decl_pair{"redundantHypElim", redundant_hyp_elim_values}
|
||||
| _decl_pair{"reconstructTrace", reconstruct_trace_values}
|
||||
| _decl_pair{"attacker", attacker_values}
|
||||
| _decl_pair{"keyCompromise", key_compromise_values}
|
||||
| _decl_pair{"predicatesImplementable", predicates_implementable}
|
||||
| _decl_pair{"saturationApplication", application_values}
|
||||
| _decl_pair{"verificationApplication", application_values}
|
||||
| _decl_pair{"maxDepth", max_values}
|
||||
| _decl_pair{"maxHyp", max_values}
|
||||
| _decl_pair{"selFun", sel_fun_values}
|
||||
| _decl_pair{"redundancyElim", redundancy_elim_values}
|
||||
| _decl_pair{"nounifIgnoreAFewTimes", nounif_ignore_a_few_times_values}
|
||||
| _decl_pair{"nounifIgnoreNtimes", nounif_ignore_ntimes_values}
|
||||
| _decl_pair{"traceDisplay", trace_display_values}
|
||||
| _decl_pair{"verboseClauses", verbose_clauses_values}
|
||||
| set_strategy
|
||||
| set_symb_order
|
||||
|
||||
_swap_strategy_seq{x}: x ("->" x)*
|
||||
set_strategy: "set" "swapping" "=" _swap_strategy_seq{TAG} "."
|
||||
_symb_ord_seq{x}: x (">" x)*
|
||||
set_symb_order: "set" "symbOrder" "=" _symb_ord_seq{FUNC} "."
|
||||
|
||||
event_decl: "event" IDENT ["(" _maybe_empty_seq{typeid} ")"] "."
|
||||
query_decl: "query" [ typedecl ";"] query options{OPTIONS_QUERY} "."
|
||||
|
||||
axiom_decl: "axiom" [ typedecl ";"] lemma options{OPTIONS_AXIOM} "."
|
||||
restriction_decl: "restriction" [ typedecl ";"] lemma options{OPTIONS_RESTRICTION} "."
|
||||
lemma_decl: "lemma" [ typedecl ";"] lemma options{OPTIONS_LEMMA} "."
|
||||
|
||||
noninterf_decl: [ typedecl ";"] _maybe_empty_seq{nidecl} "."
|
||||
weaksecret_decl: "weaksecret" IDENT "."
|
||||
not_decl: "not" [ typedecl ";"] gterm "."
|
||||
|
||||
INT: NAT | "-" NAT
|
||||
select_decl: "select" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
noselect_decl: "noselect" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
nounif_decl: "nounif" [ typedecl ";"] nounifdecl [ "/" INT ] [ "["_non_empty_seq{nounifoption} "]" ] "."
|
||||
|
||||
elimtrue_decl: "elimtrue" [ failtypedecl ";" ] term "."
|
||||
clauses_decl: "clauses" clauses "."
|
||||
|
||||
module_decl: "@module" " " IDENT
|
||||
|
||||
# TODO: finish defining these (comes from Cryptoverif)
|
||||
#param_decl: "param" _non_empty_seq{IDENT} options "."
|
||||
#proba_decl: "proba" IDENT ["(...)"] options "."
|
||||
#letproba_decl: "letproba" IDENT ["(...)"] "= ..." "."
|
||||
#proof_decl: "proof" "{" proof "}"
|
||||
#def_decl: "def" IDENT "(" _maybe_empty_seq{typeid} ")" "{" decl* "}"
|
||||
#expand_decl: "expand" IDENT "(" _maybe_empty_seq{typeid} ")" "."
|
||||
|
||||
nidecl: IDENT [ "among" "(" _non_empty_seq{term} ")" ]
|
||||
equality: term "=" term
|
||||
| "let" IDENT "=" term "in" equality
|
||||
mayfailequality: IDENT mayfailterm_seq "=" mayfailterm
|
||||
eqlist: [ "forall" typedecl ";" ] equality [ ";" eqlist ]
|
||||
clause: term
|
||||
| term "->" term
|
||||
| term "<->" term
|
||||
| term "<=>" term
|
||||
clauses: [ "forall" failtypedecl ";" ] clause [ ";" clauses ]
|
||||
mayfailreduc: [ "forall" failtypedecl ";" ] mayfailequality [ "otherwise" mayfailreduc ]
|
||||
NAT: DIGIT+
|
||||
phase: "phase" NAT [";" process]
|
||||
TAG: IDENT
|
||||
sync: "sync" NAT ["[" TAG "]"] [";" process]
|
||||
COMMENT: /\(\*(\*(?!\))|[^*])*\*\)/
|
||||
%import common (WORD, DIGIT, NUMBER, WS) // imports from terminal library
|
||||
%ignore WS // Disregard spaces in text
|
||||
%ignore COMMENT
|
||||
""",
|
||||
debug=True,
|
||||
# lexer_callbacks={"COMMENT": comments.append},
|
||||
)
|
||||
|
||||
# COMMENT: /\(\*(\*(?!\))|[^*])*\*\)/
|
||||
# COMMENT: "(*" /(\*(?!\))|[^*])*/ "*)"
|
||||
# comment: /\(\*(?:(?!\(\*|\*\)).|(?R))*\*\)/
|
||||
|
||||
# TODO Open ProVerif compatibility questions
|
||||
# TODO * does it allow leading zeros for NAT?
|
||||
# TODO * tag is not defined? is it ident?
|
||||
# TODO * are spaces between "event" and ":" allowed?
|
||||
# TODO * spaces between "nat" and "("? "choice" and "["?
|
||||
|
||||
|
||||
def parsertest(input):
|
||||
parsetree = proverif_grammar.parse(input)
|
||||
# tree.pydot__tree_to_png(parsetree, name + ".png")
|
||||
return parsetree
|
||||
|
||||
|
||||
def parse_main(file_path):
|
||||
with open(file_path, "r") as f:
|
||||
content = f.read()
|
||||
# print(content)
|
||||
parsertest(content)
|
||||
130
marzipan/src/util.py
Normal file
130
marzipan/src/util.py
Normal file
@@ -0,0 +1,130 @@
|
||||
from typing import Callable, Any, Tuple, List, TypeVar
|
||||
from types import ModuleType as Module
|
||||
from importlib import import_module
|
||||
from dataclasses import dataclass
|
||||
|
||||
T = TypeVar('T')
|
||||
|
||||
def setup_exports() -> Tuple[List[str], Callable[[T], T]]:
|
||||
__all__ = []
|
||||
|
||||
"""
|
||||
Helper to provide an export() function with little boilerplate.
|
||||
|
||||
```
|
||||
from marzipan.util import setup_exports
|
||||
(__all__, export) = setup_exports()
|
||||
```
|
||||
"""
|
||||
def export(what: T) -> T:
|
||||
match what:
|
||||
case str():
|
||||
__all__.append(what)
|
||||
case object(__name__ = name):
|
||||
__all__.append(name)
|
||||
case _:
|
||||
raise TypeError(
|
||||
f"Unsupported export type `{what}`: Export is neither `str` nor has it an attribute named `__name__`.")
|
||||
return what
|
||||
|
||||
return (__all__, export)
|
||||
|
||||
(__all__, export) = setup_exports()
|
||||
export(setup_exports)
|
||||
|
||||
@export
|
||||
def rename(name: str) -> Callable[[T], T]:
|
||||
def rename_impl(v: T) -> T:
|
||||
v.__name__ = name
|
||||
return v
|
||||
return rename_impl
|
||||
|
||||
@export
|
||||
def attempt(fn):
    # TODO: Documentation tests
    """
    Wrap *fn* so that calling the wrapper returns a tuple of
    (result, exception).

    On success the wrapper returns ``(fn(...), None)``; if *fn* raises, the
    exception is caught and ``(None, exception)`` is returned instead.

    The following example uses attempt to implement a checked_divide
    function that returns None if the division by zero is caught.

    ```python
    try_divide = attempt(lambda a, b: a/b)

    def checked_divide(a, b):
        match try_divide(a, b):
            case (result, None):
                return result
            case (None, ZeroDivisionError()):
                return None
            case _:
                raise RuntimeError("Unreachable")

    assert(checked_divide(1, 0) == None)
    assert(checked_divide(0, 1) == 0)
    assert(checked_divide(1, 1) == 1)
    ```

    Fix: the docstring previously referred to a nonexistent ``safe_call``
    helper; it now names ``attempt`` itself.
    """
    def retfn(*args, **kwargs):
        try:
            return (fn(*args, **kwargs), None)
        except Exception as e:
            return (None, e)
    # Give the wrapper a readable name, e.g. attempt(div) -> "try_div".
    retfn.__name__ = f"try_{fn.__name__}"
    return retfn
|
||||
|
||||
@export
|
||||
def scoped(fn: Callable[[], Any]) -> Any:
    """
    Scoped variable assignment.

    Use as a decorator on a zero-argument function to call it immediately,
    binding the return value to the function's name (an alias for `call`).
    """
    value = fn()
    return value
|
||||
|
||||
@export
|
||||
def try_import(name : str) -> Tuple[Module | None, Exception | None]:
    """Import module *name*; return (module, None) on success, else (None, error)."""
    importer = attempt(import_module)
    return importer(name)
|
||||
|
||||
@dataclass(frozen=True)
class Pkgs:
    # Lazy attribute-access importer: `pkgs.foo.bar` imports "foo", then
    # "foo.bar", one dotted component per attribute access, falling back to
    # plain attribute lookup on the wrapped module once a component is not
    # itself an importable module.
    #
    # __mod__:    the module this node wraps (None at the root)
    # __prefix__: the dotted import path accumulated so far (None at the root)
    __mod__: Module | None
    __prefix__: str | None

    def __get__(self, k: str):
        # NOTE(review): named like the descriptor-protocol hook, but the
        # (self, k) signature reads like a keyed-lookup helper; it simply
        # forwards to getattr (and therefore to __getattribute__ below).
        # Confirm the intended use before relying on descriptor behavior.
        return getattr(self, k)

    def __getattribute__(self, k: str):
        # Intercept every attribute access to implement the lazy import chain.
        match k:
            case "__mod__" | "__prefix__" | "__class__":
                # Access the underlying field values directly. These names are
                # exactly what the match statements below read, so they must
                # bypass the interception to avoid infinite recursion.
                return super().__getattribute__(k)

        match self:
            case Pkgs(None, None):
                # Root node: import the package from the top level.
                return Pkgs(import_module(k), k)

        # Non-root node: try importing "<prefix>.<k>" as a subpackage.
        name = f"{self.__prefix__}.{k}"
        match try_import(name):
            case (child, None):
                # Imported subpackage — descend into it.
                return Pkgs(child, name)
            case (_, ModuleNotFoundError()):
                # No such module; fall back to an ordinary attribute of the
                # wrapped module instead.
                return getattr(self.__mod__, k)
            case (_, err):
                # Unknown error (e.g. the module exists but fails on import):
                # propagate it to the caller.
                raise err
|
||||
|
||||
@scoped
|
||||
@export
|
||||
def pkgs() -> Pkgs:
    """
    Global package scope.

    Attribute access triggers imports, so `pkgs.marzipan` imports the
    package `marzipan`.
    """
    return Pkgs(__mod__=None, __prefix__=None)
|
||||
265
marzipan/test-gpt-oss-2.py
Normal file
265
marzipan/test-gpt-oss-2.py
Normal file
@@ -0,0 +1,265 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Below is a **more “Pythonic”** rewrite of the original AWK‑to‑Python translator.
|
||||
# The logic is exactly the same – the same error messages, line numbers and exit
|
||||
# codes – but the code is organized into small, reusable functions, uses
|
||||
# `dataclasses`, type hints, `Path.read_text()`, `re.sub()` and other idiomatic
|
||||
# constructs. It is also easier to read and to extend.
|
||||
|
||||
|
||||
"""
|
||||
py_awk_translator.py
|
||||
|
||||
A line‑by‑line pre‑processor that implements the same behaviour as the
|
||||
original AWK script you posted (handling @module, @alias, @long‑alias,
|
||||
private‑variable expansion, @query/@reachable/@lemma checks and token‑wise
|
||||
alias substitution).
|
||||
|
||||
Usage
|
||||
|
||||
python3 py_awk_translator.py file1.pv file2.pv
|
||||
# or
|
||||
cat file.pv | python3 py_awk_translator.py
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterable
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Helper utilities
|
||||
# ----------------------------------------------------------------------
|
||||
TOKEN_RE = re.compile(r"[0-9A-Za-z_']")
|
||||
|
||||
def is_token_char(ch: str) -> bool:
    """Return True if *ch* can be part of an identifier token.

    Token characters are ASCII alphanumerics, underscore, and the single
    quote. The whole of *ch* must match, so multi-character strings are
    rejected.
    """
    return re.fullmatch(r"[0-9A-Za-z_']", ch) is not None
|
||||
|
||||
def die(msg: str, fname: str, lineno: int) -> None:
    """Report *msg* (prefixed with file:line) on stderr and exit with status 1,
    exactly like the AWK original."""
    print(f"{fname}:{lineno}: {msg}", file=sys.stderr)
    raise SystemExit(1)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Core translator – holds the mutable state that the AWK script kept in
|
||||
# global variables.
|
||||
# ----------------------------------------------------------------------
|
||||
@dataclass
class Translator:
    """Collects state while processing a file line‑by‑line.

    Mirrors the global variables of the original AWK pre-processor: feed
    lines through process() and emit the accumulated result via finish().
    """

    # final output buffer (one entry per processed input line)
    out: list[str] = field(default_factory=list)

    # current @module name (used when expanding "~")
    module: str = ""

    # simple one‑line aliases: name → replacement text
    aliases: Dict[str, str] = field(default_factory=dict)

    # multi‑line alias handling: name being defined and value accumulated
    # between @long-alias and @long-alias-end
    long_name: str = ""
    long_value: str = ""

    # error flag – mirrors the AWK variable `err`; finish() only prints
    # when it is still 0 (die() exits the process, so in practice this
    # stays 0)
    err: int = 0

    def process(self, raw: str, fname: str, lineno: int) -> None:
        """Apply all transformation rules to *raw* and store the result.

        *fname* and *lineno* are used only for diagnostics. The rules are
        order-sensitive: directives first, then "~" expansion, then alias
        substitution, then long-alias accumulation.
        """
        line = raw.rstrip("\n")  # keep a copy for error messages
        original = line          # the untouched line, used in diagnostics

        # 1) @module — start a new module scope; resets all aliases.
        if line.startswith("@module"):
            parts = line.split(maxsplit=1)
            self.module = parts[1] if len(parts) > 1 else ""
            self.aliases.clear()
            line = ""

        # 2) @alias name=value [name=value ...] — one-line aliases.
        elif line.startswith("@alias"):
            for token in line.split()[1:]:
                if "=" in token:
                    name, value = token.split("=", 1)
                    self.aliases[name] = value
            line = ""

        # 3) @long-alias-end — finalize a multi-line alias.
        #    NOTE: must be tested before the "@long-alias" prefix below.
        elif line.startswith("@long-alias-end"):
            if not self.long_name:
                die("Long alias not started", fname, lineno)
            # collapse multiple spaces → single space, strip trailing space
            self.long_value = re.sub(r" +", " ", self.long_value).strip()
            self.aliases[self.long_name] = self.long_value
            self.long_name = self.long_value = ""
            line = ""

        # 4) @long-alias name — start accumulating a multi-line alias.
        elif line.startswith("@long-alias"):
            parts = line.split(maxsplit=1)
            self.long_name = parts[1] if len(parts) > 1 else ""
            self.long_value = ""
            line = ""

        # 5) Literal PRIVATE__ in the source means the author bypassed the
        #    "~" shorthand — treated as a fatal error (die() exits).
        elif "PRIVATE__" in line:
            die(
                "Used private variable without ~:\n\n"
                f"  {lineno} > {original}",
                fname,
                lineno,
            )

        # 6) @query / @reachable / @lemma must carry a quoted parameter;
        #    the quoted part is then blanked out (line length preserved).
        elif re.search(r"@(query|reachable|lemma)", line):
            if not re.search(r'@(query|reachable|lemma)\s+"[^"]*"', line):
                die(
                    "@query or @reachable statement without parameter:\n\n"
                    f"  {lineno} > {original}",
                    fname,
                    lineno,
                )
            # replace the quoted part with blanks (preserve line length)
            m = re.search(r'@(query|reachable|lemma)\s+"[^"]*"', line)
            start, end = m.span()
            line = line[:start] + " " * (end - start) + line[end:]

        # 7) Expand "~" to the private‑variable prefix of the current module.
        if "~" in line:
            line = line.replace("~", f"PRIVATE__{self.module}__")

        # 8) Token‑wise alias substitution (the long AWK loop).
        line = self._expand_aliases(line)

        # 9) If inside a multi-line alias, accumulate instead of emitting.
        if self.long_name:
            self.long_value += line + " "
            line = ""  # the line itself must not appear in output

        # 10) Store the (possibly empty) line; empty lines keep the output
        #     line-numbering aligned with the input.
        self.out.append(line + "\n")

    def _expand_aliases(self, text: str) -> str:
        """Replace every whole‑token alias in *text* with its value.

        Scans character by character; a replacement restarts the scan on the
        remaining suffix, so alias values themselves are never re-expanded
        (they go into *result*, which is not rescanned).
        """
        i = 0
        result = ""

        while i < len(text):
            # a = previous char, c = current char
            a = text[i - 1] if i > 0 else ""
            c = text[i]

            # If we are already inside a token, just move forward
            if i > 0 and is_token_char(a):
                i += 1
                continue

            # If the current char does not start a token, skip it
            if not is_token_char(c):
                i += 1
                continue

            # At a token boundary – try to match any alias
            matched = False
            for name, value in self.aliases.items():
                if text.startswith(name, i):
                    after = text[i + len(name) : i + len(name) + 1]
                    if is_token_char(after):  # name is only a prefix
                        continue
                    # Alias matches – replace it
                    result += text[:i] + value
                    text = text[i + len(name) :]  # continue scanning the suffix
                    i = 0
                    matched = True
                    break

            if not matched:
                i += 1

        return result + text

    def finish(self) -> None:
        """Write the accumulated output to stdout (unless an error occurred)."""
        if self.err == 0:
            sys.stdout.write("".join(self.out))
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Command‑line driver
|
||||
# ----------------------------------------------------------------------
|
||||
def _process_path(path: Path, translator: Translator) -> None:
    """Read *path* line‑by‑line and feed each line to *translator*."""
    text = path.read_text(encoding="utf-8")
    lineno = 0
    # splitlines(True) keeps the line terminators, matching what the
    # translator receives from stdin.
    for raw in text.splitlines(True):
        lineno += 1
        translator.process(raw, str(path), lineno)
|
||||
|
||||
def main() -> None:
    """Drive the translator over stdin, or over every file named on argv."""
    translator = Translator()

    args = sys.argv[1:]
    if not args:
        # No file arguments → read stdin as one "virtual" file named
        # "<stdin>" so diagnostics keep correct line numbers.
        for lineno, raw in enumerate(sys.stdin, start=1):
            translator.process(raw, "<stdin>", lineno)
    else:
        for name in args:
            candidate = Path(name)
            if not candidate.is_file():
                sys.stderr.write(f"File not found: {name}\n")
                sys.exit(1)
            _process_path(candidate, translator)

    translator.finish()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
|
||||
## What makes this version more Pythonic?
|
||||
|
||||
# | Aspect | Original style | Refactored style |
|
||||
# |--------|----------------|------------------|
|
||||
# | **State handling** | Global variables (`buf`, `module`, …) | `@dataclass Translator` encapsulates all mutable state |
|
||||
# | **Regular‑expression reuse** | Re‑compiled on every call (`match`, `gsub`) | Compiled once (`TOKEN_RE`) and reused |
|
||||
# | **String manipulation** | Manual `substr`, concatenation in loops | Slicing, `str.replace`, `re.sub` for clarity |
|
||||
# | **Loop logic** | `for (i=1; i<length($0); i+=1)` with many manual index tricks | A single `while` loop with early‑continue guards; the inner alias‑search is a clean `for name, value in self.aliases.items()` |
|
||||
# | **Error handling** | `print(... > "/dev/stderr")` and `exit(1)` | Dedicated `die()` helper that writes to `stderr` and exits |
|
||||
# | **File I/O** | Manual `while (getline ...)` in AWK → `for` over `sys.stdin` / `Path.read_text()` | Uses `Path.read_text()` and `enumerate` for line numbers |
|
||||
# | **Readability** | Mixed AWK‑style comments, one‑liner `if` statements | Docstrings, section comments, type hints, and small helper methods |
|
||||
# | **Extensibility** | Adding a new rule required editing a monolithic block | New rules can be added as separate `elif` blocks or new methods without touching the core loop |
|
||||
|
||||
# The script can be saved as `py_awk_translator.py`, made executable (`chmod +x py_awk_translator.py`), and used exactly like the original AWK program while being easier to maintain and understand.
|
||||
292
marzipan/test-gpt-oss.py
Normal file
292
marzipan/test-gpt-oss.py
Normal file
@@ -0,0 +1,292 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# **Python 3 translation of the AWK script**
|
||||
|
||||
# Below is a drop‑in replacement that can be used the same way as the original
|
||||
# `awk` program (give it one or more file names, or let it read from *stdin*).
|
||||
# All the logic of the AWK version is kept – the only difference is that the
|
||||
# code is now ordinary, readable Python 3.
|
||||
|
||||
|
||||
"""
|
||||
translate_awk_to_py.py
|
||||
|
||||
A line‑by‑line translator for the “@module / @alias / @long‑alias …”
|
||||
pre‑processor that was originally written in AWK. The behaviour is
|
||||
identical to the AWK script you posted, including the exact error
|
||||
messages and exit codes.
|
||||
|
||||
Usage
|
||||
|
||||
python3 translate_awk_to_py.py file1.pv file2.pv
|
||||
# or
|
||||
cat file.pv | python3 translate_awk_to_py.py
|
||||
|
||||
The script prints the transformed source to *stdout* and writes any
|
||||
diagnostic messages to *stderr* (exactly like the AWK version).
|
||||
"""
|
||||
|
||||
import sys
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Helper functions
|
||||
# ----------------------------------------------------------------------
|
||||
def istok(ch: str) -> bool:
    """Return True if *ch* is a token character (alnum, '_' or ''').

    Uses re.match, so only the first character of *ch* is examined — a
    multi-character string counts as a token if it merely starts with one.
    """
    return re.match(r"[0-9a-zA-Z_']", ch) is not None
|
||||
|
||||
def error(msg: str, fname: str, lineno: int) -> None:
    """Write a file:line-prefixed error message to stderr and exit(1)."""
    print(f"{fname}:{lineno}: {msg}", file=sys.stderr)
    sys.exit(1)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Main processing class (keeps the same global state as the AWK script)
|
||||
# ----------------------------------------------------------------------
|
||||
class Translator:
    """Line-by-line translator; keeps the same global state as the AWK script."""

    def __init__(self):
        self.buf = ""               # final output buffer
        self.module = ""            # current @module name
        self.err = 0                # error flag (mirrors AWK's); finish()
                                    # only prints when it is still 0
        self.long_alias_name = ""   # name of a multi‑line alias being built
        self.long_alias_value = ""  # accumulated value of that alias
        self.aliases: dict[str, str] = {}  # simple one‑line aliases

    # ------------------------------------------------------------------
    # Rule mapping (AWK → Python):
    #   BEGIN block                     → __init__
    #   @module                         → first `if` in process_line
    #   @alias                          → second `elif`
    #   @long-alias / @long-alias-end   → third/fourth `elif` + the
    #                                     `if self.long_alias_name` section
    #   illegal PRIVATE__ usage         → `elif "PRIVATE__" in orig_line`
    #   @query/@reachable/@lemma checks → the regex `elif`
    #   "~" → PRIVATE__<module>__       → line.replace("~", …)
    #   token-wise alias substitution   → the `while i < len(line)` loop
    #   output accumulation             → self.buf += line + "\n"
    #   END block                       → finish()
    # ------------------------------------------------------------------
    def process_line(self, line: str, fname: str, lineno: int) -> None:
        """Transform *line* according to all the rules (order-sensitive)."""
        # keep the original line for error reporting
        orig_line = line.rstrip("\n")

        # 1) @module — new module scope; resets the alias table.
        if orig_line.startswith("@module"):
            parts = orig_line.split()
            if len(parts) >= 2:
                self.module = parts[1]
            else:
                self.module = ""
            self.aliases.clear()
            line = ""  # AWK does: $0 = ""
            # fall through – nothing else on this line matters

        # 2) @alias — everything after the keyword is name=value pairs.
        elif orig_line.startswith("@alias"):
            for token in orig_line.split()[1:]:
                if "=" in token:
                    name, value = token.split("=", 1)
                    self.aliases[name] = value
            line = ""

        # 3) @long-alias-end — finalize a multi-line alias.
        #    NOTE: tested before the "@long-alias" prefix below.
        elif orig_line.startswith("@long-alias-end"):
            if not self.long_alias_name:
                error("Long alias not started", fname, lineno)
            # compress multiple spaces to a single space
            self.long_alias_value = re.sub(r" +", " ", self.long_alias_value)
            self.aliases[self.long_alias_name] = self.long_alias_value.strip()
            # reset the temporary variables
            self.long_alias_name = ""
            self.long_alias_value = ""
            line = ""

        # 4) @long-alias — start of a multi‑line alias definition.
        elif orig_line.startswith("@long-alias"):
            parts = orig_line.split()
            if len(parts) >= 2:
                self.long_alias_name = parts[1]
                self.long_alias_value = ""
            else:
                self.long_alias_name = ""
                self.long_alias_value = ""
            line = ""

        # 5) Literal PRIVATE__ means "~" was bypassed — fatal (error() exits).
        #    The AWK version looks for the literal string PRIVATE__ (which
        #    appears only after the "~" replacement). Same behaviour here.
        elif "PRIVATE__" in orig_line:
            error(
                "Used private variable without ~:\n\n"
                f"  {lineno} > {orig_line}",
                fname,
                lineno,
            )

        # 6) @query / @reachable / @lemma must carry a quoted parameter;
        #    the quoted span is then blanked out (line length preserved).
        elif re.search(r"@(query|reachable|lemma)", orig_line):
            # Must contain a quoted string after the keyword
            if not re.search(r'@(query|reachable|lemma)\s+"[^"]*"', orig_line):
                error(
                    "@query or @reachable statement without parameter:\n\n"
                    f"  {lineno} > {orig_line}",
                    fname,
                    lineno,
                )
            # Replace the quoted part with spaces (preserve line length)
            m = re.search(r'@(query|reachable|lemma)\s+"[^"]*"', orig_line)
            start, end = m.start(), m.end()
            pre = orig_line[:start]
            mat = orig_line[start:end]
            post = orig_line[end:]
            mat_spaced = " " * len(mat)
            line = pre + mat_spaced + post

        # 7) No special rule matched – keep the line as‑is for now.
        else:
            line = orig_line

        # 8) Insert the private‑variable prefix wherever "~" appears.
        if "~" in line:
            line = line.replace("~", f"PRIVATE__{self.module}__")

        # 9) Alias substitution (token‑wise, exactly like the AWK loop).
        #    Walk the line character by character looking for token starts;
        #    when a token matches a key in self.aliases it is replaced and
        #    the scan restarts at the beginning of the (now shorter) line.
        #    Replaced text goes into minibuf, which is never rescanned, so
        #    alias values are not expanded recursively.
        i = 0
        minibuf = ""
        while i < len(line):
            # a = previous character, c = current character
            a = line[i - 1] if i > 0 else ""
            c = line[i]

            # If we are already inside a token, just move on
            if i > 0 and istok(a):
                i += 1
                continue

            # If the current character does NOT start a token, skip it
            if not istok(c):
                i += 1
                continue

            # We are at a token boundary – try to match any alias
            matched = False
            for alias, value in self.aliases.items():
                klen = len(alias)
                token = line[i : i + klen]
                after = line[i + klen : i + klen + 1]  # char after the token

                if token != alias:
                    continue
                if istok(after):  # alias is only a prefix of a longer token
                    continue

                # ---- alias matches ----
                matched = True
                prefix = line[:i]          # everything before the token
                suffix = line[i + klen :]  # everything after the token
                minibuf += prefix + value
                line = suffix              # continue scanning the suffix
                i = 0                      # restart from the beginning
                break

            if not matched:
                # No alias matched – keep the current character and move on
                i += 1

        # Append whatever is left of the line after the last replacement
        line = minibuf + line

        # 10) If we are inside a multi‑line alias, accumulate the line
        if self.long_alias_name:
            self.long_alias_value += line + " "
            line = ""  # the line itself must not appear in the output

        # 11) Append the (possibly empty) line to the global buffer; empty
        #     lines keep output numbering aligned with the input.
        self.buf += line + "\n"

    def finish(self) -> None:
        """Print the accumulated buffer if no error occurred."""
        if self.err == 0:
            sys.stdout.write(self.buf)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Entry point
|
||||
# ----------------------------------------------------------------------
|
||||
def main() -> None:
    """Entry point: translate each file named on argv, or stdin if none.

    Fix: the stdin branch used to pass the whole of ``sys.stdin.read()`` to
    ``process_line()`` as a single "line" with lineno 1, so multi-line input
    was processed as one giant line — directives past the first line were
    never recognized and reported line numbers were wrong. Stdin is now fed
    line-by-line, matching the file branch and the documented line-by-line
    behaviour.
    """
    translator = Translator()

    # If no file name is given we read from stdin (named "<stdin>")
    if len(sys.argv) == 1:
        for lineno, raw in enumerate(sys.stdin, start=1):
            translator.process_line(raw, "<stdin>", lineno)
    else:
        for fname in sys.argv[1:]:
            path = Path(fname)
            try:
                with path.open(encoding="utf-8") as f:
                    for lineno, raw in enumerate(f, start=1):
                        translator.process_line(raw, str(path), lineno)
            except FileNotFoundError:
                sys.stderr.write(f"File not found: {fname}\n")
                sys.exit(1)

    translator.finish()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
|
||||
### How the Python version mirrors the AWK script
|
||||
|
||||
# | AWK rule | Python implementation |
|
||||
# |----------|-----------------------|
|
||||
# | `BEGIN` block – initialise variables | `Translator.__init__` |
|
||||
# | `@module` line – set `module`, clear `aliases` | first `if` in `process_line` |
|
||||
# | `@alias` line – split `name=value` pairs into `aliases` | second `elif` |
|
||||
# | `@long-alias` / `@long-alias-end` handling | third/fourth `elif` blocks + the `if self.long_alias_name` section |
|
||||
# | Detection of illegal `PRIVATE__` usage | `elif "PRIVATE__" in orig_line` (the same string that the AWK script would have produced after the `~` replacement) |
|
||||
# | Validation of `@query|@reachable|@lemma` statements | `elif re.search(r"@(query|reachable|lemma)", …)` |
|
||||
# | Replacement of `~` with `PRIVATE__<module>__` | `line.replace("~", …)` |
|
||||
# | Token‑wise alias substitution (the long `for (i=1; …)` loop) | the `while i < len(line): …` loop that restarts from the beginning after each successful replacement |
|
||||
# | Accumulating the final output in `buf` | `self.buf += line + "\n"` |
|
||||
# | `END` block – print buffer if no error | `Translator.finish()` |
|
||||
|
||||
# The script can be saved as `translate_awk_to_py.py`, made executable (`chmod +x translate_awk_to_py.py`) and used exactly like the original AWK program. All error messages, line numbers and exit codes are identical, so any surrounding tooling that expects the AWK behaviour will continue to work.
|
||||
BIN
papers/graphics/rosenpass-wireguard-hybrid-security.pdf
Normal file
BIN
papers/graphics/rosenpass-wireguard-hybrid-security.pdf
Normal file
Binary file not shown.
@@ -2,6 +2,7 @@
|
||||
\usepackage{amssymb}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{fontspec}
|
||||
\usepackage{dirtytalk}
|
||||
|
||||
%font fallback
|
||||
\directlua{luaotfload.add_fallback
|
||||
|
||||
@@ -8,13 +8,15 @@ author:
|
||||
- Lisa Schmidt = {Scientific Illustrator – \\url{mullana.de}}
|
||||
- Prabhpreet Dua
|
||||
abstract: |
|
||||
Rosenpass is used to create post-quantum-secure VPNs. Rosenpass computes a shared key, WireGuard (WG) [@wg] uses the shared key to establish a secure connection. Rosenpass can also be used without WireGuard, deriving post-quantum-secure symmetric keys for another application. The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks.
|
||||
Rosenpass is a post-quantum-secure authenticated key exchange protocol. Its main practical use case is creating post-quantum-secure VPNs by combining WireGuard and Rosenpass.
|
||||
|
||||
The WireGuard implementation enjoys great trust from the cryptography community and has excellent performance characteristics. To preserve these features, the Rosenpass application runs side-by-side with WireGuard and supplies a new post-quantum-secure pre-shared key (PSK) every two minutes. WireGuard itself still performs the pre-quantum-secure key exchange and transfers any transport data with no involvement from Rosenpass at all.
|
||||
In this combination, Rosenpass generates a post-quantum-secure shared key every two minutes that is then used by WireGuard (WG) [@wg] to establish a secure connection. Rosenpass can also be used without WireGuard, providing post-quantum-secure symmetric keys for other applications, as long as the other application accepts a pre-shared key and provides cryptographic security based on the pre-shared key alone.
|
||||
|
||||
The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks. From a cryptographic perspective, Rosenpass can be thought of as a post-quantum secure variant of the Noise IK[@noise] key exchange. \say{Noise IK} means that the protocol makes both parties authenticate themselves, but that the initiator knows before the protocol starts which other party they are communicating with. There is no negotiation step where the responder communicates their identity to the initiator.
|
||||
|
||||
The Rosenpass project consists of a protocol description, an implementation written in Rust, and a symbolic analysis of the protocol’s security using ProVerif [@proverif]. We are working on a cryptographic security proof using CryptoVerif [@cryptoverif].
|
||||
|
||||
This document is a guide for engineers and researchers implementing the protocol; a scientific paper discussing the security properties of Rosenpass is work in progress.
|
||||
This document is a guide for engineers and researchers implementing the protocol.
|
||||
---
|
||||
|
||||
\enlargethispage{5mm}
|
||||
@@ -31,7 +33,7 @@ abstract: |
|
||||
|
||||
# Security
|
||||
|
||||
Rosenpass inherits most security properties from Post-Quantum WireGuard (PQWG). The security properties mentioned here are covered by the symbolic analysis in the Rosenpass repository.
|
||||
Rosenpass inherits most security properties from Post-Quantum WireGuard (PQWG). The security properties mentioned here are covered by the symbolic analysis in the Rosenpass repository.
|
||||
|
||||
## Secrecy
|
||||
Three key encapsulations using the keypairs `sski`/`spki`, `sskr`/`spkr`, and `eski`/`epki` provide secrecy (see Section \ref{variables} for an introduction of the variables). Their respective ciphertexts are called `scti`, `sctr`, and `ectr` and the resulting keys are called `spti`, `sptr`, `epti`. A single secure encapsulation is sufficient to provide secrecy. We use two different KEMs (Key Encapsulation Mechanisms; see Section \ref{skem}): Kyber and Classic McEliece.
|
||||
@@ -154,16 +156,18 @@ Rosenpass uses two types of ID variables. See Figure \ref{img:HashingTree} for h
|
||||
|
||||
The first lower-case character indicates whether the variable is a session ID (`sid`) or a peer ID (`pid`). The final character indicates the role using the characters `i`, `r`, `m`, or `t`, for `initiator`, `responder`, `mine`, or `theirs` respectively.
|
||||
|
||||
### Symmetric Keys
|
||||
|
||||
Rosenpass uses two symmetric key variables `psk` and `osk` in its interface, and maintains the entire handshake state in a variable called the chaining key.
|
||||
### Symmetric Keys {#symmetric-keys}
|
||||
|
||||
Rosenpass uses two main symmetric key variables `psk` and `osk` in its interface, and maintains the entire handshake state in a variable called the chaining key.
|
||||
|
||||
* `psk`: A pre-shared key that can be optionally supplied as input to Rosenpass.
|
||||
* `osk`: The output shared key, generated by Rosenpass and supplied to WireGuard for use as its pre-shared key.
|
||||
* `ck`: The chaining key.
|
||||
* `osk`: The output shared key, generated by Rosenpass. The main use case is to supply the key to WireGuard for use as its pre-shared key.
|
||||
* `ck`: The chaining key. This refers to various intermediate keys produced during the execution of the protocol, before the final `osk` is produced.
|
||||
|
||||
We mix all key material (e.g. `psk`) into the chaining key and derive symmetric keys such as `osk` from it. We authenticate public values by mixing them into the chaining key; in particular, we include the entire protocol transcript in the chaining key, i.e., all values transmitted over the network.
|
||||
|
||||
The protocol allows for multiple `osk`s to be generated; each of these keys is labeled with a domain separator to make sure different key usages are always given separate keys. The domain separator for using Rosenpass and WireGuard together is a token generated using the domain separator sequence `["rosenpass.eu", "wireguard psk"]` (see Fig. \ref{img:HashingTree}), as described in \ref{protocol-extension-wireguard-psk}. Third-parties using Rosenpass-keys for other purposes are asked to define their own protocol-extensions. Standard protocol extensions are described in \ref{protocol-extensions}.
|
||||
|
||||
We mix all key material (e.g. `psk`) into the chaining key, and derive symmetric keys such as `osk` from it. We authenticate public values by mixing them into the chaining key; in particular, we include the entire protocol transcript in the chaining key, i.e., all values transmitted over the network.
|
||||
|
||||
## Hashes
|
||||
|
||||
@@ -182,7 +186,7 @@ Using one hash function for multiple purposes can cause real-world security issu
|
||||
\setupimage{landscape,fullpage,label=img:HashingTree}
|
||||

|
||||
|
||||
Each tree node $\circ{}$ in Figure 3 represents the application of the keyed hash function, using the previous chaining key value as first parameter. The root of the tree is the zero key. In level one, the `PROTOCOL` identifier is applied to the zero key to generate a label unique across cryptographic protocols (unless the same label is deliberately used elsewhere). In level two, purpose identifiers are applied to the protocol label to generate labels to use with each separate hash function application within the Rosenpass protocol. The following layers contain the inputs used in each separate usage of the hash function: Beneath the identifiers `"mac"`, `"cookie"`, `"peer id"`, and `"biscuit additional data"` are hash functions or message authentication codes with a small number of inputs. The second, third, and fourth column in Figure 3 cover the long sequential branch beneath the identifier `"chaining key init"` representing the entire protocol execution, one column for each message processed during the handshake. The leaves beneath `"chaining key extract"` in the left column represent pseudo-random labels for use when extracting values from the chaining key during the protocol execution. These values such as `mix >` appear as outputs in the left column, and then as inputs `< mix` in the other three columns.
|
||||
Each tree node $\circ{}$ in Figure \ref{img:HashingTree} represents the application of the keyed hash function, using the previous chaining key value as first parameter. The root of the tree is the zero key. In level one, the `PROTOCOL` identifier is applied to the zero key to generate a label unique across cryptographic protocols (unless the same label is deliberately used elsewhere). In level two, purpose identifiers are applied to the protocol label to generate labels to use with each separate hash function application within the Rosenpass protocol. The following layers contain the inputs used in each separate usage of the hash function: Beneath the identifiers `"mac"`, `"cookie"`, `"peer id"`, and `"biscuit additional data"` are hash functions or message authentication codes with a small number of inputs. The second, third, and fourth column in Figure \ref{img:HashingTree} cover the long sequential branch beneath the identifier `"chaining key init"` representing the entire protocol execution, one column for each message processed during the handshake. The leaves beneath `"chaining key extract"` in the left column represent pseudo-random labels for use when extracting values from the chaining key during the protocol execution. These values such as `mix >` appear as outputs in the left column, and then as inputs `< mix` in the other three columns.
|
||||
|
||||
The protocol identifier depends on the hash function used with the respective peer and is defined as follows if BLAKE2s [@rfc_blake2] is used:
|
||||
|
||||
@@ -289,7 +293,7 @@ fn lookup_session(sid);
|
||||
|
||||
The protocol framework used by Rosenpass allows arbitrarily many different keys to be extracted using labels for each key. The `extract_key` function is used to derive protocol-internal keys, its labels are under the “chaining key extract” node in Figure \ref{img:HashingTree}. The export key function is used to export application keys.
|
||||
|
||||
Third-party applications using the protocol are supposed to choose a unique label (e.g., their domain name) and use that as their own namespace for custom labels. The Rosenpass project itself uses the “rosenpass.eu” namespace.
|
||||
Third-party applications using the protocol are supposed to define a protocol extension (see \ref{protocol-extensions}) and choose a globally unique label, such as their domain name for custom labels of their own. The Rosenpass project itself uses the `["rosenpass.eu"]` namespace in the WireGuard PSK protocol extension (see \ref{protocol-extension-wireguard-psk}).
|
||||
|
||||
Applications can cache or statically compile the pseudo-random label values into their binary to improve performance.
|
||||
|
||||
@@ -395,7 +399,7 @@ fn load_biscuit(nct) {
|
||||
|
||||
// In December 2024, the InitConf retransmission mechanism was redesigned
|
||||
// in a backwards-compatible way. See the changelog.
|
||||
//
|
||||
//
|
||||
// -- 2024-11-30, Karolin Varner
|
||||
if (protocol_version!(< "0.3.0")) {
|
||||
// Ensure that the biscuit is used only once
|
||||
@@ -421,6 +425,18 @@ fn enter_live() {
|
||||
txkr ← extract_key("responder payload encryption");
|
||||
txnm ← 0;
|
||||
txnt ← 0;
|
||||
|
||||
// Setup output keys for protocol extensions such as the
|
||||
// WireGuard PSK protocol extension.
|
||||
setup_osks();
|
||||
}
|
||||
```
|
||||
|
||||
The final step `setup_osks()` can be defined by protocol extensions (see \ref{protocol-extensions}) to set up `osk`s for custom use cases. By default, the WireGuard PSK (see \ref{protocol-extension-wireguard-psk}) is active.
|
||||
|
||||
```pseudorust
|
||||
fn setup_osks() {
|
||||
... // Defined by protocol extensions
|
||||
}
|
||||
```
|
||||
|
||||
@@ -448,11 +464,11 @@ ICR5 and ICR6 perform biscuit replay protection using the biscuit number. This i
|
||||
|
||||
### Denial of Service Mitigation and Cookies
|
||||
|
||||
Rosenpass derives its cookie-based DoS mitigation technique for a responder when receiving InitHello messages from WireGuard [@wg].
|
||||
Rosenpass derives its cookie-based DoS mitigation technique for a responder when receiving InitHello messages from WireGuard [@wg].
|
||||
|
||||
When the responder is under load, it may choose to not process further InitHello handshake messages, but instead to respond with a cookie reply message (see Figure \ref{img:MessageTypes}).
|
||||
|
||||
The sender of the exchange then uses this cookie in order to resend the message and have it accepted the following time by the receiver.
|
||||
The sender of the exchange then uses this cookie in order to resend the message and have it accepted the following time by the receiver.
|
||||
|
||||
For an initiator, Rosenpass ignores all messages when under load.
|
||||
|
||||
@@ -465,7 +481,7 @@ cookie_value = lhash("cookie-value", cookie_secret, initiator_host_info)[0..16]
|
||||
cookie_encrypted = XAEAD(lhash("cookie-key", spkm), nonce, cookie_value, mac_peer)
|
||||
```
|
||||
|
||||
where `cookie_secret` is a secret variable that changes every two minutes to a random value. Moreover, `lhash` is always instantiated with SHAKE256 when computing `cookie_value` for compatibility reasons. `initiator_host_info` is used to identify the initiator host, and is implementation-specific for the client. The parameters used to identify the host must be carefully chosen to ensure there is a unique mapping, especially when using IPv4 and IPv6 addresses to identify the host (such as taking care of IPv6 link-local addresses). `cookie_value` is a truncated 16 byte value from the above hash operation. `mac_peer` is the `mac` field of the peer's handshake message to which this message is the reply.
|
||||
where `cookie_secret` is a secret variable that changes every two minutes to a random value. Moreover, `lhash` is always instantiated with SHAKE256 when computing `cookie_value` for compatibility reasons. `initiator_host_info` is used to identify the initiator host, and is implementation-specific for the client. The parameters used to identify the host must be carefully chosen to ensure there is a unique mapping, especially when using IPv4 and IPv6 addresses to identify the host (such as taking care of IPv6 link-local addresses). `cookie_value` is a truncated 16 byte value from the above hash operation. `mac_peer` is the `mac` field of the peer's handshake message to which this message is the reply.
|
||||
|
||||
#### Envelope `mac` Field
|
||||
|
||||
@@ -495,13 +511,13 @@ else {
|
||||
Here, `seconds_since_update(peer.cookie_value)` is the amount of time in seconds elapsed since the last cookie was received, and `COOKIE_WIRE_DATA` are the message contents of all bytes of the retransmitted message prior to the `cookie` field.
|
||||
|
||||
The initiator can use an invalid value for the `cookie` field when the responder is not under load, and the responder must ignore this value.
|
||||
However, when the responder is under load, it may reject InitHello messages with the invalid `cookie` value, and issue a cookie reply message.
|
||||
However, when the responder is under load, it may reject InitHello messages with the invalid `cookie` value, and issue a cookie reply message.
|
||||
|
||||
### Conditions to trigger DoS Mechanism
|
||||
|
||||
This whitepaper does not mandate any specific mechanism to detect responder contention (also mentioned as the under load condition) that would trigger use of the cookie mechanism.
|
||||
|
||||
For the reference implementation, Rosenpass has derived inspiration from the Linux implementation of WireGuard. This implementation suggests that the receiver keep track of the number of messages it is processing at a given time.
|
||||
For the reference implementation, Rosenpass has derived inspiration from the Linux implementation of WireGuard. This implementation suggests that the receiver keep track of the number of messages it is processing at a given time.
|
||||
|
||||
On receiving an incoming message, if the length of the message queue to be processed exceeds a threshold `MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD`, the client is considered under load and its state is stored as under load. In addition, the timestamp of this instant when the client was last under load is stored. When receiving subsequent messages, if the client is still in an under load state, the client will check if the time elapsed since the client was last under load has exceeded `LAST_UNDER_LOAD_WINDOW` seconds. If this is the case, the client will update its state to normal operation, and process the message in a normal fashion.
|
||||
|
||||
@@ -520,23 +536,159 @@ The responder uses less complex form of the same mechanism: The responder never
|
||||
|
||||
### Interaction with cookie reply system
|
||||
|
||||
The cookie reply system does not interfere with the retransmission logic discussed above.
|
||||
The cookie reply system does not interfere with the retransmission logic discussed above.
|
||||
|
||||
When the initiator is under load, it will ignore all incoming messages.
|
||||
|
||||
When a responder is under load and it receives an InitHello handshake message, the InitHello message will be discarded and a cookie reply message is sent. The initiator, on receipt of the cookie reply message, will store the decrypted `cookie_value` to set the `cookie` field of subsequently sent messages. As per the retransmission mechanism above, the initiator will send a retransmitted InitHello message with a valid `cookie` value appended. On receiving the retransmitted handshake message, the responder will validate the `cookie` value and resume with the handshake process.
|
||||
When a responder is under load and it receives an InitHello handshake message, the InitHello message will be discarded and a cookie reply message is sent. The initiator, on receipt of the cookie reply message, will store the decrypted `cookie_value` to set the `cookie` field of subsequently sent messages. As per the retransmission mechanism above, the initiator will send a retransmitted InitHello message with a valid `cookie` value appended. On receiving the retransmitted handshake message, the responder will validate the `cookie` value and resume with the handshake process.
|
||||
|
||||
When the responder is under load and it receives an InitConf message, the message will be directly processed without checking the validity of the cookie field.
|
||||
|
||||
# Protocol extensions {#protocol-extensions}
|
||||
|
||||
The main extension point for the Rosenpass protocol is to generate `osk`s (speak output shared keys, see Sec. \ref{symmetric-keys}) for purposes other than using them to secure WireGuard. By default, the Rosenpass application generates keys for the WireGuard PSK (see \ref{protocol-extension-wireguard-psk}). It would not be impossible to use the keys generated for WireGuard in other use cases, but this might lead to attacks[@oraclecloning]. Specifying a custom protocol extension in practice just means settling on alternative domain separators (see Sec. \ref{symmetric-keys}, Fig. \ref{img:HashingTree}).
|
||||
|
||||
## Using custom domain separators in the Rosenpass application
|
||||
|
||||
The Rosenpass application supports protocol extensions to change the OSK domain separator without modification of the source code.
|
||||
|
||||
The following example configuration file can be used to execute Rosenpass in outfile mode with custom domain separators.
|
||||
In this mode, the Rosenpass application will write keys to the file specified with `key_out` and send notifications when new keys are exchanged via standard out.
|
||||
This can be used to embed Rosenpass into third-party applications.
|
||||
|
||||
```toml
|
||||
# peer-a.toml
|
||||
public_key = "peer-a.pk"
|
||||
secret_key = "peer-a.sk"
|
||||
listen = ["[::1]:6789"]
|
||||
verbosity = "Verbose"
|
||||
|
||||
[[peers]]
|
||||
public_key = "peer-b.pk"
|
||||
key_out = "peer-a.osk" # path to store the key
|
||||
osk_organization = "myorg.com"
|
||||
osk_label = ["My Custom Messenger app", "Backend VPN Example Subusecase"]
|
||||
```
|
||||
|
||||
## Extension: WireGuard PSK {#protocol-extension-wireguard-psk}
|
||||
|
||||
The WireGuard PSK protocol extension is active by default; this is the mode where Rosenpass is used to provide post-quantum security for WireGuard. Hybrid security (i.e. redundant pre-quantum and post-quantum security) is achieved because WireGuard provides pre-quantum security, with or without Rosenpass.
|
||||
|
||||
This extension uses the `"rosenpass.eu"` namespace for user-labels and specifies a single additional user-label:
|
||||
|
||||
* `["rosenpass.eu", "wireguard psk"]`
|
||||
|
||||
The label's full domain separator is
|
||||
|
||||
* `[PROTOCOL, "user", "rosenpass.eu", "wireguard psk"]`
|
||||
|
||||
and can be seen in Figure \ref{img:HashingTree}.
|
||||
|
||||
We require two extra per-peer configuration variables:
|
||||
|
||||
* `wireguard_interface` — Name of a local network interface. Identifies local WireGuard interface we are supplying a PSK to.
|
||||
* `wireguard_peer` — A WireGuard public key. Identifies the particular WireGuard peer whose connection we are supplying PSKs for.
|
||||
|
||||
When creating the WireGuard interface for use with Rosenpass, the PSK used by WireGuard must be initialized to a random value; otherwise, WireGuard can establish an insecure key before Rosenpass has had a chance to exchange its own key.
|
||||
|
||||
```pseudorust
|
||||
fn on_wireguard_setup() {
|
||||
// We use a random PSK to make sure the other side will never
|
||||
// have a matching PSK when the WireGuard interface is created.
|
||||
//
|
||||
// Never use a fixed value here as this would lead to an attack!
|
||||
let fake_wireguard_psk = random_key();
|
||||
|
||||
// How the interface is created
|
||||
let wg_peer = WireGuard::setup_peer()
|
||||
.public_key(wireguard_peer)
|
||||
... // Supply any custom peer configuration
|
||||
.psk(fake_wireguard_psk);
|
||||
|
||||
// The random PSK must be supplied before the
|
||||
// WireGuard interface comes up
|
||||
WireGuard::setup_interface()
|
||||
.name(wireguard_interface)
|
||||
... // Supply any custom configuration
|
||||
.add_peer(wg_peer)
|
||||
.create();
|
||||
}
|
||||
```
|
||||
|
||||
Every time a key is successfully negotiated, we upload the key to WireGuard.
|
||||
For this protocol extension, the `setup_osks()` function is thus defined as:
|
||||
|
||||
```pseudorust
|
||||
fn setup_osks() {
|
||||
// Generate WireGuard OSK (output shared key) from Rosenpass'
|
||||
// perspective, respectively the PSK (preshared key) from
|
||||
// WireGuard's perspective
|
||||
let wireguard_psk = export_key("rosenpass.eu", "wireguard psk");
|
||||
|
||||
/// Supply the PSK to WireGuard
|
||||
WireGuard::get_interface(wireguard_interface)
|
||||
.get_peer(wireguard_peer)
|
||||
.set_psk(wireguard_psk);
|
||||
}
|
||||
```
|
||||
|
||||
The Rosenpass protocol uses key renegotiation, just like WireGuard.
|
||||
If no new `osk` is produced within a set amount of time, the OSK generated by Rosenpass times out.
|
||||
In this case, the WireGuard PSK must be overwritten with a random key.
|
||||
This interaction is visualized in Figure \ref{img:ExtWireguardPSKHybridSecurity}.
|
||||
|
||||
```pseudorust
|
||||
fn on_key_timeout() {
|
||||
// Generate a random – deliberately invalid – WireGuard PSK.
|
||||
// Never use a fixed value here as this would lead to an attack!
|
||||
let fake_wireguard_psk = random_key();
|
||||
|
||||
// Securely erase the PSK currently used by WireGuard by
|
||||
// overwriting it with the fake key we just generated.
|
||||
WireGuard::get_interface(wireguard_interface)
|
||||
.get_peer(wireguard_peer)
|
||||
.set_psk(fake_wireguard_psk);
|
||||
}
|
||||
```
|
||||
|
||||
\setupimage{label=img:ExtWireguardPSKHybridSecurity,fullpage}
|
||||

|
||||
|
||||
# Changelog
|
||||
|
||||
### 0.3.x
|
||||
|
||||
#### 2025-06-24 – Specifying the `osk` used for WireGuard as a protocol extension
|
||||
|
||||
\vspace{0.5em}
|
||||
|
||||
Author: Karolin Varner
|
||||
|
||||
PR: [#664](https://github.com/rosenpass/rosenpass/pull/664)
|
||||
|
||||
\vspace{0.5em}
|
||||
|
||||
We introduce the concept of protocol extensions to make the option of using Rosenpass for purposes other than encrypting WireGuard more explicit. This captures the status-quo in a better way and does not constitute a functional change of the protocol.
|
||||
|
||||
When we designed the Rosenpass protocol, we built it with support for alternative `osk`-labels in mind.
|
||||
This is why we specified the domain separator for the `osk` to be `[PROTOCOL, "user", "rosenpass.eu", "wireguard psk"]`.
|
||||
By choosing alternative values for the namespace (e.g. `"myorg.eu"` instead of `"rosenpass.eu"`) and the label (e.g. `"MyApp Symmetric Encryption"`), the protocol could easily accommodate alternative usage scenarios.
|
||||
|
||||
By introducing the concept of protocol extensions, we make this possibility explicit.
|
||||
|
||||
1. Reworded the abstract to make it clearer that Rosenpass can be used for other purposes than to secure WireGuard
|
||||
2. Reworded Section Symmetric Keys, adding references to the new section on protocol extension
|
||||
3. Added a `setup_osks()` function in section Hashes, to make the reference to protocol extensions explicit
|
||||
4. Added a new section on protocol extensions and the standard extension for using Rosenpass with WireGuard
|
||||
5. Added a new graphic to showcase how Rosenpass and WireGuard interact
|
||||
6. Minor formatting and intra-document reference fixes
|
||||
|
||||
#### 2025-05-22 - SHAKE256 keyed hash
|
||||
\vspace{0.5em}
|
||||
|
||||
Author: David Niehues
|
||||
PR: [#653](https://github.com/rosenpass/rosenpass/pull/653)
|
||||
|
||||
PR: [#653](https://github.com/rosenpass/rosenpass/pull/653)
|
||||
|
||||
\vspace{0.5em}
|
||||
|
||||
@@ -554,9 +706,11 @@ In order to maintain compatablity without introducing an explcit version number
|
||||
|
||||
\vspace{0.5em}
|
||||
|
||||
Author: Karolin Varner
|
||||
Issue: [#331](https://github.com/rosenpass/rosenpass/issues/331)
|
||||
PR: [#513](https://github.com/rosenpass/rosenpass/pull/513)
|
||||
Author: Karolin Varner
|
||||
|
||||
Issue: [#331](https://github.com/rosenpass/rosenpass/issues/331)
|
||||
|
||||
PR: [#513](https://github.com/rosenpass/rosenpass/pull/513)
|
||||
|
||||
\vspace{0.5em}
|
||||
|
||||
@@ -574,7 +728,7 @@ By removing all retransmission handling code from the cryptographic protocol, we
|
||||
The responder does not need to do anything special to handle RespHello retransmission – if the RespHello package is lost, the initiator retransmits InitHello and the responder can generate another RespHello package from that. InitConf retransmission needs to be handled specifically in the responder code because accepting an InitConf retransmission would reset the live session including the nonce counter, which would cause nonce reuse. Implementations must detect the case that `biscuit_no = biscuit_used` in ICR5, skip execution of ICR6 and ICR7, and just transmit another EmptyData package to confirm that the initiator can stop transmitting InitConf.
|
||||
\end{quote}
|
||||
|
||||
by
|
||||
by
|
||||
|
||||
\begin{quote}
|
||||
The responder uses a less complex form of the same mechanism: The responder never retransmits RespHello, instead the responder generates a new RespHello message if InitHello is retransmitted. Responder confirmation messages of completed handshake (EmptyData) messages are retransmitted by storing the most recent InitConf messages (or their hashes) and caching the associated EmptyData messages. Through this cache, InitConf retransmission is detected and the associated EmptyData message is retransmitted.
|
||||
@@ -597,7 +751,7 @@ By removing all retransmission handling code from the cryptographic protocol, we
|
||||
\begin{minted}{pseudorust}
|
||||
// In December 2024, the InitConf retransmission mechanism was redesigned
|
||||
// in a backwards-compatible way. See the changelog.
|
||||
//
|
||||
//
|
||||
// -- 2024-11-30, Karolin Varner
|
||||
if (protocol_version!(< "0.3.0")) {
|
||||
// Ensure that the biscuit is used only once
|
||||
@@ -611,9 +765,11 @@ By removing all retransmission handling code from the cryptographic protocol, we
|
||||
|
||||
\vspace{0.5em}
|
||||
|
||||
Author: Prabhpreet Dua
|
||||
Issue: [#137](https://github.com/rosenpass/rosenpass/issues/137)
|
||||
PR: [#142](https://github.com/rosenpass/rosenpass/pull/142)
|
||||
Author: Prabhpreet Dua
|
||||
|
||||
Issue: [#137](https://github.com/rosenpass/rosenpass/issues/137)
|
||||
|
||||
PR: [#142](https://github.com/rosenpass/rosenpass/pull/142)
|
||||
|
||||
\vspace{0.5em}
|
||||
|
||||
|
||||
@@ -44,6 +44,7 @@ let
|
||||
xifthen
|
||||
xkeyval
|
||||
xurl
|
||||
dirtytalk
|
||||
;
|
||||
}
|
||||
);
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
use anyhow::Result;
|
||||
use rosenpass::protocol::{
|
||||
CryptoServer, HandleMsgResult, MsgBuf, PeerPtr, ProtocolVersion, SPk, SSk, SymKey,
|
||||
};
|
||||
use std::ops::DerefMut;
|
||||
|
||||
use anyhow::Result;
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
|
||||
use rosenpass_cipher_traits::primitives::Kem;
|
||||
use rosenpass_ciphers::StaticKem;
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use rosenpass_secret_memory::secret_policy_try_use_memfd_secrets;
|
||||
|
||||
use rosenpass::protocol::basic_types::{MsgBuf, SPk, SSk, SymKey};
|
||||
use rosenpass::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
use rosenpass::protocol::{CryptoServer, HandleMsgResult, PeerPtr, ProtocolVersion};
|
||||
|
||||
fn handle(
|
||||
tx: &mut CryptoServer,
|
||||
msgb: &mut MsgBuf,
|
||||
@@ -54,8 +55,18 @@ fn make_server_pair(protocol_version: ProtocolVersion) -> Result<(CryptoServer,
|
||||
CryptoServer::new(ska, pka.clone()),
|
||||
CryptoServer::new(skb, pkb.clone()),
|
||||
);
|
||||
a.add_peer(Some(psk.clone()), pkb, protocol_version.clone())?;
|
||||
b.add_peer(Some(psk), pka, protocol_version)?;
|
||||
a.add_peer(
|
||||
Some(psk.clone()),
|
||||
pkb,
|
||||
protocol_version.clone(),
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
b.add_peer(
|
||||
Some(psk),
|
||||
pka,
|
||||
protocol_version,
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
Ok((a, b))
|
||||
}
|
||||
|
||||
|
||||
@@ -1,12 +1,9 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
hint::black_box,
|
||||
io::{self, Write},
|
||||
ops::DerefMut,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use std::io::{self, Write};
|
||||
use std::time::{Duration, Instant};
|
||||
use std::{collections::HashMap, hint::black_box, ops::DerefMut};
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
use libcrux_test_utils::tracing::{EventType, Trace as _};
|
||||
|
||||
use rosenpass_cipher_traits::primitives::Kem;
|
||||
@@ -14,9 +11,9 @@ use rosenpass_ciphers::StaticKem;
|
||||
use rosenpass_secret_memory::secret_policy_try_use_memfd_secrets;
|
||||
use rosenpass_util::trace_bench::RpEventType;
|
||||
|
||||
use rosenpass::protocol::{
|
||||
CryptoServer, HandleMsgResult, MsgBuf, PeerPtr, ProtocolVersion, SPk, SSk, SymKey,
|
||||
};
|
||||
use rosenpass::protocol::basic_types::{MsgBuf, SPk, SSk, SymKey};
|
||||
use rosenpass::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
use rosenpass::protocol::{CryptoServer, HandleMsgResult, PeerPtr, ProtocolVersion};
|
||||
|
||||
const ITERATIONS: usize = 100;
|
||||
|
||||
@@ -77,8 +74,18 @@ fn make_server_pair(protocol_version: ProtocolVersion) -> Result<(CryptoServer,
|
||||
CryptoServer::new(ska, pka.clone()),
|
||||
CryptoServer::new(skb, pkb.clone()),
|
||||
);
|
||||
a.add_peer(Some(psk.clone()), pkb, protocol_version.clone())?;
|
||||
b.add_peer(Some(psk), pka, protocol_version)?;
|
||||
a.add_peer(
|
||||
Some(psk.clone()),
|
||||
pkb,
|
||||
protocol_version.clone(),
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
b.add_peer(
|
||||
Some(psk),
|
||||
pka,
|
||||
protocol_version,
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
Ok((a, b))
|
||||
}
|
||||
|
||||
|
||||
@@ -158,10 +158,10 @@ where
|
||||
);
|
||||
|
||||
// Actually read the secrets
|
||||
let mut sk = crate::protocol::SSk::zero();
|
||||
let mut sk = crate::protocol::basic_types::SSk::zero();
|
||||
sk_io.read_exact_til_end(sk.secret_mut()).einvalid_req()?;
|
||||
|
||||
let mut pk = crate::protocol::SPk::zero();
|
||||
let mut pk = crate::protocol::basic_types::SPk::zero();
|
||||
pk_io.read_exact_til_end(pk.borrow_mut()).einvalid_req()?;
|
||||
|
||||
// Retrieve the construction site
|
||||
|
||||
@@ -8,6 +8,7 @@ use crate::app_server::AppServer;
|
||||
|
||||
/// Configuration options for the Rosenpass API
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq, Eq)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct ApiConfig {
|
||||
/// Where in the file-system to create the unix socket the rosenpass API will be listening for
|
||||
/// connections on
|
||||
|
||||
@@ -1,56 +1,34 @@
|
||||
/// This contains the bulk of the rosenpass server IO handling code whereas
|
||||
/// the actual cryptographic code lives in the [crate::protocol] module
|
||||
use anyhow::bail;
|
||||
//! This contains the bulk of the rosenpass server IO handling code whereas
|
||||
//! the actual cryptographic code lives in the [crate::protocol] module
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::io::{stdout, ErrorKind, Write};
|
||||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
|
||||
use std::time::{Duration, Instant};
|
||||
use std::{cell::Cell, fmt::Debug, io, path::PathBuf, slice};
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
use derive_builder::Builder;
|
||||
use log::{error, info, warn};
|
||||
use mio::Interest;
|
||||
use mio::Token;
|
||||
use rosenpass_secret_memory::Public;
|
||||
use rosenpass_secret_memory::Secret;
|
||||
use rosenpass_util::build::ConstructionSite;
|
||||
use rosenpass_util::file::StoreValueB64;
|
||||
use rosenpass_util::functional::run;
|
||||
use rosenpass_util::functional::ApplyExt;
|
||||
use rosenpass_util::io::IoResultKindHintExt;
|
||||
use rosenpass_util::io::SubstituteForIoErrorKindExt;
|
||||
use rosenpass_util::option::SomeExt;
|
||||
use rosenpass_util::result::OkExt;
|
||||
use rosenpass_wireguard_broker::WireguardBrokerMio;
|
||||
use rosenpass_wireguard_broker::{WireguardBrokerCfg, WG_KEY_LEN};
|
||||
use mio::{Interest, Token};
|
||||
use zerocopy::AsBytes;
|
||||
|
||||
use std::cell::Cell;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::io::stdout;
|
||||
use std::io::ErrorKind;
|
||||
use std::io::Write;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::net::SocketAddr;
|
||||
use std::net::SocketAddrV4;
|
||||
use std::net::SocketAddrV6;
|
||||
use std::net::ToSocketAddrs;
|
||||
use std::path::PathBuf;
|
||||
use std::slice;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::config::ProtocolVersion;
|
||||
use crate::protocol::BuildCryptoServer;
|
||||
use crate::protocol::HostIdentification;
|
||||
use crate::{
|
||||
config::Verbosity,
|
||||
protocol::{CryptoServer, MsgBuf, PeerPtr, SPk, SSk, SymKey, Timing},
|
||||
};
|
||||
use rosenpass_util::attempt;
|
||||
use rosenpass_util::b64::B64Display;
|
||||
use rosenpass_util::functional::{run, ApplyExt};
|
||||
use rosenpass_util::io::{IoResultKindHintExt, SubstituteForIoErrorKindExt};
|
||||
use rosenpass_util::{
|
||||
b64::B64Display, build::ConstructionSite, file::StoreValueB64, option::SomeExt, result::OkExt,
|
||||
};
|
||||
|
||||
use rosenpass_secret_memory::{Public, Secret};
|
||||
use rosenpass_wireguard_broker::{WireguardBrokerCfg, WireguardBrokerMio, WG_KEY_LEN};
|
||||
|
||||
use crate::config::{ProtocolVersion, Verbosity};
|
||||
|
||||
use crate::protocol::basic_types::{MsgBuf, SPk, SSk, SymKey};
|
||||
use crate::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
use crate::protocol::timing::Timing;
|
||||
use crate::protocol::{BuildCryptoServer, CryptoServer, HostIdentification, PeerPtr};
|
||||
|
||||
/// The maximum size of a base64 encoded symmetric key (estimate)
|
||||
pub const MAX_B64_KEY_SIZE: usize = 32 * 5 / 3;
|
||||
@@ -1036,6 +1014,7 @@ impl AppServer {
|
||||
/// # Examples
|
||||
///
|
||||
/// See [Self::new].
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn add_peer(
|
||||
&mut self,
|
||||
psk: Option<SymKey>,
|
||||
@@ -1044,11 +1023,16 @@ impl AppServer {
|
||||
broker_peer: Option<BrokerPeer>,
|
||||
hostname: Option<String>,
|
||||
protocol_version: ProtocolVersion,
|
||||
osk_domain_separator: OskDomainSeparator,
|
||||
) -> anyhow::Result<AppPeerPtr> {
|
||||
let PeerPtr(pn) = match &mut self.crypto_site {
|
||||
ConstructionSite::Void => bail!("Crypto server construction site is void"),
|
||||
ConstructionSite::Builder(builder) => builder.add_peer(psk, pk, protocol_version),
|
||||
ConstructionSite::Product(srv) => srv.add_peer(psk, pk, protocol_version.into())?,
|
||||
ConstructionSite::Builder(builder) => {
|
||||
builder.add_peer(psk, pk, protocol_version, osk_domain_separator)
|
||||
}
|
||||
ConstructionSite::Product(srv) => {
|
||||
srv.add_peer(psk, pk, protocol_version.into(), osk_domain_separator)?
|
||||
}
|
||||
};
|
||||
assert!(pn == self.peers.len());
|
||||
|
||||
@@ -1337,7 +1321,7 @@ impl AppServer {
|
||||
break A::SendRetransmission(AppPeerPtr(no))
|
||||
}
|
||||
Some(C::Sleep(timeout)) => timeout, // No event from crypto-server, do IO
|
||||
None => crate::protocol::UNENDING, // Crypto server is uninitialized, do IO
|
||||
None => crate::protocol::timing::UNENDING, // Crypto server is uninitialized, do IO
|
||||
};
|
||||
|
||||
// Perform IO (look for a message)
|
||||
|
||||
@@ -17,7 +17,7 @@ use std::path::PathBuf;
|
||||
|
||||
use crate::app_server::AppServerTest;
|
||||
use crate::app_server::{AppServer, BrokerPeer};
|
||||
use crate::protocol::{SPk, SSk, SymKey};
|
||||
use crate::protocol::basic_types::{SPk, SSk, SymKey};
|
||||
|
||||
use super::config;
|
||||
|
||||
@@ -491,6 +491,7 @@ impl CliArgs {
|
||||
broker_peer,
|
||||
cfg_peer.endpoint.clone(),
|
||||
cfg_peer.protocol_version.into(),
|
||||
cfg_peer.osk_domain_separator.try_into()?,
|
||||
)?;
|
||||
}
|
||||
|
||||
@@ -607,8 +608,8 @@ impl CliArgs {
|
||||
|
||||
/// generate secret and public keys, store in files according to the paths passed as arguments
|
||||
pub fn generate_and_save_keypair(secret_key: PathBuf, public_key: PathBuf) -> anyhow::Result<()> {
|
||||
let mut ssk = crate::protocol::SSk::random();
|
||||
let mut spk = crate::protocol::SPk::random();
|
||||
let mut ssk = crate::protocol::basic_types::SSk::random();
|
||||
let mut spk = crate::protocol::basic_types::SPk::random();
|
||||
StaticKem.keygen(ssk.secret_mut(), spk.deref_mut())?;
|
||||
ssk.store_secret(secret_key)?;
|
||||
spk.store(public_key)
|
||||
|
||||
@@ -7,20 +7,19 @@
|
||||
//! - TODO: support `~` in <https://github.com/rosenpass/rosenpass/issues/237>
|
||||
//! - TODO: provide tooling to create config file from shell <https://github.com/rosenpass/rosenpass/issues/247>
|
||||
|
||||
use crate::protocol::{SPk, SSk};
|
||||
use rosenpass_util::file::LoadValue;
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
fs,
|
||||
io::Write,
|
||||
net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs},
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::{collections::HashSet, fs, io::Write};
|
||||
|
||||
use anyhow::{bail, ensure};
|
||||
use rosenpass_util::file::{fopen_w, Visibility};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use rosenpass_util::file::{fopen_w, LoadValue, Visibility};
|
||||
|
||||
use crate::protocol::basic_types::{SPk, SSk};
|
||||
use crate::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
|
||||
use crate::app_server::AppServer;
|
||||
|
||||
#[cfg(feature = "experiment_api")]
|
||||
@@ -36,6 +35,7 @@ fn empty_api_config() -> crate::api::config::ApiConfig {
|
||||
///
|
||||
/// i.e. configuration for the `rosenpass exchange` and `rosenpass exchange-config` commands
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct Rosenpass {
|
||||
// TODO: Raise error if secret key or public key alone is set during deserialization
|
||||
// SEE: https://github.com/serde-rs/serde/issues/2793
|
||||
@@ -77,6 +77,7 @@ pub struct Rosenpass {
|
||||
|
||||
/// Public key and secret key locations.
|
||||
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct Keypair {
|
||||
/// path to the public key file
|
||||
pub public_key: PathBuf,
|
||||
@@ -104,6 +105,7 @@ impl Keypair {
|
||||
///
|
||||
/// - TODO: replace this type with [`log::LevelFilter`], also see <https://github.com/rosenpass/rosenpass/pull/246>
|
||||
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Copy, Clone)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub enum Verbosity {
|
||||
Quiet,
|
||||
Verbose,
|
||||
@@ -111,6 +113,7 @@ pub enum Verbosity {
|
||||
|
||||
/// The protocol version to be used by a peer.
|
||||
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Copy, Clone, Default)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub enum ProtocolVersion {
|
||||
#[default]
|
||||
V02,
|
||||
@@ -119,6 +122,7 @@ pub enum ProtocolVersion {
|
||||
|
||||
/// Configuration data for a single Rosenpass peer
|
||||
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct RosenpassPeer {
|
||||
/// path to the public key of the peer
|
||||
pub public_key: PathBuf,
|
||||
@@ -150,10 +154,78 @@ pub struct RosenpassPeer {
|
||||
#[serde(default)]
|
||||
/// The protocol version to use for the exchange
|
||||
pub protocol_version: ProtocolVersion,
|
||||
|
||||
/// Allows using a custom domain separator
|
||||
#[serde(flatten)]
|
||||
pub osk_domain_separator: RosenpassPeerOskDomainSeparator,
|
||||
}
|
||||
|
||||
/// Configuration for [crate::protocol::osk_domain_separator::OskDomainSeparator]
|
||||
///
|
||||
/// Refer to its documentation for more information and examples of how to use this.
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct RosenpassPeerOskDomainSeparator {
|
||||
/// If Rosenpass is used for purposes other then securing WireGuard,
|
||||
/// a custom domain separator and domain separator must be specified.
|
||||
///
|
||||
/// Use `osk_organization` to indicate the organization who specifies the use case
|
||||
/// and `osk_label` for a specific purpose within that organization.
|
||||
///
|
||||
/// ```toml
|
||||
/// [[peer]]
|
||||
/// public_key = "my_public_key"
|
||||
/// ...
|
||||
/// osk_organization = "myorg.com"
|
||||
/// osk_label = ["My Custom Messenger app"]
|
||||
/// ```
|
||||
pub osk_organization: Option<String>,
|
||||
// If Rosenpass is used for purposes other then securing WireGuard,
|
||||
/// a custom domain separator and domain separator must be specified.
|
||||
///
|
||||
/// Use `osk_organization` to indicate the organization who specifies the use case
|
||||
/// and `osk_label` for a specific purpose within that organization.
|
||||
///
|
||||
/// ```toml
|
||||
/// [[peer]]
|
||||
/// public_key = "my_public_key"
|
||||
/// ...
|
||||
/// osk_namespace = "myorg.com"
|
||||
/// osk_label = ["My Custom Messenger app"]
|
||||
/// ```
|
||||
pub osk_label: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
impl RosenpassPeerOskDomainSeparator {
|
||||
pub fn org_and_label(&self) -> anyhow::Result<Option<(&String, &Vec<String>)>> {
|
||||
match (&self.osk_organization, &self.osk_label) {
|
||||
(None, None) => Ok(None),
|
||||
(Some(org), Some(label)) => Ok(Some((&org, &label))),
|
||||
(Some(_), None) => bail!("Specified osk_organization but not osk_label in config file. You need to specify both, or none."),
|
||||
(None, Some(_)) => bail!("Specified osk_label but not osk_organization in config file. You need to specify both, or none."),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn validate(&self) -> anyhow::Result<()> {
|
||||
let _org_and_label: Option<(_, _)> = self.org_and_label()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<RosenpassPeerOskDomainSeparator> for OskDomainSeparator {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(val: RosenpassPeerOskDomainSeparator) -> anyhow::Result<Self> {
|
||||
match val.org_and_label()? {
|
||||
None => Ok(OskDomainSeparator::default()),
|
||||
Some((org, label)) => Ok(OskDomainSeparator::custom_utf8(org, label)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Information for supplying exchanged keys directly to WireGuard
|
||||
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct WireGuard {
|
||||
/// Name of the WireGuard interface to supply with pre-shared keys generated by the Rosenpass
|
||||
/// key exchange
|
||||
@@ -292,7 +364,7 @@ impl Rosenpass {
|
||||
// check the secret-key file is a valid key
|
||||
ensure!(
|
||||
SSk::load(&keypair.secret_key).is_ok(),
|
||||
"could not load public-key file {:?}: invalid key",
|
||||
"could not load secret-key file {:?}: invalid key",
|
||||
keypair.secret_key
|
||||
);
|
||||
}
|
||||
@@ -337,6 +409,10 @@ impl Rosenpass {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = peer.osk_domain_separator.validate() {
|
||||
bail!("Invalid OSK domain separation configuration for peer {i}: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -295,25 +295,21 @@ hash_domain_ns!(
|
||||
/// We do recommend that third parties base their specific domain separators
|
||||
/// on a internet domain and/or mix in much more specific information.
|
||||
///
|
||||
/// We only really use this to derive a output key for wireguard; see [osk].
|
||||
///
|
||||
/// See [_ckextract].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// See the [module](self) documentation on how to use the hash domains in general.
|
||||
_ckextract, _user, "user");
|
||||
_ckextract, cke_user, "user");
|
||||
hash_domain_ns!(
|
||||
/// Chaining key domain separator for any rosenpass specific purposes.
|
||||
///
|
||||
/// We only really use this to derive a output key for wireguard; see [osk].
|
||||
///
|
||||
/// See [_ckextract].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// See the [module](self) documentation on how to use the hash domains in general.
|
||||
_user, _rp, "rosenpass.eu");
|
||||
cke_user, cke_user_rosenpass, "rosenpass.eu");
|
||||
hash_domain!(
|
||||
/// Chaining key domain separator for deriving the key sent to WireGuard.
|
||||
///
|
||||
@@ -325,4 +321,4 @@ hash_domain!(
|
||||
/// Check out its source code!
|
||||
///
|
||||
/// See the [module](self) documentation on how to use the hash domains in general.
|
||||
_rp, osk, "wireguard psk");
|
||||
cke_user_rosenpass, ext_wireguard_psk_osk, "wireguard psk");
|
||||
|
||||
38
rosenpass/src/protocol/basic_types.rs
Normal file
38
rosenpass/src/protocol/basic_types.rs
Normal file
@@ -0,0 +1,38 @@
|
||||
//! Key types and other fundamental types used in the Rosenpass protocol
|
||||
|
||||
use rosenpass_cipher_traits::primitives::{Aead, Kem};
|
||||
use rosenpass_ciphers::{EphemeralKem, StaticKem, XAead, KEY_LEN};
|
||||
use rosenpass_secret_memory::{Public, PublicBox, Secret};
|
||||
|
||||
use crate::msgs::{BISCUIT_ID_LEN, MAX_MESSAGE_LEN, SESSION_ID_LEN};
|
||||
|
||||
/// Static public key
|
||||
///
|
||||
/// Using [PublicBox] instead of [Public] because Classic McEliece keys are very large.
|
||||
pub type SPk = PublicBox<{ StaticKem::PK_LEN }>;
|
||||
/// Static secret key
|
||||
pub type SSk = Secret<{ StaticKem::SK_LEN }>;
|
||||
/// Ephemeral public key
|
||||
pub type EPk = Public<{ EphemeralKem::PK_LEN }>;
|
||||
pub type ESk = Secret<{ EphemeralKem::SK_LEN }>;
|
||||
|
||||
/// Symmetric key
|
||||
pub type SymKey = Secret<KEY_LEN>;
|
||||
/// Variant of [SymKey] for use cases where the value is public
|
||||
pub type PublicSymKey = [u8; 32];
|
||||
|
||||
/// Peer ID (derived from the public key, see the hash derivations in the [whitepaper](https://rosenpass.eu/whitepaper.pdf))
|
||||
pub type PeerId = Public<KEY_LEN>;
|
||||
/// Session ID
|
||||
pub type SessionId = Public<SESSION_ID_LEN>;
|
||||
/// Biscuit ID
|
||||
pub type BiscuitId = Public<BISCUIT_ID_LEN>;
|
||||
|
||||
/// Nonce for use with random-nonce AEAD
|
||||
pub type XAEADNonce = Public<{ XAead::NONCE_LEN }>;
|
||||
|
||||
/// Buffer capably of holding any Rosenpass protocol message
|
||||
pub type MsgBuf = Public<MAX_MESSAGE_LEN>;
|
||||
|
||||
/// Server-local peer number; this is just the index in [super::CryptoServer::peers]
|
||||
pub type PeerNo = usize;
|
||||
@@ -1,12 +1,14 @@
|
||||
use super::{CryptoServer, PeerPtr, SPk, SSk, SymKey};
|
||||
use crate::config::ProtocolVersion;
|
||||
use rosenpass_util::{
|
||||
build::Build,
|
||||
mem::{DiscardResultExt, SwapWithDefaultExt},
|
||||
result::ensure_or,
|
||||
};
|
||||
use thiserror::Error;
|
||||
|
||||
use rosenpass_util::mem::{DiscardResultExt, SwapWithDefaultExt};
|
||||
use rosenpass_util::{build::Build, result::ensure_or};
|
||||
|
||||
use crate::config::ProtocolVersion;
|
||||
|
||||
use super::basic_types::{SPk, SSk, SymKey};
|
||||
use super::osk_domain_separator::OskDomainSeparator;
|
||||
use super::{CryptoServer, PeerPtr};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
/// A pair of matching public/secret keys used to launch the crypto server.
|
||||
///
|
||||
@@ -47,7 +49,8 @@ impl Keypair {
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use rosenpass::protocol::{Keypair, SSk, SPk};
|
||||
/// use rosenpass::protocol::basic_types::{SSk, SPk};
|
||||
/// use rosenpass::protocol::Keypair;
|
||||
///
|
||||
/// // We have to define the security policy before using Secrets.
|
||||
/// use rosenpass_secret_memory::secret_policy_use_only_malloc_secrets;
|
||||
@@ -66,12 +69,13 @@ impl Keypair {
|
||||
|
||||
/// Creates a new "empty" key pair. All bytes are initialized to zero.
|
||||
///
|
||||
/// See [SSk:zero()][crate::protocol::SSk::zero] and [SPk:zero()][crate::protocol::SPk::zero], respectively.
|
||||
/// See [SSk:zero()][SSk::zero] and [SPk:zero()][SPk::zero], respectively.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use rosenpass::protocol::{Keypair, SSk, SPk};
|
||||
/// use rosenpass::protocol::basic_types::{SSk, SPk};
|
||||
/// use rosenpass::protocol::Keypair;
|
||||
///
|
||||
/// // We have to define the security policy before using Secrets.
|
||||
/// use rosenpass_secret_memory::secret_policy_use_only_malloc_secrets;
|
||||
@@ -90,7 +94,7 @@ impl Keypair {
|
||||
|
||||
/// Creates a new (securely-)random key pair. The mechanism is described in [rosenpass_secret_memory::Secret].
|
||||
///
|
||||
/// See [SSk:random()][crate::protocol::SSk::random] and [SPk:random()][crate::protocol::SPk::random], respectively.
|
||||
/// See [SSk:random()][SSk::random] and [SPk:random()][SPk::random], respectively.
|
||||
pub fn random() -> Self {
|
||||
Self::new(SSk::random(), SPk::random())
|
||||
}
|
||||
@@ -127,7 +131,7 @@ pub struct MissingKeypair;
|
||||
///
|
||||
/// There are multiple ways of creating a crypto server:
|
||||
///
|
||||
/// 1. Provide the key pair at initialization time (using [CryptoServer::new][crate::protocol::CryptoServer::new])
|
||||
/// 1. Provide the key pair at initialization time (using [CryptoServer::new][CryptoServer::new])
|
||||
/// 2. Provide the key pair at a later time (using [BuildCryptoServer::empty])
|
||||
///
|
||||
/// With BuildCryptoServer, you can gradually configure parameters as they become available.
|
||||
@@ -145,19 +149,23 @@ pub struct MissingKeypair;
|
||||
///
|
||||
/// ```rust
|
||||
/// use rosenpass_util::build::Build;
|
||||
/// use rosenpass::protocol::{BuildCryptoServer, Keypair, PeerParams, SPk, SymKey};
|
||||
/// use rosenpass_secret_memory::secret_policy_use_only_malloc_secrets;
|
||||
///
|
||||
/// use rosenpass::config::ProtocolVersion;
|
||||
///
|
||||
/// use rosenpass::protocol::basic_types::{SPk, SymKey};
|
||||
/// use rosenpass::protocol::{BuildCryptoServer, Keypair, PeerParams};
|
||||
/// use rosenpass::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
///
|
||||
/// // We have to define the security policy before using Secrets.
|
||||
/// use rosenpass_secret_memory::secret_policy_use_only_malloc_secrets;
|
||||
/// secret_policy_use_only_malloc_secrets();
|
||||
///
|
||||
/// let keypair = Keypair::random();
|
||||
/// let peer1 = PeerParams { psk: Some(SymKey::random()), pk: SPk::random(), protocol_version: ProtocolVersion::V02 };
|
||||
/// let peer2 = PeerParams { psk: None, pk: SPk::random(), protocol_version: ProtocolVersion::V02 };
|
||||
/// let peer1 = PeerParams { psk: Some(SymKey::random()), pk: SPk::random(), protocol_version: ProtocolVersion::V02, osk_domain_separator: OskDomainSeparator::default() };
|
||||
/// let peer2 = PeerParams { psk: None, pk: SPk::random(), protocol_version: ProtocolVersion::V02, osk_domain_separator: OskDomainSeparator::default() };
|
||||
///
|
||||
/// let mut builder = BuildCryptoServer::new(Some(keypair.clone()), vec![peer1]);
|
||||
/// builder.add_peer(peer2.psk.clone(), peer2.pk, ProtocolVersion::V02);
|
||||
/// builder.add_peer(peer2.psk.clone(), peer2.pk, ProtocolVersion::V02, OskDomainSeparator::default());
|
||||
///
|
||||
/// let server = builder.build().expect("build failed");
|
||||
/// assert_eq!(server.peers.len(), 2);
|
||||
@@ -187,16 +195,17 @@ impl Build<CryptoServer> for BuildCryptoServer {
|
||||
|
||||
let mut srv = CryptoServer::new(sk, pk);
|
||||
|
||||
for (
|
||||
idx,
|
||||
PeerParams {
|
||||
for (idx, params) in self.peers.into_iter().enumerate() {
|
||||
let PeerParams {
|
||||
psk,
|
||||
pk,
|
||||
protocol_version,
|
||||
},
|
||||
) in self.peers.into_iter().enumerate()
|
||||
{
|
||||
let PeerPtr(idx2) = srv.add_peer(psk, pk, protocol_version.into())?;
|
||||
osk_domain_separator,
|
||||
} = params;
|
||||
|
||||
let PeerPtr(idx2) =
|
||||
srv.add_peer(psk, pk, protocol_version.into(), osk_domain_separator)?;
|
||||
|
||||
assert!(idx == idx2, "Peer id changed during CryptoServer construction from {idx} to {idx2}. This is a developer error.")
|
||||
}
|
||||
|
||||
@@ -205,13 +214,13 @@ impl Build<CryptoServer> for BuildCryptoServer {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Cryptographic key(s) identifying the connected [peer][crate::protocol::Peer] ("client")
|
||||
/// Cryptographic key(s) identifying the connected [peer][super::Peer] ("client")
|
||||
/// for a given session that is being managed by the crypto server.
|
||||
///
|
||||
/// Each peer must be identified by a [public key (SPk)][crate::protocol::SPk].
|
||||
/// Optionally, a [symmetric key (SymKey)][crate::protocol::SymKey]
|
||||
/// Each peer must be identified by a [public key (SPk)][SPk].
|
||||
/// Optionally, a [symmetric key (SymKey)][SymKey]
|
||||
/// can be provided when setting up the connection.
|
||||
/// For more information on the intended usage and security considerations, see [Peer::psk][crate::protocol::Peer::psk] and [Peer::spkt][crate::protocol::Peer::spkt].
|
||||
/// For more information on the intended usage and security considerations, see [Peer::psk][super::Peer::psk] and [Peer::spkt][super::Peer::spkt].
|
||||
pub struct PeerParams {
|
||||
/// Pre-shared (symmetric) encryption keys that should be used with this peer.
|
||||
pub psk: Option<SymKey>,
|
||||
@@ -219,6 +228,7 @@ pub struct PeerParams {
|
||||
pub pk: SPk,
|
||||
/// The used protocol version.
|
||||
pub protocol_version: ProtocolVersion,
|
||||
pub osk_domain_separator: OskDomainSeparator,
|
||||
}
|
||||
|
||||
impl BuildCryptoServer {
|
||||
@@ -317,13 +327,16 @@ impl BuildCryptoServer {
|
||||
///
|
||||
/// ```rust
|
||||
/// use rosenpass::config::ProtocolVersion;
|
||||
///
|
||||
/// use rosenpass_util::build::Build;
|
||||
/// use rosenpass::protocol::basic_types::{SymKey, SPk};
|
||||
/// use rosenpass::protocol::{BuildCryptoServer, Keypair};
|
||||
/// use rosenpass::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
///
|
||||
/// // We have to define the security policy before using Secrets.
|
||||
/// use rosenpass_secret_memory::secret_policy_use_only_malloc_secrets;
|
||||
/// secret_policy_use_only_malloc_secrets();
|
||||
///
|
||||
/// use rosenpass_util::build::Build;
|
||||
/// use rosenpass::protocol::{BuildCryptoServer, Keypair, SymKey, SPk};
|
||||
///
|
||||
/// // Deferred initialization: Create builder first, add some peers later
|
||||
/// let keypair_option = Some(Keypair::random());
|
||||
/// let mut builder = BuildCryptoServer::new(keypair_option, Vec::new());
|
||||
@@ -335,7 +348,7 @@ impl BuildCryptoServer {
|
||||
/// // Now we've found a peer that should be added to the configuration
|
||||
/// let pre_shared_key = SymKey::random();
|
||||
/// let public_key = SPk::random();
|
||||
/// builder.with_added_peer(Some(pre_shared_key.clone()), public_key.clone(), ProtocolVersion::V02);
|
||||
/// builder.with_added_peer(Some(pre_shared_key.clone()), public_key.clone(), ProtocolVersion::V02, OskDomainSeparator::default());
|
||||
///
|
||||
/// // New server instances will then start with the peer being registered already
|
||||
/// let server = builder.build().expect("build failed");
|
||||
@@ -350,12 +363,14 @@ impl BuildCryptoServer {
|
||||
psk: Option<SymKey>,
|
||||
pk: SPk,
|
||||
protocol_version: ProtocolVersion,
|
||||
osk_domain_separator: OskDomainSeparator,
|
||||
) -> &mut Self {
|
||||
// TODO: Check here already whether peer was already added
|
||||
self.peers.push(PeerParams {
|
||||
psk,
|
||||
pk,
|
||||
protocol_version,
|
||||
osk_domain_separator,
|
||||
});
|
||||
self
|
||||
}
|
||||
@@ -366,9 +381,10 @@ impl BuildCryptoServer {
|
||||
psk: Option<SymKey>,
|
||||
pk: SPk,
|
||||
protocol_version: ProtocolVersion,
|
||||
osk_domain_separator: OskDomainSeparator,
|
||||
) -> PeerPtr {
|
||||
let id = PeerPtr(self.peers.len());
|
||||
self.with_added_peer(psk, pk, protocol_version);
|
||||
self.with_added_peer(psk, pk, protocol_version, osk_domain_separator);
|
||||
id
|
||||
}
|
||||
|
||||
@@ -381,19 +397,23 @@ impl BuildCryptoServer {
|
||||
/// Extracting the server configuration from a builder:
|
||||
///
|
||||
/// ```rust
|
||||
/// // We have to define the security policy before using Secrets.
|
||||
/// use rosenpass_util::build::Build;
|
||||
/// use rosenpass_secret_memory::secret_policy_use_only_malloc_secrets;
|
||||
///
|
||||
/// use rosenpass::config::ProtocolVersion;
|
||||
/// use rosenpass::hash_domains::protocol;
|
||||
/// use rosenpass_secret_memory::secret_policy_use_only_malloc_secrets;
|
||||
/// secret_policy_use_only_malloc_secrets();
|
||||
///
|
||||
/// use rosenpass_util::build::Build;
|
||||
/// use rosenpass::protocol::{BuildCryptoServer, Keypair, SymKey, SPk};
|
||||
/// use rosenpass::protocol::basic_types::{SymKey, SPk};
|
||||
/// use rosenpass::protocol::{BuildCryptoServer, Keypair};
|
||||
/// use rosenpass::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
///
|
||||
/// // We have to define the security policy before using Secrets.
|
||||
/// secret_policy_use_only_malloc_secrets();
|
||||
///
|
||||
/// let keypair = Keypair::random();
|
||||
/// let peer_pk = SPk::random();
|
||||
/// let mut builder = BuildCryptoServer::new(Some(keypair.clone()), vec![]);
|
||||
/// builder.add_peer(None, peer_pk, ProtocolVersion::V02);
|
||||
/// builder.add_peer(None, peer_pk, ProtocolVersion::V02, OskDomainSeparator::default());
|
||||
///
|
||||
/// // Extract configuration parameters from the decomissioned builder
|
||||
/// let (keypair_option, peers) = builder.take_parts();
|
||||
|
||||
64
rosenpass/src/protocol/constants.rs
Normal file
64
rosenpass/src/protocol/constants.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
//! Constants and configuration values used in the rosenpass core protocol
|
||||
|
||||
use crate::msgs::MAC_SIZE;
|
||||
|
||||
use super::timing::Timing;
|
||||
|
||||
/// Time after which the responder attempts to rekey the session
|
||||
///
|
||||
/// From the wireguard paper: rekey every two minutes,
|
||||
/// discard the key if no rekey is achieved within three
|
||||
pub const REKEY_AFTER_TIME_RESPONDER: Timing = 120.0;
|
||||
/// Time after which the initiator attempts to rekey the session.
|
||||
///
|
||||
/// This happens ten seconds after [REKEY_AFTER_TIME_RESPONDER], so
|
||||
/// parties would usually switch roles after every handshake.
|
||||
///
|
||||
/// From the wireguard paper: rekey every two minutes,
|
||||
/// discard the key if no rekey is achieved within three
|
||||
pub const REKEY_AFTER_TIME_INITIATOR: Timing = 130.0;
|
||||
/// Time after which either party rejects the current key.
|
||||
///
|
||||
/// At this point a new key should have been negotiated.
|
||||
///
|
||||
/// Rejection happens 50-60 seconds after key renegotiation
|
||||
/// to allow for a graceful handover.
|
||||
/// From the wireguard paper: rekey every two minutes,
|
||||
/// discard the key if no rekey is achieved within three
|
||||
pub const REJECT_AFTER_TIME: Timing = 180.0;
|
||||
|
||||
/// The length of the `cookie_secret` in the [whitepaper](https://rosenpass.eu/whitepaper.pdf)
|
||||
pub const COOKIE_SECRET_LEN: usize = MAC_SIZE;
|
||||
/// The life time of the `cookie_secret` in the [whitepaper](https://rosenpass.eu/whitepaper.pdf)
|
||||
pub const COOKIE_SECRET_EPOCH: Timing = 120.0;
|
||||
|
||||
/// Length of a cookie value (see info about the cookie mechanism in the [whitepaper](https://rosenpass.eu/whitepaper.pdf))
|
||||
pub const COOKIE_VALUE_LEN: usize = MAC_SIZE;
|
||||
/// Time after which to delete a cookie, as the initiator, for a certain peer (see info about the cookie mechanism in the [whitepaper](https://rosenpass.eu/whitepaper.pdf))
|
||||
pub const PEER_COOKIE_VALUE_EPOCH: Timing = 120.0;
|
||||
|
||||
/// Seconds until the biscuit key is changed; we issue biscuits
|
||||
/// using one biscuit key for one epoch and store the biscuit for
|
||||
/// decryption for a second epoch
|
||||
///
|
||||
/// The biscuit mechanism is used to make sure the responder is stateless in our protocol.
|
||||
pub const BISCUIT_EPOCH: Timing = 300.0;
|
||||
|
||||
/// The initiator opportunistically retransmits their messages; it applies an increasing delay
|
||||
/// between each retreansmission. This is the factor by which the delay grows after each
|
||||
/// retransmission.
|
||||
pub const RETRANSMIT_DELAY_GROWTH: Timing = 2.0;
|
||||
/// The initiator opportunistically retransmits their messages; it applies an increasing delay
|
||||
/// between each retreansmission. This is the initial delay between retransmissions.
|
||||
pub const RETRANSMIT_DELAY_BEGIN: Timing = 0.5;
|
||||
/// The initiator opportunistically retransmits their messages; it applies an increasing delay
|
||||
/// between each retreansmission. This is the maximum delay between retransmissions.
|
||||
pub const RETRANSMIT_DELAY_END: Timing = 10.0;
|
||||
/// The initiator opportunistically retransmits their messages; it applies an increasing delay
|
||||
/// between each retreansmission. This is the jitter (randomness) applied to the retransmission
|
||||
/// delay.
|
||||
pub const RETRANSMIT_DELAY_JITTER: Timing = 0.5;
|
||||
|
||||
/// This is the maximum delay that can separate two events for us to consider the events to have
|
||||
/// happened at the same time.
|
||||
pub const EVENT_GRACE: Timing = 0.0025;
|
||||
98
rosenpass/src/protocol/cookies.rs
Normal file
98
rosenpass/src/protocol/cookies.rs
Normal file
@@ -0,0 +1,98 @@
|
||||
//! Cryptographic key management for cookies and biscuits used in the protocol
|
||||
//!
|
||||
//! Cookies in general are conceptually similar to browser cookies;
|
||||
//! i.e. mechanisms to store information in the party connected to via network.
|
||||
//!
|
||||
//! In our case specifically we refer to any mechanisms in the Rosenpass protocol
|
||||
//! where a peer stores some information in the other party that is cryptographically
|
||||
//! protected using a temporary, randomly generated key. This file contains the mechanisms
|
||||
//! used to store the secret keys.
|
||||
//!
|
||||
//! We have two cookie-mechanisms in particular:
|
||||
//!
|
||||
//! - Rosenpass "biscuits" — the mechanism used to make sure the Rosenpass protocol is stateless
|
||||
//! with respect to the responder
|
||||
//! - WireGuard's cookie mechanism to enable proof of IP ownership; Rosenpass has experimental
|
||||
//! support for this mechanism
|
||||
//!
|
||||
//! The CookieStore type is also used to store cookie secrets sent from the responder to the
|
||||
//! initiator. This is a bad design and we should separate out this functionality.
|
||||
//!
|
||||
//! TODO: CookieStore should not be used for cookie secrets sent from responder to initiator.
|
||||
//! TODO: Move cookie lifetime management functionality into here
|
||||
|
||||
use rosenpass_ciphers::KEY_LEN;
|
||||
use rosenpass_secret_memory::Secret;
|
||||
|
||||
use super::{constants::COOKIE_SECRET_LEN, timing::Timing};
|
||||
|
||||
/// Container for storing cookie secrets like [BiscuitKey] or [CookieSecret].
|
||||
///
|
||||
/// This is really just a secret key and a time stamp of creation. Concrete
|
||||
/// usages (such as for the biscuit key) impose a time limit about how long
|
||||
/// a key can be used and the time of creation is used to impose that time limit.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use rosenpass_util::time::Timebase;
|
||||
/// use rosenpass::protocol::{timing::BCE, basic_types::SymKey, cookies::CookieStore};
|
||||
///
|
||||
/// rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();
|
||||
///
|
||||
/// let fixed_secret = SymKey::random();
|
||||
/// let timebase = Timebase::default();
|
||||
///
|
||||
/// let mut store = CookieStore::<32>::new();
|
||||
/// assert_ne!(store.value.secret(), SymKey::zero().secret());
|
||||
/// assert_eq!(store.created_at, BCE);
|
||||
///
|
||||
/// let time_before_call = timebase.now();
|
||||
/// store.update(&timebase, fixed_secret.secret());
|
||||
/// assert_eq!(store.value.secret(), fixed_secret.secret());
|
||||
/// assert!(store.created_at < timebase.now());
|
||||
/// assert!(store.created_at > time_before_call);
|
||||
///
|
||||
/// // Same as new()
|
||||
/// store.erase();
|
||||
/// assert_ne!(store.value.secret(), SymKey::zero().secret());
|
||||
/// assert_eq!(store.created_at, BCE);
|
||||
///
|
||||
/// let secret_before_call = store.value.clone();
|
||||
/// let time_before_call = timebase.now();
|
||||
/// store.randomize(&timebase);
|
||||
/// assert_ne!(store.value.secret(), secret_before_call.secret());
|
||||
/// assert!(store.created_at < timebase.now());
|
||||
/// assert!(store.created_at > time_before_call);
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct CookieStore<const N: usize> {
|
||||
/// Time of creation of the secret key
|
||||
pub created_at: Timing,
|
||||
/// The secret key
|
||||
pub value: Secret<N>,
|
||||
}
|
||||
|
||||
/// Stores cookie secret, which is used to create a rotating the cookie value
|
||||
///
|
||||
/// Concrete value is in [super::CryptoServer::cookie_secrets].
|
||||
///
|
||||
/// The pointer type is [super::ServerCookieSecretPtr].
|
||||
pub type CookieSecret = CookieStore<COOKIE_SECRET_LEN>;
|
||||
|
||||
/// Storage for our biscuit keys.
|
||||
///
|
||||
/// The biscuit keys encrypt what we call "biscuits".
|
||||
/// These biscuits contain the responder state for a particular handshake. By moving
|
||||
/// state into these biscuits, we make sure the responder is stateless.
|
||||
///
|
||||
/// A Biscuit is like a fancy cookie. To avoid state disruption attacks,
|
||||
/// the responder doesn't store state. Instead the state is stored in a
|
||||
/// Biscuit, that is encrypted using the [BiscuitKey] which is only known to
|
||||
/// the Responder. Thus secrecy of the Responder state is not violated, still
|
||||
/// the responder can avoid storing this state.
|
||||
///
|
||||
/// Concrete value is in [super::CryptoServer::biscuit_keys].
|
||||
///
|
||||
/// The pointer type is [super::BiscuitKeyPtr].
|
||||
pub type BiscuitKey = CookieStore<KEY_LEN>;
|
||||
45
rosenpass/src/protocol/index.rs
Normal file
45
rosenpass/src/protocol/index.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
//! Quick lookup of values in [super::CryptoServer]
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use super::basic_types::{PeerId, PeerNo, SessionId};
|
||||
use super::KnownResponseHash;
|
||||
|
||||
/// Maps various keys to peer (numbers).
|
||||
///
|
||||
/// See:
|
||||
/// - [super::CryptoServer::index]
|
||||
/// - [super::CryptoServer::peers]
|
||||
/// - [PeerNo]
|
||||
/// - [super::PeerPtr]
|
||||
/// - [super::Peer]
|
||||
pub type PeerIndex = HashMap<PeerIndexKey, PeerNo>;
|
||||
|
||||
/// We maintain various indices in [super::CryptoServer::index], mapping some key to a particular
|
||||
/// [PeerNo], i.e. to an index in [super::CryptoServer::peers]. These are the possible index key.
|
||||
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Debug)]
|
||||
pub enum PeerIndexKey {
|
||||
/// Lookup of a particular peer given the [PeerId], i.e. a value derived from the peers public
|
||||
/// key as created by [super::CryptoServer::pidm] or [super::Peer::pidt].
|
||||
///
|
||||
/// The peer id is used by the initiator to tell the responder about its identity in
|
||||
/// [crate::msgs::InitHello].
|
||||
///
|
||||
/// See also the pointer types [super::PeerPtr].
|
||||
Peer(PeerId),
|
||||
/// Lookup of a particular session id.
|
||||
///
|
||||
/// This is used to look up both established sessions (see
|
||||
/// [super::CryptoServer::lookup_session]) and ongoing handshakes (see [super::CryptoServer::lookup_handshake]).
|
||||
///
|
||||
/// Lookup of a peer to get an established session or a handshake is sufficient, because a peer
|
||||
/// contains a limited number of sessions and handshakes ([super::Peer::session] and [super::Peer::handshake] respectively).
|
||||
///
|
||||
/// See also the pointer types [super::IniHsPtr] and [super::SessionPtr].
|
||||
Sid(SessionId),
|
||||
/// Lookup of a cached response ([crate::msgs::Envelope]<[crate::msgs::EmptyData]>) to an [crate::msgs::InitConf] (i.e.
|
||||
/// [crate::msgs::Envelope]<[crate::msgs::InitConf]>) message.
|
||||
///
|
||||
/// See [super::KnownInitConfResponsePtr] on how this value is maintained.
|
||||
KnownInitConfResponse(KnownResponseHash),
|
||||
}
|
||||
@@ -24,12 +24,15 @@
|
||||
//!
|
||||
//! ```
|
||||
//! use std::ops::DerefMut;
|
||||
//!
|
||||
//! use rosenpass_secret_memory::policy::*;
|
||||
//! use rosenpass_cipher_traits::primitives::Kem;
|
||||
//! use rosenpass_ciphers::StaticKem;
|
||||
//! use rosenpass::{
|
||||
//! protocol::{SSk, SPk, MsgBuf, PeerPtr, CryptoServer, SymKey},
|
||||
//! };
|
||||
//!
|
||||
//! use rosenpass::protocol::basic_types::{SSk, SPk, MsgBuf, SymKey};
|
||||
//! use rosenpass::protocol::{PeerPtr, CryptoServer};
|
||||
//! use rosenpass::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
//!
|
||||
//! # fn main() -> anyhow::Result<()> {
|
||||
//! // Set security policy for storing secrets
|
||||
//!
|
||||
@@ -50,8 +53,8 @@
|
||||
//! let mut b = CryptoServer::new(peer_b_sk, peer_b_pk.clone());
|
||||
//!
|
||||
//! // introduce peers to each other
|
||||
//! a.add_peer(Some(psk.clone()), peer_b_pk, ProtocolVersion::V03)?;
|
||||
//! b.add_peer(Some(psk), peer_a_pk, ProtocolVersion::V03)?;
|
||||
//! a.add_peer(Some(psk.clone()), peer_b_pk, ProtocolVersion::V03, OskDomainSeparator::default())?;
|
||||
//! b.add_peer(Some(psk), peer_a_pk, ProtocolVersion::V03, OskDomainSeparator::default())?;
|
||||
//!
|
||||
//! // declare buffers for message exchange
|
||||
//! let (mut a_buf, mut b_buf) = (MsgBuf::zero(), MsgBuf::zero());
|
||||
@@ -76,8 +79,20 @@
|
||||
//! ```
|
||||
|
||||
mod build_crypto_server;
|
||||
pub use build_crypto_server::*;
|
||||
|
||||
pub mod basic_types;
|
||||
pub mod constants;
|
||||
pub mod cookies;
|
||||
pub mod index;
|
||||
pub mod osk_domain_separator;
|
||||
pub mod testutils;
|
||||
pub mod timing;
|
||||
pub mod zerocopy;
|
||||
|
||||
#[allow(clippy::module_inception)]
|
||||
mod protocol;
|
||||
|
||||
pub use build_crypto_server::*;
|
||||
pub use protocol::*;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
|
||||
91
rosenpass/src/protocol/osk_domain_separator.rs
Normal file
91
rosenpass/src/protocol/osk_domain_separator.rs
Normal file
@@ -0,0 +1,91 @@
|
||||
//! Management of domain separators for the OSK (output key) in the rosenpass protocol
|
||||
//!
|
||||
//! The domain separator is there to ensure that keys are bound to the purpose they are used for.
|
||||
//!
|
||||
//! See the whitepaper section on protocol extensions for more details on how this is used.
|
||||
//!
|
||||
//! # See also
|
||||
//!
|
||||
//! - [crate::protocol::Peer]
|
||||
//! - [crate::protocol::CryptoServer::add_peer]
|
||||
//! - [crate::protocol::CryptoServer::osk]
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! There are some basic examples of using custom domain separators in the examples of
|
||||
//! [super::CryptoServer::poll]. Look for the test function `test_osk_label_mismatch()`
|
||||
//! in particular.
|
||||
|
||||
use rosenpass_ciphers::subtle::keyed_hash::KeyedHash;
|
||||
use rosenpass_util::result::OkExt;
|
||||
|
||||
use crate::hash_domains;
|
||||
|
||||
use super::basic_types::PublicSymKey;
|
||||
|
||||
/// The OSK (output shared key) domain separator to use for a specific peer
|
||||
///
|
||||
#[derive(Clone, PartialEq, Eq, Debug, PartialOrd, Ord, Default)]
|
||||
pub enum OskDomainSeparator {
|
||||
/// By default we use the domain separator that indicates that the resulting keys
|
||||
/// are used by WireGuard to establish a connection
|
||||
#[default]
|
||||
ExtensionWireguardPsk,
|
||||
/// Used for user-defined domain separators
|
||||
Custom {
|
||||
/// A globally unique string identifying the vendor or group who defines this domain
|
||||
/// separator (we use our domain ourselves – "rosenpass.eu")
|
||||
namespace: Vec<u8>,
|
||||
/// Any custom labels within that namespace. Could be descriptive prose.
|
||||
labels: Vec<Vec<u8>>,
|
||||
},
|
||||
}
|
||||
|
||||
impl OskDomainSeparator {
|
||||
/// Construct [OskDomainSeparator::ExtensionWireguardPsk]
|
||||
pub fn for_wireguard_psk() -> Self {
|
||||
Self::ExtensionWireguardPsk
|
||||
}
|
||||
|
||||
/// Construct [OskDomainSeparator::Custom] from strings
|
||||
pub fn custom_utf8<I, T>(namespace: &str, label: I) -> Self
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
T: AsRef<str>,
|
||||
{
|
||||
let namespace = namespace.as_bytes().to_owned();
|
||||
let labels = label
|
||||
.into_iter()
|
||||
.map(|e| e.as_ref().as_bytes().to_owned())
|
||||
.collect::<Vec<_>>();
|
||||
Self::Custom { namespace, labels }
|
||||
}
|
||||
|
||||
/// Variant of [Self::custom_utf8] that takes just one label (instead of a sequence)
|
||||
pub fn custom_utf8_single_label(namespace: &str, label: &str) -> Self {
|
||||
Self::custom_utf8(namespace, std::iter::once(label))
|
||||
}
|
||||
|
||||
/// The domain separator is not just an encoded string, it instead uses
|
||||
/// [rosenpass_ciphers::hash_domain::HashDomain], starting from [hash_domains::cke_user].
|
||||
///
|
||||
/// This means, that the domain separator is really a sequence of multiple different domain
|
||||
/// separators, each of which is allowed to be quite long. This is very useful as it allows
|
||||
/// users to avoid specifying complex, prosaic domain separators. To ensure that this does not
|
||||
/// force us create extra overhead when the protocol is executed, this sequence of strings is
|
||||
/// compressed into a single, fixed-length hash of all the inputs. This hash could be created
|
||||
/// at program startup and cached.
|
||||
///
|
||||
/// This function generates this fixed-length hash.
|
||||
pub fn compress_with(&self, hash_choice: KeyedHash) -> anyhow::Result<PublicSymKey> {
|
||||
use OskDomainSeparator as O;
|
||||
match &self {
|
||||
O::ExtensionWireguardPsk => hash_domains::ext_wireguard_psk_osk(hash_choice),
|
||||
O::Custom { namespace, labels } => hash_domains::cke_user(hash_choice)?
|
||||
.mix(namespace)?
|
||||
.mix_many(labels)?
|
||||
.into_value()
|
||||
.ok(),
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
703
rosenpass/src/protocol/test.rs
Normal file
703
rosenpass/src/protocol/test.rs
Normal file
@@ -0,0 +1,703 @@
|
||||
use std::{borrow::BorrowMut, fmt::Display, net::SocketAddrV4, ops::DerefMut};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use serial_test::serial;
|
||||
use zerocopy::{AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use rosenpass_cipher_traits::primitives::Kem;
|
||||
use rosenpass_ciphers::StaticKem;
|
||||
use rosenpass_secret_memory::Public;
|
||||
use rosenpass_util::mem::DiscardResultExt;
|
||||
|
||||
use crate::msgs::{EmptyData, Envelope, InitConf, InitHello, MsgType, RespHello, MAX_MESSAGE_LEN};
|
||||
|
||||
use super::basic_types::{MsgBuf, SPk, SSk, SymKey};
|
||||
use super::constants::REKEY_AFTER_TIME_RESPONDER;
|
||||
use super::osk_domain_separator::OskDomainSeparator;
|
||||
use super::zerocopy::{truncating_cast_into, truncating_cast_into_nomut};
|
||||
use super::{
|
||||
CryptoServer, HandleMsgResult, HostIdentification, KnownInitConfResponsePtr, PeerPtr,
|
||||
PollResult, ProtocolVersion,
|
||||
};
|
||||
|
||||
struct VecHostIdentifier(Vec<u8>);
|
||||
|
||||
impl HostIdentification for VecHostIdentifier {
|
||||
fn encode(&self) -> &[u8] {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VecHostIdentifier {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{:?}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for VecHostIdentifier {
|
||||
fn from(v: Vec<u8>) -> Self {
|
||||
VecHostIdentifier(v)
|
||||
}
|
||||
}
|
||||
|
||||
fn setup_logging() {
|
||||
use std::io::Write;
|
||||
let mut log_builder = env_logger::Builder::from_default_env(); // sets log level filter from environment (or defaults)
|
||||
log_builder.filter_level(log::LevelFilter::Info);
|
||||
log_builder.format_timestamp_nanos();
|
||||
log_builder.format(|buf, record| {
|
||||
let ts_format = buf.timestamp_nanos().to_string();
|
||||
writeln!(buf, "{}: {}", &ts_format[14..], record.args())
|
||||
});
|
||||
|
||||
let _ = log_builder.try_init();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn handles_incorrect_size_messages_v02() {
|
||||
handles_incorrect_size_messages(ProtocolVersion::V02)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn handles_incorrect_size_messages_v03() {
|
||||
handles_incorrect_size_messages(ProtocolVersion::V03)
|
||||
}
|
||||
|
||||
/// Ensure that the protocol implementation can deal with truncated
|
||||
/// messages and with overlong messages.
|
||||
///
|
||||
/// This test performs a complete handshake between two randomly generated
|
||||
/// servers; instead of delivering the message correctly at first messages
|
||||
/// of length zero through about 1.2 times the correct message size are delivered.
|
||||
///
|
||||
/// Producing an error is expected on each of these messages.
|
||||
///
|
||||
/// Finally the correct message is delivered and the same process
|
||||
/// starts again in the other direction.
|
||||
///
|
||||
/// Through all this, the handshake should still successfully terminate;
|
||||
/// i.e. an exchanged key must be produced in both servers.
|
||||
fn handles_incorrect_size_messages(protocol_version: ProtocolVersion) {
|
||||
setup_logging();
|
||||
rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();
|
||||
stacker::grow(8 * 1024 * 1024, || {
|
||||
const OVERSIZED_MESSAGE: usize = ((MAX_MESSAGE_LEN as f32) * 1.2) as usize;
|
||||
type MsgBufPlus = Public<OVERSIZED_MESSAGE>;
|
||||
|
||||
const PEER0: PeerPtr = PeerPtr(0);
|
||||
|
||||
let (mut me, mut they) = make_server_pair(protocol_version).unwrap();
|
||||
let (mut msgbuf, mut resbuf) = (MsgBufPlus::zero(), MsgBufPlus::zero());
|
||||
|
||||
// Process the entire handshake
|
||||
let mut msglen = Some(me.initiate_handshake(PEER0, &mut *resbuf).unwrap());
|
||||
while let Some(l) = msglen {
|
||||
std::mem::swap(&mut me, &mut they);
|
||||
std::mem::swap(&mut msgbuf, &mut resbuf);
|
||||
msglen = test_incorrect_sizes_for_msg(&mut me, &*msgbuf, l, &mut *resbuf);
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
me.osk(PEER0).unwrap().secret(),
|
||||
they.osk(PEER0).unwrap().secret()
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
/// Used in handles_incorrect_size_messages() to first deliver many truncated
|
||||
/// and overlong messages, finally the correct message is delivered and the response
|
||||
/// returned.
|
||||
fn test_incorrect_sizes_for_msg(
|
||||
srv: &mut CryptoServer,
|
||||
msgbuf: &[u8],
|
||||
msglen: usize,
|
||||
resbuf: &mut [u8],
|
||||
) -> Option<usize> {
|
||||
resbuf.fill(0);
|
||||
|
||||
for l in 0..(((msglen as f32) * 1.2) as usize) {
|
||||
if l == msglen {
|
||||
continue;
|
||||
}
|
||||
|
||||
let res = srv.handle_msg(&msgbuf[..l], resbuf);
|
||||
assert!(res.is_err()); // handle_msg should raise an error
|
||||
assert!(!resbuf.iter().any(|x| *x != 0)); // resbuf should not have been changed
|
||||
}
|
||||
|
||||
// Apply the proper handle_msg operation
|
||||
srv.handle_msg(&msgbuf[..msglen], resbuf).unwrap().resp
|
||||
}
|
||||
|
||||
fn keygen() -> Result<(SSk, SPk)> {
|
||||
// TODO: Copied from the benchmark; deduplicate
|
||||
let (mut sk, mut pk) = (SSk::zero(), SPk::zero());
|
||||
StaticKem.keygen(sk.secret_mut(), pk.deref_mut())?;
|
||||
Ok((sk, pk))
|
||||
}
|
||||
|
||||
fn make_server_pair(protocol_version: ProtocolVersion) -> Result<(CryptoServer, CryptoServer)> {
|
||||
// TODO: Copied from the benchmark; deduplicate
|
||||
let psk = SymKey::random();
|
||||
let ((ska, pka), (skb, pkb)) = (keygen()?, keygen()?);
|
||||
let (mut a, mut b) = (
|
||||
CryptoServer::new(ska, pka.clone()),
|
||||
CryptoServer::new(skb, pkb.clone()),
|
||||
);
|
||||
a.add_peer(
|
||||
Some(psk.clone()),
|
||||
pkb,
|
||||
protocol_version.clone(),
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
b.add_peer(
|
||||
Some(psk),
|
||||
pka,
|
||||
protocol_version,
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
Ok((a, b))
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_regular_exchange_v02() {
|
||||
test_regular_exchange(ProtocolVersion::V02)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_regular_exchange_v03() {
|
||||
test_regular_exchange(ProtocolVersion::V03)
|
||||
}
|
||||
|
||||
fn test_regular_exchange(protocol_version: ProtocolVersion) {
|
||||
setup_logging();
|
||||
rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();
|
||||
stacker::grow(8 * 1024 * 1024, || {
|
||||
type MsgBufPlus = Public<MAX_MESSAGE_LEN>;
|
||||
let (mut a, mut b) = make_server_pair(protocol_version).unwrap();
|
||||
|
||||
let mut a_to_b_buf = MsgBufPlus::zero();
|
||||
let mut b_to_a_buf = MsgBufPlus::zero();
|
||||
|
||||
let ip_a: SocketAddrV4 = "127.0.0.1:8080".parse().unwrap();
|
||||
let mut ip_addr_port_a = ip_a.ip().octets().to_vec();
|
||||
ip_addr_port_a.extend_from_slice(&ip_a.port().to_be_bytes());
|
||||
|
||||
let _ip_b: SocketAddrV4 = "127.0.0.1:8081".parse().unwrap();
|
||||
|
||||
let init_hello_len = a.initiate_handshake(PeerPtr(0), &mut *a_to_b_buf).unwrap();
|
||||
|
||||
let init_msg_type: MsgType = a_to_b_buf.value[0].try_into().unwrap();
|
||||
assert_eq!(init_msg_type, MsgType::InitHello);
|
||||
|
||||
//B handles InitHello, sends RespHello
|
||||
let HandleMsgResult { resp, .. } = b
|
||||
.handle_msg(&a_to_b_buf.as_slice()[..init_hello_len], &mut *b_to_a_buf)
|
||||
.unwrap();
|
||||
|
||||
let resp_hello_len = resp.unwrap();
|
||||
|
||||
let resp_msg_type: MsgType = b_to_a_buf.value[0].try_into().unwrap();
|
||||
assert_eq!(resp_msg_type, MsgType::RespHello);
|
||||
|
||||
let HandleMsgResult {
|
||||
resp,
|
||||
exchanged_with,
|
||||
} = a
|
||||
.handle_msg(&b_to_a_buf[..resp_hello_len], &mut *a_to_b_buf)
|
||||
.unwrap();
|
||||
|
||||
let init_conf_len = resp.unwrap();
|
||||
let init_conf_msg_type: MsgType = a_to_b_buf.value[0].try_into().unwrap();
|
||||
|
||||
assert_eq!(exchanged_with, Some(PeerPtr(0)));
|
||||
assert_eq!(init_conf_msg_type, MsgType::InitConf);
|
||||
|
||||
//B handles InitConf, sends EmptyData
|
||||
let HandleMsgResult {
|
||||
resp: _,
|
||||
exchanged_with,
|
||||
} = b
|
||||
.handle_msg(&a_to_b_buf.as_slice()[..init_conf_len], &mut *b_to_a_buf)
|
||||
.unwrap();
|
||||
|
||||
let empty_data_msg_type: MsgType = b_to_a_buf.value[0].try_into().unwrap();
|
||||
|
||||
assert_eq!(exchanged_with, Some(PeerPtr(0)));
|
||||
assert_eq!(empty_data_msg_type, MsgType::EmptyData);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_regular_init_conf_retransmit_v02() {
|
||||
test_regular_init_conf_retransmit(ProtocolVersion::V02)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_regular_init_conf_retransmit_v03() {
|
||||
test_regular_init_conf_retransmit(ProtocolVersion::V03)
|
||||
}
|
||||
|
||||
fn test_regular_init_conf_retransmit(protocol_version: ProtocolVersion) {
|
||||
setup_logging();
|
||||
rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();
|
||||
stacker::grow(8 * 1024 * 1024, || {
|
||||
type MsgBufPlus = Public<MAX_MESSAGE_LEN>;
|
||||
let (mut a, mut b) = make_server_pair(protocol_version).unwrap();
|
||||
|
||||
let mut a_to_b_buf = MsgBufPlus::zero();
|
||||
let mut b_to_a_buf = MsgBufPlus::zero();
|
||||
|
||||
let ip_a: SocketAddrV4 = "127.0.0.1:8080".parse().unwrap();
|
||||
let mut ip_addr_port_a = ip_a.ip().octets().to_vec();
|
||||
ip_addr_port_a.extend_from_slice(&ip_a.port().to_be_bytes());
|
||||
|
||||
let _ip_b: SocketAddrV4 = "127.0.0.1:8081".parse().unwrap();
|
||||
|
||||
let init_hello_len = a.initiate_handshake(PeerPtr(0), &mut *a_to_b_buf).unwrap();
|
||||
|
||||
let init_msg_type: MsgType = a_to_b_buf.value[0].try_into().unwrap();
|
||||
assert_eq!(init_msg_type, MsgType::InitHello);
|
||||
|
||||
//B handles InitHello, sends RespHello
|
||||
let HandleMsgResult { resp, .. } = b
|
||||
.handle_msg(&a_to_b_buf.as_slice()[..init_hello_len], &mut *b_to_a_buf)
|
||||
.unwrap();
|
||||
|
||||
let resp_hello_len = resp.unwrap();
|
||||
|
||||
let resp_msg_type: MsgType = b_to_a_buf.value[0].try_into().unwrap();
|
||||
assert_eq!(resp_msg_type, MsgType::RespHello);
|
||||
|
||||
//A handles RespHello, sends InitConf, exchanges keys
|
||||
let HandleMsgResult {
|
||||
resp,
|
||||
exchanged_with,
|
||||
} = a
|
||||
.handle_msg(&b_to_a_buf[..resp_hello_len], &mut *a_to_b_buf)
|
||||
.unwrap();
|
||||
|
||||
let init_conf_len = resp.unwrap();
|
||||
let init_conf_msg_type: MsgType = a_to_b_buf.value[0].try_into().unwrap();
|
||||
|
||||
assert_eq!(exchanged_with, Some(PeerPtr(0)));
|
||||
assert_eq!(init_conf_msg_type, MsgType::InitConf);
|
||||
|
||||
//B handles InitConf, sends EmptyData
|
||||
let HandleMsgResult {
|
||||
resp: _,
|
||||
exchanged_with,
|
||||
} = b
|
||||
.handle_msg(&a_to_b_buf.as_slice()[..init_conf_len], &mut *b_to_a_buf)
|
||||
.unwrap();
|
||||
|
||||
let empty_data_msg_type: MsgType = b_to_a_buf.value[0].try_into().unwrap();
|
||||
|
||||
assert_eq!(exchanged_with, Some(PeerPtr(0)));
|
||||
assert_eq!(empty_data_msg_type, MsgType::EmptyData);
|
||||
|
||||
//B handles InitConf again, sends EmptyData
|
||||
let HandleMsgResult {
|
||||
resp: _,
|
||||
exchanged_with,
|
||||
} = b
|
||||
.handle_msg(&a_to_b_buf.as_slice()[..init_conf_len], &mut *b_to_a_buf)
|
||||
.unwrap();
|
||||
|
||||
let empty_data_msg_type: MsgType = b_to_a_buf.value[0].try_into().unwrap();
|
||||
|
||||
assert!(exchanged_with.is_none());
|
||||
assert_eq!(empty_data_msg_type, MsgType::EmptyData);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
#[cfg(feature = "experiment_cookie_dos_mitigation")]
|
||||
fn cookie_reply_mechanism_responder_under_load_v02() {
|
||||
cookie_reply_mechanism_initiator_bails_on_message_under_load(ProtocolVersion::V02)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
#[cfg(feature = "experiment_cookie_dos_mitigation")]
|
||||
fn cookie_reply_mechanism_responder_under_load_v03() {
|
||||
cookie_reply_mechanism_initiator_bails_on_message_under_load(ProtocolVersion::V03)
|
||||
}
|
||||
|
||||
#[cfg(feature = "experiment_cookie_dos_mitigation")]
|
||||
fn cookie_reply_mechanism_responder_under_load(protocol_version: ProtocolVersion) {
|
||||
use std::{thread::sleep, time::Duration};
|
||||
|
||||
use super::{Lifecycle, MortalExt};
|
||||
|
||||
setup_logging();
|
||||
rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();
|
||||
stacker::grow(8 * 1024 * 1024, || {
|
||||
type MsgBufPlus = Public<MAX_MESSAGE_LEN>;
|
||||
let (mut a, mut b) = make_server_pair(protocol_version.clone()).unwrap();
|
||||
|
||||
let mut a_to_b_buf = MsgBufPlus::zero();
|
||||
let mut b_to_a_buf = MsgBufPlus::zero();
|
||||
|
||||
let ip_a: SocketAddrV4 = "127.0.0.1:8080".parse().unwrap();
|
||||
let mut ip_addr_port_a = ip_a.ip().octets().to_vec();
|
||||
ip_addr_port_a.extend_from_slice(&ip_a.port().to_be_bytes());
|
||||
|
||||
let _ip_b: SocketAddrV4 = "127.0.0.1:8081".parse().unwrap();
|
||||
|
||||
let init_hello_len = a.initiate_handshake(PeerPtr(0), &mut *a_to_b_buf).unwrap();
|
||||
let socket_addr_a = std::net::SocketAddr::V4(ip_a);
|
||||
let mut ip_addr_port_a = match socket_addr_a.ip() {
|
||||
std::net::IpAddr::V4(ipv4) => ipv4.octets().to_vec(),
|
||||
std::net::IpAddr::V6(ipv6) => ipv6.octets().to_vec(),
|
||||
};
|
||||
|
||||
ip_addr_port_a.extend_from_slice(&socket_addr_a.port().to_be_bytes());
|
||||
|
||||
let ip_addr_port_a: VecHostIdentifier = ip_addr_port_a.into();
|
||||
|
||||
//B handles handshake under load, should send cookie reply message with invalid cookie
|
||||
let HandleMsgResult { resp, .. } = b
|
||||
.handle_msg_under_load(
|
||||
&a_to_b_buf.as_slice()[..init_hello_len],
|
||||
&mut *b_to_a_buf,
|
||||
&ip_addr_port_a,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let cookie_reply_len = resp.unwrap();
|
||||
|
||||
//A handles cookie reply message
|
||||
a.handle_msg(&b_to_a_buf[..cookie_reply_len], &mut *a_to_b_buf)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(PeerPtr(0).cv().lifecycle(&a), Lifecycle::Young);
|
||||
|
||||
let expected_cookie_value =
|
||||
crate::hash_domains::cookie_value(protocol_version.keyed_hash())
|
||||
.unwrap()
|
||||
.mix(
|
||||
b.active_or_retired_cookie_secrets()[0]
|
||||
.unwrap()
|
||||
.get(&b)
|
||||
.value
|
||||
.secret(),
|
||||
)
|
||||
.unwrap()
|
||||
.mix(ip_addr_port_a.encode())
|
||||
.unwrap()
|
||||
.into_value()[..16]
|
||||
.to_vec();
|
||||
|
||||
assert_eq!(
|
||||
PeerPtr(0).cv().get(&a).map(|x| &x.value.secret()[..]),
|
||||
Some(&expected_cookie_value[..])
|
||||
);
|
||||
|
||||
let retx_init_hello_len = loop {
|
||||
match a.poll().unwrap() {
|
||||
PollResult::SendRetransmission(peer) => {
|
||||
break a.retransmit_handshake(peer, &mut *a_to_b_buf).unwrap();
|
||||
}
|
||||
PollResult::Sleep(time) => {
|
||||
sleep(Duration::from_secs_f64(time));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
};
|
||||
|
||||
let retx_msg_type: MsgType = a_to_b_buf.value[0].try_into().unwrap();
|
||||
assert_eq!(retx_msg_type, MsgType::InitHello);
|
||||
|
||||
//B handles retransmitted message
|
||||
let HandleMsgResult { resp, .. } = b
|
||||
.handle_msg_under_load(
|
||||
&a_to_b_buf.as_slice()[..retx_init_hello_len],
|
||||
&mut *b_to_a_buf,
|
||||
&ip_addr_port_a,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let _resp_hello_len = resp.unwrap();
|
||||
|
||||
let resp_msg_type: MsgType = b_to_a_buf.value[0].try_into().unwrap();
|
||||
assert_eq!(resp_msg_type, MsgType::RespHello);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
#[cfg(feature = "experiment_cookie_dos_mitigation")]
|
||||
fn cookie_reply_mechanism_initiator_bails_on_message_under_load_v02() {
|
||||
cookie_reply_mechanism_initiator_bails_on_message_under_load(ProtocolVersion::V02)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
#[cfg(feature = "experiment_cookie_dos_mitigation")]
|
||||
fn cookie_reply_mechanism_initiator_bails_on_message_under_load_v03() {
|
||||
cookie_reply_mechanism_initiator_bails_on_message_under_load(ProtocolVersion::V03)
|
||||
}
|
||||
|
||||
#[cfg(feature = "experiment_cookie_dos_mitigation")]
|
||||
fn cookie_reply_mechanism_initiator_bails_on_message_under_load(protocol_version: ProtocolVersion) {
|
||||
setup_logging();
|
||||
rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();
|
||||
stacker::grow(8 * 1024 * 1024, || {
|
||||
type MsgBufPlus = Public<MAX_MESSAGE_LEN>;
|
||||
let (mut a, mut b) = make_server_pair(protocol_version).unwrap();
|
||||
|
||||
let mut a_to_b_buf = MsgBufPlus::zero();
|
||||
let mut b_to_a_buf = MsgBufPlus::zero();
|
||||
|
||||
let ip_a: SocketAddrV4 = "127.0.0.1:8080".parse().unwrap();
|
||||
let mut ip_addr_port_a = ip_a.ip().octets().to_vec();
|
||||
ip_addr_port_a.extend_from_slice(&ip_a.port().to_be_bytes());
|
||||
let ip_b: SocketAddrV4 = "127.0.0.1:8081".parse().unwrap();
|
||||
|
||||
//A initiates handshake
|
||||
let init_hello_len = a.initiate_handshake(PeerPtr(0), &mut *a_to_b_buf).unwrap();
|
||||
|
||||
//B handles InitHello message, should respond with RespHello
|
||||
let HandleMsgResult { resp, .. } = b
|
||||
.handle_msg(&a_to_b_buf.as_slice()[..init_hello_len], &mut *b_to_a_buf)
|
||||
.unwrap();
|
||||
|
||||
let resp_hello_len = resp.unwrap();
|
||||
let resp_msg_type: MsgType = b_to_a_buf.value[0].try_into().unwrap();
|
||||
assert_eq!(resp_msg_type, MsgType::RespHello);
|
||||
|
||||
let socket_addr_b = std::net::SocketAddr::V4(ip_b);
|
||||
let mut ip_addr_port_b = [0u8; 18];
|
||||
let mut ip_addr_port_b_len = 0;
|
||||
match socket_addr_b.ip() {
|
||||
std::net::IpAddr::V4(ipv4) => {
|
||||
ip_addr_port_b[0..4].copy_from_slice(&ipv4.octets());
|
||||
ip_addr_port_b_len += 4;
|
||||
}
|
||||
std::net::IpAddr::V6(ipv6) => {
|
||||
ip_addr_port_b[0..16].copy_from_slice(&ipv6.octets());
|
||||
ip_addr_port_b_len += 16;
|
||||
}
|
||||
};
|
||||
|
||||
ip_addr_port_b[ip_addr_port_b_len..ip_addr_port_b_len + 2]
|
||||
.copy_from_slice(&socket_addr_b.port().to_be_bytes());
|
||||
ip_addr_port_b_len += 2;
|
||||
|
||||
let ip_addr_port_b: VecHostIdentifier =
|
||||
ip_addr_port_b[..ip_addr_port_b_len].to_vec().into();
|
||||
|
||||
//A handles RespHello message under load, should not send cookie reply
|
||||
assert!(a
|
||||
.handle_msg_under_load(
|
||||
&b_to_a_buf[..resp_hello_len],
|
||||
&mut *a_to_b_buf,
|
||||
&ip_addr_port_b
|
||||
)
|
||||
.is_err());
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn init_conf_retransmission_v02() -> Result<()> {
|
||||
init_conf_retransmission(ProtocolVersion::V02)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn init_conf_retransmission_v03() -> Result<()> {
|
||||
init_conf_retransmission(ProtocolVersion::V03)
|
||||
}
|
||||
|
||||
fn init_conf_retransmission(protocol_version: ProtocolVersion) -> anyhow::Result<()> {
|
||||
rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();
|
||||
|
||||
fn keypair() -> Result<(SSk, SPk)> {
|
||||
let (mut sk, mut pk) = (SSk::zero(), SPk::zero());
|
||||
StaticKem.keygen(sk.secret_mut(), pk.deref_mut())?;
|
||||
Ok((sk, pk))
|
||||
}
|
||||
|
||||
fn proc_initiation(srv: &mut CryptoServer, peer: PeerPtr) -> Result<Envelope<InitHello>> {
|
||||
let mut buf = MsgBuf::zero();
|
||||
srv.initiate_handshake(peer, buf.as_mut_slice())?
|
||||
.discard_result();
|
||||
let msg = truncating_cast_into::<Envelope<InitHello>>(buf.borrow_mut())?;
|
||||
Ok(msg.read())
|
||||
}
|
||||
|
||||
fn proc_msg<Rx: AsBytes + FromBytes, Tx: AsBytes + FromBytes>(
|
||||
srv: &mut CryptoServer,
|
||||
rx: &Envelope<Rx>,
|
||||
) -> anyhow::Result<Envelope<Tx>> {
|
||||
let mut buf = MsgBuf::zero();
|
||||
srv.handle_msg(rx.as_bytes(), buf.as_mut_slice())?
|
||||
.resp
|
||||
.context("Failed to produce RespHello message")?
|
||||
.discard_result();
|
||||
let msg = truncating_cast_into::<Envelope<Tx>>(buf.borrow_mut())?;
|
||||
Ok(msg.read())
|
||||
}
|
||||
|
||||
fn proc_init_hello(
|
||||
srv: &mut CryptoServer,
|
||||
ih: &Envelope<InitHello>,
|
||||
) -> anyhow::Result<Envelope<RespHello>> {
|
||||
proc_msg::<InitHello, RespHello>(srv, ih)
|
||||
}
|
||||
|
||||
fn proc_resp_hello(
|
||||
srv: &mut CryptoServer,
|
||||
rh: &Envelope<RespHello>,
|
||||
) -> anyhow::Result<Envelope<InitConf>> {
|
||||
proc_msg::<RespHello, InitConf>(srv, rh)
|
||||
}
|
||||
|
||||
fn proc_init_conf(
|
||||
srv: &mut CryptoServer,
|
||||
rh: &Envelope<InitConf>,
|
||||
) -> anyhow::Result<Envelope<EmptyData>> {
|
||||
proc_msg::<InitConf, EmptyData>(srv, rh)
|
||||
}
|
||||
|
||||
fn poll(srv: &mut CryptoServer) -> anyhow::Result<()> {
|
||||
// Discard all events; just apply the side effects
|
||||
while !matches!(srv.poll()?, PollResult::Sleep(_)) {}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// TODO: Implement Clone on our message types
|
||||
fn clone_msg<Msg: AsBytes + FromBytes>(msg: &Msg) -> anyhow::Result<Msg> {
|
||||
Ok(truncating_cast_into_nomut::<Msg>(msg.as_bytes())?.read())
|
||||
}
|
||||
|
||||
fn break_payload<Msg: AsBytes + FromBytes>(
|
||||
srv: &mut CryptoServer,
|
||||
peer: PeerPtr,
|
||||
msg: &Envelope<Msg>,
|
||||
) -> anyhow::Result<Envelope<Msg>> {
|
||||
let mut msg = clone_msg(msg)?;
|
||||
msg.as_bytes_mut()[memoffset::offset_of!(Envelope<Msg>, payload)] ^= 0x01;
|
||||
msg.seal(peer, srv)?; // Recalculate seal; we do not want to focus on "seal broken" errs
|
||||
Ok(msg)
|
||||
}
|
||||
|
||||
fn check_faulty_proc_init_conf(srv: &mut CryptoServer, ic_broken: &Envelope<InitConf>) {
|
||||
let mut buf = MsgBuf::zero();
|
||||
let res = srv.handle_msg(ic_broken.as_bytes(), buf.as_mut_slice());
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
// we this as a closure in orer to use the protocol_version variable in it.
|
||||
let check_retransmission = |srv: &mut CryptoServer,
|
||||
ic: &Envelope<InitConf>,
|
||||
ic_broken: &Envelope<InitConf>,
|
||||
rc: &Envelope<EmptyData>|
|
||||
-> Result<()> {
|
||||
// Processing the same RespHello package again leads to retransmission (i.e. exactly the
|
||||
// same output)
|
||||
let rc_dup = proc_init_conf(srv, ic)?;
|
||||
assert_eq!(rc.as_bytes(), rc_dup.as_bytes());
|
||||
|
||||
// Though if we directly call handle_resp_hello() we get an error since
|
||||
// retransmission is not being handled by the cryptographic code
|
||||
let mut discard_resp_conf = EmptyData::new_zeroed();
|
||||
let res = srv.handle_init_conf(
|
||||
&ic.payload,
|
||||
&mut discard_resp_conf,
|
||||
protocol_version.clone().keyed_hash(),
|
||||
);
|
||||
assert!(res.is_err());
|
||||
|
||||
// Obviously, a broken InitConf message should still be rejected
|
||||
check_faulty_proc_init_conf(srv, ic_broken);
|
||||
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let (ska, pka) = keypair()?;
|
||||
let (skb, pkb) = keypair()?;
|
||||
|
||||
// initialize server and a pre-shared key
|
||||
let mut a = CryptoServer::new(ska, pka.clone());
|
||||
let mut b = CryptoServer::new(skb, pkb.clone());
|
||||
|
||||
// introduce peers to each other
|
||||
let b_peer = a.add_peer(
|
||||
None,
|
||||
pkb,
|
||||
protocol_version.clone(),
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
let a_peer = b.add_peer(
|
||||
None,
|
||||
pka,
|
||||
protocol_version.clone(),
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
|
||||
// Execute protocol up till the responder confirmation (EmptyData)
|
||||
let ih1 = proc_initiation(&mut a, b_peer)?;
|
||||
let rh1 = proc_init_hello(&mut b, &ih1)?;
|
||||
let ic1 = proc_resp_hello(&mut a, &rh1)?;
|
||||
let rc1 = proc_init_conf(&mut b, &ic1)?;
|
||||
|
||||
// Modified version of ic1 and rc1, for tests that require it
|
||||
let ic1_broken = break_payload(&mut a, b_peer, &ic1)?;
|
||||
assert_ne!(ic1.as_bytes(), ic1_broken.as_bytes());
|
||||
|
||||
// Modified version of rc1, for tests that require it
|
||||
let rc1_broken = break_payload(&mut b, a_peer, &rc1)?;
|
||||
assert_ne!(rc1.as_bytes(), rc1_broken.as_bytes());
|
||||
|
||||
// Retransmission works as designed
|
||||
check_retransmission(&mut b, &ic1, &ic1_broken, &rc1)?;
|
||||
|
||||
// Even with a couple of poll operations in between (which clears the cache
|
||||
// after a time out of two minutes…we should never hit this time out in this
|
||||
// cache)
|
||||
for _ in 0..4 {
|
||||
poll(&mut b)?;
|
||||
check_retransmission(&mut b, &ic1, &ic1_broken, &rc1)?;
|
||||
}
|
||||
// We can even validate that the data is coming out of the cache by changing the cache
|
||||
// to use our broken messages. It does not matter that these messages are cryptographically
|
||||
// broken since we insert them manually into the cache
|
||||
// a_peer.known_init_conf_response()
|
||||
KnownInitConfResponsePtr::insert_for_request_msg(
|
||||
&mut b,
|
||||
a_peer,
|
||||
&ic1_broken,
|
||||
rc1_broken.clone(),
|
||||
);
|
||||
check_retransmission(&mut b, &ic1_broken, &ic1, &rc1_broken)?;
|
||||
|
||||
// Lets reset to the correct message though
|
||||
KnownInitConfResponsePtr::insert_for_request_msg(&mut b, a_peer, &ic1, rc1.clone());
|
||||
|
||||
// Again, nothing changes after calling poll
|
||||
poll(&mut b)?;
|
||||
check_retransmission(&mut b, &ic1, &ic1_broken, &rc1)?;
|
||||
|
||||
// Except if we jump forward into the future past the point where the responder
|
||||
// starts to initiate rekeying; in this case, the automatic time out is triggered and the cache is cleared
|
||||
super::testutils::time_travel_forward(&mut b, REKEY_AFTER_TIME_RESPONDER);
|
||||
|
||||
// As long as we do not call poll, everything is fine
|
||||
check_retransmission(&mut b, &ic1, &ic1_broken, &rc1)?;
|
||||
|
||||
// But after we do, the response is gone and can not be recreated
|
||||
// since the biscuit is stale
|
||||
poll(&mut b)?;
|
||||
check_faulty_proc_init_conf(&mut b, &ic1); // ic1 is now effectively broken
|
||||
assert!(b.peers[0].known_init_conf_response.is_none()); // The cache is gone
|
||||
|
||||
Ok(())
|
||||
}
|
||||
54
rosenpass/src/protocol/testutils.rs
Normal file
54
rosenpass/src/protocol/testutils.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
//! Helpers used in tests
|
||||
|
||||
use std::ops::DerefMut;
|
||||
|
||||
use rosenpass_cipher_traits::primitives::Kem;
|
||||
use rosenpass_ciphers::StaticKem;
|
||||
|
||||
use super::{
|
||||
basic_types::{SPk, SSk},
|
||||
osk_domain_separator::OskDomainSeparator,
|
||||
CryptoServer, PeerPtr, ProtocolVersion,
|
||||
};
|
||||
|
||||
/// Helper for tests and examples
|
||||
pub struct ServerForTesting {
|
||||
pub peer: PeerPtr,
|
||||
pub peer_keys: (SSk, SPk),
|
||||
pub srv: CryptoServer,
|
||||
}
|
||||
|
||||
/// TODO: Document that the protocol version is only used for creating the peer for testing
|
||||
impl ServerForTesting {
|
||||
pub fn new(protocol_version: ProtocolVersion) -> anyhow::Result<Self> {
|
||||
let (mut sskm, mut spkm) = (SSk::zero(), SPk::zero());
|
||||
StaticKem.keygen(sskm.secret_mut(), spkm.deref_mut())?;
|
||||
let mut srv = CryptoServer::new(sskm, spkm);
|
||||
|
||||
let (mut sskt, mut spkt) = (SSk::zero(), SPk::zero());
|
||||
StaticKem.keygen(sskt.secret_mut(), spkt.deref_mut())?;
|
||||
let peer = srv.add_peer(
|
||||
None,
|
||||
spkt.clone(),
|
||||
protocol_version,
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
|
||||
let peer_keys = (sskt, spkt);
|
||||
Ok(ServerForTesting {
|
||||
peer,
|
||||
peer_keys,
|
||||
srv,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn tuple(self) -> (PeerPtr, (SSk, SPk), CryptoServer) {
|
||||
(self.peer, self.peer_keys, self.srv)
|
||||
}
|
||||
}
|
||||
|
||||
/// Time travel forward in time
|
||||
pub fn time_travel_forward(srv: &mut CryptoServer, secs: f64) {
|
||||
let dur = std::time::Duration::from_secs_f64(secs);
|
||||
srv.timebase.0 = srv.timebase.0.checked_sub(dur).unwrap();
|
||||
}
|
||||
46
rosenpass/src/protocol/timing.rs
Normal file
46
rosenpass/src/protocol/timing.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
//! Time-keeping related utilities for the Rosenpass protocol
|
||||
|
||||
use super::constants::EVENT_GRACE;
|
||||
|
||||
/// A type for time, e.g. for backoff before re-tries
|
||||
pub type Timing = f64;
|
||||
|
||||
/// Magic time stamp to indicate some object is ancient; "Before Common Era"
|
||||
///
|
||||
/// This is for instance used as a magic time stamp indicating age when some
|
||||
/// cryptographic object certainly needs to be refreshed.
|
||||
///
|
||||
/// Using this instead of Timing::MIN or Timing::INFINITY to avoid floating
|
||||
/// point math weirdness.
|
||||
pub const BCE: Timing = -3600.0 * 24.0 * 356.0 * 10_000.0;
|
||||
|
||||
/// Magic time stamp to indicate that some process is not time-limited
|
||||
///
|
||||
/// Actually it's eight hours; This is intentional to avoid weirdness
|
||||
/// regarding unexpectedly large numbers in system APIs as this is < i16::MAX
|
||||
pub const UNENDING: Timing = 3600.0 * 8.0;
|
||||
|
||||
/// An even `ev` has happened relative to a point in time `now`
|
||||
/// if the `ev` does not lie in the future relative to now.
|
||||
///
|
||||
/// An event lies in the future relative to `now` if
|
||||
/// does not lie in the past or present.
|
||||
///
|
||||
/// An event `ev` lies in the past if `ev < now`. It lies in the
|
||||
/// present if the absolute difference between `ev` and `now` is
|
||||
/// smaller than [EVENT_GRACE].
|
||||
///
|
||||
/// Think of this as `ev <= now` for with [EVENT_GRACE] applied.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use rosenpass::protocol::{timing::has_happened, constants::EVENT_GRACE};
|
||||
/// assert!(has_happened(EVENT_GRACE * -1.0, 0.0));
|
||||
/// assert!(has_happened(0.0, 0.0));
|
||||
/// assert!(has_happened(EVENT_GRACE * 0.999, 0.0));
|
||||
/// assert!(!has_happened(EVENT_GRACE * 1.001, 0.0));
|
||||
/// ```
|
||||
pub fn has_happened(ev: Timing, now: Timing) -> bool {
|
||||
(ev - now) < EVENT_GRACE
|
||||
}
|
||||
21
rosenpass/src/protocol/zerocopy.rs
Normal file
21
rosenpass/src/protocol/zerocopy.rs
Normal file
@@ -0,0 +1,21 @@
|
||||
//! Helpers for working with the zerocopy crate
|
||||
|
||||
use std::mem::size_of;
|
||||
|
||||
use zerocopy::{FromBytes, Ref};
|
||||
|
||||
use crate::RosenpassError;
|
||||
|
||||
/// Used to parse a network message using [zerocopy]
|
||||
pub fn truncating_cast_into<T: FromBytes>(
|
||||
buf: &mut [u8],
|
||||
) -> Result<Ref<&mut [u8], T>, RosenpassError> {
|
||||
Ref::new(&mut buf[..size_of::<T>()]).ok_or(RosenpassError::BufferSizeMismatch)
|
||||
}
|
||||
|
||||
/// Used to parse a network message using [zerocopy], mutably
|
||||
pub fn truncating_cast_into_nomut<T: FromBytes>(
|
||||
buf: &[u8],
|
||||
) -> Result<Ref<&[u8], T>, RosenpassError> {
|
||||
Ref::new(&buf[..size_of::<T>()]).ok_or(RosenpassError::BufferSizeMismatch)
|
||||
}
|
||||
@@ -15,7 +15,7 @@ use rosenpass::api::{
|
||||
supply_keypair_response_status,
|
||||
};
|
||||
use rosenpass::config::ProtocolVersion;
|
||||
use rosenpass::protocol::SymKey;
|
||||
use rosenpass::protocol::basic_types::SymKey;
|
||||
use rosenpass_util::{
|
||||
b64::B64Display,
|
||||
file::LoadValueB64,
|
||||
@@ -106,6 +106,7 @@ fn api_integration_api_setup(protocol_version: ProtocolVersion) -> anyhow::Resul
|
||||
extra_params: vec![],
|
||||
}),
|
||||
protocol_version: protocol_version.clone(),
|
||||
osk_domain_separator: Default::default(),
|
||||
}],
|
||||
};
|
||||
|
||||
@@ -127,6 +128,7 @@ fn api_integration_api_setup(protocol_version: ProtocolVersion) -> anyhow::Resul
|
||||
pre_shared_key: None,
|
||||
wg: None,
|
||||
protocol_version: protocol_version.clone(),
|
||||
osk_domain_separator: Default::default(),
|
||||
}],
|
||||
};
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ use tempfile::TempDir;
|
||||
use zerocopy::AsBytes;
|
||||
|
||||
use rosenpass::config::ProtocolVersion;
|
||||
use rosenpass::protocol::SymKey;
|
||||
use rosenpass::protocol::basic_types::SymKey;
|
||||
|
||||
struct KillChild(std::process::Child);
|
||||
|
||||
@@ -83,6 +83,7 @@ fn api_integration_test(protocol_version: ProtocolVersion) -> anyhow::Result<()>
|
||||
pre_shared_key: None,
|
||||
wg: None,
|
||||
protocol_version: protocol_version.clone(),
|
||||
osk_domain_separator: Default::default(),
|
||||
}],
|
||||
};
|
||||
|
||||
@@ -104,6 +105,7 @@ fn api_integration_test(protocol_version: ProtocolVersion) -> anyhow::Result<()>
|
||||
pre_shared_key: None,
|
||||
wg: None,
|
||||
protocol_version: protocol_version.clone(),
|
||||
osk_domain_separator: Default::default(),
|
||||
}],
|
||||
};
|
||||
|
||||
|
||||
@@ -1,21 +1,14 @@
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
ops::DerefMut,
|
||||
str::FromStr,
|
||||
sync::mpsc,
|
||||
thread::{self, sleep},
|
||||
time::Duration,
|
||||
};
|
||||
use std::thread::{self, sleep};
|
||||
use std::{net::SocketAddr, ops::DerefMut, str::FromStr, sync::mpsc, time::Duration};
|
||||
|
||||
use rosenpass::config::ProtocolVersion;
|
||||
use rosenpass::{
|
||||
app_server::{AppServer, AppServerTest, MAX_B64_KEY_SIZE},
|
||||
protocol::{SPk, SSk, SymKey},
|
||||
};
|
||||
use rosenpass_cipher_traits::primitives::Kem;
|
||||
use rosenpass_ciphers::StaticKem;
|
||||
use rosenpass_util::{file::LoadValueB64, functional::run, mem::DiscardResultExt, result::OkExt};
|
||||
|
||||
use rosenpass::app_server::{AppServer, AppServerTest, MAX_B64_KEY_SIZE};
|
||||
use rosenpass::protocol::basic_types::{SPk, SSk, SymKey};
|
||||
use rosenpass::{config::ProtocolVersion, protocol::osk_domain_separator::OskDomainSeparator};
|
||||
|
||||
#[test]
|
||||
fn key_exchange_with_app_server_v02() -> anyhow::Result<()> {
|
||||
key_exchange_with_app_server(ProtocolVersion::V02)
|
||||
@@ -69,7 +62,8 @@ fn key_exchange_with_app_server(protocol_version: ProtocolVersion) -> anyhow::Re
|
||||
outfile,
|
||||
broker_peer,
|
||||
hostname,
|
||||
protocol_version.clone(),
|
||||
protocol_version,
|
||||
OskDomainSeparator::default(),
|
||||
)?;
|
||||
|
||||
srv.app_srv.event_loop()
|
||||
|
||||
@@ -9,29 +9,49 @@ use rosenpass_cipher_traits::primitives::Kem;
|
||||
use rosenpass_ciphers::StaticKem;
|
||||
use rosenpass_util::result::OkExt;
|
||||
|
||||
use rosenpass::protocol::{
|
||||
testutils::time_travel_forward, CryptoServer, HostIdentification, MsgBuf, PeerPtr, PollResult,
|
||||
ProtocolVersion, SPk, SSk, SymKey, Timing, UNENDING,
|
||||
};
|
||||
use rosenpass::protocol::basic_types::{MsgBuf, SPk, SSk, SymKey};
|
||||
use rosenpass::protocol::osk_domain_separator::OskDomainSeparator;
|
||||
use rosenpass::protocol::testutils::time_travel_forward;
|
||||
use rosenpass::protocol::timing::{Timing, UNENDING};
|
||||
use rosenpass::protocol::{CryptoServer, HostIdentification, PeerPtr, PollResult, ProtocolVersion};
|
||||
|
||||
// TODO: Most of the utility functions in here should probably be moved to
|
||||
// rosenpass::protocol::testutils;
|
||||
|
||||
#[test]
|
||||
fn test_successful_exchange_with_poll_v02() -> anyhow::Result<()> {
|
||||
test_successful_exchange_with_poll(ProtocolVersion::V02)
|
||||
test_successful_exchange_with_poll(ProtocolVersion::V02, OskDomainSeparator::default())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_successful_exchange_with_poll_v03() -> anyhow::Result<()> {
|
||||
test_successful_exchange_with_poll(ProtocolVersion::V03)
|
||||
test_successful_exchange_with_poll(ProtocolVersion::V03, OskDomainSeparator::default())
|
||||
}
|
||||
|
||||
fn test_successful_exchange_with_poll(protocol_version: ProtocolVersion) -> anyhow::Result<()> {
|
||||
#[test]
|
||||
fn test_successful_exchange_with_poll_v02_custom_domain_separator() -> anyhow::Result<()> {
|
||||
test_successful_exchange_with_poll(
|
||||
ProtocolVersion::V02,
|
||||
OskDomainSeparator::custom_utf8_single_label("example.org", "Example Label"),
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_successful_exchange_with_poll_v03_custom_domain_separator() -> anyhow::Result<()> {
|
||||
test_successful_exchange_with_poll(
|
||||
ProtocolVersion::V03,
|
||||
OskDomainSeparator::custom_utf8_single_label("example.org", "Example Label"),
|
||||
)
|
||||
}
|
||||
|
||||
fn test_successful_exchange_with_poll(
|
||||
protocol_version: ProtocolVersion,
|
||||
osk_domain_separator: OskDomainSeparator,
|
||||
) -> anyhow::Result<()> {
|
||||
// Set security policy for storing secrets; choose the one that is faster for testing
|
||||
rosenpass_secret_memory::policy::secret_policy_use_only_malloc_secrets();
|
||||
|
||||
let mut sim = RosenpassSimulator::new(protocol_version)?;
|
||||
let mut sim = RosenpassSimulator::new(protocol_version, osk_domain_separator)?;
|
||||
sim.poll_loop(150)?; // Poll 75 times
|
||||
let transcript = sim.transcript;
|
||||
|
||||
@@ -104,7 +124,7 @@ fn test_successful_exchange_under_packet_loss(
|
||||
rosenpass_secret_memory::policy::secret_policy_use_only_malloc_secrets();
|
||||
|
||||
// Create the simulator
|
||||
let mut sim = RosenpassSimulator::new(protocol_version)?;
|
||||
let mut sim = RosenpassSimulator::new(protocol_version, OskDomainSeparator::default())?;
|
||||
|
||||
// Make sure the servers are set to under load condition
|
||||
sim.srv_a.under_load = true;
|
||||
@@ -181,6 +201,94 @@ fn test_successful_exchange_under_packet_loss(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_osk_label_mismatch() -> anyhow::Result<()> {
|
||||
// Set security policy for storing secrets; choose the one that is faster for testing
|
||||
rosenpass_secret_memory::policy::secret_policy_use_only_malloc_secrets();
|
||||
|
||||
let ds_wg = OskDomainSeparator::for_wireguard_psk();
|
||||
let ds_custom1 = OskDomainSeparator::custom_utf8("example.com", ["Example Label"]);
|
||||
let ds_custom2 =
|
||||
OskDomainSeparator::custom_utf8("example.com", ["Example Label", "Second Token"]);
|
||||
|
||||
// Create the simulator
|
||||
let mut sim = RosenpassSimulator::new(ProtocolVersion::V03, ds_custom1.clone())?;
|
||||
assert_eq!(sim.srv_a.srv.peers[0].osk_domain_separator, ds_custom1);
|
||||
assert_eq!(sim.srv_b.srv.peers[0].osk_domain_separator, ds_custom1);
|
||||
|
||||
// Deliberately produce a label mismatch
|
||||
sim.srv_b.srv.peers[0].osk_domain_separator = ds_custom2.clone();
|
||||
assert_eq!(sim.srv_a.srv.peers[0].osk_domain_separator, ds_custom1);
|
||||
assert_eq!(sim.srv_b.srv.peers[0].osk_domain_separator, ds_custom2);
|
||||
|
||||
// Perform the key exchanges
|
||||
for _ in 0..300 {
|
||||
let ev = sim.poll()?;
|
||||
|
||||
assert!(!matches!(ev, TranscriptEvent::CompletedExchange(_)),
|
||||
"We deliberately provoked a mismatch in OSK domain separator, but still saw a successfully completed key exchange");
|
||||
|
||||
// Wait for a key exchange that failed with a KeyMismatch event
|
||||
let (osk_a_custom1, osk_b_custom2) = match ev {
|
||||
TranscriptEvent::FailedExchangeWithKeyMismatch(osk_a, osk_b) => {
|
||||
(osk_a.clone(), osk_b.clone())
|
||||
}
|
||||
_ => continue,
|
||||
};
|
||||
|
||||
// The OSKs have been produced through the call to the function CryptoServer::osk(…)
|
||||
assert_eq!(
|
||||
sim.srv_a.srv.osk(PeerPtr(0))?.secret(),
|
||||
osk_a_custom1.secret()
|
||||
);
|
||||
assert_eq!(
|
||||
sim.srv_b.srv.osk(PeerPtr(0))?.secret(),
|
||||
osk_b_custom2.secret()
|
||||
);
|
||||
|
||||
// They are not matching (obviously)
|
||||
assert_ne!(osk_a_custom1.secret(), osk_b_custom2.secret());
|
||||
|
||||
// We can manually generate OSKs with matching labels
|
||||
let osk_a_custom2 = sim
|
||||
.srv_a
|
||||
.srv
|
||||
.osk_with_domain_separator(PeerPtr(0), &ds_custom2)?;
|
||||
let osk_b_custom1 = sim
|
||||
.srv_b
|
||||
.srv
|
||||
.osk_with_domain_separator(PeerPtr(0), &ds_custom1)?;
|
||||
let osk_a_wg = sim
|
||||
.srv_a
|
||||
.srv
|
||||
.osk_with_domain_separator(PeerPtr(0), &ds_wg)?;
|
||||
let osk_b_wg = sim
|
||||
.srv_b
|
||||
.srv
|
||||
.osk_with_domain_separator(PeerPtr(0), &ds_wg)?;
|
||||
|
||||
// The key exchange may have failed for some other reason, in this case we expect a
|
||||
// successful-but-label-mismatch exchange later in the protocol
|
||||
if osk_a_custom1.secret() != osk_b_custom1.secret() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// But if one of the labeled keys match, all should match
|
||||
assert_eq!(osk_a_custom2.secret(), osk_b_custom2.secret());
|
||||
assert_eq!(osk_a_wg.secret(), osk_b_wg.secret());
|
||||
|
||||
// But the three keys do not match each other
|
||||
assert_ne!(osk_a_custom1.secret(), osk_a_custom2.secret());
|
||||
assert_ne!(osk_a_custom1.secret(), osk_a_wg.secret());
|
||||
assert_ne!(osk_a_custom2.secret(), osk_a_wg.secret());
|
||||
|
||||
// The test succeeded
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
panic!("Test did not succeed even after allowing for a large number of communication rounds");
|
||||
}
|
||||
|
||||
type MessageType = u8;
|
||||
|
||||
/// Lets record the events that are produced by Rosenpass
|
||||
@@ -193,6 +301,7 @@ enum TranscriptEvent {
|
||||
event: ServerEvent,
|
||||
},
|
||||
CompletedExchange(SymKey),
|
||||
FailedExchangeWithKeyMismatch(SymKey, SymKey),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -292,7 +401,10 @@ struct SimulatorServer {
|
||||
|
||||
impl RosenpassSimulator {
|
||||
/// Set up the simulator
|
||||
fn new(protocol_version: ProtocolVersion) -> anyhow::Result<Self> {
|
||||
fn new(
|
||||
protocol_version: ProtocolVersion,
|
||||
osk_domain_separator: OskDomainSeparator,
|
||||
) -> anyhow::Result<Self> {
|
||||
// Set up the first server
|
||||
let (mut peer_a_sk, mut peer_a_pk) = (SSk::zero(), SPk::zero());
|
||||
StaticKem.keygen(peer_a_sk.secret_mut(), peer_a_pk.deref_mut())?;
|
||||
@@ -305,8 +417,18 @@ impl RosenpassSimulator {
|
||||
|
||||
// Generate a PSK and introduce the Peers to each other.
|
||||
let psk = SymKey::random();
|
||||
let peer_a = srv_a.add_peer(Some(psk.clone()), peer_b_pk, protocol_version.clone())?;
|
||||
let peer_b = srv_b.add_peer(Some(psk), peer_a_pk, protocol_version.clone())?;
|
||||
let peer_a = srv_a.add_peer(
|
||||
Some(psk.clone()),
|
||||
peer_b_pk,
|
||||
protocol_version.clone(),
|
||||
osk_domain_separator.clone(),
|
||||
)?;
|
||||
let peer_b = srv_b.add_peer(
|
||||
Some(psk),
|
||||
peer_a_pk,
|
||||
protocol_version.clone(),
|
||||
osk_domain_separator.clone(),
|
||||
)?;
|
||||
|
||||
// Set up the individual server data structures
|
||||
let srv_a = SimulatorServer::new(srv_a, peer_b);
|
||||
@@ -566,10 +688,18 @@ impl ServerPtr {
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Make sure the OSK of server A always comes first
|
||||
let (osk_a, osk_b) = match self == ServerPtr::A {
|
||||
true => (osk, other_osk),
|
||||
false => (other_osk, osk),
|
||||
};
|
||||
|
||||
// Issue the successful exchange event if the OSKs are equal;
|
||||
// be careful to use constant time comparison for things like this!
|
||||
if rosenpass_constant_time::memcmp(osk.secret(), other_osk.secret()) {
|
||||
self.enqueue_upcoming_poll_event(sim, TE::CompletedExchange(osk));
|
||||
if rosenpass_constant_time::memcmp(osk_a.secret(), osk_b.secret()) {
|
||||
self.enqueue_upcoming_poll_event(sim, TE::CompletedExchange(osk_a));
|
||||
} else {
|
||||
self.enqueue_upcoming_poll_event(sim, TE::FailedExchangeWithKeyMismatch(osk_a, osk_b));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,20 +1,21 @@
|
||||
use anyhow::Error;
|
||||
use std::{
|
||||
future::Future, net::SocketAddr, ops::DerefMut, path::PathBuf, pin::Pin, process::Command,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use anyhow::{Error, Result};
|
||||
use serde::Deserialize;
|
||||
use std::future::Future;
|
||||
use std::ops::DerefMut;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::{net::SocketAddr, path::PathBuf, process::Command};
|
||||
|
||||
use rosenpass::config::ProtocolVersion;
|
||||
|
||||
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
|
||||
use crate::key::WG_B64_LEN;
|
||||
use anyhow::Result;
|
||||
use rosenpass::config::ProtocolVersion;
|
||||
|
||||
/// Used to define a peer for the rosenpass connection that consists of
|
||||
/// a directory for storing public keys and optionally an IP address and port of the endpoint,
|
||||
/// for how long the connection should be kept alive and a list of allowed IPs for the peer.
|
||||
#[derive(Default, Deserialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct ExchangePeer {
|
||||
/// Directory where public keys are stored
|
||||
pub public_keys_dir: PathBuf,
|
||||
@@ -31,6 +32,7 @@ pub struct ExchangePeer {
|
||||
|
||||
/// Options for the exchange operation of the `rp` binary.
|
||||
#[derive(Default, Deserialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct ExchangeOptions {
|
||||
/// Whether the cli output should be verbose.
|
||||
pub verbose: bool,
|
||||
@@ -206,7 +208,10 @@ pub async fn exchange(options: ExchangeOptions) -> Result<()> {
|
||||
use rosenpass::{
|
||||
app_server::{AppServer, BrokerPeer},
|
||||
config::Verbosity,
|
||||
protocol::{SPk, SSk, SymKey},
|
||||
protocol::{
|
||||
basic_types::{SPk, SSk, SymKey},
|
||||
osk_domain_separator::OskDomainSeparator,
|
||||
},
|
||||
};
|
||||
use rosenpass_secret_memory::Secret;
|
||||
use rosenpass_util::file::{LoadValue as _, LoadValueB64};
|
||||
@@ -360,6 +365,7 @@ pub async fn exchange(options: ExchangeOptions) -> Result<()> {
|
||||
broker_peer,
|
||||
peer.endpoint.map(|x| x.to_string()),
|
||||
peer.protocol_version,
|
||||
OskDomainSeparator::for_wireguard_psk(),
|
||||
)?;
|
||||
|
||||
// Configure routes, equivalent to `ip route replace <allowed_ips> dev <dev>` and set up
|
||||
|
||||
@@ -9,7 +9,7 @@ use anyhow::{anyhow, Result};
|
||||
use rosenpass_util::file::{LoadValueB64, StoreValue, StoreValueB64};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use rosenpass::protocol::{SPk, SSk};
|
||||
use rosenpass::protocol::basic_types::{SPk, SSk};
|
||||
use rosenpass_cipher_traits::primitives::Kem;
|
||||
use rosenpass_ciphers::StaticKem;
|
||||
use rosenpass_secret_memory::{file::StoreSecret as _, Public, Secret};
|
||||
@@ -118,7 +118,7 @@ pub fn pubkey(private_keys_dir: &Path, public_keys_dir: &Path) -> Result<()> {
|
||||
mod tests {
|
||||
use std::fs;
|
||||
|
||||
use rosenpass::protocol::{SPk, SSk};
|
||||
use rosenpass::protocol::basic_types::{SPk, SSk};
|
||||
use rosenpass_secret_memory::secret_policy_try_use_memfd_secrets;
|
||||
use rosenpass_secret_memory::Secret;
|
||||
use rosenpass_util::file::LoadValue;
|
||||
|
||||
25
supply-chain-CI.md
Normal file
25
supply-chain-CI.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Continuous Integration for supply chain protection
|
||||
|
||||
This repository's CI uses non-standard mechanisms to harmonize the usage of `dependabot` together with [`cargo vet`](https://mozilla.github.io/cargo-vet/). Since cargo-vet audits for new versions of crates are rarely immediately available once dependabots bumps the version,
|
||||
the exemptions for `cargo vet` have to be regenerated for each push request opened by dependabot. To make this work, some setup is neccessary to setup the CI. The required steps are as follows:
|
||||
|
||||
1. Create a mew user on github. For the purpose of these instructions, we will assume that its mail address is `ci@example.com` and that its username is `ci-bot`. Protect this user account as you would any other user account that you intend to gve write permissions to. For example, setup MFA or protect the email address of the user. Make sure to verify your e-mail.
|
||||
2. Add `ci-bot` as a member of your organizaton with write access to the repository.
|
||||
3. In your organization, go to "Settings" -> "Personal Access tokens" -> "Settings". There select "Allow access via fine-grained personal access tokens" and save. Depending on your preferences either choose "Require administrator approval" or "Do not require administrator approval".
|
||||
4. Create a new personal access token as `ci-bot` for the rosenpass repository. That is, in the settings for `ci-bot`, select "Developer settings" -> "Personal Access tokens" -> "Fine-grained tokens". Then click on "Generate new token". Enter a name of your choosing and choose an expiration date that you feel comfortable with. A shorter expiration period will requrie more manual management by you but is more secure than a longer one. Select your organization as the resource owner and select the rosenpass repository as the repository. Under "Repository permissions", grant "Read and write"-access to the "Contens" premission for the token. Grant no other permissions to the token, except for the read-only access to the "Metadata" permission, which is mandatory. Then generate the token and copy it for the next steps.
|
||||
5. If you chose "Require administrator approval" in step 3, approve the fine grained access token by, as a organization administrator, going to "Settings" -> "Personal Access tokens" -> "Pending requests" and grant the request.
|
||||
6. Now, with your account that has administrative permissions for the repository, open the settings page for the repository and select "Secrets and variables" -> "Actions" and click "New repository secret". In the name field enter "CI_BOT_PAT". This name is mandatory, since it is explicitly referenced in the supply-chain workflow. Below, enter the token that was generated in step 4.
|
||||
7. Analogously to step 6, open the settings page for the repository and select "Secrets and variables" -> "Dependabot" and click "New repository secret". In the name field enter "CI_BOT_PAT". This name is mandatory, since it is explicitly referenced in the supply-chain workflow. Below, enter the token that was generated in step 4.
|
||||
|
||||
## What this does
|
||||
|
||||
For the `cargo vet` check in the CI for dependabot, the `cargo vet`-exemptions have to automatically be regenerated, because otherwise this CI job will always fail for dependabot PRs. After the exemptions have been regenerated, they need to be commited and pushed to the PR. This invalidates the CI run that pushed the commit so that it does not show up in the PR anymore but does not trigger a new CI run. This is a [protection by Github](https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) to prevent infinite loops. However, in this case it prevents us from having a proper CI run for dependabot PRs. The solution to this is to execute `push` operation with a personal access token.
|
||||
|
||||
## Preventing infinite loops
|
||||
|
||||
The CI is configured to avoid infinite loops by only regenerating and pushing the `cargo vet` exemptions if the CI run happens with respect to a PR opened by dependabot and not for any other pushed or pull requests. In addition one of the following conditions has to be met:
|
||||
|
||||
- The last commit was performed by dependabot
|
||||
- The last commit message ends in `--regenerate-exemptions`
|
||||
|
||||
Summarizing, the exemptions are only regenerated in the context of pull requests opened by dependabot and, the last commit was was performed by dependabot or the last commit message ends in `--regenerate-exemptions`.
|
||||
@@ -142,7 +142,7 @@ version = "0.7.4"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.clap_mangen]]
|
||||
version = "0.2.24"
|
||||
version = "0.2.29"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.cmake]]
|
||||
@@ -257,10 +257,6 @@ criteria = "safe-to-deploy"
|
||||
version = "0.10.2"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.fastrand]]
|
||||
version = "2.3.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.findshlibs]]
|
||||
version = "0.10.2"
|
||||
criteria = "safe-to-run"
|
||||
@@ -285,10 +281,6 @@ criteria = "safe-to-deploy"
|
||||
version = "0.2.15"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.gimli]]
|
||||
version = "0.31.1"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.hash32]]
|
||||
version = "0.2.1"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -341,6 +333,10 @@ criteria = "safe-to-deploy"
|
||||
version = "2.1.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.io-uring]]
|
||||
version = "0.7.9"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.ipc-channel]]
|
||||
version = "0.18.3"
|
||||
criteria = "safe-to-run"
|
||||
@@ -370,7 +366,7 @@ version = "1.3.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.libc]]
|
||||
version = "0.2.169"
|
||||
version = "0.2.174"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.libcrux]]
|
||||
@@ -529,10 +525,6 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.15"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.pin-project-lite]]
|
||||
version = "0.2.16"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.pkg-config]]
|
||||
version = "0.3.31"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -581,14 +573,6 @@ criteria = "safe-to-deploy"
|
||||
version = "0.9.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.rand_chacha]]
|
||||
version = "0.9.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.rand_core]]
|
||||
version = "0.9.3"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.redox_syscall]]
|
||||
version = "0.5.9"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -658,7 +642,7 @@ version = "0.4.9"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.socket2]]
|
||||
version = "0.5.8"
|
||||
version = "0.6.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.spin]]
|
||||
@@ -702,7 +686,7 @@ version = "2.0.11"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.tokio]]
|
||||
version = "1.44.2"
|
||||
version = "1.47.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.tokio-macros]]
|
||||
@@ -733,10 +717,6 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.17"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.utf8parse]]
|
||||
version = "0.2.2"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.uuid]]
|
||||
version = "1.14.0"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -847,7 +827,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows-targets]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows-targets]]
|
||||
version = "0.52.6"
|
||||
@@ -859,7 +839,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_aarch64_gnullvm]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_aarch64_gnullvm]]
|
||||
version = "0.52.6"
|
||||
@@ -871,7 +851,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_aarch64_msvc]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_aarch64_msvc]]
|
||||
version = "0.52.6"
|
||||
@@ -883,7 +863,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_i686_gnu]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_i686_gnu]]
|
||||
version = "0.52.6"
|
||||
@@ -899,7 +879,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_i686_msvc]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_i686_msvc]]
|
||||
version = "0.52.6"
|
||||
@@ -911,7 +891,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_x86_64_gnu]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_x86_64_gnu]]
|
||||
version = "0.52.6"
|
||||
@@ -923,7 +903,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_x86_64_gnullvm]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_x86_64_gnullvm]]
|
||||
version = "0.52.6"
|
||||
@@ -935,7 +915,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_x86_64_msvc]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_x86_64_msvc]]
|
||||
version = "0.52.6"
|
||||
|
||||
@@ -35,7 +35,7 @@ who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
user-id = 73222 # wasmtime-publish
|
||||
start = "2023-01-01"
|
||||
end = "2025-05-08"
|
||||
end = "2026-06-03"
|
||||
notes = """
|
||||
The Bytecode Alliance uses the `wasmtime-publish` crates.io account to automate
|
||||
publication of this crate from CI. This repository requires all PRs are reviewed
|
||||
@@ -144,6 +144,21 @@ who = "Dan Gohman <dev@sunfishcode.online>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.3.9 -> 0.3.10"
|
||||
|
||||
[[audits.bytecode-alliance.audits.fastrand]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.0.0 -> 2.0.1"
|
||||
notes = """
|
||||
This update had a few doc updates but no otherwise-substantial source code
|
||||
updates.
|
||||
"""
|
||||
|
||||
[[audits.bytecode-alliance.audits.fastrand]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.1.1 -> 2.3.0"
|
||||
notes = "Minor refactoring, nothing new."
|
||||
|
||||
[[audits.bytecode-alliance.audits.futures]]
|
||||
who = "Joel Dice <joel.dice@gmail.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -190,6 +205,18 @@ who = "Pat Hickey <pat@moreproductive.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.3.28 -> 0.3.31"
|
||||
|
||||
[[audits.bytecode-alliance.audits.gimli]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.29.0 -> 0.31.0"
|
||||
notes = "Various updates here and there, nothing too major, what you'd expect from a DWARF parsing crate."
|
||||
|
||||
[[audits.bytecode-alliance.audits.gimli]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.31.0 -> 0.31.1"
|
||||
notes = "No fundmanetally new `unsafe` code, some small refactoring of existing code. Lots of changes in tests, not as many changes in the rest of the crate. More dwarf!"
|
||||
|
||||
[[audits.bytecode-alliance.audits.heck]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -249,6 +276,12 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.0"
|
||||
notes = "I am the author of this crate."
|
||||
|
||||
[[audits.bytecode-alliance.audits.pin-project-lite]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.2.13 -> 0.2.14"
|
||||
notes = "No substantive changes in this update"
|
||||
|
||||
[[audits.bytecode-alliance.audits.pin-utils]]
|
||||
who = "Pat Hickey <phickey@fastly.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -301,6 +334,12 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.40"
|
||||
notes = "Found no unsafe or ambient capabilities used"
|
||||
|
||||
[[audits.embark-studios.audits.utf8parse]]
|
||||
who = "Johan Andersson <opensource@embark-studios.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.2.1"
|
||||
notes = "Single unsafe usage that looks sound, no ambient capabilities"
|
||||
|
||||
[[audits.fermyon.audits.oorandom]]
|
||||
who = "Radu Matei <radu.matei@fermyon.com>"
|
||||
criteria = "safe-to-run"
|
||||
@@ -411,6 +450,16 @@ delta = "1.0.1 -> 1.0.2"
|
||||
notes = "No changes to any .rs files or Rust code."
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.fastrand]]
|
||||
who = "George Burgess IV <gbiv@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "1.9.0"
|
||||
notes = """
|
||||
`does-not-implement-crypto` is certified because this crate explicitly says
|
||||
that the RNG here is not cryptographically secure.
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromiumos/third_party/rust_crates/+/refs/heads/main/cargo-vet/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.glob]]
|
||||
who = "George Burgess IV <gbiv@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -554,6 +603,20 @@ version = "0.1.46"
|
||||
notes = "Contains no unsafe"
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.pin-project-lite]]
|
||||
who = "David Koloski <dkoloski@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.2.9"
|
||||
notes = "Reviewed on https://fxrev.dev/824504"
|
||||
aggregated-from = "https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/third_party/rust_crates/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.pin-project-lite]]
|
||||
who = "David Koloski <dkoloski@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.2.9 -> 0.2.13"
|
||||
notes = "Audited at https://fxrev.dev/946396"
|
||||
aggregated-from = "https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/third_party/rust_crates/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.proc-macro-error-attr]]
|
||||
who = "George Burgess IV <gbiv@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -708,6 +771,24 @@ For more detailed unsafe review notes please see https://crrev.com/c/6362797
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.rand_chacha]]
|
||||
who = "Lukasz Anforowicz <lukasza@chromium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.3.1"
|
||||
notes = """
|
||||
For more detailed unsafe review notes please see https://crrev.com/c/6362797
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.rand_core]]
|
||||
who = "Lukasz Anforowicz <lukasza@chromium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.6.4"
|
||||
notes = """
|
||||
For more detailed unsafe review notes please see https://crrev.com/c/6362797
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.regex-syntax]]
|
||||
who = "Manish Goregaokar <manishearth@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -1158,12 +1239,12 @@ version = "0.3.0"
|
||||
[[audits.isrg.audits.rand_chacha]]
|
||||
who = "David Cook <dcook@divviup.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.3.1"
|
||||
delta = "0.3.1 -> 0.9.0"
|
||||
|
||||
[[audits.isrg.audits.rand_core]]
|
||||
who = "David Cook <dcook@divviup.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.6.3"
|
||||
delta = "0.6.4 -> 0.9.3"
|
||||
|
||||
[[audits.isrg.audits.rayon]]
|
||||
who = "Brandon Pitman <bran@bran.land>"
|
||||
@@ -1379,6 +1460,25 @@ criteria = "safe-to-deploy"
|
||||
delta = "0.3.1 -> 0.3.3"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fastrand]]
|
||||
who = "Mike Hommey <mh+mozilla@glandium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "1.9.0 -> 2.0.0"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fastrand]]
|
||||
who = "Mike Hommey <mh+mozilla@glandium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.0.1 -> 2.1.0"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fastrand]]
|
||||
who = "Chris Martin <cmartin@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.1.0 -> 2.1.1"
|
||||
notes = "Fairly trivial changes, no chance of security regression."
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fnv]]
|
||||
who = "Bobby Holley <bobbyholley@gmail.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -1409,6 +1509,23 @@ documentation.
|
||||
"""
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.gimli]]
|
||||
who = "Alex Franchuk <afranchuk@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.30.0"
|
||||
notes = """
|
||||
Unsafe code blocks are sound. Minimal dependencies used. No use of
|
||||
side-effectful std functions.
|
||||
"""
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.gimli]]
|
||||
who = "Chris Martin <cmartin@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.30.0 -> 0.29.0"
|
||||
notes = "No unsafe code, mostly algorithms and parsing. Very unlikely to cause security issues."
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.hex]]
|
||||
who = "Simon Friedberger <simon@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -1428,11 +1545,15 @@ delta = "1.0.0 -> 0.1.2"
|
||||
notes = "Small refactor of some simple iterator logic, no unsafe code or capabilities."
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.rand_core]]
|
||||
who = "Mike Hommey <mh+mozilla@glandium.org>"
|
||||
[[audits.mozilla.audits.pin-project-lite]]
|
||||
who = "Nika Layzell <nika@thelayzells.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.6.3 -> 0.6.4"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
delta = "0.2.14 -> 0.2.16"
|
||||
notes = """
|
||||
Only functional change is to work around a bug in the negative_impls feature
|
||||
(https://github.com/taiki-e/pin-project/issues/340#issuecomment-2432146009)
|
||||
"""
|
||||
aggregated-from = "https://raw.githubusercontent.com/mozilla/cargo-vet/main/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.rayon]]
|
||||
who = "Josh Stone <jistone@redhat.com>"
|
||||
@@ -1491,6 +1612,12 @@ criteria = "safe-to-deploy"
|
||||
delta = "1.0.43 -> 1.0.69"
|
||||
aggregated-from = "https://raw.githubusercontent.com/mozilla/glean/main/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.utf8parse]]
|
||||
who = "Nika Layzell <nika@thelayzells.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.2.1 -> 0.2.2"
|
||||
aggregated-from = "https://raw.githubusercontent.com/mozilla/cargo-vet/main/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.zeroize]]
|
||||
who = "Benjamin Beurdouche <beurdouche@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
@@ -561,7 +561,7 @@ mod tests {
|
||||
let mut file = FdIo(open_nullfd()?);
|
||||
let mut buf = [0; 10];
|
||||
assert!(matches!(file.read(&mut buf), Ok(0) | Err(_)));
|
||||
assert!(matches!(file.write(&buf), Err(_)));
|
||||
assert!(file.write(&buf).is_err());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -618,7 +618,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_lpe_error_conversion_downcast_invalid() {
|
||||
let pos_error = PositionOutOfBufferBounds;
|
||||
let sanity_error = SanityError::PositionOutOfBufferBounds(pos_error.into());
|
||||
let sanity_error = SanityError::PositionOutOfBufferBounds(pos_error);
|
||||
match MessageLenSanityError::try_from(sanity_error) {
|
||||
Ok(_) => panic!("Conversion should always fail (incompatible enum variant)"),
|
||||
Err(err) => assert!(matches!(err, PositionOutOfBufferBounds)),
|
||||
|
||||
@@ -302,6 +302,6 @@ mod test_forgetting {
|
||||
drop_was_called.store(false, SeqCst);
|
||||
let forgetting = Forgetting::new(SetFlagOnDrop(drop_was_called.clone()));
|
||||
drop(forgetting);
|
||||
assert_eq!(drop_was_called.load(SeqCst), false);
|
||||
assert!(!drop_was_called.load(SeqCst));
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user