mirror of
https://github.com/rosenpass/rosenpass.git
synced 2025-12-10 14:50:31 -08:00
Compare commits
2 Commits
dev/karo/l
...
dev/refine
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
63ff75b93c | ||
|
|
5bebdd9284 |
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env nu
|
||||
|
||||
use std log
|
||||
use log *
|
||||
|
||||
# cd to git root
|
||||
cd (git rev-parse --show-toplevel)
|
||||
@@ -33,7 +33,7 @@ let systems_map = {
|
||||
# aarch64-linux
|
||||
|
||||
i686-linux: ubuntu-latest,
|
||||
x86_64-darwin: macos-13,
|
||||
x86_64-darwin: macos-latest,
|
||||
x86_64-linux: ubuntu-latest
|
||||
}
|
||||
|
||||
@@ -64,7 +64,7 @@ let runner_setup = [
|
||||
uses: "actions/checkout@v3"
|
||||
}
|
||||
{
|
||||
uses: "cachix/install-nix-action@v22",
|
||||
uses: "cachix/install-nix-action@v21",
|
||||
with: { nix_path: "nixpkgs=channel:nixos-unstable" }
|
||||
}
|
||||
{
|
||||
@@ -116,7 +116,6 @@ for system in ($targets | columns) {
|
||||
} }
|
||||
| filter {|it| $it.needed}
|
||||
| each {|it| job-id $system $it.name}
|
||||
| sort
|
||||
)
|
||||
|
||||
mut new_job = {
|
||||
@@ -198,4 +197,4 @@ $cachix_workflow | to yaml | save --force .github/workflows/nix.yaml
|
||||
$release_workflow | to yaml | save --force .github/workflows/release.yaml
|
||||
|
||||
log info "prettify generated yaml"
|
||||
prettier -w .github/workflows/
|
||||
prettier -w .github/workflows/
|
||||
@@ -1 +0,0 @@
|
||||
FROM ghcr.io/xtruder/nix-devcontainer:v1
|
||||
@@ -1,33 +0,0 @@
|
||||
// For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at
|
||||
// https://github.com/microsoft/vscode-dev-containers/tree/master/containers/docker-existing-dockerfile
|
||||
{
|
||||
"name": "devcontainer-project",
|
||||
"dockerFile": "Dockerfile",
|
||||
"context": "${localWorkspaceFolder}",
|
||||
"build": {
|
||||
"args": {
|
||||
"USER_UID": "${localEnv:USER_UID}",
|
||||
"USER_GID": "${localEnv:USER_GID}"
|
||||
}
|
||||
},
|
||||
|
||||
// run arguments passed to docker
|
||||
"runArgs": ["--security-opt", "label=disable"],
|
||||
|
||||
// disable command overriding and updating remote user ID
|
||||
"overrideCommand": false,
|
||||
"userEnvProbe": "loginShell",
|
||||
"updateRemoteUserUID": false,
|
||||
|
||||
// build development environment on creation, make sure you already have shell.nix
|
||||
"onCreateCommand": "nix develop",
|
||||
|
||||
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||
"forwardPorts": [],
|
||||
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": ["rust-lang.rust-analyzer", "tamasfe.even-better-toml"]
|
||||
}
|
||||
}
|
||||
}
|
||||
14
.github/codecov.yml
vendored
14
.github/codecov.yml
vendored
@@ -1,14 +0,0 @@
|
||||
codecov:
|
||||
branch: main
|
||||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
# basic
|
||||
target: auto #default
|
||||
threshold: 5
|
||||
base: auto
|
||||
if_ci_failed: error #success, failure, error, ignore
|
||||
informational: false
|
||||
only_pulls: true
|
||||
patch: off
|
||||
6
.github/dependabot.yml
vendored
6
.github/dependabot.yml
vendored
@@ -1,6 +0,0 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "cargo"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
63
.github/workflows/dependent-issues.yml
vendored
63
.github/workflows/dependent-issues.yml
vendored
@@ -1,63 +0,0 @@
|
||||
name: Dependent Issues
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- closed
|
||||
- reopened
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- closed
|
||||
- reopened
|
||||
# Makes sure we always add status check for PRs. Useful only if
|
||||
# this action is required to pass before merging. Otherwise, it
|
||||
# can be removed.
|
||||
- synchronize
|
||||
|
||||
# Schedule a daily check. Useful if you reference cross-repository
|
||||
# issues or pull requests. Otherwise, it can be removed.
|
||||
schedule:
|
||||
- cron: "0 0 * * *"
|
||||
|
||||
jobs:
|
||||
check:
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
statuses: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: z0al/dependent-issues@v1
|
||||
env:
|
||||
# (Required) The token to use to make API calls to GitHub.
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
# (Optional) The token to use to make API calls to GitHub for remote repos.
|
||||
GITHUB_READ_TOKEN: ${{ secrets.GITHUB_READ_TOKEN }}
|
||||
|
||||
with:
|
||||
# (Optional) The label to use to mark dependent issues
|
||||
label: dependent
|
||||
|
||||
# (Optional) Enable checking for dependencies in issues.
|
||||
# Enable by setting the value to "on". Default "off"
|
||||
check_issues: off
|
||||
|
||||
# (Optional) Ignore dependabot PRs.
|
||||
# Enable by setting the value to "on". Default "off"
|
||||
ignore_dependabot: off
|
||||
|
||||
# (Optional) A comma-separated list of keywords. Default
|
||||
# "depends on, blocked by"
|
||||
keywords: depends on, blocked by
|
||||
|
||||
# (Optional) A custom comment body. It supports `{{ dependencies }}` token.
|
||||
comment: >
|
||||
This PR/issue depends on:
|
||||
|
||||
{{ dependencies }}
|
||||
|
||||
By **[Dependent Issues](https://github.com/z0al/dependent-issues)** (🤖). Happy coding!
|
||||
172
.github/workflows/nix.yaml
vendored
172
.github/workflows/nix.yaml
vendored
@@ -15,7 +15,7 @@ jobs:
|
||||
- i686-linux---rosenpass
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -31,7 +31,7 @@ jobs:
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -48,7 +48,7 @@ jobs:
|
||||
- i686-linux---rosenpass
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -63,7 +63,7 @@ jobs:
|
||||
- ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -75,12 +75,12 @@ jobs:
|
||||
x86_64-darwin---default:
|
||||
name: Build x86_64-darwin.default
|
||||
runs-on:
|
||||
- macos-13
|
||||
- macos-latest
|
||||
needs:
|
||||
- x86_64-darwin---rosenpass
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -92,14 +92,13 @@ jobs:
|
||||
x86_64-darwin---release-package:
|
||||
name: Build x86_64-darwin.release-package
|
||||
runs-on:
|
||||
- macos-13
|
||||
- macos-latest
|
||||
needs:
|
||||
- x86_64-darwin---rosenpass
|
||||
- x86_64-darwin---rp
|
||||
- x86_64-darwin---rosenpass-oci-image
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -111,11 +110,11 @@ jobs:
|
||||
x86_64-darwin---rosenpass:
|
||||
name: Build x86_64-darwin.rosenpass
|
||||
runs-on:
|
||||
- macos-13
|
||||
- macos-latest
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -124,31 +123,15 @@ jobs:
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.x86_64-darwin.rosenpass --print-build-logs
|
||||
x86_64-darwin---rp:
|
||||
name: Build x86_64-darwin.rp
|
||||
runs-on:
|
||||
- macos-13
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
with:
|
||||
name: rosenpass
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.x86_64-darwin.rp --print-build-logs
|
||||
x86_64-darwin---rosenpass-oci-image:
|
||||
name: Build x86_64-darwin.rosenpass-oci-image
|
||||
runs-on:
|
||||
- macos-13
|
||||
- macos-latest
|
||||
needs:
|
||||
- x86_64-darwin---rosenpass
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -160,10 +143,10 @@ jobs:
|
||||
x86_64-darwin---check:
|
||||
name: Run Nix checks on x86_64-darwin
|
||||
runs-on:
|
||||
- macos-13
|
||||
- macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -180,7 +163,7 @@ jobs:
|
||||
- x86_64-linux---rosenpass
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -197,7 +180,7 @@ jobs:
|
||||
- x86_64-linux---proverif-patched
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -213,7 +196,7 @@ jobs:
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -229,10 +212,9 @@ jobs:
|
||||
needs:
|
||||
- x86_64-linux---rosenpass-static
|
||||
- x86_64-linux---rosenpass-static-oci-image
|
||||
- x86_64-linux---rp-static
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -241,30 +223,6 @@ jobs:
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.x86_64-linux.release-package --print-build-logs
|
||||
aarch64-linux---release-package:
|
||||
name: Build aarch64-linux.release-package
|
||||
runs-on:
|
||||
- ubuntu-latest
|
||||
needs:
|
||||
- aarch64-linux---rosenpass-oci-image
|
||||
- aarch64-linux---rosenpass
|
||||
- aarch64-linux---rp
|
||||
steps:
|
||||
- run: |
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
extra_nix_config: |
|
||||
system = aarch64-linux
|
||||
- uses: cachix/cachix-action@v12
|
||||
with:
|
||||
name: rosenpass
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.aarch64-linux.release-package --print-build-logs
|
||||
x86_64-linux---rosenpass:
|
||||
name: Build x86_64-linux.rosenpass
|
||||
runs-on:
|
||||
@@ -272,7 +230,7 @@ jobs:
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -281,48 +239,6 @@ jobs:
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.x86_64-linux.rosenpass --print-build-logs
|
||||
aarch64-linux---rosenpass:
|
||||
name: Build aarch64-linux.rosenpass
|
||||
runs-on:
|
||||
- ubuntu-latest
|
||||
needs: []
|
||||
steps:
|
||||
- run: |
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
extra_nix_config: |
|
||||
system = aarch64-linux
|
||||
- uses: cachix/cachix-action@v12
|
||||
with:
|
||||
name: rosenpass
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.aarch64-linux.rosenpass --print-build-logs
|
||||
aarch64-linux---rp:
|
||||
name: Build aarch64-linux.rp
|
||||
runs-on:
|
||||
- ubuntu-latest
|
||||
needs: []
|
||||
steps:
|
||||
- run: |
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
extra_nix_config: |
|
||||
system = aarch64-linux
|
||||
- uses: cachix/cachix-action@v12
|
||||
with:
|
||||
name: rosenpass
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.aarch64-linux.rp --print-build-logs
|
||||
x86_64-linux---rosenpass-oci-image:
|
||||
name: Build x86_64-linux.rosenpass-oci-image
|
||||
runs-on:
|
||||
@@ -331,7 +247,7 @@ jobs:
|
||||
- x86_64-linux---rosenpass
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -340,28 +256,6 @@ jobs:
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.x86_64-linux.rosenpass-oci-image --print-build-logs
|
||||
aarch64-linux---rosenpass-oci-image:
|
||||
name: Build aarch64-linux.rosenpass-oci-image
|
||||
runs-on:
|
||||
- ubuntu-latest
|
||||
needs:
|
||||
- aarch64-linux---rosenpass
|
||||
steps:
|
||||
- run: |
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
extra_nix_config: |
|
||||
system = aarch64-linux
|
||||
- uses: cachix/cachix-action@v12
|
||||
with:
|
||||
name: rosenpass
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.aarch64-linux.rosenpass-oci-image --print-build-logs
|
||||
x86_64-linux---rosenpass-static:
|
||||
name: Build x86_64-linux.rosenpass-static
|
||||
runs-on:
|
||||
@@ -369,7 +263,7 @@ jobs:
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -378,22 +272,6 @@ jobs:
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.x86_64-linux.rosenpass-static --print-build-logs
|
||||
x86_64-linux---rp-static:
|
||||
name: Build x86_64-linux.rp-static
|
||||
runs-on:
|
||||
- ubuntu-latest
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
with:
|
||||
name: rosenpass
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- name: Build
|
||||
run: nix build .#packages.x86_64-linux.rp-static --print-build-logs
|
||||
x86_64-linux---rosenpass-static-oci-image:
|
||||
name: Build x86_64-linux.rosenpass-static-oci-image
|
||||
runs-on:
|
||||
@@ -402,7 +280,7 @@ jobs:
|
||||
- x86_64-linux---rosenpass-static
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -418,7 +296,7 @@ jobs:
|
||||
needs: []
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -433,7 +311,7 @@ jobs:
|
||||
- ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -448,7 +326,7 @@ jobs:
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
|
||||
147
.github/workflows/qc.yaml
vendored
147
.github/workflows/qc.yaml
vendored
@@ -17,52 +17,6 @@ jobs:
|
||||
with:
|
||||
args: --check .
|
||||
|
||||
shellcheck:
|
||||
name: Shellcheck
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run ShellCheck
|
||||
uses: ludeeus/action-shellcheck@master
|
||||
|
||||
rustfmt:
|
||||
name: Rust Format
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run Rust Formatting Script
|
||||
run: bash format_rust_code.sh --mode check
|
||||
|
||||
cargo-bench:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
# liboqs requires quite a lot of stack memory, thus we adjust
|
||||
# the default stack size picked for new threads (which is used
|
||||
# by `cargo test`) to be _big enough_. Setting it to 8 MiB
|
||||
- run: RUST_MIN_STACK=8388608 cargo bench --workspace --exclude rosenpass-fuzzing
|
||||
|
||||
mandoc:
|
||||
name: mandoc
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install mandoc
|
||||
run: sudo apt-get install -y mandoc
|
||||
- uses: actions/checkout@v3
|
||||
- name: Check rosenpass.1
|
||||
run: doc/check.sh doc/rosenpass.1
|
||||
- name: Check rp.1
|
||||
run: doc/check.sh doc/rp.1
|
||||
|
||||
cargo-audit:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -85,6 +39,8 @@ jobs:
|
||||
target/
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
- run: rustup component add clippy
|
||||
- name: Install libsodium
|
||||
run: sudo apt-get install -y libsodium-dev
|
||||
- uses: actions-rs/clippy-check@v1
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -104,104 +60,9 @@ jobs:
|
||||
target/
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
- run: rustup component add clippy
|
||||
- name: Install libsodium
|
||||
run: sudo apt-get install -y libsodium-dev
|
||||
# `--no-deps` used as a workaround for a rust compiler bug. See:
|
||||
# - https://github.com/rosenpass/rosenpass/issues/62
|
||||
# - https://github.com/rust-lang/rust/issues/108378
|
||||
- run: RUSTDOCFLAGS="-D warnings" cargo doc --no-deps --document-private-items
|
||||
|
||||
cargo-test:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-13]
|
||||
# - ubuntu is x86-64
|
||||
# - macos-13 is also x86-64 architecture
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
# liboqs requires quite a lot of stack memory, thus we adjust
|
||||
# the default stack size picked for new threads (which is used
|
||||
# by `cargo test`) to be _big enough_. Setting it to 8 MiB
|
||||
- run: RUST_MIN_STACK=8388608 cargo test --workspace --all-features
|
||||
|
||||
cargo-test-nix-devshell-x86_64-linux:
|
||||
runs-on:
|
||||
- ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
with:
|
||||
name: rosenpass
|
||||
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
- run: nix develop --command cargo test --workspace --all-features
|
||||
|
||||
cargo-fuzz:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin/
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
~/.cargo/git/db/
|
||||
target/
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
- name: Install nightly toolchain
|
||||
run: |
|
||||
rustup toolchain install nightly
|
||||
rustup default nightly
|
||||
- name: Install cargo-fuzz
|
||||
run: cargo install cargo-fuzz
|
||||
- name: Run fuzzing
|
||||
run: |
|
||||
cargo fuzz run fuzz_aead_enc_into -- -max_total_time=5
|
||||
cargo fuzz run fuzz_blake2b -- -max_total_time=5
|
||||
cargo fuzz run fuzz_handle_msg -- -max_total_time=5
|
||||
ulimit -s 8192000 && RUST_MIN_STACK=33554432000 && cargo fuzz run fuzz_kyber_encaps -- -max_total_time=5
|
||||
cargo fuzz run fuzz_mceliece_encaps -- -max_total_time=5
|
||||
cargo fuzz run fuzz_box_secret_alloc_malloc -- -max_total_time=5
|
||||
cargo fuzz run fuzz_box_secret_alloc_memfdsec -- -max_total_time=5
|
||||
cargo fuzz run fuzz_box_secret_alloc_memfdsec_mallocfb -- -max_total_time=5
|
||||
cargo fuzz run fuzz_vec_secret_alloc_malloc -- -max_total_time=5
|
||||
cargo fuzz run fuzz_vec_secret_alloc_memfdsec -- -max_total_time=5
|
||||
cargo fuzz run fuzz_vec_secret_alloc_memfdsec_mallocfb -- -max_total_time=5
|
||||
|
||||
codecov:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- run: rustup component add llvm-tools-preview
|
||||
- run: |
|
||||
cargo install cargo-llvm-cov || true
|
||||
cargo llvm-cov --lcov --output-path coverage.lcov
|
||||
# If using tarapulin
|
||||
#- run: cargo install cargo-tarpaulin
|
||||
#- run: cargo tarpaulin --out Xml
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v4.0.1
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./coverage.lcov
|
||||
verbose: true
|
||||
|
||||
8
.github/workflows/release.yaml
vendored
8
.github/workflows/release.yaml
vendored
@@ -12,7 +12,7 @@ jobs:
|
||||
- ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -30,10 +30,10 @@ jobs:
|
||||
x86_64-darwin---release:
|
||||
name: Build release artifacts for x86_64-darwin
|
||||
runs-on:
|
||||
- macos-13
|
||||
- macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
@@ -54,7 +54,7 @@ jobs:
|
||||
- ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: cachix/install-nix-action@v22
|
||||
- uses: cachix/install-nix-action@v21
|
||||
with:
|
||||
nix_path: nixpkgs=channel:nixos-unstable
|
||||
- uses: cachix/cachix-action@v12
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
# TODO use CI_JOB_TOKEN once https://gitlab.com/groups/gitlab-org/-/epics/6310 is fixed
|
||||
pull-from-gh:
|
||||
only: ["schedules"]
|
||||
variables:
|
||||
REMOTE: "https://github.com/rosenpass/rosenpass.git"
|
||||
LOCAL: " git@gitlab.com:rosenpass/rosenpass.git"
|
||||
GIT_STRATEGY: none
|
||||
before_script:
|
||||
- mkdir ~/.ssh/
|
||||
- echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts
|
||||
- echo "$REPO_SSH_KEY" > ~/.ssh/id_ed25519
|
||||
- chmod 600 --recursive ~/.ssh/
|
||||
- git config --global user.email "ci@gitlab.com"
|
||||
- git config --global user.name "CI"
|
||||
script:
|
||||
- git clone --mirror $REMOTE rosenpass
|
||||
- cd rosenpass && git push --mirror $LOCAL
|
||||
2381
Cargo.lock
generated
2381
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
111
Cargo.toml
111
Cargo.toml
@@ -1,83 +1,40 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
[package]
|
||||
name = "rosenpass"
|
||||
version = "0.1.2-rc.4"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Build post-quantum-secure VPNs with WireGuard!"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
members = [
|
||||
"rosenpass",
|
||||
"cipher-traits",
|
||||
"ciphers",
|
||||
"util",
|
||||
"constant-time",
|
||||
"oqs",
|
||||
"to",
|
||||
"fuzz",
|
||||
"secret-memory",
|
||||
"rp",
|
||||
"wireguard-broker",
|
||||
]
|
||||
[[bench]]
|
||||
name = "handshake"
|
||||
harness = false
|
||||
|
||||
default-members = ["rosenpass", "rp", "wireguard-broker"]
|
||||
|
||||
[workspace.metadata.release]
|
||||
# ensure that adding `--package` as argument to `cargo release` still creates version tags in the form of `vx.y.z`
|
||||
tag-prefix = ""
|
||||
|
||||
[workspace.dependencies]
|
||||
rosenpass = { path = "rosenpass" }
|
||||
rosenpass-util = { path = "util" }
|
||||
rosenpass-constant-time = { path = "constant-time" }
|
||||
rosenpass-cipher-traits = { path = "cipher-traits" }
|
||||
rosenpass-ciphers = { path = "ciphers" }
|
||||
rosenpass-to = { path = "to" }
|
||||
rosenpass-secret-memory = { path = "secret-memory" }
|
||||
rosenpass-oqs = { path = "oqs" }
|
||||
rosenpass-wireguard-broker = { path = "wireguard-broker" }
|
||||
doc-comment = "0.3.3"
|
||||
base64ct = {version = "1.6.0", default-features=false}
|
||||
zeroize = "1.8.1"
|
||||
memoffset = "0.9.1"
|
||||
thiserror = "1.0.61"
|
||||
paste = "1.0.15"
|
||||
env_logger = "0.10.2"
|
||||
toml = "0.7.8"
|
||||
[dependencies]
|
||||
base64 = "0.21.1"
|
||||
static_assertions = "1.1.0"
|
||||
allocator-api2 = "0.2.14"
|
||||
memsec = { git="https://github.com/rosenpass/memsec.git" ,rev="aceb9baee8aec6844125bd6612f92e9a281373df", features = [ "alloc_ext", ] }
|
||||
rand = "0.8.5"
|
||||
typenum = "1.17.0"
|
||||
log = { version = "0.4.22" }
|
||||
clap = { version = "4.5.8", features = ["derive"] }
|
||||
serde = { version = "1.0.203", features = ["derive"] }
|
||||
arbitrary = { version = "1.3.2", features = ["derive"] }
|
||||
anyhow = { version = "1.0.86", features = ["backtrace", "std"] }
|
||||
mio = { version = "0.8.11", features = ["net", "os-poll"] }
|
||||
oqs-sys = { version = "0.9.1", default-features = false, features = [
|
||||
'classic_mceliece',
|
||||
'kyber',
|
||||
] }
|
||||
blake2 = "0.10.6"
|
||||
chacha20poly1305 = { version = "0.10.1", default-features = false, features = [
|
||||
"std",
|
||||
"heapless",
|
||||
] }
|
||||
zerocopy = { version = "0.7.35", features = ["derive"] }
|
||||
home = "0.5.9"
|
||||
derive_builder = "0.20.0"
|
||||
tokio = { version = "1.38", features = ["macros", "rt-multi-thread"] }
|
||||
postcard= {version = "1.0.8", features = ["alloc"]}
|
||||
libcrux = { version = "0.0.2-pre.2" }
|
||||
memoffset = "0.9.0"
|
||||
libsodium-sys-stable = { version = "1.19.28", features = ["use-pkg-config"] }
|
||||
oqs-sys = { version = "0.7.2", default-features = false, features = ['classic_mceliece', 'kyber'] }
|
||||
lazy_static = "1.4.0"
|
||||
thiserror = "1.0.40"
|
||||
paste = "1.0.12"
|
||||
log = { version = "0.4.17", optional = true }
|
||||
env_logger = { version = "0.10.0", optional = true }
|
||||
serde = { version = "1.0.163", features = ["derive"] }
|
||||
toml = "0.7.4"
|
||||
clap = { version = "4.3.0", features = ["derive"] }
|
||||
mio = { version = "0.8.6", features = ["net", "os-poll"] }
|
||||
|
||||
#Dev dependencies
|
||||
serial_test = "3.1.1"
|
||||
tempfile = "3"
|
||||
stacker = "0.1.15"
|
||||
libfuzzer-sys = "0.4"
|
||||
test_bin = "0.4.0"
|
||||
[build-dependencies]
|
||||
anyhow = "1.0.71"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.4.0"
|
||||
allocator-api2-tests = "0.2.15"
|
||||
procspawn = {version = "1.0.0", features= ["test-support"]}
|
||||
test_bin = "0.4.0"
|
||||
|
||||
|
||||
#Broker dependencies (might need cleanup or changes)
|
||||
wireguard-uapi = { version = "3.0.0", features = ["xplatform"] }
|
||||
command-fds = "0.2.3"
|
||||
rustix = { version = "0.38.27", features = ["net"] }
|
||||
[features]
|
||||
default = ["log", "env_logger"]
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
#define INITIATOR_TEST 1
|
||||
|
||||
#include "rosenpass/03_identity_hiding.mpv"
|
||||
|
||||
// nounif a:Atom, s:seed, a2:Atom;
|
||||
// ConsumeSeed(a, s, a2) / 6300[conclusion].
|
||||
|
||||
nounif v:seed_prec; attacker(prepare_seed(trusted_seed( v )))/6217[hypothesis].
|
||||
nounif v:seed; attacker(prepare_seed( v ))/6216[hypothesis].
|
||||
nounif v:seed; attacker(rng_kem_sk( v ))/6215[hypothesis].
|
||||
nounif v:seed; attacker(rng_key( v ))/6214[hypothesis].
|
||||
nounif v:key_prec; attacker(prepare_key(trusted_key( v )))/6213[hypothesis].
|
||||
nounif v:kem_sk_prec; attacker(prepare_kem_sk(trusted_kem_sk( v )))/6212[hypothesis].
|
||||
nounif v:key; attacker(prepare_key( v ))/6211[hypothesis].
|
||||
nounif v:kem_sk; attacker(prepare_kem_sk( v ))/6210[hypothesis].
|
||||
nounif Spk:kem_sk_tmpl;
|
||||
attacker(Creveal_kem_pk(Spk))/6110[conclusion].
|
||||
nounif sid:SessionId, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Seski:seed_tmpl, Ssptr:seed_tmpl;
|
||||
attacker(Cinitiator( *sid, *Ssskm, *Spsk, *Sspkt, *Seski, *Ssptr ))/6109[conclusion].
|
||||
nounif sid:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Septi:seed_tmpl, Sspti:seed_tmpl, ih:InitHello_t;
|
||||
attacker(Cinit_hello( *sid, *biscuit_no, *Ssskm, *Spsk, *Sspkt, *Septi, *Sspti, *ih ))/6108[conclusion].
|
||||
nounif rh:RespHello_t;
|
||||
attacker(Cresp_hello( *rh ))/6107[conclusion].
|
||||
nounif Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t;
|
||||
attacker(Cinit_conf( *Ssskm, *Spsk, *Sspkt, *ic ))/6106[conclusion].
|
||||
@@ -1,96 +0,0 @@
|
||||
#define RESPONDER_TEST 1
|
||||
|
||||
#include "rosenpass/03_identity_hiding.mpv"
|
||||
|
||||
// select k:kem_pk,ih: InitHello_t; attacker(prf(prf(prf(prf(key0, PROTOCOL), MAC), kem_pk2b(k) ), IH2b(ih))) phase 1/6300[hypothesis].
|
||||
|
||||
// select epki:kem_pk, sctr:bits, pidiC:bits, auth:bits, epki2:kem_pk, sctr2:bits, pidiC2:bits, auth2:bits;
|
||||
// mess(D, prf(prf(prf(prf(key0,PROTOCOL),MAC),kem_pk2b(kem_pub(trusted_kem_sk(responder1)))),
|
||||
// IH2b(InitHello(secure_sidi, *epki, *sctr, *pidiC, *auth)))
|
||||
// ) [hypothesis, conclusion].
|
||||
|
||||
// select epki:kem_pk, sctr:bits, pidiC:bits, auth:bits, epki2:kem_pk, sctr2:bits, pidiC2:bits, auth2:bits;
|
||||
// attacker(choice[prf(prf(prf(prf(key0,PROTOCOL),MAC),kem_pk2b(kem_pub(trusted_kem_sk(responder1)))),
|
||||
// IH2b(InitHello(secure_sidi, *epki, *sctr, *pidiC, *auth))),
|
||||
|
||||
// prf(prf(prf(prf(key0,PROTOCOL),MAC),kem_pk2b(kem_pub(trusted_kem_sk(responder2)))),
|
||||
// IH2b(InitHello(secure_sidi, *epki2, *sctr2, *pidiC2, *auth2)))]
|
||||
// ) [hypothesis, conclusion].
|
||||
|
||||
// select
|
||||
// attacker(prf(prf(key0,PROTOCOL),MAC)) [hypothesis, conclusion].
|
||||
|
||||
// select
|
||||
// attacker(prf(key0,PROTOCOL)) [conclusion].
|
||||
|
||||
// select
|
||||
// attacker(key0) [conclusion].
|
||||
|
||||
// select
|
||||
// attacker(PROTOCOL) [conclusion].
|
||||
|
||||
// select
|
||||
// attacker(kem_pub(trusted_kem_sk(responder1))) /9999 [hypothesis, conclusion].
|
||||
|
||||
// select
|
||||
// attacker(kem_pub(trusted_kem_sk(responder2))) /9999 [hypothesis, conclusion].
|
||||
|
||||
// nounif ih:InitHello_t;
|
||||
// attacker(ih) / 9999 [hypothesis].
|
||||
|
||||
// nounif rh:RespHello_t;
|
||||
// attacker(rh) / 9999 [hypothesis].
|
||||
|
||||
// nounif ic:InitConf_t;
|
||||
// attacker(ic) / 9999 [hypothesis].
|
||||
|
||||
// nounif k:key;
|
||||
// attacker(ck_hs_enc( *k )) [hypothesis, conclusion].
|
||||
|
||||
// nounif k:key;
|
||||
// attacker(ck_hs_enc( *k )) phase 1 [hypothesis, conclusion].
|
||||
|
||||
// nounif k:key, b:bits;
|
||||
// attacker(ck_mix( *k , *b )) [hypothesis, conclusion].
|
||||
|
||||
// nounif k:key, b:bits;
|
||||
// attacker(ck_mix( *k , *b ))phase 1 [hypothesis, conclusion].
|
||||
|
||||
// // select k:kem_pk, epki2:kem_pk, sctr2:bits, pidiC2:bits, auth2:bits, epki:kem_pk, sctr:bits, pidiC:bits, auth:bits;
|
||||
// // attacker(choice[Envelope(prf(prf(prf(prf(key0,PROTOCOL),MAC),kem_pub(trusted_kem_sk(responder1))),
|
||||
// // InitHello(secure_sidi, *epki2, *sctr2, *pidiC2, *auth2)
|
||||
// // ), InitHello(secure_sidi, *epki2, *sctr2, *pidiC2, *auth2))
|
||||
// // Envelope(prf(prf(prf(prf(key0,PROTOCOL),MAC),kem_pub(trusted_kem_sk(responder2))),
|
||||
// // InitHello(secure_sidi, *epki, *sctr, *pidiC, *auth)),
|
||||
// // InitHello(secure_sidi, *epki, *sctr, *pidiC, *auth))
|
||||
// // ]) / 9999[hypothesis, conclusion].
|
||||
|
||||
// nounif k:key, b1:bits, b2:bits;
|
||||
// attacker(xaead_enc( *k, *b1, *b2)) / 9999[hypothesis,conclusion].
|
||||
|
||||
// nounif pk:kem_pk, k:key;
|
||||
// attacker(kem_enc( *pk , *k )) / 9999[hypothesis,conclusion].
|
||||
|
||||
// nounif sid:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Septi:seed_tmpl, Sspti:seed_tmpl, ih:InitHello_t;
|
||||
// attacker(Cinit_hello( *sid, *biscuit_no, *Ssskm, *Spsk, *Sspkt, *Septi, *Sspti, *ih ))/9999[hypothesis, conclusion].
|
||||
// nounif Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t;
|
||||
// attacker(Cinit_conf( *Ssskm, *Spsk, *Sspkt, *ic ))/9999[hypothesis, conclusion].
|
||||
// nounif sid:SessionId, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Seski:seed_tmpl, Ssptr:seed_tmpl;
|
||||
// attacker(Cinitiator( *sid, *Ssskm, *Spsk, *Sspkt, *Seski, *Ssptr )) /9999 [hypothesis, conclusion].
|
||||
|
||||
// nounif sid:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Septi:seed_tmpl, Sspti:seed_tmpl, ih:InitHello_t;
|
||||
// mess(C, Cinit_hello( *sid, *biscuit_no, *Ssskm, *Spsk, *Sspkt, *Septi, *Sspti, *ih ))/9999[hypothesis, conclusion].
|
||||
// nounif Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t;
|
||||
// mess(C, Cinit_conf( *Ssskm, *Spsk, *Sspkt, *ic ))/9999[hypothesis, conclusion].
|
||||
// nounif sid:SessionId, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Seski:seed_tmpl, Ssptr:seed_tmpl;
|
||||
// mess(C, Cinitiator( *sid, *Ssskm, *Spsk, *Sspkt, *Seski, *Ssptr )) /9999 [hypothesis, conclusion].
|
||||
// nounif rh:RespHello_t;
|
||||
// attacker(Cresp_hello( *rh ))[conclusion].
|
||||
// nounif v:seed_prec; attacker(prepare_seed(trusted_seed( v )))/6217[hypothesis].
|
||||
// nounif v:seed; attacker(prepare_seed( v ))/6216[hypothesis].
|
||||
// nounif v:seed; attacker(rng_kem_sk( v ))/6215[hypothesis].
|
||||
// nounif v:seed; attacker(rng_key( v ))/6214[hypothesis].
|
||||
// nounif v:key_prec; attacker(prepare_key(trusted_key( v )))/6213[hypothesis].
|
||||
// nounif v:kem_sk_prec; attacker(prepare_kem_sk(trusted_kem_sk( v )))/6212[hypothesis].
|
||||
// nounif v:key; attacker(prepare_key( v ))/6211[hypothesis].
|
||||
// nounif v:kem_sk; attacker(prepare_kem_sk( v ))/6210[hypothesis].
|
||||
@@ -1,29 +0,0 @@
|
||||
#define INITIATOR_TEST 1
|
||||
#define CUSTOM_MAIN 1
|
||||
|
||||
#include "rosenpass/03_identity_hiding.mpv"
|
||||
|
||||
let Oinitiator_bad_actor_inner(sk_tmp:kem_sk_prec) =
|
||||
|
||||
in(C, Cinitiator(sidi, Ssskm, Spsk, Sspkt, Seski, Ssptr));
|
||||
|
||||
#if RANDOMIZED_CALL_IDS
|
||||
new call:Atom;
|
||||
#else
|
||||
call <- Cinitiator(sidi, Ssskm, Spsk, Sspkt, Seski, Ssptr);
|
||||
#endif
|
||||
|
||||
in(C, last_cookie:key);
|
||||
tmpl <- make_trusted_kem_sk(sk_tmp);
|
||||
out(C, setup_kem_sk(tmpl));
|
||||
Oinitiator_inner(sidi, Ssskm, Spsk, tmpl, Seski, Ssptr, last_cookie, C, call).
|
||||
|
||||
let Oinitiator_bad_actor() =
|
||||
Oinitiator_bad_actor_inner(responder1) | Oinitiator_bad_actor_inner(responder2) | Oinitiator_bad_actor_inner(initiator1) | Oinitiator_bad_actor_inner(initiator2).
|
||||
|
||||
|
||||
let identity_hiding_main2() =
|
||||
0 | Oinitiator_bad_actor() | rosenpass_main2() | participants_communication() | phase 1; secretCommunication().
|
||||
|
||||
|
||||
let main = identity_hiding_main2.
|
||||
@@ -1,136 +0,0 @@
|
||||
#define CHAINING_KEY_EVENTS 1
|
||||
#define MESSAGE_TRANSMISSION_EVENTS 0
|
||||
#define SESSION_START_EVENTS 0
|
||||
#define RANDOMIZED_CALL_IDS 0
|
||||
#define COOKIE_EVENTS 1
|
||||
#define KEM_EVENTS 1
|
||||
|
||||
#include "config.mpv"
|
||||
#include "prelude/basic.mpv"
|
||||
#include "crypto/key.mpv"
|
||||
#include "crypto/kem.mpv"
|
||||
#include "rosenpass/handshake_state.mpv"
|
||||
|
||||
/* The cookie data structure is implemented based on the WireGuard protocol.
|
||||
* The ip and port is based purely on the public key and the implementation of the private cookie key is intended to mirror the biscuit key.
|
||||
* The code tests the response to a possible DOS attack by setting up alternative branches for the protocol
|
||||
* processes: Oinit_conf, Oinit_hello and resp_hello to simulate what happens when the responder or initiator is overloaded.
|
||||
* When under heavy load a valid cookie is required. When such a cookie is not present a cookie message is sent as a response.
|
||||
* Queries then test to make sure that expensive KEM operations are only conducted after a cookie has been successfully validated.
|
||||
*/
|
||||
|
||||
type CookieMsg_t.
|
||||
fun CookieMsg(
|
||||
SessionId, // sender
|
||||
bits, // nonce
|
||||
bits // cookie
|
||||
) : CookieMsg_t [data].
|
||||
|
||||
#define COOKIE_EVENTS(eventLbl) \
|
||||
COOKIE_EV(event MCAT(eventLbl, _UnderLoadEV) (SessionId, SessionId, Atom).) \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieValidated) (SessionId, SessionId, Atom).) \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieSent) (SessionId, SessionId, Atom, CookieMsg_t).)
|
||||
|
||||
fun cookie_key(kem_sk) : key [private].
|
||||
fun ip_and_port(kem_pk):bits.
|
||||
letfun create_mac2_key(sskm:kem_sk, spkt:kem_pk) = prf(cookie_key(sskm), ip_and_port(spkt)).
|
||||
letfun create_cookie(sskm:kem_sk, spkm:kem_pk, spkt:kem_pk, nonce:bits, msg:bits) = xaead_enc(lprf2(COOKIE, kem_pk2b(spkm), nonce),
|
||||
k2b(create_mac2_key(sskm, spkm)), msg).
|
||||
|
||||
#define COOKIE_PROCESS(eventLbl, innerFunc) \
|
||||
new nonce:bits; \
|
||||
in(C, Ccookie(mac1, mac2)); \
|
||||
COOKIE_EV(event MCAT(eventLbl, _UnderLoadEV) (sidi, sidr, call);) \
|
||||
msgB <- Envelope(mac1, msg); \
|
||||
mac2_key <- create_mac2_key(sskm, spkt); \
|
||||
if k2b(create_mac2(mac2_key, msgB)) = mac2 then \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieValidated) (sidi, sidr, call);) \
|
||||
innerFunc \
|
||||
else \
|
||||
cookie <- create_cookie(sskm, spkm, spkt, nonce, msg); \
|
||||
cookie_msg <- CookieMsg(sidi, nonce, cookie); \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieSent) (sidi, sidr, call, cookie_msg);) \
|
||||
out(C, cookie_msg). \
|
||||
|
||||
#include "rosenpass/oracles.mpv"
|
||||
|
||||
#include "rosenpass/responder.macro"
|
||||
COOKIE_EVENTS(Oinit_conf)
|
||||
let Oinit_conf_underLoad() =
|
||||
in(C, Cinit_conf(Ssskm, Spsk, Sspkt, ic));
|
||||
in(C, last_cookie:bits);
|
||||
|
||||
msg <- IC2b(ic);
|
||||
let InitConf(sidi, sidr, biscuit, auth) = ic in
|
||||
|
||||
new call:Atom;
|
||||
|
||||
SETUP_HANDSHAKE_STATE()
|
||||
|
||||
COOKIE_PROCESS(Oinit_conf, Oinit_conf_inner(Ssskm, Spsk, Sspkt, ic, call))
|
||||
|
||||
#include "rosenpass/responder.macro"
|
||||
COOKIE_EVENTS(Oinit_hello)
|
||||
let Oinit_hello_underLoad() =
|
||||
|
||||
in(C, Cinit_hello(sidr, biscuit_no, Ssskm, Spsk, Sspkt, Septi, Sspti, ih));
|
||||
in(C, Oinit_hello_last_cookie:key);
|
||||
new call:Atom;
|
||||
|
||||
msg <- IH2b(ih);
|
||||
let InitHello(sidi, epki, sctr, pidic, auth) = ih in
|
||||
SETUP_HANDSHAKE_STATE()
|
||||
|
||||
COOKIE_PROCESS(Oinit_hello, Oinit_hello_inner(sidr, biscuit_no, Ssskm, Spsk, Sspkt, Septi, Sspti, ih, Oinit_hello_last_cookie, C, call))
|
||||
|
||||
let rosenpass_dos_main() = 0
|
||||
| !Oreveal_kem_pk
|
||||
| REP(INITIATOR_BOUND, Oinitiator)
|
||||
| REP(RESPONDER_BOUND, Oinit_hello)
|
||||
| REP(RESPONDER_BOUND, Oinit_conf)
|
||||
| REP(RESPONDER_BOUND, Oinit_hello_underLoad)
|
||||
| REP(RESPONDER_BOUND, Oinit_conf_underLoad).
|
||||
|
||||
let main = rosenpass_dos_main.
|
||||
|
||||
select cookie:CookieMsg_t; attacker(cookie)/6220[hypothesis].
|
||||
nounif v:key; attacker(prepare_key( v ))/6217[hypothesis].
|
||||
nounif v:seed; attacker(prepare_seed( v ))/6216[hypothesis].
|
||||
nounif v:seed; attacker(prepare_seed( v ))/6216[hypothesis].
|
||||
nounif v:seed; attacker(rng_kem_sk( v ))/6215[hypothesis].
|
||||
nounif v:seed; attacker(rng_key( v ))/6214[hypothesis].
|
||||
nounif v:kem_sk; attacker(prepare_kem_sk( v ))/6210[hypothesis].
|
||||
|
||||
// nounif Spk:kem_sk_tmpl;
|
||||
// attacker(Creveal_kem_pk(Spk))/6110[conclusion].
|
||||
// nounif sid:SessionId, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Seski:seed_tmpl, Ssptr:seed_tmpl;
|
||||
// attacker(Cinitiator( *sid, *Ssskm, *Spsk, *Sspkt, *Seski, *Ssptr ))/6109[conclusion].
|
||||
// nounif sid:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Septi:seed_tmpl, Sspti:seed_tmpl, ih:InitHello_t;
|
||||
// attacker(Cinit_hello( *sid, *biscuit_no, *Ssskm, *Spsk, *Sspkt, *Septi, *Sspti, *ih ))/6108[conclusion].
|
||||
nounif rh:RespHello_t;
|
||||
attacker(Cresp_hello( *rh ))/6107[conclusion].
|
||||
nounif Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t;
|
||||
attacker(Cinit_conf( *Ssskm, *Spsk, *Sspkt, *ic ))/6106[conclusion].
|
||||
|
||||
@reachable "DOS protection: cookie sent"
|
||||
query sidi:SessionId, sidr:SessionId, call:Atom, cookieMsg:CookieMsg_t;
|
||||
event (Oinit_hello_CookieSent(sidi, sidr, call, cookieMsg)).
|
||||
|
||||
@lemma "DOS protection: Oinit_hello kem use when under load implies validated cookie"
|
||||
lemma sidi:SessionId, sidr:SessionId, call:Atom;
|
||||
event(Oinit_hello_UnderLoadEV(sidi, sidr, call))
|
||||
&& event(Oinit_hello_KemUse(sidi, sidr, call))
|
||||
==> event(Oinit_hello_CookieValidated(sidi, sidr, call)).
|
||||
|
||||
@lemma "DOS protection: Oinit_conf kem use when under load implies validated cookie"
|
||||
lemma sidi:SessionId, sidr:SessionId, call:Atom;
|
||||
event(Oinit_conf_UnderLoadEV(sidi, sidr, call))
|
||||
&& event(Oinit_conf_KemUse(sidi, sidr, call))
|
||||
==> event(Oinit_conf_CookieValidated(sidi, sidr, call)).
|
||||
|
||||
@lemma "DOS protection: Oresp_hello kem use when under load implies validated cookie"
|
||||
lemma sidi:SessionId, sidr:SessionId, call:Atom;
|
||||
event(Oresp_hello_UnderLoadEV(sidi, sidr, call))
|
||||
&& event(Oresp_hello_KemUse(sidi, sidr, call))
|
||||
==> event(Oresp_hello_CookieValidated(sidi, sidr, call)).
|
||||
|
||||
@@ -1,155 +0,0 @@
|
||||
/*
|
||||
This identity hiding process tests whether the rosenpass protocol is able to protect the identity of an initiator or responder.
|
||||
The participants in the test are trusted initiators, trusted responders and compromised initiators and responders.
|
||||
The test consists of two phases. In the first phase all of the participants can communicate with each other using the rosenpass protocol.
|
||||
An attacker observes the first phase and is able to intercept and modify messages and choose participants to communicate with each other
|
||||
|
||||
In the second phase if the anonymity of an initiator is being tested then one of two trusted initiators is chosen.
|
||||
The chosen initiator communicates directly with a trusted responder.
|
||||
If an attacker can determine which initiator was chosen then the anonymity of the initiator has been compromised.
|
||||
Otherwise the protocol has successfully protected the initiators’ identity.
|
||||
|
||||
If the anonymity of a responder is being tested then one of two trusted responders is chosen instead.
|
||||
Then an initiator communicates directly with the chosen responder.
|
||||
If an attacker can determine which responder was chosen then the anonymity of the responder is compromised.
|
||||
Otherwise the protocol successfully protects the identity of a responder.
|
||||
|
||||
The Proverif code treats the public key as synonymous with identity.
|
||||
In the above test when a responder or initiator is chosen what is actually chosen is the public/private key pair to use for communication.
|
||||
Traditionally when a responder or initiator is chosen they would be chosen randomly.
|
||||
The way Proverif makes a "choice" is by simulating multiple processes, one process per choice
|
||||
Then the processes are compared and if an association between a public key and a process can be made the test fails.
|
||||
As the choice is at least as bad as choosing the worst possible option the credibility of the test is maintained.
|
||||
The drawback is that Proverif is only able to tell if the identity can be brute forced but misses any probabilistic associations.
|
||||
As usual Proverif also assumes perfect encryption and in particular assumes encryption cannot be linked to identity.
|
||||
|
||||
One of the tradeoffs made here is that the choice function in Proverif is slow but this is in favour of being able to write more precise tests.
|
||||
Another issue is the choice function does not work with queries so a test needs to be run for each set of assumptions.
|
||||
In this case the test uses secure rng and a fresh secure biscuit key.
|
||||
*/
|
||||
|
||||
#include "config.mpv"
|
||||
|
||||
#define CHAINING_KEY_EVENTS 1
|
||||
#define MESSAGE_TRANSMISSION_EVENTS 1
|
||||
#define SESSION_START_EVENTS 0
|
||||
#define RANDOMIZED_CALL_IDS 0
|
||||
#undef FULL_MODEL
|
||||
#undef SIMPLE_MODEL
|
||||
#define SIMPLE_MODEL 1
|
||||
|
||||
#include "prelude/basic.mpv"
|
||||
#include "crypto/key.mpv"
|
||||
#include "rosenpass/oracles.mpv"
|
||||
#include "crypto/kem.mpv"
|
||||
|
||||
#define NEW_TRUSTED_SEED(name) \
|
||||
new MCAT(name, _secret_seed):seed_prec; \
|
||||
name <- make_trusted_seed(MCAT(name, _secret_seed)); \
|
||||
|
||||
free D:channel [private].
|
||||
free secure_biscuit_no:Atom [private].
|
||||
free secure_sidi,secure_sidr:SessionId [private].
|
||||
free secure_psk:key [private].
|
||||
free initiator1, initiator2:kem_sk_prec.
|
||||
free responder1, responder2:kem_sk_prec.
|
||||
|
||||
let secure_init_hello(initiator: kem_sk_tmpl, sidi : SessionId, psk: key_tmpl, responder: kem_sk_tmpl) =
|
||||
|
||||
new epkit:kem_pk; // epki
|
||||
new sctrt:bits; // sctr
|
||||
new pidiCt:bits; // pidiC
|
||||
new autht:bits; // auth
|
||||
|
||||
NEW_TRUSTED_SEED(seski_trusted_seed)
|
||||
NEW_TRUSTED_SEED(ssptr_trusted_seed)
|
||||
new last_cookie:key;
|
||||
new call:Atom;
|
||||
|
||||
Oinitiator_inner(sidi, initiator, psk, responder, seski_trusted_seed, ssptr_trusted_seed, last_cookie, D, call).
|
||||
|
||||
let secure_resp_hello(initiator: kem_sk_tmpl, responder: kem_sk_tmpl, sidi:SessionId, sidr:SessionId, biscuit_no:Atom, psk:key_tmpl) =
|
||||
|
||||
in(D, InitHello(=secure_sidi, epki, sctr, pidiC, auth));
|
||||
|
||||
ih <- InitHello(sidi, epki, sctr, pidiC, auth);
|
||||
NEW_TRUSTED_SEED(septi_trusted_seed)
|
||||
NEW_TRUSTED_SEED(sspti_trusted_seed)
|
||||
new last_cookie:key;
|
||||
new call:Atom;
|
||||
|
||||
Oinit_hello_inner(sidr, biscuit_no, responder, psk, initiator, septi_trusted_seed, sspti_trusted_seed, ih, last_cookie, D, call).
|
||||
|
||||
let secure_init_conf(initiator: kem_sk_tmpl, responder: kem_sk_tmpl, psk:key_tmpl, sidi:SessionId, sidr:SessionId) =
|
||||
in(D, InitConf(=sidi, =sidr, biscuit, auth3));
|
||||
ic <- InitConf(sidi,sidr,biscuit, auth3);
|
||||
NEW_TRUSTED_SEED(seski_trusted_seed)
|
||||
NEW_TRUSTED_SEED(ssptr_trusted_seed)
|
||||
new last_cookie:key;
|
||||
call <- Cinit_conf(initiator, psk, responder, ic);
|
||||
|
||||
Oinit_conf_inner(initiator, psk, responder, ic, call).
|
||||
|
||||
let secure_communication(initiator: kem_sk_tmpl, responder:kem_sk_tmpl, key:key) =
|
||||
key_tmpl <- prepare_key(key);
|
||||
(!secure_init_hello(initiator, secure_sidi, key_tmpl, responder))
|
||||
| !secure_resp_hello(initiator, responder, secure_sidi, secure_sidr, secure_biscuit_no, key_tmpl)
|
||||
| !(secure_init_conf(initiator, responder, key_tmpl, secure_sidi, secure_sidr)).
|
||||
|
||||
let participant_communication_initiator(participant:kem_sk_tmpl) =
|
||||
in(C, responder:kem_sk_tmpl);
|
||||
in(C, k:key);
|
||||
secure_communication(participant, responder, k).
|
||||
|
||||
let participant_communication_responder(participant:kem_sk_tmpl) =
|
||||
in(C, initiator:kem_sk_tmpl);
|
||||
in(C, k:key);
|
||||
secure_communication(initiator, participant, k).
|
||||
|
||||
let participants_communication() =
|
||||
initiator1_tmpl <- make_trusted_kem_sk(initiator1);
|
||||
initiator2_tmpl <- make_trusted_kem_sk(initiator2);
|
||||
responder1_tmpl <- make_trusted_kem_sk(responder1);
|
||||
responder2_tmpl <- make_trusted_kem_sk(responder2);
|
||||
|
||||
!participant_communication_initiator(initiator1_tmpl) | !participant_communication_responder(initiator1_tmpl)
|
||||
| !participant_communication_initiator(initiator2_tmpl) | !participant_communication_responder(initiator2_tmpl)
|
||||
| !participant_communication_initiator(responder1_tmpl) | !participant_communication_responder(responder1_tmpl)
|
||||
| !participant_communication_initiator(responder2_tmpl) | !participant_communication_responder(responder2_tmpl).
|
||||
|
||||
let pipeChannel(D:channel, C:channel) =
|
||||
in(D, b:bits);
|
||||
out(C, b).
|
||||
|
||||
let secretCommunication() =
|
||||
|
||||
#ifdef INITIATOR_TEST
|
||||
initiator_seed <- choice[make_trusted_kem_sk(initiator1), make_trusted_kem_sk(initiator2)];
|
||||
#else
|
||||
initiator_seed <- make_trusted_kem_sk(initiator1);
|
||||
#endif
|
||||
#ifdef RESPONDER_TEST
|
||||
responder_seed <- choice[make_trusted_kem_sk(responder1), make_trusted_kem_sk(responder2)];
|
||||
#else
|
||||
responder_seed <- make_trusted_kem_sk(responder1);
|
||||
#endif
|
||||
|
||||
secure_communication(initiator_seed, responder_seed, secure_psk) | !pipeChannel(D, C).
|
||||
|
||||
let reveal_pks() =
|
||||
out(C, setup_kem_pk(make_trusted_kem_sk(responder1)));
|
||||
out(C, setup_kem_pk(make_trusted_kem_sk(responder2)));
|
||||
out(C, setup_kem_pk(make_trusted_kem_sk(initiator1)));
|
||||
out(C, setup_kem_pk(make_trusted_kem_sk(initiator2))).
|
||||
|
||||
let rosenpass_main2() =
|
||||
REP(INITIATOR_BOUND, Oinitiator)
|
||||
| REP(RESPONDER_BOUND, Oinit_hello)
|
||||
| REP(RESPONDER_BOUND, Oinit_conf).
|
||||
|
||||
let identity_hiding_main() =
|
||||
0 | reveal_pks() | rosenpass_main2() | participants_communication() | phase 1; secretCommunication().
|
||||
|
||||
#ifndef CUSTOM_MAIN
|
||||
let main = identity_hiding_main.
|
||||
#endif
|
||||
@@ -1,36 +0,0 @@
|
||||
|
||||
fun cookie_key(kem_sk) : key [private].
|
||||
fun ip_and_port(kem_pk):bits.
|
||||
letfun create_mac2_key(sskm:kem_sk, spkt:kem_pk) = prf(cookie_key(sskm), ip_and_port(spkt)).
|
||||
|
||||
letfun create_cookie(sskm:kem_sk, spkm:kem_pk, spkt:kem_pk, nonce:bits, msg:bits) = xaead_enc(lprf2(COOKIE, kem_pk2b(spkm), nonce),
|
||||
k2b(create_mac2_key(sskm, spkm)), msg).
|
||||
|
||||
type CookieMsg_t.
|
||||
fun CookieMsg(
|
||||
SessionId, // sender
|
||||
bits, // nonce
|
||||
bits // cookie
|
||||
) : CookieMsg_t [data].
|
||||
|
||||
|
||||
#define COOKIE_PROCESS(eventLbl, innerFunc) \
|
||||
in(C, Ccookie(mac1, mac2)); \
|
||||
COOKIE_EV(event MCAT(eventLbl, _UnderLoadEV) (spkm, spkt, last_cookie);) \
|
||||
msgB <- Envelope(mac1, RH2b(rh)); \
|
||||
mac2_key <- create_mac2_key(sskm, spkt) \
|
||||
let RespHello(sidi, sidr, ecti, scti, biscuit, auth) = rh in \
|
||||
if Envelope(mac2_key, msgB) = mac2 then \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieValidated) (spkm, last_cookie);) \
|
||||
innerFunc \
|
||||
else \
|
||||
new nonce:bits; \
|
||||
cookie <- create_cookie(sskm, spkm, spkt, nonce, msg) \
|
||||
cookie_msg <- CookieMsg(sidi, nonce, cookie); \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieSent) (spkm, cookie, cookie_k, cookie_msg);) \
|
||||
out(C, cookie_msg).
|
||||
|
||||
#define COOKIE_EVENTS(eventLbl) \
|
||||
COOKIE_EV(event MCAT(eventLbl, _UnderLoadEV) (kem_pk, kem_pk, bits).) \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieValidated) (kem_pk, bits, key, CookieMsg_t).) \
|
||||
COOKIE_EV(event MCAT(eventLbl, _CookieSent) (kem_pk, bits).)
|
||||
@@ -1,12 +1,11 @@
|
||||
use anyhow::Result;
|
||||
use rosenpass::protocol::{CryptoServer, HandleMsgResult, MsgBuf, PeerPtr, SPk, SSk, SymKey};
|
||||
use std::ops::DerefMut;
|
||||
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_ciphers::kem::StaticKem;
|
||||
use rosenpass::{
|
||||
pqkem::{EphemeralKEM, CCAKEM},
|
||||
protocol::{CcaPk, CcaSk, CryptoServer, HandleMsgResult, MsgBuf, PeerPtr, SymKey},
|
||||
sodium::sodium_init,
|
||||
};
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use rosenpass_secret_memory::secret_policy_try_use_memfd_secrets;
|
||||
|
||||
fn handle(
|
||||
tx: &mut CryptoServer,
|
||||
@@ -39,9 +38,9 @@ fn hs(ini: &mut CryptoServer, res: &mut CryptoServer) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn keygen() -> Result<(SSk, SPk)> {
|
||||
let (mut sk, mut pk) = (SSk::zero(), SPk::zero());
|
||||
StaticKem::keygen(sk.secret_mut(), pk.deref_mut())?;
|
||||
fn keygen() -> Result<(CcaSk, CcaPk)> {
|
||||
let (mut sk, mut pk) = (CcaSk::zero(), CcaPk::zero());
|
||||
CCAKEM::keygen(sk.secret_mut(), pk.secret_mut())?;
|
||||
Ok((sk, pk))
|
||||
}
|
||||
|
||||
@@ -58,16 +57,16 @@ fn make_server_pair() -> Result<(CryptoServer, CryptoServer)> {
|
||||
}
|
||||
|
||||
fn criterion_benchmark(c: &mut Criterion) {
|
||||
secret_policy_try_use_memfd_secrets();
|
||||
sodium_init().unwrap();
|
||||
let (mut a, mut b) = make_server_pair().unwrap();
|
||||
c.bench_function("cca_secret_alloc", |bench| {
|
||||
bench.iter(|| {
|
||||
SSk::zero();
|
||||
CcaSk::zero();
|
||||
})
|
||||
});
|
||||
c.bench_function("cca_public_alloc", |bench| {
|
||||
bench.iter(|| {
|
||||
SPk::zero();
|
||||
CcaPk::zero();
|
||||
})
|
||||
});
|
||||
c.bench_function("keygen", |bench| {
|
||||
@@ -21,16 +21,17 @@ fn generate_man() -> String {
|
||||
// This function is purposely stupid and redundant
|
||||
|
||||
let man = render_man("mandoc", "./doc/rosenpass.1");
|
||||
if let Ok(man) = man {
|
||||
return man;
|
||||
if man.is_ok() {
|
||||
return man.unwrap();
|
||||
}
|
||||
|
||||
let man = render_man("groff", "./doc/rosenpass.1");
|
||||
if let Ok(man) = man {
|
||||
return man;
|
||||
if man.is_ok() {
|
||||
return man.unwrap();
|
||||
}
|
||||
|
||||
"Cannot render manual page. Please visit https://rosenpass.eu/docs/manuals/\n".into()
|
||||
// TODO: Link to online manual here
|
||||
"Cannot render manual page\n".into()
|
||||
}
|
||||
|
||||
fn man() {
|
||||
@@ -1,12 +0,0 @@
|
||||
[package]
|
||||
name = "rosenpass-cipher-traits"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Rosenpass internal traits for cryptographic primitives"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
[dependencies]
|
||||
@@ -1,5 +0,0 @@
|
||||
# Rosenpass internal cryptographic traits
|
||||
|
||||
Rosenpass internal library providing traits for cryptographic primitives.
|
||||
|
||||
This is an internal library; no guarantee is made about its API at this point in time.
|
||||
@@ -1,47 +0,0 @@
|
||||
//! Traits and implementations for Key Encapsulation Mechanisms (KEMs)
|
||||
//!
|
||||
//! KEMs are the interface provided by almost all post-quantum
|
||||
//! secure key exchange mechanisms.
|
||||
//!
|
||||
//! Conceptually KEMs are akin to public-key encryption, but instead of encrypting
|
||||
//! arbitrary data, KEMs are limited to the transmission of keys, randomly chosen during
|
||||
//! encapsulation.
|
||||
//! The [Kem] trait describes the basic API offered by a Key Encapsulation
|
||||
//! Mechanism. Two implementations, `StaticKem` and `EphemeralKem`, are provided elsewhere in the workspace.
|
||||
|
||||
use std::result::Result;
|
||||
|
||||
/// Key Encapsulation Mechanism
|
||||
///
|
||||
/// The KEM interface defines three operations: Key generation, key encapsulation and key
|
||||
/// decapsulation.
|
||||
pub trait Kem {
|
||||
type Error;
|
||||
|
||||
/// Secret Key length
|
||||
const SK_LEN: usize;
|
||||
/// Public Key length
|
||||
const PK_LEN: usize;
|
||||
/// Ciphertext length
|
||||
const CT_LEN: usize;
|
||||
/// Shared Secret length
|
||||
const SHK_LEN: usize;
|
||||
|
||||
/// Generate a keypair consisting of secret key (`sk`) and public key (`pk`)
|
||||
///
|
||||
/// `keygen() -> sk, pk`
|
||||
fn keygen(sk: &mut [u8], pk: &mut [u8]) -> Result<(), Self::Error>;
|
||||
|
||||
/// From a public key (`pk`), generate a shared key (`shk`, for local use)
|
||||
/// and a cipher text (`ct`, to be sent to the owner of the `pk`).
|
||||
///
|
||||
/// `encaps(pk) -> shk, ct`
|
||||
fn encaps(shk: &mut [u8], ct: &mut [u8], pk: &[u8]) -> Result<(), Self::Error>;
|
||||
|
||||
/// From a secret key (`sk`) and a cipher text (`ct`) derive a shared key
|
||||
/// (`shk`)
|
||||
///
|
||||
/// `decaps(sk, ct) -> shk`
|
||||
fn decaps(shk: &mut [u8], sk: &[u8], ct: &[u8]) -> Result<(), Self::Error>;
|
||||
}
|
||||
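A minimal round-trip sketch for this trait, assuming the `StaticKem` re-export from the rosenpass-ciphers crate shown later in this diff; it uses plain heap buffers for brevity, whereas the real code keeps key material in the secret-memory types:

```rust
use rosenpass_cipher_traits::Kem;
use rosenpass_ciphers::kem::StaticKem;

fn kem_round_trip() -> Result<(), <StaticKem as Kem>::Error> {
    // Buffers are sized by the trait's associated constants.
    let (mut sk, mut pk) = (vec![0u8; StaticKem::SK_LEN], vec![0u8; StaticKem::PK_LEN]);
    StaticKem::keygen(&mut sk, &mut pk)?;

    // Sender: derive a shared key and a ciphertext from the peer's public key.
    let (mut shk_tx, mut ct) = (vec![0u8; StaticKem::SHK_LEN], vec![0u8; StaticKem::CT_LEN]);
    StaticKem::encaps(&mut shk_tx, &mut ct, &pk)?;

    // Receiver: recover the same shared key from the secret key and the ciphertext.
    let mut shk_rx = vec![0u8; StaticKem::SHK_LEN];
    StaticKem::decaps(&mut shk_rx, &sk, &ct)?;

    assert_eq!(shk_tx, shk_rx); // production code would compare secrets in constant time
    Ok(())
}
```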
@@ -1,2 +0,0 @@
|
||||
mod kem;
|
||||
pub use kem::Kem;
|
||||
@@ -1,26 +0,0 @@
|
||||
[package]
|
||||
name = "rosenpass-ciphers"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Rosenpass internal ciphers and other cryptographic primitives used by rosenpass."
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
[features]
|
||||
experiment_libcrux = ["dep:libcrux"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
rosenpass-to = { workspace = true }
|
||||
rosenpass-constant-time = { workspace = true }
|
||||
rosenpass-secret-memory = { workspace = true }
|
||||
rosenpass-oqs = { workspace = true }
|
||||
rosenpass-util = { workspace = true }
|
||||
static_assertions = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true }
|
||||
blake2 = { workspace = true }
|
||||
libcrux = { workspace = true, optional = true }
|
||||
@@ -1,5 +0,0 @@
|
||||
# Rosenpass internal cryptographic primitives
|
||||
|
||||
Ciphers and other cryptographic primitives used by rosenpass.
|
||||
|
||||
This is an internal library; no guarantee is made about its API at this point in time.
|
||||
@@ -1,109 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use rosenpass_secret_memory::Secret;
|
||||
use rosenpass_to::To;
|
||||
|
||||
use crate::subtle::incorrect_hmac_blake2b as hash;
|
||||
|
||||
pub use hash::KEY_LEN;
|
||||
|
||||
// TODO Use a proper Dec interface
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct HashDomain([u8; KEY_LEN]);
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct HashDomainNamespace([u8; KEY_LEN]);
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SecretHashDomain(Secret<KEY_LEN>);
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SecretHashDomainNamespace(Secret<KEY_LEN>);
|
||||
|
||||
impl HashDomain {
|
||||
pub fn zero() -> Self {
|
||||
Self([0u8; KEY_LEN])
|
||||
}
|
||||
|
||||
pub fn dup(self) -> HashDomainNamespace {
|
||||
HashDomainNamespace(self.0)
|
||||
}
|
||||
|
||||
pub fn turn_secret(self) -> SecretHashDomain {
|
||||
SecretHashDomain(Secret::from_slice(&self.0))
|
||||
}
|
||||
|
||||
// TODO: Protocol! Use domain separation to ensure that
|
||||
pub fn mix(self, v: &[u8]) -> Result<Self> {
|
||||
Ok(Self(hash::hash(&self.0, v).collect::<[u8; KEY_LEN]>()?))
|
||||
}
|
||||
|
||||
pub fn mix_secret<const N: usize>(self, v: Secret<N>) -> Result<SecretHashDomain> {
|
||||
SecretHashDomain::invoke_primitive(&self.0, v.secret())
|
||||
}
|
||||
|
||||
pub fn into_value(self) -> [u8; KEY_LEN] {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl HashDomainNamespace {
|
||||
pub fn mix(&self, v: &[u8]) -> Result<HashDomain> {
|
||||
Ok(HashDomain(
|
||||
hash::hash(&self.0, v).collect::<[u8; KEY_LEN]>()?,
|
||||
))
|
||||
}
|
||||
|
||||
pub fn mix_secret<const N: usize>(&self, v: Secret<N>) -> Result<SecretHashDomain> {
|
||||
SecretHashDomain::invoke_primitive(&self.0, v.secret())
|
||||
}
|
||||
}
|
||||
|
||||
impl SecretHashDomain {
|
||||
pub fn invoke_primitive(k: &[u8], d: &[u8]) -> Result<SecretHashDomain> {
|
||||
let mut r = SecretHashDomain(Secret::zero());
|
||||
hash::hash(k, d).to(r.0.secret_mut())?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
pub fn zero() -> Self {
|
||||
Self(Secret::zero())
|
||||
}
|
||||
|
||||
pub fn dup(self) -> SecretHashDomainNamespace {
|
||||
SecretHashDomainNamespace(self.0)
|
||||
}
|
||||
|
||||
pub fn danger_from_secret(k: Secret<KEY_LEN>) -> Self {
|
||||
Self(k)
|
||||
}
|
||||
|
||||
pub fn mix(self, v: &[u8]) -> Result<SecretHashDomain> {
|
||||
Self::invoke_primitive(self.0.secret(), v)
|
||||
}
|
||||
|
||||
pub fn mix_secret<const N: usize>(self, v: Secret<N>) -> Result<SecretHashDomain> {
|
||||
Self::invoke_primitive(self.0.secret(), v.secret())
|
||||
}
|
||||
|
||||
pub fn into_secret(self) -> Secret<KEY_LEN> {
|
||||
self.0
|
||||
}
|
||||
|
||||
pub fn into_secret_slice(mut self, v: &[u8], dst: &[u8]) -> Result<()> {
|
||||
hash::hash(v, dst).to(self.0.secret_mut())
|
||||
}
|
||||
}
|
||||
|
||||
impl SecretHashDomainNamespace {
|
||||
pub fn mix(&self, v: &[u8]) -> Result<SecretHashDomain> {
|
||||
SecretHashDomain::invoke_primitive(self.0.secret(), v)
|
||||
}
|
||||
|
||||
pub fn mix_secret<const N: usize>(&self, v: Secret<N>) -> Result<SecretHashDomain> {
|
||||
SecretHashDomain::invoke_primitive(self.0.secret(), v.secret())
|
||||
}
|
||||
|
||||
// TODO: This entire API is not very nice; we need this for biscuits, but
|
||||
// it might be better to extract a special "biscuit"
|
||||
// labeled subkey and reinitialize the chain with this
|
||||
pub fn danger_into_secret(self) -> Secret<KEY_LEN> {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
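To illustrate how these types chain, here is a hedged sketch; the labels and the helper function are hypothetical, and only the `HashDomain`/`SecretHashDomain` calls mirror the API above. A secret-memory policy (e.g. `secret_policy_try_use_memfd_secrets()`) has to be selected before constructing `Secret` values.

```rust
use anyhow::Result;
use rosenpass_ciphers::hash_domain::{HashDomain, SecretHashDomain};
use rosenpass_secret_memory::Secret;

// Hypothetical labels; the protocol uses its own domain-separation strings.
fn derive_chain_key(psk: Secret<32>, peer_id: &[u8]) -> Result<Secret<32>> {
    let ck: SecretHashDomain = HashDomain::zero()
        .mix(b"example-protocol-v1")? // public protocol label
        .mix(peer_id)?                // public per-peer value
        .mix_secret(psk)?;            // mixing a secret switches to SecretHashDomain
    Ok(ck.into_secret())
}
```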
@@ -1,30 +0,0 @@
|
||||
use static_assertions::const_assert;
|
||||
|
||||
pub mod subtle;
|
||||
|
||||
pub const KEY_LEN: usize = 32;
|
||||
const_assert!(KEY_LEN == aead::KEY_LEN);
|
||||
const_assert!(KEY_LEN == xaead::KEY_LEN);
|
||||
const_assert!(KEY_LEN == hash_domain::KEY_LEN);
|
||||
|
||||
/// Authenticated encryption with associated data
|
||||
pub mod aead {
|
||||
#[cfg(not(feature = "libcrux"))]
|
||||
pub use crate::subtle::chacha20poly1305_ietf::{decrypt, encrypt, KEY_LEN, NONCE_LEN, TAG_LEN};
|
||||
#[cfg(feature = "libcrux")]
|
||||
pub use crate::subtle::chacha20poly1305_ietf_libcrux::{decrypt, encrypt, KEY_LEN, NONCE_LEN, TAG_LEN};
|
||||
}
|
||||
|
||||
/// Authenticated encryption with associated data with a constant nonce
|
||||
pub mod xaead {
|
||||
pub use crate::subtle::xchacha20poly1305_ietf::{
|
||||
decrypt, encrypt, KEY_LEN, NONCE_LEN, TAG_LEN,
|
||||
};
|
||||
}
|
||||
|
||||
pub mod hash_domain;
|
||||
|
||||
pub mod kem {
|
||||
pub use rosenpass_oqs::ClassicMceliece460896 as StaticKem;
|
||||
pub use rosenpass_oqs::Kyber512 as EphemeralKem;
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use blake2::digest::crypto_common::generic_array::GenericArray;
|
||||
use blake2::digest::crypto_common::typenum::U32;
|
||||
use blake2::digest::crypto_common::KeySizeUser;
|
||||
use blake2::digest::{FixedOutput, Mac, OutputSizeUser};
|
||||
use blake2::Blake2bMac;
|
||||
|
||||
use rosenpass_to::{ops::copy_slice, with_destination, To};
|
||||
use rosenpass_util::typenum2const;
|
||||
|
||||
type Impl = Blake2bMac<U32>;
|
||||
|
||||
type KeyLen = <Impl as KeySizeUser>::KeySize;
|
||||
type OutLen = <Impl as OutputSizeUser>::OutputSize;
|
||||
|
||||
const KEY_LEN: usize = typenum2const! { KeyLen };
|
||||
const OUT_LEN: usize = typenum2const! { OutLen };
|
||||
|
||||
pub const KEY_MIN: usize = KEY_LEN;
|
||||
pub const KEY_MAX: usize = KEY_LEN;
|
||||
pub const OUT_MIN: usize = OUT_LEN;
|
||||
pub const OUT_MAX: usize = OUT_LEN;
|
||||
|
||||
#[inline]
|
||||
pub fn hash<'a>(key: &'a [u8], data: &'a [u8]) -> impl To<[u8], anyhow::Result<()>> + 'a {
|
||||
with_destination(|out: &mut [u8]| {
|
||||
let mut h = Impl::new_from_slice(key)?;
|
||||
h.update(data);
|
||||
|
||||
// Jesus christ, blake2 crate, your usage of GenericArray might be nice and fancy
|
||||
// but it introduces a ton of complexity. This cost me half an hour just to figure
|
||||
// out the right way to use the imports while allowing for zeroization.
|
||||
// An API based on slices might actually be simpler.
|
||||
let mut tmp = Zeroizing::new([0u8; OUT_LEN]);
|
||||
let tmp = GenericArray::from_mut_slice(tmp.as_mut());
|
||||
h.finalize_into(tmp);
|
||||
copy_slice(tmp.as_ref()).to(out);
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
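A short usage sketch of the destination-passing style used throughout these crates; the 32-byte sizes match the `KEY_LEN`/`OUT_LEN` constants above and the function name is hypothetical:

```rust
use rosenpass_ciphers::subtle::blake2b;
use rosenpass_to::To;

fn keyed_digest(key: &[u8; 32], data: &[u8]) -> anyhow::Result<[u8; 32]> {
    let mut out = [0u8; 32];
    // hash() returns a `To` value; the output buffer is supplied via `.to()`.
    blake2b::hash(key, data).to(&mut out)?;
    Ok(out)
}
```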
@@ -1,43 +0,0 @@
|
||||
use rosenpass_to::ops::copy_slice;
|
||||
use rosenpass_to::To;
|
||||
use rosenpass_util::typenum2const;
|
||||
|
||||
use chacha20poly1305::aead::generic_array::GenericArray;
|
||||
use chacha20poly1305::ChaCha20Poly1305 as AeadImpl;
|
||||
use chacha20poly1305::{AeadCore, AeadInPlace, KeyInit, KeySizeUser};
|
||||
|
||||
pub const KEY_LEN: usize = typenum2const! { <AeadImpl as KeySizeUser>::KeySize };
|
||||
pub const TAG_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::TagSize };
|
||||
pub const NONCE_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::NonceSize };
|
||||
|
||||
#[inline]
|
||||
pub fn encrypt(
|
||||
ciphertext: &mut [u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
ad: &[u8],
|
||||
plaintext: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let nonce = GenericArray::from_slice(nonce);
|
||||
let (ct, mac) = ciphertext.split_at_mut(ciphertext.len() - TAG_LEN);
|
||||
copy_slice(plaintext).to(ct);
|
||||
let mac_value = AeadImpl::new_from_slice(key)?.encrypt_in_place_detached(nonce, ad, ct)?;
|
||||
copy_slice(&mac_value[..]).to(mac);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn decrypt(
|
||||
plaintext: &mut [u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
ad: &[u8],
|
||||
ciphertext: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let nonce = GenericArray::from_slice(nonce);
|
||||
let (ct, mac) = ciphertext.split_at(ciphertext.len() - TAG_LEN);
|
||||
let tag = GenericArray::from_slice(mac);
|
||||
copy_slice(ct).to(plaintext);
|
||||
AeadImpl::new_from_slice(key)?.decrypt_in_place_detached(nonce, ad, plaintext, tag)?;
|
||||
Ok(())
|
||||
}
|
||||
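A hedged round-trip sketch relying only on the signatures and constants in this module; the ciphertext buffer carries the detached tag at its tail, hence the extra `TAG_LEN` bytes. The all-zero nonce is for the sketch only; a nonce must never be reused under the same key.

```rust
use rosenpass_ciphers::aead;

fn aead_round_trip() -> anyhow::Result<()> {
    let key = [0u8; aead::KEY_LEN];
    let nonce = [0u8; aead::NONCE_LEN];
    let (ad, msg) = (b"header".as_slice(), b"attack at dawn".as_slice());

    // Layout: ciphertext || tag
    let mut ct = vec![0u8; msg.len() + aead::TAG_LEN];
    aead::encrypt(&mut ct, &key, &nonce, ad, msg)?;

    let mut pt = vec![0u8; ct.len() - aead::TAG_LEN];
    aead::decrypt(&mut pt, &key, &nonce, ad, &ct)?;
    assert_eq!(pt.as_slice(), msg);
    Ok(())
}
```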
@@ -1,60 +0,0 @@
|
||||
use rosenpass_to::ops::copy_slice;
|
||||
use rosenpass_to::To;
|
||||
|
||||
use zeroize::Zeroize;
|
||||
|
||||
pub const KEY_LEN: usize = 32; // Grrrr! Libcrux, please provide me these constants.
|
||||
pub const TAG_LEN: usize = 16;
|
||||
pub const NONCE_LEN: usize = 12;
|
||||
|
||||
#[inline]
|
||||
pub fn encrypt(
|
||||
ciphertext: &mut [u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
ad: &[u8],
|
||||
plaintext: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let (ciphertext, mac) = ciphertext.split_at_mut(ciphertext.len() - TAG_LEN);
|
||||
|
||||
use libcrux::aead as C;
|
||||
let crux_key = C::Key::Chacha20Poly1305(C::Chacha20Key(key.try_into().unwrap()));
|
||||
let crux_iv = C::Iv(nonce.try_into().unwrap());
|
||||
|
||||
copy_slice(plaintext).to(ciphertext);
|
||||
let crux_tag = libcrux::aead::encrypt(&crux_key, ciphertext, crux_iv, ad).unwrap();
|
||||
copy_slice(crux_tag.as_ref()).to(mac);
|
||||
|
||||
match crux_key {
|
||||
C::Key::Chacha20Poly1305(mut k) => k.0.zeroize(),
|
||||
_ => panic!(),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn decrypt(
|
||||
plaintext: &mut [u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
ad: &[u8],
|
||||
ciphertext: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let (ciphertext, mac) = ciphertext.split_at(ciphertext.len() - TAG_LEN);
|
||||
|
||||
use libcrux::aead as C;
|
||||
let crux_key = C::Key::Chacha20Poly1305(C::Chacha20Key(key.try_into().unwrap()));
|
||||
let crux_iv = C::Iv(nonce.try_into().unwrap());
|
||||
let crux_tag = C::Tag::from_slice(mac).unwrap();
|
||||
|
||||
copy_slice(ciphertext).to(plaintext);
|
||||
libcrux::aead::decrypt(&crux_key, plaintext, crux_iv, ad, &crux_tag).unwrap();
|
||||
|
||||
match crux_key {
|
||||
C::Key::Chacha20Poly1305(mut k) => k.0.zeroize(),
|
||||
_ => panic!(),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
use anyhow::ensure;
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use rosenpass_constant_time::xor;
|
||||
use rosenpass_to::{ops::copy_slice, with_destination, To};
|
||||
|
||||
use crate::subtle::blake2b;
|
||||
|
||||
pub const KEY_LEN: usize = 32;
|
||||
pub const KEY_MIN: usize = KEY_LEN;
|
||||
pub const KEY_MAX: usize = KEY_LEN;
|
||||
pub const OUT_MIN: usize = blake2b::OUT_MIN;
|
||||
pub const OUT_MAX: usize = blake2b::OUT_MAX;
|
||||
|
||||
/// This is a woefully incorrect implementation of hmac_blake2b.
|
||||
/// See <https://github.com/rosenpass/rosenpass/issues/68#issuecomment-1563612222>
|
||||
///
|
||||
/// It accepts 32 byte keys, exclusively.
|
||||
///
|
||||
/// This will be replaced, likely by Keccak, at some point soon.
|
||||
/// <https://github.com/rosenpass/rosenpass/pull/145>
|
||||
#[inline]
|
||||
pub fn hash<'a>(key: &'a [u8], data: &'a [u8]) -> impl To<[u8], anyhow::Result<()>> + 'a {
|
||||
const IPAD: [u8; KEY_LEN] = [0x36u8; KEY_LEN];
|
||||
const OPAD: [u8; KEY_LEN] = [0x5Cu8; KEY_LEN];
|
||||
|
||||
with_destination(|out: &mut [u8]| {
|
||||
// Not bothering with padding; the implementation
|
||||
// uses appropriately sized keys.
|
||||
ensure!(key.len() == KEY_LEN);
|
||||
|
||||
type Key = Zeroizing<[u8; KEY_LEN]>;
|
||||
let mut tmp_key = Key::default();
|
||||
|
||||
copy_slice(key).to(tmp_key.as_mut());
|
||||
xor(&IPAD).to(tmp_key.as_mut());
|
||||
let mut outer_data = Key::default();
|
||||
blake2b::hash(tmp_key.as_ref(), data).to(outer_data.as_mut())?;
|
||||
|
||||
copy_slice(key).to(tmp_key.as_mut());
|
||||
xor(&OPAD).to(tmp_key.as_mut());
|
||||
blake2b::hash(tmp_key.as_ref(), outer_data.as_ref()).to(out)?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
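A brief sketch of how this keyed hash is called; the only extra constraint compared to the plain blake2b wrapper is the fixed 32-byte key enforced by the `ensure!` above:

```rust
use rosenpass_ciphers::subtle::incorrect_hmac_blake2b as hmac;
use rosenpass_to::To;

fn demo() -> anyhow::Result<()> {
    let key = [0x42u8; 32]; // exactly KEY_LEN bytes; nothing else is accepted
    let mut out = [0u8; 32];
    hmac::hash(&key, b"some transcript data").to(&mut out)?;

    // Any other key length is rejected.
    let short_key = [0u8; 16];
    assert!(hmac::hash(&short_key, b"data").to(&mut out).is_err());
    Ok(())
}
```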
@@ -1,7 +0,0 @@
|
||||
pub mod blake2b;
|
||||
#[cfg(not(feature = "libcrux"))]
|
||||
pub mod chacha20poly1305_ietf;
|
||||
#[cfg(feature = "libcrux")]
|
||||
pub mod chacha20poly1305_ietf_libcrux;
|
||||
pub mod incorrect_hmac_blake2b;
|
||||
pub mod xchacha20poly1305_ietf;
|
||||
@@ -1,45 +0,0 @@
|
||||
use rosenpass_to::ops::copy_slice;
|
||||
use rosenpass_to::To;
|
||||
use rosenpass_util::typenum2const;
|
||||
|
||||
use chacha20poly1305::aead::generic_array::GenericArray;
|
||||
use chacha20poly1305::XChaCha20Poly1305 as AeadImpl;
|
||||
use chacha20poly1305::{AeadCore, AeadInPlace, KeyInit, KeySizeUser};
|
||||
|
||||
pub const KEY_LEN: usize = typenum2const! { <AeadImpl as KeySizeUser>::KeySize };
|
||||
pub const TAG_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::TagSize };
|
||||
pub const NONCE_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::NonceSize };
|
||||
|
||||
#[inline]
|
||||
pub fn encrypt(
|
||||
ciphertext: &mut [u8],
|
||||
key: &[u8],
|
||||
nonce: &[u8],
|
||||
ad: &[u8],
|
||||
plaintext: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let nonce = GenericArray::from_slice(nonce);
|
||||
let (n, ct_mac) = ciphertext.split_at_mut(NONCE_LEN);
|
||||
let (ct, mac) = ct_mac.split_at_mut(ct_mac.len() - TAG_LEN);
|
||||
copy_slice(nonce).to(n);
|
||||
copy_slice(plaintext).to(ct);
|
||||
let mac_value = AeadImpl::new_from_slice(key)?.encrypt_in_place_detached(nonce, ad, ct)?;
|
||||
copy_slice(&mac_value[..]).to(mac);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn decrypt(
|
||||
plaintext: &mut [u8],
|
||||
key: &[u8],
|
||||
ad: &[u8],
|
||||
ciphertext: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let (n, ct_mac) = ciphertext.split_at(NONCE_LEN);
|
||||
let (ct, mac) = ct_mac.split_at(ct_mac.len() - TAG_LEN);
|
||||
let nonce = GenericArray::from_slice(n);
|
||||
let tag = GenericArray::from_slice(mac);
|
||||
copy_slice(ct).to(plaintext);
|
||||
AeadImpl::new_from_slice(key)?.decrypt_in_place_detached(nonce, ad, plaintext, tag)?;
|
||||
Ok(())
|
||||
}
|
||||
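For this XChaCha20 variant the nonce is stored inside the message itself, so the buffer layout differs from the plain AEAD module; a sketch under the same assumptions as before (fixed values only for illustration):

```rust
use rosenpass_ciphers::xaead;

fn xaead_round_trip() -> anyhow::Result<()> {
    let key = [0u8; xaead::KEY_LEN];
    let nonce = [0x24u8; xaead::NONCE_LEN]; // 24-byte XChaCha nonce
    let (ad, msg) = (b"hdr".as_slice(), b"biscuit".as_slice());

    // Layout: nonce || ciphertext || tag
    let mut ct = vec![0u8; xaead::NONCE_LEN + msg.len() + xaead::TAG_LEN];
    xaead::encrypt(&mut ct, &key, &nonce, ad, msg)?;

    // decrypt() reads the nonce back out of the ciphertext buffer.
    let mut pt = vec![0u8; msg.len()];
    xaead::decrypt(&mut pt, &key, ad, &ct)?;
    assert_eq!(pt.as_slice(), msg);
    Ok(())
}
```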
@@ -1,22 +0,0 @@
|
||||
[package]
|
||||
name = "rosenpass-constant-time"
|
||||
version = "0.1.0"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Rosenpass internal utilities for constant time crypto implementations"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
constant_time_tests = []
|
||||
|
||||
[dependencies]
|
||||
rosenpass-to = { workspace = true }
|
||||
memsec = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.8.5"
|
||||
@@ -1,5 +0,0 @@
|
||||
# Rosenpass constant time library
|
||||
|
||||
Rosenpass internal library providing basic constant-time operations.
|
||||
|
||||
This is an internal library; no guarantee is made about its API at this point in time.
|
||||
@@ -1,39 +0,0 @@
|
||||
use core::ptr;
|
||||
|
||||
/// Little endian memcmp version of quininer/memsec
|
||||
/// https://github.com/quininer/memsec/blob/bbc647967ff6d20d6dccf1c85f5d9037fcadd3b0/src/lib.rs#L30
|
||||
#[inline(never)]
|
||||
pub unsafe fn memcmp_le(b1: *const u8, b2: *const u8, len: usize) -> i32 {
|
||||
let mut res = 0;
|
||||
for i in 0..len {
|
||||
let diff =
|
||||
i32::from(ptr::read_volatile(b1.add(i))) - i32::from(ptr::read_volatile(b2.add(i)));
|
||||
res = (res & (((diff - 1) & !diff) >> 8)) | diff;
|
||||
}
|
||||
((res - 1) >> 8) + (res >> 8) + 1
|
||||
}
|
||||
|
||||
/// compares two slices of memory content and returns an integer indicating the relationship between
|
||||
/// the slices
|
||||
///
|
||||
/// ## Returns
|
||||
/// - <0 if the first byte that does not match both slices has a lower value in `a` than in `b`
|
||||
/// - 0 if the contents are equal
|
||||
/// - >0 if the first byte that does not match both slices has a higher value in `a` than in `b`
|
||||
///
|
||||
/// ## Leaks
|
||||
/// If the two slices have different lengths, the function panics immediately. This
|
||||
/// effectively leaks the information whether the slices have equal length or not. This is widely
|
||||
/// considered safe.
|
||||
///
|
||||
/// The execution time of the function grows approximately linearly with the length of the input. This is
|
||||
/// considered safe.
|
||||
///
|
||||
/// ## Tests
|
||||
/// For discussion on how to ensure the constant-time execution of this function, see
|
||||
/// <https://github.com/rosenpass/rosenpass/issues/232>
|
||||
#[inline]
|
||||
pub fn compare(a: &[u8], b: &[u8]) -> i32 {
|
||||
assert!(a.len() == b.len());
|
||||
unsafe { memcmp_le(a.as_ptr(), b.as_ptr(), a.len()) }
|
||||
}
|
||||
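A small usage sketch; both inputs must have equal length, and the sign of the result follows the little-endian interpretation described above:

```rust
use rosenpass_constant_time::compare;

fn demo() {
    assert_eq!(compare(b"aaa", b"aaa"), 0);
    assert!(compare(b"aab", b"aac") < 0); // only the last byte differs
    assert!(compare(b"aad", b"aac") > 0);
}
```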
@@ -1,48 +0,0 @@
|
||||
use core::hint::black_box;
|
||||
|
||||
/// Interpret the given slice as a little-endian unsigned integer
|
||||
/// and increment that integer.
|
||||
///
|
||||
/// # Leaks
|
||||
/// TODO: mention here if this function leaks any information, see
|
||||
/// <https://github.com/rosenpass/rosenpass/issues/232>
|
||||
///
|
||||
/// ## Tests
|
||||
/// For discussion on how to ensure the constant-time execution of this function, see
|
||||
/// <https://github.com/rosenpass/rosenpass/issues/232>
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use rosenpass_constant_time::increment as inc;
|
||||
/// use rosenpass_to::To;
|
||||
///
|
||||
/// fn testcase(v: &[u8], correct: &[u8]) {
|
||||
/// let mut v = v.to_owned();
|
||||
/// inc(&mut v);
|
||||
/// assert_eq!(&v, correct);
|
||||
/// }
|
||||
///
|
||||
/// testcase(b"", b"");
|
||||
/// testcase(b"\x00", b"\x01");
|
||||
/// testcase(b"\x01", b"\x02");
|
||||
/// testcase(b"\xfe", b"\xff");
|
||||
/// testcase(b"\xff", b"\x00");
|
||||
/// testcase(b"\x00\x00", b"\x01\x00");
|
||||
/// testcase(b"\x01\x00", b"\x02\x00");
|
||||
/// testcase(b"\xfe\x00", b"\xff\x00");
|
||||
/// testcase(b"\xff\x00", b"\x00\x01");
|
||||
/// testcase(b"\x00\x00\x00\x00\x00\x00", b"\x01\x00\x00\x00\x00\x00");
|
||||
/// testcase(b"\x00\xa3\x00\x77\x00\x00", b"\x01\xa3\x00\x77\x00\x00");
|
||||
/// testcase(b"\xff\xa3\x00\x77\x00\x00", b"\x00\xa4\x00\x77\x00\x00");
|
||||
/// testcase(b"\xff\xff\xff\x77\x00\x00", b"\x00\x00\x00\x78\x00\x00");
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn increment(v: &mut [u8]) {
|
||||
let mut carry = 1u8;
|
||||
for val in v.iter_mut() {
|
||||
let (v, c) = black_box(*val).overflowing_add(black_box(carry));
|
||||
*black_box(val) = v;
|
||||
*black_box(&mut carry) = black_box(black_box(c) as u8);
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
//! constant-time implementations of some primitives
|
||||
//!
|
||||
//! Rosenpass internal library providing basic constant-time operations.
|
||||
//!
|
||||
//! ## TODO
|
||||
//! Figure out methodology to ensure that code is actually constant time, see
|
||||
//! <https://github.com/rosenpass/rosenpass/issues/232>
|
||||
|
||||
mod compare;
|
||||
mod increment;
|
||||
mod memcmp;
|
||||
mod xor;
|
||||
|
||||
pub use compare::compare;
|
||||
pub use increment::increment;
|
||||
pub use memcmp::memcmp;
|
||||
pub use xor::xor;
|
||||
@@ -1,109 +0,0 @@
|
||||
/// compares two slices of memory content and returns whether they are equal
|
||||
///
|
||||
/// ## Leaks
|
||||
/// If the two slices have different lengths, the function returns `false` immediately. This
|
||||
/// effectively leaks the information whether the slices have equal length or not. This is widely
|
||||
/// considered safe.
|
||||
///
|
||||
/// The execution time of the function grows approximately linearly with the length of the input. This is
|
||||
/// considered safe.
|
||||
///
|
||||
/// ## Tests
|
||||
/// [`tests::memcmp_runs_in_constant_time`] runs a statistical test that the equality of the two
|
||||
/// input parameters does not correlate with the run time.
|
||||
///
|
||||
/// For discussion on how to (further) ensure the constant-time execution of this function,
|
||||
/// see <https://github.com/rosenpass/rosenpass/issues/232>
|
||||
#[inline]
|
||||
pub fn memcmp(a: &[u8], b: &[u8]) -> bool {
|
||||
a.len() == b.len() && unsafe { memsec::memeq(a.as_ptr(), b.as_ptr(), a.len()) }
|
||||
}
|
||||
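Usage is a plain boolean equality check; a brief sketch:

```rust
use rosenpass_constant_time::memcmp;

fn demo() {
    assert!(memcmp(b"rosenpass", b"rosenpass"));
    assert!(!memcmp(b"rosenpass", b"rosenbass")); // same length, different content
    assert!(!memcmp(b"rosenpass", b"rp"));        // length mismatch returns false early
}
```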
|
||||
#[cfg(all(test, feature = "constant_time_tests"))]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::thread_rng;
|
||||
use std::time::Instant;
|
||||
|
||||
#[test]
|
||||
/// tests whether [memcmp] actually runs in constant time
|
||||
///
|
||||
/// This test function will run an equal number of comparisons on two different sets of parameters:
|
||||
/// - completely equal slices
|
||||
/// - completely unequal slices.
|
||||
/// All comparisons are executed in a randomized order. The test will fail if one of the
|
||||
/// two sets is checked for equality significantly faster than the other set
|
||||
/// (absolute correlation coefficient ≥ 0.01)
|
||||
fn memcmp_runs_in_constant_time() {
|
||||
// prepare data to compare
|
||||
let n: usize = 1E6 as usize; // number of comparisons to run
|
||||
let len = 1024; // length of each slice passed as parameters to the tested comparison function
|
||||
let a1 = "a".repeat(len);
|
||||
let a2 = a1.clone();
|
||||
let b = "b".repeat(len);
|
||||
|
||||
let a1 = a1.as_bytes();
|
||||
let a2 = a2.as_bytes();
|
||||
let b = b.as_bytes();
|
||||
|
||||
// vector representing all timing tests
|
||||
//
|
||||
// Each element is a tuple of:
|
||||
// 0: whether the test compared two equal slices
|
||||
// 1: the duration needed for the comparison to run
|
||||
let mut tests = (0..n)
|
||||
.map(|i| (i < n / 2, std::time::Duration::ZERO))
|
||||
.collect::<Vec<_>>();
|
||||
tests.shuffle(&mut thread_rng());
|
||||
|
||||
// run comparisons / call function to test
|
||||
for test in tests.iter_mut() {
|
||||
let now = Instant::now();
|
||||
if test.0 {
|
||||
memcmp(a1, a2);
|
||||
} else {
|
||||
memcmp(a1, b);
|
||||
}
|
||||
test.1 = now.elapsed();
|
||||
// println!("eq: {}, elapsed: {:.2?}", test.0, test.1);
|
||||
}
|
||||
|
||||
// sort by execution time and calculate Pearson correlation coefficient
|
||||
tests.sort_by_key(|v| v.1);
|
||||
let tests = tests
|
||||
.iter()
|
||||
.map(|t| (if t.0 { 1_f64 } else { 0_f64 }, t.1.as_nanos() as f64))
|
||||
.collect::<Vec<_>>();
|
||||
// averages
|
||||
let (avg_x, avg_y): (f64, f64) = (
|
||||
tests.iter().map(|t| t.0).sum::<f64>() / n as f64,
|
||||
tests.iter().map(|t| t.1).sum::<f64>() / n as f64,
|
||||
);
|
||||
assert!((avg_x - 0.5).abs() < 1E-12);
|
||||
// standard deviations
|
||||
let sd_x = 0.5;
|
||||
let sd_y = (1_f64 / n as f64
|
||||
* tests
|
||||
.iter()
|
||||
.map(|t| {
|
||||
let difference = t.1 - avg_y;
|
||||
difference * difference
|
||||
})
|
||||
.sum::<f64>())
|
||||
.sqrt();
|
||||
// covariance
|
||||
let cv = 1_f64 / n as f64
|
||||
* tests
|
||||
.iter()
|
||||
.map(|t| (t.0 - avg_x) * (t.1 - avg_y))
|
||||
.sum::<f64>();
|
||||
// Pearson correlation
|
||||
let correlation = cv / (sd_x * sd_y);
|
||||
println!("correlation: {:.6?}", correlation);
|
||||
assert!(
|
||||
correlation.abs() < 0.01,
|
||||
"execution time correlates with result"
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
use core::hint::black_box;
|
||||
use rosenpass_to::{with_destination, To};
|
||||
|
||||
/// Xors the source into the destination
|
||||
///
|
||||
/// # Panics
|
||||
/// If source and destination are of different sizes.
|
||||
///
|
||||
/// # Leaks
|
||||
/// TODO: mention here if this function leaks any information, see
|
||||
/// <https://github.com/rosenpass/rosenpass/issues/232>
|
||||
///
|
||||
/// ## Tests
|
||||
/// For discussion on how to ensure the constant-time execution of this function, see
|
||||
/// <https://github.com/rosenpass/rosenpass/issues/232>
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use rosenpass_constant_time::xor;
|
||||
/// use rosenpass_to::To;
|
||||
/// assert_eq!(
|
||||
/// xor(b"world").to_this(|| b"hello".to_vec()),
|
||||
/// b"\x1f\n\x1e\x00\x0b");
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn xor(src: &[u8]) -> impl To<[u8], ()> + '_ {
|
||||
with_destination(|dst: &mut [u8]| {
|
||||
assert!(black_box(src.len()) == black_box(dst.len()));
|
||||
for (dv, sv) in dst.iter_mut().zip(src.iter()) {
|
||||
*black_box(dv) ^= black_box(*sv);
|
||||
}
|
||||
})
|
||||
}
|
||||
13
doc/check.sh
@@ -1,13 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# We have to filter this STYLE error out, because it is very platform specific
|
||||
OUTPUT=$(mandoc -Tlint "$1" | grep --invert-match "STYLE: referenced manual not found")
|
||||
|
||||
if [ -z "$OUTPUT" ]
|
||||
then
|
||||
exit 0
|
||||
else
|
||||
echo "$1 is malformatted, check mandoc -Tlint $1"
|
||||
echo "$OUTPUT"
|
||||
exit 1
|
||||
fi
|
||||
@@ -12,18 +12,18 @@
|
||||
.Sh DESCRIPTION
|
||||
.Nm
|
||||
performs cryptographic key exchanges that are secure against quantum-computers
|
||||
and then outputs the keys.
|
||||
These keys can then be passed to various services, such as wireguard or other
|
||||
vpn services, as pre-shared-keys to achieve security against attackers with
|
||||
and outputs the keys.
|
||||
These keys can then be passed to various services such as wireguard or other
|
||||
vpn services as pre-shared-keys to achieve security against attackers with
|
||||
quantum computers.
|
||||
.Pp
|
||||
This is a research project and quantum computers are not thought to become
|
||||
practical in fewer than ten years.
|
||||
practical in less than ten years.
|
||||
If you are not specifically tasked with developing post-quantum secure systems,
|
||||
you probably do not need this tool.
|
||||
.Ss COMMANDS
|
||||
.Bl -tag -width Ds
|
||||
.It Ar gen-keys --secret-key <file-path> --public-key <file-path>
|
||||
.It Ar keygen private-key <file-path> public-key <file-path>
|
||||
Generate a keypair to use in the exchange command later.
|
||||
Send the public-key file to your communication partner and keep the private-key
|
||||
file secret!
|
||||
@@ -31,7 +31,7 @@ file secret!
|
||||
Start a process to exchange keys with the specified peers.
|
||||
You should specify at least one peer.
|
||||
.Pp
|
||||
Its
|
||||
It's
|
||||
.Ar OPTIONS
|
||||
are as follows:
|
||||
.Bl -tag -width Ds
|
||||
@@ -39,7 +39,7 @@ are as follows:
|
||||
Instructs
|
||||
.Nm
|
||||
to listen on the specified interface and port.
|
||||
By default,
|
||||
By default
|
||||
.Nm
|
||||
will listen on all interfaces and select a random port.
|
||||
.It Ar verbose
|
||||
@@ -91,24 +91,15 @@ This makes it possible to add peers entirely from
|
||||
.Sh SEE ALSO
|
||||
.Xr rp 1 ,
|
||||
.Xr wg 1
|
||||
.Rs
|
||||
.%A Karolin Varner
|
||||
.%A Benjamin Lipp
|
||||
.%A Wanja Zaeske
|
||||
.%A Lisa Schmidt
|
||||
.%D 2023
|
||||
.%T Rosenpass
|
||||
.%U https://rosenpass.eu/whitepaper.pdf
|
||||
.Re
|
||||
.Sh STANDARDS
|
||||
This tool is the reference implementation of the Rosenpass protocol, as
|
||||
specified within the whitepaper referenced above.
|
||||
This tool is the reference implementation of the Rosenpass protocol, written
|
||||
by Karolin Varner, Benjamin Lipp, Wanja Zaeske, and Lisa Schmidt.
|
||||
.Sh AUTHORS
|
||||
Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
|
||||
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
|
||||
.Pp
|
||||
This manual page was written by
|
||||
.An Clara Engler
|
||||
.An Emil Engler
|
||||
.Sh BUGS
|
||||
The bugs are tracked at
|
||||
.Lk https://github.com/rosenpass/rosenpass/issues .
|
||||
|
||||
6
doc/rp.1
@@ -59,10 +59,6 @@ listening on the provided IP and port combination, allowing connections from
|
||||
.Sh EXIT STATUS
|
||||
.Ex -std
|
||||
.Sh EXAMPLES
|
||||
In this example, we will assume that the server has an interface bound to
|
||||
192.168.0.1, that accepts incoming connections on port 9999/UDP for Rosenpass
|
||||
and port 10000/UDP for WireGuard.
|
||||
.Pp
|
||||
To create a VPN connection, start by generating secret keys on both hosts.
|
||||
.Bd -literal -offset indent
|
||||
rp genkey server.rosenpass-secret
|
||||
@@ -113,7 +109,7 @@ Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
|
||||
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
|
||||
.Pp
|
||||
This manual page was written by
|
||||
.An Clara Engler
|
||||
.An Emil Engler
|
||||
.Sh BUGS
|
||||
The bugs are tracked at
|
||||
.Lk https://github.com/rosenpass/rosenpass/issues .
|
||||
|
||||
44
flake.lock
generated
@@ -2,15 +2,17 @@
|
||||
"nodes": {
|
||||
"fenix": {
|
||||
"inputs": {
|
||||
"nixpkgs": ["nixpkgs"],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1712298178,
|
||||
"narHash": "sha256-590fpCPXYAkaAeBz/V91GX4/KGzPObdYtqsTWzT6AhI=",
|
||||
"lastModified": 1686291735,
|
||||
"narHash": "sha256-mpq2m6TN3ImqqUqA4u93NvkZu5vH//3spqjmPRbRlvA=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "569b5b5781395da08e7064e825953c548c26af76",
|
||||
"rev": "6e6a94c4d0cac4821b6452fbae46609b89a8ddcf",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -24,11 +26,11 @@
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1710146030,
|
||||
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
|
||||
"lastModified": 1685518550,
|
||||
"narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
|
||||
"rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -39,14 +41,16 @@
|
||||
},
|
||||
"naersk": {
|
||||
"inputs": {
|
||||
"nixpkgs": ["nixpkgs"]
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1698420672,
|
||||
"narHash": "sha256-/TdeHMPRjjdJub7p7+w55vyABrsJlt5QkznPYy55vKA=",
|
||||
"lastModified": 1679567394,
|
||||
"narHash": "sha256-ZvLuzPeARDLiQUt6zSZFGOs+HZmE+3g4QURc8mkBsfM=",
|
||||
"owner": "nix-community",
|
||||
"repo": "naersk",
|
||||
"rev": "aeb58d5e8faead8980a807c840232697982d47b9",
|
||||
"rev": "88cd22380154a2c36799fe8098888f0f59861a15",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -57,18 +61,16 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1712168706,
|
||||
"narHash": "sha256-XP24tOobf6GGElMd0ux90FEBalUtw6NkBSVh/RlA6ik=",
|
||||
"lastModified": 1686237827,
|
||||
"narHash": "sha256-fAZB+Zkcmc+qlauiFnIH9+2qgwM0NO/ru5pWEw3tDow=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "1487bdea619e4a7a53a4590c475deabb5a9d1bfb",
|
||||
"rev": "81ed90058a851eb73be835c770e062c6938c8a9e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-23.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
"id": "nixpkgs",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
@@ -82,11 +84,11 @@
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1712156296,
|
||||
"narHash": "sha256-St7ZQrkrr5lmQX9wC1ZJAFxL8W7alswnyZk9d1se3Us=",
|
||||
"lastModified": 1686239338,
|
||||
"narHash": "sha256-c6Mm7UnDf3j3akY3YB3rELFA76QRbB8ttSBsh00LWi0=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "8e581ac348e223488622f4d3003cb2bd412bf27e",
|
||||
"rev": "9c03aa1ac2e67051db83a85baf3cfee902e4dd84",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
209
flake.nix
@@ -1,6 +1,5 @@
|
||||
{
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
|
||||
# for quicker rust builds
|
||||
@@ -30,155 +29,52 @@
|
||||
]
|
||||
(system:
|
||||
let
|
||||
scoped = (scope: scope.result);
|
||||
lib = nixpkgs.lib;
|
||||
|
||||
# normal nixpkgs
|
||||
pkgs = import nixpkgs {
|
||||
inherit system;
|
||||
|
||||
# TODO remove overlay once a fix for
|
||||
# https://github.com/NixOS/nixpkgs/issues/216904 got merged
|
||||
overlays = [
|
||||
(
|
||||
final: prev: {
|
||||
iproute2 = prev.iproute2.overrideAttrs (old:
|
||||
let
|
||||
isStatic = prev.stdenv.hostPlatform.isStatic;
|
||||
in
|
||||
{
|
||||
makeFlags = old.makeFlags ++ prev.lib.optional isStatic [
|
||||
"TC_CONFIG_NO_XT=y"
|
||||
];
|
||||
});
|
||||
}
|
||||
)
|
||||
];
|
||||
};
|
||||
|
||||
# parsed Cargo.toml
|
||||
cargoToml = builtins.fromTOML (builtins.readFile ./rosenpass/Cargo.toml);
|
||||
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
||||
|
||||
# source files relevant for rust
|
||||
src = scoped rec {
|
||||
# File suffices to include
|
||||
extensions = [
|
||||
"lock"
|
||||
"rs"
|
||||
"toml"
|
||||
];
|
||||
# Files to explicitly include
|
||||
files = [
|
||||
"to/README.md"
|
||||
];
|
||||
src = pkgs.lib.sourceByRegex ./. [
|
||||
"Cargo\\.(toml|lock)"
|
||||
"build.rs"
|
||||
"(src|benches)(/.*\\.(rs|md))?"
|
||||
"rp"
|
||||
];
|
||||
|
||||
src = ./.;
|
||||
filter = (path: type: scoped rec {
|
||||
inherit (lib) any id removePrefix hasSuffix;
|
||||
anyof = (any id);
|
||||
|
||||
basename = baseNameOf (toString path);
|
||||
relative = removePrefix (toString src + "/") (toString path);
|
||||
|
||||
result = anyof [
|
||||
(type == "directory")
|
||||
(any (ext: hasSuffix ".${ext}" basename) extensions)
|
||||
(any (file: file == relative) files)
|
||||
];
|
||||
});
|
||||
|
||||
result = pkgs.lib.sources.cleanSourceWith { inherit src filter; };
|
||||
};
|
||||
# builds a bin path for all dependencies for the `rp` shellscript
|
||||
rpBinPath = p: with p; lib.makeBinPath [
|
||||
coreutils
|
||||
findutils
|
||||
gawk
|
||||
wireguard-tools
|
||||
];
|
||||
|
||||
# a function to generate a nix derivation for rosenpass against any
|
||||
# given set of nixpkgs
|
||||
rosenpassDerivation = p:
|
||||
let
|
||||
# whether we want to build a statically linked binary
|
||||
isStatic = p.targetPlatform.isStatic;
|
||||
|
||||
# the rust target of `p`
|
||||
target = p.rust.toRustTargetSpec p.targetPlatform;
|
||||
|
||||
# convert a string to shout case
|
||||
shout = string: builtins.replaceStrings [ "-" ] [ "_" ] (pkgs.lib.toUpper string);
|
||||
|
||||
# suitable Rust toolchain
|
||||
toolchain = with inputs.fenix.packages.${system}; combine [
|
||||
stable.cargo
|
||||
stable.rustc
|
||||
targets.${target}.stable.rust-std
|
||||
];
|
||||
|
||||
# naersk with a custom toolchain
|
||||
naersk = pkgs.callPackage inputs.naersk {
|
||||
cargo = toolchain;
|
||||
rustc = toolchain;
|
||||
};
|
||||
|
||||
# used to trick the build.rs into believing that CMake was ran **again**
|
||||
fakecmake = pkgs.writeScriptBin "cmake" ''
|
||||
#! ${pkgs.stdenv.shell} -e
|
||||
true
|
||||
'';
|
||||
in
|
||||
naersk.buildPackage
|
||||
{
|
||||
# metadata and source
|
||||
name = cargoToml.package.name;
|
||||
version = cargoToml.package.version;
|
||||
inherit src;
|
||||
|
||||
cargoBuildOptions = x: x ++ [ "-p" "rosenpass" ];
|
||||
cargoTestOptions = x: x ++ [ "-p" "rosenpass" ];
|
||||
|
||||
doCheck = true;
|
||||
|
||||
nativeBuildInputs = with pkgs; [
|
||||
p.stdenv.cc
|
||||
cmake # for oqs build in the oqs-sys crate
|
||||
mandoc # for the built-in manual
|
||||
removeReferencesTo
|
||||
rustPlatform.bindgenHook # for C-bindings in the crypto libs
|
||||
];
|
||||
buildInputs = with p; [ bash ];
|
||||
|
||||
override = x: {
|
||||
preBuild =
|
||||
# nix defaults to building for aarch64 _without_ the armv8-a crypto
|
||||
# extensions, but liboqs depends on these
|
||||
(lib.optionalString (system == "aarch64-linux") ''
|
||||
NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -march=armv8-a+crypto"
|
||||
''
|
||||
);
|
||||
|
||||
# fortify is only compatible with dynamic linking
|
||||
hardeningDisable = lib.optional isStatic "fortify";
|
||||
};
|
||||
|
||||
overrideMain = x: {
|
||||
# CMake detects that it was served a _foreign_ target dir, and CMake
|
||||
# would be executed again upon the second build step of naersk.
|
||||
# By adding our specially optimized CMake version, we reduce the cost
|
||||
# of recompilation by 99 % while avoiding any CMake errors.
|
||||
nativeBuildInputs = [ (lib.hiPrio fakecmake) ] ++ x.nativeBuildInputs;
|
||||
|
||||
# make sure that libc is linked, under musl this is not the case per
|
||||
# default
|
||||
preBuild = (lib.optionalString isStatic ''
|
||||
NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -lc"
|
||||
'');
|
||||
};
|
||||
|
||||
# We want to build for a specific target...
|
||||
CARGO_BUILD_TARGET = target;
|
||||
|
||||
# ... which might require a non-default linker:
|
||||
"CARGO_TARGET_${shout target}_LINKER" =
|
||||
let
|
||||
inherit (p.stdenv) cc;
|
||||
in
|
||||
"${cc}/bin/${cc.targetPrefix}cc";
|
||||
|
||||
meta = with pkgs.lib;
|
||||
{
|
||||
inherit (cargoToml.package) description homepage;
|
||||
license = with licenses; [ mit asl20 ];
|
||||
maintainers = [ maintainers.wucke13 ];
|
||||
platforms = platforms.all;
|
||||
};
|
||||
} // (lib.mkIf isStatic {
|
||||
# otherwise pkg-config tries to link non-existent dynamic libs
|
||||
# documented here: https://docs.rs/pkg-config/latest/pkg_config/
|
||||
PKG_CONFIG_ALL_STATIC = true;
|
||||
|
||||
# tell rust to build everything statically linked
|
||||
CARGO_BUILD_RUSTFLAGS = "-C target-feature=+crt-static";
|
||||
});
|
||||
# a function to generate a nix derivation for the rp helper against any
|
||||
# given set of nixpkgs
|
||||
rpDerivation = p:
|
||||
let
|
||||
# whether we want to build a statically linked binary
|
||||
@@ -216,19 +112,18 @@
|
||||
version = cargoToml.package.version;
|
||||
inherit src;
|
||||
|
||||
cargoBuildOptions = x: x ++ [ "-p" "rp" ];
|
||||
cargoTestOptions = x: x ++ [ "-p" "rp" ];
|
||||
|
||||
doCheck = true;
|
||||
|
||||
nativeBuildInputs = with pkgs; [
|
||||
p.stdenv.cc
|
||||
cmake # for oqs build in the oqs-sys crate
|
||||
mandoc # for the built-in manual
|
||||
makeWrapper # for the rp shellscript
|
||||
pkg-config # let libsodium-sys-stable find libsodium
|
||||
removeReferencesTo
|
||||
rustPlatform.bindgenHook # for C-bindings in the crypto libs
|
||||
];
|
||||
buildInputs = with p; [ bash ];
|
||||
buildInputs = with p; [ bash libsodium ];
|
||||
|
||||
override = x: {
|
||||
preBuild =
|
||||
@@ -255,8 +150,18 @@
|
||||
preBuild = (lib.optionalString isStatic ''
|
||||
NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -lc"
|
||||
'');
|
||||
|
||||
preInstall = ''
|
||||
install -D ${./rp} $out/bin/rp
|
||||
wrapProgram $out/bin/rp --prefix PATH : "${ rpBinPath p }"
|
||||
'';
|
||||
};
|
||||
|
||||
# liboqs requires quite a lot of stack memory, thus we adjust
|
||||
# the default stack size picked for new threads (which is used
|
||||
# by `cargo test`) to be _big enough_
|
||||
RUST_MIN_STACK = 8 * 1024 * 1024; # 8 MiB
|
||||
|
||||
# We want to build for a specific target...
|
||||
CARGO_BUILD_TARGET = target;
|
||||
|
||||
@@ -296,8 +201,7 @@
|
||||
rec {
|
||||
packages = rec {
|
||||
default = rosenpass;
|
||||
rosenpass = rosenpassDerivation pkgs;
|
||||
rp = rpDerivation pkgs;
|
||||
rosenpass = rpDerivation pkgs;
|
||||
rosenpass-oci-image = rosenpassOCI "rosenpass";
|
||||
|
||||
# derivation for the release
|
||||
@@ -308,10 +212,6 @@
|
||||
if pkgs.hostPlatform.isLinux then
|
||||
packages.rosenpass-static
|
||||
else packages.rosenpass;
|
||||
rp =
|
||||
if pkgs.hostPlatform.isLinux then
|
||||
packages.rp-static
|
||||
else packages.rp;
|
||||
oci-image =
|
||||
if pkgs.hostPlatform.isLinux then
|
||||
packages.rosenpass-static-oci-image
|
||||
@@ -320,15 +220,14 @@
|
||||
pkgs.runCommandNoCC "lace-result" { }
|
||||
''
|
||||
mkdir {bin,$out}
|
||||
tar -cvf $out/rosenpass-${system}-${version}.tar \
|
||||
-C ${package} bin/rosenpass \
|
||||
-C ${rp} bin/rp
|
||||
cp ${./.}/rp bin/
|
||||
tar -cvf $out/rosenpass-${system}-${version}.tar bin/rp \
|
||||
-C ${package} bin/rosenpass
|
||||
cp ${oci-image} \
|
||||
$out/rosenpass-oci-image-${system}-${version}.tar.gz
|
||||
'';
|
||||
} // (if pkgs.stdenv.isLinux then rec {
|
||||
rosenpass-static = rosenpassDerivation pkgs.pkgsStatic;
|
||||
rp-static = rpDerivation pkgs.pkgsStatic;
|
||||
rosenpass-static = rpDerivation pkgs.pkgsStatic;
|
||||
rosenpass-static-oci-image = rosenpassOCI "rosenpass-static";
|
||||
} else { });
|
||||
}
|
||||
@@ -370,6 +269,7 @@
|
||||
];
|
||||
buildPhase = ''
|
||||
export HOME=$(mktemp -d)
|
||||
export OSFONTDIR="$(kpsewhich --var-value TEXMF)/fonts/{opentype/public/nunito,truetype/google/noto}"
|
||||
latexmk -r tex/CI.rc
|
||||
'';
|
||||
installPhase = ''
|
||||
@@ -390,7 +290,7 @@
|
||||
packages.proof-proverif = pkgs.stdenv.mkDerivation {
|
||||
name = "rosenpass-proverif-proof";
|
||||
version = "unstable";
|
||||
src = pkgs.lib.sources.sourceByRegex ./. [
|
||||
src = pkgs.lib.sourceByRegex ./. [
|
||||
"analyze.sh"
|
||||
"marzipan(/marzipan.awk)?"
|
||||
"analysis(/.*)?"
|
||||
@@ -409,19 +309,20 @@
|
||||
#
|
||||
devShells.default = pkgs.mkShell {
|
||||
inherit (packages.proof-proverif) CRYPTOVERIF_LIB;
|
||||
inherit (packages.rosenpass) RUST_MIN_STACK;
|
||||
inputsFrom = [ packages.default ];
|
||||
nativeBuildInputs = with pkgs; [
|
||||
cmake # override the fakecmake from the main step above
|
||||
cargo-release
|
||||
clippy
|
||||
nodePackages.prettier
|
||||
nushell # for the .ci/gen-workflow-files.nu script
|
||||
rustfmt
|
||||
packages.proverif-patched
|
||||
];
|
||||
};
|
||||
devShells.coverage = pkgs.mkShell {
|
||||
inputsFrom = [ packages.default ];
|
||||
inherit (packages.rosenpass) RUST_MIN_STACK;
|
||||
nativeBuildInputs = with pkgs; [ inputs.fenix.packages.${system}.complete.toolchain cargo-llvm-cov ];
|
||||
};
|
||||
|
||||
@@ -429,7 +330,7 @@
|
||||
checks = {
|
||||
cargo-fmt = pkgs.runCommand "check-cargo-fmt"
|
||||
{ inherit (self.devShells.${system}.default) nativeBuildInputs buildInputs; } ''
|
||||
cargo fmt --manifest-path=${./.}/Cargo.toml --check --all && touch $out
|
||||
cargo fmt --manifest-path=${./.}/Cargo.toml --check && touch $out
|
||||
'';
|
||||
nixpkgs-fmt = pkgs.runCommand "check-nixpkgs-fmt"
|
||||
{ nativeBuildInputs = [ pkgs.nixpkgs-fmt ]; } ''
|
||||
|
||||
@@ -1,115 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Parse command line options
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--mode)
|
||||
mode="$2"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
echo "Unknown option: $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Check if mode is specified
|
||||
if [ -z "$mode" ]; then
|
||||
echo "Please specify the mode using --mode option. Valid modes are 'check' and 'fix'."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Find all Markdown files in the current directory and its subdirectories
|
||||
mapfile -t md_files < <(find . -type f -name "*.md")
|
||||
|
||||
count=0
|
||||
# Iterate through each Markdown file
|
||||
for file in "${md_files[@]}"; do
|
||||
# Use awk to extract Rust code blocks enclosed within triple backticks
|
||||
rust_code_blocks=$(awk '/```rust/{flag=1; next}/```/{flag=0} flag' "$file")
|
||||
|
||||
# Count the number of Rust code blocks
|
||||
num_fences=$(awk '/```rust/{f=1} f{if(/```/){f=0; count++}} END{print count}' "$file")
|
||||
|
||||
if [ -n "$rust_code_blocks" ]; then
|
||||
echo "Processing Rust code in $file"
|
||||
# Iterate through each Rust code block
|
||||
for ((i=1; i <= num_fences ; i++)); do
|
||||
# Extract individual Rust code block using awk
|
||||
current_rust_block=$(awk -v i="$i" '/```rust/{f=1; if (++count == i) next} f&&/```/{f=0;next} f' "$file")
|
||||
# Variable to check if we have added the main function
|
||||
add_main=0
|
||||
# Check if the Rust code block is already inside a function
|
||||
if ! echo "$current_rust_block" | grep -q "fn main()"; then
|
||||
# If not, wrap it in a main function
|
||||
current_rust_block=$'fn main() {\n'"$current_rust_block"$'\n}'
|
||||
add_main=1
|
||||
fi
|
||||
if [ "$mode" == "check" ]; then
|
||||
# Apply changes to the Rust code block
|
||||
formatted_rust_code=$(echo "$current_rust_block" | rustfmt)
|
||||
# Use rustfmt to format the Rust code block, remove first and last lines, and remove the first 4 spaces if added main function
|
||||
if [ "$add_main" == 1 ]; then
|
||||
formatted_rust_code=$(echo "$formatted_rust_code" | sed '1d;$d' | sed 's/^ //')
|
||||
current_rust_block=$(echo "$current_rust_block" | sed '1d;')
|
||||
current_rust_block=$(echo "$current_rust_block" | sed '$d')
|
||||
fi
|
||||
if [ "$formatted_rust_code" == "$current_rust_block" ]; then
|
||||
echo "No changes needed in Rust code block $i in $file"
|
||||
else
|
||||
echo -e "\nChanges needed in Rust code block $i in $file:\n"
|
||||
echo "$formatted_rust_code"
|
||||
count=$((count + 1))
|
||||
fi
|
||||
|
||||
elif [ "$mode" == "fix" ]; then
|
||||
# Replace current_rust_block with formatted_rust_code in the file
|
||||
formatted_rust_code=$(echo "$current_rust_block" | rustfmt)
|
||||
# Use rustfmt to format the Rust code block, remove first and last lines, and remove the first 4 spaces if added main function
|
||||
if [ "$add_main" == 1 ]; then
|
||||
formatted_rust_code=$(echo "$formatted_rust_code" | sed '1d;$d' | sed 's/^ //')
|
||||
current_rust_block=$(echo "$current_rust_block" | sed '1d;')
|
||||
current_rust_block=$(echo "$current_rust_block" | sed '$d')
|
||||
fi
|
||||
# Check if the formatted code is the same as the current Rust code block
|
||||
if [ "$formatted_rust_code" == "$current_rust_block" ]; then
|
||||
echo "No changes needed in Rust code block $i in $file"
|
||||
else
|
||||
echo "Formatting Rust code block $i in $file"
|
||||
# Replace current_rust_block with formatted_rust_code in the file
|
||||
# Use awk to find the line number of the pattern
|
||||
|
||||
start_line=$(grep -n "^\`\`\`rust" "$file" | sed -n "${i}p" | cut -d: -f1)
|
||||
end_line=$(grep -n "^\`\`\`" "$file" | awk -F: -v start_line="$start_line" '$1 > start_line {print $1; exit;}')
|
||||
|
||||
if [ -n "$start_line" ] && [ -n "$end_line" ]; then
|
||||
# Print lines before the Rust code block
|
||||
head -n "$((start_line - 1))" "$file"
|
||||
|
||||
# Print the formatted Rust code block
|
||||
echo "\`\`\`rust"
|
||||
echo "$formatted_rust_code"
|
||||
echo "\`\`\`"
|
||||
|
||||
# Print lines after the Rust code block
|
||||
tail -n +"$((end_line + 1))" "$file"
|
||||
else
|
||||
# Rust code block not found or end line not found
|
||||
cat "$file"
|
||||
fi > tmpfile && mv tmpfile "$file"
|
||||
|
||||
fi
|
||||
else
|
||||
echo "Unknown mode: $mode. Valid modes are 'check' and 'fix'."
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
|
||||
# CI failure if changes are needed
|
||||
if [ $count -gt 0 ]; then
|
||||
echo "CI failed: Changes needed in Rust code blocks."
|
||||
exit 1
|
||||
fi
|
||||
4
fuzz/.gitignore
vendored
@@ -1,4 +0,0 @@
|
||||
target
|
||||
corpus
|
||||
artifacts
|
||||
coverage
|
||||
1286
fuzz/Cargo.lock
generated
File diff suppressed because it is too large
@@ -1,87 +0,0 @@
|
||||
[package]
|
||||
name = "rosenpass-fuzzing"
|
||||
version = "0.0.1"
|
||||
publish = false
|
||||
edition = "2021"
|
||||
|
||||
[features]
|
||||
experiment_libcrux = ["rosenpass-ciphers/experiment_libcrux"]
|
||||
|
||||
[package.metadata]
|
||||
cargo-fuzz = true
|
||||
|
||||
[dependencies]
|
||||
arbitrary = { workspace = true }
|
||||
libfuzzer-sys = { workspace = true }
|
||||
stacker = { workspace = true }
|
||||
rosenpass-secret-memory = { workspace = true }
|
||||
rosenpass-ciphers = { workspace = true }
|
||||
rosenpass-cipher-traits = { workspace = true }
|
||||
rosenpass-to = { workspace = true }
|
||||
rosenpass = { workspace = true }
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_handle_msg"
|
||||
path = "fuzz_targets/handle_msg.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_blake2b"
|
||||
path = "fuzz_targets/blake2b.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_aead_enc_into"
|
||||
path = "fuzz_targets/aead_enc_into.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_mceliece_encaps"
|
||||
path = "fuzz_targets/mceliece_encaps.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_kyber_encaps"
|
||||
path = "fuzz_targets/kyber_encaps.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_box_secret_alloc_malloc"
|
||||
path = "fuzz_targets/box_secret_alloc_malloc.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_vec_secret_alloc_malloc"
|
||||
path = "fuzz_targets/vec_secret_alloc_malloc.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_box_secret_alloc_memfdsec"
|
||||
path = "fuzz_targets/box_secret_alloc_memfdsec.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_vec_secret_alloc_memfdsec"
|
||||
path = "fuzz_targets/vec_secret_alloc_memfdsec.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_box_secret_alloc_memfdsec_mallocfb"
|
||||
path = "fuzz_targets/box_secret_alloc_memfdsec_mallocfb.rs"
|
||||
test = false
|
||||
doc = false
|
||||
|
||||
[[bin]]
|
||||
name = "fuzz_vec_secret_alloc_memfdsec_mallocfb"
|
||||
path = "fuzz_targets/vec_secret_alloc_memfdsec_mallocfb.rs"
|
||||
test = false
|
||||
doc = false
|
||||
@@ -1,29 +0,0 @@
|
||||
#![no_main]
|
||||
extern crate arbitrary;
|
||||
extern crate rosenpass;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
|
||||
use rosenpass_ciphers::aead;
|
||||
|
||||
#[derive(arbitrary::Arbitrary, Debug)]
|
||||
pub struct Input {
|
||||
pub key: [u8; 32],
|
||||
pub nonce: [u8; 12],
|
||||
pub ad: Box<[u8]>,
|
||||
pub plaintext: Box<[u8]>,
|
||||
}
|
||||
|
||||
fuzz_target!(|input: Input| {
|
||||
let mut ciphertext: Vec<u8> = Vec::with_capacity(input.plaintext.len() + 16);
|
||||
ciphertext.resize(input.plaintext.len() + 16, 0);
|
||||
|
||||
aead::encrypt(
|
||||
ciphertext.as_mut_slice(),
|
||||
&input.key,
|
||||
&input.nonce,
|
||||
&input.ad,
|
||||
&input.plaintext,
|
||||
)
|
||||
.unwrap();
|
||||
});
|
||||
@@ -1,20 +0,0 @@
|
||||
#![no_main]
|
||||
extern crate arbitrary;
|
||||
extern crate rosenpass;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
|
||||
use rosenpass_ciphers::subtle::blake2b;
|
||||
use rosenpass_to::To;
|
||||
|
||||
#[derive(arbitrary::Arbitrary, Debug)]
|
||||
pub struct Blake2b {
|
||||
pub key: [u8; 32],
|
||||
pub data: Box<[u8]>,
|
||||
}
|
||||
|
||||
fuzz_target!(|input: Blake2b| {
|
||||
let mut out = [0u8; 32];
|
||||
|
||||
blake2b::hash(&input.key, &input.data).to(&mut out).unwrap();
|
||||
});
|
||||
@@ -1,12 +0,0 @@
|
||||
#![no_main]
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
use rosenpass_secret_memory::alloc::secret_box;
|
||||
use rosenpass_secret_memory::policy::*;
|
||||
use std::sync::Once;
|
||||
static ONCE: Once = Once::new();
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
ONCE.call_once(secret_policy_use_only_malloc_secrets);
|
||||
let _ = secret_box(data);
|
||||
});
|
||||
@@ -1,13 +0,0 @@
|
||||
#![no_main]
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
use rosenpass_secret_memory::alloc::secret_box;
|
||||
use rosenpass_secret_memory::policy::*;
|
||||
use std::sync::Once;
|
||||
|
||||
static ONCE: Once = Once::new();
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
ONCE.call_once(secret_policy_use_only_memfd_secrets);
|
||||
let _ = secret_box(data);
|
||||
});
|
||||
@@ -1,13 +0,0 @@
|
||||
#![no_main]
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
use rosenpass_secret_memory::alloc::secret_box;
|
||||
use rosenpass_secret_memory::policy::*;
|
||||
use std::sync::Once;
|
||||
|
||||
static ONCE: Once = Once::new();
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
ONCE.call_once(secret_policy_try_use_memfd_secrets);
|
||||
let _ = secret_box(data);
|
||||
});
|
||||
@@ -1,24 +0,0 @@
|
||||
#![no_main]
|
||||
extern crate rosenpass;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
|
||||
use rosenpass::protocol::CryptoServer;
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_ciphers::kem::StaticKem;
|
||||
use rosenpass_secret_memory::policy::*;
|
||||
use rosenpass_secret_memory::{PublicBox, Secret};
|
||||
use std::sync::Once;
|
||||
|
||||
static ONCE: Once = Once::new();
|
||||
fuzz_target!(|rx_buf: &[u8]| {
|
||||
ONCE.call_once(secret_policy_use_only_malloc_secrets);
|
||||
let sk = Secret::from_slice(&[0; StaticKem::SK_LEN]);
|
||||
let pk = PublicBox::from_slice(&[0; StaticKem::PK_LEN]);
|
||||
|
||||
let mut cs = CryptoServer::new(sk, pk);
|
||||
let mut tx_buf = [0; 10240];
|
||||
|
||||
// We expect errors while fuzzing, so we do not check the result.
|
||||
let _ = cs.handle_msg(rx_buf, &mut tx_buf);
|
||||
});
|
||||
@@ -1,20 +0,0 @@
|
||||
#![no_main]
|
||||
extern crate arbitrary;
|
||||
extern crate rosenpass;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_ciphers::kem::EphemeralKem;
|
||||
|
||||
#[derive(arbitrary::Arbitrary, Debug)]
|
||||
pub struct Input {
|
||||
pub pk: [u8; EphemeralKem::PK_LEN],
|
||||
}
|
||||
|
||||
fuzz_target!(|input: Input| {
|
||||
let mut ciphertext = [0u8; EphemeralKem::CT_LEN];
|
||||
let mut shared_secret = [0u8; EphemeralKem::SHK_LEN];
|
||||
|
||||
EphemeralKem::encaps(&mut shared_secret, &mut ciphertext, &input.pk).unwrap();
|
||||
});
|
||||
@@ -1,15 +0,0 @@
|
||||
#![no_main]
|
||||
extern crate rosenpass;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_ciphers::kem::StaticKem;
|
||||
|
||||
fuzz_target!(|input: [u8; StaticKem::PK_LEN]| {
|
||||
let mut ciphertext = [0u8; StaticKem::CT_LEN];
|
||||
let mut shared_secret = [0u8; StaticKem::SHK_LEN];
|
||||
|
||||
// We expect errors while fuzzing therefore we do not check the result.
|
||||
let _ = StaticKem::encaps(&mut shared_secret, &mut ciphertext, &input);
|
||||
});
|
||||
@@ -1,15 +0,0 @@
|
||||
#![no_main]
|
||||
|
||||
use std::sync::Once;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
use rosenpass_secret_memory::alloc::secret_vec;
|
||||
use rosenpass_secret_memory::policy::*;
|
||||
|
||||
static ONCE: Once = Once::new();
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
ONCE.call_once(secret_policy_use_only_malloc_secrets);
|
||||
let mut vec = secret_vec();
|
||||
vec.extend_from_slice(data);
|
||||
});
|
||||
@@ -1,15 +0,0 @@
|
||||
#![no_main]
|
||||
|
||||
use std::sync::Once;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
use rosenpass_secret_memory::alloc::secret_vec;
|
||||
use rosenpass_secret_memory::policy::*;
|
||||
|
||||
static ONCE: Once = Once::new();
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
ONCE.call_once(secret_policy_use_only_memfd_secrets);
|
||||
let mut vec = secret_vec();
|
||||
vec.extend_from_slice(data);
|
||||
});
|
||||
@@ -1,15 +0,0 @@
|
||||
#![no_main]
|
||||
|
||||
use std::sync::Once;
|
||||
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
use rosenpass_secret_memory::alloc::secret_vec;
|
||||
use rosenpass_secret_memory::policy::*;
|
||||
|
||||
static ONCE: Once = Once::new();
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
ONCE.call_once(secret_policy_try_use_memfd_secrets);
|
||||
let mut vec = secret_vec();
|
||||
vec.extend_from_slice(data);
|
||||
});
|
||||
@@ -1,40 +0,0 @@
|
||||
# Additional files
|
||||
|
||||
This folder contains additional files that are used in the project.
|
||||
|
||||
## `generate_configs.py`
|
||||
|
||||
The script generates configuration files for a benchmark setup consisting of a device under test (DUT) and automatic test equipment (ATE), essentially a powerful machine capable of running multiple Rosenpass instances at once.
|
||||
|
||||
At the top of the script, several variables can be set to configure the DUT IP address and other parameters. Once configured, run `python3 generate_configs.py` to create the configuration files.
|
||||
|
||||
A new folder called `output/` is created, containing the subfolders `dut/` and `ate/`. The former has to be copied to the DUT, ideally reproducible hardware such as a Raspberry Pi, while the latter is copied to the ATE, e.g. a laptop.
|
||||
|
||||
### Running a benchmark
|
||||
|
||||
On the ATE a run script is required since multiple instances of `rosenpass` are
|
||||
started with different configurations in parallel. The scripts are named after
|
||||
the number of instances they start, e.g. `run-50.sh` starts 50 instances.
|
||||
|
||||
```shell
|
||||
# on the ATE aka laptop
|
||||
cd output/ate
|
||||
./run-10.sh
|
||||
```
|
||||
|
||||
On the DUT you start a single Rosenpass instance with the configuration matching the number of peers started on the ATE.
|
||||
|
||||
```shell
|
||||
# on the DUT aka Raspberry Pi
|
||||
rosenpass exchange-config configs/dut-10.toml
|
||||
```
|
||||
|
||||
Use whatever measurement tool you like to monitor the DUT and ATE.
|
||||
@@ -1,105 +0,0 @@
|
||||
from pathlib import Path
|
||||
from subprocess import run
|
||||
|
||||
|
||||
config = dict(
|
||||
peer_counts=[1, 5, 10, 50, 100, 500],
|
||||
peer_count_max=100,
|
||||
ate_ip="192.168.2.1",
|
||||
dut_ip="192.168.2.4",
|
||||
dut_port=9999,
|
||||
path_to_rosenpass_bin="/Users/user/src/rosenppass/rosenpass/target/debug/rosenpass",
|
||||
)
|
||||
|
||||
print(config)
|
||||
|
||||
output_dir = Path("output")
|
||||
output_dir.mkdir(exist_ok=True)
|
||||
|
||||
template_dut = """
|
||||
public_key = "keys/dut-public-key"
|
||||
secret_key = "keys/dut-secret-key"
|
||||
listen = ["{dut_ip}:{dut_port}"]
|
||||
verbosity = "Quiet"
|
||||
"""
|
||||
template_dut_peer = """
|
||||
[[peers]] # ATE-{i}
|
||||
public_key = "keys/ate-{i}-public-key"
|
||||
endpoint = "{ate_ip}:{ate_port}"
|
||||
key_out = "out/key_out_{i}"
|
||||
"""
|
||||
|
||||
template_ate = """
|
||||
public_key = "keys/ate-{i}-public-key"
|
||||
secret_key = "keys/ate-{i}-secret-key"
|
||||
listen = ["{ate_ip}:{ate_port}"]
|
||||
verbosity = "Quiet"
|
||||
|
||||
[[peers]] # DUT
|
||||
public_key = "keys/dut-public-key"
|
||||
endpoint = "{dut_ip}:{dut_port}"
|
||||
key_out = "out/key_out_{i}"
|
||||
"""
|
||||
|
||||
(output_dir / "dut" / "keys").mkdir(exist_ok=True, parents=True)
|
||||
(output_dir / "dut" / "out").mkdir(exist_ok=True, parents=True)
|
||||
(output_dir / "dut" / "configs").mkdir(exist_ok=True, parents=True)
|
||||
(output_dir / "ate" / "keys").mkdir(exist_ok=True, parents=True)
|
||||
(output_dir / "ate" / "out").mkdir(exist_ok=True, parents=True)
|
||||
(output_dir / "ate" / "configs").mkdir(exist_ok=True, parents=True)
|
||||
|
||||
for peer_count in config["peer_counts"]:
|
||||
dut_config = template_dut.format(**config)
|
||||
for i in range(peer_count):
|
||||
dut_config += template_dut_peer.format(**config, i=i, ate_port=50000 + i)
|
||||
|
||||
(output_dir / "dut" / "configs" / f"dut-{peer_count}.toml").write_text(dut_config)
|
||||
|
||||
if not (output_dir / "dut" / "keys" / "dut-public-key").exists():
|
||||
print("Generate DUT keys")
|
||||
run(
|
||||
[
|
||||
config["path_to_rosenpass_bin"],
|
||||
"gen-keys",
|
||||
f"configs/dut-{peer_count}.toml",
|
||||
],
|
||||
cwd=output_dir / "dut",
|
||||
)
|
||||
else:
|
||||
print("DUT keys already exist")
|
||||
|
||||
# copy the DUT public key to the ATE
|
||||
(output_dir / "ate" / "keys" / "dut-public-key").write_bytes(
|
||||
(output_dir / "dut" / "keys" / "dut-public-key").read_bytes()
|
||||
)
|
||||
|
||||
ate_script = "(trap 'kill 0' SIGINT; \\\n"
|
||||
|
||||
for i in range(config["peer_count_max"]):
|
||||
(output_dir / "ate" / "configs" / f"ate-{i}.toml").write_text(
|
||||
template_ate.format(**config, i=i, ate_port=50000 + i)
|
||||
)
|
||||
|
||||
if not (output_dir / "ate" / "keys" / f"ate-{i}-public-key").exists():
|
||||
# generate ATE keys
|
||||
run(
|
||||
[config["path_to_rosenpass_bin"], "gen-keys", f"configs/ate-{i}.toml"],
|
||||
cwd=output_dir / "ate",
|
||||
)
|
||||
else:
|
||||
print(f"ATE-{i} keys already exist")
|
||||
|
||||
# copy the ATE public keys to the DUT
|
||||
(output_dir / "dut" / "keys" / f"ate-{i}-public-key").write_bytes(
|
||||
(output_dir / "ate" / "keys" / f"ate-{i}-public-key").read_bytes()
|
||||
)
|
||||
|
||||
ate_script += (
|
||||
f"{config['path_to_rosenpass_bin']} exchange-config configs/ate-{i}.toml & \\\n"
|
||||
)
|
||||
|
||||
if (i + 1) in config["peer_counts"]:
|
||||
write_script = ate_script
|
||||
write_script += "wait)"
|
||||
|
||||
(output_dir / "ate" / f"run-{i+1}.sh").write_text(write_script)
|
||||
@@ -1,16 +0,0 @@
|
||||
[package]
|
||||
name = "rosenpass-oqs"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Rosenpass internal bindings to liboqs"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
[dependencies]
|
||||
rosenpass-cipher-traits = { workspace = true }
|
||||
rosenpass-util = { workspace = true }
|
||||
oqs-sys = { workspace = true }
|
||||
paste = { workspace = true }
|
||||
@@ -1,5 +0,0 @@
|
||||
# Rosenpass internal liboqs bindings
|
||||
|
||||
Rosenpass internal library providing bindings to liboqs.
|
||||
|
||||
This is an internal library; no guarantees are made about its API at this point in time.
|
||||
@@ -1,80 +0,0 @@
|
||||
macro_rules! oqs_kem {
|
||||
($name:ident) => { ::paste::paste!{
|
||||
mod [< $name:snake >] {
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_util::result::Guaranteed;
|
||||
|
||||
pub enum [< $name:camel >] {}
|
||||
|
||||
/// # Panic & Safety
|
||||
///
|
||||
/// This Trait impl calls unsafe [oqs_sys] functions, that write to byte
|
||||
/// slices only identified using raw pointers. It must be ensured that the raw
|
||||
/// pointers point into byte slices of sufficient length, to avoid UB through
|
||||
/// overwriting of arbitrary data. This is ensured through assertions in the
|
||||
/// implementation.
|
||||
///
|
||||
/// __Note__: This requirement is stricter than necessary, it would suffice
|
||||
/// to only check that the buffers are big enough, allowing them to be even
|
||||
/// bigger. However, from a correctness point of view it does not make sense to
|
||||
/// allow bigger buffers.
|
||||
impl Kem for [< $name:camel >] {
|
||||
type Error = ::std::convert::Infallible;
|
||||
|
||||
const SK_LEN: usize = ::oqs_sys::kem::[<OQS_KEM _ $name:snake _ length_secret_key >] as usize;
|
||||
const PK_LEN: usize = ::oqs_sys::kem::[<OQS_KEM _ $name:snake _ length_public_key >] as usize;
|
||||
const CT_LEN: usize = ::oqs_sys::kem::[<OQS_KEM _ $name:snake _ length_ciphertext >] as usize;
|
||||
const SHK_LEN: usize = ::oqs_sys::kem::[<OQS_KEM _ $name:snake _ length_shared_secret >] as usize;
|
||||
|
||||
fn keygen(sk: &mut [u8], pk: &mut [u8]) -> Guaranteed<()> {
|
||||
assert_eq!(sk.len(), Self::SK_LEN);
|
||||
assert_eq!(pk.len(), Self::PK_LEN);
|
||||
unsafe {
|
||||
oqs_call!(
|
||||
::oqs_sys::kem::[< OQS_KEM _ $name:snake _ keypair >],
|
||||
pk.as_mut_ptr(),
|
||||
sk.as_mut_ptr()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn encaps(shk: &mut [u8], ct: &mut [u8], pk: &[u8]) -> Guaranteed<()> {
|
||||
assert_eq!(shk.len(), Self::SHK_LEN);
|
||||
assert_eq!(ct.len(), Self::CT_LEN);
|
||||
assert_eq!(pk.len(), Self::PK_LEN);
|
||||
unsafe {
|
||||
oqs_call!(
|
||||
::oqs_sys::kem::[< OQS_KEM _ $name:snake _ encaps >],
|
||||
ct.as_mut_ptr(),
|
||||
shk.as_mut_ptr(),
|
||||
pk.as_ptr()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn decaps(shk: &mut [u8], sk: &[u8], ct: &[u8]) -> Guaranteed<()> {
|
||||
assert_eq!(shk.len(), Self::SHK_LEN);
|
||||
assert_eq!(sk.len(), Self::SK_LEN);
|
||||
assert_eq!(ct.len(), Self::CT_LEN);
|
||||
unsafe {
|
||||
oqs_call!(
|
||||
::oqs_sys::kem::[< OQS_KEM _ $name:snake _ decaps >],
|
||||
shk.as_mut_ptr(),
|
||||
ct.as_ptr(),
|
||||
sk.as_ptr()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
pub use [< $name:snake >] :: [< $name:camel >];
|
||||
}}
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
macro_rules! oqs_call {
|
||||
($name:path, $($args:expr),*) => {{
|
||||
use oqs_sys::common::OQS_STATUS::*;
|
||||
|
||||
match $name($($args),*) {
|
||||
OQS_SUCCESS => {}, // nop
|
||||
OQS_EXTERNAL_LIB_ERROR_OPENSSL => {
|
||||
panic!("OpenSSL error in liboqs' {}.", stringify!($name));
|
||||
},
|
||||
OQS_ERROR => {
|
||||
panic!("Unknown error in liboqs' {}.", stringify!($name));
|
||||
}
|
||||
}
|
||||
}};
|
||||
($name:ident) => { oqs_call!($name, ) };
|
||||
}
|
||||
|
||||
#[macro_use]
|
||||
mod kem_macro;
|
||||
oqs_kem!(kyber_512);
|
||||
oqs_kem!(classic_mceliece_460896);
|
||||
@@ -177,11 +177,7 @@ version={4.0},
|
||||
\titlehead{\centerline{\includegraphics[width=4cm]{RosenPass-Logo}}}
|
||||
\title{\inserttitle}
|
||||
}
|
||||
\ifx\csname insertauthor\endcsname\relax
|
||||
\author{}
|
||||
\else
|
||||
\author{\parbox{\linewidth}{\centering\insertauthor}}
|
||||
\fi
|
||||
\author{\csname insertauthor\endcsname}
|
||||
\subject{\csname insertsubject\endcsname}
|
||||
\date{\vspace{-1cm}}
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ author:
|
||||
- Benjamin Lipp = Max Planck Institute for Security and Privacy (MPI-SP)
|
||||
- Wanja Zaeske
|
||||
- Lisa Schmidt = {Scientific Illustrator – \\url{mullana.de}}
|
||||
- Prabhpreet Dua
|
||||
abstract: |
|
||||
Rosenpass is used to create post-quantum-secure VPNs. Rosenpass computes a shared key; WireGuard (WG) [@wg] uses the shared key to establish a secure connection. Rosenpass can also be used without WireGuard, deriving post-quantum-secure symmetric keys for another application. The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks.
|
||||
|
||||
@@ -219,7 +218,6 @@ The server needs to store the following variables:
|
||||
* `spkm`
|
||||
* `biscuit_key` – Randomly chosen key used to encrypt biscuits
|
||||
* `biscuit_ctr` – Retransmission protection for biscuits
|
||||
* `cookie_secret` – A randomized cookie secret used to derive cookies sent to a peer when under load. This secret changes every 120 seconds
|
||||
|
||||
Not mandated per se, but required in practice:
|
||||
|
||||
@@ -245,7 +243,6 @@ The initiator stores the following local state for each ongoing handshake:
|
||||
* `ck` – The chaining key
|
||||
* `eski` – The initiator's ephemeral secret key
|
||||
* `epki` – The initiator's ephemeral public key
|
||||
* `cookie_value` – Cookie value sent by a peer under load, used to compute the cookie field in outgoing handshakes to that peer. This value expires 120 seconds from when the peer sends it using the CookieReply message
|
||||
|
||||
The responder stores no state. While the responder has access to all of the above variables except for `eski`, the responder discards them after generating the RespHello message. Instead, the responder state is contained inside a cookie called a biscuit. This value is returned to the responder inside the InitConf packet. The biscuit consists of:
|
||||
|
||||
@@ -431,92 +428,12 @@ The responder code handling InitConf needs to deal with the biscuits and package
|
||||
|
||||
ICR5 and ICR6 perform biscuit replay protection using the biscuit number. This is not handled in `load_biscuit()` itself because there is the case that `biscuit_no = biscuit_used` which needs to be dealt with for retransmission handling.
|
||||
|
||||
### Denial of Service Mitigation and Cookies
|
||||
|
||||
Rosenpass derives its cookie-based DoS mitigation technique, used by a responder when receiving InitHello messages, from WireGuard [@wg].
|
||||
|
||||
When the responder is under load, it may choose to not process further InitHello handshake messages, but instead to respond with a cookie reply message (see Figure \ref{img:MessageTypes}).
|
||||
|
||||
The sender then uses this cookie to resend the message and have it accepted by the receiver the next time.
|
||||
|
||||
For an initiator, Rosenpass ignores all incoming messages while under load.
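To summarize the behaviour described above and in the following subsections, the sketch below shows how a responder might dispatch an incoming InitHello. It is purely illustrative and not part of the specification; the helper names `is_under_load`, `cookie_is_valid`, `send_cookie_reply` and `process_init_hello` are placeholders, not identifiers defined by this whitepaper.

```pseudorust
// Illustrative sketch only – dispatching an incoming InitHello on the responder
if responder.is_under_load() && !cookie_is_valid(envelope) {
    // Do not spend resources on the handshake; hand the sender a cookie instead.
    send_cookie_reply(envelope);
} else {
    // Either the responder is not under load or the sender presented a valid cookie.
    process_init_hello(envelope);
}
```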
|
||||
|
||||
#### Cookie Reply Message
|
||||
|
||||
The cookie reply message is sent by the responder on receiving an InitHello message while under load. It consists of the initiator's `sidi`, a random 24-byte bitstring `nonce`, and a `cookie_encrypted` reply field, which encrypts `cookie_value` as follows:
|
||||
|
||||
```pseudorust
|
||||
cookie_value = lhash("cookie-value", cookie_secret, initiator_host_info)[0..16]
|
||||
cookie_encrypted = XAEAD(lhash("cookie-key", spkm), nonce, cookie_value, mac_peer)
|
||||
```
|
||||
|
||||
where `cookie_secret` is a secret variable that changes to a random value every two minutes. `initiator_host_info` is used to identify the initiator host and is implementation-specific for the client. The parameters used to identify the host must be chosen carefully to ensure a unique mapping, especially when using IPv4 and IPv6 addresses to identify the host (for instance, taking care of IPv6 link-local addresses). `cookie_value` is a 16-byte value truncated from the above hash operation. `mac_peer` is the `mac` field of the peer's handshake message to which this message is the reply.
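Since `initiator_host_info` is implementation-specific, no particular encoding is mandated; as a purely hypothetical example (the helpers below are not defined by this whitepaper), an implementation could use the canonicalized source address and port of the InitHello packet:

```pseudorust
// Hypothetical example of initiator_host_info – not mandated by this whitepaper
initiator_host_info = concat(canonicalize_ip(initiator.ip), initiator.port.to_be_bytes())
```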
|
||||
|
||||
#### Envelope `mac` Field
|
||||
|
||||
Similar to `mac.1` in WireGuard handshake messages, the `mac` field of a Rosenpass envelope, from a handshake packet sender's point of view, consists of the following:
|
||||
|
||||
```pseudorust
|
||||
mac = lhash("mac", spkt, MAC_WIRE_DATA)[0..16]
|
||||
```
|
||||
|
||||
where `MAC_WIRE_DATA` represents all bytes of the message prior to the `mac` field in the envelope.
|
||||
|
||||
If a client receives an invalid `mac` value for any message, it will discard the message.
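As an illustration, the receiver recomputes the same value over the received bytes – from the receiver's point of view, `spkt` is its own static public key, i.e. `spkm` – and compares the result in constant time. This is a sketch, not normative pseudocode, and `constant_time_equal`/`discard` are placeholder names:

```pseudorust
// Sketch: receiver-side verification of the envelope mac field
expected_mac = lhash("mac", spkm, MAC_WIRE_DATA)[0..16]
if !constant_time_equal(envelope.mac, expected_mac) {
    discard(envelope)
}
```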
|
||||
|
||||
#### Envelope `cookie` Field
|
||||
|
||||
The initiator, on receiving a CookieReply message, decrypts `cookie_encrypted` and stores the `cookie_value` for the session in `peer[sid].cookie_value` for a limited time (120 seconds). This value is then used to set the `cookie` field for subsequent messages and retransmissions to the responder as follows:
|
||||
|
||||
```pseudorust
|
||||
if (peer.cookie_value.is_none() || seconds_since_update(peer[sid].cookie_value) >= 120) {
|
||||
cookie.zeroize(); // zeroed-out 16-byte bitstring
|
||||
}
|
||||
else {
|
||||
cookie = lhash("cookie", peer.cookie_value.unwrap(), COOKIE_WIRE_DATA)
|
||||
}
|
||||
```
|
||||
|
||||
Here, `seconds_since_update(peer.cookie_value)` is the time in seconds elapsed since the last cookie was received, and `COOKIE_WIRE_DATA` consists of all bytes of the retransmitted message prior to the `cookie` field.
|
||||
|
||||
The initiator may use an invalid value for the `cookie` field when the responder is not under load, and the responder must ignore this value.
However, when the responder is under load, it may reject InitHello messages with an invalid `cookie` value and issue a cookie reply message.
|
||||
|
||||
### Conditions to Trigger the DoS Mechanism
|
||||
|
||||
This whitepaper does not mandate any specific mechanism to detect responder contention (also referred to as the under-load condition) that would trigger use of the cookie mechanism.
|
||||
|
||||
For the reference implementation, Rosenpass has drawn inspiration from the Linux implementation of WireGuard. This implementation suggests that the receiver keep track of the number of messages it is processing at any given time.
|
||||
|
||||
On receiving an incoming message, if the length of the queue of messages to be processed exceeds the threshold `MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD`, the client is considered under load and its state is recorded as such, together with the timestamp of the moment it was last under load. When receiving subsequent messages while still in the under-load state, the client checks whether the time elapsed since it was last under load exceeds `LAST_UNDER_LOAD_WINDOW` seconds. If so, the client returns to normal operation and processes the message in the normal fashion.
|
||||
|
||||
Currently, the following constants are derived from the Linux kernel implementation of WireGuard:
|
||||
|
||||
```pseudorust
|
||||
MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD = 4096
|
||||
LAST_UNDER_LOAD_WINDOW = 1 //seconds
|
||||
```
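The detection logic described above can be summarized by the following sketch. It is illustrative only; names such as `incoming_queue_len` and `last_under_load` are placeholders, not identifiers from the reference implementation.

```pseudorust
// Sketch of the under-load heuristic, not normative
fn currently_under_load(state) -> bool {
    if state.incoming_queue_len() > MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD {
        state.last_under_load = now();
        return true;
    }
    match state.last_under_load {
        None => false,
        // Still under load while inside the window since the last overload
        Some(t) => now() - t < LAST_UNDER_LOAD_WINDOW,
    }
}
```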
|
||||
|
||||
## Dealing with Packet Loss
|
||||
|
||||
The initiator deals with packet loss by storing the messages it sends to the responder and retransmitting them in randomized, exponentially increasing intervals until they get a response. Receiving RespHello terminates retransmission of InitHello. A Data or EmptyData message serves as acknowledgement of receiving InitConf and terminates its retransmission.
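The exact retransmission schedule is an implementation detail. As a hypothetical sketch of "randomized, exponentially increasing intervals" (the constants `BASE_DELAY`, `MAX_DELAY` and the helper `random_jitter` below are illustrative, not taken from the reference implementation), the delay before the n-th retransmission attempt could be computed as:

```pseudorust
// Hypothetical retransmission backoff – illustrative names and constants
delay(n) = min(BASE_DELAY * 2^n, MAX_DELAY) * random_jitter(0.9, 1.1)
```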
|
||||
|
||||
The responder does not need to do anything special to handle RespHello retransmission – if the RespHello packet is lost, the initiator retransmits InitHello and the responder can generate another RespHello packet from that. InitConf retransmission needs to be handled specifically in the responder code because accepting an InitConf retransmission would reset the live session, including the nonce counter, which would cause nonce reuse. Implementations must detect the case that `biscuit_no = biscuit_used` in ICR5, skip execution of ICR6 and ICR7, and just transmit another EmptyData packet to confirm that the initiator can stop transmitting InitConf.
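The following sketch (illustrative only, not the actual responder code) shows where this case is handled relative to ICR5–ICR7; `transmit` is a placeholder name:

```pseudorust
// Sketch of responder-side InitConf retransmission handling
if biscuit.biscuit_no == biscuit_used {          // detected in ICR5
    // Retransmitted InitConf: skip ICR6 and ICR7 so the live session
    // (and its nonce counter) is not reset; just acknowledge again.
    transmit(EmptyData)
} else {
    // Fresh InitConf: continue with ICR6 and ICR7 as specified.
    ...
}
```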
|
||||
|
||||
### Interaction with the Cookie Reply System
|
||||
|
||||
The cookie reply system does not interfere with the retransmission logic discussed above.
|
||||
|
||||
When the initiator is under load, it will not process any incoming messages.
|
||||
|
||||
When a responder is under load and receives an InitHello handshake message, the InitHello message is discarded and a cookie reply message is sent. The initiator, on receipt of the cookie reply message, stores the decrypted `cookie_value` and uses it to set the `cookie` field of subsequently sent messages. As per the retransmission mechanism above, the initiator then sends a retransmitted InitHello message with a valid `cookie` value appended. On receiving the retransmitted handshake message, the responder validates the `cookie` value and resumes the handshake process.
|
||||
|
||||
When the responder is under load and receives an InitConf message, the message is processed directly without checking the validity of the cookie field.
|
||||
|
||||
# Changelog
|
||||
|
||||
- Added the section "Denial of Service Mitigation and Cookies" and modified "Dealing with Packet Loss" for the DoS cookie mechanism
|
||||
|
||||
\printbibliography
|
||||
|
||||
\setupimage{landscape,fullpage,label=img:HandlingCode}
|
||||
|
||||
12
readme.md
@@ -25,11 +25,11 @@ Follow [quick start instructions](https://rosenpass.eu/#start) to get a VPN up a
|
||||
|
||||
## Software architecture
|
||||
|
||||
The [rosenpass tool](./src/) is written in Rust and uses liboqs[^liboqs]. The tool establishes a symmetric key and provides it to WireGuard. Since it supplies WireGuard with a key through the PSK feature, using Rosenpass+WireGuard is cryptographically no less secure than using WireGuard on its own ("hybrid security"). Rosenpass refreshes the symmetric key every two minutes.
|
||||
The [rosenpass tool](./src/) is written in Rust and uses liboqs[^liboqs] and libsodium[^libsodium]. The tool establishes a symmetric key and provides it to WireGuard. Since it supplies WireGuard with a key through the PSK feature, using Rosenpass+WireGuard is cryptographically no less secure than using WireGuard on its own ("hybrid security"). Rosenpass refreshes the symmetric key every two minutes.
|
||||
|
||||
As with any application, a small risk of critical security issues (such as buffer overflows or remote code execution) exists; the Rosenpass application is written in the Rust programming language, which is much less prone to such issues. Rosenpass can also write keys to files instead of supplying them to WireGuard. With a bit of scripting, the stand-alone mode of the implementation can be used to run the application in a container, VM, or on another host. This mode can also be used to integrate tools other than WireGuard with Rosenpass.
|
||||
|
||||
The [`rp`](./rp) tool written in Rust makes it easy to create a VPN using WireGuard and Rosenpass.
|
||||
The [`rp`](./rp) tool written in bash makes it easy to create a VPN using WireGuard and Rosenpass.
|
||||
|
||||
`rp` is easy to get started with but has a few drawbacks; it runs as root, demanding access to both WireGuard
|
||||
and Rosenpass private keys, takes control of the interface and works with exactly one interface. If you do not feel confident about running Rosenpass as root, you should use the stand-alone mode to create a more secure setup using containers, jails, or virtual machines.
|
||||
@@ -59,6 +59,7 @@ The code uses a variety of optimizations to speed up analysis such as using secr
|
||||
A wrapper script provides instant feedback about which queries execute as expected in color: A red cross if a query fails and a green check if it succeeds.
|
||||
|
||||
[^liboqs]: https://openquantumsafe.org/liboqs/
|
||||
[^libsodium]: https://doc.libsodium.org/
|
||||
[^wg]: https://www.wireguard.com/
|
||||
[^pqwg]: https://eprint.iacr.org/2020/379
|
||||
[^pqwg-statedis]: Unless supplied with a pre-shared-key, but this defeats the purpose of a key exchange protocol
|
||||
@@ -70,13 +71,6 @@ Rosenpass is packaged for more and more distributions, maybe also for the distri
|
||||
|
||||
[](https://repology.org/project/rosenpass/versions)
|
||||
|
||||
# Mirrors
|
||||
|
||||
Don't want to use GitHub or only have an IPv6 connection? Rosenpass has set up two mirrors for this:
|
||||
|
||||
- [NotABug](https://notabug.org/rosenpass/rosenpass)
|
||||
- [GitLab](https://gitlab.com/rosenpass/rosenpass/)
|
||||
|
||||
# Supported by
|
||||
|
||||
Funded through <a href="https://nlnet.nl/">NLNet</a> with financial support from the European Commission's <a href="https://nlnet.nl/assure">NGI Assure</a> program.
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
[package]
|
||||
name = "rosenpass"
|
||||
version = "0.2.1"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Build post-quantum-secure VPNs with WireGuard!"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
[[bin]]
|
||||
name = "rosenpass"
|
||||
path = "src/main.rs"
|
||||
|
||||
[[bench]]
|
||||
name = "handshake"
|
||||
harness = false
|
||||
|
||||
[dependencies]
|
||||
rosenpass-util = { workspace = true }
|
||||
rosenpass-constant-time = { workspace = true }
|
||||
rosenpass-ciphers = { workspace = true }
|
||||
rosenpass-cipher-traits = { workspace = true }
|
||||
rosenpass-to = { workspace = true }
|
||||
rosenpass-secret-memory = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
static_assertions = { workspace = true }
|
||||
memoffset = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
paste = { workspace = true }
|
||||
log = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
toml = { workspace = true }
|
||||
clap = { workspace = true }
|
||||
mio = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
zerocopy = { workspace = true }
|
||||
home = { workspace = true }
|
||||
derive_builder = {workspace = true}
|
||||
rosenpass-wireguard-broker = {workspace = true}
|
||||
|
||||
[build-dependencies]
|
||||
anyhow = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { workspace = true }
|
||||
test_bin = { workspace = true }
|
||||
stacker = { workspace = true }
|
||||
serial_test = {workspace = true}
|
||||
procspawn = {workspace = true}
|
||||
|
||||
[features]
|
||||
enable_broker_api = ["rosenpass-wireguard-broker/enable_broker_api"]
|
||||
enable_memfd_alloc = []
|
||||
experiment_libcrux = ["rosenpass-ciphers/experiment_libcrux"]
|
||||
@@ -1 +0,0 @@
|
||||
../readme.md
|
||||
@@ -1,377 +0,0 @@
|
||||
use anyhow::{bail, ensure};
|
||||
use clap::{Parser, Subcommand};
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_ciphers::kem::StaticKem;
|
||||
use rosenpass_secret_memory::file::StoreSecret;
|
||||
use rosenpass_secret_memory::{
|
||||
secret_policy_try_use_memfd_secrets, secret_policy_use_only_malloc_secrets,
|
||||
};
|
||||
use rosenpass_util::file::{LoadValue, LoadValueB64, StoreValue};
|
||||
use rosenpass_wireguard_broker::brokers::native_unix::{
|
||||
NativeUnixBroker, NativeUnixBrokerConfigBaseBuilder, NativeUnixBrokerConfigBaseBuilderError,
|
||||
};
|
||||
use std::ops::DerefMut;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::app_server::AppServerTest;
|
||||
use crate::app_server::{AppServer, BrokerPeer};
|
||||
use crate::protocol::{SPk, SSk, SymKey};
|
||||
|
||||
use super::config;
|
||||
|
||||
/// struct holding all CLI arguments for `clap` crate to parse
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about)]
|
||||
pub struct CliArgs {
|
||||
/// lowest log level to show – log messages at higher levels will be omitted
|
||||
#[arg(long = "log-level", value_name = "LOG_LEVEL", group = "log-level")]
|
||||
log_level: Option<log::LevelFilter>,
|
||||
|
||||
/// show verbose log output – sets log level to "debug"
|
||||
#[arg(short, long, group = "log-level")]
|
||||
verbose: bool,
|
||||
|
||||
/// show no log output – sets log level to "error"
|
||||
#[arg(short, long, group = "log-level")]
|
||||
quiet: bool,
|
||||
|
||||
#[command(subcommand)]
|
||||
pub command: CliCommand,
|
||||
}
|
||||
|
||||
impl CliArgs {
|
||||
/// returns the log level filter set by CLI args
|
||||
/// returns `None` if the user did not specify any log level filter via CLI
|
||||
///
|
||||
/// NOTE: the clap feature of ["argument groups"](https://docs.rs/clap/latest/clap/_derive/_tutorial/chapter_3/index.html#argument-relations)
|
||||
/// ensures that the user can not specify more than one of the possible log level arguments.
|
||||
/// Note the `#[arg("group")]` in the [`CliArgs`] struct.
|
||||
pub fn get_log_level(&self) -> Option<log::LevelFilter> {
|
||||
if self.verbose {
|
||||
return Some(log::LevelFilter::Info);
|
||||
}
|
||||
if self.quiet {
|
||||
return Some(log::LevelFilter::Error);
|
||||
}
|
||||
if let Some(level_filter) = self.log_level {
|
||||
return Some(level_filter);
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// represents a command specified via CLI
|
||||
#[derive(Subcommand, Debug)]
|
||||
pub enum CliCommand {
|
||||
/// Start Rosenpass in server mode and carry on with the key exchange
|
||||
///
|
||||
/// This will parse the configuration file and perform the key exchange
|
||||
/// with the specified peers. If a peer's endpoint is specified, this
|
||||
/// Rosenpass instance will try to initiate a key exchange with the peer,
|
||||
/// otherwise only initiation attempts from the peer will be responded to.
|
||||
ExchangeConfig { config_file: PathBuf },
|
||||
|
||||
/// Start in daemon mode, performing key exchanges
|
||||
///
|
||||
/// The configuration is read from the command line. The `peer` token
|
||||
/// always separates multiple peers, e. g. if the token `peer` appears
|
||||
/// in the WIREGUARD_EXTRA_ARGS it is not put into the WireGuard arguments
|
||||
/// but instead a new peer is created.
|
||||
/* Explanation: `first_arg` and `rest_of_args` are combined into one
|
||||
* `Vec<String>`. They are only used to trick clap into displaying some
|
||||
* guidance on the CLI usage.
|
||||
*/
|
||||
#[allow(rustdoc::broken_intra_doc_links)]
|
||||
#[allow(rustdoc::invalid_html_tags)]
|
||||
Exchange {
|
||||
/// public-key <PATH> secret-key <PATH> [listen <ADDR>:<PORT>]... [verbose]
|
||||
#[clap(value_name = "OWN_CONFIG")]
|
||||
first_arg: String,
|
||||
|
||||
/// peer public-key <PATH> [ENDPOINT] [PSK] [OUTFILE] [WG]
|
||||
///
|
||||
/// ENDPOINT := endpoint <HOST/IP>:<PORT>
|
||||
///
|
||||
/// PSK := preshared-key <PATH>
|
||||
///
|
||||
/// OUTFILE := outfile <PATH>
|
||||
///
|
||||
/// WG := wireguard <WIREGUARD_DEV> <WIREGUARD_PEER> [WIREGUARD_EXTRA_ARGS]...
|
||||
#[clap(value_name = "PEERS")]
|
||||
rest_of_args: Vec<String>,
|
||||
|
||||
/// Save the parsed configuration to a file before starting the daemon
|
||||
#[clap(short, long)]
|
||||
config_file: Option<PathBuf>,
|
||||
},
|
||||
|
||||
/// Generate a demo config file
|
||||
GenConfig {
|
||||
config_file: PathBuf,
|
||||
|
||||
/// Forcefully overwrite existing config file
|
||||
#[clap(short, long)]
|
||||
force: bool,
|
||||
},
|
||||
|
||||
/// Generate the keys mentioned in a config file
|
||||
///
|
||||
/// Generates secret- & public-key to their destination. If a config file
|
||||
/// is provided then the key file destination is taken from there.
|
||||
/// Otherwise the destinations given via the --public-key and --secret-key options are used.
|
||||
GenKeys {
|
||||
config_file: Option<PathBuf>,
|
||||
|
||||
/// where to write public-key to
|
||||
#[clap(short, long)]
|
||||
public_key: Option<PathBuf>,
|
||||
|
||||
/// where to write secret-key to
|
||||
#[clap(short, long)]
|
||||
secret_key: Option<PathBuf>,
|
||||
|
||||
/// Forcefully overwrite public- & secret-key file
|
||||
#[clap(short, long)]
|
||||
force: bool,
|
||||
},
|
||||
|
||||
/// Deprecated - use gen-keys instead
|
||||
#[allow(rustdoc::broken_intra_doc_links)]
|
||||
#[allow(rustdoc::invalid_html_tags)]
|
||||
Keygen {
|
||||
// NOTE yes, the legacy keygen argument initially really accepted "privet-key", not "secret-key"!
|
||||
/// public-key <PATH> private-key <PATH>
|
||||
args: Vec<String>,
|
||||
},
|
||||
|
||||
/// Validate a configuration
|
||||
Validate { config_files: Vec<PathBuf> },
|
||||
|
||||
/// Show the rosenpass manpage
|
||||
// TODO make this the default, but only after the manpage has been adjusted once the CLI stabilizes
|
||||
Man,
|
||||
}
|
||||
|
||||
impl CliCommand {
|
||||
/// runs the command specified via CLI
|
||||
///
|
||||
/// ## TODO
|
||||
/// - This method consumes the [`CliCommand`] value. It might be wise to use a reference...
|
||||
pub fn run(self, test_helpers: Option<AppServerTest>) -> anyhow::Result<()> {
|
||||
//Specify secret policy
|
||||
|
||||
#[cfg(feature = "enable_memfd_alloc")]
|
||||
secret_policy_try_use_memfd_secrets();
|
||||
#[cfg(not(feature = "enable_memfd_alloc"))]
|
||||
secret_policy_use_only_malloc_secrets();
|
||||
|
||||
use CliCommand::*;
|
||||
match self {
|
||||
Man => {
|
||||
let man_cmd = std::process::Command::new("man")
|
||||
.args(["1", "rosenpass"])
|
||||
.status();
|
||||
|
||||
if !(man_cmd.is_ok() && man_cmd.unwrap().success()) {
|
||||
println!(include_str!(env!("ROSENPASS_MAN")));
|
||||
}
|
||||
}
|
||||
GenConfig { config_file, force } => {
|
||||
ensure!(
|
||||
force || !config_file.exists(),
|
||||
"config file {config_file:?} already exists"
|
||||
);
|
||||
|
||||
config::Rosenpass::example_config().store(config_file)?;
|
||||
}
|
||||
|
||||
// Deprecated - use gen-keys instead
|
||||
Keygen { args } => {
|
||||
log::warn!("The 'keygen' command is deprecated. Please use the 'gen-keys' command instead.");
|
||||
|
||||
let mut public_key: Option<PathBuf> = None;
|
||||
let mut secret_key: Option<PathBuf> = None;
|
||||
|
||||
// Manual arg parsing, since clap wants to prefix flags with "--"
|
||||
let mut args = args.into_iter();
|
||||
loop {
|
||||
match (args.next().as_deref(), args.next()) {
|
||||
(Some("private-key"), Some(opt)) | (Some("secret-key"), Some(opt)) => {
|
||||
secret_key = Some(opt.into());
|
||||
}
|
||||
(Some("public-key"), Some(opt)) => {
|
||||
public_key = Some(opt.into());
|
||||
}
|
||||
(Some(flag), _) => {
|
||||
bail!("Unknown option `{}`", flag);
|
||||
}
|
||||
(_, _) => break,
|
||||
};
|
||||
}
|
||||
|
||||
if secret_key.is_none() {
|
||||
bail!("private-key is required");
|
||||
}
|
||||
if public_key.is_none() {
|
||||
bail!("public-key is required");
|
||||
}
|
||||
|
||||
generate_and_save_keypair(secret_key.unwrap(), public_key.unwrap())?;
|
||||
}
|
||||
|
||||
GenKeys {
|
||||
config_file,
|
||||
public_key,
|
||||
secret_key,
|
||||
force,
|
||||
} => {
|
||||
// figure out where the key file is specified, in the config file or directly as flag?
|
||||
let (pkf, skf) = match (config_file, public_key, secret_key) {
|
||||
(Some(config_file), _, _) => {
|
||||
ensure!(
|
||||
config_file.exists(),
|
||||
"config file {config_file:?} does not exist"
|
||||
);
|
||||
|
||||
let config = config::Rosenpass::load(config_file)?;
|
||||
|
||||
(config.public_key, config.secret_key)
|
||||
}
|
||||
(_, Some(pkf), Some(skf)) => (pkf, skf),
|
||||
_ => {
|
||||
bail!("either a config-file or both public-key and secret-key file are required")
|
||||
}
|
||||
};
|
||||
|
||||
// check that we are not overriding something unintentionally
|
||||
let mut problems = vec![];
|
||||
if !force && pkf.is_file() {
|
||||
problems.push(format!(
|
||||
"public-key file {pkf:?} exist, refusing to overwrite it"
|
||||
));
|
||||
}
|
||||
if !force && skf.is_file() {
|
||||
problems.push(format!(
|
||||
"secret-key file {skf:?} exist, refusing to overwrite it"
|
||||
));
|
||||
}
|
||||
if !problems.is_empty() {
|
||||
bail!(problems.join("\n"));
|
||||
}
|
||||
|
||||
// generate the keys and store them in files
|
||||
generate_and_save_keypair(skf, pkf)?;
|
||||
}
|
||||
|
||||
ExchangeConfig { config_file } => {
|
||||
ensure!(
|
||||
config_file.exists(),
|
||||
"config file '{config_file:?}' does not exist"
|
||||
);
|
||||
|
||||
let config = config::Rosenpass::load(config_file)?;
|
||||
config.validate()?;
|
||||
Self::event_loop(config, test_helpers)?;
|
||||
}
|
||||
|
||||
Exchange {
|
||||
first_arg,
|
||||
mut rest_of_args,
|
||||
config_file,
|
||||
} => {
|
||||
rest_of_args.insert(0, first_arg);
|
||||
let args = rest_of_args;
|
||||
let mut config = config::Rosenpass::parse_args(args)?;
|
||||
|
||||
if let Some(p) = config_file {
|
||||
config.store(&p)?;
|
||||
config.config_file_path = p;
|
||||
}
|
||||
config.validate()?;
|
||||
Self::event_loop(config, test_helpers)?;
|
||||
}
|
||||
|
||||
Validate { config_files } => {
|
||||
for file in config_files {
|
||||
match config::Rosenpass::load(&file) {
|
||||
Ok(config) => {
|
||||
eprintln!("{file:?} is valid TOML and conforms to the expected schema");
|
||||
match config.validate() {
|
||||
Ok(_) => eprintln!("{file:?} has passed all logical checks"),
|
||||
Err(_) => eprintln!("{file:?} contains logical errors"),
|
||||
}
|
||||
}
|
||||
Err(e) => eprintln!("{file:?} is not valid: {e}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn event_loop(
|
||||
config: config::Rosenpass,
|
||||
test_helpers: Option<AppServerTest>,
|
||||
) -> anyhow::Result<()> {
|
||||
const MAX_PSK_SIZE: usize = 1000;
|
||||
// load own keys
|
||||
let sk = SSk::load(&config.secret_key)?;
|
||||
let pk = SPk::load(&config.public_key)?;
|
||||
|
||||
// start an application server
|
||||
let mut srv = std::boxed::Box::<AppServer>::new(AppServer::new(
|
||||
sk,
|
||||
pk,
|
||||
config.listen,
|
||||
config.verbosity,
|
||||
test_helpers,
|
||||
)?);
|
||||
|
||||
let broker_store_ptr = srv.register_broker(Box::new(NativeUnixBroker::new()))?;
|
||||
|
||||
fn cfg_err_map(e: NativeUnixBrokerConfigBaseBuilderError) -> anyhow::Error {
|
||||
anyhow::Error::msg(format!("NativeUnixBrokerConfigBaseBuilderError: {:?}", e))
|
||||
}
|
||||
|
||||
for cfg_peer in config.peers {
|
||||
let broker_peer = if let Some(wg) = &cfg_peer.wg {
|
||||
let peer_cfg = NativeUnixBrokerConfigBaseBuilder::default()
|
||||
.peer_id_b64(&wg.peer)?
|
||||
.interface(wg.device.clone())
|
||||
.extra_params_ser(&wg.extra_params)?
|
||||
.build()
|
||||
.map_err(cfg_err_map)?;
|
||||
|
||||
let broker_peer = BrokerPeer::new(broker_store_ptr.clone(), Box::new(peer_cfg));
|
||||
|
||||
Some(broker_peer)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
srv.add_peer(
|
||||
// psk, pk, outfile, outwg, tx_addr
|
||||
cfg_peer
|
||||
.pre_shared_key
|
||||
.map(SymKey::load_b64::<MAX_PSK_SIZE, _>)
|
||||
.transpose()?,
|
||||
SPk::load(&cfg_peer.public_key)?,
|
||||
cfg_peer.key_out,
|
||||
broker_peer,
|
||||
cfg_peer.endpoint.clone(),
|
||||
)?;
|
||||
}
|
||||
|
||||
srv.event_loop()
|
||||
}
|
||||
}
|
||||
|
||||
/// generate secret and public keys, store in files according to the paths passed as arguments
|
||||
fn generate_and_save_keypair(secret_key: PathBuf, public_key: PathBuf) -> anyhow::Result<()> {
|
||||
let mut ssk = crate::protocol::SSk::random();
|
||||
let mut spk = crate::protocol::SPk::random();
|
||||
StaticKem::keygen(ssk.secret_mut(), spk.deref_mut())?;
|
||||
ssk.store_secret(secret_key)?;
|
||||
spk.store(public_key)
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
//! Pseudo Random Functions (PRFs) with a tree-like label scheme which
|
||||
//! ensures their uniqueness
|
||||
|
||||
use anyhow::Result;
|
||||
use rosenpass_ciphers::{hash_domain::HashDomain, KEY_LEN};
|
||||
|
||||
// TODO Use labels that can serve as identifiers
|
||||
macro_rules! hash_domain_ns {
|
||||
($base:ident, $name:ident, $($lbl:expr),* ) => {
|
||||
pub fn $name() -> Result<HashDomain> {
|
||||
let t = $base()?;
|
||||
$( let t = t.mix($lbl.as_bytes())?; )*
|
||||
Ok(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! hash_domain {
|
||||
($base:ident, $name:ident, $($lbl:expr),* ) => {
|
||||
pub fn $name() -> Result<[u8; KEY_LEN]> {
|
||||
let t = $base()?;
|
||||
$( let t = t.mix($lbl.as_bytes())?; )*
|
||||
Ok(t.into_value())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn protocol() -> Result<HashDomain> {
|
||||
HashDomain::zero().mix("Rosenpass v1 mceliece460896 Kyber512 ChaChaPoly1305 BLAKE2s".as_bytes())
|
||||
}
|
||||
|
||||
hash_domain_ns!(protocol, mac, "mac");
|
||||
hash_domain_ns!(protocol, cookie, "cookie");
|
||||
hash_domain_ns!(protocol, cookie_value, "cookie-value");
|
||||
hash_domain_ns!(protocol, cookie_key, "cookie-key");
|
||||
hash_domain_ns!(protocol, peerid, "peer id");
|
||||
hash_domain_ns!(protocol, biscuit_ad, "biscuit additional data");
|
||||
hash_domain_ns!(protocol, ckinit, "chaining key init");
|
||||
hash_domain_ns!(protocol, _ckextract, "chaining key extract");
|
||||
|
||||
hash_domain!(_ckextract, mix, "mix");
|
||||
hash_domain!(_ckextract, hs_enc, "handshake encryption");
|
||||
hash_domain!(_ckextract, ini_enc, "initiator handshake encryption");
|
||||
hash_domain!(_ckextract, res_enc, "responder handshake encryption");
|
||||
|
||||
hash_domain_ns!(_ckextract, _user, "user");
|
||||
hash_domain_ns!(_user, _rp, "rosenpass.eu");
|
||||
hash_domain!(_rp, osk, "wireguard psk");
|
||||
@@ -1,14 +0,0 @@
|
||||
pub mod app_server;
|
||||
pub mod cli;
|
||||
pub mod config;
|
||||
pub mod hash_domains;
|
||||
pub mod msgs;
|
||||
pub mod protocol;
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum RosenpassError {
|
||||
#[error("buffer size mismatch")]
|
||||
BufferSizeMismatch,
|
||||
#[error("invalid message type")]
|
||||
InvalidMessageType(u8),
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
use clap::Parser;
|
||||
use log::error;
|
||||
use rosenpass::cli::CliArgs;
|
||||
use std::process::exit;
|
||||
|
||||
/// Catches errors, prints them through the logger, then exits
|
||||
pub fn main() {
|
||||
// parse CLI arguments
|
||||
let args = CliArgs::parse();
|
||||
|
||||
// init logging
|
||||
{
|
||||
let mut log_builder = env_logger::Builder::from_default_env(); // sets log level filter from environment (or defaults)
|
||||
if let Some(level) = args.get_log_level() {
|
||||
log::debug!("setting log level to {:?} (set via CLI parameter)", level);
|
||||
log_builder.filter_level(level); // set log level filter from CLI args if available
|
||||
}
|
||||
log_builder.init();
|
||||
|
||||
// // check the effectiveness of the log level filter with the following lines:
|
||||
// use log::{debug, error, info, trace, warn};
|
||||
// trace!("trace dummy");
|
||||
// debug!("debug dummy");
|
||||
// info!("info dummy");
|
||||
// warn!("warn dummy");
|
||||
// error!("error dummy");
|
||||
}
|
||||
|
||||
match args.command.run(None) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
error!("{e}");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,213 +0,0 @@
|
||||
//! Data structures representing the messages going over the wire
|
||||
//!
|
||||
//! This module contains de-/serialization of the protocol's messages. That's kind
|
||||
//! of a lie, since no actual ser/de happens. Instead, the structures offer views
|
||||
//! into mutable byte slices (`&mut [u8]`), allowing one to modify the fields of an
|
||||
//! always serialized instance of the data in question. This is closely related
|
||||
//! to the concept of lenses in functional programming; more on that here:
|
||||
//! [https://sinusoid.es/misc/lager/lenses.pdf](https://sinusoid.es/misc/lager/lenses.pdf)
|
||||
//! To achieve this we utilize the zerocopy library.
|
||||
//!
|
||||
use std::mem::size_of;
|
||||
use zerocopy::{AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use super::RosenpassError;
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_ciphers::kem::{EphemeralKem, StaticKem};
|
||||
use rosenpass_ciphers::{aead, xaead, KEY_LEN};
|
||||
pub const MSG_SIZE_LEN: usize = 1;
|
||||
pub const RESERVED_LEN: usize = 3;
|
||||
pub const MAC_SIZE: usize = 16;
|
||||
pub const COOKIE_SIZE: usize = 16;
|
||||
pub const SID_LEN: usize = 4;
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct Envelope<M: AsBytes + FromBytes> {
|
||||
/// [MsgType] of this message
|
||||
pub msg_type: u8,
|
||||
/// Reserved for future use
|
||||
pub reserved: [u8; 3],
|
||||
/// The actual payload
|
||||
pub payload: M,
|
||||
/// Message Authentication Code (mac) over all bytes until (exclusive)
|
||||
/// `mac` itself
|
||||
pub mac: [u8; 16],
|
||||
/// Currently unused, TODO: do something with this
|
||||
pub cookie: [u8; 16],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct InitHello {
|
||||
/// Randomly generated connection id
|
||||
pub sidi: [u8; 4],
|
||||
/// Kyber 512 Ephemeral Public Key
|
||||
pub epki: [u8; EphemeralKem::PK_LEN],
|
||||
/// Classic McEliece Ciphertext
|
||||
pub sctr: [u8; StaticKem::CT_LEN],
|
||||
/// Encrypted: 16-byte hash of the McEliece initiator static key
|
||||
pub pidic: [u8; aead::TAG_LEN + 32],
|
||||
/// Encrypted TAI64N Time Stamp (against replay attacks)
|
||||
pub auth: [u8; aead::TAG_LEN],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct RespHello {
|
||||
/// Randomly generated connection id
|
||||
pub sidr: [u8; 4],
|
||||
/// Copied from InitHello
|
||||
pub sidi: [u8; 4],
|
||||
/// Kyber 512 Ephemeral Ciphertext
|
||||
pub ecti: [u8; EphemeralKem::CT_LEN],
|
||||
/// Classic McEliece Ciphertext
|
||||
pub scti: [u8; StaticKem::CT_LEN],
|
||||
/// Empty encrypted message (just an auth tag)
|
||||
pub auth: [u8; aead::TAG_LEN],
|
||||
/// Responder's handshake state in encrypted form
|
||||
pub biscuit: [u8; BISCUIT_CT_LEN],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct InitConf {
|
||||
/// Copied from InitHello
|
||||
pub sidi: [u8; 4],
|
||||
/// Copied from RespHello
|
||||
pub sidr: [u8; 4],
|
||||
/// Responder's handshake state in encrypted form
|
||||
pub biscuit: [u8; BISCUIT_CT_LEN],
|
||||
/// Empty encrypted message (just an auth tag)
|
||||
pub auth: [u8; aead::TAG_LEN],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct EmptyData {
|
||||
/// Copied from RespHello
|
||||
pub sid: [u8; 4],
|
||||
/// Nonce
|
||||
pub ctr: [u8; 8],
|
||||
/// Empty encrypted message (just an auth tag)
|
||||
pub auth: [u8; aead::TAG_LEN],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct Biscuit {
|
||||
/// H(spki) – Identifies the initiator
|
||||
pub pidi: [u8; KEY_LEN],
|
||||
/// The biscuit number (replay protection)
|
||||
pub biscuit_no: [u8; 12],
|
||||
/// Chaining key
|
||||
pub ck: [u8; KEY_LEN],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct DataMsg {
|
||||
pub dummy: [u8; 4],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct CookieReplyInner {
|
||||
/// [MsgType] of this message
|
||||
pub msg_type: u8,
|
||||
/// Reserved for future use
|
||||
pub reserved: [u8; 3],
|
||||
/// Session ID of the sender (initiator)
|
||||
pub sid: [u8; 4],
|
||||
/// Encrypted cookie with authenticated initiator `mac`
|
||||
pub cookie_encrypted: [u8; xaead::NONCE_LEN + COOKIE_SIZE + xaead::TAG_LEN],
|
||||
}
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(AsBytes, FromBytes, FromZeroes)]
|
||||
pub struct CookieReply {
|
||||
pub inner: CookieReplyInner,
|
||||
pub padding: [u8; size_of::<Envelope<InitHello>>() - size_of::<CookieReplyInner>()],
|
||||
}
|
||||
|
||||
// Traits /////////////////////////////////////////////////////////////////////
|
||||
|
||||
pub trait WireMsg: std::fmt::Debug {
|
||||
const MSG_TYPE: MsgType;
|
||||
const MSG_TYPE_U8: u8 = Self::MSG_TYPE as u8;
|
||||
const BYTES: usize;
|
||||
}
|
||||
|
||||
// Constants //////////////////////////////////////////////////////////////////
|
||||
|
||||
pub const SESSION_ID_LEN: usize = 4;
|
||||
pub const BISCUIT_ID_LEN: usize = 12;
|
||||
|
||||
pub const WIRE_ENVELOPE_LEN: usize = 1 + 3 + 16 + 16; // TODO verify this
|
||||
|
||||
/// Size required to fit any message in binary form
|
||||
pub const MAX_MESSAGE_LEN: usize = 2500; // TODO fix this
|
||||
|
||||
/// Recognized message types
|
||||
#[repr(u8)]
|
||||
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
|
||||
pub enum MsgType {
|
||||
InitHello = 0x81,
|
||||
RespHello = 0x82,
|
||||
InitConf = 0x83,
|
||||
EmptyData = 0x84,
|
||||
DataMsg = 0x85,
|
||||
CookieReply = 0x86,
|
||||
}
|
||||
|
||||
impl TryFrom<u8> for MsgType {
|
||||
type Error = RosenpassError;
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
Ok(match value {
|
||||
0x81 => MsgType::InitHello,
|
||||
0x82 => MsgType::RespHello,
|
||||
0x83 => MsgType::InitConf,
|
||||
0x84 => MsgType::EmptyData,
|
||||
0x85 => MsgType::DataMsg,
|
||||
0x86 => MsgType::CookieReply,
|
||||
_ => return Err(RosenpassError::InvalidMessageType(value)),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<MsgType> for u8 {
|
||||
fn from(val: MsgType) -> Self {
|
||||
val as u8
|
||||
}
|
||||
}
|
||||
|
||||
/// length in bytes of an unencrypted Biscuit (plain text)
|
||||
pub const BISCUIT_PT_LEN: usize = size_of::<Biscuit>();
|
||||
|
||||
/// Length in bytes of an encrypted Biscuit (cipher text)
|
||||
pub const BISCUIT_CT_LEN: usize = BISCUIT_PT_LEN + xaead::NONCE_LEN + xaead::TAG_LEN;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test_constants {
|
||||
use crate::msgs::{BISCUIT_CT_LEN, BISCUIT_PT_LEN};
|
||||
use rosenpass_ciphers::{xaead, KEY_LEN};
|
||||
|
||||
#[test]
|
||||
fn sodium_keysize() {
|
||||
assert_eq!(KEY_LEN, 32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn biscuit_pt_len() {
|
||||
assert_eq!(BISCUIT_PT_LEN, 2 * KEY_LEN + 12);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn biscuit_ct_len() {
|
||||
assert_eq!(
|
||||
BISCUIT_CT_LEN,
|
||||
BISCUIT_PT_LEN + xaead::NONCE_LEN + xaead::TAG_LEN
|
||||
);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,331 +0,0 @@
|
||||
use std::{
|
||||
fs,
|
||||
net::UdpSocket,
|
||||
path::PathBuf,
|
||||
sync::{Arc, Mutex},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use clap::Parser;
|
||||
use rosenpass::{app_server::AppServerTestBuilder, cli::CliArgs};
|
||||
use rosenpass_secret_memory::{Public, Secret};
|
||||
use rosenpass_wireguard_broker::{WireguardBrokerMio, WG_KEY_LEN, WG_PEER_LEN};
|
||||
use serial_test::serial;
|
||||
use std::io::Write;
|
||||
|
||||
const BIN: &str = "rosenpass";
|
||||
|
||||
// check that we can generate keys
|
||||
#[test]
|
||||
fn generate_keys() {
|
||||
let tmpdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join("keygen");
|
||||
fs::create_dir_all(&tmpdir).unwrap();
|
||||
|
||||
let secret_key_path = tmpdir.join("secret-key");
|
||||
let public_key_path = tmpdir.join("public-key");
|
||||
|
||||
let output = test_bin::get_test_bin(BIN)
|
||||
.args(["gen-keys", "--secret-key"])
|
||||
.arg(&secret_key_path)
|
||||
.arg("--public-key")
|
||||
.arg(&public_key_path)
|
||||
.output()
|
||||
.expect("Failed to start {BIN}");
|
||||
|
||||
assert_eq!(String::from_utf8_lossy(&output.stdout), "");
|
||||
|
||||
assert!(secret_key_path.is_file());
|
||||
assert!(public_key_path.is_file());
|
||||
|
||||
// cleanup
|
||||
fs::remove_dir_all(&tmpdir).unwrap();
|
||||
}
|
||||
|
||||
fn find_udp_socket() -> Option<u16> {
|
||||
(1025..=u16::MAX).find(|&port| UdpSocket::bind(("::1", port)).is_ok())
|
||||
}
|
||||
|
||||
fn setup_logging() {
|
||||
let mut log_builder = env_logger::Builder::from_default_env(); // sets log level filter from environment (or defaults)
|
||||
log_builder.filter_level(log::LevelFilter::Debug);
|
||||
log_builder.format_timestamp_nanos();
|
||||
log_builder.format(|buf, record| {
|
||||
let ts_format = buf.timestamp_nanos().to_string();
|
||||
writeln!(
|
||||
buf,
|
||||
"\x1b[1m{:?}\x1b[0m {}: {}",
|
||||
std::thread::current().id(),
|
||||
&ts_format[14..],
|
||||
record.args()
|
||||
)
|
||||
});
|
||||
|
||||
let _ = log_builder.try_init();
|
||||
}
|
||||
|
||||
fn generate_key_pairs(secret_key_paths: &[PathBuf], public_key_paths: &[PathBuf]) {
|
||||
for (secret_key_path, pub_key_path) in secret_key_paths.iter().zip(public_key_paths.iter()) {
|
||||
let output = test_bin::get_test_bin(BIN)
|
||||
.args(["gen-keys", "--secret-key"])
|
||||
.arg(secret_key_path)
|
||||
.arg("--public-key")
|
||||
.arg(pub_key_path)
|
||||
.output()
|
||||
.expect("Failed to start {BIN}");
|
||||
|
||||
assert_eq!(String::from_utf8_lossy(&output.stdout), "");
|
||||
assert!(secret_key_path.is_file());
|
||||
assert!(pub_key_path.is_file());
|
||||
}
|
||||
}
|
||||
|
||||
fn run_server_client_exchange(
|
||||
(server_cmd, server_test_builder): (&std::process::Command, AppServerTestBuilder),
|
||||
(client_cmd, client_test_builder): (&std::process::Command, AppServerTestBuilder),
|
||||
) {
|
||||
let (server_terminate, server_terminate_rx) = std::sync::mpsc::channel();
|
||||
let (client_terminate, client_terminate_rx) = std::sync::mpsc::channel();
|
||||
|
||||
let cli = CliArgs::try_parse_from(
|
||||
[server_cmd.get_program()]
|
||||
.into_iter()
|
||||
.chain(server_cmd.get_args()),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
std::thread::spawn(move || {
|
||||
cli.command
|
||||
.run(Some(
|
||||
server_test_builder
|
||||
.termination_handler(Some(server_terminate_rx))
|
||||
.build()
|
||||
.unwrap(),
|
||||
))
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
let cli = CliArgs::try_parse_from(
|
||||
[client_cmd.get_program()]
|
||||
.into_iter()
|
||||
.chain(client_cmd.get_args()),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
std::thread::spawn(move || {
|
||||
cli.command
|
||||
.run(Some(
|
||||
client_test_builder
|
||||
.termination_handler(Some(client_terminate_rx))
|
||||
.build()
|
||||
.unwrap(),
|
||||
))
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
// give them some time to do the key exchange under load
|
||||
std::thread::sleep(Duration::from_secs(10));
|
||||
|
||||
// time's up, kill the children
|
||||
server_terminate.send(()).unwrap();
|
||||
client_terminate.send(()).unwrap();
|
||||
}
|
||||
|
||||
// check that we can exchange keys
|
||||
#[test]
|
||||
#[serial]
|
||||
fn check_exchange_under_normal() {
|
||||
setup_logging();
|
||||
|
||||
let tmpdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join("exchange");
|
||||
fs::create_dir_all(&tmpdir).unwrap();
|
||||
|
||||
let secret_key_paths = [tmpdir.join("secret-key-0"), tmpdir.join("secret-key-1")];
|
||||
let public_key_paths = [tmpdir.join("public-key-0"), tmpdir.join("public-key-1")];
|
||||
let shared_key_paths = [tmpdir.join("shared-key-0"), tmpdir.join("shared-key-1")];
|
||||
|
||||
// generate key pairs
|
||||
generate_key_pairs(&secret_key_paths, &public_key_paths);
|
||||
|
||||
// start first process, the server
|
||||
let port = loop {
|
||||
if let Some(port) = find_udp_socket() {
|
||||
break port;
|
||||
}
|
||||
};
|
||||
|
||||
let listen_addr = format!("::1:{port}");
|
||||
let mut server_cmd = std::process::Command::new(BIN);
|
||||
|
||||
server_cmd
|
||||
.args(["exchange", "secret-key"])
|
||||
.arg(&secret_key_paths[0])
|
||||
.arg("public-key")
|
||||
.arg(&public_key_paths[0])
|
||||
.args(["listen", &listen_addr, "verbose", "peer", "public-key"])
|
||||
.arg(&public_key_paths[1])
|
||||
.arg("outfile")
|
||||
.arg(&shared_key_paths[0]);
|
||||
|
||||
let server_test_builder = AppServerTestBuilder::default();
|
||||
|
||||
let mut client_cmd = std::process::Command::new(BIN);
|
||||
client_cmd
|
||||
.args(["exchange", "secret-key"])
|
||||
.arg(&secret_key_paths[1])
|
||||
.arg("public-key")
|
||||
.arg(&public_key_paths[1])
|
||||
.args(["verbose", "peer", "public-key"])
|
||||
.arg(&public_key_paths[0])
|
||||
.args(["endpoint", &listen_addr])
|
||||
.arg("outfile")
|
||||
.arg(&shared_key_paths[1]);
|
||||
|
||||
let client_test_builder = AppServerTestBuilder::default();
|
||||
|
||||
run_server_client_exchange(
|
||||
(&server_cmd, server_test_builder),
|
||||
(&client_cmd, client_test_builder),
|
||||
);
|
||||
|
||||
// read the shared keys they created
|
||||
let shared_keys: Vec<_> = shared_key_paths
|
||||
.iter()
|
||||
.map(|p| fs::read_to_string(p).unwrap())
|
||||
.collect();
|
||||
|
||||
// check that they created two equal keys
|
||||
assert_eq!(shared_keys.len(), 2);
|
||||
assert_eq!(shared_keys[0], shared_keys[1]);
|
||||
|
||||
// cleanup
|
||||
fs::remove_dir_all(&tmpdir).unwrap();
|
||||
}
|
||||
|
||||
// check that we can trigger a DoS condition and still exchange keys while under DoS
|
||||
// This test runs the responder (server) with the feature flag "integration_test_always_under_load" so that it stays in the under-load condition for the entire test.
|
||||
#[test]
|
||||
#[serial]
|
||||
fn check_exchange_under_dos() {
|
||||
setup_logging();
|
||||
|
||||
    // Generate the responder binary with the integration_test feature enabled
|
||||
let tmpdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join("exchange-dos");
|
||||
fs::create_dir_all(&tmpdir).unwrap();
|
||||
|
||||
let secret_key_paths = [tmpdir.join("secret-key-0"), tmpdir.join("secret-key-1")];
|
||||
let public_key_paths = [tmpdir.join("public-key-0"), tmpdir.join("public-key-1")];
|
||||
let shared_key_paths = [tmpdir.join("shared-key-0"), tmpdir.join("shared-key-1")];
|
||||
|
||||
// generate key pairs
|
||||
generate_key_pairs(&secret_key_paths, &public_key_paths);
|
||||
|
||||
// start first process, the server
|
||||
let port = loop {
|
||||
if let Some(port) = find_udp_socket() {
|
||||
break port;
|
||||
}
|
||||
};
|
||||
|
||||
let listen_addr = format!("::1:{port}");
|
||||
|
||||
let mut server_cmd = std::process::Command::new(BIN);
|
||||
|
||||
server_cmd
|
||||
.args(["exchange", "secret-key"])
|
||||
.arg(&secret_key_paths[0])
|
||||
.arg("public-key")
|
||||
.arg(&public_key_paths[0])
|
||||
.args(["listen", &listen_addr, "verbose", "peer", "public-key"])
|
||||
.arg(&public_key_paths[1])
|
||||
.arg("outfile")
|
||||
.arg(&shared_key_paths[0]);
|
||||
|
||||
let server_test_builder = AppServerTestBuilder::default().enable_dos_permanently(true);
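    // enable_dos_permanently keeps the responder in the under-load state for the whole run,
    // forcing the key exchange to succeed via the DoS-mitigation path.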
|
||||
|
||||
let mut client_cmd = std::process::Command::new(BIN);
|
||||
client_cmd
|
||||
.args(["exchange", "secret-key"])
|
||||
.arg(&secret_key_paths[1])
|
||||
.arg("public-key")
|
||||
.arg(&public_key_paths[1])
|
||||
.args(["verbose", "peer", "public-key"])
|
||||
.arg(&public_key_paths[0])
|
||||
.args(["endpoint", &listen_addr])
|
||||
.arg("outfile")
|
||||
.arg(&shared_key_paths[1]);
|
||||
|
||||
let client_test_builder = AppServerTestBuilder::default();
|
||||
|
||||
run_server_client_exchange(
|
||||
(&server_cmd, server_test_builder),
|
||||
(&client_cmd, client_test_builder),
|
||||
);
|
||||
|
||||
// read the shared keys they created
|
||||
let shared_keys: Vec<_> = shared_key_paths
|
||||
.iter()
|
||||
.map(|p| fs::read_to_string(p).unwrap())
|
||||
.collect();
|
||||
|
||||
// check that they created two equal keys
|
||||
assert_eq!(shared_keys.len(), 2);
|
||||
assert_eq!(shared_keys[0], shared_keys[1]);
|
||||
|
||||
// cleanup
|
||||
fs::remove_dir_all(&tmpdir).unwrap();
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, Default)]
|
||||
struct MockBrokerInner {
|
||||
psk: Option<Secret<WG_KEY_LEN>>,
|
||||
peer_id: Option<Public<WG_PEER_LEN>>,
|
||||
interface: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct MockBroker {
|
||||
inner: Arc<Mutex<MockBrokerInner>>,
|
||||
}
|
||||
|
||||
impl WireguardBrokerMio for MockBroker {
|
||||
type MioError = anyhow::Error;
|
||||
|
||||
fn register(
|
||||
&mut self,
|
||||
_registry: &mio::Registry,
|
||||
_token: mio::Token,
|
||||
) -> Result<(), Self::MioError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_poll(&mut self) -> Result<(), Self::MioError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn unregister(&mut self, _registry: &mio::Registry) -> Result<(), Self::MioError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl rosenpass_wireguard_broker::WireGuardBroker for MockBroker {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn set_psk(
|
||||
&mut self,
|
||||
config: rosenpass_wireguard_broker::SerializedBrokerConfig<'_>,
|
||||
) -> Result<(), Self::Error> {
|
||||
loop {
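            // Busy-wait on try_lock(): the lock is only held briefly by the test itself, so
            // spinning is acceptable for this mock.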
|
||||
let mut lock = self.inner.try_lock();
|
||||
|
||||
if let Ok(ref mut mutex) = lock {
|
||||
**mutex = MockBrokerInner {
|
||||
psk: Some(config.psk.clone()),
|
||||
peer_id: Some(config.peer_id.clone()),
|
||||
interface: Some(std::str::from_utf8(config.interface).unwrap().to_string()),
|
||||
};
|
||||
break Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
363
rp
Executable file
@@ -0,0 +1,363 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
# String formatting subsystem
|
||||
|
||||
formatting_init() {
|
||||
endl=$'\n'
|
||||
}
|
||||
|
||||
enquote() {
|
||||
while (( $# > 1 )); do
|
||||
printf "%q " "${1}"; shift
|
||||
done
|
||||
if (( $# == 1 )); then
|
||||
printf "%q" "${1}"; shift
|
||||
fi
|
||||
}
|
||||
|
||||
multiline() {
|
||||
# shellcheck disable=SC1004
|
||||
echo "${1} " | awk '
|
||||
function pm(a, b, l) {
|
||||
return length(a) > l \
|
||||
&& length(b) > l \
|
||||
&& substr(a, 1, l+1) == substr(b, 1, l+1) \
|
||||
? pm(a, b, l+1) : l;
|
||||
}
|
||||
|
||||
!started && $0 !~ /^[ \t]*$/ {
|
||||
started=1
|
||||
match($0, /^[ \t]*/)
|
||||
prefix=substr($0, 1, RLENGTH)
|
||||
}
|
||||
|
||||
started {
|
||||
print(substr($0, 1 + pm($0, prefix)));
|
||||
}
|
||||
'
|
||||
}
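# The helper above strips the common leading indentation (taken from the first non-blank line),
# so frag/cleanup snippets can be written indented inside this script.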
|
||||
|
||||
dbg() {
|
||||
echo >&2 "$@"
|
||||
}
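# detect_git_dir prints the repository toplevel; its 3>&1 1>&2 2>&3 redirections swap stdout and
# stderr so that only git's error output passes through the sed filter, then swap them back.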
|
||||
|
||||
|
||||
detect_git_dir() {
|
||||
# https://stackoverflow.com/questions/3618078/pipe-only-stderr-through-a-filter
|
||||
(
|
||||
git -C "${scriptdir}" rev-parse --show-toplevel 3>&1 1>&2 2>&3 3>&- \
|
||||
| sed '
|
||||
/not a git repository/d;
|
||||
s/^/WARNING: /'
|
||||
) 3>&1 1>&2 2>&3 3>&-
|
||||
}
|
||||
|
||||
# Cleanup subsystem (sigterm)
|
||||
|
||||
cleanup_init() {
|
||||
cleanup_actions=()
|
||||
trap cleanup_apply exit
|
||||
}
|
||||
|
||||
cleanup_apply() {
|
||||
local f
|
||||
for f in "${cleanup_actions[@]}"; do
|
||||
eval "${f}"
|
||||
done
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
cleanup_actions+=("$(multiline "${1}")")
|
||||
}
|
||||
|
||||
# Transactional execution subsystem
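# Nothing runs immediately: frag() queues shell snippets and frag_apply() evals them in order,
# echoing each snippet first when "explain" is set.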
|
||||
|
||||
frag_init() {
|
||||
explain=0
|
||||
frag_transaction=()
|
||||
frag "
|
||||
#! /bin/bash
|
||||
set -e"
|
||||
}
|
||||
|
||||
frag_apply() {
|
||||
local f
|
||||
for f in "${frag_transaction[@]}"; do
|
||||
if (( explain == 1 )); then
|
||||
dbg "${f}"
|
||||
fi
|
||||
eval "${f}"
|
||||
done
|
||||
}
|
||||
|
||||
frag() {
|
||||
frag_transaction+=("$(multiline "${1}")")
|
||||
}
|
||||
|
||||
frag_append() {
|
||||
local len; len="${#frag_transaction[@]}"
|
||||
frag_transaction=("${frag_transaction[@]:0:len-1}" "${frag_transaction[len-1]}${1}")
|
||||
}
|
||||
|
||||
frag_append_esc() {
|
||||
frag_append " \\${endl}${1}"
|
||||
}
|
||||
|
||||
# Usage documentation subsystem
|
||||
usage_init() {
|
||||
usagestack=("${script}")
|
||||
}
|
||||
|
||||
usage_snap() {
|
||||
echo "${#usagestack}"
|
||||
}
|
||||
|
||||
usage_restore() {
|
||||
local n; n="${1}"
|
||||
dbg REST "${1}"
|
||||
usagestack=("${usagestack[@]:0:n-2}")
|
||||
}
|
||||
|
||||
|
||||
usage() {
|
||||
dbg "Usage: ${usagestack[*]}"
|
||||
}
|
||||
|
||||
fatal() {
|
||||
dbg "FATAL: $*"
|
||||
usage
|
||||
exit 1
|
||||
}
|
||||
|
||||
genkey() {
|
||||
usagestack+=("PRIVATE_KEYS_DIR")
|
||||
local skdir
|
||||
skdir="${1%/}"; shift || fatal "Required positional argument: PRIVATE_KEYS_DIR"
|
||||
|
||||
while (( $# > 0 )); do
|
||||
local arg; arg="$1"; shift
|
||||
case "${arg}" in
|
||||
-h | -help | --help | help) usage; return 0 ;;
|
||||
*) fatal "Unknown option ${arg}";;
|
||||
esac
|
||||
done
|
||||
|
||||
if test -e "${skdir}"; then
|
||||
fatal "PRIVATE_KEYS_DIR \"${skdir}\" already exists"
|
||||
fi
|
||||
|
||||
frag "
|
||||
umask 077
|
||||
mkdir -p $(enquote "${skdir}")
|
||||
wg genkey > $(enquote "${skdir}"/wgsk)
|
||||
$(enquote "${binary}") gen-keys \\
|
||||
-s $(enquote "${skdir}"/pqsk) \\
|
||||
-p $(enquote "${skdir}"/pqpk)"
|
||||
}
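# At this point the queued transaction leaves PRIVATE_KEYS_DIR with wgsk (the classic WireGuard
# secret key) plus pqsk/pqpk (the post-quantum Rosenpass key pair).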
|
||||
|
||||
pubkey() {
|
||||
usagestack+=("PRIVATE_KEYS_DIR" "PUBLIC_KEYS_DIR")
|
||||
local skdir pkdir
|
||||
skdir="${1%/}"; shift || fatal "Required positional argument: PRIVATE_KEYS_DIR"
|
||||
pkdir="${1%/}"; shift || fatal "Required positional argument: PUBLIC_KEYS_DIR"
|
||||
|
||||
while (( $# > 0 )); do
|
||||
local arg; arg="$1"; shift
|
||||
case "${arg}" in
|
||||
-h | -help | --help | help) usage; exit 0;;
|
||||
*) fatal "Unknown option ${arg}";;
|
||||
esac
|
||||
done
|
||||
|
||||
if test -e "${pkdir}"; then
|
||||
fatal "PUBLIC_KEYS_DIR \"${pkdir}\" already exists"
|
||||
fi
|
||||
|
||||
frag "
|
||||
mkdir -p $(enquote "${pkdir}")
|
||||
wg pubkey < $(enquote "${skdir}"/wgsk) > $(enquote "${pkdir}/wgpk")
|
||||
cp $(enquote "${skdir}"/pqpk) $(enquote "${pkdir}/pqpk")"
|
||||
}
|
||||
|
||||
exchange() {
|
||||
usagestack+=("PRIVATE_KEYS_DIR" "[dev <device>]" "[listen <ip>:<port>]" "[peer PUBLIC_KEYS_DIR [endpoint <ip>:<port>] [persistent-keepalive <interval>] [allowed-ips <ip1>/<cidr1>[,<ip2>/<cidr2>]...]]...")
|
||||
local skdir dev lport
|
||||
dev="${project_name}0"
|
||||
skdir="${1%/}"; shift || fatal "Required positional argument: PRIVATE_KEYS_DIR"
|
||||
|
||||
while (( $# > 0 )); do
|
||||
local arg; arg="$1"; shift
|
||||
case "${arg}" in
|
||||
dev) dev="${1}"; shift || fatal "dev option requires parameter";;
|
||||
peer) set -- "peer" "$@"; break;; # Parsed down below
|
||||
listen)
|
||||
local listen; listen="${1}";
|
||||
lip="${listen%:*}";
|
||||
lport="${listen/*:/}";
|
||||
if [[ "$lip" = "$lport" ]]; then
|
||||
lip="[0::0]"
|
||||
fi
|
||||
shift;;
|
||||
-h | -help | --help | help) usage; return 0;;
|
||||
*) fatal "Unknown option ${arg}";;
|
||||
esac
|
||||
done
|
||||
|
||||
if (( $# == 0 )); then
|
||||
fatal "Needs at least one peer specified"
|
||||
fi
|
||||
|
||||
frag "
|
||||
# Create the Wireguard interface
|
||||
ip link add dev $(enquote "${dev}") type wireguard || true"
|
||||
|
||||
cleanup "
|
||||
ip link del dev $(enquote "${dev}") || true"
|
||||
|
||||
frag "
|
||||
ip link set dev $(enquote "${dev}") up"
|
||||
|
||||
frag "
|
||||
# Deploy the classic wireguard private key
|
||||
wg set $(enquote "${dev}") private-key $(enquote "${skdir}/wgsk")"
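# Classic WireGuard is configured to listen on the Rosenpass port + 1; the same +1 offset is
# applied to peer endpoints further down.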
|
||||
|
||||
|
||||
if test -n "${lport}"; then
|
||||
frag_append "listen-port $(enquote "$(( lport + 1 ))")"
|
||||
fi
|
||||
|
||||
frag "
|
||||
# Launch the post quantum wireguard exchange daemon
|
||||
$(enquote "${binary}") exchange"
|
||||
|
||||
if (( verbose == 1 )); then
|
||||
frag_append "verbose"
|
||||
fi
|
||||
|
||||
frag_append_esc " secret-key $(enquote "${skdir}/pqsk")"
|
||||
frag_append_esc " public-key $(enquote "${skdir}/pqpk")"
|
||||
|
||||
if test -n "${lport}"; then
|
||||
frag_append_esc " listen $(enquote "${lip}:${lport}")"
|
||||
fi
|
||||
|
||||
usagestack+=("peer" "PUBLIC_KEYS_DIR endpoint IP:PORT")
|
||||
|
||||
while (( $# > 0 )); do
|
||||
shift; # Skip "peer" argument
|
||||
|
||||
local peerdir ip port keepalive allowedips
|
||||
peerdir="${1%/}"; shift || fatal "Required peer argument: PUBLIC_KEYS_DIR"
|
||||
|
||||
while (( $# > 0 )); do
|
||||
local arg; arg="$1"; shift
|
||||
case "${arg}" in
|
||||
peer) set -- "peer" "$@"; break;; # Next peer
|
||||
endpoint) ip="${1%:*}"; port="${1/*:/}"; shift;;
|
||||
persistent-keepalive) keepalive="${1}"; shift;;
|
||||
allowed-ips) allowedips="${1}"; shift;;
|
||||
-h | -help | --help | help) usage; return 0;;
|
||||
*) fatal "Unknown option ${arg}";;
|
||||
esac
|
||||
done
|
||||
|
||||
# Public key
|
||||
frag_append_esc " peer public-key $(enquote "${peerdir}/pqpk")"
|
||||
|
||||
# PSK
|
||||
local pskfile; pskfile="${peerdir}/psk"
|
||||
if test -f "${pskfile}"; then
|
||||
frag_append_esc " preshared-key $(enquote "${pskfile}")"
|
||||
fi
|
||||
|
||||
|
||||
if test -n "${ip}"; then
|
||||
frag_append_esc " endpoint $(enquote "${ip}:${port}")"
|
||||
fi
|
||||
|
||||
frag_append_esc " wireguard $(enquote "${dev}") $(enquote "$(cat "${peerdir}/wgpk")")"
|
||||
|
||||
if test -n "${ip}"; then
|
||||
frag_append_esc " endpoint $(enquote "${ip}:$(( port + 1 ))")"
|
||||
fi
|
||||
|
||||
if test -n "${keepalive}"; then
|
||||
frag_append_esc " persistent-keepalive $(enquote "${keepalive}")"
|
||||
fi
|
||||
|
||||
if test -n "${allowedips}"; then
|
||||
frag_append_esc " allowed-ips $(enquote "${allowedips}")"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
find_rosenpass_binary() {
|
||||
local binary; binary=""
|
||||
if [[ -n "${gitdir}" ]]; then
|
||||
# If rp is run from the git repo, use the newest build artifact
|
||||
binary=$(
|
||||
find "${gitdir}/result/bin/${project_name}" \
|
||||
"${gitdir}"/target/{release,debug}/"${project_name}" \
|
||||
-printf "%T@ %p\n" 2>/dev/null \
|
||||
| sort -nr \
|
||||
| awk 'NR==1 { print($2) }'
|
||||
)
|
||||
elif [[ -n "${nixdir}" ]]; then
|
||||
# If rp is run from nix, use the nix-installed rosenpass version
|
||||
binary="${nixdir}/bin/${project_name}"
|
||||
fi
|
||||
|
||||
if [[ -z "${binary}" ]]; then
|
||||
binary="${project_name}"
|
||||
fi
|
||||
|
||||
echo "${binary}"
|
||||
}
|
||||
|
||||
main() {
|
||||
formatting_init
|
||||
cleanup_init
|
||||
usage_init
|
||||
frag_init
|
||||
|
||||
project_name="rosenpass"
|
||||
verbose=0
|
||||
scriptdir="$(dirname "${script}")"
|
||||
gitdir="$(detect_git_dir)" || true
|
||||
nixdir="$(readlink -f result/bin/rp | grep -Pio '^/nix/store/[^/]+(?=/bin/[^/]+)')" || true
|
||||
binary="$(find_rosenpass_binary)"
|
||||
|
||||
# Parse command
|
||||
|
||||
usagestack+=("[explain]" "[verbose]" "genkey|pubkey|exchange" "[ARGS]...")
|
||||
|
||||
local cmd
|
||||
while (( $# > 0 )); do
|
||||
local arg; arg="$1"; shift
|
||||
case "${arg}" in
|
||||
genkey|pubkey|exchange) cmd="${arg}"; break;;
|
||||
explain) explain=1;;
|
||||
verbose) verbose=1;;
|
||||
-h | -help | --help | help) usage; return 0 ;;
|
||||
*) fatal "Unknown command ${arg}";;
|
||||
esac
|
||||
done
|
||||
|
||||
test -n "${cmd}" || fatal "No command supplied"
|
||||
usagestack=("${script}")
|
||||
|
||||
# Execute command
|
||||
|
||||
usagestack+=("${cmd}")
|
||||
"${cmd}" "$@"
|
||||
usagestack=("${script}")
|
||||
|
||||
# Apply transaction
|
||||
|
||||
frag_apply
|
||||
}
|
||||
|
||||
script="$0"
|
||||
main "$@"
|
||||
@@ -1,43 +0,0 @@
|
||||
[package]
|
||||
name = "rp"
|
||||
version = "0.2.1"
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Build post-quantum-secure VPNs with WireGuard!"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
base64ct = { workspace = true }
|
||||
x25519-dalek = { version = "2", features = ["static_secrets"] }
|
||||
zeroize = { workspace = true }
|
||||
|
||||
rosenpass = { workspace = true }
|
||||
rosenpass-ciphers = { workspace = true }
|
||||
rosenpass-cipher-traits = { workspace = true }
|
||||
rosenpass-secret-memory = { workspace = true }
|
||||
rosenpass-util = { workspace = true }
|
||||
rosenpass-wireguard-broker = {workspace = true}
|
||||
|
||||
tokio = {workspace = true}
|
||||
|
||||
[target.'cfg(any(target_os = "linux", target_os = "freebsd"))'.dependencies]
|
||||
ctrlc-async = "3.2"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
genetlink = "0.2"
|
||||
rtnetlink = "0.14"
|
||||
netlink-packet-core = "0.7"
|
||||
netlink-packet-generic = "0.3"
|
||||
netlink-packet-wireguard = "0.2"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = {workspace = true}
|
||||
stacker = {workspace = true}
|
||||
|
||||
[features]
|
||||
enable_memfd_alloc = []
|
||||
experiment_libcrux = ["rosenpass-ciphers/experiment_libcrux"]
|
||||
462
rp/src/cli.rs
@@ -1,462 +0,0 @@
|
||||
use std::path::PathBuf;
|
||||
use std::{iter::Peekable, net::SocketAddr};
|
||||
|
||||
use crate::exchange::{ExchangeOptions, ExchangePeer};
|
||||
|
||||
pub enum Command {
|
||||
GenKey {
|
||||
private_keys_dir: PathBuf,
|
||||
},
|
||||
PubKey {
|
||||
private_keys_dir: PathBuf,
|
||||
public_keys_dir: PathBuf,
|
||||
},
|
||||
Exchange(ExchangeOptions),
|
||||
Help,
|
||||
}
|
||||
|
||||
enum CommandType {
|
||||
GenKey,
|
||||
PubKey,
|
||||
Exchange,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Cli {
|
||||
pub verbose: bool,
|
||||
pub command: Option<Command>,
|
||||
}
|
||||
|
||||
fn fatal<T>(note: &str, command: Option<CommandType>) -> Result<T, String> {
|
||||
match command {
|
||||
Some(command) => match command {
|
||||
CommandType::GenKey => Err(format!("{}\nUsage: rp genkey PRIVATE_KEYS_DIR", note)),
|
||||
CommandType::PubKey => Err(format!("{}\nUsage: rp pubkey PRIVATE_KEYS_DIR PUBLIC_KEYS_DIR", note)),
|
||||
CommandType::Exchange => Err(format!("{}\nUsage: rp exchange PRIVATE_KEYS_DIR [dev <device>] [listen <ip>:<port>] [peer PUBLIC_KEYS_DIR [endpoint <ip>:<port>] [persistent-keepalive <interval>] [allowed-ips <ip1>/<cidr1>[,<ip2>/<cidr2>]...]]...", note)),
|
||||
},
|
||||
None => Err(format!("{}\nUsage: rp [verbose] genkey|pubkey|exchange [ARGS]...", note)),
|
||||
}
|
||||
}
|
||||
|
||||
impl ExchangePeer {
|
||||
pub fn parse(args: &mut &mut Peekable<impl Iterator<Item = String>>) -> Result<Self, String> {
|
||||
let mut peer = ExchangePeer::default();
|
||||
|
||||
if let Some(public_keys_dir) = args.next() {
|
||||
peer.public_keys_dir = PathBuf::from(public_keys_dir);
|
||||
} else {
|
||||
return fatal(
|
||||
"Required positional argument: PUBLIC_KEYS_DIR",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
|
||||
while let Some(x) = args.peek() {
|
||||
let x = x.as_str();
|
||||
|
||||
// break if next peer is being defined
|
||||
if x == "peer" {
|
||||
break;
|
||||
}
|
||||
|
||||
let x = args.next().unwrap();
|
||||
let x = x.as_str();
|
||||
|
||||
match x {
|
||||
"endpoint" => {
|
||||
if let Some(addr) = args.next() {
|
||||
if let Ok(addr) = addr.parse::<SocketAddr>() {
|
||||
peer.endpoint = Some(addr);
|
||||
} else {
|
||||
return fatal(
|
||||
"invalid parameter for endpoint option",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
return fatal(
|
||||
"endpoint option requires parameter",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
}
|
||||
"persistent-keepalive" => {
|
||||
if let Some(ka) = args.next() {
|
||||
if let Ok(ka) = ka.parse::<u32>() {
|
||||
peer.persistent_keepalive = Some(ka);
|
||||
} else {
|
||||
return fatal(
|
||||
"invalid parameter for persistent-keepalive option",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
return fatal(
|
||||
"persistent-keepalive option requires parameter",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
}
|
||||
"allowed-ips" => {
|
||||
if let Some(ips) = args.next() {
|
||||
peer.allowed_ips = Some(ips);
|
||||
} else {
|
||||
return fatal(
|
||||
"allowed-ips option requires parameter",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return fatal(
|
||||
&format!("Unknown option {}", x),
|
||||
Some(CommandType::Exchange),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(peer)
|
||||
}
|
||||
}
|
||||
|
||||
impl ExchangeOptions {
|
||||
pub fn parse(mut args: &mut Peekable<impl Iterator<Item = String>>) -> Result<Self, String> {
|
||||
let mut options = ExchangeOptions::default();
|
||||
|
||||
if let Some(private_keys_dir) = args.next() {
|
||||
options.private_keys_dir = PathBuf::from(private_keys_dir);
|
||||
} else {
|
||||
return fatal(
|
||||
"Required positional argument: PRIVATE_KEYS_DIR",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
|
||||
while let Some(x) = args.next() {
|
||||
let x = x.as_str();
|
||||
|
||||
match x {
|
||||
"dev" => {
|
||||
if let Some(device) = args.next() {
|
||||
options.dev = Some(device);
|
||||
} else {
|
||||
return fatal("dev option requires parameter", Some(CommandType::Exchange));
|
||||
}
|
||||
}
|
||||
"listen" => {
|
||||
if let Some(addr) = args.next() {
|
||||
if let Ok(addr) = addr.parse::<SocketAddr>() {
|
||||
options.listen = Some(addr);
|
||||
} else {
|
||||
return fatal(
|
||||
"invalid parameter for listen option",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
return fatal(
|
||||
"listen option requires parameter",
|
||||
Some(CommandType::Exchange),
|
||||
);
|
||||
}
|
||||
}
|
||||
"peer" => {
|
||||
let peer = ExchangePeer::parse(&mut args)?;
|
||||
options.peers.push(peer);
|
||||
}
|
||||
_ => {
|
||||
return fatal(
|
||||
&format!("Unknown option {}", x),
|
||||
Some(CommandType::Exchange),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(options)
|
||||
}
|
||||
}
|
||||
|
||||
impl Cli {
|
||||
pub fn parse(mut args: Peekable<impl Iterator<Item = String>>) -> Result<Self, String> {
|
||||
let mut cli = Cli::default();
|
||||
|
||||
let _ = args.next(); // skip executable name
|
||||
|
||||
while let Some(x) = args.next() {
|
||||
let x = x.as_str();
|
||||
|
||||
match x {
|
||||
"verbose" => {
|
||||
cli.verbose = true;
|
||||
}
|
||||
"explain" => {
|
||||
eprintln!("WARN: the explain argument is no longer supported");
|
||||
}
|
||||
"genkey" => {
|
||||
if cli.command.is_some() {
|
||||
return fatal("Too many commands supplied", None);
|
||||
}
|
||||
|
||||
if let Some(private_keys_dir) = args.next() {
|
||||
let private_keys_dir = PathBuf::from(private_keys_dir);
|
||||
|
||||
cli.command = Some(Command::GenKey { private_keys_dir });
|
||||
} else {
|
||||
return fatal(
|
||||
"Required positional argument: PRIVATE_KEYS_DIR",
|
||||
Some(CommandType::GenKey),
|
||||
);
|
||||
}
|
||||
}
|
||||
"pubkey" => {
|
||||
if cli.command.is_some() {
|
||||
return fatal("Too many commands supplied", None);
|
||||
}
|
||||
|
||||
if let Some(private_keys_dir) = args.next() {
|
||||
let private_keys_dir = PathBuf::from(private_keys_dir);
|
||||
|
||||
if let Some(public_keys_dir) = args.next() {
|
||||
let public_keys_dir = PathBuf::from(public_keys_dir);
|
||||
|
||||
cli.command = Some(Command::PubKey {
|
||||
private_keys_dir,
|
||||
public_keys_dir,
|
||||
});
|
||||
} else {
|
||||
return fatal(
|
||||
"Required positional argument: PUBLIC_KEYS_DIR",
|
||||
Some(CommandType::PubKey),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
return fatal(
|
||||
"Required positional argument: PRIVATE_KEYS_DIR",
|
||||
Some(CommandType::PubKey),
|
||||
);
|
||||
}
|
||||
}
|
||||
"exchange" => {
|
||||
if cli.command.is_some() {
|
||||
return fatal("Too many commands supplied", None);
|
||||
}
|
||||
|
||||
let options = ExchangeOptions::parse(&mut args)?;
|
||||
cli.command = Some(Command::Exchange(options));
|
||||
}
|
||||
"help" => {
|
||||
cli.command = Some(Command::Help);
|
||||
}
|
||||
_ => return fatal(&format!("Unknown command {}", x), None),
|
||||
};
|
||||
}
|
||||
|
||||
if cli.command.is_none() {
|
||||
return fatal("No command supplied", None);
|
||||
}
|
||||
|
||||
Ok(cli)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::cli::{Cli, Command};
|
||||
|
||||
#[inline]
|
||||
fn parse(arr: &[&str]) -> Result<Cli, String> {
|
||||
Cli::parse(arr.iter().map(|x| x.to_string()).peekable())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn parse_err(arr: &[&str]) -> bool {
|
||||
parse(arr).is_err()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bare_errors() {
|
||||
assert!(parse_err(&["rp"]));
|
||||
assert!(parse_err(&["rp", "verbose"]));
|
||||
assert!(parse_err(&["rp", "thiscommanddoesntexist"]));
|
||||
assert!(parse_err(&[
|
||||
"rp",
|
||||
"thiscommanddoesntexist",
|
||||
"genkey",
|
||||
"./fakedir/"
|
||||
]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn genkey_errors() {
|
||||
assert!(parse_err(&["rp", "genkey"]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn genkey_works() {
|
||||
let cli = parse(&["rp", "genkey", "./fakedir"]);
|
||||
|
||||
assert!(cli.is_ok());
|
||||
let cli = cli.unwrap();
|
||||
|
||||
assert!(!cli.verbose);
|
||||
assert!(matches!(cli.command, Some(Command::GenKey { .. })));
|
||||
|
||||
match cli.command {
|
||||
Some(Command::GenKey { private_keys_dir }) => {
|
||||
assert_eq!(private_keys_dir.to_str().unwrap(), "./fakedir");
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pubkey_errors() {
|
||||
assert!(parse_err(&["rp", "pubkey"]));
|
||||
assert!(parse_err(&["rp", "pubkey", "./fakedir"]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pubkey_works() {
|
||||
let cli = parse(&["rp", "pubkey", "./fakedir", "./fakedir2"]);
|
||||
|
||||
assert!(cli.is_ok());
|
||||
let cli = cli.unwrap();
|
||||
|
||||
assert!(!cli.verbose);
|
||||
assert!(matches!(cli.command, Some(Command::PubKey { .. })));
|
||||
|
||||
match cli.command {
|
||||
Some(Command::PubKey {
|
||||
private_keys_dir,
|
||||
public_keys_dir,
|
||||
}) => {
|
||||
assert_eq!(private_keys_dir.to_str().unwrap(), "./fakedir");
|
||||
assert_eq!(public_keys_dir.to_str().unwrap(), "./fakedir2");
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exchange_errors() {
|
||||
assert!(parse_err(&["rp", "exchange"]));
|
||||
assert!(parse_err(&[
|
||||
"rp",
|
||||
"exchange",
|
||||
"./fakedir",
|
||||
"notarealoption"
|
||||
]));
|
||||
assert!(parse_err(&["rp", "exchange", "./fakedir", "listen"]));
|
||||
assert!(parse_err(&[
|
||||
"rp",
|
||||
"exchange",
|
||||
"./fakedir",
|
||||
"listen",
|
||||
"notarealip"
|
||||
]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exchange_works() {
|
||||
let cli = parse(&["rp", "exchange", "./fakedir"]);
|
||||
|
||||
assert!(cli.is_ok());
|
||||
let cli = cli.unwrap();
|
||||
|
||||
assert!(!cli.verbose);
|
||||
assert!(matches!(cli.command, Some(Command::Exchange(_))));
|
||||
|
||||
match cli.command {
|
||||
Some(Command::Exchange(options)) => {
|
||||
assert_eq!(options.private_keys_dir.to_str().unwrap(), "./fakedir");
|
||||
assert!(options.dev.is_none());
|
||||
assert!(options.listen.is_none());
|
||||
assert_eq!(options.peers.len(), 0);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let cli = parse(&[
|
||||
"rp",
|
||||
"exchange",
|
||||
"./fakedir",
|
||||
"dev",
|
||||
"devname",
|
||||
"listen",
|
||||
"127.0.0.1:1234",
|
||||
]);
|
||||
|
||||
assert!(cli.is_ok());
|
||||
let cli = cli.unwrap();
|
||||
|
||||
assert!(!cli.verbose);
|
||||
assert!(matches!(cli.command, Some(Command::Exchange(_))));
|
||||
|
||||
match cli.command {
|
||||
Some(Command::Exchange(options)) => {
|
||||
assert_eq!(options.private_keys_dir.to_str().unwrap(), "./fakedir");
|
||||
assert_eq!(options.dev, Some("devname".to_string()));
|
||||
assert_eq!(options.listen, Some("127.0.0.1:1234".parse().unwrap()));
|
||||
assert_eq!(options.peers.len(), 0);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let cli = parse(&[
|
||||
"rp",
|
||||
"exchange",
|
||||
"./fakedir",
|
||||
"dev",
|
||||
"devname",
|
||||
"listen",
|
||||
"127.0.0.1:1234",
|
||||
"peer",
|
||||
"./fakedir2",
|
||||
"endpoint",
|
||||
"127.0.0.1:2345",
|
||||
"persistent-keepalive",
|
||||
"15",
|
||||
"allowed-ips",
|
||||
"123.234.11.0/24,1.1.1.0/24",
|
||||
"peer",
|
||||
"./fakedir3",
|
||||
"endpoint",
|
||||
"127.0.0.1:5432",
|
||||
"persistent-keepalive",
|
||||
"30",
|
||||
]);
|
||||
|
||||
assert!(cli.is_ok());
|
||||
let cli = cli.unwrap();
|
||||
|
||||
assert!(!cli.verbose);
|
||||
assert!(matches!(cli.command, Some(Command::Exchange(_))));
|
||||
|
||||
match cli.command {
|
||||
Some(Command::Exchange(options)) => {
|
||||
assert_eq!(options.private_keys_dir.to_str().unwrap(), "./fakedir");
|
||||
assert_eq!(options.dev, Some("devname".to_string()));
|
||||
assert_eq!(options.listen, Some("127.0.0.1:1234".parse().unwrap()));
|
||||
assert_eq!(options.peers.len(), 2);
|
||||
|
||||
let peer = &options.peers[0];
|
||||
assert_eq!(peer.public_keys_dir.to_str().unwrap(), "./fakedir2");
|
||||
assert_eq!(peer.endpoint, Some("127.0.0.1:2345".parse().unwrap()));
|
||||
assert_eq!(peer.persistent_keepalive, Some(15));
|
||||
assert_eq!(
|
||||
peer.allowed_ips,
|
||||
Some("123.234.11.0/24,1.1.1.0/24".to_string())
|
||||
);
|
||||
|
||||
let peer = &options.peers[1];
|
||||
assert_eq!(peer.public_keys_dir.to_str().unwrap(), "./fakedir3");
|
||||
assert_eq!(peer.endpoint, Some("127.0.0.1:5432".parse().unwrap()));
|
||||
assert_eq!(peer.persistent_keepalive, Some(30));
|
||||
assert!(peer.allowed_ips.is_none());
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,279 +0,0 @@
|
||||
use std::{net::SocketAddr, path::PathBuf};
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
use crate::key::WG_B64_LEN;
|
||||
#[derive(Default)]
|
||||
pub struct ExchangePeer {
|
||||
pub public_keys_dir: PathBuf,
|
||||
pub endpoint: Option<SocketAddr>,
|
||||
pub persistent_keepalive: Option<u32>,
|
||||
pub allowed_ips: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ExchangeOptions {
|
||||
pub verbose: bool,
|
||||
pub private_keys_dir: PathBuf,
|
||||
pub dev: Option<String>,
|
||||
pub listen: Option<SocketAddr>,
|
||||
pub peers: Vec<ExchangePeer>,
|
||||
}
|
||||
|
||||
#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
|
||||
pub async fn exchange(_: ExchangeOptions) -> Result<()> {
|
||||
use anyhow::anyhow;
|
||||
|
||||
Err(anyhow!(
|
||||
"Your system {} is not yet supported. We are happy to receive patches to address this :)",
|
||||
std::env::consts::OS
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
|
||||
mod netlink {
|
||||
use anyhow::Result;
|
||||
use futures_util::{StreamExt as _, TryStreamExt as _};
|
||||
use genetlink::GenetlinkHandle;
|
||||
use netlink_packet_core::{NLM_F_ACK, NLM_F_REQUEST};
|
||||
use netlink_packet_wireguard::nlas::WgDeviceAttrs;
|
||||
use rtnetlink::Handle;
|
||||
|
||||
pub async fn link_create_and_up(rtnetlink: &Handle, link_name: String) -> Result<u32> {
|
||||
// add the link
|
||||
rtnetlink
|
||||
.link()
|
||||
.add()
|
||||
.wireguard(link_name.clone())
|
||||
.execute()
|
||||
.await?;
|
||||
|
||||
// retrieve the link to be able to up it
|
||||
let link = rtnetlink
|
||||
.link()
|
||||
.get()
|
||||
.match_name(link_name.clone())
|
||||
.execute()
|
||||
.into_stream()
|
||||
.into_future()
|
||||
.await
|
||||
.0
|
||||
.unwrap()?;
|
||||
|
||||
// up the link
|
||||
rtnetlink
|
||||
.link()
|
||||
.set(link.header.index)
|
||||
.up()
|
||||
.execute()
|
||||
.await?;
|
||||
|
||||
Ok(link.header.index)
|
||||
}
|
||||
|
||||
pub async fn link_cleanup(rtnetlink: &Handle, index: u32) -> Result<()> {
|
||||
rtnetlink.link().del(index).execute().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn link_cleanup_standalone(index: u32) -> Result<()> {
|
||||
let (connection, rtnetlink, _) = rtnetlink::new_connection()?;
|
||||
tokio::spawn(connection);
|
||||
|
||||
// We don't care if this fails, as the device may already have been auto-cleaned up.
|
||||
let _ = rtnetlink.link().del(index).execute().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// This replicates the functionality of the `wg set` command line tool.
|
||||
///
|
||||
/// It sets the specified WireGuard attributes of the indexed device by
|
||||
/// communicating with WireGuard's generic netlink interface, like the
|
||||
/// `wg` tool does.
|
||||
pub async fn wg_set(
|
||||
genetlink: &mut GenetlinkHandle,
|
||||
index: u32,
|
||||
mut attr: Vec<WgDeviceAttrs>,
|
||||
) -> Result<()> {
|
||||
use futures_util::StreamExt as _;
|
||||
use netlink_packet_core::{NetlinkMessage, NetlinkPayload};
|
||||
use netlink_packet_generic::GenlMessage;
|
||||
use netlink_packet_wireguard::{Wireguard, WireguardCmd};
|
||||
|
||||
// Scope our `set` command to only the device of the specified index
|
||||
attr.insert(0, WgDeviceAttrs::IfIndex(index));
|
||||
|
||||
// Construct the WireGuard-specific netlink packet
|
||||
let wgc = Wireguard {
|
||||
cmd: WireguardCmd::SetDevice,
|
||||
nlas: attr,
|
||||
};
|
||||
|
||||
// Construct final message
|
||||
let genl = GenlMessage::from_payload(wgc);
|
||||
let mut nlmsg = NetlinkMessage::from(genl);
|
||||
nlmsg.header.flags = NLM_F_REQUEST | NLM_F_ACK;
|
||||
|
||||
// Send and wait for the ACK or error
|
||||
let (res, _) = genetlink.request(nlmsg).await?.into_future().await;
|
||||
if let Some(res) = res {
|
||||
let res = res?;
|
||||
if let NetlinkPayload::Error(err) = res.payload {
|
||||
return Err(err.to_io().into());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
|
||||
pub async fn exchange(options: ExchangeOptions) -> Result<()> {
|
||||
use std::fs;
|
||||
|
||||
use anyhow::anyhow;
|
||||
use netlink_packet_wireguard::{constants::WG_KEY_LEN, nlas::WgDeviceAttrs};
|
||||
use rosenpass::{
|
||||
app_server::{AppServer, BrokerPeer},
|
||||
config::Verbosity,
|
||||
protocol::{SPk, SSk, SymKey},
|
||||
};
|
||||
use rosenpass_secret_memory::Secret;
|
||||
use rosenpass_util::file::{LoadValue as _, LoadValueB64};
|
||||
use rosenpass_wireguard_broker::brokers::native_unix::{
|
||||
NativeUnixBroker, NativeUnixBrokerConfigBaseBuilder, NativeUnixBrokerConfigBaseBuilderError,
|
||||
};
|
||||
|
||||
let (connection, rtnetlink, _) = rtnetlink::new_connection()?;
|
||||
tokio::spawn(connection);
|
||||
|
||||
let link_name = options.dev.unwrap_or("rosenpass0".to_string());
|
||||
let link_index = netlink::link_create_and_up(&rtnetlink, link_name.clone()).await?;
|
||||
|
||||
ctrlc_async::set_async_handler(async move {
|
||||
netlink::link_cleanup_standalone(link_index)
|
||||
.await
|
||||
.expect("Failed to clean up");
|
||||
})?;
|
||||
|
||||
// Deploy the classic wireguard private key
|
||||
let (connection, mut genetlink, _) = genetlink::new_connection()?;
|
||||
tokio::spawn(connection);
|
||||
|
||||
let wgsk_path = options.private_keys_dir.join("wgsk");
|
||||
|
||||
let wgsk = Secret::<WG_KEY_LEN>::load_b64::<WG_B64_LEN, _>(wgsk_path)?;
|
||||
|
||||
let mut attr: Vec<WgDeviceAttrs> = Vec::with_capacity(2);
|
||||
attr.push(WgDeviceAttrs::PrivateKey(*wgsk.secret()));
|
||||
|
||||
if let Some(listen) = options.listen {
|
||||
if listen.port() == u16::MAX {
|
||||
return Err(anyhow!("You may not use {} as the listen port.", u16::MAX));
|
||||
}
|
||||
|
||||
attr.push(WgDeviceAttrs::ListenPort(listen.port() + 1));
|
||||
}
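    // As in the rp shell script, classic WireGuard listens on the Rosenpass port + 1, which is
    // why u16::MAX is rejected as a listen port above.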
|
||||
|
||||
netlink::wg_set(&mut genetlink, link_index, attr).await?;
|
||||
|
||||
let pqsk = options.private_keys_dir.join("pqsk");
|
||||
let pqpk = options.private_keys_dir.join("pqpk");
|
||||
|
||||
let sk = SSk::load(&pqsk)?;
|
||||
let pk = SPk::load(&pqpk)?;
|
||||
|
||||
let mut srv = Box::new(AppServer::new(
|
||||
sk,
|
||||
pk,
|
||||
if let Some(listen) = options.listen {
|
||||
vec![listen]
|
||||
} else {
|
||||
Vec::with_capacity(0)
|
||||
},
|
||||
if options.verbose {
|
||||
Verbosity::Verbose
|
||||
} else {
|
||||
Verbosity::Quiet
|
||||
},
|
||||
None,
|
||||
)?);
|
||||
|
||||
let broker_store_ptr = srv.register_broker(Box::new(NativeUnixBroker::new()))?;
|
||||
|
||||
fn cfg_err_map(e: NativeUnixBrokerConfigBaseBuilderError) -> anyhow::Error {
|
||||
anyhow::Error::msg(format!("NativeUnixBrokerConfigBaseBuilderError: {:?}", e))
|
||||
}
|
||||
|
||||
for peer in options.peers {
|
||||
let wgpk = peer.public_keys_dir.join("wgpk");
|
||||
let pqpk = peer.public_keys_dir.join("pqpk");
|
||||
let psk = peer.public_keys_dir.join("psk");
|
||||
|
||||
let mut extra_params: Vec<String> = Vec::with_capacity(6);
|
||||
if let Some(endpoint) = peer.endpoint {
|
||||
extra_params.push("endpoint".to_string());
|
||||
|
||||
// Peer endpoints always use (port + 1) in wg set params
|
||||
let endpoint = SocketAddr::new(endpoint.ip(), endpoint.port() + 1);
|
||||
extra_params.push(endpoint.to_string());
|
||||
}
|
||||
if let Some(persistent_keepalive) = peer.persistent_keepalive {
|
||||
extra_params.push("persistent-keepalive".to_string());
|
||||
extra_params.push(persistent_keepalive.to_string());
|
||||
}
|
||||
if let Some(allowed_ips) = &peer.allowed_ips {
|
||||
extra_params.push("allowed-ips".to_string());
|
||||
extra_params.push(allowed_ips.clone());
|
||||
}
|
||||
|
||||
let peer_cfg = NativeUnixBrokerConfigBaseBuilder::default()
|
||||
.peer_id_b64(&fs::read_to_string(wgpk)?)?
|
||||
.interface(link_name.clone())
|
||||
.extra_params_ser(&extra_params)?
|
||||
.build()
|
||||
.map_err(cfg_err_map)?;
|
||||
|
||||
let broker_peer = Some(BrokerPeer::new(
|
||||
broker_store_ptr.clone(),
|
||||
Box::new(peer_cfg),
|
||||
));
|
||||
|
||||
srv.add_peer(
|
||||
if psk.exists() {
|
||||
Some(SymKey::load_b64::<WG_B64_LEN, _>(psk))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
.transpose()?,
|
||||
SPk::load(&pqpk)?,
|
||||
None,
|
||||
broker_peer,
|
||||
peer.endpoint.map(|x| x.to_string()),
|
||||
)?;
|
||||
}
|
||||
|
||||
let out = srv.event_loop();
|
||||
|
||||
netlink::link_cleanup(&rtnetlink, link_index).await?;
|
||||
|
||||
match out {
|
||||
Ok(_) => Ok(()),
|
||||
Err(e) => {
|
||||
// Check if the returned error is actually EINTR, in which case, the run actually succeeded.
|
||||
let is_ok = if let Some(e) = e.root_cause().downcast_ref::<std::io::Error>() {
|
||||
matches!(e.kind(), std::io::ErrorKind::Interrupted)
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
if is_ok {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
152
rp/src/key.rs
@@ -1,152 +0,0 @@
|
||||
use std::{
|
||||
fs::{self, DirBuilder},
|
||||
ops::DerefMut,
|
||||
os::unix::fs::{DirBuilderExt, PermissionsExt},
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use rosenpass_util::file::{LoadValueB64, StoreValue, StoreValueB64};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use rosenpass::protocol::{SPk, SSk};
|
||||
use rosenpass_cipher_traits::Kem;
|
||||
use rosenpass_ciphers::kem::StaticKem;
|
||||
use rosenpass_secret_memory::{file::StoreSecret as _, Public, Secret};
|
||||
|
||||
pub const WG_B64_LEN: usize = 32 * 5 / 3;
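// A 32-byte key encodes to 44 base64 characters including padding; the looser bound of
// 32 * 5 / 3 = 53 also leaves room for a trailing newline in the key files.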
|
||||
|
||||
#[cfg(not(target_family = "unix"))]
|
||||
pub fn genkey(_: &Path) -> Result<()> {
|
||||
Err(anyhow!(
|
||||
"Your system {} is not yet supported. We are happy to receive patches to address this :)",
|
||||
std::env::consts::OS
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(target_family = "unix")]
|
||||
pub fn genkey(private_keys_dir: &Path) -> Result<()> {
|
||||
if private_keys_dir.exists() {
|
||||
if fs::metadata(private_keys_dir)?.permissions().mode() != 0o700 {
|
||||
return Err(anyhow!(
|
||||
"Directory {:?} has incorrect permissions: please use 0700 for proper security.",
|
||||
private_keys_dir
|
||||
));
|
||||
}
|
||||
} else {
|
||||
DirBuilder::new()
|
||||
.recursive(true)
|
||||
.mode(0o700)
|
||||
.create(private_keys_dir)?;
|
||||
}
|
||||
|
||||
let wgsk_path = private_keys_dir.join("wgsk");
|
||||
let pqsk_path = private_keys_dir.join("pqsk");
|
||||
let pqpk_path = private_keys_dir.join("pqpk");
|
||||
|
||||
if !wgsk_path.exists() {
|
||||
let wgsk: Secret<32> = Secret::random();
|
||||
wgsk.store_b64::<WG_B64_LEN, _>(wgsk_path)?;
|
||||
} else {
|
||||
eprintln!(
|
||||
"WireGuard secret key already exists at {:#?}: not regenerating",
|
||||
wgsk_path
|
||||
);
|
||||
}
|
||||
|
||||
if !pqsk_path.exists() && !pqpk_path.exists() {
|
||||
let mut pqsk = SSk::random();
|
||||
let mut pqpk = SPk::random();
|
||||
StaticKem::keygen(pqsk.secret_mut(), pqpk.deref_mut())?;
|
||||
pqpk.store(pqpk_path)?;
|
||||
pqsk.store_secret(pqsk_path)?;
|
||||
} else {
|
||||
eprintln!(
|
||||
"Rosenpass keys already exist in {:#?}: not regenerating",
|
||||
private_keys_dir
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn pubkey(private_keys_dir: &Path, public_keys_dir: &Path) -> Result<()> {
|
||||
if public_keys_dir.exists() {
|
||||
return Err(anyhow!("Directory {:?} already exists", public_keys_dir));
|
||||
}
|
||||
|
||||
fs::create_dir_all(public_keys_dir)?;
|
||||
|
||||
let private_wgsk = private_keys_dir.join("wgsk");
|
||||
let public_wgpk = public_keys_dir.join("wgpk");
|
||||
let private_pqpk = private_keys_dir.join("pqpk");
|
||||
let public_pqpk = public_keys_dir.join("pqpk");
|
||||
|
||||
let wgsk = Secret::load_b64::<WG_B64_LEN, _>(private_wgsk)?;
|
||||
let mut wgpk: Public<32> = {
|
||||
let mut secret = x25519_dalek::StaticSecret::from(*wgsk.secret());
|
||||
let public = x25519_dalek::PublicKey::from(&secret);
|
||||
secret.zeroize();
|
||||
Public::from_slice(public.as_bytes())
|
||||
};
|
||||
|
||||
wgpk.store_b64::<WG_B64_LEN, _>(public_wgpk)?;
|
||||
wgpk.zeroize();
|
||||
|
||||
fs::copy(private_pqpk, public_pqpk)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::fs;
|
||||
|
||||
use rosenpass::protocol::{SPk, SSk};
|
||||
use rosenpass_secret_memory::secret_policy_try_use_memfd_secrets;
|
||||
use rosenpass_secret_memory::Secret;
|
||||
use rosenpass_util::file::LoadValue;
|
||||
use rosenpass_util::file::LoadValueB64;
|
||||
use tempfile::tempdir;
|
||||
|
||||
use crate::key::{genkey, pubkey, WG_B64_LEN};
|
||||
|
||||
#[test]
|
||||
fn test_key_loopback() {
|
||||
secret_policy_try_use_memfd_secrets();
|
||||
let private_keys_dir = tempdir().unwrap();
|
||||
fs::remove_dir(private_keys_dir.path()).unwrap();
|
||||
|
||||
        // Guaranteed to have 16MB of stack size
|
||||
stacker::grow(8 * 1024 * 1024, || {
|
||||
assert!(genkey(private_keys_dir.path()).is_ok());
|
||||
});
|
||||
|
||||
assert!(private_keys_dir.path().exists());
|
||||
assert!(private_keys_dir.path().is_dir());
|
||||
assert!(SPk::load(private_keys_dir.path().join("pqpk")).is_ok());
|
||||
assert!(SSk::load(private_keys_dir.path().join("pqsk")).is_ok());
|
||||
assert!(
|
||||
Secret::<32>::load_b64::<WG_B64_LEN, _>(private_keys_dir.path().join("wgsk")).is_ok()
|
||||
);
|
||||
|
||||
let public_keys_dir = tempdir().unwrap();
|
||||
fs::remove_dir(public_keys_dir.path()).unwrap();
|
||||
|
||||
        // Guaranteed to have 16MB of stack size
|
||||
stacker::grow(8 * 1024 * 1024, || {
|
||||
assert!(pubkey(private_keys_dir.path(), public_keys_dir.path()).is_ok());
|
||||
});
|
||||
|
||||
assert!(public_keys_dir.path().exists());
|
||||
assert!(public_keys_dir.path().is_dir());
|
||||
assert!(SPk::load(public_keys_dir.path().join("pqpk")).is_ok());
|
||||
assert!(
|
||||
Secret::<32>::load_b64::<WG_B64_LEN, _>(public_keys_dir.path().join("wgpk")).is_ok()
|
||||
);
|
||||
|
||||
let pk_1 = fs::read(private_keys_dir.path().join("pqpk")).unwrap();
|
||||
let pk_2 = fs::read(public_keys_dir.path().join("pqpk")).unwrap();
|
||||
assert_eq!(pk_1, pk_2);
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
use std::process::exit;
|
||||
|
||||
use cli::{Cli, Command};
|
||||
use exchange::exchange;
|
||||
use key::{genkey, pubkey};
|
||||
use rosenpass_secret_memory::policy;
|
||||
|
||||
mod cli;
|
||||
mod exchange;
|
||||
mod key;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
#[cfg(feature = "enable_memfd_alloc")]
|
||||
policy::secret_policy_try_use_memfd_secrets();
|
||||
#[cfg(not(feature = "enable_memfd_alloc"))]
|
||||
policy::secret_policy_use_only_malloc_secrets();
|
||||
|
||||
let cli = match Cli::parse(std::env::args().peekable()) {
|
||||
Ok(cli) => cli,
|
||||
Err(err) => {
|
||||
eprintln!("{}", err);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
let command = cli.command.unwrap();
|
||||
|
||||
let res = match command {
|
||||
Command::GenKey { private_keys_dir } => genkey(&private_keys_dir),
|
||||
Command::PubKey {
|
||||
private_keys_dir,
|
||||
public_keys_dir,
|
||||
} => pubkey(&private_keys_dir, &public_keys_dir),
|
||||
Command::Exchange(mut options) => {
|
||||
options.verbose = cli.verbose;
|
||||
exchange(options).await
|
||||
}
|
||||
Command::Help => {
|
||||
println!("Usage: rp [verbose] genkey|pubkey|exchange [ARGS]...");
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!("An error occurred: {}", err);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
[package]
|
||||
name = "rosenpass-secret-memory"
|
||||
version = "0.1.0"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Rosenpass internal utilities for storing secrets in memory"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
rosenpass-to = { workspace = true }
|
||||
rosenpass-util = { workspace = true }
|
||||
zeroize = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
memsec = { workspace = true }
|
||||
allocator-api2 = { workspace = true }
|
||||
log = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
allocator-api2-tests = { workspace = true }
|
||||
tempfile = {workspace = true}
|
||||
base64ct = {workspace = true}
|
||||
procspawn = {workspace = true}
|
||||
@@ -1,5 +0,0 @@
|
||||
# Rosenpass secure memory library
|
||||
|
||||
Rosenpass internal library providing utilities for securely storing secret data in memory.
|
||||
|
||||
This is an internal library; no guarantees are made about its API at this point in time.
|
||||
@@ -1,112 +0,0 @@
|
||||
use std::fmt;
|
||||
use std::ptr::NonNull;
|
||||
|
||||
use allocator_api2::alloc::{AllocError, Allocator, Layout};
|
||||
|
||||
#[derive(Copy, Clone, Default)]
|
||||
struct MallocAllocatorContents;
|
||||
|
||||
/// Memory allocation using the memsec crate
|
||||
#[derive(Copy, Clone, Default)]
|
||||
pub struct MallocAllocator {
|
||||
_dummy_private_data: MallocAllocatorContents,
|
||||
}
|
||||
|
||||
/// A box backed by the memsec allocator
|
||||
pub type MallocBox<T> = allocator_api2::boxed::Box<T, MallocAllocator>;
|
||||
|
||||
/// A vector backed by the memsec allocator
|
||||
pub type MallocVec<T> = allocator_api2::vec::Vec<T, MallocAllocator>;
|
||||
|
||||
pub fn malloc_box_try<T>(x: T) -> Result<MallocBox<T>, AllocError> {
|
||||
MallocBox::<T>::try_new_in(x, MallocAllocator::new())
|
||||
}
|
||||
|
||||
pub fn malloc_box<T>(x: T) -> MallocBox<T> {
|
||||
MallocBox::<T>::new_in(x, MallocAllocator::new())
|
||||
}
|
||||
|
||||
pub fn malloc_vec<T>() -> MallocVec<T> {
|
||||
MallocVec::<T>::new_in(MallocAllocator::new())
|
||||
}
|
||||
|
||||
impl MallocAllocator {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
_dummy_private_data: MallocAllocatorContents,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Allocator for MallocAllocator {
|
||||
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
|
||||
// Call memsec allocator
|
||||
let mem: Option<NonNull<[u8]>> = unsafe { memsec::malloc_sized(layout.size()) };
|
||||
|
||||
// Unwrap the option
|
||||
let Some(mem) = mem else {
|
||||
log::error!("Allocation {layout:?} was requested but memsec returned a null pointer");
|
||||
return Err(AllocError);
|
||||
};
|
||||
|
||||
// Ensure the right alignment is used
|
||||
let off = (mem.as_ptr() as *const u8).align_offset(layout.align());
|
||||
if off != 0 {
|
||||
log::error!("Allocation {layout:?} was requested but malloc-based memsec returned allocation \
|
||||
with offset {off} from the requested alignment. Malloc always allocates values \
|
||||
at the end of a memory page for security reasons, custom alignments are not supported. \
|
||||
You could try allocating an oversized value.");
|
||||
unsafe { memsec::free(mem) };
|
||||
return Err(AllocError);
|
||||
};
|
||||
|
||||
Ok(mem)
|
||||
}
|
||||
|
||||
unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) {
|
||||
unsafe {
|
||||
memsec::free(ptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for MallocAllocator {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.write_str("<memsec based Rust allocator>")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use allocator_api2_tests::make_test;
|
||||
|
||||
use super::*;
|
||||
|
||||
make_test! { test_sizes(MallocAllocator::new()) }
|
||||
make_test! { test_vec(MallocAllocator::new()) }
|
||||
make_test! { test_many_boxes(MallocAllocator::new()) }
|
||||
|
||||
#[test]
|
||||
fn malloc_allocation() {
|
||||
let alloc = MallocAllocator::new();
|
||||
malloc_allocation_impl::<0>(&alloc);
|
||||
malloc_allocation_impl::<7>(&alloc);
|
||||
malloc_allocation_impl::<8>(&alloc);
|
||||
malloc_allocation_impl::<64>(&alloc);
|
||||
malloc_allocation_impl::<999>(&alloc);
|
||||
}
|
||||
|
||||
fn malloc_allocation_impl<const N: usize>(alloc: &MallocAllocator) {
|
||||
let layout = Layout::new::<[u8; N]>();
|
||||
let mem = alloc.allocate(layout).unwrap();
|
||||
|
||||
// https://libsodium.gitbook.io/doc/memory_management#guarded-heap-allocations
|
||||
// promises us that allocated memory is initialized with the magic byte 0xDB
|
||||
// and memsec promises to provide a reimplementation of the libsodium mechanism;
|
||||
// it uses the magic value 0xD0 though
|
||||
assert_eq!(unsafe { mem.as_ref() }, &[0xD0u8; N]);
|
||||
|
||||
let mem = NonNull::new(mem.as_ptr() as *mut u8).unwrap();
|
||||
unsafe { alloc.deallocate(mem, layout) };
|
||||
}
|
||||
}
|
||||
@@ -1,112 +0,0 @@
|
||||
#![cfg(target_os = "linux")]
|
||||
use std::fmt;
|
||||
use std::ptr::NonNull;
|
||||
|
||||
use allocator_api2::alloc::{AllocError, Allocator, Layout};
|
||||
|
||||
#[derive(Copy, Clone, Default)]
|
||||
struct MemfdSecAllocatorContents;
|
||||
|
||||
/// Memory allocation using the memsec crate
|
||||
#[derive(Copy, Clone, Default)]
|
||||
pub struct MemfdSecAllocator {
|
||||
_dummy_private_data: MemfdSecAllocatorContents,
|
||||
}
|
||||
|
||||
/// A box backed by the memsec allocator
|
||||
pub type MemfdSecBox<T> = allocator_api2::boxed::Box<T, MemfdSecAllocator>;
|
||||
|
||||
/// A vector backed by the memsec allocator
|
||||
pub type MemfdSecVec<T> = allocator_api2::vec::Vec<T, MemfdSecAllocator>;
|
||||
|
||||
pub fn memfdsec_box_try<T>(x: T) -> Result<MemfdSecBox<T>, AllocError> {
|
||||
MemfdSecBox::<T>::try_new_in(x, MemfdSecAllocator::new())
|
||||
}
|
||||
|
||||
pub fn memfdsec_box<T>(x: T) -> MemfdSecBox<T> {
|
||||
MemfdSecBox::<T>::new_in(x, MemfdSecAllocator::new())
|
||||
}
|
||||
|
||||
pub fn memfdsec_vec<T>() -> MemfdSecVec<T> {
|
||||
MemfdSecVec::<T>::new_in(MemfdSecAllocator::new())
|
||||
}
|
||||
|
||||
impl MemfdSecAllocator {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
_dummy_private_data: MemfdSecAllocatorContents,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Allocator for MemfdSecAllocator {
|
||||
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
|
||||
// Call memsec allocator
|
||||
let mem: Option<NonNull<[u8]>> = unsafe { memsec::memfd_secret_sized(layout.size()) };
|
||||
|
||||
// Unwrap the option
|
||||
let Some(mem) = mem else {
|
||||
log::error!("Allocation {layout:?} was requested but memfd-based memsec returned a null pointer");
|
||||
return Err(AllocError);
|
||||
};
|
||||
|
||||
// Ensure the right alignment is used
|
||||
let off = (mem.as_ptr() as *const u8).align_offset(layout.align());
|
||||
if off != 0 {
|
||||
log::error!("Allocation {layout:?} was requested but memfd-based memsec returned allocation \
|
||||
with offset {off} from the requested alignment. Memfd always allocates values \
|
||||
at the end of a memory page for security reasons, custom alignments are not supported. \
|
||||
You could try allocating an oversized value.");
|
||||
unsafe { memsec::free_memfd_secret(mem) };
|
||||
return Err(AllocError);
|
||||
};
|
||||
|
||||
Ok(mem)
|
||||
}
|
||||
|
||||
unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) {
|
||||
unsafe {
|
||||
memsec::free_memfd_secret(ptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for MemfdSecAllocator {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.write_str("<memsec based Rust allocator>")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use allocator_api2_tests::make_test;
|
||||
|
||||
use super::*;
|
||||
|
||||
make_test! { test_sizes(MemfdSecAllocator::new()) }
|
||||
make_test! { test_vec(MemfdSecAllocator::new()) }
|
||||
make_test! { test_many_boxes(MemfdSecAllocator::new()) }
|
||||
|
||||
#[test]
|
||||
fn memfdsec_allocation() {
|
||||
let alloc = MemfdSecAllocator::new();
|
||||
memfdsec_allocation_impl::<0>(&alloc);
|
||||
memfdsec_allocation_impl::<7>(&alloc);
|
||||
memfdsec_allocation_impl::<8>(&alloc);
|
||||
memfdsec_allocation_impl::<64>(&alloc);
|
||||
memfdsec_allocation_impl::<999>(&alloc);
|
||||
}
|
||||
|
||||
fn memfdsec_allocation_impl<const N: usize>(alloc: &MemfdSecAllocator) {
|
||||
let layout = Layout::new::<[u8; N]>();
|
||||
let mem = alloc.allocate(layout).unwrap();
|
||||
|
||||
// https://libsodium.gitbook.io/doc/memory_management#guarded-heap-allocations
|
||||
// promises us that allocated memory is initialized with the magic byte 0xDB
|
||||
// and memsec promises to provide a reimplementation of the libsodium mechanism;
|
||||
// it uses the magic value 0xD0 though
|
||||
assert_eq!(unsafe { mem.as_ref() }, &[0xD0u8; N]);
|
||||
let mem = NonNull::new(mem.as_ptr() as *mut u8).unwrap();
|
||||
unsafe { alloc.deallocate(mem, layout) };
|
||||
}
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
pub mod malloc;
|
||||
pub mod memfdsec;
|
||||
@@ -1,86 +0,0 @@
|
||||
pub mod memsec;
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use allocator_api2::alloc::{AllocError, Allocator};
|
||||
use memsec::malloc::MallocAllocator;
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
use memsec::memfdsec::MemfdSecAllocator;
|
||||
|
||||
static ALLOC_TYPE: OnceLock<SecretAllocType> = OnceLock::new();
|
||||
|
||||
/// Sets the secret allocation type to use.
|
||||
/// Intended usage at startup before secret allocation
|
||||
/// takes place
|
||||
pub fn set_secret_alloc_type(alloc_type: SecretAllocType) {
|
||||
ALLOC_TYPE.set(alloc_type).unwrap();
|
||||
}
|
||||
|
||||
pub fn get_or_init_secret_alloc_type(alloc_type: SecretAllocType) -> SecretAllocType {
|
||||
*ALLOC_TYPE.get_or_init(|| alloc_type)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum SecretAllocType {
|
||||
MemsecMalloc,
|
||||
#[cfg(target_os = "linux")]
|
||||
MemsecMemfdSec,
|
||||
}
|
||||
|
||||
pub struct SecretAlloc {
|
||||
alloc_type: SecretAllocType,
|
||||
}
|
||||
|
||||
impl Default for SecretAlloc {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
alloc_type: *ALLOC_TYPE.get().expect(
|
||||
"Secret security policy not specified. \
|
||||
                Run one of the policy functions in \
|
||||
rosenpass_secret_memory::policy or set a \
|
||||
custom policy by initializing \
|
||||
rosenpass_secret_memory::alloc::ALLOC_TYPE \
|
||||
before using secrets",
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Allocator for SecretAlloc {
|
||||
fn allocate(
|
||||
&self,
|
||||
layout: std::alloc::Layout,
|
||||
) -> Result<std::ptr::NonNull<[u8]>, allocator_api2::alloc::AllocError> {
|
||||
match self.alloc_type {
|
||||
SecretAllocType::MemsecMalloc => MallocAllocator::default().allocate(layout),
|
||||
#[cfg(target_os = "linux")]
|
||||
SecretAllocType::MemsecMemfdSec => MemfdSecAllocator::default().allocate(layout),
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn deallocate(&self, ptr: std::ptr::NonNull<u8>, layout: std::alloc::Layout) {
|
||||
match self.alloc_type {
|
||||
SecretAllocType::MemsecMalloc => MallocAllocator::default().deallocate(ptr, layout),
|
||||
#[cfg(target_os = "linux")]
|
||||
SecretAllocType::MemsecMemfdSec => MemfdSecAllocator::default().deallocate(ptr, layout),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type SecretBox<T> = allocator_api2::boxed::Box<T, SecretAlloc>;
|
||||
|
||||
/// A vector backed by the memsec allocator
|
||||
pub type SecretVec<T> = allocator_api2::vec::Vec<T, SecretAlloc>;
|
||||
|
||||
pub fn secret_box_try<T>(x: T) -> Result<SecretBox<T>, AllocError> {
|
||||
SecretBox::<T>::try_new_in(x, SecretAlloc::default())
|
||||
}
|
||||
|
||||
pub fn secret_box<T>(x: T) -> SecretBox<T> {
|
||||
SecretBox::<T>::new_in(x, SecretAlloc::default())
|
||||
}
|
||||
|
||||
pub fn secret_vec<T>() -> SecretVec<T> {
|
||||
SecretVec::<T>::new_in(SecretAlloc::default())
|
||||
}
|
||||
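A minimal usage sketch for the helpers above, illustrative only; it assumes an allocation policy has already been selected, for example via the policy function named below.

// Illustrative sketch; not part of the diff.
use rosenpass_secret_memory::alloc::{secret_box, secret_vec, SecretBox, SecretVec};
use rosenpass_secret_memory::policy::secret_policy_use_only_malloc_secrets;

fn demo() {
    // A policy must be in place before the first allocation.
    secret_policy_use_only_malloc_secrets();

    // Heap-allocate a fixed-size secret in protected memory.
    let key: SecretBox<[u8; 32]> = secret_box([0u8; 32]);

    // Or grow a protected vector dynamically.
    let mut buf: SecretVec<u8> = secret_vec();
    for b in key.iter() {
        buf.push(*b);
    }
    assert_eq!(buf.len(), 32);
}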
@@ -1,20 +0,0 @@
|
||||
use std::fmt;
|
||||
|
||||
/// Writes the contents of an `&[u8]` as hexadecimal symbols to a [std::fmt::Formatter]
|
||||
pub fn debug_crypto_array(v: &[u8], fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.write_str("[{}]=")?;
|
||||
if v.len() > 64 {
|
||||
for byte in &v[..32] {
|
||||
std::fmt::LowerHex::fmt(byte, fmt)?;
|
||||
}
|
||||
fmt.write_str("…")?;
|
||||
for byte in &v[v.len() - 32..] {
|
||||
std::fmt::LowerHex::fmt(byte, fmt)?;
|
||||
}
|
||||
} else {
|
||||
for byte in v {
|
||||
std::fmt::LowerHex::fmt(byte, fmt)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
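Illustrative sketch, not part of the diff: the helper above is intended to be called from a Debug implementation, roughly as follows. HexBytes is a hypothetical wrapper type used only for this example.

// Illustrative sketch only.
use std::fmt;

use rosenpass_secret_memory::debug::debug_crypto_array;

struct HexBytes(Vec<u8>);

impl fmt::Debug for HexBytes {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Arrays longer than 64 bytes are abbreviated to their
        // first and last 32 bytes, separated by an ellipsis.
        debug_crypto_array(&self.0, fmt)
    }
}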
@@ -1,8 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
pub trait StoreSecret {
|
||||
type Error;
|
||||
|
||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<(), Self::Error>;
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> Result<(), Self::Error>;
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
pub mod debug;
|
||||
pub mod file;
|
||||
pub mod rand;
|
||||
|
||||
pub mod alloc;
|
||||
|
||||
mod public;
|
||||
pub use crate::public::Public;
|
||||
pub use crate::public::PublicBox;
|
||||
|
||||
mod secret;
|
||||
pub use crate::secret::Secret;
|
||||
|
||||
pub mod policy;
|
||||
pub use crate::policy::*;
|
||||
@@ -1,82 +0,0 @@
|
||||
pub fn secret_policy_try_use_memfd_secrets() {
|
||||
let alloc_type = {
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
if crate::alloc::memsec::memfdsec::memfdsec_box_try(0u8).is_ok() {
|
||||
crate::alloc::SecretAllocType::MemsecMemfdSec
|
||||
} else {
|
||||
crate::alloc::SecretAllocType::MemsecMalloc
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
{
|
||||
crate::alloc::SecretAllocType::MemsecMalloc
|
||||
}
|
||||
};
|
||||
assert_eq!(
|
||||
alloc_type,
|
||||
crate::alloc::get_or_init_secret_alloc_type(alloc_type)
|
||||
);
|
||||
|
||||
log::info!("Secrets will be allocated using {:?}", alloc_type);
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
pub fn secret_policy_use_only_memfd_secrets() {
|
||||
let alloc_type = crate::alloc::SecretAllocType::MemsecMemfdSec;
|
||||
|
||||
assert_eq!(
|
||||
alloc_type,
|
||||
crate::alloc::get_or_init_secret_alloc_type(alloc_type)
|
||||
);
|
||||
|
||||
log::info!("Secrets will be allocated using {:?}", alloc_type);
|
||||
}
|
||||
|
||||
pub fn secret_policy_use_only_malloc_secrets() {
|
||||
let alloc_type = crate::alloc::SecretAllocType::MemsecMalloc;
|
||||
assert_eq!(
|
||||
alloc_type,
|
||||
crate::alloc::get_or_init_secret_alloc_type(alloc_type)
|
||||
);
|
||||
|
||||
log::info!("Secrets will be allocated using {:?}", alloc_type);
|
||||
}
|
||||
|
||||
pub mod test {
|
||||
#[macro_export]
|
||||
macro_rules! test_spawn_process_with_policies {
|
||||
($body:block, $($f: expr),*) => {
|
||||
$(
|
||||
let handle = procspawn::spawn((), |_| {
|
||||
|
||||
$f();
|
||||
|
||||
$body
|
||||
|
||||
});
|
||||
handle.join().unwrap();
|
||||
)*
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! test_spawn_process_provided_policies {
|
||||
($body: block) => {
|
||||
$crate::test_spawn_process_with_policies!(
|
||||
$body,
|
||||
$crate::policy::secret_policy_try_use_memfd_secrets,
|
||||
$crate::secret_policy_use_only_malloc_secrets
|
||||
);
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
$crate::test_spawn_process_with_policies!(
|
||||
$body,
|
||||
$crate::policy::secret_policy_use_only_memfd_secrets
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,452 +0,0 @@
|
||||
use crate::debug::debug_crypto_array;
|
||||
use anyhow::Context;
|
||||
use rand::{Fill as Randomize, Rng};
|
||||
use rosenpass_to::{ops::copy_slice, To};
|
||||
use rosenpass_util::b64::{b64_decode, b64_encode};
|
||||
use rosenpass_util::file::{
|
||||
fopen_r, fopen_w, LoadValue, LoadValueB64, ReadExactToEnd, ReadSliceToEnd, StoreValue,
|
||||
StoreValueB64, StoreValueB64Writer, Visibility,
|
||||
};
|
||||
use rosenpass_util::functional::mutating;
|
||||
use std::borrow::{Borrow, BorrowMut};
|
||||
use std::fmt;
|
||||
use std::io::Write;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::Path;
|
||||
|
||||
/// Contains information in the form of a byte array that may be known to the
|
||||
/// public
|
||||
// TODO: We should get rid of the Public type; just use a normal value
|
||||
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[repr(transparent)]
|
||||
pub struct Public<const N: usize> {
|
||||
pub value: [u8; N],
|
||||
}
|
||||
|
||||
impl<const N: usize> Public<N> {
|
||||
/// Create a new [Public] from a byte slice
|
||||
pub fn from_slice(value: &[u8]) -> Self {
|
||||
copy_slice(value).to_this(Self::zero)
|
||||
}
|
||||
|
||||
/// Create a new [Public] from a byte array
|
||||
pub fn new(value: [u8; N]) -> Self {
|
||||
Self { value }
|
||||
}
|
||||
|
||||
/// Create a zero initialized [Public]
|
||||
pub fn zero() -> Self {
|
||||
Self { value: [0u8; N] }
|
||||
}
|
||||
|
||||
/// Create a random initialized [Public]
|
||||
pub fn random() -> Self {
|
||||
mutating(Self::zero(), |r| r.randomize())
|
||||
}
|
||||
|
||||
/// Randomize all bytes in an existing [Public]
|
||||
pub fn randomize(&mut self) {
|
||||
self.try_fill(&mut crate::rand::rng()).unwrap()
|
||||
}
|
||||
}
|
||||
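A short, illustrative usage sketch of the constructors above; the crate-root re-export of Public shown in lib.rs is assumed.

// Illustrative sketch; not part of the diff.
use rosenpass_secret_memory::Public;

fn demo() {
    // Fixed-size public value, e.g. a 32-byte peer id.
    let a: Public<32> = Public::zero();
    let b = Public::<32>::from_slice(&[0u8; 32]);
    assert_eq!(*a, *b);

    // Randomize in place.
    let mut c = Public::<32>::random();
    c.randomize();
}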
|
||||
impl<const N: usize> Randomize for Public<N> {
|
||||
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), rand::Error> {
|
||||
self.value.try_fill(rng)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> fmt::Debug for Public<N> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
debug_crypto_array(&self.value, fmt)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Deref for Public<N> {
|
||||
type Target = [u8; N];
|
||||
|
||||
fn deref(&self) -> &[u8; N] {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> DerefMut for Public<N> {
|
||||
fn deref_mut(&mut self) -> &mut [u8; N] {
|
||||
&mut self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Borrow<[u8; N]> for Public<N> {
|
||||
fn borrow(&self) -> &[u8; N] {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
impl<const N: usize> BorrowMut<[u8; N]> for Public<N> {
|
||||
fn borrow_mut(&mut self) -> &mut [u8; N] {
|
||||
&mut self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Borrow<[u8]> for Public<N> {
|
||||
fn borrow(&self) -> &[u8] {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
impl<const N: usize> BorrowMut<[u8]> for Public<N> {
|
||||
fn borrow_mut(&mut self) -> &mut [u8] {
|
||||
&mut self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValue for Public<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn load<P: AsRef<Path>>(path: P) -> anyhow::Result<Self> {
|
||||
let mut v = Self::random();
|
||||
fopen_r(path)?.read_exact_to_end(&mut *v)?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValue for Public<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
std::fs::write(path, **self)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValueB64 for Public<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn load_b64<const F: usize, P: AsRef<Path>>(path: P) -> Result<Self, Self::Error>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
let mut f = [0u8; F];
|
||||
let mut v = Public::zero();
|
||||
let p = path.as_ref();
|
||||
|
||||
let len = fopen_r(p)?
|
||||
.read_slice_to_end(&mut f)
|
||||
.with_context(|| format!("Could not load file {p:?}"))?;
|
||||
|
||||
b64_decode(&f[0..len], &mut v.value)
|
||||
.with_context(|| format!("Could not decode base64 file {p:?}"))?;
|
||||
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValueB64 for Public<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store_b64<const F: usize, P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
let p = path.as_ref();
|
||||
let mut f = [0u8; F];
|
||||
let encoded_str = b64_encode(&self.value, &mut f)
|
||||
.with_context(|| format!("Could not encode base64 file {p:?}"))?;
|
||||
fopen_w(p, Visibility::Public)?
|
||||
.write_all(encoded_str.as_bytes())
|
||||
.with_context(|| format!("Could not write file {p:?}"))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValueB64Writer for Public<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store_b64_writer<const F: usize, W: std::io::Write>(
|
||||
&self,
|
||||
mut writer: W,
|
||||
) -> Result<(), Self::Error> {
|
||||
let mut f = [0u8; F];
|
||||
let encoded_str =
|
||||
b64_encode(&self.value, &mut f).with_context(|| "Could not encode secret to base64")?;
|
||||
|
||||
writer
|
||||
.write_all(encoded_str.as_bytes())
|
||||
.with_context(|| "Could not write base64 to writer")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[repr(transparent)]
|
||||
pub struct PublicBox<const N: usize> {
|
||||
pub inner: Box<Public<N>>,
|
||||
}
|
||||
|
||||
impl<const N: usize> PublicBox<N> {
|
||||
/// Create a new [PublicBox] from a byte slice
|
||||
pub fn from_slice(value: &[u8]) -> Self {
|
||||
Self {
|
||||
inner: Box::new(Public::from_slice(value)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new [PublicBox] from a byte array
|
||||
pub fn new(value: [u8; N]) -> Self {
|
||||
Self {
|
||||
inner: Box::new(Public::new(value)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a zero initialized [PublicBox]
|
||||
pub fn zero() -> Self {
|
||||
Self {
|
||||
inner: Box::new(Public::zero()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a random initialized [PublicBox]
|
||||
pub fn random() -> Self {
|
||||
Self {
|
||||
inner: Box::new(Public::random()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Randomize all bytes in an existing [PublicBox]
|
||||
pub fn randomize(&mut self) {
|
||||
self.inner.randomize()
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Randomize for PublicBox<N> {
|
||||
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), rand::Error> {
|
||||
self.inner.try_fill(rng)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> fmt::Debug for PublicBox<N> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
debug_crypto_array(&**self, fmt)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Deref for PublicBox<N> {
|
||||
type Target = [u8; N];
|
||||
|
||||
fn deref(&self) -> &[u8; N] {
|
||||
self.inner.deref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> DerefMut for PublicBox<N> {
|
||||
fn deref_mut(&mut self) -> &mut [u8; N] {
|
||||
self.inner.deref_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Borrow<[u8]> for PublicBox<N> {
|
||||
fn borrow(&self) -> &[u8] {
|
||||
self.deref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> BorrowMut<[u8]> for PublicBox<N> {
|
||||
fn borrow_mut(&mut self) -> &mut [u8] {
|
||||
self.deref_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValue for PublicBox<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
// This is implemented separately from Public to avoid allocating too much stack memory
|
||||
fn load<P: AsRef<Path>>(path: P) -> anyhow::Result<Self> {
|
||||
let mut p = Self::random();
|
||||
fopen_r(path)?.read_exact_to_end(p.deref_mut())?;
|
||||
Ok(p)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValue for PublicBox<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
self.inner.store(path)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValueB64 for PublicBox<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
// This is implemented separately from Public to avoid allocating too much stack memory
|
||||
fn load_b64<const F: usize, P: AsRef<Path>>(path: P) -> Result<Self, Self::Error>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
// A vector is used here to ensure heap allocation without copying from the stack
|
||||
let mut f = vec![0u8; F];
|
||||
let mut v = PublicBox::zero();
|
||||
let p = path.as_ref();
|
||||
|
||||
let len = fopen_r(p)?
|
||||
.read_slice_to_end(&mut f)
|
||||
.with_context(|| format!("Could not load file {p:?}"))?;
|
||||
|
||||
b64_decode(&f[0..len], v.deref_mut())
|
||||
.with_context(|| format!("Could not decode base64 file {p:?}"))?;
|
||||
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValueB64 for PublicBox<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store_b64<const F: usize, P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
self.inner.store_b64::<F, P>(path)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValueB64Writer for PublicBox<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store_b64_writer<const F: usize, W: std::io::Write>(
|
||||
&self,
|
||||
writer: W,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.inner.store_b64_writer::<F, W>(writer)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{Public, PublicBox};
|
||||
use rosenpass_util::{
|
||||
b64::b64_encode,
|
||||
file::{
|
||||
fopen_w, LoadValue, LoadValueB64, StoreValue, StoreValueB64, StoreValueB64Writer,
|
||||
Visibility,
|
||||
},
|
||||
};
|
||||
use std::{fs, ops::Deref, os::unix::fs::PermissionsExt};
|
||||
use tempfile::tempdir;
|
||||
|
||||
/// Number of bytes in payload for load and store tests
|
||||
const N: usize = 100;
|
||||
|
||||
/// Convenience function for running a load/store test
|
||||
fn run_load_store_test<
|
||||
T: LoadValue<Error = anyhow::Error>
|
||||
+ StoreValue<Error = anyhow::Error>
|
||||
+ Deref<Target = [u8; N]>,
|
||||
>() {
|
||||
// Generate original random bytes
|
||||
let original_bytes: [u8; N] = [rand::random(); N];
|
||||
|
||||
// Create a temporary directory
|
||||
let temp_dir = tempdir().unwrap();
|
||||
|
||||
// Store the original bytes to an example file in the temporary directory
|
||||
let example_file = temp_dir.path().join("example_file");
|
||||
std::fs::write(example_file.clone(), &original_bytes).unwrap();
|
||||
|
||||
// Load the value from the example file into our generic type
|
||||
let loaded_public = T::load(&example_file).unwrap();
|
||||
|
||||
// Check that the loaded value matches the original bytes
|
||||
assert_eq!(loaded_public.deref(), &original_bytes);
|
||||
|
||||
// Store the loaded value to a different file in the temporary directory
|
||||
let new_file = temp_dir.path().join("new_file");
|
||||
loaded_public.store(&new_file).unwrap();
|
||||
|
||||
// Read the contents of the new file
|
||||
let new_file_contents = fs::read(&new_file).unwrap();
|
||||
|
||||
// Read the contents of the original file
|
||||
let original_file_contents = fs::read(&example_file).unwrap();
|
||||
|
||||
// Check that the contents of the new file match the original file
|
||||
assert_eq!(new_file_contents, original_file_contents);
|
||||
}
|
||||
|
||||
/// Convenience function for running a base64 load/store test
|
||||
fn run_base64_load_store_test<
|
||||
T: LoadValueB64<Error = anyhow::Error>
|
||||
+ StoreValueB64<Error = anyhow::Error>
|
||||
+ StoreValueB64Writer<Error = anyhow::Error>
|
||||
+ Deref<Target = [u8; N]>,
|
||||
>() {
|
||||
// Generate original random bytes
|
||||
let original_bytes: [u8; N] = [rand::random(); N];
|
||||
// Create a temporary directory
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let example_file = temp_dir.path().join("example_file");
|
||||
let mut encoded_public = [0u8; N * 2];
|
||||
let encoded_public = b64_encode(&original_bytes, &mut encoded_public).unwrap();
|
||||
std::fs::write(&example_file, encoded_public).unwrap();
|
||||
|
||||
// Load the public from the example file
|
||||
let loaded_public = T::load_b64::<{ N * 2 }, _>(&example_file).unwrap();
|
||||
// Check that the loaded public matches the original bytes
|
||||
assert_eq!(loaded_public.deref(), &original_bytes);
|
||||
|
||||
// Store the loaded public to a different file in the temporary directory
|
||||
let new_file = temp_dir.path().join("new_file");
|
||||
loaded_public.store_b64::<{ N * 2 }, _>(&new_file).unwrap();
|
||||
|
||||
// Read the contents of the new file
|
||||
let new_file_contents = fs::read(&new_file).unwrap();
|
||||
// Read the contents of the original file
|
||||
let original_file_contents = fs::read(&example_file).unwrap();
|
||||
// Check that the contents of the new file match the original file
|
||||
assert_eq!(new_file_contents, original_file_contents);
|
||||
|
||||
// Check new file permissions are public
|
||||
let metadata = fs::metadata(&new_file).unwrap();
|
||||
assert_eq!(metadata.permissions().mode() & 0o000777, 0o644);
|
||||
|
||||
// Store the loaded public to a different file in the temporary directory for a second time
|
||||
let new_file = temp_dir.path().join("new_file_writer");
|
||||
let new_file_writer = fopen_w(new_file.clone(), Visibility::Public).unwrap();
|
||||
loaded_public
|
||||
.store_b64_writer::<{ N * 2 }, _>(&new_file_writer)
|
||||
.unwrap();
|
||||
|
||||
// Read the contents of the new file
|
||||
let new_file_contents = fs::read(&new_file).unwrap();
|
||||
// Read the contents of the original file
|
||||
let original_file_contents = fs::read(&example_file).unwrap();
|
||||
// Check that the contents of the new file match the original file
|
||||
assert_eq!(new_file_contents, original_file_contents);
|
||||
|
||||
// Check new file permissions are public
|
||||
let metadata = fs::metadata(&new_file).unwrap();
|
||||
assert_eq!(metadata.permissions().mode() & 0o000777, 0o644);
|
||||
}
|
||||
|
||||
/// Test loading a [Public] from an example file, and then storing it again in a new file
|
||||
#[test]
|
||||
fn test_public_load_store() {
|
||||
run_load_store_test::<Public<N>>();
|
||||
}
|
||||
|
||||
/// Test loading a [PublicBox] from an example file, and then storing it again in a new file
|
||||
#[test]
|
||||
fn test_public_box_load_store() {
|
||||
run_load_store_test::<PublicBox<N>>();
|
||||
}
|
||||
|
||||
/// Test loading a base64-encoded [Public] from an example file, and then storing it again
|
||||
/// in a different file
|
||||
#[test]
|
||||
fn test_public_load_store_base64() {
|
||||
run_base64_load_store_test::<Public<N>>();
|
||||
}
|
||||
|
||||
/// Test loading a base64-encoded [PublicBox] from an example file, and then storing it
|
||||
/// again in a different file
|
||||
#[test]
|
||||
fn test_public_box_load_store_base64() {
|
||||
run_base64_load_store_test::<PublicBox<N>>();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
pub type Rng = rand::rngs::ThreadRng;
|
||||
|
||||
pub fn rng() -> Rng {
|
||||
rand::thread_rng()
|
||||
}
|
||||
@@ -1,467 +0,0 @@
|
||||
use std::cell::RefCell;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryInto;
|
||||
use std::fmt;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use rand::{Fill as Randomize, Rng};
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
||||
|
||||
use rosenpass_util::b64::{b64_decode, b64_encode};
|
||||
use rosenpass_util::file::{
|
||||
fopen_r, LoadValue, LoadValueB64, ReadExactToEnd, ReadSliceToEnd, StoreValueB64,
|
||||
StoreValueB64Writer,
|
||||
};
|
||||
use rosenpass_util::functional::mutating;
|
||||
|
||||
use crate::alloc::{secret_box, SecretBox, SecretVec};
|
||||
use crate::file::StoreSecret;
|
||||
|
||||
use rosenpass_util::file::{fopen_w, Visibility};
|
||||
use std::io::Write;
|
||||
// This might become a problem in library usage; it's effectively a memory
|
||||
// leak which probably isn't a problem right now because most memory will
|
||||
// be reused…
|
||||
thread_local! {
|
||||
static SECRET_CACHE: RefCell<SecretMemoryPool> = RefCell::new(SecretMemoryPool::new());
|
||||
}
|
||||
|
||||
fn with_secret_memory_pool<Fn, R>(mut f: Fn) -> R
|
||||
where
|
||||
Fn: FnMut(Option<&mut SecretMemoryPool>) -> R,
|
||||
{
|
||||
// This acquires the SECRET_CACHE
|
||||
SECRET_CACHE
|
||||
.try_with(|cell| {
|
||||
// And acquires the inner reference
|
||||
cell.try_borrow_mut()
|
||||
.as_deref_mut()
|
||||
// To call the given function
|
||||
.map(|pool| f(Some(pool)))
|
||||
.ok()
|
||||
})
|
||||
.ok()
|
||||
.flatten()
|
||||
// Failing that, the given function is called with None
|
||||
.unwrap_or_else(|| f(None))
|
||||
}
|
||||
|
||||
// Wrapper around SecretBox that applies automatic zeroization
|
||||
#[derive(Debug)]
|
||||
struct ZeroizingSecretBox<T: Zeroize + ?Sized>(Option<SecretBox<T>>);
|
||||
|
||||
impl<T: Zeroize> ZeroizingSecretBox<T> {
|
||||
fn new(boxed: T) -> Self {
|
||||
ZeroizingSecretBox(Some(secret_box(boxed)))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Zeroize + ?Sized> ZeroizingSecretBox<T> {
|
||||
fn from_secret_box(inner: SecretBox<T>) -> Self {
|
||||
Self(Some(inner))
|
||||
}
|
||||
|
||||
fn take(mut self) -> SecretBox<T> {
|
||||
self.0.take().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Zeroize + ?Sized> ZeroizeOnDrop for ZeroizingSecretBox<T> {}
|
||||
impl<T: Zeroize + ?Sized> Zeroize for ZeroizingSecretBox<T> {
|
||||
fn zeroize(&mut self) {
|
||||
if let Some(inner) = &mut self.0 {
|
||||
let inner: &mut SecretBox<T> = inner; // type annotation
|
||||
inner.zeroize()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Zeroize + ?Sized> Drop for ZeroizingSecretBox<T> {
|
||||
fn drop(&mut self) {
|
||||
self.zeroize()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Zeroize + ?Sized> Deref for ZeroizingSecretBox<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
self.0.as_ref().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Zeroize + ?Sized> DerefMut for ZeroizingSecretBox<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
self.0.as_mut().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// Pool that stores secret memory allocations
|
||||
///
|
||||
/// Allocation of secret memory is expensive. Thus, this struct provides a
|
||||
/// pool of secret memory, readily available to yield protected slices of
|
||||
/// memory.
|
||||
#[derive(Debug)] // TODO: check whether deriving Debug here is a good idea
|
||||
struct SecretMemoryPool {
|
||||
pool: HashMap<usize, Vec<ZeroizingSecretBox<[u8]>>>,
|
||||
}
|
||||
|
||||
impl SecretMemoryPool {
|
||||
/// Create a new [SecretMemoryPool]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
pool: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return secret back to the pool for future re-use
|
||||
pub fn release<const N: usize>(&mut self, mut sec: ZeroizingSecretBox<[u8; N]>) {
|
||||
sec.zeroize();
|
||||
|
||||
// This conversion sequence is weird but at least it guarantees
|
||||
// that the heap allocation is preserved according to the docs
|
||||
let sec: SecretVec<u8> = sec.take().into();
|
||||
let sec: SecretBox<[u8]> = sec.into();
|
||||
|
||||
self.pool
|
||||
.entry(N)
|
||||
.or_default()
|
||||
.push(ZeroizingSecretBox::from_secret_box(sec));
|
||||
}
|
||||
|
||||
/// Take protected memory from the pool, allocating new one if no suitable
|
||||
/// chunk is found in the inventory.
|
||||
///
|
||||
/// The secret is guaranteed to be filled with null bytes
|
||||
pub fn take<const N: usize>(&mut self) -> ZeroizingSecretBox<[u8; N]> {
|
||||
let entry = self.pool.entry(N).or_default();
|
||||
let inner = match entry.pop() {
|
||||
None => secret_box([0u8; N]),
|
||||
Some(sec) => sec.take().try_into().unwrap(),
|
||||
};
|
||||
ZeroizingSecretBox::from_secret_box(inner)
|
||||
}
|
||||
}
|
||||
|
||||
/// Storage for secret data
|
||||
pub struct Secret<const N: usize> {
|
||||
storage: Option<ZeroizingSecretBox<[u8; N]>>,
|
||||
}
|
||||
|
||||
impl<const N: usize> Secret<N> {
|
||||
pub fn from_slice(slice: &[u8]) -> Self {
|
||||
let mut new_self = Self::zero();
|
||||
new_self.secret_mut().copy_from_slice(slice);
|
||||
new_self
|
||||
}
|
||||
|
||||
/// Returns a new [Secret] that is zero initialized
|
||||
pub fn zero() -> Self {
|
||||
// Using [SecretMemoryPool] here because this operation is expensive,
|
||||
// yet it is used in hot loops
|
||||
let buf = with_secret_memory_pool(|pool| {
|
||||
pool.map(|p| p.take())
|
||||
.unwrap_or_else(|| ZeroizingSecretBox::new([0u8; N]))
|
||||
});
|
||||
|
||||
Self { storage: Some(buf) }
|
||||
}
|
||||
|
||||
/// Returns a new [Secret] that is randomized
|
||||
pub fn random() -> Self {
|
||||
mutating(Self::zero(), |r| r.randomize())
|
||||
}
|
||||
|
||||
/// Sets all data in an existing secret to random bytes
|
||||
pub fn randomize(&mut self) {
|
||||
self.try_fill(&mut crate::rand::rng()).unwrap()
|
||||
}
|
||||
|
||||
/// Borrows the data
|
||||
pub fn secret(&self) -> &[u8; N] {
|
||||
self.storage.as_ref().unwrap()
|
||||
}
|
||||
|
||||
/// Borrows the data mutably
|
||||
pub fn secret_mut(&mut self) -> &mut [u8; N] {
|
||||
self.storage.as_mut().unwrap()
|
||||
}
|
||||
}
|
||||
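Illustrative sketch of the accessors above, assuming an allocation policy is set first; otherwise constructing a Secret panics, as the Default impl of SecretAlloc shows.

// Illustrative sketch; not part of the diff.
use rosenpass_secret_memory::{secret_policy_use_only_malloc_secrets, Secret};

fn demo() {
    // Select an allocation policy before the first Secret is created.
    secret_policy_use_only_malloc_secrets();

    // Zero-initialized secret; storage comes from the thread-local
    // SecretMemoryPool and is zeroized again on drop.
    let zeroed: Secret<32> = Secret::zero();
    assert_eq!(zeroed.secret(), &[0u8; 32]);

    // Randomized secret, mutated in place through secret_mut().
    let mut key: Secret<32> = Secret::random();
    key.secret_mut()[0] ^= 0x01;
}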
|
||||
impl<const N: usize> Randomize for Secret<N> {
|
||||
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), rand::Error> {
|
||||
// Zeroize self first just to make sure the barriers from the zeroize crate take
|
||||
// effect to prevent the compiler from optimizing this away.
|
||||
// We should at some point replace this with our own barriers.
|
||||
self.zeroize();
|
||||
self.secret_mut().try_fill(rng)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> ZeroizeOnDrop for Secret<N> {}
|
||||
impl<const N: usize> Zeroize for Secret<N> {
|
||||
fn zeroize(&mut self) {
|
||||
if let Some(inner) = &mut self.storage {
|
||||
inner.zeroize()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Drop for Secret<N> {
|
||||
fn drop(&mut self) {
|
||||
with_secret_memory_pool(|pool| {
|
||||
if let Some((pool, secret)) = pool.zip(self.storage.take()) {
|
||||
pool.release(secret);
|
||||
}
|
||||
});
|
||||
|
||||
// This should be unnecessary: The pool has one item – the inner secret – which
|
||||
// zeroizes itself on drop. Calling it should not do any harm though…
|
||||
self.zeroize()
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> Clone for Secret<N> {
|
||||
fn clone(&self) -> Self {
|
||||
Self::from_slice(self.secret())
|
||||
}
|
||||
}
|
||||
|
||||
/// The Debug implementation of [Secret] does not reveal the secret data,
|
||||
/// instead a placeholder `<SECRET>` is used
|
||||
impl<const N: usize> fmt::Debug for Secret<N> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.write_str("<SECRET>")
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValue for Secret<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn load<P: AsRef<Path>>(path: P) -> anyhow::Result<Self> {
|
||||
let mut v = Self::random();
|
||||
let p = path.as_ref();
|
||||
fopen_r(p)?
|
||||
.read_exact_to_end(v.secret_mut())
|
||||
.with_context(|| format!("Could not load file {p:?}"))?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValueB64 for Secret<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn load_b64<const F: usize, P: AsRef<Path>>(path: P) -> anyhow::Result<Self> {
|
||||
let mut f: Secret<F> = Secret::random();
|
||||
let mut v = Self::random();
|
||||
let p = path.as_ref();
|
||||
|
||||
let len = fopen_r(p)?
|
||||
.read_slice_to_end(f.secret_mut())
|
||||
.with_context(|| format!("Could not load file {p:?}"))?;
|
||||
|
||||
b64_decode(&f.secret()[0..len], v.secret_mut())
|
||||
.with_context(|| format!("Could not decode base64 file {p:?}"))?;
|
||||
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValueB64 for Secret<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store_b64<const F: usize, P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
let p = path.as_ref();
|
||||
|
||||
let mut f: Secret<F> = Secret::random();
|
||||
let encoded_str = b64_encode(self.secret(), f.secret_mut())
|
||||
.with_context(|| format!("Could not encode base64 file {p:?}"))?;
|
||||
|
||||
fopen_w(p, Visibility::Secret)?
|
||||
.write_all(encoded_str.as_bytes())
|
||||
.with_context(|| format!("Could not write file {p:?}"))?;
|
||||
f.zeroize();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValueB64Writer for Secret<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store_b64_writer<const F: usize, W: Write>(&self, mut writer: W) -> anyhow::Result<()> {
|
||||
let mut f: Secret<F> = Secret::random();
|
||||
let encoded_str = b64_encode(self.secret(), f.secret_mut())
|
||||
.with_context(|| "Could not encode secret to base64")?;
|
||||
|
||||
writer
|
||||
.write_all(encoded_str.as_bytes())
|
||||
.with_context(|| "Could not write base64 to writer")?;
|
||||
f.zeroize();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreSecret for Secret<N> {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
fopen_w(path, Visibility::Secret)?.write_all(self.secret())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
fopen_w(path, Visibility::Public)?.write_all(self.secret())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::test_spawn_process_provided_policies;
|
||||
|
||||
use super::*;
|
||||
use std::{fs, os::unix::fs::PermissionsExt};
|
||||
use tempfile::tempdir;
|
||||
|
||||
procspawn::enable_test_support!();
|
||||
|
||||
/// check that we can alloc using the magic pool
|
||||
#[test]
|
||||
fn secret_memory_pool_take() {
|
||||
test_spawn_process_provided_policies!({
|
||||
const N: usize = 0x100;
|
||||
let mut pool = SecretMemoryPool::new();
|
||||
let secret: ZeroizingSecretBox<[u8; N]> = pool.take();
|
||||
assert_eq!(secret.as_ref(), &[0; N]);
|
||||
});
|
||||
}
|
||||
|
||||
/// check that a secret remains valid, even after its [SecretMemoryPool] is dropped
|
||||
#[test]
|
||||
fn secret_memory_pool_drop() {
|
||||
test_spawn_process_provided_policies!({
|
||||
const N: usize = 0x100;
|
||||
let mut pool = SecretMemoryPool::new();
|
||||
let secret: ZeroizingSecretBox<[u8; N]> = pool.take();
|
||||
std::mem::drop(pool);
|
||||
assert_eq!(secret.as_ref(), &[0; N]);
|
||||
});
|
||||
}
|
||||
|
||||
/// check that a secret can be reborn, freshly initialized with zero
|
||||
#[test]
|
||||
fn secret_memory_pool_release() {
|
||||
test_spawn_process_provided_policies!({
|
||||
const N: usize = 1;
|
||||
let mut pool = SecretMemoryPool::new();
|
||||
let mut secret: ZeroizingSecretBox<[u8; N]> = pool.take();
|
||||
let old_secret_ptr = secret.as_ref().as_ptr();
|
||||
|
||||
secret.as_mut()[0] = 0x13;
|
||||
pool.release(secret);
|
||||
|
||||
// now check that we get the same ptr
|
||||
let new_secret: ZeroizingSecretBox<[u8; N]> = pool.take();
|
||||
assert_eq!(old_secret_ptr, new_secret.as_ref().as_ptr());
|
||||
|
||||
// and that the secret was zeroized
|
||||
assert_eq!(new_secret.as_ref(), &[0; N]);
|
||||
});
|
||||
}
|
||||
|
||||
/// test loading a secret from an example file, and then storing it again in a different file
|
||||
#[test]
|
||||
fn test_secret_load_store() {
|
||||
test_spawn_process_provided_policies!({
|
||||
const N: usize = 100;
|
||||
|
||||
// Generate original random bytes
|
||||
let original_bytes: [u8; N] = [rand::random(); N];
|
||||
|
||||
// Create a temporary directory
|
||||
let temp_dir = tempdir().unwrap();
|
||||
|
||||
// Store the original secret to an example file in the temporary directory
|
||||
let example_file = temp_dir.path().join("example_file");
|
||||
std::fs::write(example_file.clone(), &original_bytes).unwrap();
|
||||
|
||||
// Load the secret from the example file
|
||||
let loaded_secret = Secret::load(&example_file).unwrap();
|
||||
|
||||
// Check that the loaded secret matches the original bytes
|
||||
assert_eq!(loaded_secret.secret(), &original_bytes);
|
||||
|
||||
// Store the loaded secret to a different file in the temporary directory
|
||||
let new_file = temp_dir.path().join("new_file");
|
||||
loaded_secret.store(&new_file).unwrap();
|
||||
|
||||
// Read the contents of the new file
|
||||
let new_file_contents = fs::read(&new_file).unwrap();
|
||||
|
||||
// Read the contents of the original file
|
||||
let original_file_contents = fs::read(&example_file).unwrap();
|
||||
|
||||
// Check that the contents of the new file match the original file
|
||||
assert_eq!(new_file_contents, original_file_contents);
|
||||
});
|
||||
}
|
||||
|
||||
/// test loading a base64 encoded secret from an example file, and then storing it again in a different file
|
||||
#[test]
|
||||
fn test_secret_load_store_base64() {
|
||||
test_spawn_process_provided_policies!({
|
||||
const N: usize = 100;
|
||||
// Generate original random bytes
|
||||
let original_bytes: [u8; N] = [rand::random(); N];
|
||||
// Create a temporary directory
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let example_file = temp_dir.path().join("example_file");
|
||||
let mut encoded_secret = [0u8; N * 2];
|
||||
let encoded_secret = b64_encode(&original_bytes, &mut encoded_secret).unwrap();
|
||||
|
||||
std::fs::write(&example_file, encoded_secret).unwrap();
|
||||
|
||||
// Load the secret from the example file
|
||||
let loaded_secret = Secret::load_b64::<{ N * 2 }, _>(&example_file).unwrap();
|
||||
// Check that the loaded secret matches the original bytes
|
||||
assert_eq!(loaded_secret.secret(), &original_bytes);
|
||||
|
||||
// Store the loaded secret to a different file in the temporary directory
|
||||
let new_file = temp_dir.path().join("new_file");
|
||||
loaded_secret.store_b64::<{ N * 2 }, _>(&new_file).unwrap();
|
||||
|
||||
// Read the contents of the new file
|
||||
let new_file_contents = fs::read(&new_file).unwrap();
|
||||
// Read the contents of the original file
|
||||
let original_file_contents = fs::read(&example_file).unwrap();
|
||||
// Check that the contents of the new file match the original file
|
||||
assert_eq!(new_file_contents, original_file_contents);
|
||||
|
||||
//Check new file permissions are secret
|
||||
let metadata = fs::metadata(&new_file).unwrap();
|
||||
assert_eq!(metadata.permissions().mode() & 0o000777, 0o600);
|
||||
|
||||
// Store the loaded secret to a different file in the temporary directory for a second time
|
||||
let new_file = temp_dir.path().join("new_file_writer");
|
||||
let new_file_writer = fopen_w(new_file.clone(), Visibility::Secret).unwrap();
|
||||
loaded_secret
|
||||
.store_b64_writer::<{ N * 2 }, _>(&new_file_writer)
|
||||
.unwrap();
|
||||
|
||||
// Read the contents of the new file
|
||||
let new_file_contents = fs::read(&new_file).unwrap();
|
||||
// Read the contents of the original file
|
||||
let original_file_contents = fs::read(&example_file).unwrap();
|
||||
// Check that the contents of the new file match the original file
|
||||
assert_eq!(new_file_contents, original_file_contents);
|
||||
|
||||
//Check new file permissions are secret
|
||||
let metadata = fs::metadata(&new_file).unwrap();
|
||||
assert_eq!(metadata.permissions().mode() & 0o000777, 0o600);
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,21 +1,10 @@
|
||||
use anyhow::bail;
|
||||
|
||||
use anyhow::Result;
|
||||
use derive_builder::Builder;
|
||||
use log::{error, info, warn};
|
||||
use mio::Interest;
|
||||
use mio::Token;
|
||||
use rosenpass_secret_memory::Public;
|
||||
use rosenpass_secret_memory::Secret;
|
||||
use rosenpass_util::file::StoreValueB64;
|
||||
use rosenpass_wireguard_broker::WireguardBrokerMio;
|
||||
use rosenpass_wireguard_broker::{WireguardBrokerCfg, WG_KEY_LEN};
|
||||
use zerocopy::AsBytes;
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::io::Write;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Debug;
|
||||
use std::io::ErrorKind;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::net::Ipv6Addr;
|
||||
@@ -24,29 +13,23 @@ use std::net::SocketAddrV4;
|
||||
use std::net::SocketAddrV6;
|
||||
use std::net::ToSocketAddrs;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
use std::process::Stdio;
|
||||
use std::slice;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::protocol::HostIdentification;
|
||||
use crate::util::fopen_w;
|
||||
use crate::RosenpassError;
|
||||
use crate::{
|
||||
config::Verbosity,
|
||||
protocol::{CryptoServer, MsgBuf, PeerPtr, SPk, SSk, SymKey, Timing},
|
||||
util::{b64_writer, fmt_b64},
|
||||
Result,
|
||||
};
|
||||
use rosenpass_util::attempt;
|
||||
use rosenpass_util::b64::B64Display;
|
||||
|
||||
const MAX_B64_KEY_SIZE: usize = 32 * 5 / 3;
|
||||
const MAX_B64_PEER_ID_SIZE: usize = 32 * 5 / 3;
|
||||
|
||||
const IPV4_ANY_ADDR: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0);
|
||||
const IPV6_ANY_ADDR: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
|
||||
|
||||
const UNDER_LOAD_RATIO: f64 = 0.5;
|
||||
const DURATION_UPDATE_UNDER_LOAD_STATUS: Duration = Duration::from_millis(500);
|
||||
|
||||
const BROKER_ID_BYTES: usize = 8;
|
||||
|
||||
fn ipv4_any_binding() -> SocketAddr {
|
||||
// addr, port
|
||||
SocketAddr::V4(SocketAddrV4::new(IPV4_ANY_ADDR, 0))
|
||||
@@ -57,50 +40,10 @@ fn ipv6_any_binding() -> SocketAddr {
|
||||
SocketAddr::V6(SocketAddrV6::new(IPV6_ANY_ADDR, 0, 0, 0))
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct MioTokenDispenser {
|
||||
counter: usize,
|
||||
}
|
||||
|
||||
impl MioTokenDispenser {
|
||||
fn dispense(&mut self) -> Token {
|
||||
let r = self.counter;
|
||||
self.counter += 1;
|
||||
Token(r)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct BrokerStore {
|
||||
store: HashMap<
|
||||
Public<BROKER_ID_BYTES>,
|
||||
Box<dyn WireguardBrokerMio<Error = anyhow::Error, MioError = anyhow::Error>>,
|
||||
>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BrokerStorePtr(pub Public<BROKER_ID_BYTES>);
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BrokerPeer {
|
||||
ptr: BrokerStorePtr,
|
||||
peer_cfg: Box<dyn WireguardBrokerCfg>,
|
||||
}
|
||||
|
||||
impl BrokerPeer {
|
||||
pub fn new(ptr: BrokerStorePtr, peer_cfg: Box<dyn WireguardBrokerCfg>) -> Self {
|
||||
Self { ptr, peer_cfg }
|
||||
}
|
||||
|
||||
pub fn ptr(&self) -> &BrokerStorePtr {
|
||||
&self.ptr
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct AppPeer {
|
||||
pub outfile: Option<PathBuf>,
|
||||
pub broker_peer: Option<BrokerPeer>,
|
||||
pub outwg: Option<WireguardOut>, // TODO make this a generic command
|
||||
pub initial_endpoint: Option<Endpoint>,
|
||||
pub current_endpoint: Option<Endpoint>,
|
||||
}
|
||||
@@ -121,23 +64,6 @@ pub struct WireguardOut {
|
||||
pub extra_params: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum DoSOperation {
|
||||
UnderLoad,
|
||||
Normal,
|
||||
}
|
||||
/// Integration test helpers for AppServer
|
||||
#[derive(Debug, Builder)]
|
||||
#[builder(pattern = "owned")]
|
||||
pub struct AppServerTest {
|
||||
/// Enable DoS operation permanently
|
||||
#[builder(default = "false")]
|
||||
pub enable_dos_permanently: bool,
|
||||
/// Terminate application signal
|
||||
#[builder(default = "None")]
|
||||
pub termination_handler: Option<std::sync::mpsc::Receiver<()>>,
|
||||
}
|
||||
|
||||
/// Holds the state of the application, namely the external IO
|
||||
///
|
||||
/// Responsible for file IO, network IO
|
||||
@@ -148,17 +74,9 @@ pub struct AppServer {
|
||||
pub sockets: Vec<mio::net::UdpSocket>,
|
||||
pub events: mio::Events,
|
||||
pub mio_poll: mio::Poll,
|
||||
pub mio_token_dispenser: MioTokenDispenser,
|
||||
pub brokers: BrokerStore,
|
||||
pub peers: Vec<AppPeer>,
|
||||
pub verbosity: Verbosity,
|
||||
pub all_sockets_drained: bool,
|
||||
pub under_load: DoSOperation,
|
||||
pub blocking_polls_count: usize,
|
||||
pub non_blocking_polls_count: usize,
|
||||
pub unpolled_count: usize,
|
||||
pub last_update_time: Instant,
|
||||
pub test_helpers: Option<AppServerTest>,
|
||||
}
|
||||
|
||||
/// A socket pointer is an index assigned to a socket;
|
||||
@@ -179,8 +97,8 @@ impl SocketPtr {
|
||||
&mut srv.sockets[self.0]
|
||||
}
|
||||
|
||||
pub fn send_to(&self, srv: &AppServer, buf: &[u8], addr: SocketAddr) -> anyhow::Result<()> {
|
||||
self.get(srv).send_to(buf, addr)?;
|
||||
pub fn send_to(&self, srv: &AppServer, buf: &[u8], addr: SocketAddr) -> Result<()> {
|
||||
self.get(srv).send_to(&buf, addr)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -207,17 +125,6 @@ impl AppPeerPtr {
|
||||
pub fn get_app_mut<'a>(&self, srv: &'a mut AppServer) -> &'a mut AppPeer {
|
||||
&mut srv.peers[self.0]
|
||||
}
|
||||
|
||||
pub fn set_psk(&self, server: &mut AppServer, psk: &Secret<WG_KEY_LEN>) -> anyhow::Result<()> {
|
||||
if let Some(broker) = server.peers[self.0].broker_peer.as_ref() {
|
||||
let config = broker.peer_cfg.create_config(psk);
|
||||
let broker = server.brokers.store.get_mut(&broker.ptr().0).unwrap();
|
||||
broker.set_psk(config)?;
|
||||
} else if server.peers[self.0].outfile.is_none() {
|
||||
log::warn!("No broker peer found for peer {}", self.0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -252,7 +159,13 @@ pub enum Endpoint {
|
||||
/// at the same time. It would also reply on the same port RespHello was
|
||||
/// sent to when listening on multiple ports on the same interface. This
|
||||
/// may be required for some arcane firewall setups.
|
||||
SocketBoundAddress(SocketBoundEndpoint),
|
||||
SocketBoundAddress {
|
||||
/// The socket the address can be reached under; this is generally
|
||||
/// determined when we actually receive a RespHello message
|
||||
socket: SocketPtr,
|
||||
/// Just the address
|
||||
addr: SocketAddr,
|
||||
},
|
||||
// A host name or IP address; storing the hostname here instead of an
|
||||
// ip address makes sure that we look up the host name whenever we try
|
||||
// to make a connection; this may be beneficial in some setups where a host-name
|
||||
@@ -260,85 +173,6 @@ pub enum Endpoint {
|
||||
Discovery(HostPathDiscoveryEndpoint),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Endpoint {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Endpoint::SocketBoundAddress(host) => write!(f, "{}", host),
|
||||
Endpoint::Discovery(host) => write!(f, "{}", host),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct SocketBoundEndpoint {
|
||||
/// The socket the address can be reached under; this is generally
|
||||
/// determined when we actually receive a RespHello message
|
||||
socket: SocketPtr,
|
||||
/// Just the address
|
||||
addr: SocketAddr,
|
||||
/// identifier
|
||||
bytes: (usize, [u8; SocketBoundEndpoint::BUFFER_SIZE]),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for SocketBoundEndpoint {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.addr)
|
||||
}
|
||||
}
|
||||
|
||||
impl SocketBoundEndpoint {
|
||||
const SOCKET_SIZE: usize = usize::BITS as usize / 8;
|
||||
const IPV6_SIZE: usize = 16;
|
||||
const PORT_SIZE: usize = 2;
|
||||
const SCOPE_ID_SIZE: usize = 4;
|
||||
|
||||
const BUFFER_SIZE: usize = SocketBoundEndpoint::SOCKET_SIZE
|
||||
+ SocketBoundEndpoint::IPV6_SIZE
|
||||
+ SocketBoundEndpoint::PORT_SIZE
|
||||
+ SocketBoundEndpoint::SCOPE_ID_SIZE;
|
||||
|
||||
pub fn new(socket: SocketPtr, addr: SocketAddr) -> Self {
|
||||
let bytes = Self::to_bytes(&socket, &addr);
|
||||
Self {
|
||||
socket,
|
||||
addr,
|
||||
bytes,
|
||||
}
|
||||
}
|
||||
|
||||
fn to_bytes(
|
||||
socket: &SocketPtr,
|
||||
addr: &SocketAddr,
|
||||
) -> (usize, [u8; SocketBoundEndpoint::BUFFER_SIZE]) {
|
||||
let mut buf = [0u8; SocketBoundEndpoint::BUFFER_SIZE];
|
||||
let addr = match addr {
|
||||
SocketAddr::V4(addr) => {
|
||||
// Map IPv4 addresses to their IPv4-mapped IPv6 form
|
||||
let ip = addr.ip().to_ipv6_mapped();
|
||||
SocketAddrV6::new(ip, addr.port(), 0, 0)
|
||||
}
|
||||
SocketAddr::V6(addr) => *addr,
|
||||
};
|
||||
let mut len: usize = 0;
|
||||
buf[len..len + SocketBoundEndpoint::SOCKET_SIZE].copy_from_slice(&socket.0.to_be_bytes());
|
||||
len += SocketBoundEndpoint::SOCKET_SIZE;
|
||||
buf[len..len + SocketBoundEndpoint::IPV6_SIZE].copy_from_slice(&addr.ip().octets());
|
||||
len += SocketBoundEndpoint::IPV6_SIZE;
|
||||
buf[len..len + SocketBoundEndpoint::PORT_SIZE].copy_from_slice(&addr.port().to_be_bytes());
|
||||
len += SocketBoundEndpoint::PORT_SIZE;
|
||||
buf[len..len + SocketBoundEndpoint::SCOPE_ID_SIZE]
|
||||
.copy_from_slice(&addr.scope_id().to_be_bytes());
|
||||
len += SocketBoundEndpoint::SCOPE_ID_SIZE;
|
||||
(len, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl HostIdentification for SocketBoundEndpoint {
|
||||
fn encode(&self) -> &[u8] {
|
||||
&self.bytes.1[0..self.bytes.0]
|
||||
}
|
||||
}
|
||||
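For illustration, not part of the diff: the identifier produced above concatenates the socket index, the IPv6-mapped address, the port, and the scope id, all big-endian. A crate-internal sketch of consuming it, assuming SocketPtr is the tuple struct over a socket index that the surrounding code implies:

// Illustrative, crate-internal sketch. HostIdentification (for encode) is
// already imported at the top of this file; SocketPtr(0) assumes the tuple
// field is accessible here, as it is inside the crate.
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};

fn demo_endpoint_id() {
    let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(192, 0, 2, 1), 51820));
    let ep = SocketBoundEndpoint::new(SocketPtr(0), addr);

    // Layout on a 64-bit target: 8 (socket index) + 16 (IPv6) + 2 (port) + 4 (scope id)
    let id: &[u8] = ep.encode();
    assert_eq!(id.len(), 30);
}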
|
||||
impl Endpoint {
|
||||
/// Start discovery from some addresses
|
||||
pub fn discovery_from_addresses(addresses: Vec<SocketAddr>) -> Self {
|
||||
@@ -346,7 +180,7 @@ impl Endpoint {
|
||||
}
|
||||
|
||||
/// Start endpoint discovery from a hostname
|
||||
pub fn discovery_from_hostname(hostname: String) -> anyhow::Result<Self> {
|
||||
pub fn discovery_from_hostname(hostname: String) -> Result<Self> {
|
||||
let host = HostPathDiscoveryEndpoint::lookup(hostname)?;
|
||||
Ok(Endpoint::Discovery(host))
|
||||
}
|
||||
@@ -376,10 +210,10 @@ impl Endpoint {
|
||||
Some(Self::discovery_from_addresses(addrs))
|
||||
}
|
||||
|
||||
pub fn send(&self, srv: &AppServer, buf: &[u8]) -> anyhow::Result<()> {
|
||||
pub fn send(&self, srv: &AppServer, buf: &[u8]) -> Result<()> {
|
||||
use Endpoint::*;
|
||||
match self {
|
||||
SocketBoundAddress(host) => host.socket.send_to(srv, buf, host.addr),
|
||||
SocketBoundAddress { socket, addr } => socket.send_to(srv, buf, *addr),
|
||||
Discovery(host) => host.send_scouting(srv, buf),
|
||||
}
|
||||
}
|
||||
@@ -387,7 +221,7 @@ impl Endpoint {
|
||||
fn addresses(&self) -> &[SocketAddr] {
|
||||
use Endpoint::*;
|
||||
match self {
|
||||
SocketBoundAddress(host) => slice::from_ref(&host.addr),
|
||||
SocketBoundAddress { addr, .. } => slice::from_ref(addr),
|
||||
Discovery(host) => host.addresses(),
|
||||
}
|
||||
}
|
||||
@@ -425,12 +259,6 @@ pub struct HostPathDiscoveryEndpoint {
|
||||
addresses: Vec<SocketAddr>,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for HostPathDiscoveryEndpoint {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{:?}", self.addresses)
|
||||
}
|
||||
}
|
||||
|
||||
impl HostPathDiscoveryEndpoint {
|
||||
pub fn from_addresses(addresses: Vec<SocketAddr>) -> Self {
|
||||
let scouting_state = Cell::new((0, 0));
|
||||
@@ -441,7 +269,7 @@ impl HostPathDiscoveryEndpoint {
|
||||
}
|
||||
|
||||
/// Lookup a hostname
|
||||
pub fn lookup(hostname: String) -> anyhow::Result<Self> {
|
||||
pub fn lookup(hostname: String) -> Result<Self> {
|
||||
Ok(Self {
|
||||
addresses: ToSocketAddrs::to_socket_addrs(&hostname)?.collect(),
|
||||
scouting_state: Cell::new((0, 0)),
|
||||
@@ -462,16 +290,16 @@ impl HostPathDiscoveryEndpoint {
|
||||
/// Attempt to reach the host
|
||||
///
|
||||
/// Will round-robin-try different socket-ip-combinations on each call.
|
||||
pub fn send_scouting(&self, srv: &AppServer, buf: &[u8]) -> anyhow::Result<()> {
|
||||
pub fn send_scouting(&self, srv: &AppServer, buf: &[u8]) -> Result<()> {
|
||||
let (addr_off, sock_off) = self.scouting_state.get();
|
||||
|
||||
let mut addrs = (self.addresses)
|
||||
let mut addrs = (&self.addresses)
|
||||
.iter()
|
||||
.enumerate()
|
||||
.cycle()
|
||||
.skip(addr_off)
|
||||
.take(self.addresses.len());
|
||||
let mut sockets = (srv.sockets)
|
||||
let mut sockets = (&srv.sockets)
|
||||
.iter()
|
||||
.enumerate()
|
||||
.cycle()
|
||||
@@ -496,30 +324,24 @@ impl HostPathDiscoveryEndpoint {
|
||||
.to_string()
|
||||
.starts_with("Address family not supported by protocol");
|
||||
if !ignore {
|
||||
warn!("Socket #{} refusing to send to {}: {}", sock_no, addr, err);
|
||||
warn!("Socket #{} refusing to send to {}: ", sock_no, addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bail!("Unable to send message: All sockets returned errors.")
|
||||
error!("Unable to send message: All sockets returned errors.");
|
||||
return Err(RosenpassError::RuntimeError);
|
||||
}
|
||||
}
|
||||
|
||||
impl AppServer {
|
||||
pub fn new(
|
||||
sk: SSk,
|
||||
pk: SPk,
|
||||
addrs: Vec<SocketAddr>,
|
||||
verbosity: Verbosity,
|
||||
test_helpers: Option<AppServerTest>,
|
||||
) -> anyhow::Result<Self> {
|
||||
pub fn new(sk: SSk, pk: SPk, addrs: Vec<SocketAddr>, verbosity: Verbosity) -> Result<Self> {
|
||||
// setup mio
|
||||
let mio_poll = mio::Poll::new()?;
|
||||
let events = mio::Events::with_capacity(20);
|
||||
let mut mio_token_dispenser = MioTokenDispenser::default();
|
||||
let events = mio::Events::with_capacity(8);
|
||||
|
||||
// bind each SocketAddr to a socket
|
||||
let maybe_sockets: Result<Vec<_>, _> =
|
||||
let maybe_sockets: std::result::Result<Vec<_>, std::io::Error> =
|
||||
addrs.into_iter().map(mio::net::UdpSocket::bind).collect();
|
||||
let mut sockets = maybe_sockets?;
|
||||
|
||||
@@ -586,16 +408,15 @@ impl AppServer {
|
||||
}
|
||||
|
||||
if sockets.is_empty() {
|
||||
bail!("No sockets to listen on!")
|
||||
error!("No sockets to listen on!");
|
||||
return Err(RosenpassError::RuntimeError);
|
||||
}
|
||||
|
||||
// register all sockets to mio
|
||||
for socket in sockets.iter_mut() {
|
||||
mio_poll.registry().register(
|
||||
socket,
|
||||
mio_token_dispenser.dispense(),
|
||||
Interest::READABLE,
|
||||
)?;
|
||||
for (i, socket) in sockets.iter_mut().enumerate() {
|
||||
mio_poll
|
||||
.registry()
|
||||
.register(socket, Token(i), Interest::READABLE)?;
|
||||
}
|
||||
|
||||
// TODO use mio::net::UnixStream together with std::os::unix::net::UnixStream for Linux
|
||||
@@ -607,15 +428,7 @@ impl AppServer {
|
||||
sockets,
|
||||
events,
|
||||
mio_poll,
|
||||
mio_token_dispenser,
|
||||
brokers: BrokerStore::default(),
|
||||
all_sockets_drained: false,
|
||||
under_load: DoSOperation::Normal,
|
||||
blocking_polls_count: 0,
|
||||
non_blocking_polls_count: 0,
|
||||
unpolled_count: 0,
|
||||
last_update_time: Instant::now(),
|
||||
test_helpers,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -623,52 +436,14 @@ impl AppServer {
|
||||
matches!(self.verbosity, Verbosity::Verbose)
|
||||
}
|
||||
|
||||
pub fn register_broker(
|
||||
&mut self,
|
||||
broker: Box<dyn WireguardBrokerMio<Error = anyhow::Error, MioError = anyhow::Error>>,
|
||||
) -> Result<BrokerStorePtr> {
|
||||
let ptr = Public::from_slice((self.brokers.store.len() as u64).as_bytes());
|
||||
|
||||
if self.brokers.store.insert(ptr, broker).is_some() {
|
||||
bail!("Broker already registered");
|
||||
}
|
||||
//Register broker
|
||||
self.brokers
|
||||
.store
|
||||
.get_mut(&ptr)
|
||||
.ok_or(anyhow::format_err!("Broker wasn't added to registry"))?
|
||||
.register(
|
||||
self.mio_poll.registry(),
|
||||
self.mio_token_dispenser.dispense(),
|
||||
)?;
|
||||
|
||||
Ok(BrokerStorePtr(ptr))
|
||||
}
|
||||
|
||||
pub fn unregister_broker(&mut self, ptr: BrokerStorePtr) -> Result<()> {
|
||||
//Unregister broker
|
||||
self.brokers
|
||||
.store
|
||||
.get_mut(&ptr.0)
|
||||
.ok_or_else(|| anyhow::anyhow!("Broker not found"))?
|
||||
.unregister(self.mio_poll.registry())?;
|
||||
|
||||
//Remove broker from store
|
||||
self.brokers
|
||||
.store
|
||||
.remove(&ptr.0)
|
||||
.ok_or_else(|| anyhow::anyhow!("Broker not found"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn add_peer(
|
||||
&mut self,
|
||||
psk: Option<SymKey>,
|
||||
pk: SPk,
|
||||
outfile: Option<PathBuf>,
|
||||
broker_peer: Option<BrokerPeer>,
|
||||
outwg: Option<WireguardOut>,
|
||||
hostname: Option<String>,
|
||||
) -> anyhow::Result<AppPeerPtr> {
|
||||
) -> Result<AppPeerPtr> {
|
||||
let PeerPtr(pn) = self.crypt.add_peer(psk, pk)?;
|
||||
assert!(pn == self.peers.len());
|
||||
let initial_endpoint = hostname
|
||||
@@ -677,14 +452,14 @@ impl AppServer {
|
||||
let current_endpoint = None;
|
||||
self.peers.push(AppPeer {
|
||||
outfile,
|
||||
broker_peer,
|
||||
outwg,
|
||||
initial_endpoint,
|
||||
current_endpoint,
|
||||
});
|
||||
Ok(AppPeerPtr(pn))
|
||||
}
|
||||
|
||||
pub fn listen_loop(&mut self) -> anyhow::Result<()> {
|
||||
pub fn listen_loop(&mut self) -> Result<()> {
|
||||
const INIT_SLEEP: f64 = 0.01;
|
||||
const MAX_FAILURES: i32 = 10;
|
||||
let mut failure_cnt = 0;
|
||||
@@ -705,10 +480,11 @@ impl AppServer {
|
||||
let sleep = INIT_SLEEP * 2.0f64.powf(f64::from(failure_cnt - 1));
|
||||
let tries_left = MAX_FAILURES - (failure_cnt - 1);
|
||||
error!(
|
||||
"unexpected error after processing {} messages: {:?} {}",
|
||||
"unexpected error after processing {} messages: {:?}",
|
||||
msgs_processed,
|
||||
err,
|
||||
err.backtrace()
|
||||
// TODO do we need backtraces?
|
||||
// err.backtrace()
|
||||
);
|
||||
if tries_left > 0 {
|
||||
error!("re-initializing networking in {sleep}! {tries_left} tries left.");
|
||||
@@ -716,11 +492,12 @@ impl AppServer {
|
||||
continue;
|
||||
}
|
||||
|
||||
bail!("too many network failures");
|
||||
error!("too many network failures");
|
||||
return Err(RosenpassError::RuntimeError);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn event_loop(&mut self) -> anyhow::Result<()> {
|
||||
pub fn event_loop(&mut self) -> Result<()> {
|
||||
let (mut rx, mut tx) = (MsgBuf::zero(), MsgBuf::zero());
|
||||
|
||||
/// if socket address for peer is known, call closure
|
||||
@@ -744,23 +521,10 @@ impl AppServer {
|
||||
use crate::protocol::HandleMsgResult;
|
||||
use AppPollResult::*;
|
||||
use KeyOutputReason::*;
|
||||
|
||||
if let Some(AppServerTest {
|
||||
termination_handler: Some(terminate),
|
||||
..
|
||||
}) = &self.test_helpers
|
||||
{
|
||||
if terminate.try_recv().is_ok() {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
match self.poll(&mut *rx)? {
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
SendInitiation(peer) => tx_maybe_with!(peer, || self
|
||||
.crypt
|
||||
.initiate_handshake(peer.lower(), &mut *tx))?,
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
SendRetransmission(peer) => tx_maybe_with!(peer, || self
|
||||
.crypt
|
||||
.retransmit_handshake(peer.lower(), &mut *tx))?,
|
||||
@@ -779,20 +543,12 @@ impl AppServer {
|
||||
}
|
||||
|
||||
ReceivedMessage(len, endpoint) => {
|
||||
let msg_result = match self.under_load {
|
||||
DoSOperation::UnderLoad => {
|
||||
self.handle_msg_under_load(&endpoint, &rx[..len], &mut *tx)
|
||||
}
|
||||
DoSOperation::Normal => self.crypt.handle_msg(&rx[..len], &mut *tx),
|
||||
};
|
||||
match msg_result {
|
||||
match self.crypt.handle_msg(&rx[..len], &mut *tx) {
|
||||
Err(ref e) => {
|
||||
self.verbose().then(|| {
|
||||
info!(
|
||||
"error processing incoming message from {}: {:?} {}",
|
||||
endpoint,
|
||||
e,
|
||||
e.backtrace()
|
||||
error!(
|
||||
"error processing incoming message from {:?}: {:?}",
|
||||
endpoint, e
|
||||
);
|
||||
});
|
||||
}
|
||||
@@ -820,40 +576,18 @@ impl AppServer {
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_msg_under_load(
|
||||
&mut self,
|
||||
endpoint: &Endpoint,
|
||||
rx: &[u8],
|
||||
tx: &mut [u8],
|
||||
) -> Result<crate::protocol::HandleMsgResult> {
|
||||
match endpoint {
|
||||
Endpoint::SocketBoundAddress(socket) => {
|
||||
self.crypt.handle_msg_under_load(rx, &mut *tx, socket)
|
||||
}
|
||||
Endpoint::Discovery(_) => {
|
||||
anyhow::bail!("Host-path discovery is not supported under load")
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn output_key(
&mut self,
peer: AppPeerPtr,
why: KeyOutputReason,
key: &SymKey,
) -> anyhow::Result<()> {
pub fn output_key(&self, peer: AppPeerPtr, why: KeyOutputReason, key: &SymKey) -> Result<()> {
let peerid = peer.lower().get(&self.crypt).pidt()?;
let ap = peer.get_app(self);

if self.verbose() {
let msg = match why {
KeyOutputReason::Exchanged => "Exchanged key with peer",
KeyOutputReason::Stale => "Erasing outdated key from peer",
};
info!("{} {}", msg, peerid.fmt_b64::<MAX_B64_PEER_ID_SIZE>());
info!("{} {}", msg, fmt_b64(&*peerid));
}

let ap = peer.get_app(self);

if let Some(of) = ap.outfile.as_ref() {
// This might leave some fragments of the secret on the stack;
// in practice this is likely not a problem because the stack likely
@@ -862,7 +596,7 @@ impl AppServer {
// data will linger in the linux page cache anyways with the current
// implementation, going to great length to erase the secret here is
// not worth it right now.
key.store_b64::<MAX_B64_KEY_SIZE, _>(of)?;
b64_writer(fopen_w(of)?).write_all(key.secret())?;
let why = match why {
KeyOutputReason::Exchanged => "exchanged",
KeyOutputReason::Stale => "stale",
@@ -872,16 +606,28 @@ impl AppServer {
// it is meant to allow external detection of a successful key-exchange
println!(
"output-key peer {} key-file {of:?} {why}",
peerid.fmt_b64::<MAX_B64_PEER_ID_SIZE>()
fmt_b64(&*peerid)
);
}

peer.set_psk(self, key)?;
if let Some(owg) = ap.outwg.as_ref() {
let child = Command::new("wg")
.arg("set")
.arg(&owg.dev)
.arg("peer")
.arg(&owg.pk)
.arg("preshared-key")
.arg("/dev/stdin")
.stdin(Stdio::piped())
.args(&owg.extra_params)
.spawn()?;
b64_writer(child.stdin.unwrap()).write_all(key.secret())?;
}

Ok(())
}
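Aside: the second half of output_key hands the fresh key to WireGuard by spawning wg set <dev> peer <pk> preshared-key /dev/stdin and writing the base64 key to the child's stdin, so the secret never appears on the command line. A minimal sketch of that stdin-piping technique follows; it uses cat as a harmless stand-in for wg so it can run anywhere, and the key value is a placeholder, not a real key.

// Sketch: feeding a secret to an external tool via its stdin instead of argv,
// so the key never shows up in the process list. `cat` stands in for `wg`.
use std::io::Write;
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    let psk_b64 = "c2VjcmV0LXByZXNoYXJlZC1rZXk="; // placeholder, not a real key

    // Real code would run: wg set <dev> peer <pk> preshared-key /dev/stdin
    let mut child = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::null())
        .spawn()?;

    // Write the base64 key to the child's stdin, then drop the handle so the
    // child sees EOF and can proceed.
    child
        .stdin
        .take()
        .expect("stdin was requested as piped")
        .write_all(psk_b64.as_bytes())?;

    let status = child.wait()?;
    assert!(status.success());
    Ok(())
}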
pub fn poll(&mut self, rx_buf: &mut [u8]) -> anyhow::Result<AppPollResult> {
pub fn poll(&mut self, rx_buf: &mut [u8]) -> Result<AppPollResult> {
use crate::protocol::PollResult as C;
use AppPollResult as A;
loop {
@@ -906,7 +652,7 @@ impl AppServer {
&mut self,
buf: &mut [u8],
timeout: Timing,
) -> anyhow::Result<Option<(usize, Endpoint)>> {
) -> Result<Option<(usize, Endpoint)>> {
let timeout = Duration::from_secs_f64(timeout);

// if there is no time to wait on IO, well, then, lets not waste any time!
@@ -933,56 +679,9 @@ impl AppServer {

// only poll if we drained all sockets before
if self.all_sockets_drained {
//Non blocked polling
self.mio_poll
.poll(&mut self.events, Some(Duration::from_secs(0)))?;

if self.events.iter().peekable().peek().is_none() {
// if there are no events, then add to blocking poll count
self.blocking_polls_count += 1;
//Execute blocking poll
self.mio_poll.poll(&mut self.events, Some(timeout))?;
} else {
self.non_blocking_polls_count += 1;
}
} else {
self.unpolled_count += 1;
self.mio_poll.poll(&mut self.events, Some(timeout))?;
}

if let Some(AppServerTest {
enable_dos_permanently: true,
..
}) = self.test_helpers
{
self.under_load = DoSOperation::UnderLoad;
} else {
//Reset blocking poll count if waiting for more than BLOCKING_POLL_COUNT_DURATION
if self.last_update_time.elapsed() > DURATION_UPDATE_UNDER_LOAD_STATUS {
self.last_update_time = Instant::now();
let total_polls = self.blocking_polls_count + self.non_blocking_polls_count;

let load_ratio = if total_polls > 0 {
self.non_blocking_polls_count as f64 / total_polls as f64
} else if self.unpolled_count > 0 {
//There are no polls, so we are under load
1.0
} else {
0.0
};

if load_ratio > UNDER_LOAD_RATIO {
self.under_load = DoSOperation::UnderLoad;
} else {
self.under_load = DoSOperation::Normal;
}

self.blocking_polls_count = 0;
self.non_blocking_polls_count = 0;
self.unpolled_count = 0;
}
}

// drain all sockets
let mut would_block_count = 0;
for (sock_no, socket) in self.sockets.iter_mut().enumerate() {
match socket.recv_from(buf) {
@@ -991,10 +690,10 @@ impl AppServer {
self.all_sockets_drained = false;
return Ok(Some((
n,
Endpoint::SocketBoundAddress(SocketBoundEndpoint::new(
SocketPtr(sock_no),
Endpoint::SocketBoundAddress {
socket: SocketPtr(sock_no),
addr,
)),
},
)));
}
Err(e) if e.kind() == ErrorKind::WouldBlock => {
@@ -1008,11 +707,6 @@ impl AppServer {
// if each socket returned WouldBlock, then we drained them all at least once indeed
self.all_sockets_drained = would_block_count == self.sockets.len();

// Process brokers poll
for (_, broker) in self.brokers.store.iter_mut() {
broker.process_poll()?;
}

Ok(None)
}
}
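Aside: the polling hunk above implements a simple load heuristic, counting how often the event loop could make progress without blocking and switching to DoS mitigation when that ratio crosses UNDER_LOAD_RATIO. The standalone sketch below mirrors that calculation; the LoadTracker struct, the 0.5 threshold, and the counter names are placeholders chosen for illustration, not the crate's real API.

// Sketch of the load heuristic: if most poll rounds already had events waiting
// (non-blocking), or no polling happened at all, treat the server as under load.
const UNDER_LOAD_RATIO: f64 = 0.5; // placeholder threshold

#[derive(Debug, PartialEq)]
enum DoSOperation {
    Normal,
    UnderLoad,
}

struct LoadTracker {
    blocking_polls: u64,
    non_blocking_polls: u64,
    unpolled: u64,
}

impl LoadTracker {
    // Decide the new load state and reset the counters for the next window.
    fn update(&mut self) -> DoSOperation {
        let total = self.blocking_polls + self.non_blocking_polls;
        let load_ratio = if total > 0 {
            self.non_blocking_polls as f64 / total as f64
        } else if self.unpolled > 0 {
            // The loop never got to poll, so it never had idle time.
            1.0
        } else {
            0.0
        };
        self.blocking_polls = 0;
        self.non_blocking_polls = 0;
        self.unpolled = 0;
        if load_ratio > UNDER_LOAD_RATIO {
            DoSOperation::UnderLoad
        } else {
            DoSOperation::Normal
        }
    }
}

fn main() {
    let mut t = LoadTracker { blocking_polls: 1, non_blocking_polls: 9, unpolled: 0 };
    assert_eq!(t.update(), DoSOperation::UnderLoad);
    assert_eq!(t.update(), DoSOperation::Normal); // counters were reset
}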
Some files were not shown because too many files have changed in this diff.