Compare commits
149 Commits
whitepaper...v0.2.1-rc.
| SHA1 |
| --- |
| 44264a7bb6 |
| b095bdaa7c |
| 9597e485bf |
| ab085998bb |
| 3901e668cb |
| b7444bf9b4 |
| 0051cbd48e |
| 27746781c0 |
| 93439858d1 |
| 1223048b48 |
| 932bde39cc |
| 1d9e62e56b |
| 3af722a066 |
| df60b0bfc3 |
| 6274c6fcdd |
| cd00f023fb |
| 13563237cb |
| 447a4f7a44 |
| 6bac6a59ff |
| e5e04c6d95 |
| 15ce25ccd2 |
| 1b383d494c |
| 605b6463ff |
| 04eb86af87 |
| bf850e3072 |
| dd39936220 |
| b15f17133f |
| b50820ecc0 |
| f323839967 |
| 6e15c38254 |
| b7a76849b7 |
| d2d72143b5 |
| 1135cd7bbb |
| 51f04f749f |
| 37d1326481 |
| d0a84294aa |
| a98f64c17d |
| d6a7ebe88f |
| 212336728c |
| f48a923dbf |
| 7b5d0f7d66 |
| 1e37f89e83 |
| b997238f42 |
| d915e63445 |
| 53d7996dd3 |
| 47b4d394ef |
| 578d9e2eb5 |
| d6b83a4a0b |
| 959cd50ef6 |
| 6025623aad |
| 5a67b4708a |
| 45145cdd9b |
| 66e696fea3 |
| 91d0592ad6 |
| 8ff9b53365 |
| 067a839d4b |
| 38835fb0f8 |
| a2b177470c |
| 1c1e38e2f7 |
| 46383bdc4d |
| 2805d686e6 |
| b274519bad |
| 3086c7fb93 |
| d21e3af1bb |
| b0332971df |
| be508b486a |
| 4314a0915a |
| 0d2ca37bbb |
| 7b69afabbc |
| e24172d9b5 |
| d01c96c1de |
| 4a3b59fd15 |
| 11d60bcced |
| 73a8489232 |
| 2ac2c84c71 |
| a0f79478cc |
| 7e6985fdc6 |
| b958eacaae |
| 397a776c55 |
| 19fe7360d2 |
| b29720b0c6 |
| 78e32a6f14 |
| 5f78857ff5 |
| 69f62673a5 |
| 097fd0332d |
| 303c5a569c |
| 7aa48b95af |
| 229224d078 |
| e12cd18a42 |
| 0b1a00a32e |
| 7c3cd1acf6 |
| 3856d774ff |
| 62fab066d4 |
| 9469b62f58 |
| f8bea94330 |
| f3c343c472 |
| 7154af52f9 |
| e03fed404f |
| 42798699e4 |
| b99d072879 |
| d5b2a9414f |
| 13cc7e05ed |
| 096c811491 |
| cefe9ce762 |
| 378fddb645 |
| 695ef6a769 |
| b4d74d64f7 |
| 0456ded6b9 |
| 838fd19694 |
| 94d57f2f87 |
| 279b3c49fc |
| 9c40c77f71 |
| c79dffa627 |
| b8f19c5510 |
| f459b91abf |
| 801ce4cd34 |
| a36da78bc8 |
| df02f616bf |
| 87b08bcee1 |
| 897fa3daf6 |
| 953b861b4c |
| 1a61a99575 |
| 25a7a0736b |
| 844e9b3c7e |
| a723951c71 |
| be9ac58bf9 |
| 75853159fe |
| 95aba257fd |
| 34d0bab5c5 |
| 91d1986126 |
| 319785cf6e |
| df5a6125cd |
| 80697e6189 |
| 6212153c48 |
| 4645ed5569 |
| 2aeb9067e2 |
| c64917fe2e |
| a011cc1e1c |
| ad75d2218c |
| 566795afd2 |
| 8eea5284bf |
| df00c1987c |
| becc8c057a |
| 1625d94b71 |
| a62405190e |
| 5afa6c19a6 |
| ecc1b75b00 |
| fe80792873 |
| 3fc28a0b70 |
.ci/gen-workflow-files.nu (new executable file, 200 lines added)
@@ -0,0 +1,200 @@
#!/usr/bin/env nu

use log *

# cd to git root
cd (git rev-parse --show-toplevel)

# check if a subject depends on a potential dependency
def depends [
  subject:string # package to examine
  maybe_dep:string # maybe a dependency of subject
] {
  not ( nix why-depends --quiet --derivation $subject $maybe_dep | is-empty )
}

# get attribute names of the attribute set
def get-attr-names [
  expr: # nix expression to get attrNames of
] {
  nix eval --json $expr --apply builtins.attrNames | from json
}

def job-id [
  system:string,
  derivation:string,
] {
  $"($system)---($derivation)"
}

# map from nixos system to github runner type
let systems_map = {
  # aarch64-darwin
  # aarch64-linux

  i686-linux: ubuntu-latest,
  x86_64-darwin: macos-13,
  x86_64-linux: ubuntu-latest
}

let targets = (get-attr-names ".#packages"
  | par-each {|system| { $system : (get-attr-names $".#packages.($system)") } }
  | reduce {|it, acc| $acc | merge $it }
)

mut cachix_workflow = {
  name: "Nix",
  permissions: {contents: write},
  on: {
    pull_request: null,
    push: {branches: [main]}
  },
  jobs: {},
}

mut release_workflow = {
  name: "Release",
  permissions: {contents: write},
  on: { push: {tags: ["v*"]} },
  jobs: {},
}

let runner_setup = [
  {
    uses: "actions/checkout@v3"
  }
  {
    uses: "cachix/install-nix-action@v22",
    with: { nix_path: "nixpkgs=channel:nixos-unstable" }
  }
  {
    uses: "cachix/cachix-action@v12",
    with: {
      name: rosenpass,
      authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
    }
  }
]

for system in ($targets | columns) {
  if ($systems_map | get -i $system | is-empty) {
    log info $"skipping ($system), since there are no GH-Actions runners for it"
    continue
  }

  # lookup the correct runner for $system
  let runs_on = [ ($systems_map | get $system) ]

  # add jobs for all derivations
  let derivations = ($targets | get $system)
  for derivation in $derivations {

    if ($system == "i686-linux") and ($derivation | str contains "static") {
      log info $"skipping ($system).($derivation), due to liboqs 0.8 not present in oqs-sys"
      continue
    }

    if ($system == "i686-linux") and ($derivation | str contains "release-package") {
      log info $"skipping ($system).($derivation), due to liboqs 0.8 not present in oqs-sys"
      continue
    }

    # job_id for GH-Actions
    let id = ( job-id $system $derivation )

    # name displayed
    let name = $"($system).($derivation)"

    # collection of dependencies
    # TODO currently only considers dependencies on the same $system
    let needs = ($derivations
      | filter {|it| $it != $derivation and $it != "default" } # filter out self and default
      | par-each {|it| {
          name: $it, # the other derivation
          # does self depend on $it?
          needed: (depends $".#packages.($system).($derivation)" $".#packages.($system).($it)")
        } }
      | filter {|it| $it.needed}
      | each {|it| job-id $system $it.name}
    )

    mut new_job = {
      name: $"Build ($name)",
      "runs-on": $runs_on,
      needs: $needs,
      steps: ($runner_setup | append [
        {
          name: Build,
          run: $"nix build .#packages.($system).($derivation) --print-build-logs"
        }
      ])
    }
    $cachix_workflow.jobs = ($cachix_workflow.jobs | insert $id $new_job )
  }

  # add check job
  $cachix_workflow.jobs = ($cachix_workflow.jobs | insert $"($system)---check" {
    name: $"Run Nix checks on ($system)",
    "runs-on": $runs_on,
    steps: ($runner_setup | append {
      name: Check,
      run: "nix flake check . --print-build-logs"
    })
  })

  # add release job
  $release_workflow.jobs = ($release_workflow.jobs | insert $"($system)---release" {
    name: $"Build release artifacts for ($system)",
    "runs-on": $runs_on,
    steps: ($runner_setup | append [
      {
        name: "Build release",
        run: "nix build .#release-package --print-build-logs"
      }
      {
        name: Release,
        uses: "softprops/action-gh-release@v1",
        with: {
          draft: "${{ contains(github.ref_name, 'rc') }}",
          prerelease: "${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}",
          files: "result/*"
        }
      }
    ])
  })
}

# add whitepaper job with upload
let system = "x86_64-linux"
$cachix_workflow.jobs = ($cachix_workflow.jobs | insert $"($system)---whitepaper-upload" {
  name: $"Upload whitepaper ($system)",
  "runs-on": ($systems_map | get $system),
  "if": "${{ github.ref == 'refs/heads/main' }}",
  steps: ($runner_setup | append [
    {
      name: "Git add git sha and commit",
      run: "cd papers && ./tex/gitinfo2.sh && git add gitHeadInfo.gin"
    }
    {
      name: Build,
      run: $"nix build .#packages.($system).whitepaper --print-build-logs"
    }
    {
      name: "Deploy PDF artifacts",
      uses: "peaceiris/actions-gh-pages@v3",
      with: {
        github_token: "${{ secrets.GITHUB_TOKEN }}",
        publish_dir: result/,
        publish_branch: papers-pdf,
        force_orphan: true
      }
    }
  ])
})

log info "saving nix-cachix workflow"
$cachix_workflow | to yaml | save --force .github/workflows/nix.yaml
$release_workflow | to yaml | save --force .github/workflows/release.yaml

log info "prettify generated yaml"
prettier -w .github/workflows/
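The two workflow files further below (nix.yaml and release.yaml) are generated by this script rather than edited by hand. A minimal invocation sketch, assuming nushell and prettier are installed locally and the command is run from inside the repository checkout:

    nu .ci/gen-workflow-files.nu

The script queries the flake's package set with `nix eval`, derives inter-job dependencies with `nix why-depends`, emits one GitHub Actions job per (system, derivation) pair, and formats the resulting YAML with prettier.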
.github/workflows/doc-upload.yml (new file, 49 lines added)
@@ -0,0 +1,49 @@
name: Update website docs

on:
  push:
    branches:
      - main
    paths:
      - "doc/**"

jobs:
  update-website:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Clone rosenpass-website repository
        uses: actions/checkout@v3
        with:
          repository: rosenpass/rosenpass-website
          ref: main
          path: rosenpass-website
          token: ${{ secrets.PRIVACC }}

      - name: Copy docs to website repo
        run: |
          cp -R doc/* rosenpass-website/static/docs/

      - name: Install mandoc
        run: |
          sudo apt-get update
          sudo apt-get install -y mandoc

      - name: Compile man pages to HTML
        run: |
          cd rosenpass-website/static/docs/
          for file in *.1; do
            mandoc -Thtml "$file" > "${file%.*}.html"
          done

      - name: Commit changes to website repo
        uses: EndBug/add-and-commit@v9
        with:
          author_name: GitHub Actions
          author_email: actions@github.com
          message: Update docs
          cwd: rosenpass-website/static/docs
          github_token: ${{ secrets.PRIVACC }}
.github/workflows/nix.yaml (390 lines changed)
@@ -1,74 +1,346 @@
name: Nix Related Actions
name: Nix
permissions:
  contents: write
on:
  pull_request:
  pull_request: null
  push:
    branches: [main]

    branches:
      - main
jobs:
  build:
    name: Build ${{ matrix.derivation }} on ${{ matrix.nix-system }}
  i686-linux---default:
    name: Build i686-linux.default
    runs-on:
      - nix
      - ${{ matrix.nix-system }}
    strategy:
      fail-fast: false
      matrix:
        nix-system:
          - x86_64-linux
          # - aarch64-linux
        derivation:
          - rosenpass
          - rosenpass-static
          - rosenpass-oci-image
          - rosenpass-static-oci-image
          - proof-proverif
          - whitepaper

      - ubuntu-latest
    needs:
      - i686-linux---rosenpass
    steps:
      - uses: actions/checkout@v3
      - name: Generate gitHeadInfo.gin for the whitepaper
        if: ${{ matrix.derivation == 'whitepaper' }}
        run: ( cd papers && ./tex/gitinfo2.sh && git add gitHeadInfo.gin )
      - name: Build ${{ matrix.derivation }}@${{ matrix.nix-system }}
        run: |
          # build the package
          nix build .#packages.${{ matrix.nix-system }}.${{ matrix.derivation }} --print-build-logs

          # copy over the results
          if [[ -f $(readlink --canonicalize result ) ]]; then
            mkdir -- ${{ matrix.derivation }}-${{ matrix.nix-system }}
          fi
          cp --recursive -- $(readlink --canonicalize result) ${{ matrix.derivation }}-${{ matrix.nix-system }}
          chmod --recursive ug+rw -- ${{ matrix.derivation }}-${{ matrix.nix-system }}

          # add version information
          git rev-parse --abbrev-ref HEAD > ${{ matrix.derivation }}-${{ matrix.nix-system }}/git-version
          git rev-parse HEAD > ${{ matrix.derivation }}-${{ matrix.nix-system }}/git-sha

          # override the `rp` script to keep compatible with non-nix systems
          if [[ -f ${{ matrix.derivation }}-${{ matrix.nix-system }}/bin/rp ]]; then
            cp --force -- rp ${{ matrix.derivation }}-${{ matrix.nix-system }}/bin/
          fi
      - name: Upload build results
        uses: actions/upload-artifact@v3
      - uses: cachix/install-nix-action@v22
        with:
          name: ${{ matrix.derivation }}-${{ matrix.nix-system }}
          path: ${{ matrix.derivation }}-${{ matrix.nix-system }}
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.i686-linux.default --print-build-logs
  i686-linux---rosenpass:
    name: Build i686-linux.rosenpass
    runs-on:
      - ubuntu-latest
    needs: []
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.i686-linux.rosenpass --print-build-logs
  i686-linux---rosenpass-oci-image:
    name: Build i686-linux.rosenpass-oci-image
    runs-on:
      - ubuntu-latest
    needs:
      - i686-linux---rosenpass
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.i686-linux.rosenpass-oci-image --print-build-logs
  i686-linux---check:
    name: Run Nix checks on i686-linux
    runs-on:
      - ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Check
        run: nix flake check . --print-build-logs
  x86_64-darwin---default:
    name: Build x86_64-darwin.default
    runs-on:
      - macos-13
    needs:
      - x86_64-darwin---rosenpass
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-darwin.default --print-build-logs
  x86_64-darwin---release-package:
    name: Build x86_64-darwin.release-package
    runs-on:
      - macos-13
    needs:
      - x86_64-darwin---rosenpass
      - x86_64-darwin---rosenpass-oci-image
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-darwin.release-package --print-build-logs
  x86_64-darwin---rosenpass:
    name: Build x86_64-darwin.rosenpass
    runs-on:
      - macos-13
    needs: []
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-darwin.rosenpass --print-build-logs
  x86_64-darwin---rosenpass-oci-image:
    name: Build x86_64-darwin.rosenpass-oci-image
    runs-on:
      - macos-13
    needs:
      - x86_64-darwin---rosenpass
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-darwin.rosenpass-oci-image --print-build-logs
  x86_64-darwin---check:
    name: Run Nix checks on x86_64-darwin
    runs-on:
      - macos-13
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Check
        run: nix flake check . --print-build-logs
  x86_64-linux---default:
    name: Build x86_64-linux.default
    runs-on:
      - ubuntu-latest
    needs:
      - x86_64-linux---rosenpass
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.default --print-build-logs
  x86_64-linux---proof-proverif:
    name: Build x86_64-linux.proof-proverif
    runs-on:
      - ubuntu-latest
    needs:
      - x86_64-linux---proverif-patched
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.proof-proverif --print-build-logs
  x86_64-linux---proverif-patched:
    name: Build x86_64-linux.proverif-patched
    runs-on:
      - ubuntu-latest
    needs: []
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.proverif-patched --print-build-logs
  x86_64-linux---release-package:
    name: Build x86_64-linux.release-package
    runs-on:
      - ubuntu-latest
    needs:
      - x86_64-linux---rosenpass-static-oci-image
      - x86_64-linux---rosenpass-static
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.release-package --print-build-logs
  x86_64-linux---rosenpass:
    name: Build x86_64-linux.rosenpass
    runs-on:
      - ubuntu-latest
    needs: []
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.rosenpass --print-build-logs
  x86_64-linux---rosenpass-oci-image:
    name: Build x86_64-linux.rosenpass-oci-image
    runs-on:
      - ubuntu-latest
    needs:
      - x86_64-linux---rosenpass
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.rosenpass-oci-image --print-build-logs
  x86_64-linux---rosenpass-static:
    name: Build x86_64-linux.rosenpass-static
    runs-on:
      - ubuntu-latest
    needs: []
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.rosenpass-static --print-build-logs
  x86_64-linux---rosenpass-static-oci-image:
    name: Build x86_64-linux.rosenpass-static-oci-image
    runs-on:
      - ubuntu-latest
    needs:
      - x86_64-linux---rosenpass-static
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.rosenpass-static-oci-image --print-build-logs
  x86_64-linux---whitepaper:
    name: Build x86_64-linux.whitepaper
    runs-on:
      - ubuntu-latest
    needs: []
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build
        run: nix build .#packages.x86_64-linux.whitepaper --print-build-logs
  x86_64-linux---check:
    name: Run Nix checks on x86_64-linux
    runs-on:
      - ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Check
        run: nix flake check . --print-build-logs
  x86_64-linux---whitepaper-upload:
    name: Upload whitepaper x86_64-linux
    runs-on: ubuntu-latest
    if: ${{ github.ref == 'refs/heads/main' }}
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Git add git sha and commit
        run: cd papers && ./tex/gitinfo2.sh && git add gitHeadInfo.gin
      - name: Build
        run: nix build .#packages.x86_64-linux.whitepaper --print-build-logs
      - name: Deploy PDF artifacts
        if: ${{ matrix.derivation == 'whitepaper' && github.ref == 'refs/heads/main' }}
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ${{ matrix.derivation }}-${{ matrix.nix-system }}
          publish_dir: result/
          publish_branch: papers-pdf
          force_orphan: true
  checks:
    name: Run Nix checks
    runs-on: nixos
    needs: build
    steps:
      - uses: actions/checkout@v3
      - name: Run Checks
        run: nix flake check . --print-build-logs
.github/workflows/qc.yaml (88 lines changed)
@@ -1,4 +1,4 @@
name: Quality Control
name: QC
on:
  pull_request:
  push:
@@ -12,15 +12,31 @@ jobs:
  prettier:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3
      - uses: actionsx/prettier@v2
        with:
          args: --check .

  shellcheck:
    name: Shellcheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Run ShellCheck
        uses: ludeeus/action-shellcheck@master

  cargo-audit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/audit-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

  cargo-clippy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3
      - uses: actions/cache@v3
        with:
          path: |
@@ -31,17 +47,73 @@ jobs:
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
      - run: rustup component add clippy
      - name: Install xmllint
      - name: Install libsodium
        run: sudo apt-get install -y libsodium-dev
      - uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --all-features

  cargo-audit:
  cargo-doc:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - uses: actions-rs/audit-check@v1
      - uses: actions/checkout@v3
      - uses: actions/cache@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
      - run: rustup component add clippy
      - name: Install libsodium
        run: sudo apt-get install -y libsodium-dev
      # `--no-deps` used as a workaround for a rust compiler bug. See:
      # - https://github.com/rosenpass/rosenpass/issues/62
      # - https://github.com/rust-lang/rust/issues/108378
      - run: RUSTDOCFLAGS="-D warnings" cargo doc --no-deps --document-private-items

  cargo-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
      - name: Install libsodium
        run: sudo apt-get install -y libsodium-dev
      # liboqs requires quite a lot of stack memory, thus we adjust
      # the default stack size picked for new threads (which is used
      # by `cargo test`) to be _big enough_. Setting it to 8 MiB
      - run: RUST_MIN_STACK=8388608 cargo test

  cargo-test-nix-devshell-x86_64-linux:
    runs-on:
      - ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
      - uses: cachix/install-nix-action@v21
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - run: nix develop --command cargo test
.github/workflows/release.yaml (73 lines changed)
@@ -3,26 +3,69 @@ permissions:
  contents: write
on:
  push:
    tags: ["v*"]

    tags:
      - v*
jobs:
  release:
    name: Build ${{ matrix.derivation }} on ${{ matrix.nix-system }}
  i686-linux---release:
    name: Build release artifacts for i686-linux
    runs-on:
      - nix
      - ${{ matrix.nix-system }}
    strategy:
      fail-fast: false
      matrix:
        nix-system:
          - x86_64-linux
          # - aarch64-linux
      - ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Build release-package for ${{ matrix.nix-system }}
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build release
        run: nix build .#release-package --print-build-logs
      - name: Release
        uses: softprops/action-gh-release@v1
        with:
          files: |
            result/*
          draft: ${{ contains(github.ref_name, 'rc') }}
          prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
          files: result/*
  x86_64-darwin---release:
    name: Build release artifacts for x86_64-darwin
    runs-on:
      - macos-13
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build release
        run: nix build .#release-package --print-build-logs
      - name: Release
        uses: softprops/action-gh-release@v1
        with:
          draft: ${{ contains(github.ref_name, 'rc') }}
          prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
          files: result/*
  x86_64-linux---release:
    name: Build release artifacts for x86_64-linux
    runs-on:
      - ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v22
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: cachix/cachix-action@v12
        with:
          name: rosenpass
          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
      - name: Build release
        run: nix build .#release-package --print-build-logs
      - name: Release
        uses: softprops/action-gh-release@v1
        with:
          draft: ${{ contains(github.ref_name, 'rc') }}
          prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
          files: result/*
.gitlab-ci.yml (new file, 17 lines added)
@@ -0,0 +1,17 @@
# TODO use CI_JOB_TOKEN once https://gitlab.com/groups/gitlab-org/-/epics/6310 is fixed
pull-from-gh:
  only: ["schedules"]
  variables:
    REMOTE: "https://github.com/rosenpass/rosenpass.git"
    LOCAL: "git@gitlab.com:rosenpass/rosenpass.git"
    GIT_STRATEGY: none
  before_script:
    - mkdir ~/.ssh/
    - echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts
    - echo "$REPO_SSH_KEY" > ~/.ssh/id_ed25519
    - chmod 600 --recursive ~/.ssh/
    - git config --global user.email "ci@gitlab.com"
    - git config --global user.name "CI"
  script:
    - git clone --mirror $REMOTE rosenpass
    - cd rosenpass && git push --mirror $LOCAL
Cargo.lock (generated, 976 lines changed; diff not shown)
Cargo.toml (41 lines changed)
@@ -1,35 +1,10 @@
[package]
name = "rosenpass"
version = "0.1.1"
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Build post-quantum-secure VPNs with WireGuard!"
homepage = "https://rosenpass.eu/"
repository = "https://github.com/rosenpass/rosenpass"
readme = "readme.md"
[workspace]
resolver = "2"

[[bench]]
name = "handshake"
harness = false
members = [
  "rosenpass",
]

[dependencies]
anyhow = { version = "1.0.52", features = ["backtrace"] }
base64 = "0.13.0"
clap = { version = "3.0.0", features = ["yaml"] }
static_assertions = "1.1.0"
memoffset = "0.6.5"
libsodium-sys-stable = { version = "1.19.26", features = ["use-pkg-config"] }
oqs-sys = { version = "0.7.1", default-features = false, features = ['classic_mceliece', 'kyber'] }
lazy_static = "1.4.0"
thiserror = "1.0.38"
paste = "1.0.11"
log = { version = "0.4.17", optional = true }
env_logger = { version = "0.10.0", optional = true }

[dev-dependencies]
criterion = "0.3.5"
test_bin = "0.4.0"

[features]
default = ["log", "env_logger"]
[workspace.metadata.release]
# ensure that adding `--package` as argument to `cargo release` still creates version tags in the form of `vx.y.z`
tag-prefix = ""
config-examples/.gitignore (new file, 2 lines added)
@@ -0,0 +1,2 @@
peer-*-*-key
peer-*-out
config-examples/peer-a-config.toml (new file, 18 lines added)
@@ -0,0 +1,18 @@
public_key = "peer-a-public-key"
secret_key = "peer-a-secret-key"
listen = ["[::]:10001"]
verbosity = "Quiet"

[[peers]]
public_key = "peer-b-public-key"
endpoint = "localhost:10002"
key_out = "peer-a-rp-out-key"
# exchange_command = [
#   "wg",
#   "set",
#   "wg0",
#   "peer",
#   "<PEER_ID>",
#   "preshared-key",
#   "/dev/stdin",
# ]
config-examples/peer-b-config.toml (new file, 18 lines added)
@@ -0,0 +1,18 @@
public_key = "peer-b-public-key"
secret_key = "peer-b-secret-key"
listen = ["[::]:10002"]
verbosity = "Quiet"

[[peers]]
public_key = "peer-a-public-key"
endpoint = "localhost:10001"
key_out = "peer-b-rp-out-key"
# exchange_command = [
#   "wg",
#   "set",
#   "wg0",
#   "peer",
#   "<PEER_ID>",
#   "preshared-key",
#   "/dev/stdin",
# ]
doc/rosenpass.1 (new file, 105 lines added)
@@ -0,0 +1,105 @@
.Dd $Mdocdate$
.Dt ROSENPASS 1
.Os
.Sh NAME
.Nm rosenpass
.Nd builds post-quantum-secure VPNs
.Sh SYNOPSIS
.Nm
.Op COMMAND
.Op Ar OPTIONS ...
.Op Ar ARGS ...
.Sh DESCRIPTION
.Nm
performs cryptographic key exchanges that are secure against quantum-computers
and then outputs the keys.
These keys can then be passed to various services, such as wireguard or other
vpn services, as pre-shared-keys to achieve security against attackers with
quantum computers.
.Pp
This is a research project and quantum computers are not thought to become
practical in fewer than ten years.
If you are not specifically tasked with developing post-quantum secure systems,
you probably do not need this tool.
.Ss COMMANDS
.Bl -tag -width Ds
.It Ar keygen private-key <file-path> public-key <file-path>
Generate a keypair to use in the exchange command later.
Send the public-key file to your communication partner and keep the private-key
file secret!
.It Ar exchange private-key <file-path> public-key <file-path> [ OPTIONS ] PEERS
Start a process to exchange keys with the specified peers.
You should specify at least one peer.
.Pp
Its
.Ar OPTIONS
are as follows:
.Bl -tag -width Ds
.It Ar listen <ip>[:<port>]
Instructs
.Nm
to listen on the specified interface and port.
By default,
.Nm
will listen on all interfaces and select a random port.
.It Ar verbose
Extra logging.
.El
.El
.Ss PEER
Each
.Ar PEER
is defined as follows:
.Qq peer public-key <file-path> [endpoint <ip>[:<port>]] [preshared-key <file-path>] [outfile <file-path>] [wireguard <dev> <peer> <extra_params>]
.Pp
Providing a
.Ar PEER
instructs
.Nm
to exchange keys with the given peer and write the resulting PSK into the given
output file.
You must either specify the outfile or wireguard output option.
.Pp
The parameters of
.Ar PEER
are as follows:
.Bl -tag -width Ds
.It Ar endpoint <ip>[:<port>]
Specifies the address where the peer can be reached.
This will be automatically updated after the first successful key exchange with
the peer.
If this is unspecified, the peer must initiate the connection.
.It Ar preshared-key <file-path>
You may specify a pre-shared key which will be mixed into the final secret.
.It Ar outfile <file-path>
You may specify a file to write the exchanged keys to.
If this option is specified,
.Nm
will write a notification to standard out every time the key is updated.
.It Ar wireguard <dev> <peer> <extra_params>
This allows you to directly specify a wireguard peer to deploy the
pre-shared-key to.
You may specify extra parameters you would pass to
.Qq wg set
besides the preshared-key parameter which is used by
.Nm .
This makes it possible to add peers entirely from
.Nm .
.El
.Sh EXIT STATUS
.Ex -std
.Sh SEE ALSO
.Xr rp 1 ,
.Xr wg 1
.Sh STANDARDS
This tool is the reference implementation of the Rosenpass protocol, written
by Karolin Varner, Benjamin Lipp, Wanja Zaeske, and Lisa Schmidt.
.Sh AUTHORS
Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
.Pp
This manual page was written by
.An Emil Engler
.Sh BUGS
The bugs are tracked at
.Lk https://github.com/rosenpass/rosenpass/issues .
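Read together with the config examples above, a minimal session following this manual page might look like the sketch below (file names, addresses, and ports are illustrative, not mandated by the tool):

    rosenpass keygen private-key peer-a-secret-key public-key peer-a-public-key
    rosenpass exchange private-key peer-a-secret-key public-key peer-a-public-key \
        listen [::]:10001 \
        peer public-key peer-b-public-key endpoint localhost:10002 outfile peer-a-out

The second command keeps running, writes the exchanged key to peer-a-out, and prints a notification to standard out each time the key is renewed, as described under the outfile parameter above.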
doc/rp.1 (new file, 119 lines added)
@@ -0,0 +1,119 @@
.Dd $Mdocdate$
.Dt RP 1
.Os
.Sh NAME
.Nm rp
.Nd high-level interface to rosenpass
.Sh SYNOPSIS
.Nm
.Op Ar explain
.Op Ar verbose
.Ar genkey Ar ... | Ar pubkey ... | Ar exchange ...
.Nm
.Op ...
.Ar genkey PRIVATE_KEYS_DIR
.Nm
.Op ...
.Ar pubkey Ar PRIVATE_KEYS_DIR Ar PUBLIC_KEYS_DIR
.Nm
.Op ...
.\" Splitting this across several lines
.Ar exchange Ar PRIVATE_KEYS_DIR
.Op dev <device>
.Op listen <ip>:<port>
.\" Because the peer argument is complicated, it would be hell to represent it
.\" in mdoc... Using an ugly hack instead, thereby losing semantics.
[peer PUBLIC_KEYS_DIR [endpoint <ip>:<port>] [persistent-keepalive <interval>]
[allowed-ips <ip1>/<cidr1>[,<ip2>/<cidr2>] ...]] ...
.Sh DESCRIPTION
The
.Nm
program
is used to build a VPN with WireGuard and Rosenpass.
.Pp
The optional
.Op explain
and
.Op verbose
options can be used to obtain further help or to enable a detailed view on the
operations, respectively.
.Ss COMMANDS
.Bl -tag -width Ds
.It Ar genkey Ar PRIVATE_KEYS_DIR
Creates a new directory with appropriate permissions and generates all the
necessary private keys required for a peer to participate in a rosenpass
connection.
.It Ar pubkey Ar PRIVATE_KEYS_DIR Ar PUBLIC_KEYS_DIR
Creates a fresh directory at
.Ar PUBLIC_KEYS_DIR ,
which contains the extracted public keys from the private keys generated by
.Ar genkey
and located inside
.Ar PRIVATE_KEYS_DIR .
.It Ar exchange Ar PRIVATE_KEYS_DIR [dev <device>] [listen <ip>:<port>] [PEERS]
Starts the VPN on interface
.Ar device ,
listening on the provided IP and port combination, allowing connections from
.Ar PEERS .
.El
.Sh EXIT STATUS
.Ex -std
.Sh EXAMPLES
In this example, we will assume that the server has an interface bound to
192.168.0.1, that accepts incoming connections on port 9999/UDP for Rosenpass
and port 10000/UDP for WireGuard.
.Pp
To create a VPN connection, start by generating secret keys on both hosts.
.Bd -literal -offset indent
rp genkey server.rosenpass-secret
rp genkey client.rosenpass-secret
.Ed
.Pp
Extract the public keys:
.Bd -literal -offset indent
rp pubkey server.rosenpass-secret server.rosenpass-public
rp pubkey client.rosenpass-secret client.rosenpass-public
.Ed
.Pp
Copy the
.Qq -public
directories to the other peers and then start the VPN.
On the server:
.Bd -literal -offset indent
sudo rp exchange server.rosenpass-secret dev rosenpass0 listen 192.168.0.1:9999 \\
  peer client.rosenpass-public allowed-ips fe80::/64
.Ed
.Pp
On the client:
.Bd -literal -offset indent
sudo rp exchange client.rosenpass-secret dev rosenpass0 \\
  peer server.rosenpass-public endpoint 192.168.0.1:9999 allowed-ips fe80::/64
.Ed
.Pp
Assign IP addresses:
.Bd -literal -offset indent
sudo ip a add fe80::1/64 dev rosenpass0 # Server
sudo ip a add fe80::2/64 dev rosenpass0 # Client
.Ed
.Pp
Test the connection by pinging the server on the client machine:
.Bd -literal -offset indent
ping fe80::1%rosenpass0 # Client
.Ed
.Pp
You can watch how rosenpass replaces the WireGuard PSK with the following:
.Bd -literal -offset indent
watch -n 0.2 'wg show all; wg show all preshared-keys'
.Ed
.Sh SEE ALSO
.Xr rosenpass 1 ,
.Xr wg 1
.Sh AUTHORS
Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
.Pp
This manual page was written by
.An Emil Engler
.Sh BUGS
The bugs are tracked at
.Lk https://github.com/rosenpass/rosenpass/issues .
flake.lock (generated, 81 lines changed)
@@ -8,11 +8,11 @@
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
        "lastModified": 1674240251,
        "narHash": "sha256-AVMmf/CtcGensTZmMicToDpOwySEGNKYgRPC7lu3m8w=",
        "lastModified": 1699770036,
        "narHash": "sha256-bZmI7ytPAYLpyFNgj5xirDkKuAniOkj1xHdv5aIJ5GM=",
        "owner": "nix-community",
        "repo": "fenix",
        "rev": "d8067f4d1d3d30732703209bec5ca7d62aaececc",
        "rev": "81ab0b4f7ae9ebb57daa0edf119c4891806e4d3a",
        "type": "github"
      },
      "original": {
@@ -22,12 +22,15 @@
      }
    },
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1667395993,
        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
        "lastModified": 1694529238,
        "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
        "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
        "type": "github"
      },
      "original": {
@@ -36,13 +39,33 @@
        "type": "github"
      }
    },
    "naersk": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1698420672,
        "narHash": "sha256-/TdeHMPRjjdJub7p7+w55vyABrsJlt5QkznPYy55vKA=",
        "owner": "nix-community",
        "repo": "naersk",
        "rev": "aeb58d5e8faead8980a807c840232697982d47b9",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
        "repo": "naersk",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1672968032,
        "narHash": "sha256-26Jns3GmHem44a06UN5Rj/KOD9qNJThyQrom02Ijur8=",
        "lastModified": 1698846319,
        "narHash": "sha256-4jyW/dqFBVpWFnhl0nvP6EN4lP7/ZqPxYRjl6var0Oc=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "2dea8991d89b9f1e78d874945f78ca15f6954289",
        "rev": "34bdaaf1f0b7fb6d9091472edc968ff10a8c2857",
        "type": "github"
      },
      "original": {
@@ -50,37 +73,22 @@
        "type": "indirect"
      }
    },
    "nixpkgs-unstable": {
      "locked": {
        "lastModified": 1676496762,
        "narHash": "sha256-GFAxjaTgh8KJ8q7BYaI4EVGI5K98ooW70fG/83rSb08=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "1bddde315297c092712b0ef03d9def7a474b28ae",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "fenix": "fenix",
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs",
        "nixpkgs-unstable": "nixpkgs-unstable"
        "naersk": "naersk",
        "nixpkgs": "nixpkgs"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1674162026,
        "narHash": "sha256-iY0bxoVE7zAZmp0BB/m5hZW5pWHUfgntDvc1m2zyt/U=",
        "lastModified": 1699715108,
        "narHash": "sha256-yPozsobJU55gj+szgo4Lpcg1lHvGQYAT6Y4MrC80mWE=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "6e52c64031825920983515b9e975e93232739f7f",
        "rev": "5fcf5289e726785d20d3aa4d13d90a43ed248e83",
        "type": "github"
      },
      "original": {
@@ -89,6 +97,21 @@
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
flake.nix (188 lines changed)
@@ -1,8 +1,11 @@
{
  inputs = {
    nixpkgs-unstable.url = "github:NixOS/nixpkgs";
    flake-utils.url = "github:numtide/flake-utils";

    # for quicker rust builds
    naersk.url = "github:nix-community/naersk";
    naersk.inputs.nixpkgs.follows = "nixpkgs";

    # for rust nightly with llvm-tools-preview
    fenix.url = "github:nix-community/fenix";
    fenix.inputs.nixpkgs.follows = "nixpkgs";
@@ -19,12 +22,15 @@
        "aarch64-linux"

        # unsupported best-effort
        "i686-linux"
        "x86_64-darwin"
        "aarch64-darwin"
        # "x86_64-windows"
      ]
      (system:
        let
          lib = nixpkgs.lib;

          # normal nixpkgs
          pkgs = import nixpkgs {
            inherit system;
@@ -47,14 +53,17 @@
            )
          ];
        };

          # parsed Cargo.toml
          cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
          cargoToml = builtins.fromTOML (builtins.readFile ./rosenpass/Cargo.toml);

          # source files relevant for rust
          src = pkgs.lib.sourceByRegex ./. [
            "Cargo\\.(toml|lock)"
            "(src|benches)(/.*\\.(rs|md))?"
            "rp"
          src = pkgs.lib.sources.sourceFilesBySuffices ./. [
            ".lock"
            ".rs"
            ".toml"
          ];

          # builds a bin path for all dependencies for the `rp` shellscript
          rpBinPath = p: with p; lib.makeBinPath [
            coreutils
@@ -62,60 +71,119 @@
            gawk
            wireguard-tools
          ];

          # a function to generate a nix derivation for rosenpass against any
          # given set of nixpkgs
          rpDerivation = p:
            let
              isStatic = p.stdenv.hostPlatform.isStatic;
            in
            p.rustPlatform.buildRustPackage {
              # metadata and source
              pname = cargoToml.package.name;
              version = cargoToml.package.version;
              inherit src;
              cargoLock = {
                lockFile = src + "/Cargo.lock";
              # whether we want to build a statically linked binary
              isStatic = p.targetPlatform.isStatic;

              # the rust target of `p`
              target = p.rust.toRustTargetSpec p.targetPlatform;

              # convert a string to shout case
              shout = string: builtins.replaceStrings [ "-" ] [ "_" ] (pkgs.lib.toUpper string);

              # suitable Rust toolchain
              toolchain = with inputs.fenix.packages.${system}; combine [
                stable.cargo
                stable.rustc
                targets.${target}.stable.rust-std
              ];

              # naersk with a custom toolchain
              naersk = pkgs.callPackage inputs.naersk {
                cargo = toolchain;
                rustc = toolchain;
              };

              nativeBuildInputs = with pkgs; [
                cmake # for oqs build in the oqs-sys crate
                makeWrapper # for the rp shellscript
                pkg-config # let libsodium-sys-stable find libsodium
                removeReferencesTo
                rustPlatform.bindgenHook # for C-bindings in the crypto libs
              ];
              buildInputs = with p; [ bash libsodium ];
              # used to trick the build.rs into believing that CMake was ran **again**
              fakecmake = pkgs.writeScriptBin "cmake" ''
                #! ${pkgs.stdenv.shell} -e
                true
              '';
            in
            naersk.buildPackage
              {
                # metadata and source
                name = cargoToml.package.name;
                version = cargoToml.package.version;
                inherit src;

                cargoBuildOptions = x: x ++ [ "-p" "rosenpass" ];
                cargoTestOptions = x: x ++ [ "-p" "rosenpass" ];

                doCheck = true;

                nativeBuildInputs = with pkgs; [
                  p.stdenv.cc
                  cmake # for oqs build in the oqs-sys crate
                  mandoc # for the built-in manual
                  makeWrapper # for the rp shellscript
                  pkg-config # let libsodium-sys-stable find libsodium
                  removeReferencesTo
                  rustPlatform.bindgenHook # for C-bindings in the crypto libs
                ];
                buildInputs = with p; [ bash libsodium ];

                override = x: {
                  preBuild =
                    # nix defaults to building for aarch64 _without_ the armv8-a crypto
                    # extensions, but liboqs depends on these
                    (lib.optionalString (system == "aarch64-linux") ''
                      NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -march=armv8-a+crypto"
                    ''
                    );

                  # fortify is only compatible with dynamic linking
                  hardeningDisable = lib.optional isStatic "fortify";
                };

                overrideMain = x: {
                  # CMake detects that it was served a _foreign_ target dir, and CMake
                  # would be executed again upon the second build step of naersk.
                  # By adding our specially optimized CMake version, we reduce the cost
                  # of recompilation by 99 % while avoiding any CMake errors.
                  nativeBuildInputs = [ (lib.hiPrio fakecmake) ] ++ x.nativeBuildInputs;

                  # make sure that libc is linked, under musl this is not the case per
                  # default
                  preBuild = (lib.optionalString isStatic ''
                    NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -lc"
                  '');

                  preInstall = ''
                    install -D ${./rp} $out/bin/rp
                    wrapProgram $out/bin/rp --prefix PATH : "${ rpBinPath p }"
                  '';
                };

                # We want to build for a specific target...
                CARGO_BUILD_TARGET = target;

                # ... which might require a non-default linker:
                "CARGO_TARGET_${shout target}_LINKER" =
                  let
                    inherit (p.stdenv) cc;
                  in
                  "${cc}/bin/${cc.targetPrefix}cc";

                meta = with pkgs.lib;
                  {
                    inherit (cargoToml.package) description homepage;
                    license = with licenses; [ mit asl20 ];
                    maintainers = [ maintainers.wucke13 ];
                    platforms = platforms.all;
                  };
              } // (lib.mkIf isStatic {
                # otherwise pkg-config tries to link non-existent dynamic libs
                # documented here: https://docs.rs/pkg-config/latest/pkg_config/
                PKG_CONFIG_ALL_STATIC = true;

                # nix defaults to building for aarch64 _without_ the armv8-a
                # crypto extensions, but liboqs depends on these
                preBuild =
                  if system == "aarch64-linux" then ''
                    NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -march=armv8-a+crypto"
                  '' else "";

                preInstall = ''
                  install -D rp $out/bin/rp
                  wrapProgram $out/bin/rp --prefix PATH : "${ rpBinPath p }"
                '';

                # nix propagated the *.dev outputs of buildInputs for static
                # builds, but that is non-sense for an executables only package
                postFixup =
                  if isStatic then ''
                    remove-references-to -t ${p.bash.dev} -t ${p.libsodium.dev} \
                      $out/nix-support/propagated-build-inputs
                  '' else "";

                meta = with pkgs.lib; {
                  inherit (cargoToml.package) description homepage;
                  license = with licenses; [ mit asl20 ];
                  maintainers = [ maintainers.wucke13 ];
                  platforms = platforms.all;
                };
              };
              # tell rust to build everything statically linked
              CARGO_BUILD_RUSTFLAGS = "-C target-feature=+crt-static";
            });
          # a function to generate a docker image based on rosenpass
          rosenpassOCI = name: pkgs.dockerTools.buildImage rec {
            inherit name;
@@ -178,14 +246,11 @@
          #
          packages.whitepaper =
            let
              pkgs = import inputs.nixpkgs-unstable {
                inherit system;
              };
              tlsetup = (pkgs.texlive.combine {
                inherit (pkgs.texlive) scheme-basic acmart amsfonts ccicons
                  csquotes csvsimple doclicense fancyvrb fontspec gobble
                  koma-script ifmtarg latexmk lm markdown mathtools minted noto
                  nunito pgf soul soulutf8 unicode-math lualatex-math
                  nunito pgf soul unicode-math lualatex-math paralist
                  gitinfo2 eso-pic biblatex biblatex-trad biblatex-software
                  xkeyval xurl xifthen biber;
              });
@@ -222,7 +287,7 @@
          packages.proof-proverif = pkgs.stdenv.mkDerivation {
            name = "rosenpass-proverif-proof";
            version = "unstable";
            src = pkgs.lib.sourceByRegex ./. [
            src = pkgs.lib.sources.sourceByRegex ./. [
              "analyze.sh"
              "marzipan(/marzipan.awk)?"
              "analysis(/.*)?"
@@ -243,6 +308,7 @@
            inherit (packages.proof-proverif) CRYPTOVERIF_LIB;
            inputsFrom = [ packages.default ];
            nativeBuildInputs = with pkgs; [
              cmake # override the fakecmake from the main step above
              cargo-release
              clippy
              nodePackages.prettier
@@ -257,12 +323,10 @@

          checks = {
            # Blocked by https://github.com/rust-lang/rustfmt/issues/4306
            # @dakoraa wants a coding style suitable for her accessible coding setup
            # cargo-fmt = pkgs.runCommand "check-cargo-fmt"
            #   { inherit (devShells.default) nativeBuildInputs buildInputs; } ''
            #     cargo fmt --manifest-path=${src}/Cargo.toml --check > $out
            #   '';
            cargo-fmt = pkgs.runCommand "check-cargo-fmt"
              { inherit (self.devShells.${system}.default) nativeBuildInputs buildInputs; } ''
              cargo fmt --manifest-path=${./.}/Cargo.toml --check && touch $out
            '';
            nixpkgs-fmt = pkgs.runCommand "check-nixpkgs-fmt"
              { nativeBuildInputs = [ pkgs.nixpkgs-fmt ]; } ''
              nixpkgs-fmt --check ${./.} && touch $out
@@ -272,6 +336,8 @@
              cd ${./.} && prettier --check . && touch $out
            '';
          };

          formatter = pkgs.nixpkgs-fmt;
        }))
    ];
}
papers/assets/2023-03-20-rg-tutorial-screenshot.png (new binary file, 122 KiB)
papers/assets/2023-03-20-symbolic-analysis-screenshot.png (new binary file, 227 KiB)
papers/graphics/readme.md (new file, 5 lines added)
@@ -0,0 +1,5 @@
# Illustrations

## License

The graphics (SVG, PDF, and PNG files) in this folder are released under the CC BY-SA 4.0 license.
papers/graphics/rosenpass-wp-hashing-tree.afdesign (binary file; before: 725 KiB, after: 725 KiB)
@@ -1345,7 +1345,7 @@
<g transform="matrix(1,0,0,1,420.66,-1031.32)">
  <g transform="matrix(31.25,0,0,31.25,1431.32,1459.33)">
  </g>
  <text x="1179.63px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"k<tspan x="1207.79px 1224.25px " y="1459.33px 1459.33px ">ey</tspan> chaining init"</text>
  <text x="1179.63px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"chaining k<tspan x="1334px 1350.47px " y="1459.33px 1459.33px ">ey</tspan> init"</text>
</g>
</g>
<g transform="matrix(0.389246,0,0,0.136584,299.374,1166.87)">
@@ -1437,7 +1437,7 @@
<g transform="matrix(0.99675,0,0,0.996238,-597.124,-172.692)">
  <g transform="matrix(31.25,0,0,31.25,1492.94,1459.33)">
  </g>
  <text x="1187.16px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"k<tspan x="1215.32px 1231.79px " y="1459.33px 1459.33px ">ey</tspan> chaining e<tspan x="1398.88px " y="1459.33px ">x</tspan>tr<tspan x="1437.88px " y="1459.33px ">a</tspan>ct"</text>
  <text x="1187.16px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"chaining k<tspan x="1341.54px 1358px " y="1459.33px 1459.33px ">ey</tspan> e<tspan x="1398.88px " y="1459.33px ">x</tspan>tr<tspan x="1437.88px " y="1459.33px ">a</tspan>ct"</text>
</g>
<g transform="matrix(0.99675,0,0,0.996238,-380.054,-779.158)">
  <g transform="matrix(31.25,0,0,31.25,1463.54,1459.33)">
Before Width: | Height: | Size: 218 KiB After Width: | Height: | Size: 218 KiB |
@@ -1,218 +0,0 @@
|
||||
root: 0 { shape: text }
|
||||
PROTOCOL: "PROTOCOL" { shape: text }
|
||||
|
||||
protocol_comment: 'PROTOCOL = "rosenpass 1 rosenpass.eu aead=chachapoly1305 dprf=blake2s ekem=lightsaber skem=mceliece460896 xaead=xchachapoly1305"' { shape: text}
|
||||
|
||||
ck_init: '"chaining key init"' { shape: text }
|
||||
ck_ext: '"chaining key extract"' { shape: text }
|
||||
|
||||
mac: '"mac"' { shape: text }
|
||||
mac_param: MAC_WIRE_DATA { shape: text }
|
||||
cookie: '"cookie"' { shape: text }
|
||||
cookie_param: COOKIE_WIRE_DATA { shape: text }
|
||||
peer_id: '"peer_id"' { shape: text }
|
||||
peer_id_p1: spkm { shape: text}
|
||||
peer_id_p2: spkt { shape: text}
|
||||
|
||||
root -> PROTOCOL
|
||||
|
||||
PROTOCOL -> mac -> mac_param
|
||||
PROTOCOL -> cookie -> cookie_param
|
||||
PROTOCOL -> peer_id -> peer_id_p1 -> peer_id_p2
|
||||
PROTOCOL -> ck_init
|
||||
PROTOCOL -> ck_ext
|
||||
|
||||
mix: '"mix"' { shape: text }
|
||||
user: '"user"' { shape: text }
|
||||
rp_eu: '"rosenpass.eu"' { shape: text }
|
||||
wg_psk: '"wireguard psk"' { shape: text }
|
||||
hs_enc: '"handshake encryption"' { shape: text }
|
||||
ini_enc: '"initiator session encryption"' { shape: text }
|
||||
res_enc: '"responder session encryption"' { shape: text }
|
||||
|
||||
ck_ext -> mix
|
||||
ck_ext -> user -> rp_eu -> wg_psk
|
||||
ck_ext -> hs_enc
|
||||
ck_ext -> ini_enc
|
||||
ck_ext -> res_enc
|
||||
|
||||
# ck_init -> InitHello.start
|
||||
|
||||
InitHello {
|
||||
start -> d0 \
|
||||
-> m1 -> d1 \
|
||||
-> m2 -> d2
|
||||
|
||||
d2 -> encaps_spkr.m1
|
||||
encaps_spkr.d3 -> encrypt_ltk.m1
|
||||
encaps_spkr.d3 -> encrypt_ltk.key
|
||||
encrypt_ltk.d1 -> encrypt_auth.m1
|
||||
encrypt_ltk.d1 -> encrypt_auth.key
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
m2: "mix" { shape: text }
|
||||
|
||||
start: '"chaining key init"' { shape: text }
|
||||
d0: "spkr" { shape: circle }
|
||||
d1: "sidi" { shape: circle }
|
||||
d2: "epki" { shape: circle }
|
||||
|
||||
encaps_spkr {
|
||||
m1 -> d1 \
|
||||
-> m2 -> d2 \
|
||||
-> m3 -> d3 \
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
m2: "mix" { shape: text }
|
||||
m3: "mix" { shape: text }
|
||||
|
||||
d1: "spkr" { shape: circle }
|
||||
d2: "sctr" { shape: circle }
|
||||
d3: "sptr" { shape: circle }
|
||||
}
|
||||
|
||||
encrypt_ltk {
|
||||
m1 -> d1
|
||||
|
||||
encrypt: 'Aead::enc(peer_id(spkr, spki))'
|
||||
key -> encrypt: {
|
||||
target-arrowhead.label: key
|
||||
}
|
||||
data -> encrypt: {
|
||||
target-arrowhead.label: data
|
||||
}
|
||||
encrypt -> d1: {
|
||||
source-arrowhead.label: output
|
||||
}
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
key: '"handshake encryption"' { shape: text }
|
||||
data: 'ref from "peer id" branch after spkt' { shape: text }
|
||||
d1: "ct" { shape: diamond }
|
||||
}
|
||||
|
||||
encrypt_auth {
|
||||
m1 -> d1
|
||||
|
||||
encrypt: 'Aead::enc(empty())'
|
||||
key -> encrypt: {
|
||||
target-arrowhead.label: key
|
||||
}
|
||||
encrypt -> d1: {
|
||||
source-arrowhead.label: output
|
||||
}
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
key: '"handshake encryption"' { shape: text }
|
||||
d1: "ct" { shape: diamond }
|
||||
}
|
||||
}
|
||||
|
||||
RespHello {
|
||||
start -> d0 -> m1 -> d1
|
||||
d1 -> encaps_epki.m1
|
||||
encaps_epki.d3 -> encaps_spki.m1
|
||||
encaps_spki.d3 -> m2 -> d2
|
||||
d2 -> encrypt_auth.m1
|
||||
|
||||
store_biscuit -> d2
|
||||
"pidi" -> store_biscuit {
|
||||
target-arrowhead.label: "field=peerid"
|
||||
}
|
||||
encaps_spki.d3 -> store_biscuit {
|
||||
target-arrowhead.label: "field=ck"
|
||||
}
|
||||
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
m2: "mix" { shape: text }
|
||||
|
||||
start: '(state from InitHello)' { shape: text }
|
||||
d0: "sidr" { shape: circle }
|
||||
d1: "sidi" { shape: circle }
|
||||
d2: "biscuit" { shape: diamond }
|
||||
|
||||
store_biscuit: "store_biscuit()"
|
||||
|
||||
encaps_epki {
|
||||
m1 -> d1 \
|
||||
-> m2 -> d2 \
|
||||
-> m3 -> d3 \
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
m2: "mix" { shape: text }
|
||||
m3: "mix" { shape: text }
|
||||
|
||||
d1: "epki" { shape: circle }
|
||||
d2: "ecti" { shape: circle }
|
||||
d3: "epti" { shape: circle }
|
||||
}
|
||||
|
||||
encaps_spki {
|
||||
m1 -> d1 \
|
||||
-> m2 -> d2 \
|
||||
-> m3 -> d3 \
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
m2: "mix" { shape: text }
|
||||
m3: "mix" { shape: text }
|
||||
|
||||
d1: "spki" { shape: circle }
|
||||
d2: "scti" { shape: circle }
|
||||
d3: "spti" { shape: circle }
|
||||
}
|
||||
|
||||
encrypt_auth {
|
||||
m1 -> d1
|
||||
|
||||
encrypt: 'Aead::enc(empty())'
|
||||
key -> encrypt: {
|
||||
target-arrowhead.label: key
|
||||
}
|
||||
encrypt -> d1: {
|
||||
source-arrowhead.label: output
|
||||
}
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
key: '"handshake encryption"' { shape: text }
|
||||
d1: "ct" { shape: diamond }
|
||||
}
|
||||
}
|
||||
|
||||
InitConf {
|
||||
start -> d0 -> m1 -> d1 -> encrypt_auth.m1
|
||||
|
||||
encrypt_auth.d1 -> ol1 -> o1
|
||||
encrypt_auth.d1 -> ol2 -> o2
|
||||
encrypt_auth.d1 -> ol3 -> o3
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
|
||||
start: '(state from RespHello)' { shape: text }
|
||||
d0: "sidi" { shape: circle }
|
||||
d1: "sidr" { shape: circle }
|
||||
|
||||
ol1: '"wireguard psk"' { shape: text }
|
||||
ol2: '"initiator session encryption"' { shape: text }
|
||||
ol3: '"responder session encryption"' { shape: text}
|
||||
o2: "" { shape: page }
|
||||
o1: "" { shape: step }
|
||||
o2: "" { shape: step }
|
||||
o3: "" { shape: step }
|
||||
|
||||
encrypt_auth {
|
||||
m1 -> d1
|
||||
|
||||
encrypt: 'Aead::enc(empty())'
|
||||
key -> encrypt: {
|
||||
target-arrowhead.label: key
|
||||
}
|
||||
encrypt -> d1: {
|
||||
source-arrowhead.label: output
|
||||
}
|
||||
|
||||
m1: "mix" { shape: text }
|
||||
key: '"handshake encryption"' { shape: text }
|
||||
d1: "ct" { shape: diamond }
|
||||
}
|
||||
}
|
||||
|
Before Width: | Height: | Size: 847 KiB |
@@ -23,3 +23,7 @@ inside `papers/`. The PDF files will be located directly in `papers/`.
|
||||
The version info is using gitinfo2. To use the setup one has to run the `papers/tex/gitinfo2.sh` script. In local copies it's also possible to add this as a post-checkout or post-commit hook to keep it automatically up to date.
|
||||
|
||||
The version information in the footer automatically includes a “draft” marker. This can be removed by tagging a release version using `\jobname-release`, e.g. `whitepaper-release` for the `whitepaper.md` file.
|
||||
|
||||
## Licensing of assets
|
||||
|
||||
The text files and graphics in this folder (i.e. whitepaper.md, the SVG, PDF, and PNG files in the graphics/ folder) are released under the CC BY-SA 4.0 license.
|
||||
|
||||
@@ -1,81 +0,0 @@
|
||||
Protocol: {
|
||||
shape: sequence_diagram
|
||||
ini: "Initiator"
|
||||
res: "Responder"
|
||||
ini -> res: "InitHello"
|
||||
res -> ini: "RespHello"
|
||||
ini -> res: "InitConf"
|
||||
res -> ini: "EmptyData"
|
||||
}
|
||||
|
||||
Envelope: "Envelope" {
|
||||
shape: class
|
||||
type: "1"
|
||||
'': 3
|
||||
payload: variable
|
||||
mac: 16
|
||||
cookie: 16
|
||||
}
|
||||
|
||||
Envelope.payload -> InitHello
|
||||
InitHello: "InitHello (type=0x81)" {
|
||||
shape: class
|
||||
sidi: 4
|
||||
epki: 800
|
||||
sctr: 188
|
||||
peerid: 32 + 16 = 48
|
||||
auth: 16
|
||||
}
|
||||
|
||||
Envelope.payload -> RespHello
|
||||
RespHello: "RespHello (type=0x82)" {
|
||||
shape: class
|
||||
sidr: 4
|
||||
sidi: 4
|
||||
ecti: 768
|
||||
scti: 188
|
||||
biscuit: 76 + 24 + 16 = 116
|
||||
auth: 16
|
||||
}
|
||||
|
||||
Envelope.payload -> InitConf
|
||||
InitConf: "InitConf (type=0x83)" {
|
||||
shape: class
|
||||
sidi: 4
|
||||
sidr: 4
|
||||
biscuit: 76 + 24 +16 = 116
|
||||
auth: 16
|
||||
}
|
||||
|
||||
Envelope.payload -> EmptyData
|
||||
EmptyData: "EmptyData (type=0x84)" {
|
||||
shape: class
|
||||
sidx: 4
|
||||
ctr: 8
|
||||
auth: 16
|
||||
}
|
||||
|
||||
Envelope.payload -> Data
|
||||
Data: "Data (type=0x85)" {
|
||||
shape: class
|
||||
sidx: 4
|
||||
ctr: 8
|
||||
data: variable + 16
|
||||
}
|
||||
|
||||
Envelope.payload -> CookieReply
|
||||
CookieReply: "CookieReply (type=0x86)" {
|
||||
shape: class
|
||||
sidx: 4
|
||||
nonce: 24
|
||||
cookie: 16 + 16 = 32
|
||||
}
|
||||
|
||||
RespHello.biscuit -> Biscuit
|
||||
InitConf.biscuit -> Biscuit
|
||||
Biscuit: "Biscuit" {
|
||||
shape: class
|
||||
peerid: 32
|
||||
no: 12
|
||||
ck: 32
|
||||
}
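The removed diagram source above still pins down the wire format precisely, so it can be transcribed into a rough type sketch. The following is an editor's illustration based only on the field names and byte sizes listed in the diagram (the `reserved` name for the unlabelled 3-byte field is a guess); it is not code from the repository.

```rust
/// Sketch of the envelope described by the removed diagram (sizes in bytes).
#[repr(C)]
struct Envelope<Payload> {
    msg_type: u8,      // 0x81 InitHello, 0x82 RespHello, 0x83 InitConf,
                       // 0x84 EmptyData, 0x85 Data, 0x86 CookieReply
    reserved: [u8; 3], // unlabelled 3-byte field in the diagram
    payload: Payload,  // message-type dependent body
    mac: [u8; 16],     // message authentication code
    cookie: [u8; 16],  // cookie field for DoS mitigation
}

/// The InitHello body, following the sizes given in the diagram.
#[repr(C)]
struct InitHello {
    sidi: [u8; 4],    // initiator session ID
    epki: [u8; 800],  // ephemeral public key
    sctr: [u8; 188],  // static KEM ciphertext (encapsulation against `spkr`)
    peerid: [u8; 48], // 32-byte peer ID plus 16-byte authentication tag
    auth: [u8; 16],   // authentication tag
}
```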
|
||||
|
Before Width: | Height: | Size: 669 KiB |
@@ -79,6 +79,8 @@
|
||||
letter-csv .initial:n = ,
|
||||
letter-content .tl_set:N = \l_letter_csv_content_tl,
|
||||
letter-content .initial:n=,
|
||||
tableofcontents .bool_gset:N = \g__ptxcd_tableofcontents_bool,
|
||||
tableofcontents .initial:n = true,
|
||||
}
|
||||
|
||||
\tl_new:N \l__markdown_sequence_tl
|
||||
|
||||
@@ -130,7 +130,7 @@
|
||||
\bool_set_false:N \l_tmpa_bool
|
||||
\gitAbbrevHash{}~(\gitAuthorDate
|
||||
\clist_map_inline:Nn \gitTags {
|
||||
\exp_args:Nx \str_if_eq:nnT {\jobname-release} {test-whitepaper} {\bool_set_true:N \l_tmpa_bool\clist_map_break:}
|
||||
\exp_args:Nx \str_if_eq:nnT {\jobname-release} {whitepaper-release} {\bool_set_true:N \l_tmpa_bool\clist_map_break:}
|
||||
}
|
||||
\bool_if:NF \l_tmpa_bool {~--~draft}
|
||||
)
|
||||
@@ -171,7 +171,12 @@ version={4.0},
|
||||
\ExplSyntaxOn
|
||||
\SetTemplatePreamble{
|
||||
\hypersetup{pdftitle=\inserttitle,pdfauthor=The~Rosenpass~Project}
|
||||
\title{\vspace*{-2.5cm}\includegraphics[width=4cm]{RosenPass-Logo}}
|
||||
\exp_args:NV\tl_if_eq:nnTF \inserttitle{Rosenpass} {
|
||||
\title{\vspace*{-2.5cm}\includegraphics[width=4cm]{RosenPass-Logo}}
|
||||
} {
|
||||
\titlehead{\centerline{\includegraphics[width=4cm]{RosenPass-Logo}}}
|
||||
\title{\inserttitle}
|
||||
}
|
||||
\author{\csname insertauthor\endcsname}
|
||||
\subject{\csname insertsubject\endcsname}
|
||||
\date{\vspace{-1cm}}
|
||||
@@ -374,29 +379,28 @@ version={4.0},
|
||||
}
|
||||
}
|
||||
}
|
||||
\makeatother
|
||||
\ExplSyntaxOff
|
||||
|
||||
% end of namepartpicturesetup
|
||||
|
||||
\newcommand{\captionbox}[1]{{\setlength{\fboxsep}{.5ex}\colorbox{rosenpass-gray}{#1}}}
|
||||
|
||||
\makeatletter
|
||||
\renewenvironment{abstract}{
|
||||
\small
|
||||
\begin{center}\normalfont\sectfont\nobreak\abstractname\@endparpenalty\@M\end{center}%
|
||||
}{
|
||||
\par
|
||||
}
|
||||
\makeatother
|
||||
|
||||
|
||||
\SetTemplateBegin{
|
||||
\maketitle
|
||||
\begin{abstract}
|
||||
\noindent\csname insertabstract\endcsname
|
||||
\end{abstract}
|
||||
\tableofcontents
|
||||
\bool_if:NT \g__ptxcd_tableofcontents_bool \tableofcontents
|
||||
\clearpage
|
||||
}
|
||||
\makeatother
|
||||
\ExplSyntaxOff
|
||||
|
||||
\SetTemplateEnd{
|
||||
}
|
||||
\SetTemplateEnd{}
|
||||
|
||||
@@ -7,13 +7,13 @@ author:
|
||||
- Wanja Zaeske
|
||||
- Lisa Schmidt = {Scientific Illustrator – \\url{mullana.de}}
|
||||
abstract: |
|
||||
Rosenpass is used to create post-quantum-secure VPNs. Rosenpass computes a shared key, WireGuard (WG) [@wg] uses the shared key to establish a secure connection. Rosenpass can also be used without WireGuard, deriving post-quantum-secure symmetric keys for some other application. The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks.
|
||||
Rosenpass is used to create post-quantum-secure VPNs. Rosenpass computes a shared key, WireGuard (WG) [@wg] uses the shared key to establish a secure connection. Rosenpass can also be used without WireGuard, deriving post-quantum-secure symmetric keys for another application. The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks.
|
||||
|
||||
The WireGuard implementation enjoys great trust from the cryptography community and has excellent performance characteristics. To preserve these features, the Rosenpass application runs side-by-side with WireGuard and supplies a new post-quantum-secure pre-shared key (PSK) every two minutes. WireGuard itself still performs the pre-quantum-secure key exchange and transfers any transport data with no involvement from Rosenpass at all.
|
||||
|
||||
The Rosenpass project consists of a protocol description, an implementation written in Rust, and a symbolic analysis of the protocol’s security using ProVerif [@proverif]. We are working on a cryptographic security proof using CryptoVerif [@cryptoverif].
|
||||
|
||||
This document is a guide to engineers and researchers implementing the protocol; a scientific paper discussing the security properties of Rosenpass is work in progress.
|
||||
This document is a guide for engineers and researchers implementing the protocol; a scientific paper discussing the security properties of Rosenpass is work in progress.
|
||||
---
|
||||
|
||||
\enlargethispage{5mm}
|
||||
@@ -33,7 +33,7 @@ abstract: |
|
||||
Rosenpass inherits most security properties from Post-Quantum WireGuard (PQWG). The security properties mentioned here are covered by the symbolic analysis in the Rosenpass repository.
|
||||
|
||||
## Secrecy
|
||||
Three key encapsulations using the keypairs `sski`/`spki`, `sskr`/`spkr`, and `eski`/`epki` provide secrecy (see Section \ref{variables} for an introduction of the variables). Their respective ciphertexts are called `scti`, `sctr`, and `ectr` and the resulting keys are called `spti`, `sptr`, `epti`. A single secure encapsulation is sufficient to provide secrecy. We use two different KEMs (Key Encapsulation Methods; see section \ref{skem}): Kyber and Classic McEliece.
|
||||
Three key encapsulations using the keypairs `sski`/`spki`, `sskr`/`spkr`, and `eski`/`epki` provide secrecy (see Section \ref{variables} for an introduction of the variables). Their respective ciphertexts are called `scti`, `sctr`, and `ectr` and the resulting keys are called `spti`, `sptr`, `epti`. A single secure encapsulation is sufficient to provide secrecy. We use two different KEMs (Key Encapsulation Mechanisms; see section \ref{skem}): Kyber and Classic McEliece.
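For readers less familiar with KEMs, the interface relied on here can be summarised roughly as follows. This is an editor's sketch of a generic KEM, not the `pqkem::KEM` trait from the sources (of which only `keygen` appears in this diff), so the method names and signatures beyond `keygen` are illustrative assumptions.

```rust
/// Editor's sketch of a generic KEM interface (not the actual `pqkem::KEM` trait).
/// Secrecy rests on the encapsulated shared key: anyone can run `encaps` against a
/// public key, but only the holder of the matching secret key recovers the shared
/// key via `decaps`.
trait Kem {
    type PublicKey;
    type SecretKey;
    type Ciphertext;
    type SharedKey;

    /// Generate a fresh keypair, e.g. `sski`/`spki` or `eski`/`epki`.
    fn keygen() -> (Self::SecretKey, Self::PublicKey);
    /// Sender side: produce a ciphertext (e.g. `scti`) and a shared key (e.g. `spti`).
    fn encaps(pk: &Self::PublicKey) -> (Self::Ciphertext, Self::SharedKey);
    /// Receiver side: recover the same shared key from the ciphertext.
    fn decaps(sk: &Self::SecretKey, ct: &Self::Ciphertext) -> Self::SharedKey;
}
```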
|
||||
|
||||
## Authenticity
|
||||
|
||||
@@ -169,7 +169,7 @@ Rosenpass uses a cryptographic hash function for multiple purposes:
|
||||
* Computing the cookie to guard against denial of service attacks. This is a feature adopted from WireGuard, but not yet included in the implementation of Rosenpass.
|
||||
* Computing the peer ID
|
||||
* Key derivation during and after the handshake
|
||||
* Computing the additional data for the biscuit encryption, to prove some privacy for its contents
|
||||
* Computing the additional data for the biscuit encryption, to provide some privacy for its contents
|
||||
|
||||
Using one hash function for multiple purposes can cause real-world security issues and even key recovery attacks [@oraclecloning]. We choose a tree-based domain separation scheme based on a keyed hash function – the previously introduced primitive `hash` – to make sure all our hash function calls can be seen as distinct.
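As an illustration of what such tree-based domain separation amounts to in code, here is a minimal editor's sketch (assuming a 32-byte keyed hash; the labels mirror the hashing-tree diagram files elsewhere in this change set, and the function is not taken from the Rosenpass sources): every key is derived by walking from a protocol-wide root value through a path of distinct labels, so no two purposes ever feed the same inputs into the hash.

```rust
/// Editor's sketch of tree-based domain separation over a keyed hash.
/// `hash` stands in for the protocol's keyed `hash` primitive.
fn derive<H>(hash: H, root: [u8; 32], path: &[&[u8]]) -> [u8; 32]
where
    H: Fn(&[u8; 32], &[u8]) -> [u8; 32],
{
    // Walk down the label tree: key_{i+1} = hash(key_i, label_i).
    path.iter().fold(root, |key, label| hash(&key, *label))
}

// Distinct purposes live on distinct branches, e.g.
//   derive(hash, proto, &[b"mac"]);
//   derive(hash, proto, &[b"peer_id"]);
//   derive(hash, proto, &[b"chaining key extract", b"user", b"rosenpass.eu", b"wireguard psk"]);
// so their hash inputs can never coincide.
```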
|
||||
|
||||
@@ -237,7 +237,7 @@ For each peer, the server stores:
|
||||
The initiator stores the following local state for each ongoing handshake:
|
||||
|
||||
* A reference to the peer structure
|
||||
* A state indicator to keep track of the message expected from the responder next
|
||||
* A state indicator to keep track of the next message expected from the responder
|
||||
* `sidi` – Initiator session ID
|
||||
* `sidr` – Responder session ID
|
||||
* `ck` – The chaining key
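Written out as a data structure, the per-handshake state listed above might look roughly like this. This is an editor's sketch: the field names follow the list, but the concrete types and the enum are illustrative assumptions, not the `protocol` module's actual definitions.

```rust
/// Editor's sketch of the initiator's per-handshake state described above.
struct InitiatorHandshake {
    peer: usize,                 // reference to the peer structure (here: by index)
    expecting: NextResponderMsg, // which responder message is expected next
    sidi: [u8; 4],               // initiator session ID
    sidr: [u8; 4],               // responder session ID
    ck: [u8; 32],                // the chaining key
}

/// After sending InitHello the initiator waits for RespHello; after sending
/// InitConf it waits for the responder's EmptyData confirmation.
enum NextResponderMsg {
    RespHello,
    EmptyData,
}
```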
|
||||
|
||||
17
readme.md
@@ -14,14 +14,14 @@ This repository contains
|
||||
|
||||
## Getting started
|
||||
|
||||
First, [install rosenpass](#Getting-Rosenpass). Then, check out the help funtions of `rp` & `rosenpass`:
|
||||
First, [install rosenpass](#Getting-Rosenpass). Then, check out the help functions of `rp` & `rosenpass`:
|
||||
|
||||
```sh
|
||||
rp help
|
||||
rosenpass help
|
||||
```
|
||||
|
||||
Follow [quickstart instructions](https://rosenpass.eu/#start) to get a VPN up and running.
|
||||
Follow [quick start instructions](https://rosenpass.eu/#start) to get a VPN up and running.
|
||||
|
||||
## Software architecture
|
||||
|
||||
@@ -54,7 +54,7 @@ We are working on a cryptographic proof of security, but we already provide a sy
|
||||
(manual) $ ./analyze.sh
|
||||
```
|
||||
|
||||
The analysis is implemented according to modern software engineering principles: Using the C preprocessor, we where able to split the analysis into multiple files and uses some metaprogramming to avoid repetition.
|
||||
The analysis is implemented according to modern software engineering principles: Using the C preprocessor, we were able to split the analysis into multiple files and use some meta programming to avoid repetition.
|
||||
The code uses a variety of optimizations to speed up analysis such as using secret functions to model trusted/malicious setup. We split the model into two separate entry points which can be analyzed in parallel. Each is much faster than both models combined.
|
||||
A wrapper script provides instant feedback about which queries execute as expected in color: A red cross if a query fails and a green check if it succeeds.
|
||||
|
||||
@@ -62,15 +62,22 @@ A wrapper script provides instant feedback about which queries execute as expect
|
||||
[^libsodium]: https://doc.libsodium.org/
|
||||
[^wg]: https://www.wireguard.com/
|
||||
[^pqwg]: https://eprint.iacr.org/2020/379
|
||||
[^pqwg-statedis]: Unless supplied with a pre-shared-key, but this defeates the purpose of a key exchange protocol
|
||||
[^pqwg-statedis]: Unless supplied with a pre-shared-key, but this defeats the purpose of a key exchange protocol
|
||||
[^wg-statedis]: https://lists.zx2c4.com/pipermail/wireguard/2021-August/006916.htmlA
|
||||
|
||||
# Getting Rosenpass
|
||||
|
||||
Rosenpass is packaged for more and more distros, maybe also for the distro of your choice?
|
||||
Rosenpass is packaged for more and more distributions, maybe also for the distribution of your choice?
|
||||
|
||||
[](https://repology.org/project/rosenpass/versions)
|
||||
|
||||
# Mirrors
|
||||
|
||||
Don't want to use GitHub or only have an IPv6 connection? Rosenpass has set up two mirrors for this:
|
||||
|
||||
- [NotABug](https://notabug.org/rosenpass/rosenpass)
|
||||
- [GitLab](https://gitlab.com/rosenpass/rosenpass/)
|
||||
|
||||
# Supported by
|
||||
|
||||
Funded through <a href="https://nlnet.nl/">NLNet</a> with financial support from the European Commission's <a href="https://nlnet.nl/assure">NGI Assure</a> program.
|
||||
|
||||
42
rosenpass/Cargo.toml
Normal file
@@ -0,0 +1,42 @@
|
||||
[package]
|
||||
name = "rosenpass"
|
||||
version = "0.2.1-rc.3"
|
||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Build post-quantum-secure VPNs with WireGuard!"
|
||||
homepage = "https://rosenpass.eu/"
|
||||
repository = "https://github.com/rosenpass/rosenpass"
|
||||
readme = "readme.md"
|
||||
|
||||
[[bench]]
|
||||
name = "handshake"
|
||||
harness = false
|
||||
|
||||
[dependencies]
|
||||
anyhow = { version = "1.0.71", features = ["backtrace"] }
|
||||
base64 = "0.21.1"
|
||||
static_assertions = "1.1.0"
|
||||
memoffset = "0.9.0"
|
||||
libsodium-sys-stable = { version = "1.19.28", features = ["use-pkg-config"] }
|
||||
oqs-sys = { version = "0.8", default-features = false, features = ['classic_mceliece', 'kyber'] }
|
||||
lazy_static = "1.4.0"
|
||||
thiserror = "1.0.40"
|
||||
paste = "1.0.12"
|
||||
log = { version = "0.4.17", optional = true }
|
||||
env_logger = { version = "0.10.0", optional = true }
|
||||
serde = { version = "1.0.163", features = ["derive"] }
|
||||
toml = "0.7.4"
|
||||
clap = { version = "4.3.0", features = ["derive"] }
|
||||
mio = { version = "0.8.6", features = ["net", "os-poll"] }
|
||||
|
||||
[build-dependencies]
|
||||
anyhow = "1.0.71"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.4.0"
|
||||
test_bin = "0.4.0"
|
||||
stacker = "0.1.15"
|
||||
|
||||
[features]
|
||||
default = ["log", "env_logger"]
|
||||
@@ -1,17 +1,18 @@
|
||||
use anyhow::Result;
|
||||
use rosenpass::pqkem::KEM;
|
||||
use rosenpass::{
|
||||
pqkem::{CCAKEM, KEM},
|
||||
protocol::{CcaPk, CcaSk, HandleMsgResult, MsgBuf, PeerPtr, Server, SymKey},
|
||||
pqkem::StaticKEM,
|
||||
protocol::{CryptoServer, HandleMsgResult, MsgBuf, PeerPtr, SPk, SSk, SymKey},
|
||||
sodium::sodium_init,
|
||||
};
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
|
||||
fn handle(
|
||||
tx: &mut Server,
|
||||
tx: &mut CryptoServer,
|
||||
msgb: &mut MsgBuf,
|
||||
msgl: usize,
|
||||
rx: &mut Server,
|
||||
rx: &mut CryptoServer,
|
||||
resb: &mut MsgBuf,
|
||||
) -> Result<(Option<SymKey>, Option<SymKey>)> {
|
||||
let HandleMsgResult {
|
||||
@@ -30,7 +31,7 @@ fn handle(
|
||||
Ok((txk, rxk.or(xch)))
|
||||
}
|
||||
|
||||
fn hs(ini: &mut Server, res: &mut Server) -> Result<()> {
|
||||
fn hs(ini: &mut CryptoServer, res: &mut CryptoServer) -> Result<()> {
|
||||
let (mut inib, mut resb) = (MsgBuf::zero(), MsgBuf::zero());
|
||||
let sz = ini.initiate_handshake(PeerPtr(0), &mut *inib)?;
|
||||
let (kini, kres) = handle(ini, &mut inib, sz, res, &mut resb)?;
|
||||
@@ -38,16 +39,19 @@ fn hs(ini: &mut Server, res: &mut Server) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn keygen() -> Result<(CcaSk, CcaPk)> {
|
||||
let (mut sk, mut pk) = (CcaSk::zero(), CcaPk::zero());
|
||||
CCAKEM::keygen(sk.secret_mut(), pk.secret_mut())?;
|
||||
fn keygen() -> Result<(SSk, SPk)> {
|
||||
let (mut sk, mut pk) = (SSk::zero(), SPk::zero());
|
||||
StaticKEM::keygen(sk.secret_mut(), pk.secret_mut())?;
|
||||
Ok((sk, pk))
|
||||
}
|
||||
|
||||
fn make_server_pair() -> Result<(Server, Server)> {
|
||||
fn make_server_pair() -> Result<(CryptoServer, CryptoServer)> {
|
||||
let psk = SymKey::random();
|
||||
let ((ska, pka), (skb, pkb)) = (keygen()?, keygen()?);
|
||||
let (mut a, mut b) = (Server::new(ska, pka.clone()), Server::new(skb, pkb.clone()));
|
||||
let (mut a, mut b) = (
|
||||
CryptoServer::new(ska, pka.clone()),
|
||||
CryptoServer::new(skb, pkb.clone()),
|
||||
);
|
||||
a.add_peer(Some(psk.clone()), pkb)?;
|
||||
b.add_peer(Some(psk), pka)?;
|
||||
Ok((a, b))
|
||||
@@ -58,12 +62,12 @@ fn criterion_benchmark(c: &mut Criterion) {
|
||||
let (mut a, mut b) = make_server_pair().unwrap();
|
||||
c.bench_function("cca_secret_alloc", |bench| {
|
||||
bench.iter(|| {
|
||||
CcaSk::zero();
|
||||
SSk::zero();
|
||||
})
|
||||
});
|
||||
c.bench_function("cca_public_alloc", |bench| {
|
||||
bench.iter(|| {
|
||||
CcaPk::zero();
|
||||
SPk::zero();
|
||||
})
|
||||
});
|
||||
c.bench_function("keygen", |bench| {
|
||||
53
rosenpass/build.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
use anyhow::bail;
|
||||
use anyhow::Result;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
|
||||
/// Invokes a troff compiler to compile a manual page
|
||||
fn render_man(compiler: &str, man: &str) -> Result<String> {
|
||||
let out = Command::new(compiler).args(["-Tascii", man]).output()?;
|
||||
if !out.status.success() {
|
||||
bail!("{} returned an error", compiler);
|
||||
}
|
||||
|
||||
Ok(String::from_utf8(out.stdout)?)
|
||||
}
|
||||
|
||||
/// Generates the manual page
|
||||
fn generate_man() -> String {
|
||||
// This function is purposely stupid and redundant
|
||||
|
||||
let man = render_man("mandoc", "./doc/rosenpass.1");
|
||||
if let Ok(man) = man {
|
||||
return man;
|
||||
}
|
||||
|
||||
let man = render_man("groff", "./doc/rosenpass.1");
|
||||
if let Ok(man) = man {
|
||||
return man;
|
||||
}
|
||||
|
||||
// TODO: Link to online manual here
|
||||
"Cannot render manual page\n".into()
|
||||
}
|
||||
|
||||
fn man() {
|
||||
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
|
||||
let man = generate_man();
|
||||
let path = out_dir.join("rosenpass.1.ascii");
|
||||
|
||||
let mut file = File::create(&path).unwrap();
|
||||
file.write_all(man.as_bytes()).unwrap();
|
||||
|
||||
println!("cargo:rustc-env=ROSENPASS_MAN={}", path.display());
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// For now, rerun the build script every time, as the build script
|
||||
// is not very expensive right now.
|
||||
println!("cargo:rerun-if-changed=./");
|
||||
man();
|
||||
}
|
||||
1
rosenpass/readme.md
Symbolic link
@@ -0,0 +1 @@
|
||||
../readme.md
|
||||
738
rosenpass/src/app_server.rs
Normal file
@@ -0,0 +1,738 @@
|
||||
use anyhow::bail;
|
||||
|
||||
use anyhow::Result;
|
||||
use log::{debug, error, info, warn};
|
||||
use mio::Interest;
|
||||
use mio::Token;
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::io::Write;
|
||||
|
||||
use std::io::ErrorKind;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::net::SocketAddr;
|
||||
use std::net::SocketAddrV4;
|
||||
use std::net::SocketAddrV6;
|
||||
use std::net::ToSocketAddrs;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
use std::process::Stdio;
|
||||
use std::slice;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::util::fopen_w;
|
||||
use crate::{
|
||||
config::Verbosity,
|
||||
protocol::{CryptoServer, MsgBuf, PeerPtr, SPk, SSk, SymKey, Timing},
|
||||
util::{b64_writer, fmt_b64},
|
||||
};
|
||||
|
||||
const IPV4_ANY_ADDR: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0);
|
||||
const IPV6_ANY_ADDR: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
|
||||
|
||||
fn ipv4_any_binding() -> SocketAddr {
|
||||
// addr, port
|
||||
SocketAddr::V4(SocketAddrV4::new(IPV4_ANY_ADDR, 0))
|
||||
}
|
||||
|
||||
fn ipv6_any_binding() -> SocketAddr {
|
||||
// addr, port, flowinfo, scope_id
|
||||
SocketAddr::V6(SocketAddrV6::new(IPV6_ANY_ADDR, 0, 0, 0))
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct AppPeer {
|
||||
pub outfile: Option<PathBuf>,
|
||||
pub outwg: Option<WireguardOut>, // TODO make this a generic command
|
||||
pub initial_endpoint: Option<Endpoint>,
|
||||
pub current_endpoint: Option<Endpoint>,
|
||||
}
|
||||
|
||||
impl AppPeer {
|
||||
pub fn endpoint(&self) -> Option<&Endpoint> {
|
||||
self.current_endpoint
|
||||
.as_ref()
|
||||
.or(self.initial_endpoint.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct WireguardOut {
|
||||
// impl KeyOutput
|
||||
pub dev: String,
|
||||
pub pk: String,
|
||||
pub extra_params: Vec<String>,
|
||||
}
|
||||
|
||||
/// Holds the state of the application, namely the external IO
|
||||
///
|
||||
/// Responsible for file IO, network IO
|
||||
// TODO add user control via unix domain socket and stdin/stdout
|
||||
#[derive(Debug)]
|
||||
pub struct AppServer {
|
||||
pub crypt: CryptoServer,
|
||||
pub sockets: Vec<mio::net::UdpSocket>,
|
||||
pub events: mio::Events,
|
||||
pub mio_poll: mio::Poll,
|
||||
pub peers: Vec<AppPeer>,
|
||||
pub verbosity: Verbosity,
|
||||
pub all_sockets_drained: bool,
|
||||
}
|
||||
|
||||
/// A socket pointer is an index assigned to a socket;
|
||||
/// right now the index is just the sockets index in AppServer::sockets.
|
||||
///
|
||||
/// Holding this as a reference instead of an &mut UdpSocket is useful
|
||||
/// to deal with the borrow checker, because otherwise we could not refer
|
||||
/// to a socket and another member of AppServer at the same time.
|
||||
#[derive(Debug)]
|
||||
pub struct SocketPtr(pub usize);
|
||||
|
||||
impl SocketPtr {
|
||||
pub fn get<'a>(&self, srv: &'a AppServer) -> &'a mio::net::UdpSocket {
|
||||
&srv.sockets[self.0]
|
||||
}
|
||||
|
||||
pub fn get_mut<'a>(&self, srv: &'a mut AppServer) -> &'a mut mio::net::UdpSocket {
|
||||
&mut srv.sockets[self.0]
|
||||
}
|
||||
|
||||
pub fn send_to(&self, srv: &AppServer, buf: &[u8], addr: SocketAddr) -> anyhow::Result<()> {
|
||||
self.get(srv).send_to(buf, addr)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Index based pointer to a Peer
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct AppPeerPtr(pub usize);
|
||||
|
||||
impl AppPeerPtr {
|
||||
/// Takes an index based handle and returns the actual peer
|
||||
pub fn lift(p: PeerPtr) -> Self {
|
||||
Self(p.0)
|
||||
}
|
||||
|
||||
/// Returns an index based handle to one Peer
|
||||
pub fn lower(&self) -> PeerPtr {
|
||||
PeerPtr(self.0)
|
||||
}
|
||||
|
||||
pub fn get_app<'a>(&self, srv: &'a AppServer) -> &'a AppPeer {
|
||||
&srv.peers[self.0]
|
||||
}
|
||||
|
||||
pub fn get_app_mut<'a>(&self, srv: &'a mut AppServer) -> &'a mut AppPeer {
|
||||
&mut srv.peers[self.0]
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum AppPollResult {
|
||||
DeleteKey(AppPeerPtr),
|
||||
SendInitiation(AppPeerPtr),
|
||||
SendRetransmission(AppPeerPtr),
|
||||
ReceivedMessage(usize, Endpoint),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum KeyOutputReason {
|
||||
Exchanged,
|
||||
Stale,
|
||||
}
|
||||
|
||||
/// Represents a communication partner rosenpass may be sending packets to
|
||||
///
|
||||
/// Generally at the start of Rosenpass either no address or a Hostname is known;
|
||||
/// later when we actually start to receive RespHello packages, we know the specific Address
|
||||
/// and socket to use with a peer
|
||||
#[derive(Debug)]
|
||||
pub enum Endpoint {
|
||||
/// Rosenpass supports multiple sockets, so we include the information
|
||||
/// which socket an address can be reached on. This probably does not
|
||||
/// make much of a difference in most setups where two sockets are just
|
||||
/// used to enable dual stack operation; it does make a difference in
|
||||
/// more complex use cases.
|
||||
///
|
||||
/// For instance it enables using multiple interfaces with overlapping
|
||||
/// ip spaces, such as listening on a private IP network and a public IP
|
||||
/// at the same time. It also would reply on the same port RespHello was
|
||||
/// sent to when listening on multiple ports on the same interface. This
|
||||
/// may be required for some arcane firewall setups.
|
||||
SocketBoundAddress {
|
||||
/// The socket the address can be reached under; this is generally
|
||||
/// determined when we actually receive an RespHello message
|
||||
socket: SocketPtr,
|
||||
/// Just the address
|
||||
addr: SocketAddr,
|
||||
},
|
||||
// A host name or IP address; storing the hostname here instead of an
|
||||
// ip address makes sure that we look up the host name whenever we try
|
||||
// to make a connection; this may be beneficial in some setups where a host-name
|
||||
// at first can not be resolved but becomes resolvable later.
|
||||
Discovery(HostPathDiscoveryEndpoint),
|
||||
}
|
||||
|
||||
impl Endpoint {
|
||||
/// Start discovery from some addresses
|
||||
pub fn discovery_from_addresses(addresses: Vec<SocketAddr>) -> Self {
|
||||
Endpoint::Discovery(HostPathDiscoveryEndpoint::from_addresses(addresses))
|
||||
}
|
||||
|
||||
/// Start endpoint discovery from a hostname
|
||||
pub fn discovery_from_hostname(hostname: String) -> anyhow::Result<Self> {
|
||||
let host = HostPathDiscoveryEndpoint::lookup(hostname)?;
|
||||
Ok(Endpoint::Discovery(host))
|
||||
}
|
||||
|
||||
// Restart discovery; joining two sources of (potential) addresses
|
||||
//
|
||||
// This is used when the connection to an endpoint is lost in order
|
||||
// to include the addresses specified on the command line and the
|
||||
// address last used in the discovery process
|
||||
pub fn discovery_from_multiple_sources(
|
||||
a: Option<&Endpoint>,
|
||||
b: Option<&Endpoint>,
|
||||
) -> Option<Self> {
|
||||
let sources = match (a, b) {
|
||||
(Some(e), None) | (None, Some(e)) => e.addresses().iter().chain(&[]),
|
||||
(Some(e1), Some(e2)) => e1.addresses().iter().chain(e2.addresses()),
|
||||
(None, None) => return None,
|
||||
};
|
||||
let lower_size_bound = sources.size_hint().0;
|
||||
let mut dedup = std::collections::HashSet::with_capacity(lower_size_bound);
|
||||
let mut addrs = Vec::with_capacity(lower_size_bound);
|
||||
for a in sources {
|
||||
if dedup.insert(a) {
|
||||
addrs.push(*a);
|
||||
}
|
||||
}
|
||||
Some(Self::discovery_from_addresses(addrs))
|
||||
}
|
||||
|
||||
pub fn send(&self, srv: &AppServer, buf: &[u8]) -> anyhow::Result<()> {
|
||||
use Endpoint::*;
|
||||
match self {
|
||||
SocketBoundAddress { socket, addr } => socket.send_to(srv, buf, *addr),
|
||||
Discovery(host) => host.send_scouting(srv, buf),
|
||||
}
|
||||
}
|
||||
|
||||
fn addresses(&self) -> &[SocketAddr] {
|
||||
use Endpoint::*;
|
||||
match self {
|
||||
SocketBoundAddress { addr, .. } => slice::from_ref(addr),
|
||||
Discovery(host) => host.addresses(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles host-path discovery
|
||||
///
|
||||
/// When rosenpass is started, we either know no peer address
|
||||
/// or we know a hostname. How to contact this hostname may not
|
||||
/// be entirely clear for two reasons:
|
||||
///
|
||||
/// 1. We have multiple sockets; only a subset of those may be able to contact the host
|
||||
/// 2. DNS resolution can return multiple addresses
|
||||
///
|
||||
/// We could just use the first working socket and the first address returned, but this
|
||||
/// may be error prone: Some of the sockets may appear to be able to contact the host,
|
||||
/// but the packets will be dropped. Some of the addresses may appear to be reachable
|
||||
/// but the packets could be lost.
|
||||
///
|
||||
/// In contrast to TCP, UDP has no mechanism to ensure packets actually arrive.
|
||||
///
|
||||
/// To robustly handle host path discovery, we try each socket-ip-combination in a round
|
||||
/// robin fashion; the struct stores the offset of the last used combination internally and
|
||||
/// will continue with the next combination on every call.
|
||||
///
|
||||
/// Retransmission handling will continue normally; i.e. increasing the distance between
|
||||
/// retransmissions on every retransmission, until it is long enough to bore a human. Therefore
|
||||
/// it is important to avoid having a large number of sockets drop packets not just for efficiency
|
||||
/// but to avoid latency issues too.
|
||||
///
|
||||
// TODO: We might consider adjusting the retransmission handling to account for host-path discovery
|
||||
#[derive(Debug)]
|
||||
pub struct HostPathDiscoveryEndpoint {
|
||||
scouting_state: Cell<(usize, usize)>, // addr_off, sock_off
|
||||
addresses: Vec<SocketAddr>,
|
||||
}
|
||||
|
||||
impl HostPathDiscoveryEndpoint {
|
||||
pub fn from_addresses(addresses: Vec<SocketAddr>) -> Self {
|
||||
let scouting_state = Cell::new((0, 0));
|
||||
Self {
|
||||
addresses,
|
||||
scouting_state,
|
||||
}
|
||||
}
|
||||
|
||||
/// Lookup a hostname
|
||||
pub fn lookup(hostname: String) -> anyhow::Result<Self> {
|
||||
Ok(Self {
|
||||
addresses: ToSocketAddrs::to_socket_addrs(&hostname)?.collect(),
|
||||
scouting_state: Cell::new((0, 0)),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn addresses(&self) -> &Vec<SocketAddr> {
|
||||
&self.addresses
|
||||
}
|
||||
|
||||
fn insert_next_scout_offset(&self, srv: &AppServer, addr_no: usize, sock_no: usize) {
|
||||
self.scouting_state.set((
|
||||
(addr_no + 1) % self.addresses.len(),
|
||||
(sock_no + 1) % srv.sockets.len(),
|
||||
));
|
||||
}
|
||||
|
||||
/// Attempt to reach the host
|
||||
///
|
||||
/// Will round-robin-try different socket-ip-combinations on each call.
|
||||
pub fn send_scouting(&self, srv: &AppServer, buf: &[u8]) -> anyhow::Result<()> {
|
||||
let (addr_off, sock_off) = self.scouting_state.get();
|
||||
|
||||
let mut addrs = (self.addresses)
|
||||
.iter()
|
||||
.enumerate()
|
||||
.cycle()
|
||||
.skip(addr_off)
|
||||
.take(self.addresses.len());
|
||||
let mut sockets = (srv.sockets)
|
||||
.iter()
|
||||
.enumerate()
|
||||
.cycle()
|
||||
.skip(sock_off)
|
||||
.take(srv.sockets.len());
|
||||
|
||||
for (addr_no, addr) in addrs.by_ref() {
|
||||
for (sock_no, sock) in sockets.by_ref() {
|
||||
let res = sock.send_to(buf, *addr);
|
||||
let err = match res {
|
||||
Ok(_) => {
|
||||
self.insert_next_scout_offset(srv, addr_no, sock_no);
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => e,
|
||||
};
|
||||
|
||||
// TODO: replace this by
|
||||
// e.kind() == io::ErrorKind::NetworkUnreachable
|
||||
// once https://github.com/rust-lang/rust/issues/86442 lands
|
||||
let ignore = err
|
||||
.to_string()
|
||||
.starts_with("Address family not supported by protocol");
|
||||
if !ignore {
|
||||
warn!("Socket #{} refusing to send to {}: ", sock_no, addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bail!("Unable to send message: All sockets returned errors.")
|
||||
}
|
||||
}
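A small usage sketch for the endpoint types defined above (an editor's illustration, not part of this diff; `srv` stands for an already initialised `AppServer`, and the host name and port are placeholders):

```rust
// Editor's illustration only. DNS may yield several addresses; discovery keeps
// all of them and rotates through the socket/address combinations on each send.
fn scout_example(srv: &AppServer, msg: &[u8]) -> anyhow::Result<()> {
    let ep = Endpoint::discovery_from_hostname("peer.example.org:9999".to_string())?;
    ep.send(srv, msg)
}
```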
|
||||
|
||||
impl AppServer {
|
||||
pub fn new(
|
||||
sk: SSk,
|
||||
pk: SPk,
|
||||
addrs: Vec<SocketAddr>,
|
||||
verbosity: Verbosity,
|
||||
) -> anyhow::Result<Self> {
|
||||
// setup mio
|
||||
let mio_poll = mio::Poll::new()?;
|
||||
let events = mio::Events::with_capacity(8);
|
||||
|
||||
// bind each SocketAddr to a socket
|
||||
let maybe_sockets: Result<Vec<_>, _> =
|
||||
addrs.into_iter().map(mio::net::UdpSocket::bind).collect();
|
||||
let mut sockets = maybe_sockets?;
|
||||
|
||||
// When no socket is specified, rosenpass should open one port on all
|
||||
// available interfaces best-effort. Here are the cases how this can possibly go:
|
||||
//
|
||||
// Some operating systems (such as Linux [^linux] and FreeBSD [^freebsd])
|
||||
// support using IPv6 sockets to handle IPv4 connections; on these systems
|
||||
// binding to the `[::]:0` address will typically open a dual-stack
|
||||
// socket. Some other systems such as OpenBSD [^openbsd] do not support this feature.
|
||||
//
|
||||
// Dual-stack systems provide a flag to enable or disable this
|
||||
// behavior – the IPV6_V6ONLY flag. OpenBSD supports this flag
|
||||
// read-only. MIO[^mio] provides a way to read this flag but not
|
||||
// to write it.
|
||||
//
|
||||
// - One dual-stack IPv6 socket, if the operating system supports dual-stack sockets and
|
||||
// correctly reports this
|
||||
// - One IPv6 socket and one IPv4 socket if the operating system does not support dual stack
|
||||
// sockets or disables them by default assuming this is also correctly reported
|
||||
// - One IPv6 socket and no IPv4 socket if IPv6 socket is not dual-stack and opening
|
||||
// the IPv6 socket fails
|
||||
// - One IPv4 socket and no IPv6 socket if opening the IPv6 socket fails
|
||||
// - One dual-stack IPv6 socket and a redundant IPv4 socket if dual-stack sockets are
|
||||
// supported but the operating system does not correctly report this (specifically,
|
||||
// if the only_v6() call raises an error)
|
||||
// - Rosenpass exits if no socket could be opened
|
||||
//
|
||||
// [^freebsd]: https://man.freebsd.org/cgi/man.cgi?query=ip6&sektion=4&manpath=FreeBSD+6.0-RELEASE
|
||||
// [^openbsd]: https://man.openbsd.org/ip6.4
|
||||
// [^linux]: https://man7.org/linux/man-pages/man7/ipv6.7.html
|
||||
// [^mio]: https://docs.rs/mio/0.8.6/mio/net/struct.UdpSocket.html#method.only_v6
|
||||
if sockets.is_empty() {
|
||||
macro_rules! try_register_socket {
|
||||
($title:expr, $binding:expr) => {{
|
||||
let r = mio::net::UdpSocket::bind($binding);
|
||||
match r {
|
||||
Ok(sock) => {
|
||||
sockets.push(sock);
|
||||
Some(sockets.len() - 1)
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Could not bind to {} socket: {}", $title, e);
|
||||
None
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
let v6 = try_register_socket!("IPv6", ipv6_any_binding());
|
||||
|
||||
let need_v4 = match v6.map(|no| sockets[no].only_v6()) {
|
||||
Some(Ok(v)) => v,
|
||||
None => true,
|
||||
Some(Err(e)) => {
|
||||
warn!("Unable to detect whether the IPv6 socket supports dual-stack operation: {}", e);
|
||||
true
|
||||
}
|
||||
};
|
||||
|
||||
if need_v4 {
|
||||
try_register_socket!("IPv4", ipv4_any_binding());
|
||||
}
|
||||
}
|
||||
|
||||
if sockets.is_empty() {
|
||||
bail!("No sockets to listen on!")
|
||||
}
|
||||
|
||||
// register all sockets to mio
|
||||
for (i, socket) in sockets.iter_mut().enumerate() {
|
||||
mio_poll
|
||||
.registry()
|
||||
.register(socket, Token(i), Interest::READABLE)?;
|
||||
}
|
||||
|
||||
// TODO use mio::net::UnixStream together with std::os::unix::net::UnixStream for Linux
|
||||
|
||||
Ok(Self {
|
||||
crypt: CryptoServer::new(sk, pk),
|
||||
peers: Vec::new(),
|
||||
verbosity,
|
||||
sockets,
|
||||
events,
|
||||
mio_poll,
|
||||
all_sockets_drained: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn verbose(&self) -> bool {
|
||||
matches!(self.verbosity, Verbosity::Verbose)
|
||||
}
|
||||
|
||||
pub fn add_peer(
|
||||
&mut self,
|
||||
psk: Option<SymKey>,
|
||||
pk: SPk,
|
||||
outfile: Option<PathBuf>,
|
||||
outwg: Option<WireguardOut>,
|
||||
hostname: Option<String>,
|
||||
) -> anyhow::Result<AppPeerPtr> {
|
||||
let PeerPtr(pn) = self.crypt.add_peer(psk, pk)?;
|
||||
assert!(pn == self.peers.len());
|
||||
let initial_endpoint = hostname
|
||||
.map(Endpoint::discovery_from_hostname)
|
||||
.transpose()?;
|
||||
let current_endpoint = None;
|
||||
self.peers.push(AppPeer {
|
||||
outfile,
|
||||
outwg,
|
||||
initial_endpoint,
|
||||
current_endpoint,
|
||||
});
|
||||
Ok(AppPeerPtr(pn))
|
||||
}
|
||||
|
||||
pub fn listen_loop(&mut self) -> anyhow::Result<()> {
|
||||
const INIT_SLEEP: f64 = 0.01;
|
||||
const MAX_FAILURES: i32 = 10;
|
||||
let mut failure_cnt = 0;
|
||||
|
||||
loop {
|
||||
let msgs_processed = 0usize;
|
||||
let err = match self.event_loop() {
|
||||
Ok(()) => return Ok(()),
|
||||
Err(e) => e,
|
||||
};
|
||||
|
||||
// This should not happen…
|
||||
failure_cnt = if msgs_processed > 0 {
|
||||
0
|
||||
} else {
|
||||
failure_cnt + 1
|
||||
};
|
||||
let sleep = INIT_SLEEP * 2.0f64.powf(f64::from(failure_cnt - 1));
|
||||
let tries_left = MAX_FAILURES - (failure_cnt - 1);
|
||||
error!(
|
||||
"unexpected error after processing {} messages: {:?} {}",
|
||||
msgs_processed,
|
||||
err,
|
||||
err.backtrace()
|
||||
);
|
||||
if tries_left > 0 {
|
||||
error!("re-initializing networking in {sleep}! {tries_left} tries left.");
|
||||
std::thread::sleep(self.crypt.timebase.dur(sleep));
|
||||
continue;
|
||||
}
|
||||
|
||||
bail!("too many network failures");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn event_loop(&mut self) -> anyhow::Result<()> {
|
||||
let (mut rx, mut tx) = (MsgBuf::zero(), MsgBuf::zero());
|
||||
|
||||
/// if socket address for peer is known, call closure
|
||||
/// assumes that closure leaves a message in `tx`
|
||||
/// assumes that closure returns the length of message in bytes
|
||||
macro_rules! tx_maybe_with {
|
||||
($peer:expr, $fn:expr) => {
|
||||
attempt!({
|
||||
let p = $peer;
|
||||
if p.get_app(self).endpoint().is_some() {
|
||||
let len = $fn()?;
|
||||
let ep: &Endpoint = p.get_app(self).endpoint().unwrap();
|
||||
ep.send(self, &tx[..len])?;
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
};
|
||||
}
|
||||
|
||||
loop {
|
||||
use crate::protocol::HandleMsgResult;
|
||||
use AppPollResult::*;
|
||||
use KeyOutputReason::*;
|
||||
match self.poll(&mut *rx)? {
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
SendInitiation(peer) => tx_maybe_with!(peer, || self
|
||||
.crypt
|
||||
.initiate_handshake(peer.lower(), &mut *tx))?,
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
SendRetransmission(peer) => tx_maybe_with!(peer, || self
|
||||
.crypt
|
||||
.retransmit_handshake(peer.lower(), &mut *tx))?,
|
||||
DeleteKey(peer) => {
|
||||
self.output_key(peer, Stale, &SymKey::random())?;
|
||||
|
||||
// There was a loss of connection apparently; restart host discovery
|
||||
// starting from the last used address but including all the initially
|
||||
// specified addresses
|
||||
// TODO: We could do this preemptively, before any connection loss actually occurs.
|
||||
let p = peer.get_app_mut(self);
|
||||
p.current_endpoint = Endpoint::discovery_from_multiple_sources(
|
||||
p.current_endpoint.as_ref(),
|
||||
p.initial_endpoint.as_ref(),
|
||||
);
|
||||
}
|
||||
|
||||
ReceivedMessage(len, endpoint) => {
|
||||
match self.crypt.handle_msg(&rx[..len], &mut *tx) {
|
||||
Err(ref e) => {
|
||||
self.verbose().then(|| {
|
||||
info!(
|
||||
"error processing incoming message from {:?}: {:?} {}",
|
||||
endpoint,
|
||||
e,
|
||||
e.backtrace()
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
Ok(HandleMsgResult {
|
||||
resp,
|
||||
exchanged_with,
|
||||
..
|
||||
}) => {
|
||||
if let Some(len) = resp {
|
||||
endpoint.send(self, &tx[0..len])?;
|
||||
}
|
||||
|
||||
if let Some(p) = exchanged_with {
|
||||
let ap = AppPeerPtr::lift(p);
|
||||
ap.get_app_mut(self).current_endpoint = Some(endpoint);
|
||||
|
||||
// TODO: Maybe we should rather call the key "rosenpass output"?
|
||||
self.output_key(ap, Exchanged, &self.crypt.osk(p)?)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub fn output_key(
|
||||
&self,
|
||||
peer: AppPeerPtr,
|
||||
why: KeyOutputReason,
|
||||
key: &SymKey,
|
||||
) -> anyhow::Result<()> {
|
||||
let peerid = peer.lower().get(&self.crypt).pidt()?;
|
||||
let ap = peer.get_app(self);
|
||||
|
||||
if self.verbose() {
|
||||
let msg = match why {
|
||||
KeyOutputReason::Exchanged => "Exchanged key with peer",
|
||||
KeyOutputReason::Stale => "Erasing outdated key from peer",
|
||||
};
|
||||
info!("{} {}", msg, fmt_b64(&*peerid));
|
||||
}
|
||||
|
||||
if let Some(of) = ap.outfile.as_ref() {
|
||||
// This might leave some fragments of the secret on the stack;
|
||||
// in practice this is likely not a problem because the stack likely
|
||||
// will be overwritten by something else soon but this is not exactly
|
||||
// guaranteed. It would be possible to remedy this, but since the secret
|
||||
// data will linger in the linux page cache anyways with the current
|
||||
// implementation, going to great length to erase the secret here is
|
||||
// not worth it right now.
|
||||
b64_writer(fopen_w(of)?).write_all(key.secret())?;
|
||||
let why = match why {
|
||||
KeyOutputReason::Exchanged => "exchanged",
|
||||
KeyOutputReason::Stale => "stale",
|
||||
};
|
||||
|
||||
// this is intentionally writing to stdout instead of stderr, because
|
||||
// it is meant to allow external detection of a successful key-exchange
|
||||
println!(
|
||||
"output-key peer {} key-file {of:?} {why}",
|
||||
fmt_b64(&*peerid)
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(owg) = ap.outwg.as_ref() {
|
||||
let mut child = Command::new("wg")
|
||||
.arg("set")
|
||||
.arg(&owg.dev)
|
||||
.arg("peer")
|
||||
.arg(&owg.pk)
|
||||
.arg("preshared-key")
|
||||
.arg("/dev/stdin")
|
||||
.stdin(Stdio::piped())
|
||||
.args(&owg.extra_params)
|
||||
.spawn()?;
|
||||
b64_writer(child.stdin.take().unwrap()).write_all(key.secret())?;
|
||||
|
||||
thread::spawn(move || {
|
||||
let status = child.wait();
|
||||
|
||||
if let Ok(status) = status {
|
||||
if status.success() {
|
||||
debug!("successfully passed psk to wg")
|
||||
} else {
|
||||
error!("could not pass psk to wg {:?}", status)
|
||||
}
|
||||
} else {
|
||||
error!("wait failed: {:?}", status)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn poll(&mut self, rx_buf: &mut [u8]) -> anyhow::Result<AppPollResult> {
|
||||
use crate::protocol::PollResult as C;
|
||||
use AppPollResult as A;
|
||||
loop {
|
||||
return Ok(match self.crypt.poll()? {
|
||||
C::DeleteKey(PeerPtr(no)) => A::DeleteKey(AppPeerPtr(no)),
|
||||
C::SendInitiation(PeerPtr(no)) => A::SendInitiation(AppPeerPtr(no)),
|
||||
C::SendRetransmission(PeerPtr(no)) => A::SendRetransmission(AppPeerPtr(no)),
|
||||
C::Sleep(timeout) => match self.try_recv(rx_buf, timeout)? {
|
||||
Some((len, addr)) => A::ReceivedMessage(len, addr),
|
||||
None => continue,
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Tries to receive a new message
|
||||
///
|
||||
/// - might wait for a duration of up to `timeout`
|
||||
/// - returns immediately if an error occurs
|
||||
/// - returns immediately if a new message is received
|
||||
pub fn try_recv(
|
||||
&mut self,
|
||||
buf: &mut [u8],
|
||||
timeout: Timing,
|
||||
) -> anyhow::Result<Option<(usize, Endpoint)>> {
|
||||
let timeout = Duration::from_secs_f64(timeout);
|
||||
|
||||
// if there is no time to wait on IO, well, then, lets not waste any time!
|
||||
if timeout.is_zero() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// NOTE when using mio::Poll, there are some particularities (taken from
|
||||
// https://docs.rs/mio/latest/mio/struct.Poll.html):
|
||||
//
|
||||
// - poll() might return readiness, even if nothing is ready
|
||||
// - in this case, a WouldBlock error is returned from actual IO operations
|
||||
// - after receiving readiness for a source, it must be drained until a WouldBlock
|
||||
// is received
|
||||
//
|
||||
// This would usually require us to maintain the drainage status of each socket;
|
||||
// a socket would only become drained when it returned WouldBlock and only
|
||||
// non-drained when receiving a readiness event from mio for it. Then, only the
|
||||
// ready sockets should be worked on, ideally without requiring an O(n) search
|
||||
// through all sockets for checking their drained status. However, our use-case
|
||||
// is primarily having one or two sockets (if IPv4 and IPv6 IF_ANY listen is
|
||||
// desired on a non-dual-stack OS), thus just checking every socket after any
|
||||
// readiness event seems to be good enough™ for now.
|
||||
|
||||
// only poll if we drained all sockets before
|
||||
if self.all_sockets_drained {
|
||||
self.mio_poll.poll(&mut self.events, Some(timeout))?;
|
||||
}
|
||||
|
||||
let mut would_block_count = 0;
|
||||
for (sock_no, socket) in self.sockets.iter_mut().enumerate() {
|
||||
match socket.recv_from(buf) {
|
||||
Ok((n, addr)) => {
|
||||
// at least one socket was not drained...
|
||||
self.all_sockets_drained = false;
|
||||
return Ok(Some((
|
||||
n,
|
||||
Endpoint::SocketBoundAddress {
|
||||
socket: SocketPtr(sock_no),
|
||||
addr,
|
||||
},
|
||||
)));
|
||||
}
|
||||
Err(e) if e.kind() == ErrorKind::WouldBlock => {
|
||||
would_block_count += 1;
|
||||
}
|
||||
// TODO if one socket continuously returns an error, then we never poll, thus we never wait for a timeout, thus we have a spin-lock
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
// if each socket returned WouldBlock, then we drained them all at least once indeed
|
||||
self.all_sockets_drained = would_block_count == self.sockets.len();
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
261
rosenpass/src/cli.rs
Normal file
@@ -0,0 +1,261 @@
|
||||
use anyhow::{bail, ensure};
|
||||
use clap::Parser;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use crate::app_server;
|
||||
use crate::app_server::AppServer;
|
||||
use crate::util::{LoadValue, LoadValueB64};
|
||||
use crate::{
|
||||
// app_server::{AppServer, LoadValue, LoadValueB64},
|
||||
coloring::Secret,
|
||||
pqkem::{StaticKEM, KEM},
|
||||
protocol::{SPk, SSk, SymKey},
|
||||
};
|
||||
|
||||
use super::config;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about)]
|
||||
pub enum Cli {
|
||||
/// Start Rosenpass in server mode and carry on with the key exchange
|
||||
///
|
||||
/// This will parse the configuration file and perform the key exchange
|
||||
/// with the specified peers. If a peer's endpoint is specified, this
|
||||
/// Rosenpass instance will try to initiate a key exchange with the peer,
|
||||
/// otherwise only initiation attempts from the peer will be responded to.
|
||||
ExchangeConfig { config_file: PathBuf },
|
||||
|
||||
/// Start in daemon mode, performing key exchanges
|
||||
///
|
||||
/// The configuration is read from the command line. The `peer` token
|
||||
/// always separates multiple peers, e. g. if the token `peer` appears
|
||||
/// in the WIREGUARD_EXTRA_ARGS it is not put into the WireGuard arguments
|
||||
/// but instead a new peer is created.
|
||||
/* Explanation: `first_arg` and `rest_of_args` are combined into one
|
||||
* `Vec<String>`. They are only used to trick clap into displaying some
|
||||
* guidance on the CLI usage.
|
||||
*/
|
||||
#[allow(rustdoc::broken_intra_doc_links)]
|
||||
#[allow(rustdoc::invalid_html_tags)]
|
||||
Exchange {
|
||||
/// public-key <PATH> secret-key <PATH> [listen <ADDR>:<PORT>]... [verbose]
|
||||
#[clap(value_name = "OWN_CONFIG")]
|
||||
first_arg: String,
|
||||
|
||||
/// peer public-key <PATH> [ENDPOINT] [PSK] [OUTFILE] [WG]
|
||||
///
|
||||
/// ENDPOINT := endpoint <HOST/IP>:<PORT>
|
||||
///
|
||||
/// PSK := preshared-key <PATH>
|
||||
///
|
||||
/// OUTFILE := outfile <PATH>
|
||||
///
|
||||
/// WG := wireguard <WIREGUARD_DEV> <WIREGUARD_PEER> [WIREGUARD_EXTRA_ARGS]...
|
||||
#[clap(value_name = "PEERS")]
|
||||
rest_of_args: Vec<String>,
|
||||
|
||||
/// Save the parsed configuration to a file before starting the daemon
|
||||
#[clap(short, long)]
|
||||
config_file: Option<PathBuf>,
|
||||
},
|
||||
|
||||
/// Generate a demo config file
|
||||
GenConfig {
|
||||
config_file: PathBuf,
|
||||
|
||||
/// Forcefully overwrite existing config file
|
||||
#[clap(short, long)]
|
||||
force: bool,
|
||||
},
|
||||
|
||||
/// Generate the keys mentioned in a configFile
|
||||
///
|
||||
/// Generates secret- & public-key to their destination. If a config file
|
||||
/// is provided then the key file destination is taken from there.
|
||||
/// Otherwise the destinations are taken from the `--public-key` and `--secret-key` options.
|
||||
GenKeys {
|
||||
config_file: Option<PathBuf>,
|
||||
|
||||
/// where to write public-key to
|
||||
#[clap(short, long)]
|
||||
public_key: Option<PathBuf>,
|
||||
|
||||
/// where to write secret-key to
|
||||
#[clap(short, long)]
|
||||
secret_key: Option<PathBuf>,
|
||||
|
||||
/// Forcefully overwrite public- & secret-key file
|
||||
#[clap(short, long)]
|
||||
force: bool,
|
||||
},
|
||||
|
||||
/// Validate a configuration
|
||||
Validate { config_files: Vec<PathBuf> },
|
||||
|
||||
/// Show the rosenpass manpage
|
||||
// TODO make this the default, but only after the manpage has been adjusted once the CLI stabilizes
|
||||
Man,
|
||||
}
|
||||
|
||||
impl Cli {
|
||||
pub fn run() -> anyhow::Result<()> {
|
||||
let cli = Self::parse();
|
||||
|
||||
use Cli::*;
|
||||
match cli {
|
||||
Man => {
|
||||
let man_cmd = std::process::Command::new("man")
|
||||
.args(["1", "rosenpass"])
|
||||
.status();
|
||||
|
||||
if !(man_cmd.is_ok() && man_cmd.unwrap().success()) {
|
||||
println!(include_str!(env!("ROSENPASS_MAN")));
|
||||
}
|
||||
}
|
||||
GenConfig { config_file, force } => {
|
||||
ensure!(
|
||||
force || !config_file.exists(),
|
||||
"config file {config_file:?} already exists"
|
||||
);
|
||||
|
||||
config::Rosenpass::example_config().store(config_file)?;
|
||||
}
|
||||
|
||||
GenKeys {
|
||||
config_file,
|
||||
public_key,
|
||||
secret_key,
|
||||
force,
|
||||
} => {
|
||||
// figure out where the key file is specified, in the config file or directly as flag?
|
||||
let (pkf, skf) = match (config_file, public_key, secret_key) {
|
||||
(Some(config_file), _, _) => {
|
||||
ensure!(
|
||||
config_file.exists(),
|
||||
"config file {config_file:?} does not exist"
|
||||
);
|
||||
|
||||
let config = config::Rosenpass::load(config_file)?;
|
||||
|
||||
(config.public_key, config.secret_key)
|
||||
}
|
||||
(_, Some(pkf), Some(skf)) => (pkf, skf),
|
||||
_ => {
|
||||
bail!("either a config-file or both public-key and secret-key file are required")
|
||||
}
|
||||
};
|
||||
|
||||
// check that we are not overriding something unintentionally
|
||||
let mut problems = vec![];
|
||||
if !force && pkf.is_file() {
|
||||
problems.push(format!(
|
||||
"public-key file {pkf:?} exist, refusing to overwrite it"
|
||||
));
|
||||
}
|
||||
if !force && skf.is_file() {
|
||||
problems.push(format!(
|
||||
"secret-key file {skf:?} exist, refusing to overwrite it"
|
||||
));
|
||||
}
|
||||
if !problems.is_empty() {
|
||||
bail!(problems.join("\n"));
|
||||
}
|
||||
|
||||
// generate the keys and store them in files
|
||||
let mut ssk = crate::protocol::SSk::random();
|
||||
let mut spk = crate::protocol::SPk::random();
|
||||
StaticKEM::keygen(ssk.secret_mut(), spk.secret_mut())?;
|
||||
|
||||
ssk.store_secret(skf)?;
|
||||
spk.store_secret(pkf)?;
|
||||
}
|
||||
|
||||
ExchangeConfig { config_file } => {
|
||||
ensure!(
|
||||
config_file.exists(),
|
||||
"config file '{config_file:?}' does not exist"
|
||||
);
|
||||
|
||||
let config = config::Rosenpass::load(config_file)?;
|
||||
config.validate()?;
|
||||
Self::event_loop(config)?;
|
||||
}
|
||||
|
||||
Exchange {
|
||||
first_arg,
|
||||
mut rest_of_args,
|
||||
config_file,
|
||||
} => {
|
||||
rest_of_args.insert(0, first_arg);
|
||||
let args = rest_of_args;
|
||||
let mut config = config::Rosenpass::parse_args(args)?;
|
||||
|
||||
if let Some(p) = config_file {
|
||||
config.store(&p)?;
|
||||
config.config_file_path = p;
|
||||
}
|
||||
config.validate()?;
|
||||
Self::event_loop(config)?;
|
||||
}
|
||||
|
||||
Validate { config_files } => {
|
||||
for file in config_files {
|
||||
match config::Rosenpass::load(&file) {
|
||||
Ok(config) => {
|
||||
eprintln!("{file:?} is valid TOML and conforms to the expected schema");
|
||||
match config.validate() {
|
||||
Ok(_) => eprintln!("{file:?} is passed all logical checks"),
|
||||
Err(_) => eprintln!("{file:?} contains logical errors"),
|
||||
}
|
||||
}
|
||||
Err(e) => eprintln!("{file:?} is not valid: {e}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn event_loop(config: config::Rosenpass) -> anyhow::Result<()> {
|
||||
// load own keys
|
||||
let sk = SSk::load(&config.secret_key)?;
|
||||
let pk = SPk::load(&config.public_key)?;
|
||||
|
||||
// start an application server
|
||||
let mut srv = std::boxed::Box::<AppServer>::new(AppServer::new(
|
||||
sk,
|
||||
pk,
|
||||
config.listen,
|
||||
config.verbosity,
|
||||
)?);
|
||||
|
||||
for cfg_peer in config.peers {
|
||||
srv.add_peer(
|
||||
// psk, pk, outfile, outwg, tx_addr
|
||||
cfg_peer.pre_shared_key.map(SymKey::load_b64).transpose()?,
|
||||
SPk::load(&cfg_peer.public_key)?,
|
||||
cfg_peer.key_out,
|
||||
cfg_peer.wg.map(|cfg| app_server::WireguardOut {
|
||||
dev: cfg.device,
|
||||
pk: cfg.peer,
|
||||
extra_params: cfg.extra_params,
|
||||
}),
|
||||
cfg_peer.endpoint.clone(),
|
||||
)?;
|
||||
}
|
||||
|
||||
srv.event_loop()
|
||||
}
|
||||
}
|
||||
|
||||
trait StoreSecret {
|
||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()>;
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreSecret for Secret<N> {
|
||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
||||
std::fs::write(path, self.secret())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,10 @@
|
||||
//! This module contains various types for dealing with secrets
|
||||
//!
|
||||
//! These types use type level coloring to make accidential leackage of secrets extra hard.
|
||||
//! Types for dealing with (secret-) values
|
||||
//!
|
||||
//! These types use type level coloring to make accidental leakage of secrets extra hard. Both [Secret] and [Public] own their data, but the memory backing
|
||||
//! [Secret] is special:
|
||||
//! - as it is heap allocated, we can actively zeroize the memory before freeing it.
|
||||
//! - guard pages before and after each allocation trap accidental sequential reads that creep towards our secrets
|
||||
//! - the memory is mlocked, i.e. it is never swapped
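The zeroize-on-drop part of this can be illustrated with a few lines of ordinary Rust. This is a minimal sketch only, assuming a hypothetical `ToySecret` type; the crate's actual `Secret<N>` additionally mlocks the allocation and surrounds it with guard pages via its allocator.

```rust
/// Illustrative only: a heap-backed buffer that wipes itself before the
/// memory is returned to the allocator.
struct ToySecret<const N: usize> {
    buf: Box<[u8; N]>, // heap allocated so the backing memory is under our control
}

impl<const N: usize> Drop for ToySecret<N> {
    fn drop(&mut self) {
        for b in self.buf.iter_mut() {
            // volatile write keeps the compiler from optimizing the wipe away
            unsafe { std::ptr::write_volatile(b, 0) };
        }
    }
}
```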
|
||||
|
||||
use crate::{
|
||||
sodium::{rng, zeroize},
|
||||
457
rosenpass/src/config.rs
Normal file
@@ -0,0 +1,457 @@
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
fs,
|
||||
io::Write,
|
||||
net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs},
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use anyhow::{bail, ensure};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::util::fopen_w;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct Rosenpass {
|
||||
pub public_key: PathBuf,
|
||||
|
||||
pub secret_key: PathBuf,
|
||||
|
||||
pub listen: Vec<SocketAddr>,
|
||||
|
||||
#[serde(default)]
|
||||
pub verbosity: Verbosity,
|
||||
pub peers: Vec<RosenpassPeer>,
|
||||
|
||||
#[serde(skip)]
|
||||
pub config_file_path: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum Verbosity {
|
||||
Quiet,
|
||||
Verbose,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RosenpassPeer {
|
||||
pub public_key: PathBuf,
|
||||
pub endpoint: Option<String>,
|
||||
pub pre_shared_key: Option<PathBuf>,
|
||||
|
||||
#[serde(default)]
|
||||
pub key_out: Option<PathBuf>,
|
||||
|
||||
// TODO make sure failure does not crash but is logged
|
||||
#[serde(default)]
|
||||
pub exchange_command: Vec<String>,
|
||||
|
||||
// TODO make this field only available on binary builds, not on library builds
|
||||
#[serde(flatten)]
|
||||
pub wg: Option<WireGuard>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct WireGuard {
|
||||
pub device: String,
|
||||
pub peer: String,
|
||||
|
||||
#[serde(default)]
|
||||
pub extra_params: Vec<String>,
|
||||
}
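Taken together, the three structs above already pin down the whole on-disk format. The following is a hedged sketch of how they can be assembled and persisted programmatically, using only the constructors and methods defined further down in this file (`new`, `add_if_any`, `store`); the literal paths are placeholders.

```rust
use std::path::Path;

use rosenpass::config::{Rosenpass, RosenpassPeer};

fn write_example(path: &Path) -> anyhow::Result<()> {
    // own key pair plus the IPv4/IPv6 wildcard listen addresses
    let mut cfg = Rosenpass::new("rp-public-key", "rp-secret-key");
    cfg.add_if_any(9999);

    // one peer; all unspecified fields fall back to their defaults
    cfg.peers.push(RosenpassPeer {
        public_key: "rp-peer-public-key".into(),
        endpoint: Some("my-peer.test:9999".into()),
        key_out: Some("rp-key-out".into()),
        ..Default::default()
    });

    // serialized as pretty-printed TOML
    cfg.store(path)
}
```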
|
||||
|
||||
impl Rosenpass {
|
||||
/// Load a config file from a file path
|
||||
///
|
||||
/// No validation is conducted.
|
||||
pub fn load<P: AsRef<Path>>(p: P) -> anyhow::Result<Self> {
|
||||
let mut config: Self = toml::from_str(&fs::read_to_string(&p)?)?;
|
||||
|
||||
config.config_file_path = p.as_ref().to_owned();
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Write a config to a file
|
||||
pub fn store<P: AsRef<Path>>(&self, p: P) -> anyhow::Result<()> {
|
||||
let serialized_config =
|
||||
toml::to_string_pretty(&self).expect("unable to serialize the default config");
|
||||
fs::write(p, serialized_config)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Commit the configuration to where it came from, overwriting the original file
|
||||
pub fn commit(&self) -> anyhow::Result<()> {
|
||||
let mut f = fopen_w(&self.config_file_path)?;
|
||||
f.write_all(toml::to_string_pretty(&self)?.as_bytes())?;
|
||||
|
||||
self.store(&self.config_file_path)
|
||||
}
|
||||
|
||||
/// Validate a configuration
|
||||
pub fn validate(&self) -> anyhow::Result<()> {
|
||||
// check the public-key file exists
|
||||
ensure!(
|
||||
self.public_key.is_file(),
|
||||
"public-key file {:?} does not exist",
|
||||
self.public_key
|
||||
);
|
||||
|
||||
// check the secret-key file exists
|
||||
ensure!(
|
||||
self.secret_key.is_file(),
|
||||
"secret-key file {:?} does not exist",
|
||||
self.secret_key
|
||||
);
|
||||
|
||||
for (i, peer) in self.peers.iter().enumerate() {
|
||||
// check peer's public-key file exists
|
||||
ensure!(
|
||||
peer.public_key.is_file(),
|
||||
"peer {i} public-key file {:?} does not exist",
|
||||
peer.public_key
|
||||
);
|
||||
|
||||
// check endpoint is usable
|
||||
if let Some(addr) = peer.endpoint.as_ref() {
|
||||
ensure!(
|
||||
addr.to_socket_addrs().is_ok(),
|
||||
"peer {i} endpoint {} can not be parsed to a socket address",
|
||||
addr
|
||||
);
|
||||
}
|
||||
|
||||
// TODO warn if neither out_key nor exchange_command is defined
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Creates a new configuration
|
||||
pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(public_key: P1, secret_key: P2) -> Self {
|
||||
Self {
|
||||
public_key: PathBuf::from(public_key.as_ref()),
|
||||
secret_key: PathBuf::from(secret_key.as_ref()),
|
||||
listen: vec![],
|
||||
verbosity: Verbosity::Quiet,
|
||||
peers: vec![],
|
||||
config_file_path: PathBuf::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add IPv4 __and__ IPv6 IF_ANY address to the listen interfaces
|
||||
pub fn add_if_any(&mut self, port: u16) {
|
||||
let ipv4_any = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port));
|
||||
let ipv6_any = SocketAddr::V6(SocketAddrV6::new(
|
||||
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0),
|
||||
port,
|
||||
0,
|
||||
0,
|
||||
));
|
||||
self.listen.push(ipv4_any);
|
||||
self.listen.push(ipv6_any);
|
||||
}
|
||||
|
||||
/// from chaotic args
|
||||
/// Quest: the grammar is undecidable, what do we do here?
|
||||
pub fn parse_args(args: Vec<String>) -> anyhow::Result<Self> {
|
||||
let mut config = Self::new("", "");
|
||||
|
||||
#[derive(Debug, Hash, PartialEq, Eq)]
|
||||
enum State {
|
||||
Own,
|
||||
OwnPublicKey,
|
||||
OwnSecretKey,
|
||||
OwnListen,
|
||||
Peer,
|
||||
PeerPsk,
|
||||
PeerPublicKey,
|
||||
PeerEndpoint,
|
||||
PeerOutfile,
|
||||
PeerWireguardDev,
|
||||
PeerWireguardPeer,
|
||||
PeerWireguardExtraArgs,
|
||||
}
|
||||
|
||||
let mut already_set = HashSet::new();
|
||||
|
||||
// TODO idea: use config.peers.len() to give index of peer with conflicting argument
|
||||
use State::*;
|
||||
let mut state = Own;
|
||||
let mut current_peer = None;
|
||||
let p_exists = "a peer should exist by now";
|
||||
let wg_exists = "a peer wireguard should exist by now";
|
||||
for arg in args {
|
||||
state = match (state, arg.as_str(), &mut current_peer) {
|
||||
(Own, "public-key", None) => OwnPublicKey,
|
||||
(Own, "secret-key", None) => OwnSecretKey,
|
||||
(Own, "private-key", None) => {
|
||||
log::warn!(
|
||||
"the private-key argument is deprecated, please use secret-key instead"
|
||||
);
|
||||
OwnSecretKey
|
||||
}
|
||||
(Own, "listen", None) => OwnListen,
|
||||
(Own, "verbose", None) => {
|
||||
config.verbosity = Verbosity::Verbose;
|
||||
Own
|
||||
}
|
||||
(Own, "peer", None) => {
|
||||
ensure!(
|
||||
already_set.contains(&OwnPublicKey),
|
||||
"public-key file must be set"
|
||||
);
|
||||
ensure!(
|
||||
already_set.contains(&OwnSecretKey),
|
||||
"secret-key file must be set"
|
||||
);
|
||||
|
||||
already_set.clear();
|
||||
current_peer = Some(RosenpassPeer::default());
|
||||
|
||||
Peer
|
||||
}
|
||||
(OwnPublicKey, pk, None) => {
|
||||
ensure!(
|
||||
already_set.insert(OwnPublicKey),
|
||||
"public-key was already set"
|
||||
);
|
||||
config.public_key = pk.into();
|
||||
Own
|
||||
}
|
||||
(OwnSecretKey, sk, None) => {
|
||||
ensure!(
|
||||
already_set.insert(OwnSecretKey),
|
||||
"secret-key was already set"
|
||||
);
|
||||
config.secret_key = sk.into();
|
||||
Own
|
||||
}
|
||||
(OwnListen, l, None) => {
|
||||
already_set.insert(OwnListen); // multiple listen directives are allowed
|
||||
for socket_addr in l.to_socket_addrs()? {
|
||||
config.listen.push(socket_addr);
|
||||
}
|
||||
|
||||
Own
|
||||
}
|
||||
(Peer | PeerWireguardExtraArgs, "peer", maybe_peer @ Some(_)) => {
|
||||
// TODO check current peer
|
||||
// commit current peer, create a new one
|
||||
config.peers.push(maybe_peer.take().expect(p_exists));
|
||||
|
||||
already_set.clear();
|
||||
current_peer = Some(RosenpassPeer::default());
|
||||
|
||||
Peer
|
||||
}
|
||||
(Peer, "public-key", Some(_)) => PeerPublicKey,
|
||||
(Peer, "endpoint", Some(_)) => PeerEndpoint,
|
||||
(Peer, "preshared-key", Some(_)) => PeerPsk,
|
||||
(Peer, "outfile", Some(_)) => PeerOutfile,
|
||||
(Peer, "wireguard", Some(_)) => PeerWireguardDev,
|
||||
(PeerPublicKey, pk, Some(peer)) => {
|
||||
ensure!(
|
||||
already_set.insert(PeerPublicKey),
|
||||
"public-key was already set"
|
||||
);
|
||||
peer.public_key = pk.into();
|
||||
Peer
|
||||
}
|
||||
(PeerEndpoint, e, Some(peer)) => {
|
||||
ensure!(already_set.insert(PeerEndpoint), "endpoint was already set");
|
||||
peer.endpoint = Some(e.to_owned());
|
||||
Peer
|
||||
}
|
||||
(PeerPsk, psk, Some(peer)) => {
|
||||
ensure!(already_set.insert(PeerEndpoint), "peer psk was already set");
|
||||
peer.pre_shared_key = Some(psk.into());
|
||||
Peer
|
||||
}
|
||||
(PeerOutfile, of, Some(peer)) => {
|
||||
ensure!(
|
||||
already_set.insert(PeerOutfile),
|
||||
"peer outfile was already set"
|
||||
);
|
||||
peer.key_out = Some(of.into());
|
||||
Peer
|
||||
}
|
||||
(PeerWireguardDev, dev, Some(peer)) => {
|
||||
ensure!(
|
||||
already_set.insert(PeerWireguardDev),
|
||||
"peer wireguard-dev was already set"
|
||||
);
|
||||
assert!(peer.wg.is_none());
|
||||
peer.wg = Some(WireGuard {
|
||||
device: dev.to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
PeerWireguardPeer
|
||||
}
|
||||
(PeerWireguardPeer, p, Some(peer)) => {
|
||||
ensure!(
|
||||
already_set.insert(PeerWireguardPeer),
|
||||
"peer wireguard-peer was already set"
|
||||
);
|
||||
peer.wg.as_mut().expect(wg_exists).peer = p.to_string();
|
||||
PeerWireguardExtraArgs
|
||||
}
|
||||
(PeerWireguardExtraArgs, arg, Some(peer)) => {
|
||||
peer.wg
|
||||
.as_mut()
|
||||
.expect(wg_exists)
|
||||
.extra_params
|
||||
.push(arg.to_string());
|
||||
PeerWireguardExtraArgs
|
||||
}
|
||||
|
||||
// error cases
|
||||
(Own, x, None) => {
|
||||
bail!("unrecognised argument {x}");
|
||||
}
|
||||
(Own | OwnPublicKey | OwnSecretKey | OwnListen, _, Some(_)) => {
|
||||
panic!("current_peer is not None while in Own* state, this must never happen")
|
||||
}
|
||||
|
||||
(State::Peer, arg, Some(_)) => {
|
||||
bail!("unrecongnised argument {arg}");
|
||||
}
|
||||
(
|
||||
Peer
|
||||
| PeerEndpoint
|
||||
| PeerOutfile
|
||||
| PeerPublicKey
|
||||
| PeerPsk
|
||||
| PeerWireguardDev
|
||||
| PeerWireguardPeer
|
||||
| PeerWireguardExtraArgs,
|
||||
_,
|
||||
None,
|
||||
) => {
|
||||
panic!("got peer options but no peer was created")
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if let Some(p) = current_peer {
|
||||
// TODO ensure peer is propagated with sufficient information
|
||||
config.peers.push(p);
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
}
|
||||
|
||||
impl Rosenpass {
|
||||
/// Generate an example configuration
|
||||
pub fn example_config() -> Self {
|
||||
let peer = RosenpassPeer {
|
||||
public_key: "rp-peer-public-key".into(),
|
||||
endpoint: Some("my-peer.test:9999".into()),
|
||||
exchange_command: [
|
||||
"wg",
|
||||
"set",
|
||||
"wg0",
|
||||
"peer",
|
||||
"<PEER_ID>",
|
||||
"preshared-key",
|
||||
"/dev/stdin",
|
||||
]
|
||||
.into_iter()
|
||||
.map(|x| x.to_string())
|
||||
.collect(),
|
||||
key_out: Some("rp-key-out".into()),
|
||||
pre_shared_key: None,
|
||||
wg: None,
|
||||
};
|
||||
|
||||
Self {
|
||||
public_key: "rp-public-key".into(),
|
||||
secret_key: "rp-secret-key".into(),
|
||||
peers: vec![peer],
|
||||
..Self::new("", "")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Verbosity {
|
||||
fn default() -> Self {
|
||||
Self::Quiet
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::net::IpAddr;
|
||||
|
||||
use super::*;
|
||||
|
||||
fn split_str(s: &str) -> Vec<String> {
|
||||
s.split(" ").map(|s| s.to_string()).collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_cli_parse() {
|
||||
let args = split_str(
|
||||
"public-key /my/public-key secret-key /my/secret-key verbose \
|
||||
listen 0.0.0.0:9999 peer public-key /peer/public-key endpoint \
|
||||
peer.test:9999 outfile /peer/rp-out",
|
||||
);
|
||||
|
||||
let config = Rosenpass::parse_args(args).unwrap();
|
||||
|
||||
assert_eq!(config.public_key, PathBuf::from("/my/public-key"));
|
||||
assert_eq!(config.secret_key, PathBuf::from("/my/secret-key"));
|
||||
assert_eq!(config.verbosity, Verbosity::Verbose);
|
||||
assert_eq!(
|
||||
&config.listen,
|
||||
&vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 9999)]
|
||||
);
|
||||
assert_eq!(
|
||||
config.peers,
|
||||
vec![RosenpassPeer {
|
||||
public_key: PathBuf::from("/peer/public-key"),
|
||||
endpoint: Some("peer.test:9999".into()),
|
||||
pre_shared_key: None,
|
||||
key_out: Some(PathBuf::from("/peer/rp-out")),
|
||||
..Default::default()
|
||||
}]
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cli_parse_multiple_peers() {
|
||||
let args = split_str(
|
||||
"public-key /my/public-key secret-key /my/secret-key verbose \
|
||||
peer public-key /peer-a/public-key endpoint \
|
||||
peer.test:9999 outfile /peer-a/rp-out \
|
||||
peer public-key /peer-b/public-key outfile /peer-b/rp-out",
|
||||
);
|
||||
|
||||
let config = Rosenpass::parse_args(args).unwrap();
|
||||
|
||||
assert_eq!(config.public_key, PathBuf::from("/my/public-key"));
|
||||
assert_eq!(config.secret_key, PathBuf::from("/my/secret-key"));
|
||||
assert_eq!(config.verbosity, Verbosity::Verbose);
|
||||
assert!(&config.listen.is_empty());
|
||||
assert_eq!(
|
||||
config.peers,
|
||||
vec![
|
||||
RosenpassPeer {
|
||||
public_key: PathBuf::from("/peer-a/public-key"),
|
||||
endpoint: Some("peer.test:9999".into()),
|
||||
pre_shared_key: None,
|
||||
key_out: Some(PathBuf::from("/peer-a/rp-out")),
|
||||
..Default::default()
|
||||
},
|
||||
RosenpassPeer {
|
||||
public_key: PathBuf::from("/peer-b/public-key"),
|
||||
endpoint: None,
|
||||
pre_shared_key: None,
|
||||
key_out: Some(PathBuf::from("/peer-b/rp-out")),
|
||||
..Default::default()
|
||||
}
|
||||
]
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,6 @@
|
||||
//! Pseudo Random Functions (PRFs) with a tree-like label scheme which
|
||||
//! ensures their uniqueness
|
||||
|
||||
use {
|
||||
crate::{prftree::PrfTree, sodium::KEY_SIZE},
|
||||
anyhow::Result,
|
||||
@@ -7,7 +10,7 @@ pub fn protocol() -> Result<PrfTree> {
|
||||
PrfTree::zero().mix("Rosenpass v1 mceliece460896 Kyber512 ChaChaPoly1305 BLAKE2s".as_bytes())
|
||||
}
|
||||
|
||||
// TODO Use labels that can serve as idents
|
||||
// TODO Use labels that can serve as identifiers
|
||||
macro_rules! prflabel {
|
||||
($base:ident, $name:ident, $($lbl:expr),* ) => {
|
||||
pub fn $name() -> Result<PrfTree> {
|
||||
@@ -18,12 +21,12 @@ macro_rules! prflabel {
|
||||
}
|
||||
}
|
||||
|
||||
prflabel!(protocol, mac, "mac");
|
||||
prflabel!(protocol, cookie, "cookie");
|
||||
prflabel!(protocol, peerid, "peer id");
|
||||
prflabel!(protocol, mac, "mac");
|
||||
prflabel!(protocol, cookie, "cookie");
|
||||
prflabel!(protocol, peerid, "peer id");
|
||||
prflabel!(protocol, biscuit_ad, "biscuit additional data");
|
||||
prflabel!(protocol, ckinit, "chaining key init");
|
||||
prflabel!(protocol, _ckextract, "chaining key extract");
|
||||
prflabel!(protocol, ckinit, "chaining key init");
|
||||
prflabel!(protocol, _ckextract, "chaining key extract");
|
||||
|
||||
macro_rules! prflabel_leaf {
|
||||
($base:ident, $name:ident, $($lbl:expr),* ) => {
|
||||
@@ -35,10 +38,10 @@ macro_rules! prflabel_leaf {
|
||||
}
|
||||
}
|
||||
|
||||
prflabel_leaf!(_ckextract, mix, "mix");
|
||||
prflabel_leaf!(_ckextract, hs_enc, "handshake encryption");
|
||||
prflabel_leaf!(_ckextract, ini_enc, "initiator handshake encryption");
|
||||
prflabel_leaf!(_ckextract, res_enc, "responder handshake encryption");
|
||||
prflabel_leaf!(_ckextract, mix, "mix");
|
||||
prflabel_leaf!(_ckextract, hs_enc, "handshake encryption");
|
||||
prflabel_leaf!(_ckextract, ini_enc, "initiator handshake encryption");
|
||||
prflabel_leaf!(_ckextract, res_enc, "responder handshake encryption");
|
||||
|
||||
prflabel!(_ckextract, _user, "user");
|
||||
prflabel!(_user, _rp, "rosenpass.eu");
|
||||
@@ -3,7 +3,11 @@ pub mod util;
|
||||
#[macro_use]
|
||||
pub mod sodium;
|
||||
pub mod coloring;
|
||||
#[rustfmt::skip]
|
||||
pub mod labeled_prf;
|
||||
pub mod app_server;
|
||||
pub mod cli;
|
||||
pub mod config;
|
||||
pub mod msgs;
|
||||
pub mod pqkem;
|
||||
pub mod prftree;
|
||||
@@ -15,7 +19,7 @@ pub enum RosenpassError {
|
||||
Oqs,
|
||||
#[error("error from external library while calling OQS")]
|
||||
OqsExternalLib,
|
||||
#[error("buffer size mismatch, required {required_size} but only found {actual_size}")]
|
||||
#[error("buffer size mismatch, required {required_size} but found {actual_size}")]
|
||||
BufferSizeMismatch {
|
||||
required_size: usize,
|
||||
actual_size: usize,
|
||||
@@ -6,7 +6,7 @@
|
||||
//! This is a generalization of a PRF operating
|
||||
//! on a sequence of inputs instead of a single input.
|
||||
//!
|
||||
//! Like a Dec function the Iprf features efficient
|
||||
//! Like a Dec function the Iprf features efficient
|
||||
//! incrementability.
|
||||
//!
|
||||
//! You can also think of an Iprf as a Dec function with
|
||||
@@ -27,7 +27,7 @@ pub fn prf_into(out: &mut [u8], key: &[u8], data: &[u8]) {
|
||||
hmac_into(out, key, data).unwrap()
|
||||
}
|
||||
|
||||
pub fn prf(key: &[u8], data: &[u8]) -> [u8; KEY_SIZE]{
|
||||
pub fn prf(key: &[u8], data: &[u8]) -> [u8; KEY_SIZE] {
|
||||
mutating([0u8; KEY_SIZE], |r| prf_into(r, key, data))
|
||||
}
|
||||
|
||||
@@ -40,11 +40,11 @@ impl Iprf {
|
||||
IprfBranch(self.0)
|
||||
}
|
||||
|
||||
// TODO: Protocol! Use domain separation to ensure that
|
||||
// TODO: Protocol! Use domain separation to ensure that
|
||||
fn mix(self, v: &[u8]) -> Self {
|
||||
Self(prf(&self.0, v))
|
||||
}
|
||||
|
||||
|
||||
fn mix_secret<const N: usize>(self, v: Secret<N>) -> SecretIprf {
|
||||
SecretIprf::prf_invoc(&self.0, v.secret())
|
||||
}
|
||||
@@ -70,8 +70,9 @@ impl IprfBranch {
|
||||
|
||||
impl SecretIprf {
|
||||
fn prf_invoc(k: &[u8], d: &[u8]) -> SecretIprf {
|
||||
mutating(SecretIprf(Secret::zero()), |r|
|
||||
prf_into(k, d, r.secret_mut()))
|
||||
mutating(SecretIprf(Secret::zero()), |r| {
|
||||
prf_into(k, d, r.secret_mut())
|
||||
})
|
||||
}
|
||||
|
||||
fn from_key(k: Secret<N>) -> SecretIprf {
|
||||
15
rosenpass/src/main.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
use log::error;
|
||||
use rosenpass::{cli::Cli, sodium::sodium_init};
|
||||
use std::process::exit;
|
||||
|
||||
/// Catches errors, prints them through the logger, then exits
|
||||
pub fn main() {
|
||||
env_logger::init();
|
||||
match sodium_init().and_then(|()| Cli::run()) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
error!("{e}");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,8 @@
|
||||
//! # Messages
|
||||
//! Data structures representing the messages going over the wire
|
||||
//!
|
||||
//! This module contains data structures that help in the
|
||||
//! serialization/deserialization (ser/de) of messages. Thats kind of a lie,
|
||||
//! since no actual ser/de happens. Instead, the structures offer views into
|
||||
//! mutable byte slices (`&mut [u8]`), allowing to modify the fields of an
|
||||
//! This module contains de-/serialization of the protocol's messages. That's kind
|
||||
//! of a lie, since no actual ser/de happens. Instead, the structures offer views
|
||||
//! into mutable byte slices (`&mut [u8]`), allowing to modify the fields of an
|
||||
//! always serialized instance of the data in question. This is closely related
|
||||
//! to the concept of lenses in functional programming; more on that here:
|
||||
//! [https://sinusoid.es/misc/lager/lenses.pdf](https://sinusoid.es/misc/lager/lenses.pdf)
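To make the lens idea concrete, here is a minimal hand-written sketch of such a view over a byte buffer; the real message lenses are generated by the `data_lense!` macro below, and the field names here (`msg_type`, `sid`) are chosen purely for illustration.

```rust
/// A "lens" over an always-serialized message: no copies, just sub-slices.
struct EnvelopeView<'a>(&'a mut [u8]);

impl<'a> EnvelopeView<'a> {
    /// first byte: message type
    fn msg_type(&mut self) -> &mut [u8] {
        &mut self.0[0..1]
    }
    /// next four bytes: session id
    fn sid(&mut self) -> &mut [u8] {
        &mut self.0[1..5]
    }
}

fn main() {
    let mut wire = [0u8; 5];
    let mut view = EnvelopeView(&mut wire);
    view.msg_type().copy_from_slice(&[1]);
    view.sid().copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);
    drop(view);
    // the "fields" were written straight into the wire format
    assert_eq!(wire, [1, 0xde, 0xad, 0xbe, 0xef]);
}
```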
|
||||
@@ -144,7 +143,7 @@ macro_rules! data_lense(
|
||||
pub fn check_size(len: usize) -> Result<(), RosenpassError>{
|
||||
let required_size = $( $len + )+ 0;
|
||||
let actual_size = len;
|
||||
if required_size < actual_size {
|
||||
if required_size != actual_size {
|
||||
Err(RosenpassError::BufferSizeMismatch {
|
||||
required_size,
|
||||
actual_size,
|
||||
@@ -200,23 +199,53 @@ macro_rules! data_lense(
|
||||
type __ContainerType;
|
||||
|
||||
/// Create a lense to the byte slice
|
||||
fn [< $type:snake >] $(< $($generic),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError>;
|
||||
fn [< $type:snake >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError>;
|
||||
|
||||
/// Create a lense to the byte slice, automatically truncating oversized buffers
|
||||
fn [< $type:snake _ truncating >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError>;
|
||||
}
|
||||
|
||||
impl<'a> [< $type Ext >] for &'a [u8] {
|
||||
type __ContainerType = &'a [u8];
|
||||
|
||||
fn [< $type:snake >] $(< $($generic),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||
fn [< $type:snake >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||
$type::<Self::__ContainerType, $( $($generic),+ )? >::check_size(self.len())?;
|
||||
Ok($type ( self, $( $( ::core::marker::PhantomData::<$generic> ),+ )? ))
|
||||
}
|
||||
|
||||
fn [< $type:snake _ truncating >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||
let required_size = $( $len + )+ 0;
|
||||
let actual_size = self.len();
|
||||
if actual_size < required_size {
|
||||
return Err(RosenpassError::BufferSizeMismatch {
|
||||
required_size,
|
||||
actual_size,
|
||||
});
|
||||
}
|
||||
|
||||
[< $type Ext >]::[< $type:snake >](&self[..required_size])
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> [< $type Ext >] for &'a mut [u8] {
|
||||
type __ContainerType = &'a mut [u8];
|
||||
|
||||
fn [< $type:snake >] $(< $($generic),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||
fn [< $type:snake >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||
$type::<Self::__ContainerType, $( $($generic),+ )? >::check_size(self.len())?;
|
||||
Ok($type ( self, $( $( ::core::marker::PhantomData::<$generic> ),+ )? ))
|
||||
}
|
||||
|
||||
fn [< $type:snake _ truncating >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||
let required_size = $( $len + )+ 0;
|
||||
let actual_size = self.len();
|
||||
if actual_size < required_size {
|
||||
return Err(RosenpassError::BufferSizeMismatch {
|
||||
required_size,
|
||||
actual_size,
|
||||
});
|
||||
}
|
||||
|
||||
[< $type Ext >]::[< $type:snake >](&mut self[..required_size])
|
||||
}
|
||||
}
|
||||
});
|
||||
);
|
||||
@@ -244,9 +273,9 @@ data_lense! { InitHello :=
|
||||
/// Randomly generated connection id
|
||||
sidi: 4,
|
||||
/// Kyber 512 Ephemeral Public Key
|
||||
epki: EKEM::PK_LEN,
|
||||
epki: EphemeralKEM::PK_LEN,
|
||||
/// Classic McEliece Ciphertext
|
||||
sctr: SKEM::CT_LEN,
|
||||
sctr: StaticKEM::CT_LEN,
|
||||
/// Encrypted: 16 byte hash of McEliece initiator static key
|
||||
pidic: sodium::AEAD_TAG_LEN + 32,
|
||||
/// Encrypted TAI64N Time Stamp (against replay attacks)
|
||||
@@ -259,9 +288,9 @@ data_lense! { RespHello :=
|
||||
/// Copied from InitHello
|
||||
sidi: 4,
|
||||
/// Kyber 512 Ephemeral Ciphertext
|
||||
ecti: EKEM::CT_LEN,
|
||||
ecti: EphemeralKEM::CT_LEN,
|
||||
/// Classic McEliece Ciphertext
|
||||
scti: SKEM::CT_LEN,
|
||||
scti: StaticKEM::CT_LEN,
|
||||
/// Empty encrypted message (just an auth tag)
|
||||
auth: sodium::AEAD_TAG_LEN,
|
||||
/// Responders handshake state in encrypted form
|
||||
@@ -1,5 +1,6 @@
|
||||
//! This module contains Traits and implementations for Key Encapsulation
|
||||
//! Mechanisms (KEM). KEMs are the interface provided by almost all post-quantum
|
||||
//! Traits and implementations for Key Encapsulation Mechanisms (KEMs)
|
||||
//!
|
||||
//! KEMs are the interface provided by almost all post-quantum
|
||||
//! secure key exchange mechanisms.
|
||||
//!
|
||||
//! Conceptually KEMs are akin to public-key encryption, but instead of encrypting
|
||||
@@ -7,7 +8,7 @@
|
||||
//!
|
||||
//! encapsulation.
|
||||
//! The [KEM] Trait describes the basic API offered by a Key Encapsulation
|
||||
//! Mechanism. Two implementations for it are provided, [SKEM] and [EKEM].
|
||||
//! Mechanism. Two implementations for it are provided, [StaticKEM] and [EphemeralKEM].
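The shape of that API is easiest to see in a round trip. The sketch below uses only the names visible in this diff (`StaticKEM`, the `KEM` trait, `keygen`/`encaps`/`decaps` and the `*_LEN` constants); the 32-byte shared-secret buffer size is an assumption made for the example, not something this diff specifies.

```rust
use rosenpass::pqkem::{StaticKEM, KEM};
use rosenpass::RosenpassError;

fn kem_round_trip() -> Result<(), RosenpassError> {
    // receiver: long-term key pair
    let mut sk = vec![0u8; StaticKEM::SK_LEN];
    let mut pk = vec![0u8; StaticKEM::PK_LEN];
    StaticKEM::keygen(&mut sk, &mut pk)?;

    // sender: encapsulate a fresh shared key against the receiver's public key
    let mut ct = vec![0u8; StaticKEM::CT_LEN];
    let mut shk_sender = vec![0u8; 32]; // assumed shared-key length
    StaticKEM::encaps(&mut shk_sender, &mut ct, &pk)?;

    // receiver: decapsulate the ciphertext to recover the same shared key
    let mut shk_receiver = vec![0u8; 32];
    StaticKEM::decaps(&mut shk_receiver, &sk, &ct)?;

    assert_eq!(shk_sender, shk_receiver);
    Ok(())
}
```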
|
||||
|
||||
use crate::{RosenpassError, RosenpassMaybeError};
|
||||
|
||||
@@ -50,7 +51,7 @@ pub trait KEM {
|
||||
/// Classic McEliece is chosen because of its high security margin and its small
|
||||
/// ciphertexts. The public keys are humongous, but (being static keys) they are never transmitted over
|
||||
/// the wire so this is not a big problem.
|
||||
pub struct SKEM;
|
||||
pub struct StaticKEM;
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
@@ -65,7 +66,7 @@ pub struct SKEM;
|
||||
/// to only check that the buffers are big enough, allowing them to be even
|
||||
/// bigger. However, from a correctness point of view it does not make sense to
|
||||
/// allow bigger buffers.
|
||||
impl KEM for SKEM {
|
||||
impl KEM for StaticKEM {
|
||||
const SK_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_secret_key as usize;
|
||||
const PK_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_public_key as usize;
|
||||
const CT_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_ciphertext as usize;
|
||||
@@ -119,7 +120,7 @@ impl KEM for SKEM {
|
||||
/// WireGuard paper claimed that CPA security would be sufficient. Nonetheless we choose Kyber
|
||||
/// which provides CCA security since there are no publicly vetted KEMs out there which provide
|
||||
/// only CPA security.
|
||||
pub struct EKEM;
|
||||
pub struct EphemeralKEM;
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
@@ -134,7 +135,7 @@ pub struct EKEM;
|
||||
/// to only check that the buffers are big enough, allowing them to be even
|
||||
/// bigger. However, from a correctness point of view it does not make sense to
|
||||
/// allow bigger buffers.
|
||||
impl KEM for EKEM {
|
||||
impl KEM for EphemeralKEM {
|
||||
const SK_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_secret_key as usize;
|
||||
const PK_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_public_key as usize;
|
||||
const CT_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_ciphertext as usize;
|
||||
@@ -143,8 +144,7 @@ impl KEM for EKEM {
|
||||
RosenpassError::check_buffer_size(sk.len(), Self::SK_LEN)?;
|
||||
RosenpassError::check_buffer_size(pk.len(), Self::PK_LEN)?;
|
||||
unsafe {
|
||||
oqs_sys::kem::OQS_KEM_kyber_512_keypair(pk.as_mut_ptr(), sk.as_mut_ptr())
|
||||
.to_rg_error()
|
||||
oqs_sys::kem::OQS_KEM_kyber_512_keypair(pk.as_mut_ptr(), sk.as_mut_ptr()).to_rg_error()
|
||||
}
|
||||
}
|
||||
fn encaps(shk: &mut [u8], ct: &mut [u8], pk: &[u8]) -> Result<(), RosenpassError> {
|
||||
@@ -152,12 +152,8 @@ impl KEM for EKEM {
|
||||
RosenpassError::check_buffer_size(ct.len(), Self::CT_LEN)?;
|
||||
RosenpassError::check_buffer_size(pk.len(), Self::PK_LEN)?;
|
||||
unsafe {
|
||||
oqs_sys::kem::OQS_KEM_kyber_512_encaps(
|
||||
ct.as_mut_ptr(),
|
||||
shk.as_mut_ptr(),
|
||||
pk.as_ptr(),
|
||||
)
|
||||
.to_rg_error()
|
||||
oqs_sys::kem::OQS_KEM_kyber_512_encaps(ct.as_mut_ptr(), shk.as_mut_ptr(), pk.as_ptr())
|
||||
.to_rg_error()
|
||||
}
|
||||
}
|
||||
fn decaps(shk: &mut [u8], sk: &[u8], ct: &[u8]) -> Result<(), RosenpassError> {
|
||||
@@ -165,12 +161,8 @@ impl KEM for EKEM {
|
||||
RosenpassError::check_buffer_size(sk.len(), Self::SK_LEN)?;
|
||||
RosenpassError::check_buffer_size(ct.len(), Self::CT_LEN)?;
|
||||
unsafe {
|
||||
oqs_sys::kem::OQS_KEM_kyber_512_decaps(
|
||||
shk.as_mut_ptr(),
|
||||
ct.as_ptr(),
|
||||
sk.as_ptr(),
|
||||
)
|
||||
.to_rg_error()
|
||||
oqs_sys::kem::OQS_KEM_kyber_512_decaps(shk.as_mut_ptr(), ct.as_ptr(), sk.as_ptr())
|
||||
.to_rg_error()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,4 @@
|
||||
//! Implementation of the tree-like structure used for the label derivation in [labeled_prf](crate::labeled_prf)
|
||||
use {
|
||||
crate::{
|
||||
coloring::Secret,
|
||||
@@ -1,3 +1,5 @@
|
||||
//! Bindings and helpers for accessing libsodium functions
|
||||
|
||||
use crate::util::*;
|
||||
use anyhow::{ensure, Result};
|
||||
use libsodium_sys as libsodium;
|
||||
244
rosenpass/src/util.rs
Normal file
@@ -0,0 +1,244 @@
|
||||
//! Helper functions and macros
|
||||
use anyhow::{ensure, Context, Result};
|
||||
use base64::{
|
||||
display::Base64Display as B64Display, read::DecoderReader as B64Reader,
|
||||
write::EncoderWriter as B64Writer,
|
||||
};
|
||||
use std::{
|
||||
borrow::{Borrow, BorrowMut},
|
||||
cmp::min,
|
||||
fs::{File, OpenOptions},
|
||||
io::{Read, Write},
|
||||
path::Path,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use crate::coloring::{Public, Secret};
|
||||
|
||||
/// Xors a and b element-wise and writes the result into a.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use rosenpass::util::xor_into;
|
||||
/// let mut a = String::from("hello").into_bytes();
|
||||
/// let b = b"world";
|
||||
/// xor_into(&mut a, b);
|
||||
/// assert_eq!(&a, b"\x1f\n\x1e\x00\x0b");
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn xor_into(a: &mut [u8], b: &[u8]) {
|
||||
assert!(a.len() == b.len());
|
||||
for (av, bv) in a.iter_mut().zip(b.iter()) {
|
||||
*av ^= *bv;
|
||||
}
|
||||
}
|
||||
|
||||
/// Concatenate two byte arrays
|
||||
// TODO: Zeroize result?
|
||||
#[macro_export]
|
||||
macro_rules! cat {
|
||||
($len:expr; $($toks:expr),+) => {{
|
||||
let mut buf = [0u8; $len];
|
||||
let mut off = 0;
|
||||
$({
|
||||
let tok = $toks;
|
||||
let tr = ::std::borrow::Borrow::<[u8]>::borrow(tok);
|
||||
(&mut buf[off..(off + tr.len())]).copy_from_slice(tr);
|
||||
off += tr.len();
|
||||
})+
|
||||
assert!(off == buf.len(), "Size mismatch in cat!()");
|
||||
buf
|
||||
}}
|
||||
}
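A hedged usage sketch for `cat!`: the total length is given up front and must match the combined length of the parts, otherwise the size assertion inside the macro fires.

```rust
fn demo_cat() {
    // 5 + 5 bytes concatenated into one [u8; 10]
    let joined = rosenpass::cat!(10; b"hello", b"world");
    assert_eq!(&joined, b"helloworld");
}
```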
|
||||
|
||||
// TODO: consistent inout ordering
|
||||
pub fn cpy<T: BorrowMut<[u8]> + ?Sized, F: Borrow<[u8]> + ?Sized>(src: &F, dst: &mut T) {
|
||||
dst.borrow_mut().copy_from_slice(src.borrow());
|
||||
}
|
||||
|
||||
/// Copy from `src` to `dst`. If `src` and `dst` are not of equal length, copy as many bytes as possible.
|
||||
pub fn cpy_min<T: BorrowMut<[u8]> + ?Sized, F: Borrow<[u8]> + ?Sized>(src: &F, dst: &mut T) {
|
||||
let src = src.borrow();
|
||||
let dst = dst.borrow_mut();
|
||||
let len = min(src.len(), dst.len());
|
||||
dst[..len].copy_from_slice(&src[..len]);
|
||||
}
|
||||
|
||||
/// Try block: returns a Result and allows the use of the question mark operator inside
|
||||
#[macro_export]
|
||||
macro_rules! attempt {
|
||||
($block:expr) => {
|
||||
(|| -> ::anyhow::Result<_> { $block })()
|
||||
};
|
||||
}
|
||||
|
||||
use base64::engine::general_purpose::GeneralPurpose as Base64Engine;
|
||||
const B64ENGINE: Base64Engine = base64::engine::general_purpose::STANDARD;
|
||||
|
||||
pub fn fmt_b64<'a>(payload: &'a [u8]) -> B64Display<'a, 'static, Base64Engine> {
|
||||
B64Display::<'a, 'static>::new(payload, &B64ENGINE)
|
||||
}
|
||||
|
||||
pub fn b64_writer<W: Write>(w: W) -> B64Writer<'static, Base64Engine, W> {
|
||||
B64Writer::new(w, &B64ENGINE)
|
||||
}
|
||||
|
||||
pub fn b64_reader<R: Read>(r: R) -> B64Reader<'static, Base64Engine, R> {
|
||||
B64Reader::new(r, &B64ENGINE)
|
||||
}
|
||||
|
||||
// TODO remove this once std::cmp::max becomes const
|
||||
pub const fn max_usize(a: usize, b: usize) -> usize {
|
||||
if a > b {
|
||||
a
|
||||
} else {
|
||||
b
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Timebase(Instant);
|
||||
|
||||
impl Default for Timebase {
|
||||
fn default() -> Self {
|
||||
Self(Instant::now())
|
||||
}
|
||||
}
|
||||
|
||||
impl Timebase {
|
||||
pub fn now(&self) -> f64 {
|
||||
self.0.elapsed().as_secs_f64()
|
||||
}
|
||||
|
||||
pub fn dur(&self, t: f64) -> Duration {
|
||||
Duration::from_secs_f64(t)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mutating<T, F>(mut v: T, f: F) -> T
|
||||
where
|
||||
F: Fn(&mut T),
|
||||
{
|
||||
f(&mut v);
|
||||
v
|
||||
}
|
||||
|
||||
pub fn sideeffect<T, F>(v: T, f: F) -> T
|
||||
where
|
||||
F: Fn(&T),
|
||||
{
|
||||
f(&v);
|
||||
v
|
||||
}
|
||||
|
||||
/// load'n store
|
||||
|
||||
/// Open a file writable
|
||||
pub fn fopen_w<P: AsRef<Path>>(path: P) -> Result<File> {
|
||||
Ok(OpenOptions::new()
|
||||
.read(false)
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.open(path)?)
|
||||
}
|
||||
/// Open a file readable
|
||||
pub fn fopen_r<P: AsRef<Path>>(path: P) -> Result<File> {
|
||||
Ok(OpenOptions::new()
|
||||
.read(true)
|
||||
.write(false)
|
||||
.create(false)
|
||||
.truncate(false)
|
||||
.open(path)?)
|
||||
}
|
||||
|
||||
pub trait ReadExactToEnd {
|
||||
fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()>;
|
||||
}
|
||||
|
||||
impl<R: Read> ReadExactToEnd for R {
|
||||
fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()> {
|
||||
let mut dummy = [0u8; 8];
|
||||
self.read_exact(buf)?;
|
||||
ensure!(self.read(&mut dummy)? == 0, "File too long!");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait LoadValue {
|
||||
fn load<P: AsRef<Path>>(path: P) -> Result<Self>
|
||||
where
|
||||
Self: Sized;
|
||||
}
|
||||
|
||||
pub trait LoadValueB64 {
|
||||
fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self>
|
||||
where
|
||||
Self: Sized;
|
||||
}
|
||||
|
||||
trait StoreValue {
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> Result<()>;
|
||||
}
|
||||
|
||||
trait StoreSecret {
|
||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()>;
|
||||
}
|
||||
|
||||
impl<T: StoreValue> StoreSecret for T {
|
||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||
self.store(path)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValue for Secret<N> {
|
||||
fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
let mut v = Self::random();
|
||||
let p = path.as_ref();
|
||||
fopen_r(p)?
|
||||
.read_exact_to_end(v.secret_mut())
|
||||
.with_context(|| format!("Could not load file {p:?}"))?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValueB64 for Secret<N> {
|
||||
fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
let mut v = Self::random();
|
||||
let p = path.as_ref();
|
||||
// This might leave some fragments of the secret on the stack;
|
||||
// in practice this is likely not a problem because the stack likely
|
||||
// will be overwritten by something else soon but this is not exactly
|
||||
// guaranteed. It would be possible to remedy this, but since the secret
|
||||
// data will linger in the Linux page cache anyways with the current
|
||||
// implementation, going to great length to erase the secret here is
|
||||
// not worth it right now.
|
||||
b64_reader(&mut fopen_r(p)?)
|
||||
.read_exact(v.secret_mut())
|
||||
.with_context(|| format!("Could not load base64 file {p:?}"))?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreSecret for Secret<N> {
|
||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||
std::fs::write(path, self.secret())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValue for Public<N> {
|
||||
fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
let mut v = Self::random();
|
||||
fopen_r(path)?.read_exact_to_end(&mut *v)?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValue for Public<N> {
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||
std::fs::write(path, **self)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -8,21 +8,21 @@ fn generate_keys() {
|
||||
let tmpdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join("keygen");
|
||||
fs::create_dir_all(&tmpdir).unwrap();
|
||||
|
||||
let priv_key_path = tmpdir.join("private-key");
|
||||
let pub_key_path = tmpdir.join("public-key");
|
||||
let secret_key_path = tmpdir.join("secret-key");
|
||||
let public_key_path = tmpdir.join("public-key");
|
||||
|
||||
let output = test_bin::get_test_bin(BIN)
|
||||
.args(["keygen", "private-key"])
|
||||
.arg(&priv_key_path)
|
||||
.arg("public-key")
|
||||
.arg(&pub_key_path)
|
||||
.args(["gen-keys", "--secret-key"])
|
||||
.arg(&secret_key_path)
|
||||
.arg("--public-key")
|
||||
.arg(&public_key_path)
|
||||
.output()
|
||||
.expect("Failed to start {BIN}");
|
||||
|
||||
assert_eq!(String::from_utf8_lossy(&output.stdout), "");
|
||||
|
||||
assert!(priv_key_path.is_file());
|
||||
assert!(pub_key_path.is_file());
|
||||
assert!(secret_key_path.is_file());
|
||||
assert!(public_key_path.is_file());
|
||||
|
||||
// cleanup
|
||||
fs::remove_dir_all(&tmpdir).unwrap();
|
||||
@@ -46,22 +46,22 @@ fn check_exchange() {
|
||||
let tmpdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join("exchange");
|
||||
fs::create_dir_all(&tmpdir).unwrap();
|
||||
|
||||
let priv_key_paths = [tmpdir.join("private-key-0"), tmpdir.join("private-key-1")];
|
||||
let pub_key_paths = [tmpdir.join("public-key-0"), tmpdir.join("public-key-1")];
|
||||
let secret_key_paths = [tmpdir.join("secret-key-0"), tmpdir.join("secret-key-1")];
|
||||
let public_key_paths = [tmpdir.join("public-key-0"), tmpdir.join("public-key-1")];
|
||||
let shared_key_paths = [tmpdir.join("shared-key-0"), tmpdir.join("shared-key-1")];
|
||||
|
||||
// generate key pairs
|
||||
for (priv_key_path, pub_key_path) in priv_key_paths.iter().zip(pub_key_paths.iter()) {
|
||||
for (secret_key_path, pub_key_path) in secret_key_paths.iter().zip(public_key_paths.iter()) {
|
||||
let output = test_bin::get_test_bin(BIN)
|
||||
.args(["keygen", "private-key"])
|
||||
.arg(&priv_key_path)
|
||||
.arg("public-key")
|
||||
.args(["gen-keys", "--secret-key"])
|
||||
.arg(&secret_key_path)
|
||||
.arg("--public-key")
|
||||
.arg(&pub_key_path)
|
||||
.output()
|
||||
.expect("Failed to start {BIN}");
|
||||
|
||||
assert_eq!(String::from_utf8_lossy(&output.stdout), "");
|
||||
assert!(priv_key_path.is_file());
|
||||
assert!(secret_key_path.is_file());
|
||||
assert!(pub_key_path.is_file());
|
||||
}
|
||||
|
||||
@@ -69,12 +69,12 @@ fn check_exchange() {
|
||||
let port = find_udp_socket();
|
||||
let listen_addr = format!("localhost:{port}");
|
||||
let mut server = test_bin::get_test_bin(BIN)
|
||||
.args(["exchange", "private-key"])
|
||||
.arg(&priv_key_paths[0])
|
||||
.args(["exchange", "secret-key"])
|
||||
.arg(&secret_key_paths[0])
|
||||
.arg("public-key")
|
||||
.arg(&pub_key_paths[0])
|
||||
.arg(&public_key_paths[0])
|
||||
.args(["listen", &listen_addr, "verbose", "peer", "public-key"])
|
||||
.arg(&pub_key_paths[1])
|
||||
.arg(&public_key_paths[1])
|
||||
.arg("outfile")
|
||||
.arg(&shared_key_paths[0])
|
||||
.stdout(Stdio::null())
|
||||
@@ -82,14 +82,16 @@ fn check_exchange() {
|
||||
.spawn()
|
||||
.expect("Failed to start {BIN}");
|
||||
|
||||
std::thread::sleep(Duration::from_millis(500));
|
||||
|
||||
// start second process, the client
|
||||
let mut client = test_bin::get_test_bin(BIN)
|
||||
.args(["exchange", "private-key"])
|
||||
.arg(&priv_key_paths[1])
|
||||
.args(["exchange", "secret-key"])
|
||||
.arg(&secret_key_paths[1])
|
||||
.arg("public-key")
|
||||
.arg(&pub_key_paths[1])
|
||||
.arg(&public_key_paths[1])
|
||||
.args(["verbose", "peer", "public-key"])
|
||||
.arg(&pub_key_paths[0])
|
||||
.arg(&public_key_paths[0])
|
||||
.args(["endpoint", &listen_addr])
|
||||
.arg("outfile")
|
||||
.arg(&shared_key_paths[1])
|
||||
69
rp
@@ -43,6 +43,17 @@ dbg() {
|
||||
echo >&2 "$@"
|
||||
}
|
||||
|
||||
|
||||
detect_git_dir() {
|
||||
# https://stackoverflow.com/questions/3618078/pipe-only-stderr-through-a-filter
|
||||
(
|
||||
git -C "${scriptdir}" rev-parse --show-toplevel 3>&1 1>&2 2>&3 3>&- \
|
||||
| sed '
|
||||
/not a git repository/d;
|
||||
s/^/WARNING: /'
|
||||
) 3>&1 1>&2 2>&3 3>&-
|
||||
}
|
||||
|
||||
# Cleanup subsystem (sigterm)
|
||||
|
||||
cleanup_init() {
|
||||
@@ -141,9 +152,9 @@ genkey() {
|
||||
umask 077
|
||||
mkdir -p $(enquote "${skdir}")
|
||||
wg genkey > $(enquote "${skdir}"/wgsk)
|
||||
$(enquote "${binary}") keygen \\
|
||||
private-key $(enquote "${skdir}"/pqsk) \\
|
||||
public-key $(enquote "${skdir}"/pqpk)"
|
||||
$(enquote "${binary}") gen-keys \\
|
||||
-s $(enquote "${skdir}"/pqsk) \\
|
||||
-p $(enquote "${skdir}"/pqpk)"
|
||||
}
|
||||
|
||||
pubkey() {
|
||||
@@ -186,7 +197,7 @@ exchange() {
|
||||
lip="${listen%:*}";
|
||||
lport="${listen/*:/}";
|
||||
if [[ "$lip" = "$lport" ]]; then
|
||||
lip="[0::0]"
|
||||
lip="[::]"
|
||||
fi
|
||||
shift;;
|
||||
-h | -help | --help | help) usage; return 0;;
|
||||
@@ -198,15 +209,41 @@ exchange() {
|
||||
fatal "Needs at least one peer specified"
|
||||
fi
|
||||
|
||||
frag "
|
||||
# Create the Wireguard interface
|
||||
ip link add dev $(enquote "${dev}") type wireguard || true"
|
||||
# os dependent setup
|
||||
case "$OSTYPE" in
|
||||
linux-*) # could be linux-gnu or linux-musl
|
||||
frag "
|
||||
# Create the WireGuard interface
|
||||
ip link add dev $(enquote "${dev}") type wireguard || true"
|
||||
|
||||
cleanup "
|
||||
ip link del dev $(enquote "${dev}") || true"
|
||||
cleanup "
|
||||
ip link del dev $(enquote "${dev}") || true"
|
||||
|
||||
frag "
|
||||
ip link set dev $(enquote "${dev}") up"
|
||||
frag "
|
||||
ip link set dev $(enquote "${dev}") up"
|
||||
;;
|
||||
|
||||
freebsd*)
|
||||
frag "
|
||||
# load the WireGuard kernel module
|
||||
kldload -n if_wg || fatal 'Cannot load if_wg kernel module'"
|
||||
|
||||
frag "
|
||||
# Create the WireGuard interface
|
||||
ifconfig wg create name $(enquote "${dev}") || true"
|
||||
|
||||
cleanup "
|
||||
ifconfig $(enquote "${dev}") destroy || true"
|
||||
|
||||
frag "
|
||||
ifconfig $(enquote "${dev}") up"
|
||||
;;
|
||||
|
||||
*)
|
||||
fatal "Your system $OSTYPE is not yet supported. We are happy to receive patches to address this :)"
|
||||
;;
|
||||
|
||||
esac
|
||||
|
||||
frag "
|
||||
# Deploy the classic wireguard private key
|
||||
@@ -225,7 +262,7 @@ exchange() {
|
||||
frag_append "verbose"
|
||||
fi
|
||||
|
||||
frag_append_esc " private-key $(enquote "${skdir}/pqsk")"
|
||||
frag_append_esc " secret-key $(enquote "${skdir}/pqsk")"
|
||||
frag_append_esc " public-key $(enquote "${skdir}/pqpk")"
|
||||
|
||||
if test -n "${lport}"; then
|
||||
@@ -244,7 +281,7 @@ exchange() {
|
||||
local arg; arg="$1"; shift
|
||||
case "${arg}" in
|
||||
peer) set -- "peer" "$@"; break;; # Next peer
|
||||
endpoint) ip="${1%:*}"; port="${1/*:/}"; shift;;
|
||||
endpoint) ip="${1%:*}"; port="${1##*:}"; shift;;
|
||||
persistent-keepalive) keepalive="${1}"; shift;;
|
||||
allowed-ips) allowedips="${1}"; shift;;
|
||||
-h | -help | --help | help) usage; return 0;;
|
||||
@@ -314,8 +351,10 @@ main() {
|
||||
project_name="rosenpass"
|
||||
verbose=0
|
||||
scriptdir="$(dirname "${script}")"
|
||||
gitdir="$(git -C "${scriptdir}" rev-parse --show-toplevel 2>/dev/null)" || true
|
||||
nixdir="$(readlink -f result/bin/rp | grep -Pio '^/nix/store/[^/]+(?=/bin/[^/]+)')" || true
|
||||
gitdir="$(detect_git_dir)" || true
|
||||
if [[ -d /nix ]]; then
|
||||
nixdir="$(readlink -f result/bin/rp | grep -Pio '^/nix/store/[^/]+(?=/bin/[^/]+)')" || true
|
||||
fi
|
||||
binary="$(find_rosenpass_binary)"
|
||||
|
||||
# Parse command
|
||||
|
||||
646
src/main.rs
@@ -1,646 +0,0 @@
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use log::{error, info};
|
||||
use rosenpass::{
|
||||
attempt,
|
||||
coloring::{Public, Secret},
|
||||
multimatch,
|
||||
pqkem::{SKEM, KEM},
|
||||
protocol::{SPk, SSk, MsgBuf, PeerPtr, Server as CryptoServer, SymKey, Timing},
|
||||
sodium::sodium_init,
|
||||
util::{b64_reader, b64_writer, fmt_b64},
|
||||
};
|
||||
use std::{
|
||||
fs::{File, OpenOptions},
|
||||
io::{ErrorKind, Read, Write},
|
||||
net::{SocketAddr, ToSocketAddrs, UdpSocket},
|
||||
path::Path,
|
||||
process::{exit, Command, Stdio},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
/// Open a file writable
|
||||
pub fn fopen_w<P: AsRef<Path>>(path: P) -> Result<File> {
|
||||
Ok(OpenOptions::new()
|
||||
.read(false)
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.open(path)?)
|
||||
}
|
||||
/// Open a file readable
|
||||
pub fn fopen_r<P: AsRef<Path>>(path: P) -> Result<File> {
|
||||
Ok(OpenOptions::new()
|
||||
.read(true)
|
||||
.write(false)
|
||||
.create(false)
|
||||
.truncate(false)
|
||||
.open(path)?)
|
||||
}
|
||||
|
||||
pub trait ReadExactToEnd {
|
||||
fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()>;
|
||||
}
|
||||
|
||||
impl<R: Read> ReadExactToEnd for R {
|
||||
fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()> {
|
||||
let mut dummy = [0u8; 8];
|
||||
self.read_exact(buf)?;
|
||||
ensure!(self.read(&mut dummy)? == 0, "File too long!");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait LoadValue {
|
||||
fn load<P: AsRef<Path>>(path: P) -> Result<Self>
|
||||
where
|
||||
Self: Sized;
|
||||
}
|
||||
|
||||
pub trait LoadValueB64 {
|
||||
fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self>
|
||||
where
|
||||
Self: Sized;
|
||||
}
|
||||
|
||||
trait StoreValue {
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> Result<()>;
|
||||
}
|
||||
|
||||
trait StoreSecret {
|
||||
unsafe fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()>;
|
||||
}
|
||||
|
||||
impl<T: StoreValue> StoreSecret for T {
|
||||
unsafe fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||
self.store(path)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValue for Secret<N> {
|
||||
fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
let mut v = Self::random();
|
||||
let p = path.as_ref();
|
||||
fopen_r(p)?
|
||||
.read_exact_to_end(v.secret_mut())
|
||||
.with_context(|| format!("Could not load file {p:?}"))?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValueB64 for Secret<N> {
|
||||
fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
let mut v = Self::random();
|
||||
let p = path.as_ref();
|
||||
// This might leave some fragments of the secret on the stack;
|
||||
// in practice this is likely not a problem because the stack likely
|
||||
// will be overwritten by something else soon but this is not exactly
|
||||
// guaranteed. It would be possible to remedy this, but since the secret
|
||||
// data will linger in the linux page cache anyways with the current
|
||||
// implementation, going to great length to erase the secret here is
|
||||
// not worth it right now.
|
||||
b64_reader(&mut fopen_r(p)?)
|
||||
.read_exact(v.secret_mut())
|
||||
.with_context(|| format!("Could not load base64 file {p:?}"))?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreSecret for Secret<N> {
|
||||
unsafe fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||
std::fs::write(path, self.secret())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> LoadValue for Public<N> {
|
||||
fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
let mut v = Self::random();
|
||||
fopen_r(path)?.read_exact_to_end(&mut *v)?;
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl<const N: usize> StoreValue for Public<N> {
|
||||
fn store<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||
std::fs::write(path, **self)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! bail_usage {
|
||||
($args:expr, $($pt:expr),*) => {{
|
||||
error!($($pt),*);
|
||||
cmd_help()?;
|
||||
exit(1);
|
||||
}}
|
||||
}
|
||||
|
||||
macro_rules! ensure_usage {
|
||||
($args:expr, $ck:expr, $($pt:expr),*) => {{
|
||||
if !$ck {
|
||||
bail_usage!($args, $($pt),*);
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
macro_rules! mandatory_opt {
|
||||
($args:expr, $val:expr, $name:expr) => {{
|
||||
ensure_usage!($args, $val.is_some(), "{0} option is mandatory", $name)
|
||||
}};
|
||||
}
|
||||
|
||||
pub struct ArgsWalker {
|
||||
pub argv: Vec<String>,
|
||||
pub off: usize,
|
||||
}
|
||||
|
||||
impl ArgsWalker {
|
||||
pub fn get(&self) -> Option<&str> {
|
||||
self.argv.get(self.off).map(|s| s as &str)
|
||||
}
|
||||
|
||||
pub fn prev(&mut self) -> Option<&str> {
|
||||
assert!(self.off > 0);
|
||||
self.off -= 1;
|
||||
self.get()
|
||||
}
|
||||
|
||||
#[allow(clippy::should_implement_trait)]
|
||||
pub fn next(&mut self) -> Option<&str> {
|
||||
assert!(self.todo() > 0);
|
||||
self.off += 1;
|
||||
self.get()
|
||||
}
|
||||
|
||||
pub fn opt(&mut self, dst: &mut Option<String>) -> Result<()> {
|
||||
let cmd = &self.argv[self.off - 1];
|
||||
ensure_usage!(&self, self.todo() > 0, "Option {} takes a value", cmd);
|
||||
ensure_usage!(&self, dst.is_none(), "Cannot set {} multiple times.", cmd);
|
||||
*dst = Some(String::from(self.next().unwrap()));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn todo(&self) -> usize {
|
||||
self.argv.len() - self.off
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct WireguardOut {
|
||||
// impl KeyOutput
|
||||
dev: String,
|
||||
pk: String,
|
||||
extra_params: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct AppPeer {
|
||||
pub outfile: Option<String>,
|
||||
pub outwg: Option<WireguardOut>,
|
||||
pub tx_addr: Option<SocketAddr>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Verbosity {
|
||||
Quiet,
|
||||
Verbose,
|
||||
}
|
||||
|
||||
/// Holds the state of the application, namely the external IO
|
||||
#[derive(Debug)]
|
||||
pub struct AppServer {
|
||||
pub crypt: CryptoServer,
|
||||
pub sock: UdpSocket,
|
||||
pub peers: Vec<AppPeer>,
|
||||
pub verbosity: Verbosity,
|
||||
}
|
||||
|
||||
/// Index based pointer to a Peer
|
||||
#[derive(Debug)]
|
||||
pub struct AppPeerPtr(pub usize);
|
||||
|
||||
impl AppPeerPtr {
|
||||
/// Takes an index based handle and returns the actual peer
|
||||
pub fn lift(p: PeerPtr) -> Self {
|
||||
Self(p.0)
|
||||
}
|
||||
|
||||
/// Returns an index based handle to one Peer
|
||||
pub fn lower(&self) -> PeerPtr {
|
||||
PeerPtr(self.0)
|
||||
}
|
||||
|
||||
pub fn get_app<'a>(&self, srv: &'a AppServer) -> &'a AppPeer {
|
||||
&srv.peers[self.0]
|
||||
}
|
||||
|
||||
pub fn get_app_mut<'a>(&self, srv: &'a mut AppServer) -> &'a mut AppPeer {
|
||||
&mut srv.peers[self.0]
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum AppPollResult {
|
||||
DeleteKey(AppPeerPtr),
|
||||
SendInitiation(AppPeerPtr),
|
||||
SendRetransmission(AppPeerPtr),
|
||||
ReceivedMessage(usize, SocketAddr),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum KeyOutputReason {
|
||||
Exchanged,
|
||||
Stale,
|
||||
}
|
||||
|
||||
/// Catches errors, prints them through the logger, then exits
|
||||
pub fn main() {
|
||||
env_logger::init();
|
||||
match rosenpass_main() {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
error!("{e}");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Entry point to the whole program
|
||||
pub fn rosenpass_main() -> Result<()> {
|
||||
sodium_init()?;
|
||||
|
||||
let mut args = ArgsWalker {
|
||||
argv: std::env::args().collect(),
|
||||
off: 0, // skipping executable path
|
||||
};
|
||||
|
||||
// Command parsing
|
||||
match args.next() {
|
||||
Some("help") | Some("-h") | Some("-help") | Some("--help") => cmd_help()?,
|
||||
Some("keygen") => cmd_keygen(args)?,
|
||||
Some("exchange") => cmd_exchange(args)?,
|
||||
Some(cmd) => bail_usage!(&args, "No such command {}", cmd),
|
||||
None => bail_usage!(&args, "Expected a command!"),
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Print the usage information
|
||||
pub fn cmd_help() -> Result<()> {
|
||||
eprint!(include_str!("usage.md"), env!("CARGO_BIN_NAME"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Generate a keypair
|
||||
pub fn cmd_keygen(mut args: ArgsWalker) -> Result<()> {
|
||||
let mut sf: Option<String> = None;
|
||||
let mut pf: Option<String> = None;
|
||||
|
||||
// Arg parsing
|
||||
loop {
|
||||
match args.next() {
|
||||
Some("private-key") => args.opt(&mut sf)?,
|
||||
Some("public-key") => args.opt(&mut pf)?,
|
||||
Some(opt) => bail_usage!(&args, "Unknown option `{}`", opt),
|
||||
None => break,
|
||||
};
|
||||
}
|
||||
|
||||
mandatory_opt!(&args, sf, "private-key");
|
||||
mandatory_opt!(&args, pf, "private-key");
|
||||

    // Cmd
    let (mut ssk, mut spk) = (SSk::random(), SPk::random());
    unsafe {
        SKEM::keygen(ssk.secret_mut(), spk.secret_mut())?;
        ssk.store_secret(sf.unwrap())?;
        spk.store_secret(pf.unwrap())?;
    }

    Ok(())
}

pub fn cmd_exchange(mut args: ArgsWalker) -> Result<()> {
    // Argument parsing
    let mut sf: Option<String> = None;
    let mut pf: Option<String> = None;
    let mut listen: Option<String> = None;
    let mut verbosity = Verbosity::Quiet;

    // Global parameters
    loop {
        match args.next() {
            Some("private-key") => args.opt(&mut sf)?,
            Some("public-key") => args.opt(&mut pf)?,
            Some("listen") => args.opt(&mut listen)?,
            Some("verbose") => {
                verbosity = Verbosity::Verbose;
            }
            Some("peer") => {
                args.prev();
                break;
            }
            Some(opt) => bail_usage!(&args, "Unknown option `{}`", opt),
            None => break,
        };
    }

    mandatory_opt!(&args, sf, "private-key");
    mandatory_opt!(&args, pf, "public-key");

    let mut srv = std::boxed::Box::<AppServer>::new(AppServer::new(
        // sk, pk, addr
        SSk::load(&sf.unwrap())?,
        SPk::load(&pf.unwrap())?,
        listen.as_deref().unwrap_or("[0::0]:0"),
        verbosity,
    )?);

    // Peer parameters
    '_parseAllPeers: while args.todo() > 0 {
        let mut pf: Option<String> = None;
        let mut outfile: Option<String> = None;
        let mut outwg: Option<WireguardOut> = None;
        let mut endpoint: Option<String> = None;
        let mut pskf: Option<String> = None;

        args.next(); // skip "peer" starter itself

        'parseOnePeer: loop {
            match args.next() {
                // Done with this peer
                Some("peer") => {
                    args.prev();
                    break 'parseOnePeer;
                }
                None => break 'parseOnePeer,
                // Options
                Some("public-key") => args.opt(&mut pf)?,
                Some("endpoint") => args.opt(&mut endpoint)?,
                Some("preshared-key") => args.opt(&mut pskf)?,
                Some("outfile") => args.opt(&mut outfile)?,
                // Wireguard out
                Some("wireguard") => {
                    ensure_usage!(
                        &args,
                        outwg.is_none(),
                        "Cannot set wireguard output for the same peer multiple times."
                    );
                    ensure_usage!(&args, args.todo() >= 2, "Option wireguard takes two values");
                    let dev = String::from(args.next().unwrap());
                    let pk = String::from(args.next().unwrap());
                    let wg = outwg.insert(WireguardOut {
                        dev,
                        pk,
                        extra_params: Vec::new(),
                    });
                    '_parseWgOutExtra: loop {
                        match args.next() {
                            Some("peer") => {
                                args.prev();
                                break 'parseOnePeer;
                            }
                            None => break 'parseOnePeer,
                            Some(xtra) => wg.extra_params.push(xtra.to_string()),
                        };
                    }
                }
                // Invalid
                Some(opt) => bail_usage!(&args, "Unknown peer option `{}`", opt),
            };
        }

        mandatory_opt!(&args, pf, "public-key");
        ensure_usage!(
            &args,
            outfile.is_some() || outwg.is_some(),
            "Either of the outfile or wireguard option is mandatory"
        );

        let tx_addr = endpoint
            .map(|e| {
                e.to_socket_addrs()?
                    .next()
                    .context("Expected address in endpoint parameter")
            })
            .transpose()?;

        srv.add_peer(
            // psk, pk, outfile, outwg, tx_addr
            pskf.map(SymKey::load_b64).transpose()?,
            SPk::load(&pf.unwrap())?,
            outfile,
            outwg,
            tx_addr,
        )?;
    }

    srv.listen_loop()
}

impl AppServer {
    pub fn new<A: ToSocketAddrs>(
        sk: SSk,
        pk: SPk,
        addr: A,
        verbosity: Verbosity,
    ) -> Result<Self> {
        Ok(Self {
            crypt: CryptoServer::new(sk, pk),
            sock: UdpSocket::bind(addr)?,
            peers: Vec::new(),
            verbosity,
        })
    }

    pub fn verbose(&self) -> bool {
        matches!(self.verbosity, Verbosity::Verbose)
    }

    pub fn add_peer(
        &mut self,
        psk: Option<SymKey>,
        pk: SPk,
        outfile: Option<String>,
        outwg: Option<WireguardOut>,
        tx_addr: Option<SocketAddr>,
    ) -> Result<AppPeerPtr> {
        let PeerPtr(pn) = self.crypt.add_peer(psk, pk)?;
        assert!(pn == self.peers.len());
        self.peers.push(AppPeer {
            outfile,
            outwg,
            tx_addr,
        });
        Ok(AppPeerPtr(pn))
    }

    pub fn listen_loop(&mut self) -> Result<()> {
        const INIT_SLEEP: f64 = 0.01;
        const MAX_FAILURES: i32 = 10;
        let mut failure_cnt = 0;

        loop {
            let msgs_processed = 0usize;
            let err = match self.event_loop() {
                Ok(()) => return Ok(()),
                Err(e) => e,
            };

            // This should not happen…
            failure_cnt = if msgs_processed > 0 {
                0
            } else {
                failure_cnt + 1
            };
            let sleep = INIT_SLEEP * 2.0f64.powf(f64::from(failure_cnt - 1));
            let tries_left = MAX_FAILURES - (failure_cnt - 1);
            error!(
                "unexpected error after processing {} messages: {:?} {}",
                msgs_processed,
                err,
                err.backtrace()
            );
            if tries_left > 0 {
                error!("reinitializing networking in {sleep}! {tries_left} tries left.");
                std::thread::sleep(self.crypt.timebase.dur(sleep));
                continue;
            }

            bail!("too many network failures");
        }
    }

    pub fn event_loop(&mut self) -> Result<()> {
        let (mut rx, mut tx) = (MsgBuf::zero(), MsgBuf::zero());
        macro_rules! tx_maybe_with {
            ($peer:expr, $fn:expr) => {
                attempt!({
                    let p = $peer.get_app(self);
                    if let Some(addr) = p.tx_addr {
                        let len = $fn()?;
                        self.sock.send_to(&tx[..len], addr)?;
                    }
                    Ok(())
                })
            };
        }

        loop {
            use rosenpass::protocol::HandleMsgResult;
            use AppPollResult::*;
            use KeyOutputReason::*;
            match self.poll(&mut *rx)? {
                SendInitiation(peer) => tx_maybe_with!(peer, || self
                    .crypt
                    .initiate_handshake(peer.lower(), &mut *tx))?,
                SendRetransmission(peer) => tx_maybe_with!(peer, || self
                    .crypt
                    .retransmit_handshake(peer.lower(), &mut *tx))?,
                DeleteKey(peer) => self.output_key(peer, Stale, &SymKey::random())?,

                ReceivedMessage(len, addr) => {
                    multimatch!(self.crypt.handle_msg(&rx[..len], &mut *tx),
                        Err(ref e) =>
                            self.verbose().then(||
                                info!("error processing incoming message from {:?}: {:?} {}", addr, e, e.backtrace())),

                        Ok(HandleMsgResult { resp: Some(len), .. }) => {
                            self.sock.send_to(&tx[0..len], addr)?
                        },

                        Ok(HandleMsgResult { exchanged_with: Some(p), .. }) => {
                            let ap = AppPeerPtr::lift(p);
                            ap.get_app_mut(self).tx_addr = Some(addr);
                            // TODO: Maybe we should rather call the key "rosenpass output"?
                            self.output_key(ap, Exchanged, &self.crypt.osk(p)?)?;
                        }
                    );
                }
            };
        }
    }

    pub fn output_key(&self, peer: AppPeerPtr, why: KeyOutputReason, key: &SymKey) -> Result<()> {
        let peerid = peer.lower().get(&self.crypt).pidt()?;
        let ap = peer.get_app(self);

        if self.verbose() {
            let msg = match why {
                KeyOutputReason::Exchanged => "Exchanged key with peer",
                KeyOutputReason::Stale => "Erasing outdated key from peer",
            };
            info!("{} {}", msg, fmt_b64(&*peerid));
        }

        if let Some(of) = ap.outfile.as_ref() {
            // This might leave some fragments of the secret on the stack;
            // in practice this is likely not a problem because the stack likely
            // will be overwritten by something else soon but this is not exactly
            // guaranteed. It would be possible to remedy this, but since the secret
            // data will linger in the linux page cache anyways with the current
            // implementation, going to great length to erase the secret here is
            // not worth it right now.
            b64_writer(fopen_w(of)?).write_all(key.secret())?;
            let why = match why {
                KeyOutputReason::Exchanged => "exchanged",
                KeyOutputReason::Stale => "stale",
            };
            println!(
                "output-key peer {} key-file {} {}",
                fmt_b64(&*peerid),
                of,
                why
            );
        }

        if let Some(owg) = ap.outwg.as_ref() {
            let child = Command::new("wg")
                .arg("set")
                .arg(&owg.dev)
                .arg("peer")
                .arg(&owg.pk)
                .arg("preshared-key")
                .arg("/dev/stdin")
                .stdin(Stdio::piped())
                .args(&owg.extra_params)
                .spawn()?;
            b64_writer(child.stdin.unwrap()).write_all(key.secret())?;
        }

        Ok(())
    }

    pub fn poll(&mut self, rx_buf: &mut [u8]) -> Result<AppPollResult> {
        use rosenpass::protocol::PollResult as C;
        use AppPollResult as A;
        loop {
            return Ok(match self.crypt.poll()? {
                C::DeleteKey(PeerPtr(no)) => A::DeleteKey(AppPeerPtr(no)),
                C::SendInitiation(PeerPtr(no)) => A::SendInitiation(AppPeerPtr(no)),
                C::SendRetransmission(PeerPtr(no)) => A::SendRetransmission(AppPeerPtr(no)),
                C::Sleep(timeout) => match self.try_recv(rx_buf, timeout)? {
                    Some((len, addr)) => A::ReceivedMessage(len, addr),
                    None => continue,
                },
            });
        }
    }

    pub fn try_recv(&self, buf: &mut [u8], timeout: Timing) -> Result<Option<(usize, SocketAddr)>> {
        if timeout == 0.0 {
            return Ok(None);
        }
        self.sock
            .set_read_timeout(Some(Duration::from_secs_f64(timeout)))?;
        match self.sock.recv_from(buf) {
            Ok(x) => Ok(Some(x)),
            Err(e) => match e.kind() {
                ErrorKind::WouldBlock => Ok(None),
                ErrorKind::TimedOut => Ok(None),
                _ => Err(anyhow::Error::new(e)),
            },
        }
    }
}
51
src/usage.md
@@ -1,51 +0,0 @@
NAME

{0} – Perform post-quantum secure key exchanges for wireguard and other services.

SYNOPSIS

{0} [ COMMAND ] [ OPTIONS ]... [ ARGS ]...

DESCRIPTION
{0} performs cryptographic key exchanges that are secure against quantum computers and outputs the keys.
These keys can then be passed to various services such as wireguard or other vpn services
as pre-shared-keys to achieve security against attackers with quantum computers.

This is a research project and quantum computers are not thought to become practical in less than ten years.
If you are not specifically tasked with developing post-quantum secure systems, you probably do not need this tool.

COMMANDS

keygen private-key <file-path> public-key <file-path>
Generate a keypair to use in the exchange command later. Send the public-key file to your communication partner
and keep the private-key file a secret!

exchange private-key <file-path> public-key <file-path> [ OPTIONS ]... PEER...
Start a process to exchange keys with the specified peers. You should specify at least one peer.

OPTIONS
listen <ip>[:<port>]
Instructs {0} to listen on the specified interface and port. By default {0} will listen on all interfaces and select a random port.

verbose
Extra logging

PEER := peer public-key <file-path> [endpoint <ip>[:<port>]] [preshared-key <file-path>] [outfile <file-path>] [wireguard <dev> <peer> <extra_params>]
Instructs {0} to exchange keys with the given peer and write the resulting PSK into the given output file.
You must specify either the outfile or the wireguard output option.

endpoint <ip>[:<port>]
Specifies the address where the peer can be reached. This will be automatically updated after the first successful
key exchange with the peer. If this is unspecified, the peer must initiate the connection.

preshared-key <file-path>
You may specify a pre-shared key which will be mixed into the final secret.

outfile <file-path>
You may specify a file to write the exchanged keys to. If this option is specified, {0} will
write a notification to standard out every time the key is updated.

wireguard <dev> <peer> <extra_params>
This allows you to directly specify a wireguard peer to deploy the pre-shared-key to.
You may specify extra parameters you would pass to `wg set` besides the preshared-key parameter which is used by {0}.
This makes it possible to add peers entirely from {0}.
123
src/util.rs
@@ -1,123 +0,0 @@
use base64::{
    display::Base64Display as B64Display, read::DecoderReader as B64Reader,
    write::EncoderWriter as B64Writer,
};
use std::{
    borrow::{Borrow, BorrowMut},
    cmp::min,
    io::{Read, Write},
    time::{Duration, Instant},
};

#[inline]
pub fn xor_into(a: &mut [u8], b: &[u8]) {
    assert!(a.len() == b.len());
    for (av, bv) in a.iter_mut().zip(b.iter()) {
        *av ^= *bv;
    }
}

// TODO: Zeroize result?
/** Concatenate two byte arrays */
#[macro_export]
macro_rules! cat {
    ($len:expr; $($toks:expr),+) => {{
        let mut buf = [0u8; $len];
        let mut off = 0;
        $({
            let tok = $toks;
            let tr = ::std::borrow::Borrow::<[u8]>::borrow(tok);
            (&mut buf[off..(off + tr.len())]).copy_from_slice(tr);
            off += tr.len();
        })+
        assert!(off == buf.len(), "Size mismatch in cat!()");
        buf
    }}
}
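
// A minimal usage sketch for `cat!`, assuming it is invoked through its
// crate-root path (the macro is `#[macro_export]`). The declared length has to
// equal the summed length of the parts, otherwise the size assertion fires.
#[cfg(test)]
mod cat_example {
    #[test]
    fn joins_two_halves() {
        let (a, b) = ([1u8, 2, 3], [4u8, 5, 6]);
        let joined: [u8; 6] = crate::cat!(6; &a, &b);
        assert_eq!(joined, [1u8, 2, 3, 4, 5, 6]);
    }
}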

// TODO: consistent inout ordering
pub fn cpy<T: BorrowMut<[u8]> + ?Sized, F: Borrow<[u8]> + ?Sized>(src: &F, dst: &mut T) {
    dst.borrow_mut().copy_from_slice(src.borrow());
}

pub fn cpy_min<T: BorrowMut<[u8]> + ?Sized, F: Borrow<[u8]> + ?Sized>(src: &F, to: &mut T) {
    let src = src.borrow();
    let dst = to.borrow_mut();
    let len = min(src.len(), dst.len());
    dst[..len].copy_from_slice(&src[..len]);
}

/// Basically a try block: returns a `Result` and allows the use of the question mark operator inside
#[macro_export]
macro_rules! attempt {
    ($block:expr) => {
        (|| -> ::anyhow::Result<_> { $block })()
    };
}
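
// A minimal usage sketch for `attempt!`: the braced block may use `?`, and the
// whole expression evaluates to an `anyhow::Result` instead of propagating the
// error out of the surrounding function (this assumes anyhow, which the crate
// already depends on).
#[cfg(test)]
mod attempt_example {
    #[test]
    fn question_mark_stays_inside_the_block() {
        let r: ::anyhow::Result<u32> = crate::attempt!({
            let n: u32 = "42".parse()?;
            Ok(n + 1)
        });
        assert_eq!(r.unwrap(), 43);
    }
}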

const B64TYPE: base64::Config = base64::STANDARD;

pub fn fmt_b64<'a>(payload: &'a [u8]) -> B64Display<'a> {
    B64Display::<'a>::with_config(payload, B64TYPE)
}

pub fn b64_writer<W: Write>(w: W) -> B64Writer<W> {
    B64Writer::new(w, B64TYPE)
}

pub fn b64_reader<R: Read>(r: &mut R) -> B64Reader<'_, R> {
    B64Reader::new(r, B64TYPE)
}

// TODO remove this once std::cmp::max becomes const
pub const fn max_usize(a: usize, b: usize) -> usize {
    if a > b {
        a
    } else {
        b
    }
}

#[derive(Clone, Debug)]
pub struct Timebase(Instant);

impl Default for Timebase {
    fn default() -> Self {
        Self(Instant::now())
    }
}

impl Timebase {
    pub fn now(&self) -> f64 {
        self.0.elapsed().as_secs_f64()
    }

    pub fn dur(&self, t: f64) -> Duration {
        Duration::from_secs_f64(t)
    }
}

#[macro_export]
macro_rules! multimatch {
    ($val:expr) => {{ () }};
    ($val:expr, $($p:pat => $thn:expr),*) => {{
        let v = $val;
        ($(if let $p = v { Some($thn) } else { None }),*)
    }};
}
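
// A minimal usage sketch for `multimatch!`, assuming a `Copy` scrutinee: unlike
// `match`, every arm whose pattern matches is executed, and the expression
// yields one `Option` per arm.
#[cfg(test)]
mod multimatch_example {
    #[test]
    fn runs_every_matching_arm() {
        let (single_digit, four) = crate::multimatch!(4u32,
            0..=9 => "single digit",
            4 => "four"
        );
        assert_eq!((single_digit, four), (Some("single digit"), Some("four")));
    }
}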

pub fn mutating<T, F>(mut v: T, f: F) -> T
where
    F: Fn(&mut T),
{
    f(&mut v);
    v
}

pub fn sideeffect<T, F>(v: T, f: F) -> T
where
    F: Fn(&T),
{
    f(&v);
    v
}
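
// A minimal usage sketch for `mutating` and `sideeffect`: both pass the value
// to a closure and then hand it back, which keeps builder-style expressions
// terse.
#[cfg(test)]
mod combinator_example {
    use super::{mutating, sideeffect};

    #[test]
    fn builds_and_inspects_in_one_expression() {
        let v = mutating(Vec::new(), |v| v.push(17u8));
        let v = sideeffect(v, |v| assert_eq!(v.len(), 1));
        assert_eq!(v, vec![17u8]);
    }
}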