mirror of https://github.com/rosenpass/rosenpass.git
synced 2025-12-14 00:10:38 -08:00

Compare commits: v0.2.1 ... dev/rustif (2 commits)

| Author | SHA1       | Date |
| ------ | ---------- | ---- |
|        | 60155a5530 |      |
|        | 664c591138 |      |
@@ -1,200 +0,0 @@

#!/usr/bin/env nu

use log *

# cd to git root
cd (git rev-parse --show-toplevel)

# check if a subject depends on a potential dependency
def depends [
  subject:string # package to examine
  maybe_dep:string # maybe a dependency of subject
] {
  not ( nix why-depends --quiet --derivation $subject $maybe_dep | is-empty )
}

# get attribute names of the attribute set
def get-attr-names [
  expr: # nix expression to get attrNames of
] {
  nix eval --json $expr --apply builtins.attrNames | from json
}

def job-id [
  system:string,
  derivation:string,
] {
  $"($system)---($derivation)"
}

# map from nixos system to github runner type
let systems_map = {
  # aarch64-darwin
  # aarch64-linux
  i686-linux: ubuntu-latest,
  x86_64-darwin: macos-13,
  x86_64-linux: ubuntu-latest
}

let targets = (get-attr-names ".#packages"
  | par-each {|system| { $system : (get-attr-names $".#packages.($system)") } }
  | reduce {|it, acc| $acc | merge $it }
)

mut cachix_workflow = {
  name: "Nix",
  permissions: {contents: write},
  on: {
    pull_request: null,
    push: {branches: [main]}
  },
  jobs: {},
}

mut release_workflow = {
  name: "Release",
  permissions: {contents: write},
  on: { push: {tags: ["v*"]} },
  jobs: {},
}

let runner_setup = [
  {
    uses: "actions/checkout@v3"
  }
  {
    uses: "cachix/install-nix-action@v22",
    with: { nix_path: "nixpkgs=channel:nixos-unstable" }
  }
  {
    uses: "cachix/cachix-action@v12",
    with: {
      name: rosenpass,
      authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
    }
  }
]

for system in ($targets | columns) {
  if ($systems_map | get -i $system | is-empty) {
    log info $"skipping ($system), since there are no GH-Actions runners for it"
    continue
  }

  # lookup the correct runner for $system
  let runs_on = [ ($systems_map | get $system) ]

  # add jobs for all derivations
  let derivations = ($targets | get $system)
  for derivation in $derivations {

    if ($system == "i686-linux") and ($derivation | str contains "static") {
      log info $"skipping ($system).($derivation), due to liboqs 0.8 not present in oqs-sys"
      continue
    }

    if ($system == "i686-linux") and ($derivation | str contains "release-package") {
      log info $"skipping ($system).($derivation), due to liboqs 0.8 not present in oqs-sys"
      continue
    }

    # job_id for GH-Actions
    let id = ( job-id $system $derivation )

    # name displayed
    let name = $"($system).($derivation)"

    # collection of dependencies
    # TODO currently only considers dependencies on the same $system
    let needs = ($derivations
      | filter {|it| $it != $derivation and $it != "default" } # filter out self and default
      | par-each {|it| {
          name: $it, # the other derivation
          # does self depend on $it?
          needed: (depends $".#packages.($system).($derivation)" $".#packages.($system).($it)")
        } }
      | filter {|it| $it.needed}
      | each {|it| job-id $system $it.name}
    )

    mut new_job = {
      name: $"Build ($name)",
      "runs-on": $runs_on,
      needs: $needs,
      steps: ($runner_setup | append [
        {
          name: Build,
          run: $"nix build .#packages.($system).($derivation) --print-build-logs"
        }
      ])
    }
    $cachix_workflow.jobs = ($cachix_workflow.jobs | insert $id $new_job )
  }

  # add check job
  $cachix_workflow.jobs = ($cachix_workflow.jobs | insert $"($system)---check" {
    name: $"Run Nix checks on ($system)",
    "runs-on": $runs_on,
    steps: ($runner_setup | append {
      name: Check,
      run: "nix flake check . --print-build-logs"
    })
  })

  # add release job
  $release_workflow.jobs = ($release_workflow.jobs | insert $"($system)---release" {
    name: $"Build release artifacts for ($system)",
    "runs-on": $runs_on,
    steps: ($runner_setup | append [
      {
        name: "Build release",
        run: "nix build .#release-package --print-build-logs"
      }
      {
        name: Release,
        uses: "softprops/action-gh-release@v1",
        with: {
          draft: "${{ contains(github.ref_name, 'rc') }}",
          prerelease: "${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}",
          files: "result/*"
        }
      }
    ])
  })
}

# add whitepaper job with upload
let system = "x86_64-linux"
$cachix_workflow.jobs = ($cachix_workflow.jobs | insert $"($system)---whitepaper-upload" {
  name: $"Upload whitepaper ($system)",
  "runs-on": ($systems_map | get $system),
  "if": "${{ github.ref == 'refs/heads/main' }}",
  steps: ($runner_setup | append [
    {
      name: "Git add git sha and commit",
      run: "cd papers && ./tex/gitinfo2.sh && git add gitHeadInfo.gin"
    }
    {
      name: Build,
      run: $"nix build .#packages.($system).whitepaper --print-build-logs"
    }
    {
      name: "Deploy PDF artifacts",
      uses: "peaceiris/actions-gh-pages@v3",
      with: {
        github_token: "${{ secrets.GITHUB_TOKEN }}",
        publish_dir: result/,
        publish_branch: papers-pdf,
        force_orphan: true
      }
    }
  ])
})

log info "saving nix-cachix workflow"
$cachix_workflow | to yaml | save --force .github/workflows/nix.yaml
$release_workflow | to yaml | save --force .github/workflows/release.yaml

log info "prettify generated yaml"
prettier -w .github/workflows/

49  .github/workflows/doc-upload.yml  (vendored)
@@ -1,49 +0,0 @@

name: Update website docs

on:
  push:
    branches:
      - main
    paths:
      - "doc/**"

jobs:
  update-website:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Clone rosenpass-website repository
        uses: actions/checkout@v3
        with:
          repository: rosenpass/rosenpass-website
          ref: main
          path: rosenpass-website
          token: ${{ secrets.PRIVACC }}

      - name: Copy docs to website repo
        run: |
          cp -R doc/* rosenpass-website/static/docs/

      - name: Install mandoc
        run: |
          sudo apt-get update
          sudo apt-get install -y mandoc

      - name: Compile man pages to HTML
        run: |
          cd rosenpass-website/static/docs/
          for file in *.1; do
            mandoc -Thtml "$file" > "${file%.*}.html"
          done

      - name: Commit changes to website repo
        uses: EndBug/add-and-commit@v9
        with:
          author_name: GitHub Actions
          author_email: actions@github.com
          message: Update docs
          cwd: rosenpass-website/static/docs
          github_token: ${{ secrets.PRIVACC }}

390  .github/workflows/nix.yaml  (vendored)
@@ -1,346 +1,74 @@
-name: Nix
-permissions:
-  contents: write
-on:
-  pull_request: null
-  push:
-    branches:
-      - main
-jobs:
-  i686-linux---default:
-    name: Build i686-linux.default
-    runs-on:
-      - ubuntu-latest
-    needs:
-      - i686-linux---rosenpass
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.i686-linux.default --print-build-logs
-  i686-linux---rosenpass:
-    name: Build i686-linux.rosenpass
-    runs-on:
-      - ubuntu-latest
-    needs: []
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.i686-linux.rosenpass --print-build-logs
-  i686-linux---rosenpass-oci-image:
-    name: Build i686-linux.rosenpass-oci-image
-    runs-on:
-      - ubuntu-latest
-    needs:
-      - i686-linux---rosenpass
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.i686-linux.rosenpass-oci-image --print-build-logs
-  i686-linux---check:
-    name: Run Nix checks on i686-linux
-    runs-on:
-      - ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Check
-        run: nix flake check . --print-build-logs
-  x86_64-darwin---default:
-    name: Build x86_64-darwin.default
-    runs-on:
-      - macos-13
-    needs:
-      - x86_64-darwin---rosenpass
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-darwin.default --print-build-logs
-  x86_64-darwin---release-package:
-    name: Build x86_64-darwin.release-package
-    runs-on:
-      - macos-13
-    needs:
-      - x86_64-darwin---rosenpass
-      - x86_64-darwin---rosenpass-oci-image
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-darwin.release-package --print-build-logs
-  x86_64-darwin---rosenpass:
-    name: Build x86_64-darwin.rosenpass
-    runs-on:
-      - macos-13
-    needs: []
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-darwin.rosenpass --print-build-logs
-  x86_64-darwin---rosenpass-oci-image:
-    name: Build x86_64-darwin.rosenpass-oci-image
-    runs-on:
-      - macos-13
-    needs:
-      - x86_64-darwin---rosenpass
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-darwin.rosenpass-oci-image --print-build-logs
-  x86_64-darwin---check:
-    name: Run Nix checks on x86_64-darwin
-    runs-on:
-      - macos-13
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Check
-        run: nix flake check . --print-build-logs
-  x86_64-linux---default:
-    name: Build x86_64-linux.default
-    runs-on:
-      - ubuntu-latest
-    needs:
-      - x86_64-linux---rosenpass
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.default --print-build-logs
-  x86_64-linux---proof-proverif:
-    name: Build x86_64-linux.proof-proverif
-    runs-on:
-      - ubuntu-latest
-    needs:
-      - x86_64-linux---proverif-patched
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.proof-proverif --print-build-logs
-  x86_64-linux---proverif-patched:
-    name: Build x86_64-linux.proverif-patched
-    runs-on:
-      - ubuntu-latest
-    needs: []
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.proverif-patched --print-build-logs
-  x86_64-linux---release-package:
-    name: Build x86_64-linux.release-package
-    runs-on:
-      - ubuntu-latest
-    needs:
-      - x86_64-linux---rosenpass-static-oci-image
-      - x86_64-linux---rosenpass-static
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.release-package --print-build-logs
-  x86_64-linux---rosenpass:
-    name: Build x86_64-linux.rosenpass
-    runs-on:
-      - ubuntu-latest
-    needs: []
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.rosenpass --print-build-logs
-  x86_64-linux---rosenpass-oci-image:
-    name: Build x86_64-linux.rosenpass-oci-image
-    runs-on:
-      - ubuntu-latest
-    needs:
-      - x86_64-linux---rosenpass
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.rosenpass-oci-image --print-build-logs
-  x86_64-linux---rosenpass-static:
-    name: Build x86_64-linux.rosenpass-static
-    runs-on:
-      - ubuntu-latest
-    needs: []
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.rosenpass-static --print-build-logs
-  x86_64-linux---rosenpass-static-oci-image:
-    name: Build x86_64-linux.rosenpass-static-oci-image
-    runs-on:
-      - ubuntu-latest
-    needs:
-      - x86_64-linux---rosenpass-static
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.rosenpass-static-oci-image --print-build-logs
-  x86_64-linux---whitepaper:
-    name: Build x86_64-linux.whitepaper
-    runs-on:
-      - ubuntu-latest
-    needs: []
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build
-        run: nix build .#packages.x86_64-linux.whitepaper --print-build-logs
-  x86_64-linux---check:
-    name: Run Nix checks on x86_64-linux
-    runs-on:
-      - ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Check
-        run: nix flake check . --print-build-logs
-  x86_64-linux---whitepaper-upload:
-    name: Upload whitepaper x86_64-linux
-    runs-on: ubuntu-latest
-    if: ${{ github.ref == 'refs/heads/main' }}
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Git add git sha and commit
-        run: cd papers && ./tex/gitinfo2.sh && git add gitHeadInfo.gin
-      - name: Build
-        run: nix build .#packages.x86_64-linux.whitepaper --print-build-logs
-      - name: Deploy PDF artifacts
-        uses: peaceiris/actions-gh-pages@v3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: result/
-          publish_branch: papers-pdf
-          force_orphan: true
+name: Nix Related Actions
+permissions:
+  contents: write
+on:
+  pull_request:
+  push:
+    branches: [main]
+jobs:
+  build:
+    name: Build ${{ matrix.derivation }} on ${{ matrix.nix-system }}
+    runs-on:
+      - nix
+      - ${{ matrix.nix-system }}
+    strategy:
+      fail-fast: false
+      matrix:
+        nix-system:
+          - x86_64-linux
+          # - aarch64-linux
+        derivation:
+          - rosenpass
+          - rosenpass-static
+          - rosenpass-oci-image
+          - rosenpass-static-oci-image
+          - proof-proverif
+          - whitepaper
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Generate gitHeadInfo.gin for the whitepaper
+        if: ${{ matrix.derivation == 'whitepaper' }}
+        run: ( cd papers && ./tex/gitinfo2.sh && git add gitHeadInfo.gin )
+      - name: Build ${{ matrix.derivation }}@${{ matrix.nix-system }}
+        run: |
+          # build the package
+          nix build .#packages.${{ matrix.nix-system }}.${{ matrix.derivation }} --print-build-logs
+
+          # copy over the results
+          if [[ -f $(readlink --canonicalize result ) ]]; then
+            mkdir -- ${{ matrix.derivation }}-${{ matrix.nix-system }}
+          fi
+          cp --recursive -- $(readlink --canonicalize result) ${{ matrix.derivation }}-${{ matrix.nix-system }}
+          chmod --recursive ug+rw -- ${{ matrix.derivation }}-${{ matrix.nix-system }}
+
+          # add version information
+          git rev-parse --abbrev-ref HEAD > ${{ matrix.derivation }}-${{ matrix.nix-system }}/git-version
+          git rev-parse HEAD > ${{ matrix.derivation }}-${{ matrix.nix-system }}/git-sha
+
+          # override the `rp` script to keep compatible with non-nix systems
+          if [[ -f ${{ matrix.derivation }}-${{ matrix.nix-system }}/bin/rp ]]; then
+            cp --force -- rp ${{ matrix.derivation }}-${{ matrix.nix-system }}/bin/
+          fi
+      - name: Upload build results
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ matrix.derivation }}-${{ matrix.nix-system }}
+          path: ${{ matrix.derivation }}-${{ matrix.nix-system }}
+      - name: Deploy PDF artifacts
+        if: ${{ matrix.derivation == 'whitepaper' && github.ref == 'refs/heads/main' }}
+        uses: peaceiris/actions-gh-pages@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ${{ matrix.derivation }}-${{ matrix.nix-system }}
+          publish_branch: papers-pdf
+          force_orphan: true
+  checks:
+    name: Run Nix checks
+    runs-on: nixos
+    needs: build
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run Checks
+        run: nix flake check . --print-build-logs

88  .github/workflows/qc.yaml  (vendored)
@@ -1,4 +1,4 @@
-name: QC
+name: Quality Control
 on:
   pull_request:
   push:
@@ -12,31 +12,15 @@ jobs:
   prettier:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
       - uses: actionsx/prettier@v2
         with:
           args: --check .

-  shellcheck:
-    name: Shellcheck
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Run ShellCheck
-        uses: ludeeus/action-shellcheck@master
-
-  cargo-audit:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions-rs/audit-check@v1
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-
   cargo-clippy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
       - uses: actions/cache@v3
         with:
           path: |
@@ -47,73 +31,17 @@ jobs:
             target/
           key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
       - run: rustup component add clippy
-      - name: Install libsodium
+      - name: Install xmllint
         run: sudo apt-get install -y libsodium-dev
       - uses: actions-rs/clippy-check@v1
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
           args: --all-features

-  cargo-doc:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/bin/
-            ~/.cargo/registry/index/
-            ~/.cargo/registry/cache/
-            ~/.cargo/git/db/
-            target/
-          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
-      - run: rustup component add clippy
-      - name: Install libsodium
-        run: sudo apt-get install -y libsodium-dev
-      # `--no-deps` used as a workaround for a rust compiler bug. See:
-      # - https://github.com/rosenpass/rosenpass/issues/62
-      # - https://github.com/rust-lang/rust/issues/108378
-      - run: RUSTDOCFLAGS="-D warnings" cargo doc --no-deps --document-private-items
-
-  cargo-test:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/bin/
-            ~/.cargo/registry/index/
-            ~/.cargo/registry/cache/
-            ~/.cargo/git/db/
-            target/
-          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
-      - name: Install libsodium
-        run: sudo apt-get install -y libsodium-dev
-      # liboqs requires quite a lot of stack memory, thus we adjust
-      # the default stack size picked for new threads (which is used
-      # by `cargo test`) to be _big enough_. Setting it to 8 MiB
-      - run: RUST_MIN_STACK=8388608 cargo test
-
-  cargo-test-nix-devshell-x86_64-linux:
-    runs-on:
-      - ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/bin/
-            ~/.cargo/registry/index/
-            ~/.cargo/registry/cache/
-            ~/.cargo/git/db/
-            target/
-          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
-      - uses: cachix/install-nix-action@v21
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - run: nix develop --command cargo test
+  cargo-audit:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v1
+      - uses: actions-rs/audit-check@v1
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}

71  .github/workflows/release.yaml  (vendored)
@@ -3,69 +3,28 @@ permissions:
   contents: write
 on:
   push:
-    tags:
-      - v*
+    tags: ["v*"]
 jobs:
-  i686-linux---release:
-    name: Build release artifacts for i686-linux
+  release:
+    name: Release for ${{ matrix.nix-system }}
     runs-on:
-      - ubuntu-latest
+      - nix
+      - ${{ matrix.nix-system }}
+    strategy:
+      fail-fast: false
+      matrix:
+        nix-system:
+          - x86_64-linux
+          # - aarch64-linux
     steps:
       - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build release
+      - name: Build release-package for ${{ matrix.nix-system }}
         run: nix build .#release-package --print-build-logs
       - name: Release
         uses: softprops/action-gh-release@v1
         with:
           draft: ${{ contains(github.ref_name, 'rc') }}
           prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
-          files: result/*
-  x86_64-darwin---release:
-    name: Build release artifacts for x86_64-darwin
-    runs-on:
-      - macos-13
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build release
-        run: nix build .#release-package --print-build-logs
-      - name: Release
-        uses: softprops/action-gh-release@v1
-        with:
-          draft: ${{ contains(github.ref_name, 'rc') }}
-          prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
-          files: result/*
-  x86_64-linux---release:
-    name: Build release artifacts for x86_64-linux
-    runs-on:
-      - ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: cachix/install-nix-action@v22
-        with:
-          nix_path: nixpkgs=channel:nixos-unstable
-      - uses: cachix/cachix-action@v12
-        with:
-          name: rosenpass
-          authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
-      - name: Build release
-        run: nix build .#release-package --print-build-logs
-      - name: Release
-        uses: softprops/action-gh-release@v1
-        with:
-          draft: ${{ contains(github.ref_name, 'rc') }}
-          prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
-          files: result/*
+          files: |
+            result/*

@@ -1,17 +0,0 @@

# TODO use CI_JOB_TOKEN once https://gitlab.com/groups/gitlab-org/-/epics/6310 is fixed
pull-from-gh:
  only: ["schedules"]
  variables:
    REMOTE: "https://github.com/rosenpass/rosenpass.git"
    LOCAL: " git@gitlab.com:rosenpass/rosenpass.git"
    GIT_STRATEGY: none
  before_script:
    - mkdir ~/.ssh/
    - echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts
    - echo "$REPO_SSH_KEY" > ~/.ssh/id_ed25519
    - chmod 600 --recursive ~/.ssh/
    - git config --global user.email "ci@gitlab.com"
    - git config --global user.name "CI"
  script:
    - git clone --mirror $REMOTE rosenpass
    - cd rosenpass && git push --mirror $LOCAL

970  Cargo.lock  (generated)
File diff suppressed because it is too large.

Cargo.toml
@@ -1,10 +1,6 @@
 [workspace]
-resolver = "2"

 members = [
   "rosenpass",
+  "rp",
 ]
-
-[workspace.metadata.release]
-# ensure that adding `--package` as argument to `cargo release` still creates version tags in the form of `vx.y.z`
-tag-prefix = ""
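
The removed [workspace.metadata.release] table concerns cargo-release's tag naming.
A hedged illustration of the intent described in the comment (the version number
and package choice are made up for the example, not taken from the repository):

    # With tag-prefix = "" a package-scoped release still tags the commit as
    # v0.2.1 rather than rosenpass-v0.2.1.
    cargo release --package rosenpass 0.2.1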
2  config-examples/.gitignore  (vendored)
@@ -1,2 +0,0 @@

peer-*-*-key
peer-*-out

@@ -1,18 +0,0 @@

public_key = "peer-a-public-key"
secret_key = "peer-a-secret-key"
listen = ["[::]:10001"]
verbosity = "Quiet"

[[peers]]
public_key = "peer-b-public-key"
endpoint = "localhost:10002"
key_out = "peer-a-rp-out-key"
# exchange_command = [
#   "wg",
#   "set",
#   "wg0",
#   "peer",
#   "<PEER_ID>",
#   "preshared-key",
#   "/dev/stdin",
# ]

@@ -1,18 +0,0 @@

public_key = "peer-b-public-key"
secret_key = "peer-b-secret-key"
listen = ["[::]:10002"]
verbosity = "Quiet"

[[peers]]
public_key = "peer-a-public-key"
endpoint = "localhost:10001"
key_out = "peer-b-rp-out-key"
# exchange_command = [
#   "wg",
#   "set",
#   "wg0",
#   "peer",
#   "<PEER_ID>",
#   "preshared-key",
#   "/dev/stdin",
# ]

105  doc/rosenpass.1
@@ -1,105 +0,0 @@

.Dd $Mdocdate$
.Dt ROSENPASS 1
.Os
.Sh NAME
.Nm rosenpass
.Nd builds post-quantum-secure VPNs
.Sh SYNOPSIS
.Nm
.Op COMMAND
.Op Ar OPTIONS ...
.Op Ar ARGS ...
.Sh DESCRIPTION
.Nm
performs cryptographic key exchanges that are secure against quantum-computers
and then outputs the keys.
These keys can then be passed to various services, such as wireguard or other
vpn services, as pre-shared-keys to achieve security against attackers with
quantum computers.
.Pp
This is a research project and quantum computers are not thought to become
practical in fewer than ten years.
If you are not specifically tasked with developing post-quantum secure systems,
you probably do not need this tool.
.Ss COMMANDS
.Bl -tag -width Ds
.It Ar keygen private-key <file-path> public-key <file-path>
Generate a keypair to use in the exchange command later.
Send the public-key file to your communication partner and keep the private-key
file secret!
.It Ar exchange private-key <file-path> public-key <file-path> [ OPTIONS ] PEERS
Start a process to exchange keys with the specified peers.
You should specify at least one peer.
.Pp
Its
.Ar OPTIONS
are as follows:
.Bl -tag -width Ds
.It Ar listen <ip>[:<port>]
Instructs
.Nm
to listen on the specified interface and port.
By default,
.Nm
will listen on all interfaces and select a random port.
.It Ar verbose
Extra logging.
.El
.El
.Ss PEER
Each
.Ar PEER
is defined as follows:
.Qq peer public-key <file-path> [endpoint <ip>[:<port>]] [preshared-key <file-path>] [outfile <file-path>] [wireguard <dev> <peer> <extra_params>]
.Pp
Providing a
.Ar PEER
instructs
.Nm
to exchange keys with the given peer and write the resulting PSK into the given
output file.
You must either specify the outfile or wireguard output option.
.Pp
The parameters of
.Ar PEER
are as follows:
.Bl -tag -width Ds
.It Ar endpoint <ip>[:<port>]
Specifies the address where the peer can be reached.
This will be automatically updated after the first successful key exchange with
the peer.
If this is unspecified, the peer must initiate the connection.
.It Ar preshared-key <file-path>
You may specify a pre-shared key which will be mixed into the final secret.
.It Ar outfile <file-path>
You may specify a file to write the exchanged keys to.
If this option is specified,
.Nm
will write a notification to standard out every time the key is updated.
.It Ar wireguard <dev> <peer> <extra_params>
This allows you to directly specify a wireguard peer to deploy the
pre-shared-key to.
You may specify extra parameters you would pass to
.Qq wg set
besides the preshared-key parameter which is used by
.Nm .
This makes it possible to add peers entirely from
.Nm .
.El
.Sh EXIT STATUS
.Ex -std
.Sh SEE ALSO
.Xr rp 1 ,
.Xr wg 1
.Sh STANDARDS
This tool is the reference implementation of the Rosenpass protocol, written
by Karolin Varner, Benjamin Lipp, Wanja Zaeske, and Lisa Schmidt.
.Sh AUTHORS
Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
.Pp
This manual page was written by
.An Emil Engler
.Sh BUGS
The bugs are tracked at
.Lk https://github.com/rosenpass/rosenpass/issues .
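
A minimal usage sketch of the two commands documented in the man page above (key
file names, addresses, and ports are illustrative, not taken from the repository):

    # Generate a keypair; keep the private key secret, hand the public key to the peer.
    rosenpass keygen private-key rosenpass-secret public-key rosenpass-public

    # Exchange keys with one peer and write the resulting PSK to an output file.
    rosenpass exchange private-key rosenpass-secret public-key rosenpass-public \
        listen 0.0.0.0:9999 \
        peer public-key peer-rosenpass-public endpoint 192.0.2.1:9999 outfile peer-psk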
119  doc/rp.1
@@ -1,119 +0,0 @@

.Dd $Mdocdate$
.Dt RP 1
.Os
.Sh NAME
.Nm rp
.Nd high-level interface to rosenpass
.Sh SYNOPSIS
.Nm
.Op Ar explain
.Op Ar verbose
.Ar genkey Ar ... | Ar pubkey ... | Ar exchange ...
.Nm
.Op ...
.Ar genkey PRIVATE_KEYS_DIR
.Nm
.Op ...
.Ar pubkey Ar PRIVATE_KEYS_DIR Ar PUBLIC_KEYS_DIR
.Nm
.Op ...
.\" Splitting this across several lines
.Ar exchange Ar PRIVATE_KEYS_DIR
.Op dev <device>
.Op listen <ip>:<port>
.\" Because the peer argument is complicated, it would be heel to represent it
.\" in mdoc... Using an ugly hack instead, thereby losing semantic.
[peer PUBLIC_KEYS_DIR [endpoint <ip>:<port>] [persistent-keepalive <interval>]
[allowed-ips <ip1>/<cidr1>[,<ip2>/<cidr2>] ...]] ...
.Sh DESCRIPTION
The
.Nm
program
is used to build a VPN with WireGuard and Rosenpass.
.Pp
The optional
.Op explain
and
.Op verbose
options can be used to obtain further help or to enable a detailed view on the
operations, respectively.
.Ss COMMANDS
.Bl -tag -width Ds
.It Ar genkey Ar PRIVATE_KEYS_DIR
Creates a new directory with appropriate permissions and generates all the
necessary private keys required for a peer to participate in a rosenpass
connection.
.It Ar pubkey Ar PRIVATE_KEYS_DIR Ar PUBLIC_KEYS_DIR
Creates a fresh directory at
.Ar PUBLIC_KEYS_DIR ,
which contains the extracted public keys from the private keys generated by
.Ar genkey
and located inside
.Ar PRIVATE_KEYS_DIR .
.It Ar exchange Ar PRIVATE_KEYS_DIR [dev <device>] [listen <ip>:<port>] [PEERS]
Starts the VPN on interface
.Ar device ,
listening on the provided IP and port combination, allowing connections from
.Ar PEERS .
.El
.Sh EXIT STATUS
.Ex -std
.Sh EXAMPLES
In this example, we will assume that the server has an interface bound to
192.168.0.1, that accepts incoming connections on port 9999/UDP for Rosenpass
and port 10000/UDP for WireGuard.
.Pp
To create a VPN connection, start by generating secret keys on both hosts.
.Bd -literal -offset indent
rp genkey server.rosenpass-secret
rp genkey client.rosenpass-secret
.Ed
.Pp
Extract the public keys:
.Bd -literal -offset indent
rp pubkey server.rosenpass-secret server.rosenpass-public
rp pubkey client.rosenpass-secret client.rosenpass-public
.Ed
.Pp
Copy the
.Qq -public
directories to the other peers and then start the VPN.
On the server:
.Bd -literal -offset indent
sudo rp exchange server.rosenpass-secret dev rosenpass0 listen 192.168.0.1:9999 \\
  peer client.rosenpass-public allowed-ips fe80::/64
.Ed
.Pp
On the client:
.Bd -literal -offset indent
sudo rp exchange client.rosenpass-secret dev rosenpass 0 \\
  peer server.rosenpass-public endpoint 192.168.0.1:9999 allowed-ips fe80::/64
.Ed
.Pp
Assign IP addresses:
.Bd -literal -offset indent
sudo ip a add fe80::1/64 dev rosenpass0 # Server
sudo ip a add fe80::2/64 dev rosenpass0 # Client
.Ed
.Pp
Test the connection by pinging the server on the client machine:
.Bd -literal -offset indent
ping fe80::1%rosenpass0 # Client
.Ed
.Pp
You can watch how rosenpass replaces the WireGuard PSK with the following:
.Bd -literal -offset indent
watch -n 0.2 'wg show all; wg show all preshared-keys'
.Ed
.Sh SEE ALSO
.Xr rosenpass 1 ,
.Xr wg 1
.Sh AUTHORS
Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
.Pp
This manual page was written by
.An Emil Engler
.Sh BUGS
The bugs are tracked at
.Lk https://github.com/rosenpass/rosenpass/issues .

81  flake.lock  (generated)
@@ -8,11 +8,11 @@
         "rust-analyzer-src": "rust-analyzer-src"
       },
       "locked": {
-        "lastModified": 1699770036,
-        "narHash": "sha256-bZmI7ytPAYLpyFNgj5xirDkKuAniOkj1xHdv5aIJ5GM=",
+        "lastModified": 1674240251,
+        "narHash": "sha256-AVMmf/CtcGensTZmMicToDpOwySEGNKYgRPC7lu3m8w=",
         "owner": "nix-community",
         "repo": "fenix",
-        "rev": "81ab0b4f7ae9ebb57daa0edf119c4891806e4d3a",
+        "rev": "d8067f4d1d3d30732703209bec5ca7d62aaececc",
         "type": "github"
       },
       "original": {
@@ -22,15 +22,12 @@
       }
     },
     "flake-utils": {
-      "inputs": {
-        "systems": "systems"
-      },
       "locked": {
-        "lastModified": 1694529238,
-        "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+        "lastModified": 1667395993,
+        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
+        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
         "type": "github"
       },
       "original": {
@@ -39,33 +36,13 @@
         "type": "github"
       }
     },
-    "naersk": {
-      "inputs": {
-        "nixpkgs": [
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1698420672,
-        "narHash": "sha256-/TdeHMPRjjdJub7p7+w55vyABrsJlt5QkznPYy55vKA=",
-        "owner": "nix-community",
-        "repo": "naersk",
-        "rev": "aeb58d5e8faead8980a807c840232697982d47b9",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-community",
-        "repo": "naersk",
-        "type": "github"
-      }
-    },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1698846319,
-        "narHash": "sha256-4jyW/dqFBVpWFnhl0nvP6EN4lP7/ZqPxYRjl6var0Oc=",
+        "lastModified": 1672968032,
+        "narHash": "sha256-26Jns3GmHem44a06UN5Rj/KOD9qNJThyQrom02Ijur8=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "34bdaaf1f0b7fb6d9091472edc968ff10a8c2857",
+        "rev": "2dea8991d89b9f1e78d874945f78ca15f6954289",
         "type": "github"
       },
       "original": {
@@ -73,22 +50,37 @@
         "type": "indirect"
       }
     },
+    "nixpkgs-unstable": {
+      "locked": {
+        "lastModified": 1676496762,
+        "narHash": "sha256-GFAxjaTgh8KJ8q7BYaI4EVGI5K98ooW70fG/83rSb08=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "1bddde315297c092712b0ef03d9def7a474b28ae",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
     "root": {
       "inputs": {
         "fenix": "fenix",
         "flake-utils": "flake-utils",
-        "naersk": "naersk",
-        "nixpkgs": "nixpkgs"
+        "nixpkgs": "nixpkgs",
+        "nixpkgs-unstable": "nixpkgs-unstable"
       }
     },
     "rust-analyzer-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1699715108,
-        "narHash": "sha256-yPozsobJU55gj+szgo4Lpcg1lHvGQYAT6Y4MrC80mWE=",
+        "lastModified": 1674162026,
+        "narHash": "sha256-iY0bxoVE7zAZmp0BB/m5hZW5pWHUfgntDvc1m2zyt/U=",
         "owner": "rust-lang",
         "repo": "rust-analyzer",
-        "rev": "5fcf5289e726785d20d3aa4d13d90a43ed248e83",
+        "rev": "6e52c64031825920983515b9e975e93232739f7f",
         "type": "github"
       },
       "original": {
@@ -97,21 +89,6 @@
         "repo": "rust-analyzer",
         "type": "github"
       }
-    },
-    "systems": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
     }
   },
   "root": "root",

146
flake.nix
146
flake.nix
@@ -1,11 +1,8 @@
|
|||||||
{
|
{
|
||||||
inputs = {
|
inputs = {
|
||||||
|
nixpkgs-unstable.url = "github:NixOS/nixpkgs";
|
||||||
flake-utils.url = "github:numtide/flake-utils";
|
flake-utils.url = "github:numtide/flake-utils";
|
||||||
|
|
||||||
# for quicker rust builds
|
|
||||||
naersk.url = "github:nix-community/naersk";
|
|
||||||
naersk.inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
|
|
||||||
# for rust nightly with llvm-tools-preview
|
# for rust nightly with llvm-tools-preview
|
||||||
fenix.url = "github:nix-community/fenix";
|
fenix.url = "github:nix-community/fenix";
|
||||||
fenix.inputs.nixpkgs.follows = "nixpkgs";
|
fenix.inputs.nixpkgs.follows = "nixpkgs";
|
||||||
@@ -22,15 +19,12 @@
|
|||||||
"aarch64-linux"
|
"aarch64-linux"
|
||||||
|
|
||||||
# unsuported best-effort
|
# unsuported best-effort
|
||||||
"i686-linux"
|
|
||||||
"x86_64-darwin"
|
"x86_64-darwin"
|
||||||
"aarch64-darwin"
|
"aarch64-darwin"
|
||||||
# "x86_64-windows"
|
# "x86_64-windows"
|
||||||
]
|
]
|
||||||
(system:
|
(system:
|
||||||
let
|
let
|
||||||
lib = nixpkgs.lib;
|
|
||||||
|
|
||||||
# normal nixpkgs
|
# normal nixpkgs
|
||||||
pkgs = import nixpkgs {
|
pkgs = import nixpkgs {
|
||||||
inherit system;
|
inherit system;
|
||||||
@@ -53,17 +47,14 @@
|
|||||||
)
|
)
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
||||||
# parsed Cargo.toml
|
# parsed Cargo.toml
|
||||||
cargoToml = builtins.fromTOML (builtins.readFile ./rosenpass/Cargo.toml);
|
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
|
||||||
|
|
||||||
# source files relevant for rust
|
# source files relevant for rust
|
||||||
src = pkgs.lib.sources.sourceFilesBySuffices ./. [
|
src = pkgs.lib.sourceByRegex ./. [
|
||||||
".lock"
|
"Cargo\\.(toml|lock)"
|
||||||
".rs"
|
"(src|benches)(/.*\\.(rs|md))?"
|
||||||
".toml"
|
"rp"
|
||||||
];
|
];
|
||||||
|
|
||||||
# builds a bin path for all dependencies for the `rp` shellscript
|
# builds a bin path for all dependencies for the `rp` shellscript
|
||||||
rpBinPath = p: with p; lib.makeBinPath [
|
rpBinPath = p: with p; lib.makeBinPath [
|
||||||
coreutils
|
coreutils
|
||||||
@@ -71,55 +62,23 @@
|
|||||||
gawk
|
gawk
|
||||||
wireguard-tools
|
wireguard-tools
|
||||||
];
|
];
|
||||||
|
|
||||||
# a function to generate a nix derivation for rosenpass against any
|
# a function to generate a nix derivation for rosenpass against any
|
||||||
# given set of nixpkgs
|
# given set of nixpkgs
|
||||||
rpDerivation = p:
|
rpDerivation = p:
|
||||||
let
|
let
|
||||||
# whether we want to build a statically linked binary
|
isStatic = p.stdenv.hostPlatform.isStatic;
|
||||||
isStatic = p.targetPlatform.isStatic;
|
|
||||||
|
|
||||||
# the rust target of `p`
|
|
||||||
target = p.rust.toRustTargetSpec p.targetPlatform;
|
|
||||||
|
|
||||||
# convert a string to shout case
|
|
||||||
shout = string: builtins.replaceStrings [ "-" ] [ "_" ] (pkgs.lib.toUpper string);
|
|
||||||
|
|
||||||
# suitable Rust toolchain
|
|
||||||
toolchain = with inputs.fenix.packages.${system}; combine [
|
|
||||||
stable.cargo
|
|
||||||
stable.rustc
|
|
||||||
targets.${target}.stable.rust-std
|
|
||||||
];
|
|
||||||
|
|
||||||
# naersk with a custom toolchain
|
|
||||||
naersk = pkgs.callPackage inputs.naersk {
|
|
||||||
cargo = toolchain;
|
|
||||||
rustc = toolchain;
|
|
||||||
};
|
|
||||||
|
|
||||||
# used to trick the build.rs into believing that CMake was ran **again**
|
|
||||||
fakecmake = pkgs.writeScriptBin "cmake" ''
|
|
||||||
#! ${pkgs.stdenv.shell} -e
|
|
||||||
true
|
|
||||||
'';
|
|
||||||
in
|
in
|
||||||
naersk.buildPackage
|
p.rustPlatform.buildRustPackage {
|
||||||
{
|
|
||||||
# metadata and source
|
# metadata and source
|
||||||
name = cargoToml.package.name;
|
pname = cargoToml.package.name;
|
||||||
version = cargoToml.package.version;
|
version = cargoToml.package.version;
|
||||||
inherit src;
|
inherit src;
|
||||||
|
cargoLock = {
|
||||||
cargoBuildOptions = x: x ++ [ "-p" "rosenpass" ];
|
lockFile = src + "/Cargo.lock";
|
||||||
cargoTestOptions = x: x ++ [ "-p" "rosenpass" ];
|
};
|
||||||
|
|
||||||
doCheck = true;
|
|
||||||
|
|
||||||
nativeBuildInputs = with pkgs; [
|
nativeBuildInputs = with pkgs; [
|
||||||
p.stdenv.cc
|
|
||||||
cmake # for oqs build in the oqs-sys crate
|
cmake # for oqs build in the oqs-sys crate
|
||||||
mandoc # for the built-in manual
|
|
||||||
makeWrapper # for the rp shellscript
|
makeWrapper # for the rp shellscript
|
||||||
pkg-config # let libsodium-sys-stable find libsodium
|
pkg-config # let libsodium-sys-stable find libsodium
|
||||||
removeReferencesTo
|
removeReferencesTo
|
||||||
@@ -127,63 +86,36 @@
 ];
 buildInputs = with p; [ bash libsodium ];

-override = x: {
+# otherwise pkg-config tries to link non-existent dynamic libs
+PKG_CONFIG_ALL_STATIC = true;

+# nix defaults to building for aarch64 _without_ the armv8-a
+# crypto extensions, but liboqs depens on these
 preBuild =
-# nix defaults to building for aarch64 _without_ the armv8-a crypto
-# extensions, but liboqs depens on these
-(lib.optionalString (system == "aarch64-linux") ''
+if system == "aarch64-linux" then ''
 NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -march=armv8-a+crypto"
-''
-);
+'' else "";

-# fortify is only compatible with dynamic linking
-hardeningDisable = lib.optional isStatic "fortify";
-};

-overrideMain = x: {
-# CMake detects that it was served a _foreign_ target dir, and CMake
-# would be executed again upon the second build step of naersk.
-# By adding our specially optimized CMake version, we reduce the cost
-# of recompilation by 99 % while, while avoiding any CMake errors.
-nativeBuildInputs = [ (lib.hiPrio fakecmake) ] ++ x.nativeBuildInputs;

-# make sure that libc is linked, under musl this is not the case per
-# default
-preBuild = (lib.optionalString isStatic ''
-NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -lc"
-'');

 preInstall = ''
-install -D ${./rp} $out/bin/rp
+install -D rp $out/bin/rp
 wrapProgram $out/bin/rp --prefix PATH : "${ rpBinPath p }"
 '';
-};

-# We want to build for a specific target...
-CARGO_BUILD_TARGET = target;
+# nix progated the *.dev outputs of buildInputs for static
+# builds, but that is non-sense for an executables only package
+postFixup =
+if isStatic then ''
+remove-references-to -t ${p.bash.dev} -t ${p.libsodium.dev} \
+$out/nix-support/propagated-build-inputs
+'' else "";

-# ... which might require a non-default linker:
-"CARGO_TARGET_${shout target}_LINKER" =
-let
-inherit (p.stdenv) cc;
-in
-"${cc}/bin/${cc.targetPrefix}cc";
+meta = with pkgs.lib; {

-meta = with pkgs.lib;
-{
 inherit (cargoToml.package) description homepage;
 license = with licenses; [ mit asl20 ];
 maintainers = [ maintainers.wucke13 ];
 platforms = platforms.all;
 };
-} // (lib.mkIf isStatic {
-# otherwise pkg-config tries to link non-existent dynamic libs
-# documented here: https://docs.rs/pkg-config/latest/pkg_config/
-PKG_CONFIG_ALL_STATIC = true;

-# tell rust to build everything statically linked
-CARGO_BUILD_RUSTFLAGS = "-C target-feature=+crt-static";
-});
+};

 # a function to generate a docker image based of rosenpass
 rosenpassOCI = name: pkgs.dockerTools.buildImage rec {
 inherit name;
@@ -246,11 +178,14 @@
 #
 packages.whitepaper =
 let
+pkgs = import inputs.nixpkgs-unstable {
+inherit system;
+};
 tlsetup = (pkgs.texlive.combine {
 inherit (pkgs.texlive) scheme-basic acmart amsfonts ccicons
 csquotes csvsimple doclicense fancyvrb fontspec gobble
 koma-script ifmtarg latexmk lm markdown mathtools minted noto
-nunito pgf soul unicode-math lualatex-math paralist
+nunito pgf soul soulutf8 unicode-math lualatex-math
 gitinfo2 eso-pic biblatex biblatex-trad biblatex-software
 xkeyval xurl xifthen biber;
 });
@@ -287,7 +222,7 @@
 packages.proof-proverif = pkgs.stdenv.mkDerivation {
 name = "rosenpass-proverif-proof";
 version = "unstable";
-src = pkgs.lib.sources.sourceByRegex ./. [
+src = pkgs.lib.sourceByRegex ./. [
 "analyze.sh"
 "marzipan(/marzipan.awk)?"
 "analysis(/.*)?"
@@ -308,7 +243,6 @@
 inherit (packages.proof-proverif) CRYPTOVERIF_LIB;
 inputsFrom = [ packages.default ];
 nativeBuildInputs = with pkgs; [
-cmake # override the fakecmake from the main step above
 cargo-release
 clippy
 nodePackages.prettier
@@ -323,10 +257,12 @@


 checks = {
-cargo-fmt = pkgs.runCommand "check-cargo-fmt"
-{ inherit (self.devShells.${system}.default) nativeBuildInputs buildInputs; } ''
-cargo fmt --manifest-path=${./.}/Cargo.toml --check && touch $out
-'';
+# Blocked by https://github.com/rust-lang/rustfmt/issues/4306
+# @dakoraa wants a coding style suitable for her accessible coding setup
+# cargo-fmt = pkgs.runCommand "check-cargo-fmt"
+# { inherit (devShells.default) nativeBuildInputs buildInputs; } ''
+# cargo fmt --manifest-path=${src}/Cargo.toml --check > $out
+# '';
 nixpkgs-fmt = pkgs.runCommand "check-nixpkgs-fmt"
 { nativeBuildInputs = [ pkgs.nixpkgs-fmt ]; } ''
 nixpkgs-fmt --check ${./.} && touch $out
@@ -336,8 +272,6 @@
 cd ${./.} && prettier --check . && touch $out
 '';
 };

-formatter = pkgs.nixpkgs-fmt;
 }))
 ];
 }
Binary files changed (not shown): two images removed (122 KiB and 227 KiB), several other binary files updated, and one image replaced (725 KiB before and after).
@@ -1345,7 +1345,7 @@
 <g transform="matrix(1,0,0,1,420.66,-1031.32)">
 <g transform="matrix(31.25,0,0,31.25,1431.32,1459.33)">
 </g>
-<text x="1179.63px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"chaining k<tspan x="1334px 1350.47px " y="1459.33px 1459.33px ">ey</tspan> init"</text>
+<text x="1179.63px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"k<tspan x="1207.79px 1224.25px " y="1459.33px 1459.33px ">ey</tspan> chaining init"</text>
 </g>
 </g>
 <g transform="matrix(0.389246,0,0,0.136584,299.374,1166.87)">
@@ -1437,7 +1437,7 @@
 <g transform="matrix(0.99675,0,0,0.996238,-597.124,-172.692)">
 <g transform="matrix(31.25,0,0,31.25,1492.94,1459.33)">
 </g>
-<text x="1187.16px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"chaining k<tspan x="1341.54px 1358px " y="1459.33px 1459.33px ">ey</tspan> e<tspan x="1398.88px " y="1459.33px ">x</tspan>tr<tspan x="1437.88px " y="1459.33px ">a</tspan>ct"</text>
+<text x="1187.16px" y="1459.33px" style="font-family:'Nunito-Medium', 'Nunito';font-weight:500;font-size:31.25px;">"k<tspan x="1215.32px 1231.79px " y="1459.33px 1459.33px ">ey</tspan> chaining e<tspan x="1398.88px " y="1459.33px ">x</tspan>tr<tspan x="1437.88px " y="1459.33px ">a</tspan>ct"</text>
 </g>
 </g>
 <g transform="matrix(0.99675,0,0,0.996238,-380.054,-779.158)">
 <g transform="matrix(31.25,0,0,31.25,1463.54,1459.33)">
Another image changed (218 KiB before and after) and two further binary files are not shown.
@@ -79,8 +79,6 @@
 letter-csv .initial:n = ,
 letter-content .tl_set:N = \l_letter_csv_content_tl,
 letter-content .initial:n=,
-tableofcontents .bool_gset:N = \g__ptxcd_tableofcontents_bool,
-tableofcontents .initial:n = true,
 }

 \tl_new:N \l__markdown_sequence_tl
@@ -171,12 +171,7 @@ version={4.0},
 \ExplSyntaxOn
 \SetTemplatePreamble{
 \hypersetup{pdftitle=\inserttitle,pdfauthor=The~Rosenpass~Project}
-\exp_args:NV\tl_if_eq:nnTF \inserttitle{Rosenpass} {
 \title{\vspace*{-2.5cm}\includegraphics[width=4cm]{RosenPass-Logo}}
-} {
-\titlehead{\centerline{\includegraphics[width=4cm]{RosenPass-Logo}}}
-\title{\inserttitle}
-}
 \author{\csname insertauthor\endcsname}
 \subject{\csname insertsubject\endcsname}
 \date{\vspace{-1cm}}
@@ -379,28 +374,29 @@ version={4.0},
 }
 }
 }
+\makeatother
+\ExplSyntaxOff
 % end of namepartpicturesetup

 \newcommand{\captionbox}[1]{{\setlength{\fboxsep}{.5ex}\colorbox{rosenpass-gray}{#1}}}

+\makeatletter
 \renewenvironment{abstract}{
 \small
 \begin{center}\normalfont\sectfont\nobreak\abstractname\@endparpenalty\@M\end{center}%
 }{
 \par
 }
+\makeatother

 \SetTemplateBegin{
 \maketitle
 \begin{abstract}
 \noindent\csname insertabstract\endcsname
 \end{abstract}
-\bool_if:NT \g__ptxcd_tableofcontents_bool \tableofcontents
+\tableofcontents
 \clearpage
 }
-\makeatother
-\ExplSyntaxOff

-\SetTemplateEnd{}
+\SetTemplateEnd{
+}
@@ -7,13 +7,13 @@ author:
 - Wanja Zaeske
 - Lisa Schmidt = {Scientific Illustrator – \\url{mullana.de}}
 abstract: |
-Rosenpass is used to create post-quantum-secure VPNs. Rosenpass computes a shared key, WireGuard (WG) [@wg] uses the shared key to establish a secure connection. Rosenpass can also be used without WireGuard, deriving post-quantum-secure symmetric keys for another application. The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks.
+Rosenpass is used to create post-quantum-secure VPNs. Rosenpass computes a shared key, WireGuard (WG) [@wg] uses the shared key to establish a secure connection. Rosenpass can also be used without WireGuard, deriving post-quantum-secure symmetric keys for some other application. The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks.

 The WireGuard implementation enjoys great trust from the cryptography community and has excellent performance characteristics. To preserve these features, the Rosenpass application runs side-by-side with WireGuard and supplies a new post-quantum-secure pre-shared key (PSK) every two minutes. WireGuard itself still performs the pre-quantum-secure key exchange and transfers any transport data with no involvement from Rosenpass at all.

 The Rosenpass project consists of a protocol description, an implementation written in Rust, and a symbolic analysis of the protocol’s security using ProVerif [@proverif]. We are working on a cryptographic security proof using CryptoVerif [@cryptoverif].

-This document is a guide for engineers and researchers implementing the protocol; a scientific paper discussing the security properties of Rosenpass is work in progress.
+This document is a guide to engineers and researchers implementing the protocol; a scientific paper discussing the security properties of Rosenpass is work in progress.
 ---

 \enlargethispage{5mm}
@@ -169,7 +169,7 @@ Rosenpass uses a cryptographic hash function for multiple purposes:
 * Computing the cookie to guard against denial of service attacks. This is a feature adopted from WireGuard, but not yet included in the implementation of Rosenpass.
 * Computing the peer ID
 * Key derivation during and after the handshake
-* Computing the additional data for the biscuit encryption, to provide some privacy for its contents
+* Computing the additional data for the biscuit encryption, to prove some privacy for its contents

 Using one hash function for multiple purposes can cause real-world security issues and even key recovery attacks [@oraclecloning]. We choose a tree-based domain separation scheme based on a keyed hash function – the previously introduced primitive `hash` – to make sure all our hash function calls can be seen as distinct.

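The tree-based domain separation referred to in this hunk can be illustrated with a small, self-contained Rust sketch. It is illustrative only: the stand-in `hash` below is not a cryptographic hash, and the label strings are invented for the example; only the tree-shaped chaining of keyed-hash calls mirrors the scheme the whitepaper describes.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Stand-in keyed hash so the sketch compiles on its own.
/// The real protocol uses a cryptographic keyed hash (the `hash` primitive).
fn hash(key: &[u8; 32], data: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for (i, chunk) in out.chunks_mut(8).enumerate() {
        let mut h = DefaultHasher::new();
        (i as u64).hash(&mut h);
        key.hash(&mut h);
        data.hash(&mut h);
        chunk.copy_from_slice(&h.finish().to_le_bytes());
    }
    out
}

/// Tree-based domain separation: walk a path of labels, feeding each label
/// into the keyed hash together with the key derived so far.
fn domain(root: &[u8; 32], path: &[&[u8]]) -> [u8; 32] {
    path.iter().fold(*root, |key, label| hash(&key, label))
}

fn main() {
    let root = [0u8; 32]; // stand-in for a protocol-wide root key
    let peer_id_key = domain(&root, &[b"peer id" as &[u8]]);
    let biscuit_ad_key = domain(&root, &[b"biscuit" as &[u8], b"additional data" as &[u8]]);
    // Distinct label paths yield distinct keys, so the hash calls stay distinct.
    assert_ne!(peer_id_key, biscuit_ad_key);
}
```

Because every purpose is reached through a different label path from the root, no two purposes ever result in the same keyed-hash call, which is exactly the property the paragraph above relies on.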
@@ -237,7 +237,7 @@ For each peer, the server stores:
 The initiator stores the following local state for each ongoing handshake:

 * A reference to the peer structure
-* A state indicator to keep track of the next message expected from the responder
+* A state indicator to keep track of the message expected from the responder next
 * `sidi` – Initiator session ID
 * `sidr` – Responder session ID
 * `ck` – The chaining key
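As a reading aid for the state list in this hunk, a rough Rust sketch of what such an initiator handshake record could look like. All names, the enum variants, and the field widths are assumptions made for illustration; they are not taken from the Rosenpass sources.

```rust
/// Index-based reference to the peer structure (hypothetical).
struct PeerNo(usize);

/// State indicator: which responder message the initiator expects next.
/// The variant list is illustrative, not the protocol's actual state machine.
enum ExpectedFromResponder {
    RespHello,
    // further states omitted
}

/// Per-handshake state kept by the initiator, mirroring the bullet list above.
struct InitiatorHandshake {
    peer: PeerNo,                     // reference to the peer structure
    expecting: ExpectedFromResponder, // next message expected from the responder
    sidi: u32,                        // initiator session ID (width assumed)
    sidr: u32,                        // responder session ID (width assumed)
    ck: [u8; 32],                     // chaining key (length assumed)
}

fn main() {
    let hs = InitiatorHandshake {
        peer: PeerNo(0),
        expecting: ExpectedFromResponder::RespHello,
        sidi: 1,
        sidr: 0,
        ck: [0u8; 32],
    };
    let _ = hs;
}
```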
readme.md — 13 changes
@@ -14,7 +14,7 @@ This repository contains

 ## Getting started

-First, [install rosenpass](#Getting-Rosenpass). Then, check out the help functions of `rp` & `rosenpass`:
+First, [install rosenpass](#Getting-Rosenpass). Then, check out the help funtions of `rp` & `rosenpass`:

 ```sh
 rp help
@@ -62,22 +62,15 @@ A wrapper script provides instant feedback about which queries execute as expected
 [^libsodium]: https://doc.libsodium.org/
 [^wg]: https://www.wireguard.com/
 [^pqwg]: https://eprint.iacr.org/2020/379
-[^pqwg-statedis]: Unless supplied with a pre-shared-key, but this defeats the purpose of a key exchange protocol
+[^pqwg-statedis]: Unless supplied with a pre-shared-key, but this defeates the purpose of a key exchange protocol
 [^wg-statedis]: https://lists.zx2c4.com/pipermail/wireguard/2021-August/006916.htmlA

 # Getting Rosenpass

-Rosenpass is packaged for more and more distributions, maybe also for the distribution of your choice?
+Rosenpass is packaged for more and more distros, maybe also for the distro of your choice?

 [](https://repology.org/project/rosenpass/versions)

-# Mirrors

-Don't want to use GitHub or only have an IPv6 connection? Rosenpass has set up two mirrors for this:

-- [NotABug](https://notabug.org/rosenpass/rosenpass)
-- [GitLab](https://gitlab.com/rosenpass/rosenpass/)

 # Supported by

 Funded through <a href="https://nlnet.nl/">NLNet</a> with financial support for the European Commission's <a href="https://nlnet.nl/assure">NGI Assure</a> program.
@@ -1,42 +1,35 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "rosenpass"
|
name = "rosenpass"
|
||||||
version = "0.2.1"
|
version = "0.1.1"
|
||||||
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
license = "MIT OR Apache-2.0"
|
license = "MIT OR Apache-2.0"
|
||||||
description = "Build post-quantum-secure VPNs with WireGuard!"
|
description = "Build post-quantum-secure VPNs with WireGuard!"
|
||||||
homepage = "https://rosenpass.eu/"
|
homepage = "https://rosenpass.eu/"
|
||||||
repository = "https://github.com/rosenpass/rosenpass"
|
repository = "https://github.com/rosenpass/rosenpass"
|
||||||
readme = "readme.md"
|
readme = "../readme.md"
|
||||||
|
|
||||||
[[bench]]
|
[[bench]]
|
||||||
name = "handshake"
|
name = "handshake"
|
||||||
harness = false
|
harness = false
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = { version = "1.0.71", features = ["backtrace"] }
|
anyhow = { version = "1.0.52", features = ["backtrace"] }
|
||||||
base64 = "0.21.1"
|
base64 = "0.13.0"
|
||||||
|
clap = { version = "3.0.0", features = ["yaml"] }
|
||||||
static_assertions = "1.1.0"
|
static_assertions = "1.1.0"
|
||||||
memoffset = "0.9.0"
|
memoffset = "0.6.5"
|
||||||
libsodium-sys-stable = { version = "1.19.28", features = ["use-pkg-config"] }
|
libsodium-sys-stable = { version = "1.19.26", features = ["use-pkg-config"] }
|
||||||
oqs-sys = { version = "0.8", default-features = false, features = ['classic_mceliece', 'kyber'] }
|
oqs-sys = { version = "0.7.1", default-features = false, features = ['classic_mceliece', 'kyber'] }
|
||||||
lazy_static = "1.4.0"
|
lazy_static = "1.4.0"
|
||||||
thiserror = "1.0.40"
|
thiserror = "1.0.38"
|
||||||
paste = "1.0.12"
|
paste = "1.0.11"
|
||||||
log = { version = "0.4.17", optional = true }
|
log = { version = "0.4.17", optional = true }
|
||||||
env_logger = { version = "0.10.0", optional = true }
|
env_logger = { version = "0.10.0", optional = true }
|
||||||
serde = { version = "1.0.163", features = ["derive"] }
|
|
||||||
toml = "0.7.4"
|
|
||||||
clap = { version = "4.3.0", features = ["derive"] }
|
|
||||||
mio = { version = "0.8.6", features = ["net", "os-poll"] }
|
|
||||||
|
|
||||||
[build-dependencies]
|
|
||||||
anyhow = "1.0.71"
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
criterion = "0.4.0"
|
criterion = "0.3.5"
|
||||||
test_bin = "0.4.0"
|
test_bin = "0.4.0"
|
||||||
stacker = "0.1.15"
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["log", "env_logger"]
|
default = ["log", "env_logger"]
|
||||||
|
|||||||
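One of the manifest changes above swaps clap 3 with the `yaml` feature for clap 4 with the `derive` feature. The sketch below shows what the derive style looks like; it is modeled on the `Cli` enum that appears further down in this compare view, but trimmed to a single subcommand, so it is not the full Rosenpass CLI. It assumes `clap = { version = "4", features = ["derive"] }` in the manifest.

```rust
use clap::Parser;

// Deriving `Parser` on an enum makes each variant a subcommand.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
enum Cli {
    /// Show the rosenpass manpage
    Man,
}

fn main() {
    match Cli::parse() {
        Cli::Man => println!("man page would be shown here"),
    }
}
```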
@@ -1,18 +1,17 @@
 use anyhow::Result;
-use rosenpass::pqkem::KEM;
 use rosenpass::{
-pqkem::StaticKEM,
+pqkem::{CCAKEM, KEM},
-protocol::{CryptoServer, HandleMsgResult, MsgBuf, PeerPtr, SPk, SSk, SymKey},
+protocol::{CcaPk, CcaSk, HandleMsgResult, MsgBuf, PeerPtr, Server, SymKey},
 sodium::sodium_init,
 };

 use criterion::{black_box, criterion_group, criterion_main, Criterion};

 fn handle(
-tx: &mut CryptoServer,
+tx: &mut Server,
 msgb: &mut MsgBuf,
 msgl: usize,
-rx: &mut CryptoServer,
+rx: &mut Server,
 resb: &mut MsgBuf,
 ) -> Result<(Option<SymKey>, Option<SymKey>)> {
 let HandleMsgResult {
@@ -31,7 +30,7 @@ fn handle(
 Ok((txk, rxk.or(xch)))
 }

-fn hs(ini: &mut CryptoServer, res: &mut CryptoServer) -> Result<()> {
+fn hs(ini: &mut Server, res: &mut Server) -> Result<()> {
 let (mut inib, mut resb) = (MsgBuf::zero(), MsgBuf::zero());
 let sz = ini.initiate_handshake(PeerPtr(0), &mut *inib)?;
 let (kini, kres) = handle(ini, &mut inib, sz, res, &mut resb)?;
@@ -39,19 +38,16 @@ fn hs(ini: &mut CryptoServer, res: &mut CryptoServer) -> Result<()> {
 Ok(())
 }

-fn keygen() -> Result<(SSk, SPk)> {
+fn keygen() -> Result<(CcaSk, CcaPk)> {
-let (mut sk, mut pk) = (SSk::zero(), SPk::zero());
+let (mut sk, mut pk) = (CcaSk::zero(), CcaPk::zero());
-StaticKEM::keygen(sk.secret_mut(), pk.secret_mut())?;
+CCAKEM::keygen(sk.secret_mut(), pk.secret_mut())?;
 Ok((sk, pk))
 }

-fn make_server_pair() -> Result<(CryptoServer, CryptoServer)> {
+fn make_server_pair() -> Result<(Server, Server)> {
 let psk = SymKey::random();
 let ((ska, pka), (skb, pkb)) = (keygen()?, keygen()?);
-let (mut a, mut b) = (
+let (mut a, mut b) = (Server::new(ska, pka.clone()), Server::new(skb, pkb.clone()));
-CryptoServer::new(ska, pka.clone()),
-CryptoServer::new(skb, pkb.clone()),
-);
 a.add_peer(Some(psk.clone()), pkb)?;
 b.add_peer(Some(psk), pka)?;
 Ok((a, b))
@@ -62,12 +58,12 @@ fn criterion_benchmark(c: &mut Criterion) {
 let (mut a, mut b) = make_server_pair().unwrap();
 c.bench_function("cca_secret_alloc", |bench| {
 bench.iter(|| {
-SSk::zero();
+CcaSk::zero();
 })
 });
 c.bench_function("cca_public_alloc", |bench| {
 bench.iter(|| {
-SPk::zero();
+CcaPk::zero();
 })
 });
 c.bench_function("keygen", |bench| {
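The hunks above stop before the end of the benchmark file, so for orientation here is how such a criterion benchmark is usually wired up. The body shown here is an assumption, not the file's remaining lines: the `"handshake"` benchmark name is invented, and `make_server_pair` and `hs` are the functions from the hunks above (v0.2.1 naming, `CryptoServer`-based).

```rust
use criterion::{criterion_group, criterion_main, Criterion};

fn criterion_benchmark(c: &mut Criterion) {
    // Two servers that know each other's public key and share a PSK.
    let (mut a, mut b) = make_server_pair().unwrap();
    // Measure a full handshake round-trip between the two servers.
    c.bench_function("handshake", |bench| {
        bench.iter(|| hs(&mut a, &mut b).unwrap())
    });
}

// `harness = false` in Cargo.toml means criterion provides main() via these macros.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
```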
@@ -1,53 +0,0 @@
-use anyhow::bail;
-use anyhow::Result;
-use std::env;
-use std::fs::File;
-use std::io::Write;
-use std::path::PathBuf;
-use std::process::Command;
-
-/// Invokes a troff compiler to compile a manual page
-fn render_man(compiler: &str, man: &str) -> Result<String> {
-let out = Command::new(compiler).args(["-Tascii", man]).output()?;
-if !out.status.success() {
-bail!("{} returned an error", compiler);
-}
-
-Ok(String::from_utf8(out.stdout)?)
-}
-
-/// Generates the manual page
-fn generate_man() -> String {
-// This function is purposely stupid and redundant
-
-let man = render_man("mandoc", "./doc/rosenpass.1");
-if let Ok(man) = man {
-return man;
-}
-
-let man = render_man("groff", "./doc/rosenpass.1");
-if let Ok(man) = man {
-return man;
-}
-
-// TODO: Link to online manual here
-"Cannot render manual page\n".into()
-}
-
-fn man() {
-let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
-let man = generate_man();
-let path = out_dir.join("rosenpass.1.ascii");
-
-let mut file = File::create(&path).unwrap();
-file.write_all(man.as_bytes()).unwrap();
-
-println!("cargo:rustc-env=ROSENPASS_MAN={}", path.display());
-}
-
-fn main() {
-// For now, rerun the build script on every time, as the build script
-// is not very expensive right now.
-println!("cargo:rerun-if-changed=./");
-man();
-}
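For context, the removed build script above renders `doc/rosenpass.1` with mandoc or groff and exports the result through the `ROSENPASS_MAN` environment variable at compile time. A binary built with it can embed that text roughly as sketched below; the helper name is made up, while the `include_str!(env!("ROSENPASS_MAN"))` pattern is the one used by the `Man` subcommand later in this diff. The sketch only compiles inside a crate whose build script sets that variable.

```rust
// Hypothetical helper showing how the pre-rendered manual page can be embedded.
fn print_bundled_man_page() {
    // `env!` reads ROSENPASS_MAN at compile time; `include_str!` embeds the
    // ASCII-rendered manual into the binary so it can be printed without
    // requiring man/mandoc/groff at runtime.
    print!("{}", include_str!(env!("ROSENPASS_MAN")));
}

fn main() {
    print_bundled_man_page();
}
```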
@@ -1 +0,0 @@
-../readme.md
@@ -1,738 +0,0 @@
|
|||||||
use anyhow::bail;
|
|
||||||
|
|
||||||
use anyhow::Result;
|
|
||||||
use log::{debug, error, info, warn};
|
|
||||||
use mio::Interest;
|
|
||||||
use mio::Token;
|
|
||||||
|
|
||||||
use std::cell::Cell;
|
|
||||||
use std::io::Write;
|
|
||||||
|
|
||||||
use std::io::ErrorKind;
|
|
||||||
use std::net::Ipv4Addr;
|
|
||||||
use std::net::Ipv6Addr;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::net::SocketAddrV4;
|
|
||||||
use std::net::SocketAddrV6;
|
|
||||||
use std::net::ToSocketAddrs;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::process::Command;
|
|
||||||
use std::process::Stdio;
|
|
||||||
use std::slice;
|
|
||||||
use std::thread;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use crate::util::fopen_w;
|
|
||||||
use crate::{
|
|
||||||
config::Verbosity,
|
|
||||||
protocol::{CryptoServer, MsgBuf, PeerPtr, SPk, SSk, SymKey, Timing},
|
|
||||||
util::{b64_writer, fmt_b64},
|
|
||||||
};
|
|
||||||
|
|
||||||
const IPV4_ANY_ADDR: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0);
|
|
||||||
const IPV6_ANY_ADDR: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
|
|
||||||
|
|
||||||
fn ipv4_any_binding() -> SocketAddr {
|
|
||||||
// addr, port
|
|
||||||
SocketAddr::V4(SocketAddrV4::new(IPV4_ANY_ADDR, 0))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn ipv6_any_binding() -> SocketAddr {
|
|
||||||
// addr, port, flowinfo, scope_id
|
|
||||||
SocketAddr::V6(SocketAddrV6::new(IPV6_ANY_ADDR, 0, 0, 0))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Default, Debug)]
|
|
||||||
pub struct AppPeer {
|
|
||||||
pub outfile: Option<PathBuf>,
|
|
||||||
pub outwg: Option<WireguardOut>, // TODO make this a generic command
|
|
||||||
pub initial_endpoint: Option<Endpoint>,
|
|
||||||
pub current_endpoint: Option<Endpoint>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AppPeer {
|
|
||||||
pub fn endpoint(&self) -> Option<&Endpoint> {
|
|
||||||
self.current_endpoint
|
|
||||||
.as_ref()
|
|
||||||
.or(self.initial_endpoint.as_ref())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Default, Debug)]
|
|
||||||
pub struct WireguardOut {
|
|
||||||
// impl KeyOutput
|
|
||||||
pub dev: String,
|
|
||||||
pub pk: String,
|
|
||||||
pub extra_params: Vec<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Holds the state of the application, namely the external IO
|
|
||||||
///
|
|
||||||
/// Responsible for file IO, network IO
|
|
||||||
// TODO add user control via unix domain socket and stdin/stdout
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct AppServer {
|
|
||||||
pub crypt: CryptoServer,
|
|
||||||
pub sockets: Vec<mio::net::UdpSocket>,
|
|
||||||
pub events: mio::Events,
|
|
||||||
pub mio_poll: mio::Poll,
|
|
||||||
pub peers: Vec<AppPeer>,
|
|
||||||
pub verbosity: Verbosity,
|
|
||||||
pub all_sockets_drained: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A socket pointer is an index assigned to a socket;
|
|
||||||
/// right now the index is just the sockets index in AppServer::sockets.
|
|
||||||
///
|
|
||||||
/// Holding this as a reference instead of an &mut UdpSocket is useful
|
|
||||||
/// to deal with the borrow checker, because otherwise we could not refer
|
|
||||||
/// to a socket and another member of AppServer at the same time.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct SocketPtr(pub usize);
|
|
||||||
|
|
||||||
impl SocketPtr {
|
|
||||||
pub fn get<'a>(&self, srv: &'a AppServer) -> &'a mio::net::UdpSocket {
|
|
||||||
&srv.sockets[self.0]
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_mut<'a>(&self, srv: &'a mut AppServer) -> &'a mut mio::net::UdpSocket {
|
|
||||||
&mut srv.sockets[self.0]
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn send_to(&self, srv: &AppServer, buf: &[u8], addr: SocketAddr) -> anyhow::Result<()> {
|
|
||||||
self.get(srv).send_to(buf, addr)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Index based pointer to a Peer
|
|
||||||
#[derive(Debug, Copy, Clone)]
|
|
||||||
pub struct AppPeerPtr(pub usize);
|
|
||||||
|
|
||||||
impl AppPeerPtr {
|
|
||||||
/// Takes an index based handle and returns the actual peer
|
|
||||||
pub fn lift(p: PeerPtr) -> Self {
|
|
||||||
Self(p.0)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns an index based handle to one Peer
|
|
||||||
pub fn lower(&self) -> PeerPtr {
|
|
||||||
PeerPtr(self.0)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_app<'a>(&self, srv: &'a AppServer) -> &'a AppPeer {
|
|
||||||
&srv.peers[self.0]
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_app_mut<'a>(&self, srv: &'a mut AppServer) -> &'a mut AppPeer {
|
|
||||||
&mut srv.peers[self.0]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum AppPollResult {
|
|
||||||
DeleteKey(AppPeerPtr),
|
|
||||||
SendInitiation(AppPeerPtr),
|
|
||||||
SendRetransmission(AppPeerPtr),
|
|
||||||
ReceivedMessage(usize, Endpoint),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum KeyOutputReason {
|
|
||||||
Exchanged,
|
|
||||||
Stale,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a communication partner rosenpass may be sending packets to
|
|
||||||
///
|
|
||||||
/// Generally at the start of Rosenpass either no address or a Hostname is known;
|
|
||||||
/// later when we actually start to receive RespHello packages, we know the specific Address
|
|
||||||
/// and socket to use with a peer
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum Endpoint {
|
|
||||||
/// Rosenpass supports multiple sockets, so we include the information
|
|
||||||
/// which socket an address can be reached on. This probably does not
|
|
||||||
/// make much of a difference in most setups where two sockets are just
|
|
||||||
/// used to enable dual stack operation; it does make a difference in
|
|
||||||
/// more complex use cases.
|
|
||||||
///
|
|
||||||
/// For instance it enables using multiple interfaces with overlapping
|
|
||||||
/// ip spaces, such as listening on a private IP network and a public IP
|
|
||||||
/// at the same time. It also would reply on the same port RespHello was
|
|
||||||
/// sent to when listening on multiple ports on the same interface. This
|
|
||||||
/// may be required for some arcane firewall setups.
|
|
||||||
SocketBoundAddress {
|
|
||||||
/// The socket the address can be reached under; this is generally
|
|
||||||
/// determined when we actually receive an RespHello message
|
|
||||||
socket: SocketPtr,
|
|
||||||
/// Just the address
|
|
||||||
addr: SocketAddr,
|
|
||||||
},
|
|
||||||
// A host name or IP address; storing the hostname here instead of an
|
|
||||||
// ip address makes sure that we look up the host name whenever we try
|
|
||||||
// to make a connection; this may be beneficial in some setups where a host-name
|
|
||||||
// at first can not be resolved but becomes resolvable later.
|
|
||||||
Discovery(HostPathDiscoveryEndpoint),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Endpoint {
|
|
||||||
/// Start discovery from some addresses
|
|
||||||
pub fn discovery_from_addresses(addresses: Vec<SocketAddr>) -> Self {
|
|
||||||
Endpoint::Discovery(HostPathDiscoveryEndpoint::from_addresses(addresses))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Start endpoint discovery from a hostname
|
|
||||||
pub fn discovery_from_hostname(hostname: String) -> anyhow::Result<Self> {
|
|
||||||
let host = HostPathDiscoveryEndpoint::lookup(hostname)?;
|
|
||||||
Ok(Endpoint::Discovery(host))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Restart discovery; joining two sources of (potential) addresses
|
|
||||||
//
|
|
||||||
// This is used when the connection to an endpoint is lost in order
|
|
||||||
// to include the addresses specified on the command line and the
|
|
||||||
// address last used in the discovery process
|
|
||||||
pub fn discovery_from_multiple_sources(
|
|
||||||
a: Option<&Endpoint>,
|
|
||||||
b: Option<&Endpoint>,
|
|
||||||
) -> Option<Self> {
|
|
||||||
let sources = match (a, b) {
|
|
||||||
(Some(e), None) | (None, Some(e)) => e.addresses().iter().chain(&[]),
|
|
||||||
(Some(e1), Some(e2)) => e1.addresses().iter().chain(e2.addresses()),
|
|
||||||
(None, None) => return None,
|
|
||||||
};
|
|
||||||
let lower_size_bound = sources.size_hint().0;
|
|
||||||
let mut dedup = std::collections::HashSet::with_capacity(lower_size_bound);
|
|
||||||
let mut addrs = Vec::with_capacity(lower_size_bound);
|
|
||||||
for a in sources {
|
|
||||||
if dedup.insert(a) {
|
|
||||||
addrs.push(*a);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(Self::discovery_from_addresses(addrs))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn send(&self, srv: &AppServer, buf: &[u8]) -> anyhow::Result<()> {
|
|
||||||
use Endpoint::*;
|
|
||||||
match self {
|
|
||||||
SocketBoundAddress { socket, addr } => socket.send_to(srv, buf, *addr),
|
|
||||||
Discovery(host) => host.send_scouting(srv, buf),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn addresses(&self) -> &[SocketAddr] {
|
|
||||||
use Endpoint::*;
|
|
||||||
match self {
|
|
||||||
SocketBoundAddress { addr, .. } => slice::from_ref(addr),
|
|
||||||
Discovery(host) => host.addresses(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Handles host-path discovery
|
|
||||||
///
|
|
||||||
/// When rosenpass is started, we either know no peer address
|
|
||||||
/// or we know a hostname. How to contact this hostname may not
|
|
||||||
/// be entirely clear for two reasons:
|
|
||||||
///
|
|
||||||
/// 1. We have multiple sockets; only a subset of those may be able to contact the host
|
|
||||||
/// 2. DNS resolution can return multiple addresses
|
|
||||||
///
|
|
||||||
/// We could just use the first working socket and the first address returned, but this
|
|
||||||
/// may be error prone: Some of the sockets may appear to be able to contact the host,
|
|
||||||
/// but the packets will be dropped. Some of the addresses may appear to be reachable
|
|
||||||
/// but the packets could be lost.
|
|
||||||
///
|
|
||||||
/// In contrast to TCP, UDP has no mechanism to ensure packets actually arrive.
|
|
||||||
///
|
|
||||||
/// To robustly handle host path discovery, we try each socket-ip-combination in a round
|
|
||||||
/// robin fashion; the struct stores the offset of the last used combination internally and
|
|
||||||
/// and will continue with the next combination on every call.
|
|
||||||
///
|
|
||||||
/// Retransmission handling will continue normally; i.e. increasing the distance between
|
|
||||||
/// retransmissions on every retransmission, until it is long enough to bore a human. Therefor
|
|
||||||
/// it is important to avoid having a large number of sockets drop packets not just for efficiency
|
|
||||||
/// but to avoid latency issues too.
|
|
||||||
///
|
|
||||||
// TODO: We might consider adjusting the retransmission handling to account for host-path discovery
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct HostPathDiscoveryEndpoint {
|
|
||||||
scouting_state: Cell<(usize, usize)>, // addr_off, sock_off
|
|
||||||
addresses: Vec<SocketAddr>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl HostPathDiscoveryEndpoint {
|
|
||||||
pub fn from_addresses(addresses: Vec<SocketAddr>) -> Self {
|
|
||||||
let scouting_state = Cell::new((0, 0));
|
|
||||||
Self {
|
|
||||||
addresses,
|
|
||||||
scouting_state,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Lookup a hostname
|
|
||||||
pub fn lookup(hostname: String) -> anyhow::Result<Self> {
|
|
||||||
Ok(Self {
|
|
||||||
addresses: ToSocketAddrs::to_socket_addrs(&hostname)?.collect(),
|
|
||||||
scouting_state: Cell::new((0, 0)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn addresses(&self) -> &Vec<SocketAddr> {
|
|
||||||
&self.addresses
|
|
||||||
}
|
|
||||||
|
|
||||||
fn insert_next_scout_offset(&self, srv: &AppServer, addr_no: usize, sock_no: usize) {
|
|
||||||
self.scouting_state.set((
|
|
||||||
(addr_no + 1) % self.addresses.len(),
|
|
||||||
(sock_no + 1) % srv.sockets.len(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Attempt to reach the host
|
|
||||||
///
|
|
||||||
/// Will round-robin-try different socket-ip-combinations on each call.
|
|
||||||
pub fn send_scouting(&self, srv: &AppServer, buf: &[u8]) -> anyhow::Result<()> {
|
|
||||||
let (addr_off, sock_off) = self.scouting_state.get();
|
|
||||||
|
|
||||||
let mut addrs = (self.addresses)
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.cycle()
|
|
||||||
.skip(addr_off)
|
|
||||||
.take(self.addresses.len());
|
|
||||||
let mut sockets = (srv.sockets)
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.cycle()
|
|
||||||
.skip(sock_off)
|
|
||||||
.take(srv.sockets.len());
|
|
||||||
|
|
||||||
for (addr_no, addr) in addrs.by_ref() {
|
|
||||||
for (sock_no, sock) in sockets.by_ref() {
|
|
||||||
let res = sock.send_to(buf, *addr);
|
|
||||||
let err = match res {
|
|
||||||
Ok(_) => {
|
|
||||||
self.insert_next_scout_offset(srv, addr_no, sock_no);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
Err(e) => e,
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: replace this by
|
|
||||||
// e.kind() == io::ErrorKind::NetworkUnreachable
|
|
||||||
// once https://github.com/rust-lang/rust/issues/86442 lands
|
|
||||||
let ignore = err
|
|
||||||
.to_string()
|
|
||||||
.starts_with("Address family not supported by protocol");
|
|
||||||
if !ignore {
|
|
||||||
warn!("Socket #{} refusing to send to {}: ", sock_no, addr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bail!("Unable to send message: All sockets returned errors.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AppServer {
|
|
||||||
pub fn new(
|
|
||||||
sk: SSk,
|
|
||||||
pk: SPk,
|
|
||||||
addrs: Vec<SocketAddr>,
|
|
||||||
verbosity: Verbosity,
|
|
||||||
) -> anyhow::Result<Self> {
|
|
||||||
// setup mio
|
|
||||||
let mio_poll = mio::Poll::new()?;
|
|
||||||
let events = mio::Events::with_capacity(8);
|
|
||||||
|
|
||||||
// bind each SocketAddr to a socket
|
|
||||||
let maybe_sockets: Result<Vec<_>, _> =
|
|
||||||
addrs.into_iter().map(mio::net::UdpSocket::bind).collect();
|
|
||||||
let mut sockets = maybe_sockets?;
|
|
||||||
|
|
||||||
// When no socket is specified, rosenpass should open one port on all
|
|
||||||
// available interfaces best-effort. Here are the cases how this can possibly go:
|
|
||||||
//
|
|
||||||
// Some operating systems (such as Linux [^linux] and FreeBSD [^freebsd])
|
|
||||||
// using IPv6 sockets to handle IPv4 connections; on these systems
|
|
||||||
// binding to the `[::]:0` address will typically open a dual-stack
|
|
||||||
// socket. Some other systems such as OpenBSD [^openbsd] do not support this feature.
|
|
||||||
//
|
|
||||||
// Dual-stack systems provide a flag to enable or disable this
|
|
||||||
// behavior – the IPV6_V6ONLY flag. OpenBSD supports this flag
|
|
||||||
// read-only. MIO[^mio] provides a way to read this flag but not
|
|
||||||
// to write it.
|
|
||||||
//
|
|
||||||
// - One dual-stack IPv6 socket, if the operating supports dual-stack sockets and
|
|
||||||
// correctly reports this
|
|
||||||
// - One IPv6 socket and one IPv4 socket if the operating does not support dual stack
|
|
||||||
// sockets or disables them by default assuming this is also correctly reported
|
|
||||||
// - One IPv6 socket and no IPv4 socket if IPv6 socket is not dual-stack and opening
|
|
||||||
// the IPv6 socket fails
|
|
||||||
// - One IPv4 socket and no IPv6 socket if opening the IPv6 socket fails
|
|
||||||
// - One dual-stack IPv6 socket and a redundant IPv4 socket if dual-stack sockets are
|
|
||||||
// supported but the operating system does not correctly report this (specifically,
|
|
||||||
// if the only_v6() call raises an error)
|
|
||||||
// - Rosenpass exits if no socket could be opened
|
|
||||||
//
|
|
||||||
// [^freebsd]: https://man.freebsd.org/cgi/man.cgi?query=ip6&sektion=4&manpath=FreeBSD+6.0-RELEASE
|
|
||||||
// [^openbsd]: https://man.openbsd.org/ip6.4
|
|
||||||
// [^linux]: https://man7.org/linux/man-pages/man7/ipv6.7.html
|
|
||||||
// [^mio]: https://docs.rs/mio/0.8.6/mio/net/struct.UdpSocket.html#method.only_v6
|
|
||||||
if sockets.is_empty() {
|
|
||||||
macro_rules! try_register_socket {
|
|
||||||
($title:expr, $binding:expr) => {{
|
|
||||||
let r = mio::net::UdpSocket::bind($binding);
|
|
||||||
match r {
|
|
||||||
Ok(sock) => {
|
|
||||||
sockets.push(sock);
|
|
||||||
Some(sockets.len() - 1)
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
warn!("Could not bind to {} socket: {}", $title, e);
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}};
|
|
||||||
}
|
|
||||||
|
|
||||||
let v6 = try_register_socket!("IPv6", ipv6_any_binding());
|
|
||||||
|
|
||||||
let need_v4 = match v6.map(|no| sockets[no].only_v6()) {
|
|
||||||
Some(Ok(v)) => v,
|
|
||||||
None => true,
|
|
||||||
Some(Err(e)) => {
|
|
||||||
warn!("Unable to detect whether the IPv6 socket supports dual-stack operation: {}", e);
|
|
||||||
true
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if need_v4 {
|
|
||||||
try_register_socket!("IPv4", ipv4_any_binding());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if sockets.is_empty() {
|
|
||||||
bail!("No sockets to listen on!")
|
|
||||||
}
|
|
||||||
|
|
||||||
// register all sockets to mio
|
|
||||||
for (i, socket) in sockets.iter_mut().enumerate() {
|
|
||||||
mio_poll
|
|
||||||
.registry()
|
|
||||||
.register(socket, Token(i), Interest::READABLE)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO use mio::net::UnixStream together with std::os::unix::net::UnixStream for Linux
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
crypt: CryptoServer::new(sk, pk),
|
|
||||||
peers: Vec::new(),
|
|
||||||
verbosity,
|
|
||||||
sockets,
|
|
||||||
events,
|
|
||||||
mio_poll,
|
|
||||||
all_sockets_drained: false,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn verbose(&self) -> bool {
|
|
||||||
matches!(self.verbosity, Verbosity::Verbose)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_peer(
|
|
||||||
&mut self,
|
|
||||||
psk: Option<SymKey>,
|
|
||||||
pk: SPk,
|
|
||||||
outfile: Option<PathBuf>,
|
|
||||||
outwg: Option<WireguardOut>,
|
|
||||||
hostname: Option<String>,
|
|
||||||
) -> anyhow::Result<AppPeerPtr> {
|
|
||||||
let PeerPtr(pn) = self.crypt.add_peer(psk, pk)?;
|
|
||||||
assert!(pn == self.peers.len());
|
|
||||||
let initial_endpoint = hostname
|
|
||||||
.map(Endpoint::discovery_from_hostname)
|
|
||||||
.transpose()?;
|
|
||||||
let current_endpoint = None;
|
|
||||||
self.peers.push(AppPeer {
|
|
||||||
outfile,
|
|
||||||
outwg,
|
|
||||||
initial_endpoint,
|
|
||||||
current_endpoint,
|
|
||||||
});
|
|
||||||
Ok(AppPeerPtr(pn))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn listen_loop(&mut self) -> anyhow::Result<()> {
|
|
||||||
const INIT_SLEEP: f64 = 0.01;
|
|
||||||
const MAX_FAILURES: i32 = 10;
|
|
||||||
let mut failure_cnt = 0;
|
|
||||||
|
|
||||||
loop {
|
|
||||||
let msgs_processed = 0usize;
|
|
||||||
let err = match self.event_loop() {
|
|
||||||
Ok(()) => return Ok(()),
|
|
||||||
Err(e) => e,
|
|
||||||
};
|
|
||||||
|
|
||||||
// This should not happen…
|
|
||||||
failure_cnt = if msgs_processed > 0 {
|
|
||||||
0
|
|
||||||
} else {
|
|
||||||
failure_cnt + 1
|
|
||||||
};
|
|
||||||
let sleep = INIT_SLEEP * 2.0f64.powf(f64::from(failure_cnt - 1));
|
|
||||||
let tries_left = MAX_FAILURES - (failure_cnt - 1);
|
|
||||||
error!(
|
|
||||||
"unexpected error after processing {} messages: {:?} {}",
|
|
||||||
msgs_processed,
|
|
||||||
err,
|
|
||||||
err.backtrace()
|
|
||||||
);
|
|
||||||
if tries_left > 0 {
|
|
||||||
error!("re-initializing networking in {sleep}! {tries_left} tries left.");
|
|
||||||
std::thread::sleep(self.crypt.timebase.dur(sleep));
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
bail!("too many network failures");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn event_loop(&mut self) -> anyhow::Result<()> {
|
|
||||||
let (mut rx, mut tx) = (MsgBuf::zero(), MsgBuf::zero());
|
|
||||||
|
|
||||||
/// if socket address for peer is known, call closure
|
|
||||||
/// assumes that closure leaves a message in `tx`
|
|
||||||
/// assumes that closure returns the length of message in bytes
|
|
||||||
macro_rules! tx_maybe_with {
|
|
||||||
($peer:expr, $fn:expr) => {
|
|
||||||
attempt!({
|
|
||||||
let p = $peer;
|
|
||||||
if p.get_app(self).endpoint().is_some() {
|
|
||||||
let len = $fn()?;
|
|
||||||
let ep: &Endpoint = p.get_app(self).endpoint().unwrap();
|
|
||||||
ep.send(self, &tx[..len])?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
loop {
|
|
||||||
use crate::protocol::HandleMsgResult;
|
|
||||||
use AppPollResult::*;
|
|
||||||
use KeyOutputReason::*;
|
|
||||||
match self.poll(&mut *rx)? {
|
|
||||||
#[allow(clippy::redundant_closure_call)]
|
|
||||||
SendInitiation(peer) => tx_maybe_with!(peer, || self
|
|
||||||
.crypt
|
|
||||||
.initiate_handshake(peer.lower(), &mut *tx))?,
|
|
||||||
#[allow(clippy::redundant_closure_call)]
|
|
||||||
SendRetransmission(peer) => tx_maybe_with!(peer, || self
|
|
||||||
.crypt
|
|
||||||
.retransmit_handshake(peer.lower(), &mut *tx))?,
|
|
||||||
DeleteKey(peer) => {
|
|
||||||
self.output_key(peer, Stale, &SymKey::random())?;
|
|
||||||
|
|
||||||
// There was a loss of connection apparently; restart host discovery
|
|
||||||
// starting from the last used address but including all the initially
|
|
||||||
// specified addresses
|
|
||||||
// TODO: We could do this preemptively, before any connection loss actually occurs.
|
|
||||||
let p = peer.get_app_mut(self);
|
|
||||||
p.current_endpoint = Endpoint::discovery_from_multiple_sources(
|
|
||||||
p.current_endpoint.as_ref(),
|
|
||||||
p.initial_endpoint.as_ref(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
ReceivedMessage(len, endpoint) => {
|
|
||||||
match self.crypt.handle_msg(&rx[..len], &mut *tx) {
|
|
||||||
Err(ref e) => {
|
|
||||||
self.verbose().then(|| {
|
|
||||||
info!(
|
|
||||||
"error processing incoming message from {:?}: {:?} {}",
|
|
||||||
endpoint,
|
|
||||||
e,
|
|
||||||
e.backtrace()
|
|
||||||
);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(HandleMsgResult {
|
|
||||||
resp,
|
|
||||||
exchanged_with,
|
|
||||||
..
|
|
||||||
}) => {
|
|
||||||
if let Some(len) = resp {
|
|
||||||
endpoint.send(self, &tx[0..len])?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(p) = exchanged_with {
|
|
||||||
let ap = AppPeerPtr::lift(p);
|
|
||||||
ap.get_app_mut(self).current_endpoint = Some(endpoint);
|
|
||||||
|
|
||||||
// TODO: Maybe we should rather call the key "rosenpass output"?
|
|
||||||
self.output_key(ap, Exchanged, &self.crypt.osk(p)?)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn output_key(
|
|
||||||
&self,
|
|
||||||
peer: AppPeerPtr,
|
|
||||||
why: KeyOutputReason,
|
|
||||||
key: &SymKey,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
let peerid = peer.lower().get(&self.crypt).pidt()?;
|
|
||||||
let ap = peer.get_app(self);
|
|
||||||
|
|
||||||
if self.verbose() {
|
|
||||||
let msg = match why {
|
|
||||||
KeyOutputReason::Exchanged => "Exchanged key with peer",
|
|
||||||
KeyOutputReason::Stale => "Erasing outdated key from peer",
|
|
||||||
};
|
|
||||||
info!("{} {}", msg, fmt_b64(&*peerid));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(of) = ap.outfile.as_ref() {
|
|
||||||
// This might leave some fragments of the secret on the stack;
|
|
||||||
// in practice this is likely not a problem because the stack likely
|
|
||||||
// will be overwritten by something else soon but this is not exactly
|
|
||||||
// guaranteed. It would be possible to remedy this, but since the secret
|
|
||||||
// data will linger in the linux page cache anyways with the current
|
|
||||||
// implementation, going to great length to erase the secret here is
|
|
||||||
// not worth it right now.
|
|
||||||
b64_writer(fopen_w(of)?).write_all(key.secret())?;
|
|
||||||
let why = match why {
|
|
||||||
KeyOutputReason::Exchanged => "exchanged",
|
|
||||||
KeyOutputReason::Stale => "stale",
|
|
||||||
};
|
|
||||||
|
|
||||||
// this is intentionally writing to stdout instead of stderr, because
|
|
||||||
// it is meant to allow external detection of a successful key-exchange
|
|
||||||
println!(
|
|
||||||
"output-key peer {} key-file {of:?} {why}",
|
|
||||||
fmt_b64(&*peerid)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(owg) = ap.outwg.as_ref() {
|
|
||||||
let mut child = Command::new("wg")
|
|
||||||
.arg("set")
|
|
||||||
.arg(&owg.dev)
|
|
||||||
.arg("peer")
|
|
||||||
.arg(&owg.pk)
|
|
||||||
.arg("preshared-key")
|
|
||||||
.arg("/dev/stdin")
|
|
||||||
.stdin(Stdio::piped())
|
|
||||||
.args(&owg.extra_params)
|
|
||||||
.spawn()?;
|
|
||||||
b64_writer(child.stdin.take().unwrap()).write_all(key.secret())?;
|
|
||||||
|
|
||||||
thread::spawn(move || {
|
|
||||||
let status = child.wait();
|
|
||||||
|
|
||||||
if let Ok(status) = status {
|
|
||||||
if status.success() {
|
|
||||||
debug!("successfully passed psk to wg")
|
|
||||||
} else {
|
|
||||||
error!("could not pass psk to wg {:?}", status)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
error!("wait failed: {:?}", status)
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn poll(&mut self, rx_buf: &mut [u8]) -> anyhow::Result<AppPollResult> {
|
|
||||||
use crate::protocol::PollResult as C;
|
|
||||||
use AppPollResult as A;
|
|
||||||
loop {
|
|
||||||
return Ok(match self.crypt.poll()? {
|
|
||||||
C::DeleteKey(PeerPtr(no)) => A::DeleteKey(AppPeerPtr(no)),
|
|
||||||
C::SendInitiation(PeerPtr(no)) => A::SendInitiation(AppPeerPtr(no)),
|
|
||||||
C::SendRetransmission(PeerPtr(no)) => A::SendRetransmission(AppPeerPtr(no)),
|
|
||||||
C::Sleep(timeout) => match self.try_recv(rx_buf, timeout)? {
|
|
||||||
Some((len, addr)) => A::ReceivedMessage(len, addr),
|
|
||||||
None => continue,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Tries to receive a new message
|
|
||||||
///
|
|
||||||
/// - might wait for an duration up to `timeout`
|
|
||||||
/// - returns immediately if an error occurs
|
|
||||||
/// - returns immediately if a new message is received
|
|
||||||
pub fn try_recv(
|
|
||||||
&mut self,
|
|
||||||
buf: &mut [u8],
|
|
||||||
timeout: Timing,
|
|
||||||
) -> anyhow::Result<Option<(usize, Endpoint)>> {
|
|
||||||
let timeout = Duration::from_secs_f64(timeout);
|
|
||||||
|
|
||||||
// if there is no time to wait on IO, well, then, lets not waste any time!
|
|
||||||
if timeout.is_zero() {
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE when using mio::Poll, there are some particularities (taken from
|
|
||||||
// https://docs.rs/mio/latest/mio/struct.Poll.html):
|
|
||||||
//
|
|
||||||
// - poll() might return readiness, even if nothing is ready
|
|
||||||
// - in this case, a WouldBlock error is returned from actual IO operations
|
|
||||||
// - after receiving readiness for a source, it must be drained until a WouldBlock
|
|
||||||
// is received
|
|
||||||
//
|
|
||||||
// This would usually require us to maintain the drainage status of each socket;
|
|
||||||
// a socket would only become drained when it returned WouldBlock and only
|
|
||||||
// non-drained when receiving a readiness event from mio for it. Then, only the
|
|
||||||
// ready sockets should be worked on, ideally without requiring an O(n) search
|
|
||||||
// through all sockets for checking their drained status. However, our use-case
|
|
||||||
// is primarily heaving one or two sockets (if IPv4 and IPv6 IF_ANY listen is
|
|
||||||
// desired on a non-dual-stack OS), thus just checking every socket after any
|
|
||||||
// readiness event seems to be good enough™ for now.
|
|
||||||
|
|
||||||
// only poll if we drained all sockets before
|
|
||||||
if self.all_sockets_drained {
|
|
||||||
self.mio_poll.poll(&mut self.events, Some(timeout))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut would_block_count = 0;
|
|
||||||
for (sock_no, socket) in self.sockets.iter_mut().enumerate() {
|
|
||||||
match socket.recv_from(buf) {
|
|
||||||
Ok((n, addr)) => {
|
|
||||||
// at least one socket was not drained...
|
|
||||||
self.all_sockets_drained = false;
|
|
||||||
return Ok(Some((
|
|
||||||
n,
|
|
||||||
Endpoint::SocketBoundAddress {
|
|
||||||
socket: SocketPtr(sock_no),
|
|
||||||
addr,
|
|
||||||
},
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
Err(e) if e.kind() == ErrorKind::WouldBlock => {
|
|
||||||
would_block_count += 1;
|
|
||||||
}
|
|
||||||
// TODO if one socket continuously returns an error, then we never poll, thus we never wait for a timeout, thus we have a spin-lock
|
|
||||||
Err(e) => return Err(e.into()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if each socket returned WouldBlock, then we drained them all at least once indeed
|
|
||||||
self.all_sockets_drained = would_block_count == self.sockets.len();
|
|
||||||
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,261 +0,0 @@
|
|||||||
use anyhow::{bail, ensure};
|
|
||||||
use clap::Parser;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use crate::app_server;
|
|
||||||
use crate::app_server::AppServer;
|
|
||||||
use crate::util::{LoadValue, LoadValueB64};
|
|
||||||
use crate::{
|
|
||||||
// app_server::{AppServer, LoadValue, LoadValueB64},
|
|
||||||
coloring::Secret,
|
|
||||||
pqkem::{StaticKEM, KEM},
|
|
||||||
protocol::{SPk, SSk, SymKey},
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::config;
|
|
||||||
|
|
||||||
#[derive(Parser, Debug)]
|
|
||||||
#[command(author, version, about, long_about)]
|
|
||||||
pub enum Cli {
|
|
||||||
/// Start Rosenpass in server mode and carry on with the key exchange
|
|
||||||
///
|
|
||||||
/// This will parse the configuration file and perform the key exchange
|
|
||||||
/// with the specified peers. If a peer's endpoint is specified, this
|
|
||||||
/// Rosenpass instance will try to initiate a key exchange with the peer,
|
|
||||||
/// otherwise only initiation attempts from the peer will be responded to.
|
|
||||||
ExchangeConfig { config_file: PathBuf },
|
|
||||||
|
|
||||||
/// Start in daemon mode, performing key exchanges
|
|
||||||
///
|
|
||||||
/// The configuration is read from the command line. The `peer` token
|
|
||||||
/// always separates multiple peers, e. g. if the token `peer` appears
|
|
||||||
/// in the WIREGUARD_EXTRA_ARGS it is not put into the WireGuard arguments
|
|
||||||
/// but instead a new peer is created.
|
|
||||||
/* Explanation: `first_arg` and `rest_of_args` are combined into one
|
|
||||||
* `Vec<String>`. They are only used to trick clap into displaying some
|
|
||||||
* guidance on the CLI usage.
|
|
||||||
*/
|
|
||||||
#[allow(rustdoc::broken_intra_doc_links)]
|
|
||||||
#[allow(rustdoc::invalid_html_tags)]
|
|
||||||
Exchange {
|
|
||||||
/// public-key <PATH> secret-key <PATH> [listen <ADDR>:<PORT>]... [verbose]
|
|
||||||
#[clap(value_name = "OWN_CONFIG")]
|
|
||||||
first_arg: String,
|
|
||||||
|
|
||||||
/// peer public-key <PATH> [ENDPOINT] [PSK] [OUTFILE] [WG]
|
|
||||||
///
|
|
||||||
/// ENDPOINT := endpoint <HOST/IP>:<PORT>
|
|
||||||
///
|
|
||||||
/// PSK := preshared-key <PATH>
|
|
||||||
///
|
|
||||||
/// OUTFILE := outfile <PATH>
|
|
||||||
///
|
|
||||||
/// WG := wireguard <WIREGUARD_DEV> <WIREGUARD_PEER> [WIREGUARD_EXTRA_ARGS]...
|
|
||||||
#[clap(value_name = "PEERS")]
|
|
||||||
rest_of_args: Vec<String>,
|
|
||||||
|
|
||||||
/// Save the parsed configuration to a file before starting the daemon
|
|
||||||
#[clap(short, long)]
|
|
||||||
config_file: Option<PathBuf>,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Generate a demo config file
|
|
||||||
GenConfig {
|
|
||||||
config_file: PathBuf,
|
|
||||||
|
|
||||||
/// Forcefully overwrite existing config file
|
|
||||||
#[clap(short, long)]
|
|
||||||
force: bool,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Generate the keys mentioned in a configFile
|
|
||||||
///
|
|
||||||
/// Generates secret- & public-key to their destination. If a config file
|
|
||||||
/// is provided then the key file destination is taken from there.
|
|
||||||
/// Otherwise the locations given via the public-key and secret-key options are used.
|
|
||||||
GenKeys {
|
|
||||||
config_file: Option<PathBuf>,
|
|
||||||
|
|
||||||
/// where to write public-key to
|
|
||||||
#[clap(short, long)]
|
|
||||||
public_key: Option<PathBuf>,
|
|
||||||
|
|
||||||
/// where to write secret-key to
|
|
||||||
#[clap(short, long)]
|
|
||||||
secret_key: Option<PathBuf>,
|
|
||||||
|
|
||||||
/// Forcefully overwrite public- & secret-key file
|
|
||||||
#[clap(short, long)]
|
|
||||||
force: bool,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Validate a configuration
|
|
||||||
Validate { config_files: Vec<PathBuf> },
|
|
||||||
|
|
||||||
/// Show the rosenpass manpage
|
|
||||||
// TODO make this the default, but only after the manpage has been adjusted once the CLI stabilizes
|
|
||||||
Man,
|
|
||||||
}
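// A minimal illustrative sketch of driving the subcommands above from code.
// `Cli::parse_from` is standard clap API; the argument vector and file names
// here are hypothetical and assume clap's default kebab-case subcommand names.
fn _example_cli_parse() {
    let cli = Cli::parse_from(["rosenpass", "validate", "a.toml", "b.toml"]);
    match cli {
        Cli::Validate { config_files } => assert_eq!(config_files.len(), 2),
        _ => unreachable!("this argument vector selects the Validate subcommand"),
    }
}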
|
|
||||||
|
|
||||||
impl Cli {
|
|
||||||
pub fn run() -> anyhow::Result<()> {
|
|
||||||
let cli = Self::parse();
|
|
||||||
|
|
||||||
use Cli::*;
|
|
||||||
match cli {
|
|
||||||
Man => {
|
|
||||||
let man_cmd = std::process::Command::new("man")
|
|
||||||
.args(["1", "rosenpass"])
|
|
||||||
.status();
|
|
||||||
|
|
||||||
if !(man_cmd.is_ok() && man_cmd.unwrap().success()) {
|
|
||||||
println!(include_str!(env!("ROSENPASS_MAN")));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
GenConfig { config_file, force } => {
|
|
||||||
ensure!(
|
|
||||||
force || !config_file.exists(),
|
|
||||||
"config file {config_file:?} already exists"
|
|
||||||
);
|
|
||||||
|
|
||||||
config::Rosenpass::example_config().store(config_file)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
GenKeys {
|
|
||||||
config_file,
|
|
||||||
public_key,
|
|
||||||
secret_key,
|
|
||||||
force,
|
|
||||||
} => {
|
|
||||||
// figure out where the key file is specified, in the config file or directly as flag?
|
|
||||||
let (pkf, skf) = match (config_file, public_key, secret_key) {
|
|
||||||
(Some(config_file), _, _) => {
|
|
||||||
ensure!(
|
|
||||||
config_file.exists(),
|
|
||||||
"config file {config_file:?} does not exist"
|
|
||||||
);
|
|
||||||
|
|
||||||
let config = config::Rosenpass::load(config_file)?;
|
|
||||||
|
|
||||||
(config.public_key, config.secret_key)
|
|
||||||
}
|
|
||||||
(_, Some(pkf), Some(skf)) => (pkf, skf),
|
|
||||||
_ => {
|
|
||||||
bail!("either a config-file or both public-key and secret-key file are required")
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// check that we are not overriding something unintentionally
|
|
||||||
let mut problems = vec![];
|
|
||||||
if !force && pkf.is_file() {
|
|
||||||
problems.push(format!(
|
|
||||||
"public-key file {pkf:?} exist, refusing to overwrite it"
|
|
||||||
));
|
|
||||||
}
|
|
||||||
if !force && skf.is_file() {
|
|
||||||
problems.push(format!(
|
|
||||||
"secret-key file {skf:?} exist, refusing to overwrite it"
|
|
||||||
));
|
|
||||||
}
|
|
||||||
if !problems.is_empty() {
|
|
||||||
bail!(problems.join("\n"));
|
|
||||||
}
|
|
||||||
|
|
||||||
// generate the keys and store them in files
|
|
||||||
let mut ssk = crate::protocol::SSk::random();
|
|
||||||
let mut spk = crate::protocol::SPk::random();
|
|
||||||
StaticKEM::keygen(ssk.secret_mut(), spk.secret_mut())?;
|
|
||||||
|
|
||||||
ssk.store_secret(skf)?;
|
|
||||||
spk.store_secret(pkf)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
ExchangeConfig { config_file } => {
|
|
||||||
ensure!(
|
|
||||||
config_file.exists(),
|
|
||||||
"config file '{config_file:?}' does not exist"
|
|
||||||
);
|
|
||||||
|
|
||||||
let config = config::Rosenpass::load(config_file)?;
|
|
||||||
config.validate()?;
|
|
||||||
Self::event_loop(config)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Exchange {
|
|
||||||
first_arg,
|
|
||||||
mut rest_of_args,
|
|
||||||
config_file,
|
|
||||||
} => {
|
|
||||||
rest_of_args.insert(0, first_arg);
|
|
||||||
let args = rest_of_args;
|
|
||||||
let mut config = config::Rosenpass::parse_args(args)?;
|
|
||||||
|
|
||||||
if let Some(p) = config_file {
|
|
||||||
config.store(&p)?;
|
|
||||||
config.config_file_path = p;
|
|
||||||
}
|
|
||||||
config.validate()?;
|
|
||||||
Self::event_loop(config)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Validate { config_files } => {
|
|
||||||
for file in config_files {
|
|
||||||
match config::Rosenpass::load(&file) {
|
|
||||||
Ok(config) => {
|
|
||||||
eprintln!("{file:?} is valid TOML and conforms to the expected schema");
|
|
||||||
match config.validate() {
|
|
||||||
Ok(config) => eprintln!("{file:?} passed all logical checks"),
|
|
||||||
Err(_) => eprintln!("{file:?} contains logical errors"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => eprintln!("{file:?} is not valid: {e}"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn event_loop(config: config::Rosenpass) -> anyhow::Result<()> {
|
|
||||||
// load own keys
|
|
||||||
let sk = SSk::load(&config.secret_key)?;
|
|
||||||
let pk = SPk::load(&config.public_key)?;
|
|
||||||
|
|
||||||
// start an application server
|
|
||||||
let mut srv = std::boxed::Box::<AppServer>::new(AppServer::new(
|
|
||||||
sk,
|
|
||||||
pk,
|
|
||||||
config.listen,
|
|
||||||
config.verbosity,
|
|
||||||
)?);
|
|
||||||
|
|
||||||
for cfg_peer in config.peers {
|
|
||||||
srv.add_peer(
|
|
||||||
// psk, pk, outfile, outwg, tx_addr
|
|
||||||
cfg_peer.pre_shared_key.map(SymKey::load_b64).transpose()?,
|
|
||||||
SPk::load(&cfg_peer.public_key)?,
|
|
||||||
cfg_peer.key_out,
|
|
||||||
cfg_peer.wg.map(|cfg| app_server::WireguardOut {
|
|
||||||
dev: cfg.device,
|
|
||||||
pk: cfg.peer,
|
|
||||||
extra_params: cfg.extra_params,
|
|
||||||
}),
|
|
||||||
cfg_peer.endpoint.clone(),
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
srv.event_loop()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
trait StoreSecret {
|
|
||||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<const N: usize> StoreSecret for Secret<N> {
|
|
||||||
fn store_secret<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
|
|
||||||
std::fs::write(path, self.secret())?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
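// A minimal illustrative sketch, assuming one wants tighter file permissions:
// store_secret above uses std::fs::write, which creates the key file with the
// process umask. A Unix-only variant restricted to the owner could look like
// this; it is a possible hardening, not what the code above does.
#[cfg(unix)]
fn _store_secret_owner_only(path: &Path, secret: &[u8]) -> anyhow::Result<()> {
    use std::io::Write;
    use std::os::unix::fs::OpenOptionsExt;
    let mut f = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .mode(0o600) // owner read/write only
        .open(path)?;
    f.write_all(secret)?;
    Ok(())
}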
|
|
||||||
@@ -1,10 +1,7 @@
|
|||||||
//! Types for dealing with (secret-) values
|
//! This module contains various types for dealing with secrets
|
||||||
|
//!
|
||||||
|
//! These types use type level coloring to make accidental leakage of secrets extra hard.
|
||||||
//!
|
//!
|
||||||
//! These types use type level coloring to make accidental leakage of secrets extra hard. Both [Secret] and [Public] own their data, but the memory backing
|
|
||||||
//! [Secret] is special:
|
|
||||||
//! - as it is heap allocated, we can actively zeroize the memory before freeing it.
|
|
||||||
//! - guard pages before and after each allocation trap accidental sequential reads that creep towards our secrets
|
|
||||||
//! - the memory is mlocked, i.e. it is never swapped
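// A minimal illustrative sketch of the zeroize-before-free idea described
// above, using only std. The real Secret additionally mlocks its heap
// allocation and surrounds it with guard pages; this toy type does neither.
struct ToySecret([u8; 32]);

impl Drop for ToySecret {
    fn drop(&mut self) {
        // Overwrite the buffer before the memory is released; write_volatile
        // keeps the compiler from optimizing the wipe away.
        for b in self.0.iter_mut() {
            unsafe { core::ptr::write_volatile(b, 0) };
        }
    }
}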
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
sodium::{rng, zeroize},
|
sodium::{rng, zeroize},
|
||||||
|
|||||||
@@ -1,457 +0,0 @@
|
|||||||
use std::{
|
|
||||||
collections::HashSet,
|
|
||||||
fs,
|
|
||||||
io::Write,
|
|
||||||
net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs},
|
|
||||||
path::{Path, PathBuf},
|
|
||||||
};
|
|
||||||
|
|
||||||
use anyhow::{bail, ensure};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use crate::util::fopen_w;
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Rosenpass {
|
|
||||||
pub public_key: PathBuf,
|
|
||||||
|
|
||||||
pub secret_key: PathBuf,
|
|
||||||
|
|
||||||
pub listen: Vec<SocketAddr>,
|
|
||||||
|
|
||||||
#[serde(default)]
|
|
||||||
pub verbosity: Verbosity,
|
|
||||||
pub peers: Vec<RosenpassPeer>,
|
|
||||||
|
|
||||||
#[serde(skip)]
|
|
||||||
pub config_file_path: PathBuf,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
|
|
||||||
pub enum Verbosity {
|
|
||||||
Quiet,
|
|
||||||
Verbose,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
|
|
||||||
pub struct RosenpassPeer {
|
|
||||||
pub public_key: PathBuf,
|
|
||||||
pub endpoint: Option<String>,
|
|
||||||
pub pre_shared_key: Option<PathBuf>,
|
|
||||||
|
|
||||||
#[serde(default)]
|
|
||||||
pub key_out: Option<PathBuf>,
|
|
||||||
|
|
||||||
// TODO make sure failure does not crash but is logged
|
|
||||||
#[serde(default)]
|
|
||||||
pub exchange_command: Vec<String>,
|
|
||||||
|
|
||||||
// TODO make this field only available on binary builds, not on library builds
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub wg: Option<WireGuard>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
|
|
||||||
pub struct WireGuard {
|
|
||||||
pub device: String,
|
|
||||||
pub peer: String,
|
|
||||||
|
|
||||||
#[serde(default)]
|
|
||||||
pub extra_params: Vec<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Rosenpass {
|
|
||||||
/// Load a config file from a file path
|
|
||||||
///
|
|
||||||
/// no validation is conducted
|
|
||||||
pub fn load<P: AsRef<Path>>(p: P) -> anyhow::Result<Self> {
|
|
||||||
let mut config: Self = toml::from_str(&fs::read_to_string(&p)?)?;
|
|
||||||
|
|
||||||
config.config_file_path = p.as_ref().to_owned();
|
|
||||||
Ok(config)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write a config to a file
|
|
||||||
pub fn store<P: AsRef<Path>>(&self, p: P) -> anyhow::Result<()> {
|
|
||||||
let serialized_config =
|
|
||||||
toml::to_string_pretty(&self).expect("unable to serialize the default config");
|
|
||||||
fs::write(p, serialized_config)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Commit the configuration to where it came from, overwriting the original file
|
|
||||||
pub fn commit(&self) -> anyhow::Result<()> {
|
|
||||||
let mut f = fopen_w(&self.config_file_path)?;
|
|
||||||
f.write_all(toml::to_string_pretty(&self)?.as_bytes())?;
|
|
||||||
|
|
||||||
self.store(&self.config_file_path)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Validate a configuration
|
|
||||||
pub fn validate(&self) -> anyhow::Result<()> {
|
|
||||||
// check the public-key file exists
|
|
||||||
ensure!(
|
|
||||||
self.public_key.is_file(),
|
|
||||||
"public-key file {:?} does not exist",
|
|
||||||
self.public_key
|
|
||||||
);
|
|
||||||
|
|
||||||
// check the secret-key file exists
|
|
||||||
ensure!(
|
|
||||||
self.secret_key.is_file(),
|
|
||||||
"secret-key file {:?} does not exist",
|
|
||||||
self.secret_key
|
|
||||||
);
|
|
||||||
|
|
||||||
for (i, peer) in self.peers.iter().enumerate() {
|
|
||||||
// check peer's public-key file exists
|
|
||||||
ensure!(
|
|
||||||
peer.public_key.is_file(),
|
|
||||||
"peer {i} public-key file {:?} does not exist",
|
|
||||||
peer.public_key
|
|
||||||
);
|
|
||||||
|
|
||||||
// check endpoint is usable
|
|
||||||
if let Some(addr) = peer.endpoint.as_ref() {
|
|
||||||
ensure!(
|
|
||||||
addr.to_socket_addrs().is_ok(),
|
|
||||||
"peer {i} endpoint {} can not be parsed to a socket address",
|
|
||||||
addr
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO warn if neither out_key nor exchange_command is defined
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Creates a new configuration
|
|
||||||
pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(public_key: P1, secret_key: P2) -> Self {
|
|
||||||
Self {
|
|
||||||
public_key: PathBuf::from(public_key.as_ref()),
|
|
||||||
secret_key: PathBuf::from(secret_key.as_ref()),
|
|
||||||
listen: vec![],
|
|
||||||
verbosity: Verbosity::Quiet,
|
|
||||||
peers: vec![],
|
|
||||||
config_file_path: PathBuf::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add IPv4 __and__ IPv6 IF_ANY address to the listen interfaces
|
|
||||||
pub fn add_if_any(&mut self, port: u16) {
|
|
||||||
let ipv4_any = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port));
|
|
||||||
let ipv6_any = SocketAddr::V6(SocketAddrV6::new(
|
|
||||||
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0),
|
|
||||||
port,
|
|
||||||
0,
|
|
||||||
0,
|
|
||||||
));
|
|
||||||
self.listen.push(ipv4_any);
|
|
||||||
self.listen.push(ipv6_any);
|
|
||||||
}
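// A small illustrative check of what add_if_any(9999) pushes above: the IPv4
// and IPv6 wildcard addresses for the given port, shown here with std only.
fn _add_if_any_example() {
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
    let expected = [
        SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 9999),
        SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 9999),
    ];
    assert_eq!(expected[0].to_string(), "0.0.0.0:9999");
    assert_eq!(expected[1].to_string(), "[::]:9999");
}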
|
|
||||||
|
|
||||||
/// from chaotic args
|
|
||||||
/// Question: the grammar is undecidable, what do we do here?
|
|
||||||
pub fn parse_args(args: Vec<String>) -> anyhow::Result<Self> {
|
|
||||||
let mut config = Self::new("", "");
|
|
||||||
|
|
||||||
#[derive(Debug, Hash, PartialEq, Eq)]
|
|
||||||
enum State {
|
|
||||||
Own,
|
|
||||||
OwnPublicKey,
|
|
||||||
OwnSecretKey,
|
|
||||||
OwnListen,
|
|
||||||
Peer,
|
|
||||||
PeerPsk,
|
|
||||||
PeerPublicKey,
|
|
||||||
PeerEndpoint,
|
|
||||||
PeerOutfile,
|
|
||||||
PeerWireguardDev,
|
|
||||||
PeerWireguardPeer,
|
|
||||||
PeerWireguardExtraArgs,
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut already_set = HashSet::new();
|
|
||||||
|
|
||||||
// TODO idea: use config.peers.len() to give index of peer with conflicting argument
|
|
||||||
use State::*;
|
|
||||||
let mut state = Own;
|
|
||||||
let mut current_peer = None;
|
|
||||||
let p_exists = "a peer should exist by now";
|
|
||||||
let wg_exists = "a peer wireguard should exist by now";
|
|
||||||
for arg in args {
|
|
||||||
state = match (state, arg.as_str(), &mut current_peer) {
|
|
||||||
(Own, "public-key", None) => OwnPublicKey,
|
|
||||||
(Own, "secret-key", None) => OwnSecretKey,
|
|
||||||
(Own, "private-key", None) => {
|
|
||||||
log::warn!(
|
|
||||||
"the private-key argument is deprecated, please use secret-key instead"
|
|
||||||
);
|
|
||||||
OwnSecretKey
|
|
||||||
}
|
|
||||||
(Own, "listen", None) => OwnListen,
|
|
||||||
(Own, "verbose", None) => {
|
|
||||||
config.verbosity = Verbosity::Verbose;
|
|
||||||
Own
|
|
||||||
}
|
|
||||||
(Own, "peer", None) => {
|
|
||||||
ensure!(
|
|
||||||
already_set.contains(&OwnPublicKey),
|
|
||||||
"public-key file must be set"
|
|
||||||
);
|
|
||||||
ensure!(
|
|
||||||
already_set.contains(&OwnSecretKey),
|
|
||||||
"secret-key file must be set"
|
|
||||||
);
|
|
||||||
|
|
||||||
already_set.clear();
|
|
||||||
current_peer = Some(RosenpassPeer::default());
|
|
||||||
|
|
||||||
Peer
|
|
||||||
}
|
|
||||||
(OwnPublicKey, pk, None) => {
|
|
||||||
ensure!(
|
|
||||||
already_set.insert(OwnPublicKey),
|
|
||||||
"public-key was already set"
|
|
||||||
);
|
|
||||||
config.public_key = pk.into();
|
|
||||||
Own
|
|
||||||
}
|
|
||||||
(OwnSecretKey, sk, None) => {
|
|
||||||
ensure!(
|
|
||||||
already_set.insert(OwnSecretKey),
|
|
||||||
"secret-key was already set"
|
|
||||||
);
|
|
||||||
config.secret_key = sk.into();
|
|
||||||
Own
|
|
||||||
}
|
|
||||||
(OwnListen, l, None) => {
|
|
||||||
already_set.insert(OwnListen); // multiple listen directives are allowed
|
|
||||||
for socket_addr in l.to_socket_addrs()? {
|
|
||||||
config.listen.push(socket_addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
Own
|
|
||||||
}
|
|
||||||
(Peer | PeerWireguardExtraArgs, "peer", maybe_peer @ Some(_)) => {
|
|
||||||
// TODO check current peer
|
|
||||||
// commit current peer, create a new one
|
|
||||||
config.peers.push(maybe_peer.take().expect(p_exists));
|
|
||||||
|
|
||||||
already_set.clear();
|
|
||||||
current_peer = Some(RosenpassPeer::default());
|
|
||||||
|
|
||||||
Peer
|
|
||||||
}
|
|
||||||
(Peer, "public-key", Some(_)) => PeerPublicKey,
|
|
||||||
(Peer, "endpoint", Some(_)) => PeerEndpoint,
|
|
||||||
(Peer, "preshared-key", Some(_)) => PeerPsk,
|
|
||||||
(Peer, "outfile", Some(_)) => PeerOutfile,
|
|
||||||
(Peer, "wireguard", Some(_)) => PeerWireguardDev,
|
|
||||||
(PeerPublicKey, pk, Some(peer)) => {
|
|
||||||
ensure!(
|
|
||||||
already_set.insert(PeerPublicKey),
|
|
||||||
"public-key was already set"
|
|
||||||
);
|
|
||||||
peer.public_key = pk.into();
|
|
||||||
Peer
|
|
||||||
}
|
|
||||||
(PeerEndpoint, e, Some(peer)) => {
|
|
||||||
ensure!(already_set.insert(PeerEndpoint), "endpoint was already set");
|
|
||||||
peer.endpoint = Some(e.to_owned());
|
|
||||||
Peer
|
|
||||||
}
|
|
||||||
(PeerPsk, psk, Some(peer)) => {
|
|
||||||
ensure!(already_set.insert(PeerEndpoint), "peer psk was already set");
|
|
||||||
peer.pre_shared_key = Some(psk.into());
|
|
||||||
Peer
|
|
||||||
}
|
|
||||||
(PeerOutfile, of, Some(peer)) => {
|
|
||||||
ensure!(
|
|
||||||
already_set.insert(PeerOutfile),
|
|
||||||
"peer outfile was already set"
|
|
||||||
);
|
|
||||||
peer.key_out = Some(of.into());
|
|
||||||
Peer
|
|
||||||
}
|
|
||||||
(PeerWireguardDev, dev, Some(peer)) => {
|
|
||||||
ensure!(
|
|
||||||
already_set.insert(PeerWireguardDev),
|
|
||||||
"peer wireguard-dev was already set"
|
|
||||||
);
|
|
||||||
assert!(peer.wg.is_none());
|
|
||||||
peer.wg = Some(WireGuard {
|
|
||||||
device: dev.to_string(),
|
|
||||||
..Default::default()
|
|
||||||
});
|
|
||||||
|
|
||||||
PeerWireguardPeer
|
|
||||||
}
|
|
||||||
(PeerWireguardPeer, p, Some(peer)) => {
|
|
||||||
ensure!(
|
|
||||||
already_set.insert(PeerWireguardPeer),
|
|
||||||
"peer wireguard-peer was already set"
|
|
||||||
);
|
|
||||||
peer.wg.as_mut().expect(wg_exists).peer = p.to_string();
|
|
||||||
PeerWireguardExtraArgs
|
|
||||||
}
|
|
||||||
(PeerWireguardExtraArgs, arg, Some(peer)) => {
|
|
||||||
peer.wg
|
|
||||||
.as_mut()
|
|
||||||
.expect(wg_exists)
|
|
||||||
.extra_params
|
|
||||||
.push(arg.to_string());
|
|
||||||
PeerWireguardExtraArgs
|
|
||||||
}
|
|
||||||
|
|
||||||
// error cases
|
|
||||||
(Own, x, None) => {
|
|
||||||
bail!("unrecognised argument {x}");
|
|
||||||
}
|
|
||||||
(Own | OwnPublicKey | OwnSecretKey | OwnListen, _, Some(_)) => {
|
|
||||||
panic!("current_peer is not None while in Own* state, this must never happen")
|
|
||||||
}
|
|
||||||
|
|
||||||
(State::Peer, arg, Some(_)) => {
|
|
||||||
bail!("unrecongnised argument {arg}");
|
|
||||||
}
|
|
||||||
(
|
|
||||||
Peer
|
|
||||||
| PeerEndpoint
|
|
||||||
| PeerOutfile
|
|
||||||
| PeerPublicKey
|
|
||||||
| PeerPsk
|
|
||||||
| PeerWireguardDev
|
|
||||||
| PeerWireguardPeer
|
|
||||||
| PeerWireguardExtraArgs,
|
|
||||||
_,
|
|
||||||
None,
|
|
||||||
) => {
|
|
||||||
panic!("got peer options but no peer was created")
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(p) = current_peer {
|
|
||||||
// TODO ensure peer is propagated with sufficient information
|
|
||||||
config.peers.push(p);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(config)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Rosenpass {
|
|
||||||
/// Generate an example configuration
|
|
||||||
pub fn example_config() -> Self {
|
|
||||||
let peer = RosenpassPeer {
|
|
||||||
public_key: "rp-peer-public-key".into(),
|
|
||||||
endpoint: Some("my-peer.test:9999".into()),
|
|
||||||
exchange_command: [
|
|
||||||
"wg",
|
|
||||||
"set",
|
|
||||||
"wg0",
|
|
||||||
"peer",
|
|
||||||
"<PEER_ID>",
|
|
||||||
"preshared-key",
|
|
||||||
"/dev/stdin",
|
|
||||||
]
|
|
||||||
.into_iter()
|
|
||||||
.map(|x| x.to_string())
|
|
||||||
.collect(),
|
|
||||||
key_out: Some("rp-key-out".into()),
|
|
||||||
pre_shared_key: None,
|
|
||||||
wg: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
Self {
|
|
||||||
public_key: "rp-public-key".into(),
|
|
||||||
secret_key: "rp-secret-key".into(),
|
|
||||||
peers: vec![peer],
|
|
||||||
..Self::new("", "")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Verbosity {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::Quiet
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod test {
|
|
||||||
use std::net::IpAddr;
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
fn split_str(s: &str) -> Vec<String> {
|
|
||||||
s.split(" ").map(|s| s.to_string()).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_simple_cli_parse() {
|
|
||||||
let args = split_str(
|
|
||||||
"public-key /my/public-key secret-key /my/secret-key verbose \
|
|
||||||
listen 0.0.0.0:9999 peer public-key /peer/public-key endpoint \
|
|
||||||
peer.test:9999 outfile /peer/rp-out",
|
|
||||||
);
|
|
||||||
|
|
||||||
let config = Rosenpass::parse_args(args).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(config.public_key, PathBuf::from("/my/public-key"));
|
|
||||||
assert_eq!(config.secret_key, PathBuf::from("/my/secret-key"));
|
|
||||||
assert_eq!(config.verbosity, Verbosity::Verbose);
|
|
||||||
assert_eq!(
|
|
||||||
&config.listen,
|
|
||||||
&vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 9999)]
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
config.peers,
|
|
||||||
vec![RosenpassPeer {
|
|
||||||
public_key: PathBuf::from("/peer/public-key"),
|
|
||||||
endpoint: Some("peer.test:9999".into()),
|
|
||||||
pre_shared_key: None,
|
|
||||||
key_out: Some(PathBuf::from("/peer/rp-out")),
|
|
||||||
..Default::default()
|
|
||||||
}]
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_cli_parse_multiple_peers() {
|
|
||||||
let args = split_str(
|
|
||||||
"public-key /my/public-key secret-key /my/secret-key verbose \
|
|
||||||
peer public-key /peer-a/public-key endpoint \
|
|
||||||
peer.test:9999 outfile /peer-a/rp-out \
|
|
||||||
peer public-key /peer-b/public-key outfile /peer-b/rp-out",
|
|
||||||
);
|
|
||||||
|
|
||||||
let config = Rosenpass::parse_args(args).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(config.public_key, PathBuf::from("/my/public-key"));
|
|
||||||
assert_eq!(config.secret_key, PathBuf::from("/my/secret-key"));
|
|
||||||
assert_eq!(config.verbosity, Verbosity::Verbose);
|
|
||||||
assert!(&config.listen.is_empty());
|
|
||||||
assert_eq!(
|
|
||||||
config.peers,
|
|
||||||
vec![
|
|
||||||
RosenpassPeer {
|
|
||||||
public_key: PathBuf::from("/peer-a/public-key"),
|
|
||||||
endpoint: Some("peer.test:9999".into()),
|
|
||||||
pre_shared_key: None,
|
|
||||||
key_out: Some(PathBuf::from("/peer-a/rp-out")),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
RosenpassPeer {
|
|
||||||
public_key: PathBuf::from("/peer-b/public-key"),
|
|
||||||
endpoint: None,
|
|
||||||
pre_shared_key: None,
|
|
||||||
key_out: Some(PathBuf::from("/peer-b/rp-out")),
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
]
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,6 +1,3 @@
|
|||||||
//! Pseudo Random Functions (PRFs) with a tree-like label scheme which
|
|
||||||
//! ensures their uniqueness
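// A minimal illustrative sketch of the tree-like label scheme: each label is
// derived by mixing the parent label with the child's name, so different
// paths through the tree get different labels (up to hash collisions). std's
// DefaultHasher is only a stand-in for the real keyed PRF; names are made up.
fn _derive_label(parent: u64, child: &str) -> u64 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut h = DefaultHasher::new();
    parent.hash(&mut h);
    child.hash(&mut h);
    h.finish()
}

fn _label_tree_example() {
    let root = _derive_label(0, "protocol");
    let chaining = _derive_label(root, "chaining key");
    let handshake = _derive_label(root, "handshake");
    assert_ne!(chaining, handshake); // distinct children, distinct labels
}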
|
|
||||||
|
|
||||||
use {
|
use {
|
||||||
crate::{prftree::PrfTree, sodium::KEY_SIZE},
|
crate::{prftree::PrfTree, sodium::KEY_SIZE},
|
||||||
anyhow::Result,
|
anyhow::Result,
|
||||||
@@ -10,7 +7,7 @@ pub fn protocol() -> Result<PrfTree> {
|
|||||||
PrfTree::zero().mix("Rosenpass v1 mceliece460896 Kyber512 ChaChaPoly1305 BLAKE2s".as_bytes())
|
PrfTree::zero().mix("Rosenpass v1 mceliece460896 Kyber512 ChaChaPoly1305 BLAKE2s".as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO Use labels that can serve as identifiers
|
// TODO Use labels that can serve as idents
|
||||||
macro_rules! prflabel {
|
macro_rules! prflabel {
|
||||||
($base:ident, $name:ident, $($lbl:expr),* ) => {
|
($base:ident, $name:ident, $($lbl:expr),* ) => {
|
||||||
pub fn $name() -> Result<PrfTree> {
|
pub fn $name() -> Result<PrfTree> {
|
||||||
|
|||||||
@@ -3,11 +3,7 @@ pub mod util;
|
|||||||
#[macro_use]
|
#[macro_use]
|
||||||
pub mod sodium;
|
pub mod sodium;
|
||||||
pub mod coloring;
|
pub mod coloring;
|
||||||
#[rustfmt::skip]
|
|
||||||
pub mod labeled_prf;
|
pub mod labeled_prf;
|
||||||
pub mod app_server;
|
|
||||||
pub mod cli;
|
|
||||||
pub mod config;
|
|
||||||
pub mod msgs;
|
pub mod msgs;
|
||||||
pub mod pqkem;
|
pub mod pqkem;
|
||||||
pub mod prftree;
|
pub mod prftree;
|
||||||
@@ -19,7 +15,7 @@ pub enum RosenpassError {
|
|||||||
Oqs,
|
Oqs,
|
||||||
#[error("error from external library while calling OQS")]
|
#[error("error from external library while calling OQS")]
|
||||||
OqsExternalLib,
|
OqsExternalLib,
|
||||||
#[error("buffer size mismatch, required {required_size} but found {actual_size}")]
|
#[error("buffer size mismatch, required {required_size} but only found {actual_size}")]
|
||||||
BufferSizeMismatch {
|
BufferSizeMismatch {
|
||||||
required_size: usize,
|
required_size: usize,
|
||||||
actual_size: usize,
|
actual_size: usize,
|
||||||
|
|||||||
@@ -70,9 +70,8 @@ impl IprfBranch {
|
|||||||
|
|
||||||
impl SecretIprf {
|
impl SecretIprf {
|
||||||
fn prf_invoc(k: &[u8], d: &[u8]) -> SecretIprf {
|
fn prf_invoc(k: &[u8], d: &[u8]) -> SecretIprf {
|
||||||
mutating(SecretIprf(Secret::zero()), |r| {
|
mutating(SecretIprf(Secret::zero()), |r|
|
||||||
prf_into(k, d, r.secret_mut())
|
prf_into(k, d, r.secret_mut()))
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn from_key(k: Secret<N>) -> SecretIprf {
|
fn from_key(k: Secret<N>) -> SecretIprf {
|
||||||
|
|||||||
@@ -1,11 +1,262 @@
|
|||||||
use log::error;
|
use anyhow::{bail, ensure, Context, Result};
|
||||||
use rosenpass::{cli::Cli, sodium::sodium_init};
|
use log::{error, info};
|
||||||
use std::process::exit;
|
use rosenpass::{
|
||||||
|
attempt,
|
||||||
|
coloring::{Public, Secret},
|
||||||
|
multimatch,
|
||||||
|
pqkem::{SKEM, KEM},
|
||||||
|
protocol::{SPk, SSk, MsgBuf, PeerPtr, Server as CryptoServer, SymKey, Timing},
|
||||||
|
sodium::sodium_init,
|
||||||
|
util::{b64_reader, b64_writer, fmt_b64},
|
||||||
|
};
|
||||||
|
use std::{
|
||||||
|
fs::{File, OpenOptions},
|
||||||
|
io::{ErrorKind, Read, Write},
|
||||||
|
net::{SocketAddr, ToSocketAddrs, UdpSocket},
|
||||||
|
path::Path,
|
||||||
|
process::{exit, Command, Stdio},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Open a file writable
|
||||||
|
pub fn fopen_w<P: AsRef<Path>>(path: P) -> Result<File> {
|
||||||
|
Ok(OpenOptions::new()
|
||||||
|
.read(false)
|
||||||
|
.write(true)
|
||||||
|
.create(true)
|
||||||
|
.truncate(true)
|
||||||
|
.open(path)?)
|
||||||
|
}
|
||||||
|
/// Open a file readable
|
||||||
|
pub fn fopen_r<P: AsRef<Path>>(path: P) -> Result<File> {
|
||||||
|
Ok(OpenOptions::new()
|
||||||
|
.read(true)
|
||||||
|
.write(false)
|
||||||
|
.create(false)
|
||||||
|
.truncate(false)
|
||||||
|
.open(path)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait ReadExactToEnd {
|
||||||
|
fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R: Read> ReadExactToEnd for R {
|
||||||
|
fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()> {
|
||||||
|
let mut dummy = [0u8; 8];
|
||||||
|
self.read_exact(buf)?;
|
||||||
|
ensure!(self.read(&mut dummy)? == 0, "File too long!");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait LoadValue {
|
||||||
|
fn load<P: AsRef<Path>>(path: P) -> Result<Self>
|
||||||
|
where
|
||||||
|
Self: Sized;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait LoadValueB64 {
|
||||||
|
fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self>
|
||||||
|
where
|
||||||
|
Self: Sized;
|
||||||
|
}
|
||||||
|
|
||||||
|
trait StoreValue {
|
||||||
|
fn store<P: AsRef<Path>>(&self, path: P) -> Result<()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
trait StoreSecret {
|
||||||
|
unsafe fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: StoreValue> StoreSecret for T {
|
||||||
|
unsafe fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||||
|
self.store(path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<const N: usize> LoadValue for Secret<N> {
|
||||||
|
fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||||
|
let mut v = Self::random();
|
||||||
|
let p = path.as_ref();
|
||||||
|
fopen_r(p)?
|
||||||
|
.read_exact_to_end(v.secret_mut())
|
||||||
|
.with_context(|| format!("Could not load file {p:?}"))?;
|
||||||
|
Ok(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<const N: usize> LoadValueB64 for Secret<N> {
|
||||||
|
fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||||
|
let mut v = Self::random();
|
||||||
|
let p = path.as_ref();
|
||||||
|
// This might leave some fragments of the secret on the stack;
|
||||||
|
// in practice this is likely not a problem because the stack likely
|
||||||
|
// will be overwritten by something else soon but this is not exactly
|
||||||
|
// guaranteed. It would be possible to remedy this, but since the secret
|
||||||
|
// data will linger in the linux page cache anyways with the current
|
||||||
|
// implementation, going to great lengths to erase the secret here is
|
||||||
|
// not worth it right now.
|
||||||
|
b64_reader(&mut fopen_r(p)?)
|
||||||
|
.read_exact(v.secret_mut())
|
||||||
|
.with_context(|| format!("Could not load base64 file {p:?}"))?;
|
||||||
|
Ok(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<const N: usize> StoreSecret for Secret<N> {
|
||||||
|
unsafe fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||||
|
std::fs::write(path, self.secret())?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<const N: usize> LoadValue for Public<N> {
|
||||||
|
fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||||
|
let mut v = Self::random();
|
||||||
|
fopen_r(path)?.read_exact_to_end(&mut *v)?;
|
||||||
|
Ok(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<const N: usize> StoreValue for Public<N> {
|
||||||
|
fn store<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||||
|
std::fs::write(path, **self)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! bail_usage {
|
||||||
|
($args:expr, $($pt:expr),*) => {{
|
||||||
|
error!($($pt),*);
|
||||||
|
cmd_help()?;
|
||||||
|
exit(1);
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! ensure_usage {
|
||||||
|
($args:expr, $ck:expr, $($pt:expr),*) => {{
|
||||||
|
if !$ck {
|
||||||
|
bail_usage!($args, $($pt),*);
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! mandatory_opt {
|
||||||
|
($args:expr, $val:expr, $name:expr) => {{
|
||||||
|
ensure_usage!($args, $val.is_some(), "{0} option is mandatory", $name)
|
||||||
|
}};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ArgsWalker {
|
||||||
|
pub argv: Vec<String>,
|
||||||
|
pub off: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ArgsWalker {
|
||||||
|
pub fn get(&self) -> Option<&str> {
|
||||||
|
self.argv.get(self.off).map(|s| s as &str)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn prev(&mut self) -> Option<&str> {
|
||||||
|
assert!(self.off > 0);
|
||||||
|
self.off -= 1;
|
||||||
|
self.get()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::should_implement_trait)]
|
||||||
|
pub fn next(&mut self) -> Option<&str> {
|
||||||
|
assert!(self.todo() > 0);
|
||||||
|
self.off += 1;
|
||||||
|
self.get()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn opt(&mut self, dst: &mut Option<String>) -> Result<()> {
|
||||||
|
let cmd = &self.argv[self.off - 1];
|
||||||
|
ensure_usage!(&self, self.todo() > 0, "Option {} takes a value", cmd);
|
||||||
|
ensure_usage!(&self, dst.is_none(), "Cannot set {} multiple times.", cmd);
|
||||||
|
*dst = Some(String::from(self.next().unwrap()));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn todo(&self) -> usize {
|
||||||
|
self.argv.len() - self.off
|
||||||
|
}
|
||||||
|
}
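// A small illustrative sketch of driving ArgsWalker by hand. The argument
// vector is hypothetical; index 0 plays the role of the executable path and
// is skipped because iteration starts with a call to next().
fn _args_walker_example() {
    let mut args = ArgsWalker {
        argv: vec![
            "rosenpass".to_string(),
            "keygen".to_string(),
            "public-key".to_string(),
            "pk".to_string(),
        ],
        off: 0,
    };
    assert_eq!(args.next(), Some("keygen"));
    assert_eq!(args.next(), Some("public-key"));
    let mut dst: Option<String> = None;
    args.opt(&mut dst).unwrap(); // consumes the value following the option
    assert_eq!(dst.as_deref(), Some("pk"));
}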
|
||||||
|
|
||||||
|
#[derive(Default, Debug)]
|
||||||
|
pub struct WireguardOut {
|
||||||
|
// impl KeyOutput
|
||||||
|
dev: String,
|
||||||
|
pk: String,
|
||||||
|
extra_params: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Default, Debug)]
|
||||||
|
pub struct AppPeer {
|
||||||
|
pub outfile: Option<String>,
|
||||||
|
pub outwg: Option<WireguardOut>,
|
||||||
|
pub tx_addr: Option<SocketAddr>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum Verbosity {
|
||||||
|
Quiet,
|
||||||
|
Verbose,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Holds the state of the application, namely the external IO
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct AppServer {
|
||||||
|
pub crypt: CryptoServer,
|
||||||
|
pub sock: UdpSocket,
|
||||||
|
pub peers: Vec<AppPeer>,
|
||||||
|
pub verbosity: Verbosity,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Index based pointer to a Peer
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct AppPeerPtr(pub usize);
|
||||||
|
|
||||||
|
impl AppPeerPtr {
|
||||||
|
/// Takes an index based handle and returns the actual peer
|
||||||
|
pub fn lift(p: PeerPtr) -> Self {
|
||||||
|
Self(p.0)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an index based handle to one Peer
|
||||||
|
pub fn lower(&self) -> PeerPtr {
|
||||||
|
PeerPtr(self.0)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_app<'a>(&self, srv: &'a AppServer) -> &'a AppPeer {
|
||||||
|
&srv.peers[self.0]
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_app_mut<'a>(&self, srv: &'a mut AppServer) -> &'a mut AppPeer {
|
||||||
|
&mut srv.peers[self.0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum AppPollResult {
|
||||||
|
DeleteKey(AppPeerPtr),
|
||||||
|
SendInitiation(AppPeerPtr),
|
||||||
|
SendRetransmission(AppPeerPtr),
|
||||||
|
ReceivedMessage(usize, SocketAddr),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum KeyOutputReason {
|
||||||
|
Exchanged,
|
||||||
|
Stale,
|
||||||
|
}
|
||||||
|
|
||||||
/// Catches errors, prints them through the logger, then exits
|
/// Catches errors, prints them through the logger, then exits
|
||||||
pub fn main() {
|
pub fn main() {
|
||||||
env_logger::init();
|
env_logger::init();
|
||||||
match sodium_init().and_then(|()| Cli::run()) {
|
match rosenpass_main() {
|
||||||
Ok(_) => {}
|
Ok(_) => {}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
error!("{e}");
|
error!("{e}");
|
||||||
@@ -13,3 +264,383 @@ pub fn main() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Entry point to the whole program
|
||||||
|
pub fn rosenpass_main() -> Result<()> {
|
||||||
|
sodium_init()?;
|
||||||
|
|
||||||
|
let mut args = ArgsWalker {
|
||||||
|
argv: std::env::args().collect(),
|
||||||
|
off: 0, // skipping executable path
|
||||||
|
};
|
||||||
|
|
||||||
|
// Command parsing
|
||||||
|
match args.next() {
|
||||||
|
Some("help") | Some("-h") | Some("-help") | Some("--help") => cmd_help()?,
|
||||||
|
Some("keygen") => cmd_keygen(args)?,
|
||||||
|
Some("exchange") => cmd_exchange(args)?,
|
||||||
|
Some(cmd) => bail_usage!(&args, "No such command {}", cmd),
|
||||||
|
None => bail_usage!(&args, "Expected a command!"),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Print the usage information
|
||||||
|
pub fn cmd_help() -> Result<()> {
|
||||||
|
eprint!(include_str!("usage.md"), env!("CARGO_BIN_NAME"));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generate a keypair
|
||||||
|
pub fn cmd_keygen(mut args: ArgsWalker) -> Result<()> {
|
||||||
|
let mut sf: Option<String> = None;
|
||||||
|
let mut pf: Option<String> = None;
|
||||||
|
|
||||||
|
// Arg parsing
|
||||||
|
loop {
|
||||||
|
match args.next() {
|
||||||
|
Some("private-key") => args.opt(&mut sf)?,
|
||||||
|
Some("public-key") => args.opt(&mut pf)?,
|
||||||
|
Some(opt) => bail_usage!(&args, "Unknown option `{}`", opt),
|
||||||
|
None => break,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
mandatory_opt!(&args, sf, "private-key");
|
||||||
|
mandatory_opt!(&args, pf, "private-key");
|
||||||
|
|
||||||
|
// Cmd
|
||||||
|
let (mut ssk, mut spk) = (SSk::random(), SPk::random());
|
||||||
|
unsafe {
|
||||||
|
SKEM::keygen(ssk.secret_mut(), spk.secret_mut())?;
|
||||||
|
ssk.store_secret(sf.unwrap())?;
|
||||||
|
spk.store_secret(pf.unwrap())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_exchange(mut args: ArgsWalker) -> Result<()> {
|
||||||
|
// Argument parsing
|
||||||
|
let mut sf: Option<String> = None;
|
||||||
|
let mut pf: Option<String> = None;
|
||||||
|
let mut listen: Option<String> = None;
|
||||||
|
let mut verbosity = Verbosity::Quiet;
|
||||||
|
|
||||||
|
// Global parameters
|
||||||
|
loop {
|
||||||
|
match args.next() {
|
||||||
|
Some("private-key") => args.opt(&mut sf)?,
|
||||||
|
Some("public-key") => args.opt(&mut pf)?,
|
||||||
|
Some("listen") => args.opt(&mut listen)?,
|
||||||
|
Some("verbose") => {
|
||||||
|
verbosity = Verbosity::Verbose;
|
||||||
|
}
|
||||||
|
Some("peer") => {
|
||||||
|
args.prev();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Some(opt) => bail_usage!(&args, "Unknown option `{}`", opt),
|
||||||
|
None => break,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
mandatory_opt!(&args, sf, "private-key");
|
||||||
|
mandatory_opt!(&args, pf, "public-key");
|
||||||
|
|
||||||
|
let mut srv = std::boxed::Box::<AppServer>::new(AppServer::new(
|
||||||
|
// sk, pk, addr
|
||||||
|
SSk::load(&sf.unwrap())?,
|
||||||
|
SPk::load(&pf.unwrap())?,
|
||||||
|
listen.as_deref().unwrap_or("[0::0]:0"),
|
||||||
|
verbosity,
|
||||||
|
)?);
|
||||||
|
|
||||||
|
// Peer parameters
|
||||||
|
'_parseAllPeers: while args.todo() > 0 {
|
||||||
|
let mut pf: Option<String> = None;
|
||||||
|
let mut outfile: Option<String> = None;
|
||||||
|
let mut outwg: Option<WireguardOut> = None;
|
||||||
|
let mut endpoint: Option<String> = None;
|
||||||
|
let mut pskf: Option<String> = None;
|
||||||
|
|
||||||
|
args.next(); // skip "peer" starter itself
|
||||||
|
|
||||||
|
'parseOnePeer: loop {
|
||||||
|
match args.next() {
|
||||||
|
// Done with this peer
|
||||||
|
Some("peer") => {
|
||||||
|
args.prev();
|
||||||
|
break 'parseOnePeer;
|
||||||
|
}
|
||||||
|
None => break 'parseOnePeer,
|
||||||
|
// Options
|
||||||
|
Some("public-key") => args.opt(&mut pf)?,
|
||||||
|
Some("endpoint") => args.opt(&mut endpoint)?,
|
||||||
|
Some("preshared-key") => args.opt(&mut pskf)?,
|
||||||
|
Some("outfile") => args.opt(&mut outfile)?,
|
||||||
|
// Wireguard out
|
||||||
|
Some("wireguard") => {
|
||||||
|
ensure_usage!(
|
||||||
|
&args,
|
||||||
|
outwg.is_none(),
|
||||||
|
"Cannot set wireguard output for the same peer multiple times."
|
||||||
|
);
|
||||||
|
ensure_usage!(&args, args.todo() >= 2, "Option wireguard takes to values");
|
||||||
|
let dev = String::from(args.next().unwrap());
|
||||||
|
let pk = String::from(args.next().unwrap());
|
||||||
|
let wg = outwg.insert(WireguardOut {
|
||||||
|
dev,
|
||||||
|
pk,
|
||||||
|
extra_params: Vec::new(),
|
||||||
|
});
|
||||||
|
'_parseWgOutExtra: loop {
|
||||||
|
match args.next() {
|
||||||
|
Some("peer") => {
|
||||||
|
args.prev();
|
||||||
|
break 'parseOnePeer;
|
||||||
|
}
|
||||||
|
None => break 'parseOnePeer,
|
||||||
|
Some(xtra) => wg.extra_params.push(xtra.to_string()),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Invalid
|
||||||
|
Some(opt) => bail_usage!(&args, "Unknown peer option `{}`", opt),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
mandatory_opt!(&args, pf, "private-key");
|
||||||
|
ensure_usage!(
|
||||||
|
&args,
|
||||||
|
outfile.is_some() || outwg.is_some(),
|
||||||
|
"Either of the outfile or wireguard option is mandatory"
|
||||||
|
);
|
||||||
|
|
||||||
|
let tx_addr = endpoint
|
||||||
|
.map(|e| {
|
||||||
|
e.to_socket_addrs()?
|
||||||
|
.next()
|
||||||
|
.context("Expected address in endpoint parameter")
|
||||||
|
})
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
srv.add_peer(
|
||||||
|
// psk, pk, outfile, outwg, tx_addr
|
||||||
|
pskf.map(SymKey::load_b64).transpose()?,
|
||||||
|
SPk::load(&pf.unwrap())?,
|
||||||
|
outfile,
|
||||||
|
outwg,
|
||||||
|
tx_addr,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
srv.listen_loop()
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AppServer {
|
||||||
|
pub fn new<A: ToSocketAddrs>(
|
||||||
|
sk: SSk,
|
||||||
|
pk: SPk,
|
||||||
|
addr: A,
|
||||||
|
verbosity: Verbosity,
|
||||||
|
) -> Result<Self> {
|
||||||
|
Ok(Self {
|
||||||
|
crypt: CryptoServer::new(sk, pk),
|
||||||
|
sock: UdpSocket::bind(addr)?,
|
||||||
|
peers: Vec::new(),
|
||||||
|
verbosity,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn verbose(&self) -> bool {
|
||||||
|
matches!(self.verbosity, Verbosity::Verbose)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn add_peer(
|
||||||
|
&mut self,
|
||||||
|
psk: Option<SymKey>,
|
||||||
|
pk: SPk,
|
||||||
|
outfile: Option<String>,
|
||||||
|
outwg: Option<WireguardOut>,
|
||||||
|
tx_addr: Option<SocketAddr>,
|
||||||
|
) -> Result<AppPeerPtr> {
|
||||||
|
let PeerPtr(pn) = self.crypt.add_peer(psk, pk)?;
|
||||||
|
assert!(pn == self.peers.len());
|
||||||
|
self.peers.push(AppPeer {
|
||||||
|
outfile,
|
||||||
|
outwg,
|
||||||
|
tx_addr,
|
||||||
|
});
|
||||||
|
Ok(AppPeerPtr(pn))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn listen_loop(&mut self) -> Result<()> {
|
||||||
|
const INIT_SLEEP: f64 = 0.01;
|
||||||
|
const MAX_FAILURES: i32 = 10;
|
||||||
|
let mut failure_cnt = 0;
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let msgs_processed = 0usize;
|
||||||
|
let err = match self.event_loop() {
|
||||||
|
Ok(()) => return Ok(()),
|
||||||
|
Err(e) => e,
|
||||||
|
};
|
||||||
|
|
||||||
|
// This should not happen…
|
||||||
|
failure_cnt = if msgs_processed > 0 {
|
||||||
|
0
|
||||||
|
} else {
|
||||||
|
failure_cnt + 1
|
||||||
|
};
|
||||||
|
let sleep = INIT_SLEEP * 2.0f64.powf(f64::from(failure_cnt - 1));
|
||||||
|
let tries_left = MAX_FAILURES - (failure_cnt - 1);
|
||||||
|
error!(
|
||||||
|
"unexpected error after processing {} messages: {:?} {}",
|
||||||
|
msgs_processed,
|
||||||
|
err,
|
||||||
|
err.backtrace()
|
||||||
|
);
|
||||||
|
if tries_left > 0 {
|
||||||
|
error!("reinitializing networking in {sleep}! {tries_left} tries left.");
|
||||||
|
std::thread::sleep(self.crypt.timebase.dur(sleep));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
bail!("too many network failures");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn event_loop(&mut self) -> Result<()> {
|
||||||
|
let (mut rx, mut tx) = (MsgBuf::zero(), MsgBuf::zero());
|
||||||
|
macro_rules! tx_maybe_with {
|
||||||
|
($peer:expr, $fn:expr) => {
|
||||||
|
attempt!({
|
||||||
|
let p = $peer.get_app(self);
|
||||||
|
if let Some(addr) = p.tx_addr {
|
||||||
|
let len = $fn()?;
|
||||||
|
self.sock.send_to(&tx[..len], addr)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
loop {
|
||||||
|
use rosenpass::protocol::HandleMsgResult;
|
||||||
|
use AppPollResult::*;
|
||||||
|
use KeyOutputReason::*;
|
||||||
|
match self.poll(&mut *rx)? {
|
||||||
|
SendInitiation(peer) => tx_maybe_with!(peer, || self
|
||||||
|
.crypt
|
||||||
|
.initiate_handshake(peer.lower(), &mut *tx))?,
|
||||||
|
SendRetransmission(peer) => tx_maybe_with!(peer, || self
|
||||||
|
.crypt
|
||||||
|
.retransmit_handshake(peer.lower(), &mut *tx))?,
|
||||||
|
DeleteKey(peer) => self.output_key(peer, Stale, &SymKey::random())?,
|
||||||
|
|
||||||
|
ReceivedMessage(len, addr) => {
|
||||||
|
multimatch!(self.crypt.handle_msg(&rx[..len], &mut *tx),
|
||||||
|
Err(ref e) =>
|
||||||
|
self.verbose().then(||
|
||||||
|
info!("error processing incoming message from {:?}: {:?} {}", addr, e, e.backtrace())),
|
||||||
|
|
||||||
|
Ok(HandleMsgResult { resp: Some(len), .. }) => {
|
||||||
|
self.sock.send_to(&tx[0..len], addr)?
|
||||||
|
},
|
||||||
|
|
||||||
|
Ok(HandleMsgResult { exchanged_with: Some(p), .. }) => {
|
||||||
|
let ap = AppPeerPtr::lift(p);
|
||||||
|
ap.get_app_mut(self).tx_addr = Some(addr);
|
||||||
|
// TODO: Maybe we should rather call the key "rosenpass output"?
|
||||||
|
self.output_key(ap, Exchanged, &self.crypt.osk(p)?)?;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn output_key(&self, peer: AppPeerPtr, why: KeyOutputReason, key: &SymKey) -> Result<()> {
|
||||||
|
let peerid = peer.lower().get(&self.crypt).pidt()?;
|
||||||
|
let ap = peer.get_app(self);
|
||||||
|
|
||||||
|
if self.verbose() {
|
||||||
|
let msg = match why {
|
||||||
|
KeyOutputReason::Exchanged => "Exchanged key with peer",
|
||||||
|
KeyOutputReason::Stale => "Erasing outdated key from peer",
|
||||||
|
};
|
||||||
|
info!("{} {}", msg, fmt_b64(&*peerid));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(of) = ap.outfile.as_ref() {
|
||||||
|
// This might leave some fragments of the secret on the stack;
|
||||||
|
// in practice this is likely not a problem because the stack likely
|
||||||
|
// will be overwritten by something else soon but this is not exactly
|
||||||
|
// guaranteed. It would be possible to remedy this, but since the secret
|
||||||
|
// data will linger in the linux page cache anyways with the current
|
||||||
|
// implementation, going to great lengths to erase the secret here is
|
||||||
|
// not worth it right now.
|
||||||
|
b64_writer(fopen_w(of)?).write_all(key.secret())?;
|
||||||
|
let why = match why {
|
||||||
|
KeyOutputReason::Exchanged => "exchanged",
|
||||||
|
KeyOutputReason::Stale => "stale",
|
||||||
|
};
|
||||||
|
println!(
|
||||||
|
"output-key peer {} key-file {} {}",
|
||||||
|
fmt_b64(&*peerid),
|
||||||
|
of,
|
||||||
|
why
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(owg) = ap.outwg.as_ref() {
|
||||||
|
let child = Command::new("wg")
|
||||||
|
.arg("set")
|
||||||
|
.arg(&owg.dev)
|
||||||
|
.arg("peer")
|
||||||
|
.arg(&owg.pk)
|
||||||
|
.arg("preshared-key")
|
||||||
|
.arg("/dev/stdin")
|
||||||
|
.stdin(Stdio::piped())
|
||||||
|
.args(&owg.extra_params)
|
||||||
|
.spawn()?;
|
||||||
|
b64_writer(child.stdin.unwrap()).write_all(key.secret())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn poll(&mut self, rx_buf: &mut [u8]) -> Result<AppPollResult> {
|
||||||
|
use rosenpass::protocol::PollResult as C;
|
||||||
|
use AppPollResult as A;
|
||||||
|
loop {
|
||||||
|
return Ok(match self.crypt.poll()? {
|
||||||
|
C::DeleteKey(PeerPtr(no)) => A::DeleteKey(AppPeerPtr(no)),
|
||||||
|
C::SendInitiation(PeerPtr(no)) => A::SendInitiation(AppPeerPtr(no)),
|
||||||
|
C::SendRetransmission(PeerPtr(no)) => A::SendRetransmission(AppPeerPtr(no)),
|
||||||
|
C::Sleep(timeout) => match self.try_recv(rx_buf, timeout)? {
|
||||||
|
Some((len, addr)) => A::ReceivedMessage(len, addr),
|
||||||
|
None => continue,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn try_recv(&self, buf: &mut [u8], timeout: Timing) -> Result<Option<(usize, SocketAddr)>> {
|
||||||
|
if timeout == 0.0 {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
self.sock
|
||||||
|
.set_read_timeout(Some(Duration::from_secs_f64(timeout)))?;
|
||||||
|
match self.sock.recv_from(buf) {
|
||||||
|
Ok(x) => Ok(Some(x)),
|
||||||
|
Err(e) => match e.kind() {
|
||||||
|
ErrorKind::WouldBlock => Ok(None),
|
||||||
|
ErrorKind::TimedOut => Ok(None),
|
||||||
|
_ => Err(anyhow::Error::new(e)),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
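// A small illustrative note on try_recv above: a UdpSocket read timeout
// surfaces as WouldBlock on Unix-like systems but may surface as TimedOut on
// Windows, so both kinds have to count as "nothing received" rather than as
// an error.
fn _timeout_is_not_an_error(e: std::io::Error) -> Option<std::io::Error> {
    match e.kind() {
        ErrorKind::WouldBlock | ErrorKind::TimedOut => None, // nothing received
        _ => Some(e),                                        // a real I/O error
    }
}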
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
//! Data structures representing the messages going over the wire
|
//! # Messages
|
||||||
//!
|
//!
|
||||||
//! This module contains de-/serialization of the protocol's messages. That's kind
|
//! This module contains data structures that help in the
|
||||||
//! of a lie, since no actual ser/de happens. Instead, the structures offer views
|
//! serialization/deserialization (ser/de) of messages. That's kind of a lie,
|
||||||
//! into mutable byte slices (`&mut [u8]`), allowing to modify the fields of an
|
//! since no actual ser/de happens. Instead, the structures offer views into
|
||||||
|
//! mutable byte slices (`&mut [u8]`), allowing to modify the fields of an
|
||||||
//! always serialized instance of the data in question. This is closely related
|
//! always serialized instance of the data in question. This is closely related
|
||||||
//! to the concept of lenses in functional programming; more on that here:
|
//! to the concept of lenses in functional programming; more on that here:
|
||||||
//! [https://sinusoid.es/misc/lager/lenses.pdf](https://sinusoid.es/misc/lager/lenses.pdf)
|
//! [https://sinusoid.es/misc/lager/lenses.pdf](https://sinusoid.es/misc/lager/lenses.pdf)
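// A minimal illustrative sketch of the "view into a byte slice" idea: a lens
// wraps an already-serialized buffer and hands out typed accessors for
// fixed-offset fields instead of copying data out. The field names and sizes
// below are invented for the example.
struct ToyEnvelope<'a>(&'a mut [u8]);

impl<'a> ToyEnvelope<'a> {
    const LEN: usize = 1 + 4; // msg_type + session id

    fn new(buf: &'a mut [u8]) -> Option<Self> {
        (buf.len() == Self::LEN).then_some(Self(buf))
    }
    fn msg_type(&mut self) -> &mut u8 {
        &mut self.0[0]
    }
    fn sid(&mut self) -> &mut [u8] {
        &mut self.0[1..5]
    }
}

fn _lens_example() {
    let mut wire = [0u8; 5];
    let mut view = ToyEnvelope::new(&mut wire).expect("buffer has the right size");
    *view.msg_type() = 0x81;
    view.sid().copy_from_slice(&[1, 2, 3, 4]);
    assert_eq!(wire, [0x81, 1, 2, 3, 4]); // edits landed in the underlying buffer
}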
|
||||||
@@ -143,7 +144,7 @@ macro_rules! data_lense(
|
|||||||
pub fn check_size(len: usize) -> Result<(), RosenpassError>{
|
pub fn check_size(len: usize) -> Result<(), RosenpassError>{
|
||||||
let required_size = $( $len + )+ 0;
|
let required_size = $( $len + )+ 0;
|
||||||
let actual_size = len;
|
let actual_size = len;
|
||||||
if required_size != actual_size {
|
if required_size < actual_size {
|
||||||
Err(RosenpassError::BufferSizeMismatch {
|
Err(RosenpassError::BufferSizeMismatch {
|
||||||
required_size,
|
required_size,
|
||||||
actual_size,
|
actual_size,
|
||||||
@@ -199,53 +200,23 @@ macro_rules! data_lense(
|
|||||||
type __ContainerType;
|
type __ContainerType;
|
||||||
|
|
||||||
/// Create a lense to the byte slice
|
/// Create a lense to the byte slice
|
||||||
fn [< $type:snake >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError>;
|
fn [< $type:snake >] $(< $($generic),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError>;
|
||||||
|
|
||||||
/// Create a lense to the byte slice, automatically truncating oversized buffers
|
|
||||||
fn [< $type:snake _ truncating >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError>;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> [< $type Ext >] for &'a [u8] {
|
impl<'a> [< $type Ext >] for &'a [u8] {
|
||||||
type __ContainerType = &'a [u8];
|
type __ContainerType = &'a [u8];
|
||||||
|
|
||||||
fn [< $type:snake >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
fn [< $type:snake >] $(< $($generic),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||||
$type::<Self::__ContainerType, $( $($generic),+ )? >::check_size(self.len())?;
|
|
||||||
Ok($type ( self, $( $( ::core::marker::PhantomData::<$generic> ),+ )? ))
|
Ok($type ( self, $( $( ::core::marker::PhantomData::<$generic> ),+ )? ))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn [< $type:snake _ truncating >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
|
||||||
let required_size = $( $len + )+ 0;
|
|
||||||
let actual_size = self.len();
|
|
||||||
if actual_size < required_size {
|
|
||||||
return Err(RosenpassError::BufferSizeMismatch {
|
|
||||||
required_size,
|
|
||||||
actual_size,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
[< $type Ext >]::[< $type:snake >](&self[..required_size])
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> [< $type Ext >] for &'a mut [u8] {
|
impl<'a> [< $type Ext >] for &'a mut [u8] {
|
||||||
type __ContainerType = &'a mut [u8];
|
type __ContainerType = &'a mut [u8];
|
||||||
fn [< $type:snake >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
|
||||||
$type::<Self::__ContainerType, $( $($generic),+ )? >::check_size(self.len())?;
|
fn [< $type:snake >] $(< $($generic),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
||||||
Ok($type ( self, $( $( ::core::marker::PhantomData::<$generic> ),+ )? ))
|
Ok($type ( self, $( $( ::core::marker::PhantomData::<$generic> ),+ )? ))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn [< $type:snake _ truncating >] $(< $($generic : LenseView),* >)? (self) -> Result< $type<Self::__ContainerType, $( $($generic),+ )? >, RosenpassError> {
|
|
||||||
let required_size = $( $len + )+ 0;
|
|
||||||
let actual_size = self.len();
|
|
||||||
if actual_size < required_size {
|
|
||||||
return Err(RosenpassError::BufferSizeMismatch {
|
|
||||||
required_size,
|
|
||||||
actual_size,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
[< $type Ext >]::[< $type:snake >](&mut self[..required_size])
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
);
|
);
|
||||||
@@ -273,9 +244,9 @@ data_lense! { InitHello :=
|
|||||||
/// Randomly generated connection id
|
/// Randomly generated connection id
|
||||||
sidi: 4,
|
sidi: 4,
|
||||||
/// Kyber 512 Ephemeral Public Key
|
/// Kyber 512 Ephemeral Public Key
|
||||||
epki: EphemeralKEM::PK_LEN,
|
epki: EKEM::PK_LEN,
|
||||||
/// Classic McEliece Ciphertext
|
/// Classic McEliece Ciphertext
|
||||||
sctr: StaticKEM::CT_LEN,
|
sctr: SKEM::CT_LEN,
|
||||||
/// Encrypted: 16-byte hash of McEliece initiator static key
|
/// Encrypted: 16-byte hash of McEliece initiator static key
|
||||||
pidic: sodium::AEAD_TAG_LEN + 32,
|
pidic: sodium::AEAD_TAG_LEN + 32,
|
||||||
/// Encrypted TAI64N Time Stamp (against replay attacks)
|
/// Encrypted TAI64N Time Stamp (against replay attacks)
|
||||||
@@ -288,9 +259,9 @@ data_lense! { RespHello :=
|
|||||||
/// Copied from InitHello
|
/// Copied from InitHello
|
||||||
sidi: 4,
|
sidi: 4,
|
||||||
/// Kyber 512 Ephemeral Ciphertext
|
/// Kyber 512 Ephemeral Ciphertext
|
||||||
ecti: EphemeralKEM::CT_LEN,
|
ecti: EKEM::CT_LEN,
|
||||||
/// Classic McEliece Ciphertext
|
/// Classic McEliece Ciphertext
|
||||||
scti: StaticKEM::CT_LEN,
|
scti: SKEM::CT_LEN,
|
||||||
/// Empty encrypted message (just an auth tag)
|
/// Empty encrypted message (just an auth tag)
|
||||||
auth: sodium::AEAD_TAG_LEN,
|
auth: sodium::AEAD_TAG_LEN,
|
||||||
/// Responders handshake state in encrypted form
|
/// Responders handshake state in encrypted form
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
//! Traits and implementations for Key Encapsulation Mechanisms (KEMs)
|
//! This module contains Traits and implementations for Key Encapsulation
|
||||||
//!
|
//! Mechanisms (KEM). KEMs are the interface provided by almost all post-quantum
|
||||||
//! KEMs are the interface provided by almost all post-quantum
|
|
||||||
//! secure key exchange mechanisms.
|
//! secure key exchange mechanisms.
|
||||||
//!
|
//!
|
||||||
//! Conceptually KEMs are akin to public-key encryption, but instead of encrypting
|
//! Conceptually KEMs are akin to public-key encryption, but instead of encrypting
|
||||||
@@ -8,7 +7,7 @@
|
|||||||
//!
|
//!
|
||||||
//! encapsulation.
|
//! encapsulation.
|
||||||
//! The [KEM] Trait describes the basic API offered by a Key Encapsulation
|
//! The [KEM] Trait describes the basic API offered by a Key Encapsulation
|
||||||
//! Mechanism. Two implementations for it are provided, [StaticKEM] and [EphemeralKEM].
|
//! Mechanism. Two implementations for it are provided, [SKEM] and [EKEM].
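// A minimal illustrative sketch of a KEM round trip as described above. The
// trait is a simplified stand-in for the crate's KEM trait; the real one
// operates on caller-provided byte buffers with fixed per-algorithm lengths
// and reports errors instead of panicking.
trait ToyKem {
    fn keygen() -> (Vec<u8>, Vec<u8>); // (secret key, public key)
    fn encaps(pk: &[u8]) -> (Vec<u8>, Vec<u8>); // (shared key, ciphertext)
    fn decaps(sk: &[u8], ct: &[u8]) -> Vec<u8>; // shared key
}

fn _kem_round_trip<K: ToyKem>() {
    let (sk, pk) = K::keygen(); // run once by the key owner
    let (shk_sender, ct) = K::encaps(&pk); // run by whoever wants a shared key
    let shk_owner = K::decaps(&sk, &ct); // run by the key owner on receipt
    assert_eq!(shk_sender, shk_owner); // both ends now hold the same secret
}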
|
||||||
|
|
||||||
use crate::{RosenpassError, RosenpassMaybeError};
|
use crate::{RosenpassError, RosenpassMaybeError};
|
||||||
|
|
||||||
@@ -51,7 +50,7 @@ pub trait KEM {
|
|||||||
/// Classic McEliece is chosen because of its high security margin and its small
|
/// Classic McEliece is chosen because of its high security margin and its small
|
||||||
/// ciphertexts. The public keys are humongous, but (being static keys) they are never transmitted over
|
/// ciphertexts. The public keys are humongous, but (being static keys) they are never transmitted over
|
||||||
/// the wire so this is not a big problem.
|
/// the wire so this is not a big problem.
|
||||||
pub struct StaticKEM;
|
pub struct SKEM;
|
||||||
|
|
||||||
/// # Safety
|
/// # Safety
|
||||||
///
|
///
|
||||||
@@ -66,7 +65,7 @@ pub struct StaticKEM;
|
|||||||
/// to only check that the buffers are big enough, allowing them to be even
|
/// to only check that the buffers are big enough, allowing them to be even
|
||||||
/// bigger. However, from a correctness point of view it does not make sense to
|
/// bigger. However, from a correctness point of view it does not make sense to
|
||||||
/// allow bigger buffers.
|
/// allow bigger buffers.
|
||||||
impl KEM for StaticKEM {
|
impl KEM for SKEM {
|
||||||
const SK_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_secret_key as usize;
|
const SK_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_secret_key as usize;
|
||||||
const PK_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_public_key as usize;
|
const PK_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_public_key as usize;
|
||||||
const CT_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_ciphertext as usize;
|
const CT_LEN: usize = oqs_sys::kem::OQS_KEM_classic_mceliece_460896_length_ciphertext as usize;
|
||||||
@@ -120,7 +119,7 @@ impl KEM for StaticKEM {
|
|||||||
/// wireguard paper claimed that CPA security would be sufficient. Nonetheless we choose kyber
|
/// wireguard paper claimed that CPA security would be sufficient. Nonetheless we choose kyber
|
||||||
/// which provides CCA security since there are no publicly vetted KEMs out there which provide
|
/// which provides CCA security since there are no publicly vetted KEMs out there which provide
|
||||||
/// only CPA security.
|
/// only CPA security.
|
||||||
pub struct EphemeralKEM;
|
pub struct EKEM;
|
||||||
|
|
||||||
/// # Safety
|
/// # Safety
|
||||||
///
|
///
|
||||||
@@ -135,7 +134,7 @@ pub struct EphemeralKEM;
|
|||||||
/// to only check that the buffers are big enough, allowing them to be even
|
/// to only check that the buffers are big enough, allowing them to be even
|
||||||
/// bigger. However, from a correctness point of view it does not make sense to
|
/// bigger. However, from a correctness point of view it does not make sense to
|
||||||
/// allow bigger buffers.
|
/// allow bigger buffers.
|
||||||
impl KEM for EphemeralKEM {
|
impl KEM for EKEM {
|
||||||
const SK_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_secret_key as usize;
|
const SK_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_secret_key as usize;
|
||||||
const PK_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_public_key as usize;
|
const PK_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_public_key as usize;
|
||||||
const CT_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_ciphertext as usize;
|
const CT_LEN: usize = oqs_sys::kem::OQS_KEM_kyber_512_length_ciphertext as usize;
|
||||||
@@ -144,7 +143,8 @@ impl KEM for EphemeralKEM {
|
|||||||
RosenpassError::check_buffer_size(sk.len(), Self::SK_LEN)?;
|
RosenpassError::check_buffer_size(sk.len(), Self::SK_LEN)?;
|
||||||
RosenpassError::check_buffer_size(pk.len(), Self::PK_LEN)?;
|
RosenpassError::check_buffer_size(pk.len(), Self::PK_LEN)?;
|
||||||
unsafe {
|
unsafe {
|
||||||
oqs_sys::kem::OQS_KEM_kyber_512_keypair(pk.as_mut_ptr(), sk.as_mut_ptr()).to_rg_error()
|
oqs_sys::kem::OQS_KEM_kyber_512_keypair(pk.as_mut_ptr(), sk.as_mut_ptr())
|
||||||
|
.to_rg_error()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fn encaps(shk: &mut [u8], ct: &mut [u8], pk: &[u8]) -> Result<(), RosenpassError> {
|
fn encaps(shk: &mut [u8], ct: &mut [u8], pk: &[u8]) -> Result<(), RosenpassError> {
|
||||||
@@ -152,7 +152,11 @@ impl KEM for EphemeralKEM {
|
|||||||
RosenpassError::check_buffer_size(ct.len(), Self::CT_LEN)?;
|
RosenpassError::check_buffer_size(ct.len(), Self::CT_LEN)?;
|
||||||
RosenpassError::check_buffer_size(pk.len(), Self::PK_LEN)?;
|
RosenpassError::check_buffer_size(pk.len(), Self::PK_LEN)?;
|
||||||
unsafe {
|
unsafe {
|
||||||
oqs_sys::kem::OQS_KEM_kyber_512_encaps(ct.as_mut_ptr(), shk.as_mut_ptr(), pk.as_ptr())
|
oqs_sys::kem::OQS_KEM_kyber_512_encaps(
|
||||||
|
ct.as_mut_ptr(),
|
||||||
|
shk.as_mut_ptr(),
|
||||||
|
pk.as_ptr(),
|
||||||
|
)
|
||||||
.to_rg_error()
|
.to_rg_error()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -161,7 +165,11 @@ impl KEM for EphemeralKEM {
|
|||||||
RosenpassError::check_buffer_size(sk.len(), Self::SK_LEN)?;
|
RosenpassError::check_buffer_size(sk.len(), Self::SK_LEN)?;
|
||||||
RosenpassError::check_buffer_size(ct.len(), Self::CT_LEN)?;
|
RosenpassError::check_buffer_size(ct.len(), Self::CT_LEN)?;
|
||||||
unsafe {
|
unsafe {
|
||||||
oqs_sys::kem::OQS_KEM_kyber_512_decaps(shk.as_mut_ptr(), ct.as_ptr(), sk.as_ptr())
|
oqs_sys::kem::OQS_KEM_kyber_512_decaps(
|
||||||
|
shk.as_mut_ptr(),
|
||||||
|
ct.as_ptr(),
|
||||||
|
sk.as_ptr(),
|
||||||
|
)
|
||||||
.to_rg_error()
|
.to_rg_error()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
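As a reading aid for the trait described in these doc comments: the [KEM] trait exposes key generation, encapsulation and decapsulation over plain byte buffers whose sizes come from associated constants, with the check_buffer_size calls enforcing them. The sketch below is not the crate's API; it uses a stand-in trait with the shape visible in the hunks above (a shared-key length constant is assumed and named SHK_LEN here, and the error type is simplified), just to illustrate how a caller sizes the buffers and runs a round trip.

// Illustrative stand-in for the KEM trait shape shown in the hunks above.
// SHK_LEN and the error type are assumptions; only SK_LEN, PK_LEN and CT_LEN
// appear in this diff, and the real crate uses RosenpassError.
pub trait Kem {
    const SK_LEN: usize;
    const PK_LEN: usize;
    const CT_LEN: usize;
    const SHK_LEN: usize;

    fn keygen(sk: &mut [u8], pk: &mut [u8]) -> Result<(), &'static str>;
    fn encaps(shk: &mut [u8], ct: &mut [u8], pk: &[u8]) -> Result<(), &'static str>;
    fn decaps(shk: &mut [u8], ct: &[u8], sk: &[u8]) -> Result<(), &'static str>;
}

// A round trip: one side encapsulates against the other's public key, the
// other side decapsulates the ciphertext, and both end up with the same
// shared key. Buffer sizes come from the associated constants.
fn round_trip<K: Kem>() -> Result<(), &'static str> {
    let (mut sk, mut pk) = (vec![0u8; K::SK_LEN], vec![0u8; K::PK_LEN]);
    K::keygen(&mut sk, &mut pk)?;

    let (mut ct, mut shk_a) = (vec![0u8; K::CT_LEN], vec![0u8; K::SHK_LEN]);
    K::encaps(&mut shk_a, &mut ct, &pk)?;

    let mut shk_b = vec![0u8; K::SHK_LEN];
    K::decaps(&mut shk_b, &ct, &sk)?;

    assert_eq!(shk_a, shk_b);
    Ok(())
}

Whether the concrete implementations are called StaticKEM/EphemeralKEM or SKEM/EKEM depends on which side of this diff is built.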
@@ -1,4 +1,3 @@
-//! Implementation of the tree-like structure used for the label derivation in [labeled_prf](crate::labeled_prf)
 use {
     crate::{
         coloring::Secret,

(File diff suppressed because it is too large.)
@@ -1,5 +1,3 @@
-//! Bindings and helpers for accessing libsodium functions
-
 use crate::util::*;
 use anyhow::{ensure, Result};
 use libsodium_sys as libsodium;
rosenpass/src/usage.md (new file, 48 lines)
@@ -0,0 +1,48 @@
+NAME
+
+    {0} – Perform post-quantum secure key exchanges for wireguard and other services.
+
+SYNOPSIS
+
+    {0} [ COMMAND ] [ OPTIONS ]... [ ARGS ]...
+
+DESCRIPTION
+    {0} performs cryptographic key exchanges that are secure against quantum-computers and outputs the keys.
+    These keys can then be passed to various services such as wireguard or other vpn services
+    as pre-shared-keys to achieve security against attackers with quantum computers.
+
+COMMANDS
+
+    keygen private-key <file-path> public-key <file-path>
+        Generate a keypair to use in the exchange command later. Send the public-key file to your communication partner
+        and keep the private-key file a secret!
+
+    exchange private-key <file-path> public-key <file-path> [ OPTIONS ]... PEER...\n"
+        Start a process to exchange keys with the specified peers. You should specify at least one peer.
+
+OPTIONS
+    listen <ip>[:<port>]
+        Instructs {0} to listen on the specified interface and port. By default {0} will listen on all interfaces and select a random port.
+
+    verbose
+        Extra logging
+
+PEER := peer public-key <file-path> [endpoint <ip>[:<port>]] [preshared-key <file-path>] [outfile <file-path>] [wireguard <dev> <peer> <extra_params>]
+    Instructs {0} to exchange keys with the given peer and write the resulting PSK into the given output file.
+    You must either specify the outfile or wireguard output option.
+
+    endpoint <ip>[:<port>]
+        Specifies the address where the peer can be reached. This will be automatically updated after the first successful
+        key exchange with the peer. If this is unspecified, the peer must initiate the connection.
+
+    preshared-key <file-path>
+        You may specify a pre-shared key which will be mixed into the final secret.
+
+    outfile <file-path>
+        You may specify a file to write the exchanged keys to. If this option is specified, {0} will
+        write a notification to standard out every time the key is updated.
+
+    wireguard <dev> <peer> <extra_params>
+        This allows you to directly specify a wireguard peer to deploy the pre-shared-key to.
+        You may specify extra parameters you would pass to `wg set` besides the preshared-key parameter which is used by {0}.
+        This makes it possible to add peers entirely from {0}.
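For orientation, a concrete pair of invocations following the SYNOPSIS and PEER grammar above could look like this, with rosenpass standing in for {0} and every file name invented purely for the example:

rosenpass keygen private-key peer-a.sk public-key peer-a.pk
rosenpass exchange private-key peer-a.sk public-key peer-a.pk listen 0.0.0.0:9999 peer public-key peer-b.pk outfile peer-b.psk

The second command keeps running, refreshing the shared key in peer-b.psk and printing a notification to standard out each time, as described under the outfile option.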
@@ -1,5 +1,3 @@
-//! Helper functions and macros
-use anyhow::{ensure, Context, Result};
 use base64::{
     display::Base64Display as B64Display, read::DecoderReader as B64Reader,
     write::EncoderWriter as B64Writer,
@@ -7,25 +5,10 @@ use base64::{
 use std::{
     borrow::{Borrow, BorrowMut},
     cmp::min,
-    fs::{File, OpenOptions},
     io::{Read, Write},
-    path::Path,
     time::{Duration, Instant},
 };
 
-use crate::coloring::{Public, Secret};
-
-/// Xors a and b element-wise and writes the result into a.
-///
-/// # Examples
-///
-/// ```
-/// use rosenpass::util::xor_into;
-/// let mut a = String::from("hello").into_bytes();
-/// let b = b"world";
-/// xor_into(&mut a, b);
-/// assert_eq!(&a, b"\x1f\n\x1e\x00\x0b");
-/// ```
 #[inline]
 pub fn xor_into(a: &mut [u8], b: &[u8]) {
     assert!(a.len() == b.len());
@@ -34,8 +17,8 @@ pub fn xor_into(a: &mut [u8], b: &[u8]) {
     }
 }
 
-/// Concatenate two byte arrays
 // TODO: Zeroize result?
+/** Concatenate two byte arrays */
 #[macro_export]
 macro_rules! cat {
     ($len:expr; $($toks:expr),+) => {{
@@ -57,10 +40,9 @@ pub fn cpy<T: BorrowMut<[u8]> + ?Sized, F: Borrow<[u8]> + ?Sized>(src: &F, dst:
     dst.borrow_mut().copy_from_slice(src.borrow());
 }
 
-/// Copy from `src` to `dst`. If `src` and `dst` are not of equal length, copy as many bytes as possible.
-pub fn cpy_min<T: BorrowMut<[u8]> + ?Sized, F: Borrow<[u8]> + ?Sized>(src: &F, dst: &mut T) {
+pub fn cpy_min<T: BorrowMut<[u8]> + ?Sized, F: Borrow<[u8]> + ?Sized>(src: &F, to: &mut T) {
     let src = src.borrow();
-    let dst = dst.borrow_mut();
+    let dst = to.borrow_mut();
     let len = min(src.len(), dst.len());
     dst[..len].copy_from_slice(&src[..len]);
 }
@@ -73,19 +55,18 @@ macro_rules! attempt {
     };
 }
 
-use base64::engine::general_purpose::GeneralPurpose as Base64Engine;
-const B64ENGINE: Base64Engine = base64::engine::general_purpose::STANDARD;
+const B64TYPE: base64::Config = base64::STANDARD;
 
-pub fn fmt_b64<'a>(payload: &'a [u8]) -> B64Display<'a, 'static, Base64Engine> {
-    B64Display::<'a, 'static>::new(payload, &B64ENGINE)
+pub fn fmt_b64<'a>(payload: &'a [u8]) -> B64Display<'a> {
+    B64Display::<'a>::with_config(payload, B64TYPE)
 }
 
-pub fn b64_writer<W: Write>(w: W) -> B64Writer<'static, Base64Engine, W> {
-    B64Writer::new(w, &B64ENGINE)
+pub fn b64_writer<W: Write>(w: W) -> B64Writer<W> {
+    B64Writer::new(w, B64TYPE)
 }
 
-pub fn b64_reader<R: Read>(r: R) -> B64Reader<'static, Base64Engine, R> {
-    B64Reader::new(r, &B64ENGINE)
+pub fn b64_reader<R: Read>(r: &mut R) -> B64Reader<'_, R> {
+    B64Reader::new(r, B64TYPE)
 }
 
 // TODO remove this once std::cmp::max becomes const
@@ -116,6 +97,15 @@ impl Timebase {
     }
 }
 
+#[macro_export]
+macro_rules! multimatch {
+    ($val:expr) => {{ () }};
+    ($val:expr, $($p:pat => $thn:expr),*) => {{
+        let v = $val;
+        ($(if let $p = v { Some($thn) } else { None }),*)
+    }};
+}
+
 pub fn mutating<T, F>(mut v: T, f: F) -> T
 where
     F: Fn(&mut T),
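The multimatch! macro introduced in this hunk evaluates its argument once and then tries every listed pattern against it, producing one Option per arm instead of picking a single branch the way match does. A small self-contained sketch of that behaviour, with the macro body copied from the hunk above and the example values invented:

macro_rules! multimatch {
    ($val:expr) => {{ () }};
    ($val:expr, $($p:pat => $thn:expr),*) => {{
        let v = $val;
        ($(if let $p = v { Some($thn) } else { None }),*)
    }};
}

fn main() {
    let v = Some(3);
    // Every arm is tried against the same value, so the result is a
    // tuple with one entry per arm.
    let (on_some, on_none) = multimatch!(v, Some(x) => x + 1, None => 0);
    assert_eq!(on_some, Some(4));
    assert_eq!(on_none, None);
}

With a single arm the parenthesised repetition collapses to just that arm's Option; with several arms it yields a tuple.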
@@ -131,114 +121,3 @@ where
     f(&v);
     v
 }
-
-/// load'n store
-
-/// Open a file writable
-pub fn fopen_w<P: AsRef<Path>>(path: P) -> Result<File> {
-    Ok(OpenOptions::new()
-        .read(false)
-        .write(true)
-        .create(true)
-        .truncate(true)
-        .open(path)?)
-}
-/// Open a file readable
-pub fn fopen_r<P: AsRef<Path>>(path: P) -> Result<File> {
-    Ok(OpenOptions::new()
-        .read(true)
-        .write(false)
-        .create(false)
-        .truncate(false)
-        .open(path)?)
-}
-
-pub trait ReadExactToEnd {
-    fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()>;
-}
-
-impl<R: Read> ReadExactToEnd for R {
-    fn read_exact_to_end(&mut self, buf: &mut [u8]) -> Result<()> {
-        let mut dummy = [0u8; 8];
-        self.read_exact(buf)?;
-        ensure!(self.read(&mut dummy)? == 0, "File too long!");
-        Ok(())
-    }
-}
-
-pub trait LoadValue {
-    fn load<P: AsRef<Path>>(path: P) -> Result<Self>
-    where
-        Self: Sized;
-}
-
-pub trait LoadValueB64 {
-    fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self>
-    where
-        Self: Sized;
-}
-
-trait StoreValue {
-    fn store<P: AsRef<Path>>(&self, path: P) -> Result<()>;
-}
-
-trait StoreSecret {
-    fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()>;
-}
-
-impl<T: StoreValue> StoreSecret for T {
-    fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
-        self.store(path)
-    }
-}
-
-impl<const N: usize> LoadValue for Secret<N> {
-    fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
-        let mut v = Self::random();
-        let p = path.as_ref();
-        fopen_r(p)?
-            .read_exact_to_end(v.secret_mut())
-            .with_context(|| format!("Could not load file {p:?}"))?;
-        Ok(v)
-    }
-}
-
-impl<const N: usize> LoadValueB64 for Secret<N> {
-    fn load_b64<P: AsRef<Path>>(path: P) -> Result<Self> {
-        let mut v = Self::random();
-        let p = path.as_ref();
-        // This might leave some fragments of the secret on the stack;
-        // in practice this is likely not a problem because the stack likely
-        // will be overwritten by something else soon but this is not exactly
-        // guaranteed. It would be possible to remedy this, but since the secret
-        // data will linger in the Linux page cache anyways with the current
-        // implementation, going to great length to erase the secret here is
-        // not worth it right now.
-        b64_reader(&mut fopen_r(p)?)
-            .read_exact(v.secret_mut())
-            .with_context(|| format!("Could not load base64 file {p:?}"))?;
-        Ok(v)
-    }
-}
-
-impl<const N: usize> StoreSecret for Secret<N> {
-    fn store_secret<P: AsRef<Path>>(&self, path: P) -> Result<()> {
-        std::fs::write(path, self.secret())?;
-        Ok(())
-    }
-}
-
-impl<const N: usize> LoadValue for Public<N> {
-    fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
-        let mut v = Self::random();
-        fopen_r(path)?.read_exact_to_end(&mut *v)?;
-        Ok(v)
-    }
-}
-
-impl<const N: usize> StoreValue for Public<N> {
-    fn store<P: AsRef<Path>>(&self, path: P) -> Result<()> {
-        std::fs::write(path, **self)?;
-        Ok(())
-    }
-}
@@ -8,21 +8,21 @@ fn generate_keys() {
     let tmpdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join("keygen");
     fs::create_dir_all(&tmpdir).unwrap();
 
-    let secret_key_path = tmpdir.join("secret-key");
-    let public_key_path = tmpdir.join("public-key");
+    let priv_key_path = tmpdir.join("private-key");
+    let pub_key_path = tmpdir.join("public-key");
 
     let output = test_bin::get_test_bin(BIN)
-        .args(["gen-keys", "--secret-key"])
-        .arg(&secret_key_path)
-        .arg("--public-key")
-        .arg(&public_key_path)
+        .args(["keygen", "private-key"])
+        .arg(&priv_key_path)
+        .arg("public-key")
+        .arg(&pub_key_path)
         .output()
         .expect("Failed to start {BIN}");
 
     assert_eq!(String::from_utf8_lossy(&output.stdout), "");
 
-    assert!(secret_key_path.is_file());
-    assert!(public_key_path.is_file());
+    assert!(priv_key_path.is_file());
+    assert!(pub_key_path.is_file());
 
     // cleanup
     fs::remove_dir_all(&tmpdir).unwrap();
@@ -46,22 +46,22 @@ fn check_exchange() {
     let tmpdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join("exchange");
     fs::create_dir_all(&tmpdir).unwrap();
 
-    let secret_key_paths = [tmpdir.join("secret-key-0"), tmpdir.join("secret-key-1")];
-    let public_key_paths = [tmpdir.join("public-key-0"), tmpdir.join("public-key-1")];
+    let priv_key_paths = [tmpdir.join("private-key-0"), tmpdir.join("private-key-1")];
+    let pub_key_paths = [tmpdir.join("public-key-0"), tmpdir.join("public-key-1")];
     let shared_key_paths = [tmpdir.join("shared-key-0"), tmpdir.join("shared-key-1")];
 
     // generate key pairs
-    for (secret_key_path, pub_key_path) in secret_key_paths.iter().zip(public_key_paths.iter()) {
+    for (priv_key_path, pub_key_path) in priv_key_paths.iter().zip(pub_key_paths.iter()) {
         let output = test_bin::get_test_bin(BIN)
-            .args(["gen-keys", "--secret-key"])
-            .arg(&secret_key_path)
-            .arg("--public-key")
+            .args(["keygen", "private-key"])
+            .arg(&priv_key_path)
+            .arg("public-key")
             .arg(&pub_key_path)
             .output()
             .expect("Failed to start {BIN}");
 
         assert_eq!(String::from_utf8_lossy(&output.stdout), "");
-        assert!(secret_key_path.is_file());
+        assert!(priv_key_path.is_file());
         assert!(pub_key_path.is_file());
     }
 
@@ -69,12 +69,12 @@ fn check_exchange() {
     let port = find_udp_socket();
     let listen_addr = format!("localhost:{port}");
     let mut server = test_bin::get_test_bin(BIN)
-        .args(["exchange", "secret-key"])
-        .arg(&secret_key_paths[0])
+        .args(["exchange", "private-key"])
+        .arg(&priv_key_paths[0])
         .arg("public-key")
-        .arg(&public_key_paths[0])
+        .arg(&pub_key_paths[0])
         .args(["listen", &listen_addr, "verbose", "peer", "public-key"])
-        .arg(&public_key_paths[1])
+        .arg(&pub_key_paths[1])
         .arg("outfile")
         .arg(&shared_key_paths[0])
         .stdout(Stdio::null())
@@ -82,16 +82,14 @@ fn check_exchange() {
         .spawn()
         .expect("Failed to start {BIN}");
 
-    std::thread::sleep(Duration::from_millis(500));
-
     // start second process, the client
     let mut client = test_bin::get_test_bin(BIN)
-        .args(["exchange", "secret-key"])
-        .arg(&secret_key_paths[1])
+        .args(["exchange", "private-key"])
+        .arg(&priv_key_paths[1])
         .arg("public-key")
-        .arg(&public_key_paths[1])
+        .arg(&pub_key_paths[1])
        .args(["verbose", "peer", "public-key"])
-        .arg(&public_key_paths[0])
+        .arg(&pub_key_paths[0])
         .args(["endpoint", &listen_addr])
         .arg("outfile")
         .arg(&shared_key_paths[1])
rp (deleted file, 391 lines)
@@ -1,391 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-# String formatting subsystem
-
-formatting_init() {
-    endl=$'\n'
-}
-
-enquote() {
-    while (( $# > 1 )); do
-        printf "%q " "${1}"; shift
-    done
-    if (( $# == 1 )); then
-        printf "%q" "${1}"; shift
-    fi
-}
-
-multiline() {
-    # shellcheck disable=SC1004
-    echo "${1} " | awk '
-        function pm(a, b, l) {
-            return length(a) > l \
-                && length(b) > l \
-                && substr(a, 1, l+1) == substr(b, 1, l+1) \
-                ? pm(a, b, l+1) : l;
-        }
-
-        !started && $0 !~ /^[ \t]*$/ {
-            started=1
-            match($0, /^[ \t]*/)
-            prefix=substr($0, 1, RLENGTH)
-        }
-
-        started {
-            print(substr($0, 1 + pm($0, prefix)));
-        }
-    '
-}
-
-dbg() {
-    echo >&2 "$@"
-}
-
-
-detect_git_dir() {
-    # https://stackoverflow.com/questions/3618078/pipe-only-stderr-through-a-filter
-    (
-        git -C "${scriptdir}" rev-parse --show-toplevel 3>&1 1>&2 2>&3 3>&- \
-            | sed '
-                /not a git repository/d;
-                s/^/WARNING: /'
-    ) 3>&1 1>&2 2>&3 3>&-
-}
-
-# Cleanup subsystem (sigterm)
-
-cleanup_init() {
-    cleanup_actions=()
-    trap cleanup_apply exit
-}
-
-cleanup_apply() {
-    local f
-    for f in "${cleanup_actions[@]}"; do
-        eval "${f}"
-    done
-}
-
-cleanup() {
-    cleanup_actions+=("$(multiline "${1}")")
-}
-
-# Transactional execution subsystem
-
-frag_init() {
-    explain=0
-    frag_transaction=()
-    frag "
-        #! /bin/bash
-        set -e"
-}
-
-frag_apply() {
-    local f
-    for f in "${frag_transaction[@]}"; do
-        if (( explain == 1 )); then
-            dbg "${f}"
-        fi
-        eval "${f}"
-    done
-}
-
-frag() {
-    frag_transaction+=("$(multiline "${1}")")
-}
-
-frag_append() {
-    local len; len="${#frag_transaction[@]}"
-    frag_transaction=("${frag_transaction[@]:0:len-1}" "${frag_transaction[len-1]}${1}")
-}
-
-frag_append_esc() {
-    frag_append " \\${endl}${1}"
-}
-
-# Usage documentation subsystem
-usage_init() {
-    usagestack=("${script}")
-}
-
-usage_snap() {
-    echo "${#usagestack}"
-}
-
-usage_restore() {
-    local n; n="${1}"
-    dbg REST "${1}"
-    usagestack=("${usagestack[@]:0:n-2}")
-}
-
-
-usage() {
-    dbg "Usage: ${usagestack[*]}"
-}
-
-fatal() {
-    dbg "FATAL: $*"
-    usage
-    exit 1
-}
-
-genkey() {
-    usagestack+=("PRIVATE_KEYS_DIR")
-    local skdir
-    skdir="${1%/}"; shift || fatal "Required positional argument: PRIVATE_KEYS_DIR"
-
-    while (( $# > 0 )); do
-        local arg; arg="$1"; shift
-        case "${arg}" in
-            -h | -help | --help | help) usage; return 0 ;;
-            *) fatal "Unknown option ${arg}";;
-        esac
-    done
-
-    if test -e "${skdir}"; then
-        fatal "PRIVATE_KEYS_DIR \"${skdir}\" already exists"
-    fi
-
-    frag "
-        umask 077
-        mkdir -p $(enquote "${skdir}")
-        wg genkey > $(enquote "${skdir}"/wgsk)
-        $(enquote "${binary}") gen-keys \\
-            -s $(enquote "${skdir}"/pqsk) \\
-            -p $(enquote "${skdir}"/pqpk)"
-}
-
-pubkey() {
-    usagestack+=("PRIVATE_KEYS_DIR" "PUBLIC_KEYS_DIR")
-    local skdir pkdir
-    skdir="${1%/}"; shift || fatal "Required positional argument: PRIVATE_KEYS_DIR"
-    pkdir="${1%/}"; shift || fatal "Required positional argument: PUBLIC_KEYS_DIR"
-
-    while (( $# > 0 )); do
-        local arg; arg="$1"; shift
-        case "${arg}" in
-            -h | -help | --help | help) usage; exit 0;;
-            *) fatal "Unknown option ${arg}";;
-        esac
-    done
-
-    if test -e "${pkdir}"; then
-        fatal "PUBLIC_KEYS_DIR \"${pkdir}\" already exists"
-    fi
-
-    frag "
-        mkdir -p $(enquote "${pkdir}")
-        wg pubkey < $(enquote "${skdir}"/wgsk) > $(enquote "${pkdir}/wgpk")
-        cp $(enquote "${skdir}"/pqpk) $(enquote "${pkdir}/pqpk")"
-}
-
-exchange() {
-    usagestack+=("PRIVATE_KEYS_DIR" "[dev <device>]" "[listen <ip>:<port>]" "[peer PUBLIC_KEYS_DIR [endpoint <ip>:<port>] [persistent-keepalive <interval>] [allowed-ips <ip1>/<cidr1>[,<ip2>/<cidr2>]...]]...")
-    local skdir dev lport
-    dev="${project_name}0"
-    skdir="${1%/}"; shift || fatal "Required positional argument: PRIVATE_KEYS_DIR"
-
-    while (( $# > 0 )); do
-        local arg; arg="$1"; shift
-        case "${arg}" in
-            dev) dev="${1}"; shift || fatal "dev option requires parameter";;
-            peer) set -- "peer" "$@"; break;; # Parsed down below
-            listen)
-                local listen; listen="${1}";
-                lip="${listen%:*}";
-                lport="${listen/*:/}";
-                if [[ "$lip" = "$lport" ]]; then
-                    lip="[::]"
-                fi
-                shift;;
-            -h | -help | --help | help) usage; return 0;;
-            *) fatal "Unknown option ${arg}";;
-        esac
-    done
-
-    if (( $# == 0 )); then
-        fatal "Needs at least one peer specified"
-    fi
-
-    # os dependent setup
-    case "$OSTYPE" in
-        linux-*) # could be linux-gnu or linux-musl
-            frag "
-                # Create the WireGuard interface
-                ip link add dev $(enquote "${dev}") type wireguard || true"
-
-            cleanup "
-                ip link del dev $(enquote "${dev}") || true"
-
-            frag "
-                ip link set dev $(enquote "${dev}") up"
-            ;;
-
-        freebsd*)
-            frag "
-                # load the WireGuard kernel module
-                kldload -n if_wg || fatal 'Cannot load if_wg kernel module'"
-
-            frag "
-                # Create the WireGuard interface
-                ifconfig wg create name $(enquote "${dev}") || true"
-
-            cleanup "
-                ifconfig $(enquote "${dev}") destroy || true"
-
-            frag "
-                ifconfig $(enquote "${dev}") up"
-            ;;
-
-        *)
-            fatal "Your system $OSTYPE is not yet supported. We are happy to receive patches to address this :)"
-            ;;
-
-    esac
-
-    frag "
-        # Deploy the classic wireguard private key
-        wg set $(enquote "${dev}") private-key $(enquote "${skdir}/wgsk")"
-
-
-    if test -n "${lport}"; then
-        frag_append "listen-port $(enquote "$(( lport + 1 ))")"
-    fi
-
-    frag "
-        # Launch the post quantum wireguard exchange daemon
-        $(enquote "${binary}") exchange"
-
-    if (( verbose == 1 )); then
-        frag_append "verbose"
-    fi
-
-    frag_append_esc " secret-key $(enquote "${skdir}/pqsk")"
-    frag_append_esc " public-key $(enquote "${skdir}/pqpk")"
-
-    if test -n "${lport}"; then
-        frag_append_esc " listen $(enquote "${lip}:${lport}")"
-    fi
-
-    usagestack+=("peer" "PUBLIC_KEYS_DIR endpoint IP:PORT")
-
-    while (( $# > 0 )); do
-        shift; # Skip "peer" argument
-
-        local peerdir ip port keepalive allowedips
-        peerdir="${1%/}"; shift || fatal "Required peer argument: PUBLIC_KEYS_DIR"
-
-        while (( $# > 0 )); do
-            local arg; arg="$1"; shift
-            case "${arg}" in
-                peer) set -- "peer" "$@"; break;; # Next peer
-                endpoint) ip="${1%:*}"; port="${1##*:}"; shift;;
-                persistent-keepalive) keepalive="${1}"; shift;;
-                allowed-ips) allowedips="${1}"; shift;;
-                -h | -help | --help | help) usage; return 0;;
-                *) fatal "Unknown option ${arg}";;
-            esac
-        done
-
-        # Public key
-        frag_append_esc " peer public-key $(enquote "${peerdir}/pqpk")"
-
-        # PSK
-        local pskfile; pskfile="${peerdir}/psk"
-        if test -f "${pskfile}"; then
-            frag_append_esc " preshared-key $(enquote "${pskfile}")"
-        fi
-
-
-        if test -n "${ip}"; then
-            frag_append_esc " endpoint $(enquote "${ip}:${port}")"
-        fi
-
-        frag_append_esc " wireguard $(enquote "${dev}") $(enquote "$(cat "${peerdir}/wgpk")")"
-
-        if test -n "${ip}"; then
-            frag_append_esc " endpoint $(enquote "${ip}:$(( port + 1 ))")"
-        fi
-
-        if test -n "${keepalive}"; then
-            frag_append_esc " persistent-keepalive $(enquote "${keepalive}")"
-        fi
-
-        if test -n "${allowedips}"; then
-            frag_append_esc " allowed-ips $(enquote "${allowedips}")"
-        fi
-    done
-}
-
-find_rosenpass_binary() {
-    local binary; binary=""
-    if [[ -n "${gitdir}" ]]; then
-        # If rp is run from the git repo, use the newest build artifact
-        binary=$(
-            find "${gitdir}/result/bin/${project_name}" \
-                "${gitdir}"/target/{release,debug}/"${project_name}" \
-                -printf "%T@ %p\n" 2>/dev/null \
-                | sort -nr \
-                | awk 'NR==1 { print($2) }'
-        )
-    elif [[ -n "${nixdir}" ]]; then
-        # If rp is run from nix, use the nix-installed rosenpass version
-        binary="${nixdir}/bin/${project_name}"
-    fi
-
-    if [[ -z "${binary}" ]]; then
-        binary="${project_name}"
-    fi
-
-    echo "${binary}"
-}
-
-main() {
-    formatting_init
-    cleanup_init
-    usage_init
-    frag_init
-
-    project_name="rosenpass"
-    verbose=0
-    scriptdir="$(dirname "${script}")"
-    gitdir="$(detect_git_dir)" || true
-    if [[ -d /nix ]]; then
-        nixdir="$(readlink -f result/bin/rp | grep -Pio '^/nix/store/[^/]+(?=/bin/[^/]+)')" || true
-    fi
-    binary="$(find_rosenpass_binary)"
-
-    # Parse command
-
-    usagestack+=("[explain]" "[verbose]" "genkey|pubkey|exchange" "[ARGS]...")
-
-    local cmd
-    while (( $# > 0 )); do
-        local arg; arg="$1"; shift
-        case "${arg}" in
-            genkey|pubkey|exchange) cmd="${arg}"; break;;
-            explain) explain=1;;
-            verbose) verbose=1;;
-            -h | -help | --help | help) usage; return 0 ;;
-            *) fatal "Unknown command ${arg}";;
-        esac
-    done
-
-    test -n "${cmd}" || fatal "No command supplied"
-    usagestack=("${script}")
-
-    # Execute command
-
-    usagestack+=("${cmd}")
-    "${cmd}" "$@"
-    usagestack=("${script}")
-
-    # Apply transaction
-
-    frag_apply
-}
-
-script="$0"
-main "$@"
rp/Cargo.toml (new file, 13 lines)
@@ -0,0 +1,13 @@
+[package]
+name = "rp"
+version = "0.1.0"
+edition = "2021"
+authors = ["wucke13 <wucke13@gmail.com>", "Karolin Varner <karo@cupdev.net>"]
+license = "MIT OR Apache-2.0"
+description = "Build post-quantum-secure VPNs with WireGuard!"
+homepage = "https://rosenpass.eu/"
+repository = "https://github.com/rosenpass/rosenpass"
+readme = "../readme.md"
+
+[dependencies]
+clap = { version = "4.1.8", features = ["derive"] }
rp/src/main.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
+use std::path::PathBuf;
+
+use clap::{Parser, Subcommand};
+
+// Usage: ../rp-old [explain] [verbose] genkey|pubkey|exchange [ARGS]...
+
+/// Simple program to greet a person
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Cli {
+    /// Explain what is done
+    #[arg(short, long)]
+    explain: bool,
+
+    /// Be verbose about what's going on
+    #[arg(short, long)]
+    verbose: bool,
+
+    #[command(subcommand)]
+    command: Command,
+}
+
+/// Doc comment
+#[derive(Subcommand, Debug)]
+// #[command(PARENT CMD ATTRIBUTE)]
+enum Command {
+    /// Generate a keypair
+    // --- Requirements ---
+    // requires wireguard
+    // should not exist before
+    // should be dir after
+    // should contain three files after pqpk, pqsk, wgsk
+    Genkey {
+        private_keys_dir: PathBuf,
+    },
+
+    /// Generate public keys
+    // --- Requirements ---
+    // requires wireguard
+    // requires private_keys_dir to exist
+    // should create public_keys_dir
+    // should copy pqpk from private_ to public_keys_dir
+    // should generate wgpk to public_keys_dir
+    Pubkey {
+        private_keys_dir: PathBuf,
+        public_keys_dir: PathBuf,
+    },
+
+    Exchange {},
+}
+fn main() {
+    let args = Cli::parse();
+
+    println!("{args:#?}");
+}
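The main() above only parses and debug-prints the CLI for now; the requirement comments spell out what Genkey still has to do, and the deleted rp shell script shows how: refuse to reuse an existing directory, create it, generate wgsk via wg genkey, and produce the post-quantum pair via the rosenpass binary. Purely as an illustration of those requirements, a hypothetical handler might look like the sketch below; it shells out to wg and rosenpass gen-keys exactly as the bash genkey() did, and it assumes an anyhow dependency that this branch's Cargo.toml does not yet declare.

use std::{fs, path::Path, process::Command};

use anyhow::{bail, ensure, Context, Result};

/// Hypothetical Genkey handler, mirroring the deleted `rp` shell script:
/// the directory must not exist beforehand and ends up containing
/// wgsk (classic WireGuard secret) plus pqsk/pqpk (rosenpass key pair).
fn genkey(private_keys_dir: &Path) -> Result<()> {
    ensure!(
        !private_keys_dir.exists(),
        "PRIVATE_KEYS_DIR {:?} already exists",
        private_keys_dir
    );
    fs::create_dir_all(private_keys_dir)?;

    // wg genkey > <dir>/wgsk
    let wgsk = Command::new("wg")
        .arg("genkey")
        .output()
        .context("running wg genkey")?;
    if !wgsk.status.success() {
        bail!("wg genkey failed");
    }
    fs::write(private_keys_dir.join("wgsk"), wgsk.stdout)?;

    // rosenpass gen-keys -s <dir>/pqsk -p <dir>/pqpk
    let status = Command::new("rosenpass")
        .arg("gen-keys")
        .arg("-s")
        .arg(private_keys_dir.join("pqsk"))
        .arg("-p")
        .arg(private_keys_dir.join("pqpk"))
        .status()
        .context("running rosenpass gen-keys")?;
    ensure!(status.success(), "rosenpass gen-keys failed");
    Ok(())
}

A Pubkey handler would follow the same pattern, copying pqpk into the public directory and deriving wgpk with wg pubkey, as the bash pubkey() did.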