WIP: Actual send and receive data #5
.gitignore (vendored): 1 changed line
@@ -2,3 +2,4 @@
 *.swp
 /target
 /Cargo.lock
+/flake.profile*
Cargo.toml: 34 changed lines
@@ -2,11 +2,11 @@

 name = "fenrir"
 version = "0.1.0"
-edition = "2021"
+edition = "2024"
 # Fenrir won't be ready for a while,
 # we might as well use async fn in trait, which is nightly
 # remember to update this
-rust-version = "1.67.0"
+rust-version = "1.85.0"
 homepage = "https://git.runesauth.com/RunesAuth/libFenrir"
 repository = "https://git.runesauth.com/RunesAuth/libFenrir"
 license = "Apache-2.0 WITH LLVM-exception"
@@ -21,16 +21,16 @@ publish = false

 [lib]

-crate_type = [ "lib", "cdylib", "staticlib" ]
+crate-type = [ "lib", "cdylib", "staticlib" ]

 [dependencies]
 # please keep these in alphabetical order

-arc-swap = { version = "1.6" }
+arc-swap = { version = "1.7" }
 arrayref = { version = "0.3" }
-async-channel = { version = "1.8" }
+async-channel = { version = "2.3" }
 # base85 repo has no tags, fix on a commit. v1.1.1 points to older, wrong version
-base85 = { git = "https://gitlab.com/darkwyrm/base85", rev = "d98efbfd171dd9ba48e30a5c88f94db92fc7b3c6" }
+base85 = { git = "https://gitlab.com/darkwyrm/base85", rev = "b5389888aca6208a7563c8dbf2af46a82e724fa1" }
 bitmaps = { version = "3.2" }
 chacha20poly1305 = { version = "0.10" }
 futures = { version = "0.3" }
@@ -38,27 +38,27 @@ hkdf = { version = "0.12" }
 hwloc2 = { version = "2.2" }
 libc = { version = "0.2" }
 num-traits = { version = "0.2" }
-num-derive = { version = "0.3" }
+num-derive = { version = "0.4" }
 rand_core = { version = "0.6" }
-ring = { version = "0.16" }
+ring = { version = "0.17" }
 bincode = { version = "1.3" }
 sha3 = { version = "0.10" }
-strum = { version = "0.24" }
+strum = { version = "0.26" }
-strum_macros = { version = "0.24" }
+strum_macros = { version = "0.26" }
-thiserror = { version = "1.0" }
+thiserror = { version = "2.0" }
 tokio = { version = "1", features = ["full"] }
 # PERF: todo linux-only, behind "iouring" feature
 #tokio-uring = { version = "0.4" }
 tracing = { version = "0.1" }
 tracing-test = { version = "0.2" }
-trust-dns-resolver = { version = "0.22", features = [ "dnssec-ring" ] }
+trust-dns-resolver = { version = "0.23", features = [ "dnssec-ring" ] }
-trust-dns-client = { version = "0.22", features = [ "dnssec" ] }
+trust-dns-client = { version = "0.23", features = [ "dnssec" ] }
-trust-dns-proto = { version = "0.22" }
+trust-dns-proto = { version = "0.23" }
 # don't use stable dalek. forces zeroize 1.3,
 # breaks ours and chacha20poly1305
 # reason: zeroize is not pure rust,
 # so we can't have multiple versions of it
-x25519-dalek = { version = "2.0.0-pre.1", features = [ "serde" ] }
+x25519-dalek = { version = "2.0", features = [ "serde", "static_secrets" ] }
 zeroize = { version = "1" }

 [profile.dev]
@@ -84,3 +84,7 @@ incremental = true
 codegen-units = 256
 rpath = false

+#[target.x86_64-unknown-linux-gnu]
+#linker = "clang"
+#rustflags = ["-C", "link-arg=--ld-path=mold"]
+
TODO: 5 changed lines
@@ -1 +1,6 @@
 * Wrapping for everything that wraps (sigh)
+* track user connection (add u64 from user)
+* API split
+* split API in ThreadLocal, ThreadSafe
+* split send/recv API in Centralized, Connection
+* all re wrappers on ThreadLocal-Centralized
flake.lock (generated): 80 changed lines
@@ -5,29 +5,11 @@
         "systems": "systems"
       },
       "locked": {
-        "lastModified": 1685518550,
+        "lastModified": 1731533236,
-        "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
+        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
+        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
-        "type": "github"
-      },
-      "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
-    "flake-utils_2": {
-      "inputs": {
-        "systems": "systems_2"
-      },
-      "locked": {
-        "lastModified": 1681202837,
-        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
         "type": "github"
       },
       "original": {
@@ -38,27 +20,27 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1686921029,
+        "lastModified": 1742751704,
-        "narHash": "sha256-J1bX9plPCFhTSh6E3TWn9XSxggBh/zDD4xigyaIQBy8=",
+        "narHash": "sha256-rBfc+H1dDBUQ2mgVITMGBPI1PGuCznf9rcWX/XIULyE=",
         "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "c7ff1b9b95620ce8728c0d7bd501c458e6da9e04",
+        "rev": "f0946fa5f1fb876a9dc2e1850d9d3a4e3f914092",
         "type": "github"
       },
       "original": {
         "owner": "nixos",
-        "ref": "nixos-23.05",
+        "ref": "nixos-24.11",
         "repo": "nixpkgs",
         "type": "github"
       }
     },
     "nixpkgs-unstable": {
       "locked": {
-        "lastModified": 1686960236,
+        "lastModified": 1742889210,
-        "narHash": "sha256-AYCC9rXNLpUWzD9hm+askOfpliLEC9kwAo7ITJc4HIw=",
+        "narHash": "sha256-hw63HnwnqU3ZQfsMclLhMvOezpM7RSB0dMAtD5/sOiw=",
         "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "04af42f3b31dba0ef742d254456dc4c14eedac86",
+        "rev": "698214a32beb4f4c8e3942372c694f40848b360d",
         "type": "github"
       },
       "original": {
@@ -68,22 +50,6 @@
         "type": "github"
       }
     },
-    "nixpkgs_2": {
-      "locked": {
-        "lastModified": 1681358109,
-        "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixpkgs-unstable",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
     "root": {
       "inputs": {
         "flake-utils": "flake-utils",
@@ -94,15 +60,16 @@
     },
     "rust-overlay": {
       "inputs": {
-        "flake-utils": "flake-utils_2",
-        "nixpkgs": "nixpkgs_2"
+        "nixpkgs": [
+          "nixpkgs"
+        ]
       },
       "locked": {
-        "lastModified": 1687055571,
+        "lastModified": 1742956365,
-        "narHash": "sha256-UvLoO6u5n9TzY80BpM4DaacxvyJl7u9mm9CA72d309g=",
+        "narHash": "sha256-Slrqmt6kJ/M7Z/ce4ebQWsz2aeEodrX56CsupOEPoz0=",
         "owner": "oxalica",
         "repo": "rust-overlay",
-        "rev": "2de557c780dcb127128ae987fca9d6c2b0d7dc0f",
+        "rev": "a0e3395c63cdbc9c1ec17915f8328c077c79c4a1",
         "type": "github"
       },
       "original": {
@@ -125,21 +92,6 @@
         "repo": "default",
         "type": "github"
       }
-    },
-    "systems_2": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
     }
   },
   "root": "root",
flake.nix: 25 changed lines
@@ -2,9 +2,12 @@
   description = "libFenrir";

   inputs = {
-    nixpkgs.url = "github:nixos/nixpkgs/nixos-23.05";
+    nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
     nixpkgs-unstable.url = "github:nixos/nixpkgs/nixos-unstable";
-    rust-overlay.url = "github:oxalica/rust-overlay";
+    rust-overlay = {
+      url = "github:oxalica/rust-overlay";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
     flake-utils.url = "github:numtide/flake-utils";
   };

@@ -18,18 +21,21 @@
         pkgs-unstable = import nixpkgs-unstable {
           inherit system overlays;
         };
-        RUST_VERSION="1.69.0";
+        #RUST_VERSION="1.85.0";
+        RUST_VERSION="2025-03-15";
       in
       {
         devShells.default = pkgs.mkShell {
+          name = "libFenrir";
           buildInputs = with pkgs; [
+            # system deps
            git
            gnupg
            openssh
            openssl
            pkg-config
-           exa
            fd
+           # rust deps
            #(rust-bin.stable.latest.default.override {
            # go with nightly to have async fn in traits
            #(rust-bin.nightly."2023-02-01".default.override {
@@ -41,12 +47,21 @@
            cargo-flamegraph
            cargo-license
            lld
-           rust-bin.stable.${RUST_VERSION}.default
+           #rust-bin.stable.${RUST_VERSION}.default
+           #rust-bin.beta.${RUST_VERSION}.default
+           rust-bin.nightly.${RUST_VERSION}.default
            rustfmt
            rust-analyzer
+           #clang_16
+           #mold
            # fenrir deps
            hwloc
          ];
+         # if you want to try the mold linker, add 'clang_16', 'mold', and append this to ~/.cargo/config.toml:
+         # [target.x86_64-unknown-linux-gnu]
+         # linker = "clang"
+         # rustflags = ["-C", "link-arg=--ld-path=mold"]
+
          shellHook = ''
            # use zsh or other custom shell
            USER_SHELL="$(grep $USER /etc/passwd | cut -d ':' -f 7)"
@@ -1,4 +1,4 @@
-edition = "2021"
+edition = "2024"
 unstable_features = true
 format_strings = true
 max_width = 80
@@ -3,8 +3,9 @@
 use crate::{
     auth::{Domain, ServiceID},
     connection::{
+        self,
         handshake::{self, Error, Handshake},
-        Conn, IDRecv, IDSend,
+        Connection, IDRecv, IDSend,
     },
     enc::{
         self,
@@ -18,20 +19,27 @@ use crate::{
 use ::tokio::sync::oneshot;

 pub(crate) struct Server {
-    pub id: KeyID,
+    pub(crate) id: KeyID,
-    pub key: PrivKey,
+    pub(crate) key: PrivKey,
-    pub domains: Vec<Domain>,
+    pub(crate) domains: Vec<Domain>,
 }

-pub(crate) type ConnectAnswer = Result<(KeyID, IDSend), crate::Error>;
+pub(crate) type ConnectAnswer = Result<ConnectOk, crate::Error>;
+#[derive(Debug)]
+pub(crate) struct ConnectOk {
+    pub(crate) auth_key_id: KeyID,
+    pub(crate) auth_id_send: IDSend,
+    pub(crate) authsrv_conn: connection::AuthSrvConn,
+    pub(crate) service_conn: Option<connection::ServiceConn>,
+}

 pub(crate) struct Client {
-    pub service_id: ServiceID,
+    pub(crate) service_id: ServiceID,
-    pub service_conn_id: IDRecv,
+    pub(crate) service_conn_id: IDRecv,
-    pub connection: Conn,
+    pub(crate) connection: Connection,
-    pub timeout: Option<::tokio::task::JoinHandle<()>>,
+    pub(crate) timeout: Option<::tokio::time::Instant>,
-    pub answer: oneshot::Sender<ConnectAnswer>,
+    pub(crate) answer: oneshot::Sender<ConnectAnswer>,
-    pub srv_key_id: KeyID,
+    pub(crate) srv_key_id: KeyID,
 }

 /// Tracks the keys used by the client and the handshake
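Note: `ConnectAnswer` now resolves to a full `ConnectOk` instead of the bare `(KeyID, IDSend)` pair, so the waiting client also receives its `AuthSrvConn`/`ServiceConn` trackers in one shot. A standalone sketch of the answer flow over the stored oneshot channel (the struct fields here are simplified stand-ins; only the channel pattern is taken from the diff):

    use tokio::sync::oneshot;

    // Stand-ins for the real types: the diff's ConnectOk actually carries
    // KeyID / IDSend / AuthSrvConn / Option<ServiceConn>.
    #[derive(Debug)]
    struct ConnectOk {
        authsrv_conn: u64,         // stand-in for connection::AuthSrvConn
        service_conn: Option<u64>, // stand-in for connection::ServiceConn
    }
    type ConnectAnswer = Result<ConnectOk, String>;

    #[tokio::main]
    async fn main() {
        let (tx, rx) = oneshot::channel::<ConnectAnswer>();
        // The worker keeps `tx` (as in Client::answer) and fires it once
        // the handshake response is verified:
        tx.send(Ok(ConnectOk { authsrv_conn: 1, service_conn: None }))
            .unwrap();
        match rx.await {
            Ok(Ok(ok)) => println!("connected: {:?}", ok),
            Ok(Err(e)) => println!("handshake failed: {e}"),
            Err(_) => println!("worker dropped the answer channel"),
        }
    }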
@@ -78,7 +86,7 @@ impl ClientList {
         pub_key: PubKey,
         service_id: ServiceID,
         service_conn_id: IDRecv,
-        connection: Conn,
+        connection: Connection,
         answer: oneshot::Sender<ConnectAnswer>,
         srv_key_id: KeyID,
     ) -> Result<(KeyID, &mut Client), oneshot::Sender<ConnectAnswer>> {
@@ -128,26 +136,28 @@ impl ClientList {
 #[derive(Debug, Clone)]
 pub(crate) struct AuthNeededInfo {
     /// Parsed handshake packet
-    pub handshake: Handshake,
+    pub(crate) handshake: Handshake,
     /// hkdf generated from the handshake
-    pub hkdf: Hkdf,
+    pub(crate) hkdf: Hkdf,
 }

 /// Client information needed to fully establish the connection
 #[derive(Debug)]
 pub(crate) struct ClientConnectInfo {
     /// The service ID that we are connecting to
-    pub service_id: ServiceID,
+    pub(crate) service_id: ServiceID,
     /// The service ID that we are connecting to
-    pub service_connection_id: IDRecv,
+    pub(crate) service_connection_id: IDRecv,
     /// Parsed handshake packet
-    pub handshake: Handshake,
+    pub(crate) handshake: Handshake,
-    /// Conn
+    /// Old timeout for the handshake completion
-    pub connection: Conn,
+    pub(crate) old_timeout: ::tokio::time::Instant,
+    /// Connection
+    pub(crate) connection: Connection,
     /// where to wake up the waiting client
-    pub answer: oneshot::Sender<ConnectAnswer>,
+    pub(crate) answer: oneshot::Sender<ConnectAnswer>,
     /// server public key id that we used on the handshake
-    pub srv_key_id: KeyID,
+    pub(crate) srv_key_id: KeyID,
 }
 /// Intermediate actions to be taken while parsing the handshake
 #[derive(Debug)]
@@ -231,7 +241,7 @@ impl Tracker {
         pub_key: PubKey,
         service_id: ServiceID,
         service_conn_id: IDRecv,
-        connection: Conn,
+        connection: Connection,
         answer: oneshot::Sender<ConnectAnswer>,
         srv_key_id: KeyID,
     ) -> Result<(KeyID, &mut Client), oneshot::Sender<ConnectAnswer>> {
@@ -267,7 +277,7 @@ impl Tracker {
         use handshake::dirsync::DirSync;
         match handshake.data {
             handshake::Data::DirSync(ref mut ds) => match ds {
-                DirSync::Req(ref mut req) => {
+                &mut DirSync::Req(ref mut req) => {
                     if !self.key_exchanges.contains(&req.exchange) {
                         return Err(enc::Error::UnsupportedKeyExchange.into());
                     }
@@ -288,21 +298,19 @@ impl Tracker {
                     let ephemeral_key;
                     match has_key {
                         Some(s_k) => {
-                            if let PrivKey::Exchange(ref k) = &s_k.key {
+                            if let &PrivKey::Exchange(ref k) = &s_k.key {
                                 ephemeral_key = k;
                             } else {
                                 unreachable!();
                             }
                         }
-                        None => {
-                            return Err(handshake::Error::UnknownKeyID.into())
-                        }
+                        None => return Err(Error::UnknownKeyID.into()),
                     }
                     let shared_key = match ephemeral_key
                         .key_exchange(req.exchange, req.exchange_key)
                     {
                         Ok(shared_key) => shared_key,
-                        Err(e) => return Err(handshake::Error::Key(e).into()),
+                        Err(e) => return Err(Error::Key(e).into()),
                     };
                     let hkdf =
                         Hkdf::new(hkdf::Kind::Sha3, b"fenrir", shared_key);
@@ -325,7 +333,7 @@ impl Tracker {
                     req.data.deserialize_as_cleartext(cleartext)?;
                 }
                 Err(e) => {
-                    return Err(handshake::Error::Key(e).into());
+                    return Err(Error::Key(e).into());
                 }
             }

@@ -342,7 +350,7 @@ impl Tracker {
                         "No such client key id: {:?}",
                         resp.client_key_id
                     );
-                    return Err(handshake::Error::UnknownKeyID.into());
+                    return Err(Error::UnknownKeyID.into());
                 }
             };
             let cipher_recv = &hshake.connection.cipher_recv;
@@ -361,18 +369,16 @@ impl Tracker {
                     resp.data.deserialize_as_cleartext(&cleartext)?;
                 }
                 Err(e) => {
-                    return Err(handshake::Error::Key(e).into());
+                    return Err(Error::Key(e).into());
                 }
             }
             let hshake =
                 self.hshake_cli.remove(resp.client_key_id).unwrap();
-            if let Some(timeout) = hshake.timeout {
-                timeout.abort();
-            }
             return Ok(Action::ClientConnect(ClientConnectInfo {
                 service_id: hshake.service_id,
                 service_connection_id: hshake.service_conn_id,
                 handshake,
+                old_timeout: hshake.timeout.unwrap(),
                 connection: hshake.connection,
                 answer: hshake.answer,
                 srv_key_id: hshake.srv_key_id,
@@ -5,21 +5,43 @@ pub mod packet;
 pub mod socket;
 pub mod stream;

-use ::std::{rc::Rc, vec::Vec};
+use ::core::num::Wrapping;
+use ::std::{
+    collections::{BTreeMap, HashMap, VecDeque},
+    vec::Vec,
+};

 pub use crate::connection::{handshake::Handshake, packet::Packet};

 use crate::{
+    connection::{socket::UdpClient, stream::StreamData},
     dnssec,
     enc::{
+        self,
         asym::PubKey,
         hkdf::Hkdf,
         sym::{self, CipherRecv, CipherSend},
         Random,
     },
-    inner::ThreadTracker,
+    inner::{worker, ThreadTracker},
 };
-use ::std::rc;

+/// Connection errors
+#[derive(::thiserror::Error, Debug, Copy, Clone)]
+pub enum Error {
+    /// Can't decrypt packet
+    #[error("Decrypt error: {0}")]
+    Decrypt(#[from] enc::Error),
+    /// Error in parsing a packet related to the connection
+    #[error("Chunk parsing: {0}")]
+    Parse(#[from] stream::Error),
+    /// No such Connection
+    #[error("No such connection")]
+    NoSuchConnection,
+    /// No such Stream
+    #[error("No such Stream")]
+    NoSuchStream,
+}

 /// Fenrir Connection ID
 ///
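Note: the new `Error` enum relies on thiserror's `#[from]` so that `?` can lift `enc::Error` / `stream::Error` into a connection-level error. A standalone sketch of that conversion pattern, with a stand-in inner error:

    // `InnerError` stands in for enc::Error / stream::Error.
    #[derive(thiserror::Error, Debug, Copy, Clone)]
    #[error("inner failure")]
    struct InnerError;

    #[derive(thiserror::Error, Debug, Copy, Clone)]
    enum Error {
        #[error("Decrypt error: {0}")]
        Decrypt(#[from] InnerError),
        #[error("No such connection")]
        NoSuchConnection,
    }

    fn decrypt() -> Result<(), InnerError> {
        Err(InnerError)
    }

    fn recv() -> Result<(), Error> {
        decrypt()?; // InnerError -> Error::Decrypt via #[from]
        Ok(())
    }

    fn main() {
        assert!(matches!(recv(), Err(Error::Decrypt(_))));
    }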
@@ -126,25 +148,72 @@ impl ProtocolVersion {
     }
 }

+/// Connection tracking id. Set by the user
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub struct UserTracker(pub ::core::num::NonZeroU64);
+
+/// Unique tracker of connections
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub struct LibTracker(Wrapping<u64>);
+impl LibTracker {
+    pub(crate) fn new(start: u16) -> Self {
+        Self(Wrapping(start as u64))
+    }
+    pub(crate) fn advance(&mut self, amount: u16) -> Self {
+        let old = self.0;
+        self.0 = self.0 + Wrapping(amount as u64);
+        LibTracker(old)
+    }
+}
+/// Collection of connection tracking, both user-given and library-generated
+#[derive(Debug, Copy, Clone)]
+pub struct ConnTracker {
+    /// Optional tracker set by the user
+    pub user: Option<UserTracker>,
+    /// library generated tracker. Unique and non-repeating
+    pub(crate) lib: LibTracker,
+}
+
+impl PartialEq for ConnTracker {
+    fn eq(&self, other: &Self) -> bool {
+        self.lib == other.lib
+    }
+}
+impl Eq for ConnTracker {}
+
+/// Connection to an Authentication Server
+#[derive(Debug, Copy, Clone)]
+pub struct AuthSrvConn(pub ConnTracker);
+/// Connection to a service
+#[derive(Debug, Copy, Clone)]
+pub struct ServiceConn(pub ConnTracker);
+
+/*
+ * TODO: only on Thread{Local,Safe}::Connection oriented flows
 /// The connection, as seen from a user of libFenrir
 #[derive(Debug)]
-pub struct Connection(rc::Weak<Conn>);
-
-/// A single connection and its data
-#[derive(Debug)]
-pub(crate) struct Conn {
-    /// Receiving Conn ID
-    pub id_recv: IDRecv,
-    /// Sending Conn ID
-    pub id_send: IDSend,
-    /// The main hkdf used for all secrets in this connection
-    pub hkdf: Hkdf,
-    /// Cipher for decrypting data
-    pub cipher_recv: CipherRecv,
-    /// Cipher for encrypting data
-    pub cipher_send: CipherSend,
+pub struct Conn {
+    pub(crate) queue: ::async_channel::Sender<worker::Work>,
+    pub(crate) tracker: ConnTracker,
 }
+
+impl Conn {
+    /// Queue some data to be sent in this connection
+    // TODO: send_and_wait, that waits for recipient ACK
+    pub async fn send(&mut self, stream: stream::ID, data: Vec<u8>) {
+        use crate::inner::worker::Work;
+        let _ = self
+            .queue
+            .send(Work::UserSend((self.tracker.lib, stream, data)))
+            .await;
+    }
+    /// Get the library tracking id
+    pub fn tracker(&self) -> ConnTracker {
+        self.tracker
+    }
+}
+*/

 /// Role: track the connection direction
 ///
 /// The Role is used to select the correct secrets, and track the direction
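Note: `LibTracker::advance` is effectively a post-increment: it returns the old value and steps by the given amount (the caller passes the worker-thread count, see `get_new_tracker` further down), so each thread mints ids from a disjoint arithmetic progression. A standalone sketch:

    use core::num::Wrapping;

    // Standalone copy of LibTracker's advance() from the diff;
    // `amount` would be thread_id.total in the real code.
    #[derive(Debug, Clone, Copy, Eq, PartialEq)]
    struct LibTracker(Wrapping<u64>);

    impl LibTracker {
        fn new(start: u16) -> Self {
            Self(Wrapping(start as u64))
        }
        fn advance(&mut self, amount: u16) -> Self {
            let old = self.0;
            self.0 += Wrapping(amount as u64);
            LibTracker(old)
        }
    }

    fn main() {
        // e.g. thread #1 out of 4 workers hands out 1, 5, 9, ...
        let mut t = LibTracker::new(1);
        assert_eq!(t.advance(4), LibTracker::new(1));
        assert_eq!(t.advance(4), LibTracker::new(5));
        assert_eq!(t.advance(4), LibTracker::new(9));
        println!("per-thread tracker ids stay disjoint");
    }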
@@ -160,7 +229,48 @@ pub enum Role {
     Client,
 }

-impl Conn {
+#[derive(Debug)]
+enum TimerKind {
+    None,
+    SendData(::tokio::time::Instant),
+    Keepalive(::tokio::time::Instant),
+}
+
+pub(crate) enum Enqueue {
+    TimerWait,
+    Immediate(::tokio::time::Instant),
+}
+
+/// A single connection and its data
+#[derive(Debug)]
+pub(crate) struct Connection {
+    /// Receiving Conn ID
+    pub(crate) id_recv: IDRecv,
+    /// Sending Conn ID
+    pub(crate) id_send: IDSend,
+    /// User-managed id to track this connection
+    /// the user can set this to better track this connection
+    pub(crate) user_tracker: Option<UserTracker>,
+    pub(crate) lib_tracker: LibTracker,
+    /// Sending address
+    pub(crate) send_addr: UdpClient,
+    /// The main hkdf used for all secrets in this connection
+    hkdf: Hkdf,
+    /// Cipher for decrypting data
+    pub(crate) cipher_recv: CipherRecv,
+    /// Cipher for encrypting data
+    pub(crate) cipher_send: CipherSend,
+    mtu: usize,
+    next_timer: TimerKind,
+    /// send queue for each Stream
+    send_queue: BTreeMap<stream::ID, stream::SendTracker>,
+    last_stream_sent: stream::ID,
+    /// receive queue for each Stream
+    recv_queue: BTreeMap<stream::ID, stream::Stream>,
+    streams_ready: VecDeque<stream::ID>,
+}
+
+impl Connection {
     pub(crate) fn new(
         hkdf: Hkdf,
         cipher: sym::Kind,
@@ -178,19 +288,195 @@ impl Conn {
         let cipher_recv = CipherRecv::new(cipher, secret_recv);
         let cipher_send = CipherSend::new(cipher, secret_send, rand);

+        use ::std::net::{IpAddr, Ipv4Addr, SocketAddr};
         Self {
             id_recv: IDRecv(ID::Handshake),
             id_send: IDSend(ID::Handshake),
+            user_tracker: None,
+            lib_tracker: LibTracker::new(0),
+            // will be overwritten
+            send_addr: UdpClient(SocketAddr::new(
+                IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
+                31337,
+            )),
             hkdf,
             cipher_recv,
             cipher_send,
+            mtu: 1200,
+            next_timer: TimerKind::None,
+            send_queue: BTreeMap::new(),
+            last_stream_sent: stream::ID(0),
+            recv_queue: BTreeMap::new(),
+            streams_ready: VecDeque::with_capacity(4),
+        }
+    }
+    pub(crate) fn get_data(&mut self) -> Option<Vec<(stream::ID, Vec<u8>)>> {
+        if self.streams_ready.is_empty() {
+            return None;
+        }
+        let ret_len = self.streams_ready.len();
+        let mut ret = Vec::with_capacity(ret_len);
+        while let Some(stream_id) = self.streams_ready.pop_front() {
+            let stream = match self.recv_queue.get_mut(&stream_id) {
+                Some(stream) => stream,
+                None => continue,
+            };
+            let data = stream.get(); // FIXME
+            ret.push((stream_id, data.1));
+        }
+        Some(ret)
+    }
+    pub(crate) fn recv(
+        &mut self,
+        mut udp: crate::RawUdp,
+    ) -> Result<StreamData, Error> {
+        let mut data = &mut udp.data[ID::len()..];
+        let aad = sym::AAD(&[]);
+        self.cipher_recv.decrypt(aad, &mut data)?;
+        let mut bytes_parsed = 0;
+        let mut chunks = Vec::with_capacity(2);
+        loop {
+            let chunk = match stream::Chunk::deserialize(&data[bytes_parsed..])
+            {
+                Ok(chunk) => chunk,
+                Err(e) => {
+                    return Err(e.into());
+                }
+            };
+            bytes_parsed = bytes_parsed + chunk.len();
+            chunks.push(chunk);
+            if bytes_parsed == data.len() {
+                break;
+            }
+        }
+        let mut data_ready = StreamData::NotReady;
+        for chunk in chunks.into_iter() {
+            let stream_id = chunk.id;
+            let stream = match self.recv_queue.get_mut(&stream_id) {
+                Some(stream) => stream,
+                None => {
+                    ::tracing::debug!("Ignoring chunk for unknown stream::ID");
+                    continue;
+                }
+            };
+            match stream.recv(chunk) {
+                Ok(status) => {
+                    if !self.streams_ready.contains(&stream_id) {
+                        self.streams_ready.push_back(stream_id);
+                    }
+                    data_ready = data_ready | status;
+                }
+                Err(e) => ::tracing::debug!("stream: {:?}: {:?}", stream_id, e),
+            }
+        }
+        Ok(data_ready)
+    }
+    pub(crate) fn enqueue(
+        &mut self,
+        stream: stream::ID,
+        data: Vec<u8>,
+    ) -> Result<Enqueue, Error> {
+        let stream = match self.send_queue.get_mut(&stream) {
+            None => return Err(Error::NoSuchStream),
+            Some(stream) => stream,
+        };
+        stream.enqueue(data);
+        let instant;
+        let ret;
+        self.next_timer = match self.next_timer {
+            TimerKind::None | TimerKind::Keepalive(_) => {
+                instant = ::tokio::time::Instant::now();
+                ret = Enqueue::Immediate(instant);
+                TimerKind::SendData(instant)
+            }
+            TimerKind::SendData(old_timer) => {
+                // There already is some data to be sent
+                // wait for this timer,
+                // or risk going over max transmission rate
+                ret = Enqueue::TimerWait;
+                TimerKind::SendData(old_timer)
+            }
+        };
+        Ok(ret)
+    }
+    pub(crate) fn write_pkt<'a>(
+        &mut self,
+        raw: &'a mut [u8],
+    ) -> Result<&'a [u8], enc::Error> {
+        assert!(raw.len() >= self.mtu, "I should have at least 1200 MTU");
+        if self.send_queue.len() == 0 {
+            return Err(enc::Error::NotEnoughData(0));
+        }
+        raw[..ID::len()]
+            .copy_from_slice(&self.id_send.0.as_u64().to_le_bytes());
+        let data_from = ID::len() + self.cipher_send.nonce_len().0;
+        let data_max_to = raw.len() - self.cipher_send.tag_len().0;
+        let mut chunk_from = data_from;
+        let mut available_len = data_max_to - data_from;
+
+        use std::ops::Bound::{Excluded, Included};
+        let last_stream = self.last_stream_sent;
+
+        // Loop over our streams, write them to the packet.
+        // Notes:
+        // * to avoid starvation, just round-robin them all for now
+        // * we can enqueue multiple times the same stream
+        //   This is useful especially for Datagram streams
+        'queueloop: {
+            for (id, stream) in self
+                .send_queue
+                .range_mut((Included(last_stream), Included(stream::ID::max())))
+            {
+                if available_len < stream::Chunk::headers_len() + 1 {
+                    break 'queueloop;
+                }
+                let bytes =
+                    stream.serialize(*id, &mut raw[chunk_from..data_max_to]);
+                if bytes == 0 {
+                    break 'queueloop;
+                }
+                available_len = available_len - bytes;
+                chunk_from = chunk_from + bytes;
+                self.last_stream_sent = *id;
+            }
+            if available_len > 0 {
+                for (id, stream) in self.send_queue.range_mut((
+                    Included(stream::ID::min()),
+                    Excluded(last_stream),
+                )) {
+                    if available_len < stream::Chunk::headers_len() + 1 {
+                        break 'queueloop;
+                    }
+                    let bytes = stream
+                        .serialize(*id, &mut raw[chunk_from..data_max_to]);
+                    if bytes == 0 {
+                        break 'queueloop;
+                    }
+                    available_len = available_len - bytes;
+                    chunk_from = chunk_from + bytes;
+                    self.last_stream_sent = *id;
+                }
+            }
+        }
+        if chunk_from == data_from {
+            return Err(enc::Error::NotEnoughData(0));
+        }
+        let data_to = chunk_from + self.cipher_send.tag_len().0;
+
+        // encrypt
+        let aad = sym::AAD(&[]);
+        match self.cipher_send.encrypt(aad, &mut raw[data_from..data_to]) {
+            Ok(_) => Ok(&raw[..data_to]),
+            Err(e) => Err(e),
         }
     }
 }

 pub(crate) struct ConnList {
     thread_id: ThreadTracker,
-    connections: Vec<Option<Rc<Conn>>>,
+    connections: Vec<Option<Connection>>,
+    user_tracker: BTreeMap<LibTracker, usize>,
+    last_tracked: LibTracker,
     /// Bitmap to track which connection ids are used or free
     ids_used: Vec<::bitmaps::Bitmap<1024>>,
 }
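Note: a worked example of the `write_pkt` layout math above, using the visible defaults (1200-byte `mtu`, 8-byte connection id) plus ChaCha20-Poly1305-sized AEAD overhead as an assumption (12-byte nonce, 16-byte tag; the real values come from `cipher_send.nonce_len()` / `tag_len()`):

    // Worked example of the packet framing arithmetic; nonce/tag sizes
    // are assumptions, the rest is taken from the diff.
    fn main() {
        let mtu = 1200;     // Connection::mtu default above
        let id_len = 8;     // connection id, u64 little-endian
        let nonce_len = 12; // assumption: cipher_send.nonce_len().0
        let tag_len = 16;   // assumption: cipher_send.tag_len().0

        let data_from = id_len + nonce_len;      // first byte of chunk data
        let data_max_to = mtu - tag_len;         // end of usable space
        let available = data_max_to - data_from; // budget for stream chunks

        assert_eq!(data_from, 20);
        assert_eq!(data_max_to, 1184);
        assert_eq!(available, 1164);
        // Each chunk spends headers_len() = 8 bytes (id + len + sequence),
        // so a single full chunk can carry at most `available - 8` bytes.
        println!("payload budget per packet: {} bytes", available - 8);
    }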
@@ -206,11 +492,43 @@ impl ConnList {
         let mut ret = Self {
             thread_id,
             connections: Vec::with_capacity(INITIAL_CAP),
+            user_tracker: BTreeMap::new(),
+            last_tracked: LibTracker(Wrapping(0)),
             ids_used: vec![bitmap_id],
         };
         ret.connections.resize_with(INITIAL_CAP, || None);
         ret
     }
+    pub fn get_id_mut(&mut self, id: ID) -> Result<&mut Connection, Error> {
+        let conn_id = match id {
+            ID::ID(conn_id) => conn_id,
+            ID::Handshake => {
+                return Err(Error::NoSuchConnection);
+            }
+        };
+        let id_in_thread: usize =
+            (conn_id.get() / (self.thread_id.total as u64)) as usize;
+        if let Some(conn) = &mut self.connections[id_in_thread] {
+            Ok(conn)
+        } else {
+            return Err(Error::NoSuchConnection);
+        }
+    }
+    pub fn get_mut(
+        &mut self,
+        tracker: LibTracker,
+    ) -> Result<&mut Connection, Error> {
+        let idx = if let Some(idx) = self.user_tracker.get(&tracker) {
+            *idx
+        } else {
+            return Err(Error::NoSuchConnection);
+        };
+        if let Some(conn) = &mut self.connections[idx] {
+            Ok(conn)
+        } else {
+            return Err(Error::NoSuchConnection);
+        }
+    }
     pub fn len(&self) -> usize {
         let mut total: usize = 0;
         for bitmap in self.ids_used.iter() {
@@ -220,7 +538,23 @@ impl ConnList {
     }
     /// Only *Reserve* a connection,
     /// without actually tracking it in self.connections
+    pub(crate) fn reserve_and_track<'a>(
+        &'a mut self,
+        mut conn: Connection,
+    ) -> (LibTracker, &'a mut Connection) {
+        let (id_conn, id_in_thread) = self.reserve_first_with_idx();
+        conn.id_recv = id_conn;
+        let tracker = self.get_new_tracker(id_in_thread);
+        conn.lib_tracker = tracker;
+        self.connections[id_in_thread] = Some(conn);
+        (tracker, self.connections[id_in_thread].as_mut().unwrap())
+    }
+    /// Only *Reserve* a connection,
+    /// without actually tracking it in self.connections
     pub(crate) fn reserve_first(&mut self) -> IDRecv {
+        self.reserve_first_with_idx().0
+    }
+    fn reserve_first_with_idx(&mut self) -> (IDRecv, usize) {
         // uhm... bad things are going on here:
         // * id must be initialized, but only because:
         // * rust does not understand that after the `!found` id is always
@@ -258,10 +592,13 @@ impl ConnList {
         let actual_id = ((id_in_thread as u64) * (self.thread_id.total as u64))
             + (self.thread_id.id as u64);
         let new_id = IDRecv(ID::new_u64(actual_id));
-        new_id
+        (new_id, id_in_thread)
     }
     /// NOTE: does NOT check if the connection has been previously reserved!
-    pub(crate) fn track(&mut self, conn: Rc<Conn>) -> Result<(), ()> {
+    pub(crate) fn track(
+        &mut self,
+        mut conn: Connection,
+    ) -> Result<LibTracker, ()> {
         let conn_id = match conn.id_recv {
             IDRecv(ID::Handshake) => {
                 return Err(());
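Note: the id arithmetic in `reserve_first_with_idx` stripes connection ids across worker threads (`actual_id = idx * total + thread_id`), and `get_id_mut` inverts it with a plain division. A standalone round-trip check:

    // Standalone check of the id sharding: every (thread, index) pair
    // must map to a unique id and back.
    fn main() {
        let total: u64 = 4; // number of worker threads (example value)
        for thread_id in 0..total {
            for id_in_thread in 0..1000u64 {
                let actual_id = id_in_thread * total + thread_id;
                // reverse mapping, as done by get_id_mut:
                assert_eq!(actual_id / total, id_in_thread);
                assert_eq!(actual_id % total, thread_id);
            }
        }
        println!("id sharding round-trips");
    }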
@@ -270,8 +607,22 @@ impl ConnList {
         };
         let id_in_thread: usize =
             (conn_id.get() / (self.thread_id.total as u64)) as usize;
+        let tracker = self.get_new_tracker(id_in_thread);
+        conn.lib_tracker = tracker;
         self.connections[id_in_thread] = Some(conn);
-        Ok(())
+        Ok(tracker)
+    }
+    fn get_new_tracker(&mut self, id_in_thread: usize) -> LibTracker {
+        let mut tracker;
+        loop {
+            tracker = self.last_tracked.advance(self.thread_id.total);
+            if self.user_tracker.get(&tracker).is_none() {
+                // like, never gonna happen, it's 64 bit
+                let _ = self.user_tracker.insert(tracker, id_in_thread);
+                break;
+            }
+        }
+        tracker
     }
     pub(crate) fn remove(&mut self, id: IDRecv) {
         if let IDRecv(ID::ID(raw_id)) = id {
@@ -303,7 +654,6 @@ enum MapEntry {
     Present(IDSend),
     Reserved,
 }
-use ::std::collections::HashMap;

 /// Link the public key of the authentication server to a connection id
 /// so that we can reuse that connection to ask for more authentications
@@ -1,10 +1,18 @@
 //! Errors while parsing streams

 /// Crypto errors
 #[derive(::thiserror::Error, Debug, Copy, Clone)]
 pub enum Error {
     /// Error while parsing key material
     #[error("Not enough data for stream chunk: {0}")]
     NotEnoughData(usize),
+    /// Sequence outside of the window
+    #[error("Sequence out of the sliding window")]
+    OutOfWindow,
+    /// Wrong start/end flags received, can't reconstruct data
+    #[error("Wrong start/end flags received")]
+    WrongFlags,
+    /// Can't reconstruct the data
+    #[error("Error in reconstructing the bytestream/datagrams")]
+    Reconstructing,
 }
@@ -4,9 +4,17 @@

 mod errors;
 mod rob;
+mod uud;
+mod uudl;
 pub use errors::Error;

-use crate::{connection::stream::rob::ReliableOrderedBytestream, enc::Random};
+use crate::{
+    connection::stream::{
+        rob::ReliableOrderedBytestream, uud::UnreliableUnorderedDatagram,
+        uudl::UnreliableUnorderedDatagramLimited,
+    },
+    enc::Random,
+};

 /// Kind of stream. any combination of:
 /// reliable/unreliable ordered/unordered, bytestream/datagram
@@ -16,10 +24,45 @@ pub enum Kind {
     /// ROB: Reliable, Ordered, Bytestream
     /// AKA: TCP-like
     ROB = 0,
+    /// UUDL: Unreliable, Unordered, Datagram Limited
+    /// Aka: UDP-like. Data limited to the packet size
+    UUDL,
+}
+
+/// Tracking for a contiguous set of data
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum Fragment {
+    /// Beginning, no end
+    Start((SequenceStart, SequenceEnd)),
+    /// Neither beginning nor end
+    Middle((SequenceStart, SequenceEnd)),
+    /// No beginning, but with end
+    End((SequenceStart, SequenceEnd)),
+    /// both beginning and end, waiting to be delivered to the user
+    Ready((SequenceStart, SequenceEnd)),
+    /// both beginning and end, already delivered to the user
+    Delivered((SequenceStart, SequenceEnd)),
+    /// both beginning and end, data might not be available anymore
+    Deallocated((SequenceStart, SequenceEnd)),
+}
+
+impl Fragment {
+    // FIXME: sequence start/end?
+    /// extract the sequences from the fragment
+    pub fn get_seqs(&self) -> (SequenceStart, SequenceEnd) {
+        match self {
+            Fragment::Start((f, t))
+            | Fragment::Middle((f, t))
+            | Fragment::End((f, t))
+            | Fragment::Ready((f, t))
+            | Fragment::Delivered((f, t))
+            | Fragment::Deallocated((f, t)) => (*f, *t),
+        }
+    }
 }

 /// Id of the stream
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
 pub struct ID(pub u16);

 impl ID {
|
|||||||
pub const fn len() -> usize {
|
pub const fn len() -> usize {
|
||||||
2
|
2
|
||||||
}
|
}
|
||||||
|
/// Minimum possible Stream ID (u16::MIN)
|
||||||
|
pub const fn min() -> Self {
|
||||||
|
Self(u16::MIN)
|
||||||
|
}
|
||||||
|
/// Maximum possible Stream ID (u16::MAX)
|
||||||
|
pub const fn max() -> Self {
|
||||||
|
Self(u16::MAX)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// length of the chunk
|
/// length of the chunk
|
||||||
@@ -40,28 +91,150 @@ impl ChunkLen {
     }
 }

+//TODO: make pub?
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub(crate) struct SequenceStart(pub(crate) Sequence);
+impl SequenceStart {
+    pub(crate) fn offset(&self, seq: Sequence) -> usize {
+        if self.0.0 <= seq.0 {
+            (seq.0 - self.0.0).0 as usize
+        } else {
+            (seq.0 + (Sequence::max().0 - self.0.0)).0 as usize
+        }
+    }
+}
+
+impl ::core::ops::Add<u32> for SequenceStart {
+    type Output = SequenceStart;
+    fn add(self, other: u32) -> SequenceStart {
+        SequenceStart(self.0 + other)
+    }
+}
+
+impl ::core::ops::AddAssign<u32> for SequenceStart {
+    fn add_assign(&mut self, other: u32) {
+        self.0 += other;
+    }
+}
+
+// SequenceEnd is INCLUSIVE
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub(crate) struct SequenceEnd(pub(crate) Sequence);
+
+impl ::core::ops::Add<u32> for SequenceEnd {
+    type Output = SequenceEnd;
+    fn add(self, other: u32) -> SequenceEnd {
+        SequenceEnd(self.0 + other)
+    }
+}
+
+impl ::core::ops::AddAssign<u32> for SequenceEnd {
+    fn add_assign(&mut self, other: u32) {
+        self.0 += other;
+    }
+}
+
+// TODO: how to tell the compiler we don't use the two most significant bits?
+// maybe NonZero + always using 2nd most significant bit?
 /// Sequence number to rebuild the stream correctly
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
 pub struct Sequence(pub ::core::num::Wrapping<u32>);

 impl Sequence {
     const SEQ_NOFLAG: u32 = 0x3FFFFFFF;
     /// return a new sequence number, starting at random
     pub fn new(rand: &Random) -> Self {
-        let seq: u32 = 0;
-        rand.fill(&mut seq.to_le_bytes());
+        let mut raw_seq: [u8; 4] = [0; 4];
+        rand.fill(&mut raw_seq);
+        let seq = u32::from_le_bytes(raw_seq);
         Self(::core::num::Wrapping(seq & Self::SEQ_NOFLAG))
     }
     /// Length of the serialized field
     pub const fn len() -> usize {
         4
     }
+    /// Minimum possible sequence
+    pub const fn min() -> Self {
+        Self(::core::num::Wrapping(0))
+    }
+    /// Maximum possible sequence
+    pub const fn max() -> Self {
+        Self(::core::num::Wrapping(Self::SEQ_NOFLAG))
+    }
+    pub(crate) fn is_between(
+        &self,
+        start: SequenceStart,
+        end: SequenceEnd,
+    ) -> bool {
+        if start.0 < end.0 {
+            start.0.0 <= self.0 && self.0 <= end.0.0
+        } else {
+            start.0.0 <= self.0 || self.0 <= end.0.0
+        }
+    }
+    pub(crate) fn cmp_in_window(
+        &self,
+        window_start: SequenceStart,
+        compare: Sequence,
+    ) -> ::core::cmp::Ordering {
+        let offset_self = self.0 - window_start.0.0;
+        let offset_compare = compare.0 - window_start.0.0;
+        return offset_self.cmp(&offset_compare);
+    }
+    pub(crate) fn remaining_window(&self, end: SequenceEnd) -> u32 {
+        if self.0 <= end.0.0 {
+            (end.0.0.0 - self.0.0) + 1
+        } else {
+            end.0.0.0 + 1 + (Self::max().0 - self.0).0
+        }
+    }
+    pub(crate) fn diff_from(self, other: Sequence) -> u32 {
+        assert!(
+            self.0.0 > other.0.0,
+            "Sequence::diff_from inverted parameters"
+        );
+        self.0.0 - other.0.0
+    }
+}
+
+impl ::core::ops::Sub<u32> for Sequence {
+    type Output = Self;
+
+    fn sub(self, other: u32) -> Self {
+        Self(::core::num::Wrapping(
+            (self.0 - ::core::num::Wrapping::<u32>(other)).0 & Self::SEQ_NOFLAG,
+        ))
+    }
+}
+
+impl ::core::ops::Add<Sequence> for Sequence {
+    type Output = Self;
+
+    fn add(self, other: Self) -> Self {
+        Self(::core::num::Wrapping(
+            (self.0 + other.0).0 & Self::SEQ_NOFLAG,
+        ))
+    }
+}
+
+impl ::core::ops::Add<u32> for Sequence {
+    type Output = Sequence;
+    fn add(self, other: u32) -> Sequence {
+        Sequence(self.0 + ::core::num::Wrapping::<u32>(other))
+    }
+}
+
+impl ::core::ops::AddAssign<u32> for Sequence {
+    fn add_assign(&mut self, other: u32) {
+        self.0 += ::core::num::Wrapping::<u32>(other);
+    }
+}

 /// Chunk of data representing a stream
 /// Every chunk is as follows:
 /// | id (2 bytes) | length (2 bytes) |
 /// | flag_start (1 BIT) | flag_end (1 BIT) | sequence (30 bits) |
+/// | ...data... |
 #[derive(Debug, Clone)]
 pub struct Chunk<'a> {
     /// Id of the stream this chunk is part of
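Note: sequences only use the low 30 bits (the two top bits of the serialized word carry the start/end flags), so the `Add<Sequence>`/`Sub` impls above mask with `SEQ_NOFLAG`. A standalone check of the wrap-around:

    use core::num::Wrapping;

    // Minimal re-sketch of the masked 30-bit arithmetic;
    // SEQ_NOFLAG is copied from the diff.
    const SEQ_NOFLAG: u32 = 0x3FFFFFFF;

    fn add_seq(seq: u32, n: u32) -> u32 {
        (Wrapping(seq) + Wrapping(n)).0 & SEQ_NOFLAG
    }

    fn main() {
        // Sequence::max() + 1 wraps to Sequence::min() without ever
        // touching the two flag bits.
        assert_eq!(add_seq(SEQ_NOFLAG, 1), 0);
        assert_eq!(add_seq(SEQ_NOFLAG - 1, 3), 1);
        println!("30-bit wrap ok");
    }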
@@ -79,6 +252,10 @@ impl<'a> Chunk<'a> {
     const FLAGS_EXCLUDED_BITMASK: u8 = 0x3F;
     const FLAG_START_BITMASK: u8 = 0x80;
     const FLAG_END_BITMASK: u8 = 0x40;
+    /// Return the length of the header of a Chunk
+    pub const fn headers_len() -> usize {
+        ID::len() + ChunkLen::len() + Sequence::len()
+    }
     /// Returns the total length of the chunk, including headers
     pub fn len(&self) -> usize {
         ID::len() + ChunkLen::len() + Sequence::len() + self.data.len()
|
|||||||
let bytes = bytes_next;
|
let bytes = bytes_next;
|
||||||
bytes_next = bytes_next + Sequence::len();
|
bytes_next = bytes_next + Sequence::len();
|
||||||
raw_out[bytes..bytes_next]
|
raw_out[bytes..bytes_next]
|
||||||
.copy_from_slice(&self.sequence.0 .0.to_le_bytes());
|
.copy_from_slice(&self.sequence.0.0.to_le_bytes());
|
||||||
let mut flag_byte = raw_out[bytes] & Self::FLAGS_EXCLUDED_BITMASK;
|
let mut flag_byte = raw_out[bytes] & Self::FLAGS_EXCLUDED_BITMASK;
|
||||||
if self.flag_start {
|
if self.flag_start {
|
||||||
flag_byte = flag_byte | Self::FLAG_START_BITMASK;
|
flag_byte = flag_byte | Self::FLAG_START_BITMASK;
|
||||||
@ -149,23 +326,47 @@ impl<'a> Chunk<'a> {
|
|||||||
/// differences from Kind:
|
/// differences from Kind:
|
||||||
/// * not public
|
/// * not public
|
||||||
/// * has actual data
|
/// * has actual data
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug)]
|
||||||
pub(crate) enum Tracker {
|
pub(crate) enum Tracker {
|
||||||
/// ROB: Reliable, Ordered, Bytestream
|
/// ROB: Reliable, Ordered, Bytestream
|
||||||
/// AKA: TCP-like
|
/// AKA: TCP-like
|
||||||
ROB(ReliableOrderedBytestream),
|
ROB(ReliableOrderedBytestream),
|
||||||
|
UUDL(UnreliableUnorderedDatagramLimited),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Tracker {
|
impl Tracker {
|
||||||
pub(crate) fn new(kind: Kind, rand: &Random) -> Self {
|
pub(crate) fn new(kind: Kind, rand: &Random) -> Self {
|
||||||
match kind {
|
match kind {
|
||||||
Kind::ROB => Tracker::ROB(ReliableOrderedBytestream::new(rand)),
|
Kind::ROB => Tracker::ROB(ReliableOrderedBytestream::new(rand)),
|
||||||
|
Kind::UUDL => {
|
||||||
|
Tracker::UUDL(UnreliableUnorderedDatagramLimited::new())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Eq, PartialEq)]
|
||||||
|
pub(crate) enum StreamData {
|
||||||
|
/// not enough data to return somthing to the user
|
||||||
|
NotReady = 0,
|
||||||
|
/// we can return something to the user
|
||||||
|
Ready,
|
||||||
|
}
|
||||||
|
impl ::core::ops::BitOr for StreamData {
|
||||||
|
type Output = Self;
|
||||||
|
|
||||||
|
// Required method
|
||||||
|
fn bitor(self, other: Self) -> Self::Output {
|
||||||
|
if self == StreamData::Ready || other == StreamData::Ready {
|
||||||
|
StreamData::Ready
|
||||||
|
} else {
|
||||||
|
StreamData::NotReady
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Actual stream-tracking structure
|
/// Actual stream-tracking structure
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug)]
|
||||||
pub(crate) struct Stream {
|
pub(crate) struct Stream {
|
||||||
id: ID,
|
id: ID,
|
||||||
data: Tracker,
|
data: Tracker,
|
||||||
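Note: the `BitOr` impl on `StreamData` lets `Connection::recv` fold the per-chunk results, so a single `Ready` chunk marks the whole packet as deliverable. A standalone sketch:

    // Folding chunk results with the BitOr defined above.
    #[derive(Debug, Eq, PartialEq, Clone, Copy)]
    enum StreamData {
        NotReady,
        Ready,
    }

    impl core::ops::BitOr for StreamData {
        type Output = Self;
        fn bitor(self, other: Self) -> Self {
            if self == StreamData::Ready || other == StreamData::Ready {
                StreamData::Ready
            } else {
                StreamData::NotReady
            }
        }
    }

    fn main() {
        // One Ready chunk is enough to wake the user for this packet:
        let per_chunk =
            [StreamData::NotReady, StreamData::Ready, StreamData::NotReady];
        let folded = per_chunk
            .iter()
            .fold(StreamData::NotReady, |acc, &s| acc | s);
        assert_eq!(folded, StreamData::Ready);
        println!("packet is deliverable");
    }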
@ -180,4 +381,84 @@ impl Stream {
|
|||||||
data: Tracker::new(kind, rand),
|
data: Tracker::new(kind, rand),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
pub(crate) fn recv(&mut self, chunk: Chunk) -> Result<StreamData, Error> {
|
||||||
|
match &mut self.data {
|
||||||
|
Tracker::ROB(tracker) => tracker.recv(chunk),
|
||||||
|
Tracker::UUDL(tracker) => tracker.recv(chunk),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub(crate) fn get(&mut self) -> (SequenceStart, Vec<u8>) {
|
||||||
|
match &mut self.data {
|
||||||
|
// FIXME
|
||||||
|
Tracker::ROB(tracker) => {
|
||||||
|
(SequenceStart(Sequence::min()), tracker.get())
|
||||||
|
}
|
||||||
|
Tracker::UUDL(tracker) => tracker.get(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Track what has been sent and what has been ACK'd from a stream
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub(crate) struct SendTracker {
|
||||||
|
queue: Vec<Vec<u8>>,
|
||||||
|
sent: Vec<usize>,
|
||||||
|
ackd: Vec<usize>,
|
||||||
|
chunk_started: bool,
|
||||||
|
is_datagram: bool,
|
||||||
|
next_sequence: Sequence,
|
||||||
|
}
|
||||||
|
impl SendTracker {
|
||||||
|
pub(crate) fn new(rand: &Random) -> Self {
|
||||||
|
Self {
|
||||||
|
queue: Vec::with_capacity(4),
|
||||||
|
sent: Vec::with_capacity(4),
|
||||||
|
ackd: Vec::with_capacity(4),
|
||||||
|
chunk_started: false,
|
||||||
|
is_datagram: false,
|
||||||
|
next_sequence: Sequence::new(rand),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/// Enqueue user data to be sent
|
||||||
|
pub(crate) fn enqueue(&mut self, data: Vec<u8>) {
|
||||||
|
self.queue.push(data);
|
||||||
|
self.sent.push(0);
|
||||||
|
self.ackd.push(0);
|
||||||
|
}
|
||||||
|
/// Write the user data to the buffer and mark it as sent
|
||||||
|
pub(crate) fn get(&mut self, out: &mut [u8]) -> usize {
|
||||||
|
let data = match self.queue.get(0) {
|
||||||
|
Some(data) => data,
|
||||||
|
None => return 0,
|
||||||
|
};
|
||||||
|
let len = ::std::cmp::min(out.len(), data.len());
|
||||||
|
out[..len].copy_from_slice(&data[self.sent[0]..len]);
|
||||||
|
self.sent[0] = self.sent[0] + len;
|
||||||
|
len
|
||||||
|
}
|
||||||
|
/// Mark the sent data as successfully received from the receiver
|
||||||
|
pub(crate) fn ack(&mut self, size: usize) {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
pub(crate) fn serialize(&mut self, id: ID, raw: &mut [u8]) -> usize {
|
||||||
|
let max_data_len = raw.len() - Chunk::headers_len();
|
||||||
|
let data_len = ::std::cmp::min(max_data_len, self.queue[0].len());
|
||||||
|
let flag_start = !self.chunk_started;
|
||||||
|
let flag_end = self.is_datagram && data_len == self.queue[0].len();
|
||||||
|
let chunk = Chunk {
|
||||||
|
id,
|
||||||
|
flag_start,
|
||||||
|
flag_end,
|
||||||
|
sequence: self.next_sequence,
|
||||||
|
data: &self.queue[0][..data_len],
|
||||||
|
};
|
||||||
|
self.next_sequence = Sequence(
|
||||||
|
self.next_sequence.0 + ::core::num::Wrapping(data_len as u32),
|
||||||
|
);
|
||||||
|
if chunk.flag_end {
|
||||||
|
self.chunk_started = false;
|
||||||
|
}
|
||||||
|
chunk.serialize(raw);
|
||||||
|
data_len
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
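A minimal usage sketch for SendTracker (hypothetical, not in this patch; it assumes it runs inside this module, where Random, ID and SendTracker are visible): enqueue user data once, then let serialize() emit it chunk by chunk into outgoing packet buffers:

    #[test]
    fn sendtracker_serialize_sketch() {
        let rand = Random::new();
        let mut tracker = SendTracker::new(&rand);
        tracker.enqueue(vec![0u8; 4096]);

        let mut raw = [0u8; 1200]; // one packet's worth of space
        // serialize() builds a Chunk header plus as much queued data as
        // fits, and returns how many payload bytes were consumed
        let consumed = tracker.serialize(ID(42), &mut raw[..]);
        assert!(consumed <= 4096);
    }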
@ -1,29 +0,0 @@
-//! Implementation of the Reliable, Ordered, Bytestream transmission model
-//! AKA: TCP-like
-
-use crate::{
-    connection::stream::{Chunk, Error, Sequence},
-    enc::Random,
-};
-
-/// Reliable, Ordered, Bytestream stream tracker
-/// AKA: TCP-like
-#[derive(Debug, Clone)]
-pub(crate) struct ReliableOrderedBytestream {
-    window_start: Sequence,
-    window_len: usize,
-    data: Vec<u8>,
-}
-
-impl ReliableOrderedBytestream {
-    pub(crate) fn new(rand: &Random) -> Self {
-        Self {
-            window_start: Sequence::new(rand),
-            window_len: 1048576, // 1MB. should be enough for anybody. (lol)
-            data: Vec::new(),
-        }
-    }
-    pub(crate) fn recv(&mut self, chunk: Chunk) -> Result<(), Error> {
-        todo!()
-    }
-}
204  src/connection/stream/rob/mod.rs  Normal file
@ -0,0 +1,204 @@
//! Implementation of the Reliable, Ordered, Bytestream transmission model
//! AKA: TCP-like

use crate::{
    connection::stream::{
        Chunk, Error, Sequence, SequenceEnd, SequenceStart, StreamData,
    },
    enc::Random,
};

#[cfg(test)]
mod tests;

/// Reliable, Ordered, Bytestream stream tracker
/// AKA: TCP-like
#[derive(Debug, Clone)]
pub(crate) struct ReliableOrderedBytestream {
    pub(crate) window_start: SequenceStart,
    window_end: SequenceEnd,
    pivot: u32,
    data: Vec<u8>,
    missing: Vec<(Sequence, Sequence)>,
}

impl ReliableOrderedBytestream {
    pub(crate) fn new(rand: &Random) -> Self {
        let window_len = 1048576; // 1MB. should be enough for anybody. (lol)
        let window_start = SequenceStart(Sequence::new(rand));
        let window_end = SequenceEnd(window_start.0 + (window_len - 1));
        let mut data = Vec::with_capacity(window_len as usize);
        data.resize(data.capacity(), 0);

        Self {
            window_start,
            window_end,
            pivot: window_len,
            data,
            missing: [(window_start.0, window_end.0)].to_vec(),
        }
    }
    pub(crate) fn with_window_size(rand: &Random, size: u32) -> Self {
        assert!(
            size < Sequence::max().0.0,
            "Max window size is {}",
            Sequence::max().0.0
        );
        let window_len = size;
        let window_start = SequenceStart(Sequence::new(rand));
        let window_end = SequenceEnd(window_start.0 + (window_len - 1));
        let mut data = Vec::with_capacity(window_len as usize);
        data.resize(data.capacity(), 0);

        Self {
            window_start,
            window_end,
            pivot: window_len,
            data,
            missing: [(window_start.0, window_end.0)].to_vec(),
        }
    }
    pub(crate) fn window_size(&self) -> u32 {
        self.data.len() as u32
    }
    pub(crate) fn get(&mut self) -> Vec<u8> {
        if self.missing.len() == 0 {
            let (first, second) = self.data.split_at(self.pivot as usize);
            let mut ret = Vec::with_capacity(self.data.len());
            ret.extend_from_slice(first);
            ret.extend_from_slice(second);
            self.window_start = self.window_start + (ret.len() as u32);
            self.window_end = self.window_end + (ret.len() as u32);
            self.data.clear();
            return ret;
        }
        let data_len = self.window_start.offset(self.missing[0].0);
        let last_missing_idx = self.missing.len() - 1;
        let mut last_missing = &mut self.missing[last_missing_idx];
        last_missing.1 = last_missing.1 + (data_len as u32);
        self.window_start = self.window_start + (data_len as u32);
        self.window_end = self.window_end + (data_len as u32);

        let mut ret = Vec::with_capacity(data_len);
        let (first, second) = self.data[..].split_at(self.pivot as usize);
        let first_len = ::core::cmp::min(data_len, first.len());
        let second_len = data_len - first_len;

        ret.extend_from_slice(&first[..first_len]);
        ret.extend_from_slice(&second[..second_len]);

        self.pivot =
            ((self.pivot as usize + data_len) % self.data.len()) as u32;
        ret
    }
    pub(crate) fn recv(&mut self, chunk: Chunk) -> Result<StreamData, Error> {
        if !chunk
            .sequence
            .is_between(self.window_start, self.window_end)
        {
            return Err(Error::OutOfWindow);
        }
        // make sure we consider only the bytes inside the sliding window
        let maxlen = ::std::cmp::min(
            chunk.sequence.remaining_window(self.window_end) as usize,
            chunk.data.len(),
        );
        if maxlen == 0 {
            // empty window or empty chunk, but we don't care
            return Err(Error::OutOfWindow);
        }
        // translate Sequences to offsets in self.data
        let data = &chunk.data[..maxlen];
        let offset = self.window_start.offset(chunk.sequence);
        let offset_end = offset + chunk.data.len() - 1;

        // Find the chunks we are missing that we can copy,
        // and fix the missing tracker
        let mut copy_ranges = Vec::new();
        let mut to_delete = Vec::new();
        let mut to_add = Vec::new();
        // note: the ranges are (INCLUSIVE, INCLUSIVE)
        for (idx, el) in self.missing.iter_mut().enumerate() {
            let missing_from = self.window_start.offset(el.0);
            if missing_from > offset_end {
                break;
            }
            let missing_to = self.window_start.offset(el.1);
            if missing_to < offset {
                continue;
            }
            if missing_from >= offset && missing_from <= offset_end {
                if missing_to <= offset_end {
                    // [.....chunk.....]
                    //    [..missing..]
                    to_delete.push(idx);
                    copy_ranges.push((missing_from, missing_to));
                } else {
                    // [....chunk....]
                    //    [...missing...]
                    copy_ranges.push((missing_from, offset_end));
                    el.0 += ((offset_end - missing_from) + 1) as u32;
                }
            } else if missing_from < offset {
                if missing_to > offset_end {
                    //    [..chunk..]
                    // [....missing....]
                    to_add.push((
                        el.0 + (((offset_end - missing_from) + 1) as u32),
                        el.1,
                    ));
                    el.1 = el.0 + (((offset - missing_from) - 1) as u32);
                    copy_ranges.push((offset, offset_end));
                } else if offset <= missing_to {
                    //    [....chunk....]
                    // [...missing...]
                    copy_ranges.push((offset, missing_to));
                    el.1 = el.0 + (((offset_end - missing_from) - 1) as u32);
                }
            }
        }
        {
            let mut deleted = 0;
            for idx in to_delete.into_iter() {
                // earlier removals shift the remaining targets left
                self.missing.remove(idx - deleted);
                deleted = deleted + 1;
            }
        }
        self.missing.append(&mut to_add);
        self.missing
            .sort_by(|(from_a, _), (from_b, _)| from_a.0.0.cmp(&from_b.0.0));

        // copy only the missing data
        let (first, second) = self.data[..].split_at_mut(self.pivot as usize);
        for (from, to) in copy_ranges.into_iter() {
            let to = to + 1;
            if from <= first.len() {
                let first_from = from;
                let first_to = ::core::cmp::min(first.len(), to);
                let data_first_from = from - offset;
                let data_first_to = first_to - offset;
                first[first_from..first_to]
                    .copy_from_slice(&data[data_first_from..data_first_to]);

                let second_to = to - first_to;
                let data_second_to = data_first_to + second_to;
                second[..second_to]
                    .copy_from_slice(&data[data_first_to..data_second_to]);
            } else {
                let second_from = from - first.len();
                let second_to = to - first.len();
                let data_from = from - offset;
                let data_to = to - offset;
                second[second_from..second_to]
                    .copy_from_slice(&data[data_from..data_to]);
            }
        }
        if self.missing.len() == 0
            || self.window_start.offset(self.missing[0].0) == 0
        {
            Ok(StreamData::Ready)
        } else {
            Ok(StreamData::NotReady)
        }
    }
}
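The `pivot` in the new ROB (and in the UUD trackers below) turns the flat `data` Vec into a ring: window offsets are mapped onto the buffer modulo its length, and `get()` advances the pivot instead of shifting bytes around. A small worked illustration of that index arithmetic (standalone sketch, not code from this patch):

    // with a window of 8 bytes and pivot = 5, window offset 0 sits at
    // buffer index 5, and offset 4 wraps around to index (5 + 4) % 8 = 1
    fn ring_index(pivot: usize, offset: usize, len: usize) -> usize {
        (pivot + offset) % len
    }
    // ring_index(5, 0, 8) == 5
    // ring_index(5, 4, 8) == 1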
249  src/connection/stream/rob/tests.rs  Normal file
@ -0,0 +1,249 @@
use crate::{
    connection::stream::{self, rob::*, Chunk},
    enc::Random,
};

#[::tracing_test::traced_test]
#[test]
fn test_stream_rob_sequential() {
    let rand = Random::new();
    let mut rob = ReliableOrderedBytestream::with_window_size(&rand, 1048576);

    let mut data = Vec::with_capacity(1024);
    data.resize(data.capacity(), 0);
    rand.fill(&mut data[..]);

    let start = rob.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..512],
    };
    let got = rob.get();
    assert!(&got[..] == &[], "rob: got data?");
    let _ = rob.recv(chunk);
    let got = rob.get();
    assert!(
        &data[..512] == &got[..],
        "ROB1: DIFF: {:?} {:?}",
        &data[..512].len(),
        &got[..].len()
    );
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: true,
        sequence: start + 512,
        data: &data[512..],
    };
    let _ = rob.recv(chunk);
    let got = rob.get();
    assert!(
        &data[512..] == &got[..],
        "ROB2: DIFF: {:?} {:?}",
        &data[512..].len(),
        &got[..].len()
    );
}

#[::tracing_test::traced_test]
#[test]
fn test_stream_rob_retransmit() {
    let rand = Random::new();
    let max_window: usize = 100;
    let mut rob =
        ReliableOrderedBytestream::with_window_size(&rand, max_window as u32);

    let mut data = Vec::with_capacity(120);
    data.resize(data.capacity(), 0);
    for i in 0..data.len() {
        data[i] = i as u8;
    }

    let start = rob.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..40],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..60],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 40,
        data: &data[40..60],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 80,
        data: &data[80..],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..90],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + (max_window as u32),
        data: &data[max_window..],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: true,
        sequence: start + 90,
        data: &data[90..max_window],
    };
    let _ = rob.recv(chunk);
    let got = rob.get();
    assert!(
        &data[..max_window] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[..max_window],
        &got[..],
    );
}
#[::tracing_test::traced_test]
#[test]
fn test_stream_rob_rolling() {
    let rand = Random::new();
    let max_window: usize = 100;
    let mut rob =
        ReliableOrderedBytestream::with_window_size(&rand, max_window as u32);

    let mut data = Vec::with_capacity(120);
    data.resize(data.capacity(), 0);
    for i in 0..data.len() {
        data[i] = i as u8;
    }

    let start = rob.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..40],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..100],
    };
    let _ = rob.recv(chunk);
    let got = rob.get();
    assert!(
        &data[..40] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[..40],
        &got[..],
    );
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 40,
        data: &data[40..],
    };
    let _ = rob.recv(chunk);
    let got = rob.get();
    assert!(
        &data[40..] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[40..],
        &got[..],
    );
}
#[::tracing_test::traced_test]
#[test]
fn test_stream_rob_rolling_second_case() {
    let rand = Random::new();
    let max_window: usize = 100;
    let mut rob =
        ReliableOrderedBytestream::with_window_size(&rand, max_window as u32);

    let mut data = Vec::with_capacity(120);
    data.resize(data.capacity(), 0);
    for i in 0..data.len() {
        data[i] = i as u8;
    }

    let start = rob.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..40],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..100],
    };
    let _ = rob.recv(chunk);
    let got = rob.get();
    assert!(
        &data[..40] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[..40],
        &got[..],
    );
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 40,
        data: &data[40..100],
    };
    let _ = rob.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 100,
        data: &data[100..],
    };
    let _ = rob.recv(chunk);
    let got = rob.get();
    assert!(
        &data[40..] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[40..],
        &got[..],
    );
}
647  src/connection/stream/uud/mod.rs  Normal file
@ -0,0 +1,647 @@
//! Implementation of the Unreliable, unordered, Datagram transmission model
//!
//! AKA: UDP-like, but the datagram can cross the packet-size (MTU) limit.
//!
//! Only fully received datagrams will be delivered to the user, and
//! half-received ones will be discarded after a timeout

use crate::{
    connection::stream::{
        Chunk, Error, Fragment, Sequence, SequenceEnd, SequenceStart,
        StreamData,
    },
    enc::Random,
};

use ::core::{
    cmp::{self, Ordering},
    marker::PhantomData,
    num::Wrapping,
    ops,
};
use ::std::collections::{BTreeMap, VecDeque};

#[cfg(test)]
mod tests;

type Timer = u64;

pub struct Data<'a> {
    data_first: &'a mut [u8],
    data_second: &'a mut [u8],
    pub from: SequenceStart,
    //pub(crate) stream: &'a Uud,
    pub(crate) stream: ::std::ptr::NonNull<Uud>,
    _not_send_sync: PhantomData<*const ()>,
}

impl<'a> Drop for Data<'a> {
    fn drop(&mut self) {
        // safe because we are !Send
        #[allow(unsafe_code)]
        unsafe {
            let uud = self.stream.as_mut();
            uud.free(
                self.from,
                (self.data_first.len() + self.data_second.len()) as u32,
            );
        }
    }
}

impl<'a> ops::Index<usize> for Data<'a> {
    type Output = u8;

    fn index(&self, index: usize) -> &Self::Output {
        let first_len = self.data_first.len();
        if index < first_len {
            return &self.data_first[index];
        }
        return &self.data_second[index - first_len];
    }
}

impl<'a> ops::IndexMut<usize> for Data<'a> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        let first_len = self.data_first.len();
        if index < first_len {
            return &mut self.data_first[index];
        }
        return &mut self.data_second[index - first_len];
    }
}

pub(crate) struct Uud {
    pub(crate) window_start: SequenceStart,
    window_end: SequenceEnd,
    pivot: u32,
    data: Vec<u8>,
    track: VecDeque<(Fragment, Timer)>,
}

impl Uud {
    pub(crate) fn new(rand: &Random) -> Self {
        let window_len = 1048576; // 1MB. should be enough for anybody. (lol)
        let window_start = SequenceStart(Sequence::new(rand));
        let window_end = SequenceEnd(window_start.0 + (window_len - 1));
        let mut data = Vec::with_capacity(window_len as usize);
        data.resize(data.capacity(), 0);

        Self {
            window_start,
            window_end,
            pivot: window_len,
            data,
            track: VecDeque::with_capacity(4),
        }
    }
    pub(crate) fn with_window_size(rand: &Random, size: u32) -> Self {
        assert!(
            size < Sequence::max().0.0,
            "Max window size is {}",
            Sequence::max().0.0
        );
        let window_len = size;
        let window_start = SequenceStart(Sequence::new(rand));
        let window_end = SequenceEnd(window_start.0 + (window_len - 1));
        let mut data = Vec::with_capacity(window_len as usize);
        data.resize(data.capacity(), 0);

        Self {
            window_start,
            window_end,
            pivot: window_len,
            data,
            track: VecDeque::with_capacity(4),
        }
    }
    pub(crate) fn window_size(&self) -> u32 {
        self.data.len() as u32
    }
    pub(crate) fn get(&mut self) -> Option<Data> {
        let self_ptr = ::std::ptr::NonNull::new(self).unwrap();
        for track in self.track.iter_mut() {
            if let Fragment::Ready((start, end)) = track.0 {
                let data_from = (self.window_start.offset(start.0)
                    + self.pivot as usize)
                    % self.data.len();
                let data_to = (self.window_start.offset(end.0)
                    + self.pivot as usize)
                    % self.data.len();

                track.0 = Fragment::Delivered((start, end));
                let first: &mut [u8];
                let second: &mut [u8];
                if data_from < data_to {
                    let (tmp_first, tmp_second) =
                        self.data.split_at_mut(data_to);
                    first = &mut tmp_first[data_from..];
                    second = &mut tmp_second[0..0];
                } else {
                    let (tmp_second, tmp_first) =
                        self.data.split_at_mut(self.pivot as usize);
                    first = &mut tmp_first[(data_from - self.pivot as usize)..];
                    second = &mut tmp_second[..data_to];
                }

                return Some(Data {
                    from: start,
                    data_first: first,
                    data_second: second,
                    stream: self_ptr,
                    _not_send_sync: PhantomData::default(),
                });
            }
        }
        None
    }
    pub(crate) fn free(&mut self, from: SequenceStart, len: u32) {
        if !from.0.is_between(self.window_start, self.window_end) {
            return;
        }
        let mut first_keep = 0;
        let mut last_sequence = self.window_start.0;
        let mut deallocated = false;
        for (idx, track) in self.track.iter_mut().enumerate() {
            if let Fragment::Delivered((start, to)) = track.0 {
                if start == from && to.0 == from.0 + len {
                    track.0 = Fragment::Deallocated((start, to));
                    deallocated = true;
                    if idx == first_keep {
                        first_keep = idx + 1;
                        last_sequence = to.0;
                        continue;
                    }
                }
            }
            if idx == first_keep {
                if let Fragment::Deallocated((_, to)) = track.0 {
                    first_keep = idx + 1;
                    last_sequence = to.0;
                    continue;
                }
            }
            if deallocated {
                break;
            }
        }
        self.track.drain(..first_keep);
        self.pivot = ((self.pivot as usize
            + self.window_start.offset(last_sequence))
            % self.data.len()) as u32;
    }
    pub(crate) fn recv(&mut self, chunk: Chunk) -> Result<StreamData, Error> {
        if !chunk
            .sequence
            .is_between(self.window_start, self.window_end)
        {
            return Err(Error::OutOfWindow);
        }

        // make sure we consider only the bytes inside the sliding window
        let maxlen = ::std::cmp::min(
            chunk.sequence.remaining_window(self.window_end) as usize,
            chunk.data.len(),
        );
        if maxlen == 0 {
            // empty window or empty chunk, but we don't care
            return Err(Error::OutOfWindow);
        }
        let chunk_flag_end: bool;
        if maxlen != chunk.data.len() {
            // we are not considering the full chunk, so
            // make sure the end flag is not set
            chunk_flag_end = false;

            // FIXME: what happens if we "truncate" this chunk now,
            // then we have more space in the window
            // then we receive the same packet again?
        } else {
            chunk_flag_end = chunk.flag_end;
        }
        // translate Sequences to offsets in self.data
        let data = &chunk.data[..maxlen];
        let chunk_to = chunk.sequence + data.len() as u32;
        let mut last_usable = self.window_end.0;
        let mut ret = StreamData::NotReady;
        let mut copy_data_idx_from = 0;
        for (idx, (fragment, _)) in self.track.iter_mut().enumerate().rev() {
            let (from, to) = fragment.get_seqs();
            let to_next = to + 1;
            match to_next.0.cmp_in_window(self.window_start, chunk.sequence) {
                Ordering::Equal => {
                    // `chunk` is immediately after `fragment`
                    if !chunk_to.is_between(
                        SequenceStart(to_next.0),
                        SequenceEnd(last_usable),
                    ) {
                        return Err(Error::Reconstructing);
                    }
                    match fragment {
                        Fragment::Start((_, f_end)) => {
                            if chunk.flag_start {
                                // we can't start a datagram twice.
                                // ignore the data
                                return Err(Error::WrongFlags);
                            }
                            if chunk_flag_end {
                                *fragment = Fragment::Ready((
                                    from,
                                    to + (data.len() as u32),
                                ));
                                ret = StreamData::Ready;
                            } else {
                                *f_end += data.len() as u32;
                            }
                            copy_data_idx_from =
                                chunk.sequence.diff_from(self.window_start.0)
                                    as usize;
                        }
                        Fragment::Middle((_, f_end)) => {
                            if chunk.flag_start {
                                // we can't start a datagram twice.
                                // ignore the data
                                return Err(Error::WrongFlags);
                            }
                            if chunk_flag_end {
                                *fragment = Fragment::End((
                                    from,
                                    to + (data.len() as u32),
                                ));
                            } else {
                                *f_end += data.len() as u32;
                            }
                            copy_data_idx_from =
                                chunk.sequence.diff_from(self.window_start.0)
                                    as usize;
                        }
                        Fragment::End(_)
                        | Fragment::Ready(_)
                        | Fragment::Delivered(_)
                        | Fragment::Deallocated(_) => {
                            if !chunk.flag_start {
                                return Err(Error::WrongFlags);
                            }
                            let toinsert = if chunk_flag_end {
                                ret = StreamData::Ready;
                                Fragment::Ready((
                                    SequenceStart(chunk.sequence),
                                    SequenceEnd(chunk_to),
                                ))
                            } else {
                                Fragment::Start((
                                    SequenceStart(chunk.sequence),
                                    SequenceEnd(chunk_to),
                                ))
                            };
                            self.track.insert(idx + 1, (toinsert, 0));
                            copy_data_idx_from =
                                chunk.sequence.diff_from(self.window_start.0)
                                    as usize;
                        }
                    }
                    break;
                }
                Ordering::Less => {
                    // there is a data hole between `chunk` and `fragment`

                    if !chunk_to.is_between(
                        SequenceStart(to_next.0),
                        SequenceEnd(last_usable),
                    ) {
                        return Err(Error::Reconstructing);
                    }
                    let toinsert = if chunk.flag_start {
                        if chunk_flag_end {
                            ret = StreamData::Ready;
                            Fragment::Ready((
                                SequenceStart(chunk.sequence),
                                SequenceEnd(chunk_to),
                            ))
                        } else {
                            Fragment::Start((
                                SequenceStart(chunk.sequence),
                                SequenceEnd(chunk_to),
                            ))
                        }
                    } else {
                        if chunk_flag_end {
                            Fragment::End((
                                SequenceStart(chunk.sequence),
                                SequenceEnd(chunk_to),
                            ))
                        } else {
                            Fragment::Middle((
                                SequenceStart(chunk.sequence),
                                SequenceEnd(chunk_to),
                            ))
                        }
                    };
                    self.track.insert(idx + 1, (toinsert, 0));
                    copy_data_idx_from =
                        chunk.sequence.diff_from(self.window_start.0) as usize;
                    break;
                }
                Ordering::Greater => {
                    // to_next > chunk.sequence
                    // `fragment` is too new, need to look at older ones

                    if from.0.cmp_in_window(self.window_start, chunk.sequence)
                        != Ordering::Greater
                    {
                        // to_next > chunk.sequence >= from
                        // overlapping not yet allowed
                        return Err(Error::Reconstructing);
                    }
                    if idx == 0 {
                        // check if we can add before everything
                        if chunk_to == from.0 {
                            match fragment {
                                Fragment::Middle(_) => {
                                    if chunk.flag_start {
                                        *fragment = Fragment::Start((
                                            SequenceStart(chunk.sequence),
                                            to,
                                        ));
                                    } else {
                                        *fragment = Fragment::Middle((
                                            SequenceStart(chunk.sequence),
                                            to,
                                        ));
                                    }
                                }
                                Fragment::End(_) => {
                                    if chunk.flag_start {
                                        *fragment = Fragment::Ready((
                                            SequenceStart(chunk.sequence),
                                            to,
                                        ));
                                        ret = StreamData::Ready;
                                    } else {
                                        *fragment = Fragment::End((
                                            SequenceStart(chunk.sequence),
                                            to,
                                        ));
                                    }
                                }
                                Fragment::Start(_)
                                | Fragment::Ready(_)
                                | Fragment::Delivered(_)
                                | Fragment::Deallocated(_) => {
                                    if chunk_flag_end {
                                        // add, don't merge
                                    } else {
                                        // fragment.start, but !chunk.end
                                        return Err(Error::WrongFlags);
                                    }
                                }
                            }
                            copy_data_idx_from =
                                chunk.sequence.diff_from(self.window_start.0)
                                    as usize;
                            break;
                        }
                        // chunk before fragment
                        let toinsert = if chunk.flag_start {
                            if chunk_flag_end {
                                ret = StreamData::Ready;
                                Fragment::Ready((
                                    SequenceStart(chunk.sequence),
                                    SequenceEnd(chunk_to),
                                ))
                            } else {
                                Fragment::Start((
                                    SequenceStart(chunk.sequence),
                                    SequenceEnd(chunk_to),
                                ))
                            }
                        } else {
                            if chunk_flag_end {
                                Fragment::End((
                                    SequenceStart(chunk.sequence),
                                    SequenceEnd(chunk_to),
                                ))
                            } else {
                                Fragment::Middle((
                                    SequenceStart(chunk.sequence),
                                    SequenceEnd(chunk_to),
                                ))
                            }
                        };
                        self.track.insert(0, (toinsert, 0));
                        copy_data_idx_from =
                            chunk.sequence.diff_from(self.window_start.0)
                                as usize;
                        break;
                    }
                    last_usable = from.0 - 1;
                }
            }
        }
        let data_idx_from =
            (copy_data_idx_from + self.pivot as usize) % self.data.len();
        let data_idx_to = (data_idx_from + data.len()) % self.data.len();
        if data_idx_from < data_idx_to {
            self.data[data_idx_from..data_idx_to].copy_from_slice(&data);
        } else {
            let data_pivot = self.data.len() - data_idx_from;
            let (first, second) = data.split_at(data_pivot);
            self.data[data_idx_from..].copy_from_slice(&first);
            self.data[..data_idx_to].copy_from_slice(&second);
        }
        Ok(ret)
    }
}

/// Copy of ROB for reference
#[derive(Debug, Clone)]
pub(crate) struct UnreliableUnorderedDatagram {
    pub(crate) window_start: SequenceStart,
    window_end: SequenceEnd,
    pivot: u32,
    data: Vec<u8>,
    missing: Vec<(Sequence, Sequence)>,
}

impl UnreliableUnorderedDatagram {
    pub(crate) fn new(rand: &Random) -> Self {
        let window_len = 1048576; // 1MB. should be enough for anybody. (lol)
        let window_start = SequenceStart(Sequence::new(rand));
        let window_end = SequenceEnd(window_start.0 + (window_len - 1));
        let mut data = Vec::with_capacity(window_len as usize);
        data.resize(data.capacity(), 0);

        Self {
            window_start,
            window_end,
            pivot: window_len,
            data,
            missing: [(window_start.0, window_end.0)].to_vec(),
        }
    }
    pub(crate) fn with_window_size(rand: &Random, size: u32) -> Self {
        assert!(
            size < Sequence::max().0.0,
            "Max window size is {}",
            Sequence::max().0.0
        );
        let window_len = size;
        let window_start = SequenceStart(Sequence::new(rand));
        let window_end = SequenceEnd(window_start.0 + (window_len - 1));
        let mut data = Vec::with_capacity(window_len as usize);
        data.resize(data.capacity(), 0);

        Self {
            window_start,
            window_end,
            pivot: window_len,
            data,
            missing: [(window_start.0, window_end.0)].to_vec(),
        }
    }
    pub(crate) fn window_size(&self) -> u32 {
        self.data.len() as u32
    }
    pub(crate) fn get(&mut self) -> Vec<u8> {
        if self.missing.len() == 0 {
            let (first, second) = self.data.split_at(self.pivot as usize);
            let mut ret = Vec::with_capacity(self.data.len());
            ret.extend_from_slice(first);
            ret.extend_from_slice(second);
            self.window_start += ret.len() as u32;
            self.window_end =
                SequenceEnd(Sequence(Wrapping::<u32>(ret.len() as u32)));
            self.data.clear();
            return ret;
        }
        let data_len = self.window_start.offset(self.missing[0].0);
        let last_missing_idx = self.missing.len() - 1;
        let mut last_missing = &mut self.missing[last_missing_idx];
        last_missing.1 += data_len as u32;
        self.window_start += data_len as u32;
        self.window_end += data_len as u32;

        let mut ret = Vec::with_capacity(data_len);
        let (first, second) = self.data[..].split_at(self.pivot as usize);
        let first_len = cmp::min(data_len, first.len());
        let second_len = data_len - first_len;

        ret.extend_from_slice(&first[..first_len]);
        ret.extend_from_slice(&second[..second_len]);

        self.pivot =
            ((self.pivot as usize + data_len) % self.data.len()) as u32;
        ret
    }
    pub(crate) fn recv(&mut self, chunk: Chunk) -> Result<StreamData, Error> {
        if !chunk
            .sequence
            .is_between(self.window_start, self.window_end)
        {
            return Err(Error::OutOfWindow);
        }
        // make sure we consider only the bytes inside the sliding window
        let maxlen = ::std::cmp::min(
            chunk.sequence.remaining_window(self.window_end) as usize,
            chunk.data.len(),
        );
        if maxlen == 0 {
            // empty window or empty chunk, but we don't care
            return Err(Error::OutOfWindow);
        }
        // translate Sequences to offsets in self.data
        let data = &chunk.data[..maxlen];
        let offset = self.window_start.offset(chunk.sequence);
        let offset_end = offset + chunk.data.len() - 1;

        // Find the chunks we are missing that we can copy,
        // and fix the missing tracker
        let mut copy_ranges = Vec::new();
        let mut to_delete = Vec::new();
        let mut to_add = Vec::new();
        // note: the ranges are (INCLUSIVE, INCLUSIVE)
        for (idx, el) in self.missing.iter_mut().enumerate() {
            let missing_from = self.window_start.offset(el.0);
            if missing_from > offset_end {
                break;
            }
            let missing_to = self.window_start.offset(el.1);
            if missing_to < offset {
                continue;
            }
            if missing_from >= offset && missing_from <= offset_end {
                if missing_to <= offset_end {
                    // [.....chunk.....]
                    //    [..missing..]
                    to_delete.push(idx);
                    copy_ranges.push((missing_from, missing_to));
                } else {
                    // [....chunk....]
                    //    [...missing...]
                    copy_ranges.push((missing_from, offset_end));
                    el.0 += ((offset_end - missing_from) + 1) as u32;
                }
            } else if missing_from < offset {
                if missing_to > offset_end {
                    //    [..chunk..]
                    // [....missing....]
                    to_add.push((
                        el.0 + (((offset_end - missing_from) + 1) as u32),
                        el.1,
                    ));
                    el.1 = el.0 + (((offset - missing_from) - 1) as u32);
                    copy_ranges.push((offset, offset_end));
                } else if offset <= missing_to {
                    //    [....chunk....]
                    // [...missing...]
                    copy_ranges.push((offset, missing_to));
                    el.1 = el.0 + (((offset_end - missing_from) - 1) as u32);
                }
            }
        }
        {
            let mut deleted = 0;
            for idx in to_delete.into_iter() {
                // earlier removals shift the remaining targets left
                self.missing.remove(idx - deleted);
                deleted = deleted + 1;
            }
        }
        self.missing.append(&mut to_add);
        self.missing
            .sort_by(|(from_a, _), (from_b, _)| from_a.0.0.cmp(&from_b.0.0));

        // copy only the missing data
        let (first, second) = self.data[..].split_at_mut(self.pivot as usize);
        for (from, to) in copy_ranges.into_iter() {
            let to = to + 1;
            if from <= first.len() {
                let first_from = from;
                let first_to = cmp::min(first.len(), to);
                let data_first_from = from - offset;
                let data_first_to = first_to - offset;
                first[first_from..first_to]
                    .copy_from_slice(&data[data_first_from..data_first_to]);

                let second_to = to - first_to;
                let data_second_to = data_first_to + second_to;
                second[..second_to]
                    .copy_from_slice(&data[data_first_to..data_second_to]);
            } else {
                let second_from = from - first.len();
                let second_to = to - first.len();
                let data_from = from - offset;
                let data_to = to - offset;
                second[second_from..second_to]
                    .copy_from_slice(&data[data_from..data_to]);
            }
        }
        if self.missing.len() == 0
            || self.window_start.offset(self.missing[0].0) == 0
        {
            Ok(StreamData::Ready)
        } else {
            Ok(StreamData::NotReady)
        }
    }
}
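A consumption sketch for the UUD side (hypothetical helper, not in this patch): get() hands out a Data handle whose Index impl transparently crosses the ring-buffer seam, and dropping the handle calls Uud::free() so the window space can be reclaimed:

    fn consume_one(uud: &mut Uud) -> Option<u8> {
        let datagram = uud.get()?; // next fully reassembled datagram, if any
        let first_byte = datagram[0]; // would panic on a zero-length datagram
        Some(first_byte)
        // `datagram` is dropped here, marking its fragment Deallocated
    }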
249  src/connection/stream/uud/tests.rs  Normal file
@ -0,0 +1,249 @@
use crate::{
    connection::stream::{self, Chunk, uud::*},
    enc::Random,
};

#[::tracing_test::traced_test]
#[test]
fn test_stream_uud_sequential() {
    let rand = Random::new();
    let mut uud = UnreliableUnorderedDatagram::with_window_size(&rand, 1048576);

    let mut data = Vec::with_capacity(1024);
    data.resize(data.capacity(), 0);
    rand.fill(&mut data[..]);

    let start = uud.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..512],
    };
    let got = uud.get();
    assert!(&got[..] == &[], "uud: got data?");
    let _ = uud.recv(chunk);
    let got = uud.get();
    assert!(
        &data[..512] == &got[..],
        "UUD1: DIFF: {:?} {:?}",
        &data[..512].len(),
        &got[..].len()
    );
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: true,
        sequence: start + 512,
        data: &data[512..],
    };
    let _ = uud.recv(chunk);
    let got = uud.get();
    assert!(
        &data[512..] == &got[..],
        "UUD2: DIFF: {:?} {:?}",
        &data[512..].len(),
        &got[..].len()
    );
}

#[::tracing_test::traced_test]
#[test]
fn test_stream_uud_retransmit() {
    let rand = Random::new();
    let max_window: usize = 100;
    let mut uud =
        UnreliableUnorderedDatagram::with_window_size(&rand, max_window as u32);

    let mut data = Vec::with_capacity(120);
    data.resize(data.capacity(), 0);
    for i in 0..data.len() {
        data[i] = i as u8;
    }

    let start = uud.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..40],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..60],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 40,
        data: &data[40..60],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 80,
        data: &data[80..],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..90],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: false,
        sequence: start + (max_window as u32),
        data: &data[max_window..],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: false,
        flag_end: true,
        sequence: start + 90,
        data: &data[90..max_window],
    };
    let _ = uud.recv(chunk);
    let got = uud.get();
    assert!(
        &data[..max_window] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[..max_window],
        &got[..],
    );
}
#[::tracing_test::traced_test]
#[test]
fn test_stream_uud_rolling() {
    let rand = Random::new();
    let max_window: usize = 100;
    let mut uud =
        UnreliableUnorderedDatagram::with_window_size(&rand, max_window as u32);

    let mut data = Vec::with_capacity(120);
    data.resize(data.capacity(), 0);
    for i in 0..data.len() {
        data[i] = i as u8;
    }

    let start = uud.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..40],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..100],
    };
    let _ = uud.recv(chunk);
    let got = uud.get();
    assert!(
        &data[..40] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[..40],
        &got[..],
    );
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 40,
        data: &data[40..],
    };
    let _ = uud.recv(chunk);
    let got = uud.get();
    assert!(
        &data[40..] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[40..],
        &got[..],
    );
}
#[::tracing_test::traced_test]
#[test]
fn test_stream_uud_rolling_second_case() {
    let rand = Random::new();
    let max_window: usize = 100;
    let mut uud =
        UnreliableUnorderedDatagram::with_window_size(&rand, max_window as u32);

    let mut data = Vec::with_capacity(120);
    data.resize(data.capacity(), 0);
    for i in 0..data.len() {
        data[i] = i as u8;
    }

    let start = uud.window_start.0;

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start,
        data: &data[..40],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 50,
        data: &data[50..100],
    };
    let _ = uud.recv(chunk);
    let got = uud.get();
    assert!(
        &data[..40] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[..40],
        &got[..],
    );
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 40,
        data: &data[40..100],
    };
    let _ = uud.recv(chunk);
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: false,
        sequence: start + 100,
        data: &data[100..],
    };
    let _ = uud.recv(chunk);
    let got = uud.get();
    assert!(
        &data[40..] == &got[..],
        "DIFF:\n {:?}\n {:?}",
        &data[40..],
        &got[..],
    );
}
43  src/connection/stream/uudl/mod.rs  Normal file
@ -0,0 +1,43 @@
//! Implementation of the Unreliable, Unordered, Datagram Limited
//! transmission model
//!
//! AKA: UDP-like. "Limited" because the data must fit in a single packet
//!

use crate::connection::stream::{
    Chunk, Error, Sequence, SequenceStart, StreamData,
};

use ::std::collections::{BTreeMap, VecDeque};

#[cfg(test)]
mod tests;

/// UnReliable, UnOrdered, Datagram, Limited to the packet size
/// AKA: UDP-like
#[derive(Debug)]
pub(crate) struct UnreliableUnorderedDatagramLimited {
    received: VecDeque<(SequenceStart, Vec<u8>)>,
}

impl UnreliableUnorderedDatagramLimited {
    pub(crate) fn new() -> Self {
        Self {
            received: VecDeque::with_capacity(4),
        }
    }
    pub(crate) fn get(&mut self) -> (SequenceStart, Vec<u8>) {
        match self.received.pop_front() {
            Some(data) => data,
            None => (SequenceStart(Sequence::min()), Vec::new()),
        }
    }
    pub(crate) fn recv(&mut self, chunk: Chunk) -> Result<StreamData, Error> {
        if !chunk.flag_start || !chunk.flag_end {
            return Err(Error::WrongFlags);
        }
        self.received
            .push_back((SequenceStart(chunk.sequence), chunk.data.to_vec()));
        Ok(StreamData::Ready)
    }
}
56  src/connection/stream/uudl/tests.rs  Normal file
@ -0,0 +1,56 @@
use crate::{
    connection::stream::{self, uudl::*, Chunk},
    enc::Random,
};

#[::tracing_test::traced_test]
#[test]
fn test_stream_uudl_sequential() {
    let rand = Random::new();
    let mut uudl = UnreliableUnorderedDatagramLimited::new();

    let mut data = Vec::with_capacity(1024);
    data.resize(data.capacity(), 0);
    rand.fill(&mut data[..]);

    //let start = uudl.window_start.0;
    let start = Sequence(::core::num::Wrapping(0));

    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: true,
        sequence: start,
        data: &data[..512],
    };
    let got = uudl.get().1;
    assert!(&got[..] == &[], "uudl: got data?");
    let _ = uudl.recv(chunk);
    let got = uudl.get().1;
    assert!(
        &data[..512] == &got[..],
        "UUDL1: DIFF: {:?} {:?}",
        &data[..512].len(),
        &got[..].len()
    );
    let chunk = Chunk {
        id: stream::ID(42),
        flag_start: true,
        flag_end: true,
        sequence: start + 512,
        data: &data[512..],
    };
    let _ = uudl.recv(chunk);
    let got = uudl.get().1;
    assert!(
        &data[512..] == &got[..],
        "UUDL2: DIFF: {:?} {:?}",
        &data[512..].len(),
        &got[..].len()
    );
    let got = uudl.get().1;
    assert!(&got[..] == &[], "uudl: got data?");
}
@ -87,14 +87,11 @@ impl Dnssec {
             ));
         }

-        let resolver = match TokioAsyncResolver::tokio(config, opts) {
-            Ok(resolver) => resolver,
-            Err(e) => return Err(Error::Setup(e.to_string())),
-        };
+        let resolver = TokioAsyncResolver::tokio(config, opts);

         Ok(Self { resolver })
     }
-    const TXT_RECORD_START: &str = "v=Fenrir1 ";
+    const TXT_RECORD_START: &'static str = "v=Fenrir1 ";
     /// Get the fenrir data for a domain
     pub async fn resolv(&self, domain: &Domain) -> ::std::io::Result<String> {
         use ::trust_dns_client::rr::Name;
@ -162,7 +162,8 @@ impl KeyExchangeKind {
     ) -> Result<(ExchangePrivKey, ExchangePubKey), Error> {
         match self {
             KeyExchangeKind::X25519DiffieHellman => {
-                let raw_priv = ::x25519_dalek::StaticSecret::new(rnd);
+                let raw_priv =
+                    ::x25519_dalek::StaticSecret::random_from_rng(rnd);
                 let pub_key = ExchangePubKey::X25519(
                     ::x25519_dalek::PublicKey::from(&raw_priv),
                 );
@ -71,7 +71,7 @@ impl Hkdf {
     // Hack & tricks:
     // HKDF are pretty important, but this lib don't zero out the data.
     // we can't use #[derive(Zeroing)] either.
-    // So we craete a union with a Zeroing object, and drop both manually.
+    // So we create a union with a Zeroing object, and drop the zeroable buffer.

     // TODO: move this to Hkdf instead of Sha3

@ -88,8 +88,7 @@ impl Drop for HkdfInner {
     fn drop(&mut self) {
         #[allow(unsafe_code)]
         unsafe {
-            drop(&mut self.hkdf);
-            drop(&mut self.zeroable);
+            ::core::mem::ManuallyDrop::drop(&mut self.zeroable);
         }
     }
 }
@ -74,7 +74,7 @@ impl ::rand_core::RngCore for &Random {
     ) -> Result<(), ::rand_core::Error> {
         match self.rnd.fill(dest) {
             Ok(()) => Ok(()),
-            Err(e) => Err(::rand_core::Error::new(e)),
+            Err(e) => Err(::rand_core::Error::new(e.to_string())),
         }
     }
 }
@ -241,6 +241,14 @@ impl CipherSend {
     pub fn kind(&self) -> Kind {
         self.cipher.kind()
     }
+    /// Get the length of the nonce for this cipher
+    pub fn nonce_len(&self) -> NonceLen {
+        self.cipher.nonce_len()
+    }
+    /// Get the length of the tag for this cipher
+    pub fn tag_len(&self) -> TagLen {
+        self.cipher.tag_len()
+    }
 }

 /// XChaCha20Poly1305 cipher
@ -70,7 +70,7 @@ fn test_encrypt_decrypt() {
     let encrypt_to = encrypt_from + resp.encrypted_length(nonce_len, tag_len);

     let h_resp =
-        Handshake::new(handshake::Data::DirSync(dirsync::DirSync::Resp(resp)));
+        Handshake::new(Data::DirSync(dirsync::DirSync::Resp(resp)));

     let mut bytes = Vec::<u8>::with_capacity(
         h_resp.len(cipher.nonce_len(), cipher.tag_len()),
@ -119,7 +119,7 @@ fn test_encrypt_decrypt() {
         }
     };
     // reparse
-    if let handshake::Data::DirSync(dirsync::DirSync::Resp(r_a)) =
+    if let Data::DirSync(dirsync::DirSync::Resp(r_a)) =
         &mut deserialized.data
     {
         let enc_start = r_a.encrypted_offset() + cipher.nonce_len().0;
110 src/inner/mod.rs
@ -4,6 +4,10 @@
 pub(crate) mod worker;

+use crate::inner::worker::Work;
+use ::std::{collections::BTreeMap, vec::Vec};
+use ::tokio::time::Instant;

 /// Track the total number of threads and our index
 /// 65K cpus should be enough for anybody
 #[derive(Debug, Clone, Copy)]
@ -12,3 +16,109 @@ pub(crate) struct ThreadTracker {
     /// Note: starts from 1
     pub id: u16,
 }
+
+pub(crate) static mut SLEEP_RESOLUTION: ::std::time::Duration =
+    if cfg!(target_os = "linux") || cfg!(target_os = "macos") {
+        ::std::time::Duration::from_millis(1)
+    } else {
+        // windows
+        ::std::time::Duration::from_millis(16)
+    };
+
+pub(crate) async fn set_minimum_sleep_resolution() {
+    let nanosleep = ::std::time::Duration::from_nanos(1);
+    let mut tests: usize = 3;
+
+    while tests > 0 {
+        let pre_sleep = ::std::time::Instant::now();
+        ::tokio::time::sleep(nanosleep).await;
+        let post_sleep = ::std::time::Instant::now();
+        let slept_for = post_sleep - pre_sleep;
+        #[allow(unsafe_code)]
+        unsafe {
+            if slept_for < SLEEP_RESOLUTION {
+                SLEEP_RESOLUTION = slept_for;
+            }
+        }
+        tests = tests - 1;
+    }
+}
+
+/// Sleeping has a coarser resolution than we would like for packet pacing.
+/// So we sleep for however long we need, then chunk up all the work here.
+/// We will end up chunking the work in SLEEP_RESOLUTION, then we will busy
+/// wait for more precise timing
+pub(crate) struct Timers {
+    times: BTreeMap<Instant, Work>,
+}
+
+impl Timers {
+    pub(crate) fn new() -> Self {
+        Self {
+            times: BTreeMap::new(),
+        }
+    }
+    pub(crate) fn get_next(&self) -> ::tokio::time::Sleep {
+        match self.times.keys().next() {
+            Some(entry) => ::tokio::time::sleep_until((*entry).into()),
+            None => {
+                ::tokio::time::sleep(::std::time::Duration::from_secs(3600))
+            }
+        }
+    }
+    pub(crate) fn add(
+        &mut self,
+        duration: ::tokio::time::Duration,
+        work: Work,
+    ) -> ::tokio::time::Instant {
+        // the returned time is the key in the map.
+        // Make sure it is unique.
+        //
+        // We can be pretty sure we won't do a lot of stuff
+        // in a single nanosecond, so if we hit a time that is already present
+        // just add a nanosecond and retry
+        let mut time = ::tokio::time::Instant::now() + duration;
+        let mut work = work;
+        loop {
+            if let Some(old_val) = self.times.insert(time, work) {
+                work = self.times.insert(time, old_val).unwrap();
+                time = time + ::std::time::Duration::from_nanos(1);
+            } else {
+                break;
+            }
+        }
+        time
+    }
+    pub(crate) fn remove(&mut self, time: ::tokio::time::Instant) {
+        let _ = self.times.remove(&time);
+    }
+    /// Get all the work from now up until now + SLEEP_RESOLUTION
+    pub(crate) fn get_work(&mut self) -> Vec<Work> {
+        let now: ::tokio::time::Instant = ::std::time::Instant::now().into();
+        let mut ret = Vec::with_capacity(4);
+        let mut count_rm = 0;
+        #[allow(unsafe_code)]
+        let next_instant = unsafe { now + SLEEP_RESOLUTION };
+        let mut iter = self.times.iter_mut().peekable();
+        loop {
+            match iter.peek() {
+                None => break,
+                Some(next) => {
+                    if *next.0 > next_instant {
+                        break;
+                    }
+                }
+            }
+            let mut work = Work::DropHandshake(crate::enc::asym::KeyID(0));
+            let mut entry = iter.next().unwrap();
+            ::core::mem::swap(&mut work, &mut entry.1);
+            ret.push(work);
+            count_rm = count_rm + 1;
+        }
+        while count_rm > 0 {
+            self.times.pop_first();
+            count_rm = count_rm - 1;
+        }
+        ret
+    }
+}
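The nanosecond-bump trick in `Timers::add` above is worth spelling out: the deadline doubles as the map key, so on a collision the old occupant is put back and the new deadline is nudged forward one nanosecond until it finds a free slot. A standalone sketch with plain `std` types (`Work` here is an illustrative stand-in, not the crate's enum):

```rust
use std::collections::BTreeMap;
use std::time::{Duration, Instant};

// Hypothetical stand-in for the crate's `Work` items.
type Work = String;

fn add_unique(
    times: &mut BTreeMap<Instant, Work>,
    mut time: Instant,
    mut work: Work,
) -> Instant {
    // The deadline is the map key, so it must be unique: on collision,
    // re-insert the previous occupant and move our deadline forward.
    while let Some(old) = times.insert(time, work) {
        work = times.insert(time, old).unwrap();
        time += Duration::from_nanos(1);
    }
    time
}

fn main() {
    let mut times = BTreeMap::new();
    let deadline = Instant::now() + Duration::from_millis(5);
    let t1 = add_unique(&mut times, deadline, "a".into());
    let t2 = add_unique(&mut times, deadline, "b".into());
    // Identical requested deadlines still get distinct keys.
    assert_eq!(t2, t1 + Duration::from_nanos(1));
    assert_eq!(times.len(), 2);
}
```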
@ -11,17 +11,19 @@ use crate::{
         },
         packet::{self, Packet},
         socket::{UdpClient, UdpServer},
-        Conn, ConnList, IDSend,
+        stream, AuthSrvConn, ConnList, ConnTracker, Connection, IDSend,
+        LibTracker, ServiceConn,
     },
     dnssec,
     enc::{
+        self,
         asym::{self, KeyID, PrivKey, PubKey},
         hkdf::{self, Hkdf},
         sym, Random, Secret,
     },
     inner::ThreadTracker,
 };
-use ::std::{sync::Arc, vec::Vec};
+use ::std::{collections::VecDeque, sync::Arc, vec::Vec};
 /// This worker must be cpu-pinned
 use ::tokio::{
     net::UdpSocket,
@ -44,6 +46,25 @@ pub(crate) struct ConnectInfo {
     // TODO: UserID, Token information
 }

+/// return to the user the data received from a connection
+#[derive(Debug, Clone)]
+pub struct ConnData {
+    /// Connection tracking information
+    pub conn: ConnTracker,
+    /// received data, for each stream
+    pub data: Vec<(stream::ID, Vec<u8>)>, //FIXME: ChunkOwned
+}
+
+/// Connection event. Mostly used to give the data to the user
+#[derive(Debug, Clone)]
+#[non_exhaustive]
+pub enum Event {
+    /// Work loop has exited. nothing more to do
+    End,
+    /// Data from a connection
+    Data(ConnData),
+}
+
 pub(crate) enum Work {
     /// ask the thread to report to the main thread the total number of
     /// connections present
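`Event` is the new public surface of the work loop: `Data` hands the caller per-stream bytes, `End` signals shutdown. A sketch of how a consumer might drain such events, with simplified stand-ins for `ConnTracker` and `stream::ID` (plain integers here, purely for illustration):

```rust
// Minimal mirrors of the types added above, simplified for a runnable sketch.
#[derive(Debug, Clone)]
struct ConnData {
    conn: u64,                 // stands in for ConnTracker
    data: Vec<(u16, Vec<u8>)>, // stands in for Vec<(stream::ID, Vec<u8>)>
}

#[derive(Debug, Clone)]
enum Event {
    End,
    Data(ConnData),
}

fn main() {
    let events = vec![
        Event::Data(ConnData {
            conn: 1,
            data: vec![(0, b"hello".to_vec())],
        }),
        Event::End,
    ];
    for ev in events {
        match ev {
            Event::Data(d) => {
                for (stream_id, bytes) in d.data {
                    println!("conn {} stream {}: {} bytes", d.conn, stream_id, bytes.len());
                }
            }
            // `End` means the worker has exited; stop driving it.
            Event::End => break,
        }
    }
}
```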
@ -51,6 +72,8 @@ pub(crate) enum Work {
     Connect(ConnectInfo),
     DropHandshake(KeyID),
     Recv(RawUdp),
+    UserSend((LibTracker, stream::ID, Vec<u8>)),
+    SendData((LibTracker, ::tokio::time::Instant)),
 }

 /// Actual worker implementation.
@ -64,11 +87,15 @@ pub struct Worker {
     token_check: Option<Arc<Mutex<TokenChecker>>>,
     sockets: Vec<Arc<UdpSocket>>,
     queue: ::async_channel::Receiver<Work>,
+    queue_sender: ::async_channel::Sender<Work>,
     queue_timeouts_recv: mpsc::UnboundedReceiver<Work>,
     queue_timeouts_send: mpsc::UnboundedSender<Work>,
     thread_channels: Vec<::async_channel::Sender<Work>>,
     connections: ConnList,
+    // connections untracked by the user (the user still needs to get(..) them)
+    untracked_connections: VecDeque<LibTracker>,
     handshakes: handshake::Tracker,
+    work_timers: super::Timers,
 }

 #[allow(unsafe_code)]
@ -82,6 +109,7 @@ impl Worker {
         token_check: Option<Arc<Mutex<TokenChecker>>>,
         sockets: Vec<Arc<UdpSocket>>,
         queue: ::async_channel::Receiver<Work>,
+        queue_sender: ::async_channel::Sender<Work>,
     ) -> ::std::io::Result<Self> {
         let (queue_timeouts_send, queue_timeouts_recv) =
             mpsc::unbounded_channel();
@ -118,17 +146,93 @@ impl Worker {
             token_check,
             sockets,
             queue,
+            queue_sender,
             queue_timeouts_recv,
             queue_timeouts_send,
             thread_channels: Vec::new(),
             connections: ConnList::new(thread_id),
+            untracked_connections: VecDeque::with_capacity(8),
             handshakes,
+            work_timers: super::Timers::new(),
         })
     }
+    /// return a handle to the worker that you can use to send data
+    /// The handle will enqueue work in the main worker and is thread-local safe
+    ///
+    /// While this does not require `&mut` on the `Worker`, everything
+    /// will be put in the work queue,
+    /// So you might have less immediate results in a few cases
+    pub fn handle(&self) -> Handle {
+        Handle {
+            queue: self.queue_sender.clone(),
+        }
+    }
+    /// change the UserTracker in the connection
+    ///
+    /// This is `unsafe` because you will be responsible for manually updating
+    /// any copy of the `ConnTracker` you might have cloned around
+    #[allow(unsafe_code)]
+    pub unsafe fn set_connection_tracker(
+        &mut self,
+        tracker: ConnTracker,
+        new_id: connection::UserTracker,
+    ) -> Result<ConnTracker, crate::Error> {
+        let conn = self.connections.get_mut(tracker.lib)?;
+        conn.user_tracker = Some(new_id);
+        Ok(ConnTracker {
+            lib: tracker.lib,
+            user: Some(new_id),
+        })
+    }
+    /// Enqueue data to send
+    pub fn send(
+        &mut self,
+        tracker: LibTracker,
+        stream: stream::ID,
+        data: Vec<u8>,
+    ) -> Result<(), crate::Error> {
+        let conn = self.connections.get_mut(tracker)?;
+        conn.enqueue(stream, data)?;
+        Ok(())
+    }
+    /// Returns new connections, if any
+    ///
+    /// You can provide an optional tracker, different from the library tracker.
+    ///
+    /// Differently from the library tracker, you can change this later on,
+    /// but you will be responsible to change it on every `ConnTracker`
+    /// you might have cloned elsewhere
+    pub fn try_get_connection(
+        &mut self,
+        tracker: Option<connection::UserTracker>,
+    ) -> Option<ConnTracker> {
+        let ret_tracker = ConnTracker {
+            lib: self.untracked_connections.pop_front()?,
+            user: None,
+        };
+        match tracker {
+            Some(tracker) => {
+                #[allow(unsafe_code)]
+                match unsafe {
+                    self.set_connection_tracker(ret_tracker, tracker)
+                } {
+                    Ok(tracker) => Some(tracker),
+                    Err(_) => {
+                        // we had a connection, but it expired before the user
+                        // remembered to get it. Just remove it from the queue.
+                        None
+                    }
+                }
+            }
+            None => Some(ret_tracker),
+        }
+    }

     /// Continuously loop and process work as needed
-    pub async fn work_loop(&mut self) {
+    pub async fn work_loop(&mut self) -> Result<Event, crate::Error> {
         'mainloop: loop {
+            let next_timer = self.work_timers.get_next();
+            ::tokio::pin!(next_timer);
             let work = ::tokio::select! {
                 tell_stopped = self.stop_working.recv() => {
                     if let Ok(stop_ch) = tell_stopped {
@ -137,6 +241,13 @@ impl Worker {
                     }
                     break;
                 }
+                () = &mut next_timer => {
+                    let work_list = self.work_timers.get_work();
+                    for w in work_list.into_iter() {
+                        let _ = self.queue_sender.send(w).await;
+                    }
+                    continue 'mainloop;
+                }
                 maybe_timeout = self.queue.recv() => {
                     match maybe_timeout {
                         Ok(work) => work,
@ -293,12 +404,14 @@ impl Worker {
                     // are PubKey::Exchange
                     unreachable!()
                 }
-                let mut conn = Conn::new(
+                let mut conn = Connection::new(
                     hkdf,
                     cipher_selected,
                     connection::Role::Client,
                     &self.rand,
                 );
+                let dest = UdpClient(addr.as_sockaddr().unwrap());
+                conn.send_addr = dest;

                 let auth_recv_id = self.connections.reserve_first();
                 let service_conn_id = self.connections.reserve_first();
@ -389,15 +502,13 @@ impl Worker {
                 // send always from the first socket
                 // FIXME: select based on routing table
                 let sender = self.sockets[0].local_addr().unwrap();
-                let dest = UdpClient(addr.as_sockaddr().unwrap());

                 // start the timeout right before sending the packet
-                hshake.timeout = Some(::tokio::task::spawn_local(
-                    Self::handshake_timeout(
-                        self.queue_timeouts_send.clone(),
-                        client_key_id,
-                    ),
-                ));
+                let time_drop = self.work_timers.add(
+                    ::tokio::time::Duration::from_secs(10),
+                    Work::DropHandshake(client_key_id),
+                );
+                hshake.timeout = Some(time_drop);

                 // send packet
                 self.send_packet(raw, dest, UdpServer(sender)).await;
@ -413,21 +524,70 @@ impl Worker {
                         }
                     };
                 }
-                Work::Recv(pkt) => {
-                    self.recv(pkt).await;
+                Work::Recv(pkt) => match self.recv(pkt).await {
+                    Ok(event) => return Ok(event),
+                    Err(_) => continue 'mainloop,
+                },
+                Work::UserSend((tracker, stream, data)) => {
+                    let conn = match self.connections.get_mut(tracker) {
+                        Ok(conn) => conn,
+                        Err(_) => continue 'mainloop,
+                    };
+                    use connection::Enqueue;
+                    if let Ok(enqueued) = conn.enqueue(stream, data) {
+                        match enqueued {
+                            Enqueue::Immediate(instant) => {
+                                let _ = self
+                                    .queue_sender
+                                    .send(Work::SendData((tracker, instant)))
+                                    .await;
+                            }
+                            Enqueue::TimerWait => {}
+                        }
                     }
                 }
+                Work::SendData((tracker, instant)) => {
+                    // make sure we don't process events before they are
+                    // actually needed.
+                    // This is basically busy waiting with extra steps,
+                    // but we don't want to spawn lots of timers and
+                    // we don't really have a fine-grained sleep that is
+                    // multiplatform
+                    let now = ::tokio::time::Instant::now();
+                    if instant <= now {
+                        let _ = self
+                            .queue_sender
+                            .send(Work::SendData((tracker, instant)))
+                            .await;
+                        continue;
+                    }
+                    let mut raw: Vec<u8> = Vec::with_capacity(1200);
+                    raw.resize(raw.capacity(), 0);
+                    let conn = match self.connections.get_mut(tracker) {
+                        Ok(conn) => conn,
+                        Err(_) => continue,
+                    };
+                    let pkt = match conn.write_pkt(&mut raw) {
+                        Ok(pkt) => pkt,
+                        Err(enc::Error::NotEnoughData(0)) => continue,
+                        Err(e) => {
+                            ::tracing::error!("Packet generation: {:?}", e);
+                            continue;
+                        }
+                    };
+                    let dest = conn.send_addr;
+                    let src = UdpServer(self.sockets[0].local_addr().unwrap());
+                    let len = pkt.len();
+                    raw.truncate(len);
+                    let _ = self.send_packet(raw, dest, src);
+                }
             }
         }
-        async fn handshake_timeout(
-            timeout_queue: mpsc::UnboundedSender<Work>,
-            key_id: KeyID,
-        ) {
-            ::tokio::time::sleep(::std::time::Duration::from_secs(10)).await;
-            let _ = timeout_queue.send(Work::DropHandshake(key_id));
-        }
+        Ok(Event::End)
     }

     /// Read and do stuff with the raw udp packet
-    async fn recv(&mut self, mut udp: RawUdp) {
+    async fn recv(&mut self, mut udp: RawUdp) -> Result<Event, ()> {
         if udp.packet.id.is_handshake() {
             let handshake = match Handshake::deserialize(
                 &udp.data[connection::ID::len()..],
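The `SendData` arm above implements the pacing trick its comments describe: instead of spawning one timer per packet, a job that is not yet due is pushed back onto the same work queue and picked up again on the next pass. A minimal synchronous sketch of that re-enqueue loop over a plain `VecDeque` (the job shape and queue type are simplified stand-ins, and this sketch re-enqueues strictly the not-yet-due jobs):

```rust
use std::collections::VecDeque;
use std::time::{Duration, Instant};

// Simplified stand-in for the worker's Work::SendData job.
struct SendJob {
    deadline: Instant,
    payload: &'static str,
}

fn main() {
    let mut queue: VecDeque<SendJob> = VecDeque::new();
    queue.push_back(SendJob {
        deadline: Instant::now() + Duration::from_millis(2),
        payload: "packet",
    });

    while let Some(job) = queue.pop_front() {
        if Instant::now() < job.deadline {
            // Not due yet: re-enqueue and keep spinning through the queue.
            queue.push_back(job);
            continue;
        }
        // Due: actually send.
        println!("sending {}", job.payload);
    }
}
```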
@ -435,7 +595,7 @@ impl Worker {
                 Ok(handshake) => handshake,
                 Err(e) => {
                     ::tracing::debug!("Handshake parsing: {}", e);
-                    return;
+                    return Err(());
                 }
             };
             let action = match self.handshakes.recv_handshake(
@ -445,9 +605,38 @@ impl Worker {
                 Ok(action) => action,
                 Err(err) => {
                     ::tracing::debug!("Handshake recv error {}", err);
-                    return;
+                    return Err(());
                 }
             };
+            self.recv_handshake(udp, action).await;
+            Err(())
+        } else {
+            self.recv_packet(udp)
+        }
+    }
+
+    /// Receive a non-handshake packet
+    fn recv_packet(&mut self, udp: RawUdp) -> Result<Event, ()> {
+        let conn = match self.connections.get_id_mut(udp.packet.id) {
+            Ok(conn) => conn,
+            Err(_) => return Err(()),
+        };
+        match conn.recv(udp) {
+            Ok(stream::StreamData::NotReady) => Err(()),
+            Ok(stream::StreamData::Ready) => Ok(Event::Data(ConnData {
+                conn: ConnTracker {
+                    user: conn.user_tracker,
+                    lib: conn.lib_tracker,
+                },
+                data: conn.get_data().unwrap(),
+            })),
+            Err(e) => {
+                ::tracing::trace!("Conn Recv: {:?}", e.to_string());
+                Err(())
+            }
+        }
+    }
+
+    /// Receive an handshake packet
+    async fn recv_handshake(&mut self, udp: RawUdp, action: handshake::Action) {
         match action {
             handshake::Action::AuthNeeded(authinfo) => {
                 let req;
@ -472,8 +661,7 @@ impl Worker {
                 let maybe_auth_check = {
                     match &self.token_check {
                         None => {
-                            if req_data.auth.user == auth::USERID_ANONYMOUS
-                            {
+                            if req_data.auth.user == auth::USERID_ANONYMOUS {
                                 Ok(true)
                             } else {
                                 Ok(false)
@ -515,16 +703,20 @@ impl Worker {
                 let head_len = req.cipher.nonce_len();
                 let tag_len = req.cipher.tag_len();

-                let mut auth_conn = Conn::new(
+                let mut auth_conn = Connection::new(
                     authinfo.hkdf,
                     req.cipher,
                     connection::Role::Server,
                     &self.rand,
                 );
                 auth_conn.id_send = IDSend(req_data.id);
+                auth_conn.send_addr = udp.src;
                 // track connection
                 let auth_id_recv = self.connections.reserve_first();
                 auth_conn.id_recv = auth_id_recv;
+                let (tracker, auth_conn) =
+                    self.connections.reserve_and_track(auth_conn);
+                self.untracked_connections.push_back(tracker);

                 let resp_data = dirsync::resp::Data {
                     client_nonce: req_data.nonce,
@ -544,9 +736,9 @@ impl Worker {
                     connection::ID::len() + resp.encrypted_offset();
                 let encrypt_until =
                     encrypt_from + resp.encrypted_length(head_len, tag_len);
-                let resp_handshake = Handshake::new(
-                    handshake::Data::DirSync(DirSync::Resp(resp)),
-                );
+                let resp_handshake = Handshake::new(handshake::Data::DirSync(
+                    DirSync::Resp(resp),
+                ));
                 let packet = Packet {
                     id: connection::ID::new_handshake(),
                     data: packet::Data::Handshake(resp_handshake),
@ -566,6 +758,7 @@ impl Worker {
                 self.send_packet(raw_out, udp.src, udp.dst).await;
             }
             handshake::Action::ClientConnect(cci) => {
+                self.work_timers.remove(cci.old_timeout);
                 let ds_resp;
                 if let handshake::Data::DirSync(DirSync::Resp(resp)) =
                     cci.handshake.data
@ -577,9 +770,7 @@ impl Worker {
                 }
                 // track connection
                 let resp_data;
-                if let dirsync::resp::State::ClearText(r_data) =
-                    ds_resp.data
-                {
+                if let dirsync::resp::State::ClearText(r_data) = ds_resp.data {
                     resp_data = r_data;
                 } else {
                     ::tracing::error!(
@ -587,20 +778,31 @@ impl Worker {
                     );
                     unreachable!();
                 }
-                let auth_srv_conn = IDSend(resp_data.id);
+                let auth_id_send = IDSend(resp_data.id);
                 let mut conn = cci.connection;
-                conn.id_send = auth_srv_conn;
+                conn.id_send = auth_id_send;
                 let id_recv = conn.id_recv;
                 let cipher = conn.cipher_recv.kind();
                 // track the connection to the authentication server
-                if self.connections.track(conn.into()).is_err() {
-                    ::tracing::error!("Could not track new connection");
-                    self.connections.remove(id_recv);
-                    let _ = cci.answer.send(Err(
-                        handshake::Error::InternalTracking.into(),
-                    ));
-                    return;
-                }
+                let track_auth_conn = match self.connections.track(conn) {
+                    Ok(track_auth_conn) => track_auth_conn,
+                    Err(_) => {
+                        ::tracing::error!(
+                            "Could not track new auth srv connection"
+                        );
+                        self.connections.remove(id_recv);
+                        // FIXME: proper connection closing
+                        let _ = cci.answer.send(Err(
+                            handshake::Error::InternalTracking.into(),
+                        ));
+                        return;
+                    }
+                };
+                let authsrv_conn = AuthSrvConn(ConnTracker {
+                    lib: track_auth_conn,
+                    user: None,
+                });
+                let mut service_conn = None;
                 if cci.service_id != auth::SERVICEID_AUTH {
                     // create and track the connection to the service
                     // SECURITY: xor with secrets
@ -611,7 +813,7 @@ impl Worker {
                         cci.service_id.as_bytes(),
                         resp_data.service_key,
                     );
-                    let mut service_connection = Conn::new(
+                    let mut service_connection = Connection::new(
                         hkdf,
                         cipher,
                         connection::Role::Client,
@ -620,16 +822,39 @@ impl Worker {
                     service_connection.id_recv = cci.service_connection_id;
                     service_connection.id_send =
                         IDSend(resp_data.service_connection_id);
-                    let _ =
-                        self.connections.track(service_connection.into());
-                    let _ =
-                        cci.answer.send(Ok((cci.srv_key_id, auth_srv_conn)));
+                    let track_serv_conn =
+                        match self.connections.track(service_connection) {
+                            Ok(track_serv_conn) => track_serv_conn,
+                            Err(_) => {
+                                ::tracing::error!(
+                                    "Could not track new service connection"
+                                );
+                                self.connections
+                                    .remove(cci.service_connection_id);
+                                // FIXME: proper connection closing
+                                // FIXME: drop auth srv connection if we just
+                                // established it
+                                let _ = cci.answer.send(Err(
+                                    handshake::Error::InternalTracking.into(),
+                                ));
+                                return;
+                            }
+                        };
+                    service_conn = Some(ServiceConn(ConnTracker {
+                        lib: track_serv_conn,
+                        user: None,
+                    }));
                 }
+                let _ = cci.answer.send(Ok(handshake::tracker::ConnectOk {
+                    auth_key_id: cci.srv_key_id,
+                    auth_id_send,
+                    authsrv_conn,
+                    service_conn,
+                }));
             }
             handshake::Action::Nothing => {}
         };
     }
-    }

     async fn send_packet(
         &self,
         data: Vec<u8>,
@ -653,3 +878,16 @@ impl Worker {
         let _res = src_sock.send_to(&data, client.0).await;
     }
 }
+
+/// Handle to send work asynchronously to the worker
+#[derive(Debug, Clone)]
+pub struct Handle {
+    queue: ::async_channel::Sender<Work>,
+}
+
+impl Handle {
+    // TODO
+    // pub fn send(..)
+    // pub fn set_connection_id(..)
+    // try_get_new_connections()
+}
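`Handle` is just a clonable sender onto the worker's own queue, so callers never need `&mut Worker`; its methods are still TODO in this diff. A sketch of the underlying pattern with `async-channel` and a simplified, stand-in `Work` enum (this is not the crate's API, only the shape of it):

```rust
// Sketch of the Handle pattern: a cheap, clonable sender that enqueues
// work for a single owning worker task. Requires the `async-channel`
// and `tokio` crates.
#[derive(Debug)]
enum Work {
    UserSend(Vec<u8>),
    Stop,
}

#[derive(Clone)]
struct Handle {
    queue: ::async_channel::Sender<Work>,
}

impl Handle {
    async fn send(&self, data: Vec<u8>) {
        // Never touches worker state directly: just enqueue.
        let _ = self.queue.send(Work::UserSend(data)).await;
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = ::async_channel::unbounded::<Work>();
    let handle = Handle { queue: tx.clone() };

    let worker = tokio::spawn(async move {
        while let Ok(work) = rx.recv().await {
            match work {
                Work::UserSend(data) => println!("would send {} bytes", data.len()),
                Work::Stop => break,
            }
        }
    });

    handle.send(b"hello".to_vec()).await;
    let _ = tx.send(Work::Stop).await;
    let _ = worker.await;
}
```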
60 src/lib.rs
@ -34,12 +34,12 @@ use crate::{
         AuthServerConnections, Packet,
     },
     inner::{
-        worker::{ConnectInfo, RawUdp, Work, Worker},
+        worker::{ConnectInfo, Event, RawUdp, Work, Worker},
         ThreadTracker,
     },
 };
 pub use config::Config;
-pub use connection::Connection;
+pub use connection::{AuthSrvConn, ServiceConn};

 /// Main fenrir library errors
 #[derive(::thiserror::Error, Debug)]
@ -59,15 +59,15 @@ pub enum Error {
     /// Handshake errors
     #[error("Handshake: {0:?}")]
     Handshake(#[from] handshake::Error),
-    /// Key error
-    #[error("key: {0:?}")]
-    Key(#[from] crate::enc::Error),
     /// Resolution problems. wrong or incomplete DNSSEC data
     #[error("DNSSEC resolution: {0}")]
     Resolution(String),
     /// Wrapper on encryption errors
-    #[error("Encrypt: {0}")]
-    Encrypt(enc::Error),
+    #[error("Crypto: {0}")]
+    Crypto(#[from] enc::Error),
+    /// Wrapper on connection errors
+    #[error("Connection: {0}")]
+    Connection(#[from] connection::Error),
 }

 pub(crate) enum StopWorking {
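Folding the old `Key` variant into `Crypto(#[from] enc::Error)` is what lets `?` convert encryption errors straight into the library error. A minimal sketch of the thiserror `#[from]` pattern, with illustrative inner error types standing in for the crate's:

```rust
// thiserror generates the From impls, so `?` converts inner errors
// into the library error. The inner error type here is a stand-in.
#[derive(::thiserror::Error, Debug)]
enum EncError {
    #[error("not enough data: {0}")]
    NotEnoughData(usize),
}

#[derive(::thiserror::Error, Debug)]
enum Error {
    #[error("Crypto: {0}")]
    Crypto(#[from] EncError),
}

fn decrypt() -> Result<(), EncError> {
    Err(EncError::NotEnoughData(4))
}

fn do_crypto() -> Result<(), Error> {
    decrypt()?; // EncError -> Error::Crypto via the generated From impl
    Ok(())
}

fn main() {
    println!("{}", do_crypto().unwrap_err()); // "Crypto: not enough data: 4"
}
```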
@ -176,6 +176,7 @@ impl Fenrir {
         config: &Config,
         tokio_rt: Arc<::tokio::runtime::Runtime>,
     ) -> Result<Self, Error> {
+        inner::set_minimum_sleep_resolution().await;
         let (sender, _) = ::tokio::sync::broadcast::channel(1);
         let dnssec = dnssec::Dnssec::new(&config.resolvers)?;
         // bind sockets early so we can change "port 0" (aka: random)
@ -214,6 +215,7 @@ impl Fenrir {
     pub async fn with_workers(
         config: &Config,
     ) -> Result<(Self, Vec<Worker>), Error> {
+        inner::set_minimum_sleep_resolution().await;
         let (stop_working, _) = ::tokio::sync::broadcast::channel(1);
         let dnssec = dnssec::Dnssec::new(&config.resolvers)?;
         // bind sockets early so we can change "port 0" (aka: random)
@ -382,7 +384,7 @@ impl Fenrir {
         &self,
         domain: &Domain,
         service: ServiceID,
-    ) -> Result<(), Error> {
+    ) -> Result<(AuthSrvConn, Option<ServiceConn>), Error> {
         let resolved = self.resolv(domain).await?;
         self.connect_resolved(resolved, domain, service).await
     }
@ -392,7 +394,7 @@ impl Fenrir {
         resolved: dnssec::Record,
         domain: &Domain,
         service: ServiceID,
-    ) -> Result<(), Error> {
+    ) -> Result<(AuthSrvConn, Option<ServiceConn>), Error> {
         loop {
             // check if we already have a connection to that auth. srv
             let is_reserved = {
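With the new return type, `connect` hands the caller both tracked connections instead of `()`. A hedged usage sketch against this in-flux API (the `Fenrir`, `Domain` and `ServiceID` values are assumed to be set up elsewhere; this is the shape of a caller, not a tested example):

```rust
// Hypothetical caller matching the new signature above.
async fn example(
    fenrir: &Fenrir,
    domain: &Domain,
    service_id: ServiceID,
) -> Result<(), Error> {
    let (authsrv_conn, maybe_service_conn) =
        fenrir.connect(domain, service_id).await?;
    // The auth server connection is always returned; the service
    // connection only when `service_id` is not SERVICEID_AUTH.
    let _ = authsrv_conn;
    if let Some(service_conn) = maybe_service_conn {
        let _ = service_conn;
    }
    Ok(())
}
```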
@ -460,29 +462,28 @@ impl Fenrir {
             .await;

         match recv.await {
-            Ok(res) => {
-                match res {
-                    Err(e) => {
-                        let mut conn_auth_lock =
-                            self.conn_auth_srv.lock().await;
-                        conn_auth_lock.remove_reserved(&resolved);
-                        Err(e)
-                    }
-                    Ok((key_id, id_send)) => {
-                        let key = resolved
-                            .public_keys
-                            .iter()
-                            .find(|k| k.0 == key_id)
-                            .unwrap();
-                        let mut conn_auth_lock =
-                            self.conn_auth_srv.lock().await;
-                        conn_auth_lock.add(&key.1, id_send, &resolved);
-                        //FIXME: user needs to somehow track the connection
-                        Ok(())
-                    }
-                }
-            }
+            Ok(res) => match res {
+                Err(e) => {
+                    let mut conn_auth_lock = self.conn_auth_srv.lock().await;
+                    conn_auth_lock.remove_reserved(&resolved);
+                    Err(e)
+                }
+                Ok(connections) => {
+                    let key = resolved
+                        .public_keys
+                        .iter()
+                        .find(|k| k.0 == connections.auth_key_id)
+                        .unwrap();
+                    let mut conn_auth_lock = self.conn_auth_srv.lock().await;
+                    conn_auth_lock.add(
+                        &key.1,
+                        connections.auth_id_send,
+                        &resolved,
+                    );
+                    Ok((connections.authsrv_conn, connections.service_conn))
+                }
+            },
             Err(e) => {
                 // Thread dropped the sender. no more thread?
                 let mut conn_auth_lock = self.conn_auth_srv.lock().await;
@ -524,6 +525,7 @@ impl Fenrir {
             self.token_check.clone(),
             socks,
             work_recv,
+            work_send.clone(),
         )
         .await?;
         // don't keep around private keys too much
@ -547,7 +549,6 @@ impl Fenrir {
         }
         Ok(worker)
     }
-
     // needs to be called before add_sockets
     /// Start one working thread for each physical cpu
     /// threads are pinned to each cpu core.
@ -589,6 +590,7 @@ impl Fenrir {
             let th_tokio_rt = tokio_rt.clone();
             let th_config = self.cfg.clone();
             let (work_send, work_recv) = ::async_channel::unbounded::<Work>();
+            let th_work_send = work_send.clone();
             let th_stop_working = self.stop_working.subscribe();
             let th_token_check = self.token_check.clone();
             let th_sockets = sockets.clone();
@ -629,13 +631,23 @@ impl Fenrir {
                     th_token_check,
                     th_sockets,
                     work_recv,
+                    th_work_send,
                 )
                 .await
                 {
                     Ok(worker) => worker,
                     Err(_) => return,
                 };
-                worker.work_loop().await
+                loop {
+                    match worker.work_loop().await {
+                        Ok(_) => continue,
+                        Ok(Event::End) => break,
+                        Err(e) => {
+                            ::tracing::error!("Worker: {:?}", e);
+                            break;
+                        }
+                    }
+                }
             });
         });
         loop {
@ -17,7 +17,7 @@ async fn test_connection_dirsync() {
         }
     };
     let cfg_client = {
-        let mut cfg = config::Config::default();
+        let mut cfg = Config::default();
         cfg.threads = Some(::core::num::NonZeroUsize::new(1).unwrap());
         cfg
     };
@ -88,7 +88,7 @@ async fn test_connection_dirsync() {
         .connect_resolved(dnssec_record, &test_domain, auth::SERVICEID_AUTH)
         .await
     {
-        Ok(()) => {}
+        Ok((_, _)) => {}
         Err(e) => {
             assert!(false, "Err on client connection: {:?}", e);
         }