namespace #4

Manually merged
luca.fulchir merged 3 commits from namespace into main 2023-06-28 17:07:28 +00:00
22 changed files with 917 additions and 699 deletions

View File

@ -5,11 +5,11 @@
   "systems": "systems"
 },
 "locked": {
-  "lastModified": 1681202837,
-  "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
+  "lastModified": 1685518550,
+  "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
   "owner": "numtide",
   "repo": "flake-utils",
-  "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
+  "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
   "type": "github"
 },
 "original": {
@ -38,11 +38,11 @@
 },
 "nixpkgs": {
   "locked": {
-    "lastModified": 1684922889,
-    "narHash": "sha256-l0WZAmln8959O7RdYUJ3gnAIM9OPKFLKHKGX4q+Blrk=",
+    "lastModified": 1686921029,
+    "narHash": "sha256-J1bX9plPCFhTSh6E3TWn9XSxggBh/zDD4xigyaIQBy8=",
     "owner": "nixos",
     "repo": "nixpkgs",
-    "rev": "04aaf8511678a0d0f347fdf1e8072fe01e4a509e",
+    "rev": "c7ff1b9b95620ce8728c0d7bd501c458e6da9e04",
     "type": "github"
   },
   "original": {
@ -54,11 +54,11 @@
 },
 "nixpkgs-unstable": {
   "locked": {
-    "lastModified": 1684844536,
-    "narHash": "sha256-M7HhXYVqAuNb25r/d3FOO0z4GxPqDIZp5UjHFbBgw0Q=",
+    "lastModified": 1686960236,
+    "narHash": "sha256-AYCC9rXNLpUWzD9hm+askOfpliLEC9kwAo7ITJc4HIw=",
     "owner": "nixos",
     "repo": "nixpkgs",
-    "rev": "d30264c2691128adc261d7c9388033645f0e742b",
+    "rev": "04af42f3b31dba0ef742d254456dc4c14eedac86",
     "type": "github"
   },
   "original": {
@ -98,11 +98,11 @@
   "nixpkgs": "nixpkgs_2"
 },
 "locked": {
-  "lastModified": 1684894917,
-  "narHash": "sha256-kwKCfmliHIxKuIjnM95TRcQxM/4AAEIZ+4A9nDJ6cJs=",
+  "lastModified": 1687055571,
+  "narHash": "sha256-UvLoO6u5n9TzY80BpM4DaacxvyJl7u9mm9CA72d309g=",
   "owner": "oxalica",
   "repo": "rust-overlay",
-  "rev": "9ea38d547100edcf0da19aaebbdffa2810585495",
+  "rev": "2de557c780dcb127128ae987fca9d6c2b0d7dc0f",
   "type": "github"
 },
 "original": {

View File

@ -2,11 +2,10 @@
//! Configuration to initialize the Fenrir networking library //! Configuration to initialize the Fenrir networking library
use crate::{ use crate::{
connection::handshake::HandshakeID, connection::handshake,
enc::{ enc::{
asym::{KeyExchangeKind, KeyID, PrivKey, PubKey}, asym::{KeyExchangeKind, KeyID, PrivKey, PubKey},
hkdf::HkdfKind, hkdf, sym,
sym::CipherKind,
}, },
}; };
use ::std::{ use ::std::{
@ -44,13 +43,13 @@ pub struct Config {
/// List of DNS resolvers to use /// List of DNS resolvers to use
pub resolvers: Vec<SocketAddr>, pub resolvers: Vec<SocketAddr>,
/// Supported handshakes /// Supported handshakes
pub handshakes: Vec<HandshakeID>, pub handshakes: Vec<handshake::ID>,
/// Supported key exchanges /// Supported key exchanges
pub key_exchanges: Vec<KeyExchangeKind>, pub key_exchanges: Vec<KeyExchangeKind>,
/// Supported Hkdfs /// Supported Hkdfs
pub hkdfs: Vec<HkdfKind>, pub hkdfs: Vec<hkdf::Kind>,
/// Supported Ciphers /// Supported Ciphers
pub ciphers: Vec<CipherKind>, pub ciphers: Vec<sym::Kind>,
/// list of authentication servers /// list of authentication servers
/// clients will have this empty /// clients will have this empty
pub servers: Vec<AuthServer>, pub servers: Vec<AuthServer>,
@ -73,10 +72,10 @@ impl Default for Config {
), ),
], ],
resolvers: Vec::new(), resolvers: Vec::new(),
handshakes: [HandshakeID::DirectorySynchronized].to_vec(), handshakes: [handshake::ID::DirectorySynchronized].to_vec(),
key_exchanges: [KeyExchangeKind::X25519DiffieHellman].to_vec(), key_exchanges: [KeyExchangeKind::X25519DiffieHellman].to_vec(),
hkdfs: [HkdfKind::Sha3].to_vec(), hkdfs: [hkdf::Kind::Sha3].to_vec(),
ciphers: [CipherKind::XChaCha20Poly1305].to_vec(), ciphers: [sym::Kind::XChaCha20Poly1305].to_vec(),
servers: Vec::new(), servers: Vec::new(),
server_keys: Vec::new(), server_keys: Vec::new(),
} }
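To illustrate the renamed enums in use, a minimal sketch of narrowing a default Config down to the algorithms shown above (module import paths and the surrounding setup are assumptions, not taken from this diff):

// Sketch: pick the supported algorithms with the new namespaced enums.
// `handshake`, `hkdf` and `sym` are the modules referenced in this file.
let mut cfg = Config::default();
cfg.handshakes = vec![handshake::ID::DirectorySynchronized];
cfg.key_exchanges = vec![KeyExchangeKind::X25519DiffieHellman];
cfg.hkdfs = vec![hkdf::Kind::Sha3];
cfg.ciphers = vec![sym::Kind::XChaCha20Poly1305];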

View File

@ -0,0 +1,77 @@
//! Directory synchronized handshake
//! 1-RTT connection
//!
//! The simplest, fastest handshake supported by Fenrir
//! Downside: it does not offer protection from DDoS,
//! nor perfect forward secrecy
//!
//! To grant a form of perfect forward secrecy, the server should periodically
//! change the DNSSEC public/private keys
use crate::enc::{
sym::{NonceLen, TagLen},
Random,
};
pub mod req;
pub mod resp;
// TODO: merge with crate::enc::sym::Nonce
/// random nonce
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Nonce(pub(crate) [u8; 16]);
impl Nonce {
/// Create a new random Nonce
pub fn new(rnd: &Random) -> Self {
use ::core::mem::MaybeUninit;
let mut out: MaybeUninit<[u8; 16]>;
#[allow(unsafe_code)]
unsafe {
out = MaybeUninit::uninit();
let _ = rnd.fill(out.assume_init_mut());
Self(out.assume_init())
}
}
/// Length of the serialized Nonce
pub const fn len() -> usize {
16
}
}
impl From<&[u8; 16]> for Nonce {
fn from(raw: &[u8; 16]) -> Self {
Self(raw.clone())
}
}
/// Parsed handshake
#[derive(Debug, Clone, PartialEq)]
pub enum DirSync {
/// Directory synchronized handshake: client request
Req(req::Req),
/// Directory synchronized handshake: server response
Resp(resp::Resp),
}
impl DirSync {
/// actual length of the dirsync handshake data
pub fn len(&self, head_len: NonceLen, tag_len: TagLen) -> usize {
match self {
DirSync::Req(req) => req.len(),
DirSync::Resp(resp) => resp.len(head_len, tag_len),
}
}
/// Serialize into raw bytes
/// NOTE: assumes that there is exactly as much buffer as needed
pub fn serialize(
&self,
head_len: NonceLen,
tag_len: TagLen,
out: &mut [u8],
) {
match self {
DirSync::Req(req) => req.serialize(head_len, tag_len, out),
DirSync::Resp(resp) => resp.serialize(head_len, tag_len, out),
}
}
}

View File

@ -1,85 +1,22 @@
//! Directory synchronized handshake //! Directory synchronized handshake, Request parsing
//! 1-RTT connection
//!
//! The simplest, fastest handshake supported by Fenrir
//! Downside: It does not offer protection from DDos,
//! no perfect forward secrecy
//!
//! To grant a form of perfect forward secrecy, the server should periodically
//! change the DNSSEC public/private keys
use super::{Error, HandshakeData};
use crate::{ use crate::{
auth, auth,
connection::{ProtocolVersion, ID}, connection::{
handshake::{
self,
dirsync::{DirSync, Nonce},
Error,
},
ProtocolVersion, ID,
},
enc::{ enc::{
asym::{ExchangePubKey, KeyExchangeKind, KeyID}, asym::{ExchangePubKey, KeyExchangeKind, KeyID},
hkdf::HkdfKind, hkdf,
sym::{CipherKind, HeadLen, TagLen}, sym::{self, NonceLen, TagLen},
Random, Secret,
}, },
}; };
// TODO: merge with crate::enc::sym::Nonce
/// random nonce
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Nonce(pub(crate) [u8; 16]);
impl Nonce {
/// Create a new random Nonce
pub fn new(rnd: &Random) -> Self {
use ::core::mem::MaybeUninit;
let mut out: MaybeUninit<[u8; 16]>;
#[allow(unsafe_code)]
unsafe {
out = MaybeUninit::uninit();
let _ = rnd.fill(out.assume_init_mut());
Self(out.assume_init())
}
}
/// Length of the serialized Nonce
pub const fn len() -> usize {
16
}
}
impl From<&[u8; 16]> for Nonce {
fn from(raw: &[u8; 16]) -> Self {
Self(raw.clone())
}
}
/// Parsed handshake
#[derive(Debug, Clone, PartialEq)]
pub enum DirSync {
/// Directory synchronized handshake: client request
Req(Req),
/// Directory synchronized handshake: server response
Resp(Resp),
}
impl DirSync {
/// actual length of the dirsync handshake data
pub fn len(&self, head_len: HeadLen, tag_len: TagLen) -> usize {
match self {
DirSync::Req(req) => req.len(),
DirSync::Resp(resp) => resp.len(head_len, tag_len),
}
}
/// Serialize into raw bytes
/// NOTE: assumes that there is exactly asa much buffer as needed
pub fn serialize(
&self,
head_len: HeadLen,
tag_len: TagLen,
out: &mut [u8],
) {
match self {
DirSync::Req(req) => req.serialize(head_len, tag_len, out),
DirSync::Resp(resp) => resp.serialize(head_len, tag_len, out),
}
}
}
/// Client request of a directory synchronized handshake /// Client request of a directory synchronized handshake
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct Req { pub struct Req {
@ -88,13 +25,13 @@ pub struct Req {
/// Selected key exchange /// Selected key exchange
pub exchange: KeyExchangeKind, pub exchange: KeyExchangeKind,
/// Selected hkdf /// Selected hkdf
pub hkdf: HkdfKind, pub hkdf: hkdf::Kind,
/// Selected cipher /// Selected cipher
pub cipher: CipherKind, pub cipher: sym::Kind,
/// Client ephemeral public key used for key exchanges /// Client ephemeral public key used for key exchanges
pub exchange_key: ExchangePubKey, pub exchange_key: ExchangePubKey,
/// encrypted data /// encrypted data
pub data: ReqInner, pub data: State,
// SECURITY: TODO: Add padding to min: 1200 bytes // SECURITY: TODO: Add padding to min: 1200 bytes
// to avoid amplification attaks // to avoid amplification attaks
// also: 1200 < 1280 to allow better vpn compatibility // also: 1200 < 1280 to allow better vpn compatibility
@ -105,30 +42,30 @@ impl Req {
/// NOTE: starts from the beginning of the fenrir packet /// NOTE: starts from the beginning of the fenrir packet
pub fn encrypted_offset(&self) -> usize { pub fn encrypted_offset(&self) -> usize {
ProtocolVersion::len() ProtocolVersion::len()
+ crate::handshake::HandshakeID::len() + handshake::ID::len()
+ KeyID::len() + KeyID::len()
+ KeyExchangeKind::len() + KeyExchangeKind::len()
+ HkdfKind::len() + hkdf::Kind::len()
+ CipherKind::len() + sym::Kind::len()
+ self.exchange_key.kind().pub_len() + self.exchange_key.kind().pub_len()
} }
/// return the total length of the cleartext data /// return the total length of the cleartext data
pub fn encrypted_length( pub fn encrypted_length(
&self, &self,
head_len: HeadLen, head_len: NonceLen,
tag_len: TagLen, tag_len: TagLen,
) -> usize { ) -> usize {
match &self.data { match &self.data {
ReqInner::ClearText(data) => data.len() + head_len.0 + tag_len.0, State::ClearText(data) => data.len() + head_len.0 + tag_len.0,
ReqInner::CipherText(length) => *length, State::CipherText(length) => *length,
} }
} }
/// actual length of the directory synchronized request /// actual length of the directory synchronized request
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
KeyID::len() KeyID::len()
+ KeyExchangeKind::len() + KeyExchangeKind::len()
+ HkdfKind::len() + hkdf::Kind::len()
+ CipherKind::len() + sym::Kind::len()
+ self.exchange_key.kind().pub_len() + self.exchange_key.kind().pub_len()
+ self.cipher.nonce_len().0 + self.cipher.nonce_len().0
+ self.data.len() + self.data.len()
@ -138,7 +75,7 @@ impl Req {
/// NOTE: assumes that there is exactly as much buffer as needed /// NOTE: assumes that there is exactly as much buffer as needed
pub fn serialize( pub fn serialize(
&self, &self,
head_len: HeadLen, head_len: NonceLen,
tag_len: TagLen, tag_len: TagLen,
out: &mut [u8], out: &mut [u8],
) { ) {
@ -150,7 +87,7 @@ impl Req {
let written_next = 5 + key_len; let written_next = 5 + key_len;
self.exchange_key.serialize_into(&mut out[5..written_next]); self.exchange_key.serialize_into(&mut out[5..written_next]);
let written = written_next; let written = written_next;
if let ReqInner::ClearText(data) = &self.data { if let State::ClearText(data) = &self.data {
let from = written + head_len.0; let from = written + head_len.0;
let to = out.len() - tag_len.0; let to = out.len() - tag_len.0;
data.serialize(&mut out[from..to]); data.serialize(&mut out[from..to]);
@ -160,8 +97,8 @@ impl Req {
} }
} }
impl super::HandshakeParsing for Req { impl handshake::Parsing for Req {
fn deserialize(raw: &[u8]) -> Result<HandshakeData, Error> { fn deserialize(raw: &[u8]) -> Result<handshake::Data, Error> {
const MIN_PKT_LEN: usize = 10; const MIN_PKT_LEN: usize = 10;
if raw.len() < MIN_PKT_LEN { if raw.len() < MIN_PKT_LEN {
return Err(Error::NotEnoughData); return Err(Error::NotEnoughData);
@ -173,25 +110,25 @@ impl super::HandshakeParsing for Req {
Some(exchange) => exchange, Some(exchange) => exchange,
None => return Err(Error::Parsing), None => return Err(Error::Parsing),
}; };
let hkdf: HkdfKind = match HkdfKind::from_u8(raw[3]) { let hkdf: hkdf::Kind = match hkdf::Kind::from_u8(raw[3]) {
Some(exchange) => exchange, Some(exchange) => exchange,
None => return Err(Error::Parsing), None => return Err(Error::Parsing),
}; };
let cipher: CipherKind = match CipherKind::from_u8(raw[4]) { let cipher: sym::Kind = match sym::Kind::from_u8(raw[4]) {
Some(cipher) => cipher, Some(cipher) => cipher,
None => return Err(Error::Parsing), None => return Err(Error::Parsing),
}; };
const CURR_SIZE: usize = KeyID::len() const CURR_SIZE: usize = KeyID::len()
+ KeyExchangeKind::len() + KeyExchangeKind::len()
+ HkdfKind::len() + hkdf::Kind::len()
+ CipherKind::len(); + sym::Kind::len();
let (exchange_key, len) = let (exchange_key, len) =
match ExchangePubKey::deserialize(&raw[CURR_SIZE..]) { match ExchangePubKey::deserialize(&raw[CURR_SIZE..]) {
Ok(exchange_key) => exchange_key, Ok(exchange_key) => exchange_key,
Err(e) => return Err(e.into()), Err(e) => return Err(e.into()),
}; };
let data = ReqInner::CipherText(raw.len() - (CURR_SIZE + len)); let data = State::CipherText(raw.len() - (CURR_SIZE + len));
Ok(HandshakeData::DirSync(DirSync::Req(Self { Ok(handshake::Data::DirSync(DirSync::Req(Self {
key_id, key_id,
exchange, exchange,
hkdf, hkdf,
@ -204,18 +141,18 @@ impl super::HandshakeParsing for Req {
/// Quick way to avoid mixing cipher and clear text /// Quick way to avoid mixing cipher and clear text
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub enum ReqInner { pub enum State {
/// Data is still encrypted, we only keep the length /// Data is still encrypted, we only keep the length
CipherText(usize), CipherText(usize),
/// Client data, decrypted and parsed /// Client data, decrypted and parsed
ClearText(ReqData), ClearText(Data),
} }
impl ReqInner { impl State {
/// The length of the data /// The length of the data
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
match self { match self {
ReqInner::CipherText(len) => *len, State::CipherText(len) => *len,
ReqInner::ClearText(data) => data.len(), State::ClearText(data) => data.len(),
} }
} }
/// parse the cleartext /// parse the cleartext
@ -224,19 +161,19 @@ impl ReqInner {
raw: &[u8], raw: &[u8],
) -> Result<(), Error> { ) -> Result<(), Error> {
let clear = match self { let clear = match self {
ReqInner::CipherText(len) => { State::CipherText(len) => {
assert!( assert!(
*len > raw.len(), *len > raw.len(),
"DirSync::ReqInner::CipherText length mismatch" "DirSync::State::CipherText length mismatch"
); );
match ReqData::deserialize(raw) { match Data::deserialize(raw) {
Ok(clear) => clear, Ok(clear) => clear,
Err(e) => return Err(e), Err(e) => return Err(e),
} }
} }
_ => return Err(Error::Parsing), _ => return Err(Error::Parsing),
}; };
*self = ReqInner::ClearText(clear); *self = State::ClearText(clear);
Ok(()) Ok(())
} }
} }
@ -321,7 +258,7 @@ impl AuthInfo {
/// Decrypted request data /// Decrypted request data
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct ReqData { pub struct Data {
/// Random nonce, the client can use this to track multiple key exchanges /// Random nonce, the client can use this to track multiple key exchanges
pub nonce: Nonce, pub nonce: Nonce,
/// Client key id so the client can use and rotate keys /// Client key id so the client can use and rotate keys
@ -331,7 +268,7 @@ pub struct ReqData {
/// Authentication data /// Authentication data
pub auth: AuthInfo, pub auth: AuthInfo,
} }
impl ReqData { impl Data {
/// actual length of the request data /// actual length of the request data
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
Nonce::len() + KeyID::len() + ID::len() + self.auth.len() Nonce::len() + KeyID::len() + ID::len() + self.auth.len()
@ -383,179 +320,3 @@ impl ReqData {
}) })
} }
} }
/// Quick way to avoid mixing cipher and clear text
#[derive(Debug, Clone, PartialEq)]
pub enum RespInner {
/// Server data, still in ciphertext
CipherText(usize),
/// Parsed, cleartext server data
ClearText(RespData),
}
impl RespInner {
/// The length of the data
pub fn len(&self) -> usize {
match self {
RespInner::CipherText(len) => *len,
RespInner::ClearText(_) => RespData::len(),
}
}
/// parse the cleartext
pub fn deserialize_as_cleartext(
&mut self,
raw: &[u8],
) -> Result<(), Error> {
let clear = match self {
RespInner::CipherText(len) => {
assert!(
*len > raw.len(),
"DirSync::RespInner::CipherText length mismatch"
);
match RespData::deserialize(raw) {
Ok(clear) => clear,
Err(e) => return Err(e),
}
}
_ => return Err(Error::Parsing),
};
*self = RespInner::ClearText(clear);
Ok(())
}
/// Serialize the still cleartext data
pub fn serialize(&self, out: &mut [u8]) {
if let RespInner::ClearText(clear) = &self {
clear.serialize(out);
}
}
}
/// Server response in a directory synchronized handshake
#[derive(Debug, Clone, PartialEq)]
pub struct Resp {
/// Tells the client with which key the exchange was done
pub client_key_id: KeyID,
/// actual response data, might be encrypted
pub data: RespInner,
}
impl super::HandshakeParsing for Resp {
fn deserialize(raw: &[u8]) -> Result<HandshakeData, Error> {
const MIN_PKT_LEN: usize = 68;
if raw.len() < MIN_PKT_LEN {
return Err(Error::NotEnoughData);
}
let client_key_id: KeyID =
KeyID(u16::from_le_bytes(raw[0..KeyID::len()].try_into().unwrap()));
Ok(HandshakeData::DirSync(DirSync::Resp(Self {
client_key_id,
data: RespInner::CipherText(raw[KeyID::len()..].len()),
})))
}
}
impl Resp {
/// return the offset of the encrypted data
/// NOTE: starts from the beginning of the fenrir packet
pub fn encrypted_offset(&self) -> usize {
ProtocolVersion::len()
+ crate::connection::handshake::HandshakeID::len()
+ KeyID::len()
}
/// return the total length of the cleartext data
pub fn encrypted_length(
&self,
head_len: HeadLen,
tag_len: TagLen,
) -> usize {
match &self.data {
RespInner::ClearText(_data) => {
RespData::len() + head_len.0 + tag_len.0
}
RespInner::CipherText(len) => *len,
}
}
/// Total length of the response handshake
pub fn len(&self, head_len: HeadLen, tag_len: TagLen) -> usize {
KeyID::len() + head_len.0 + self.data.len() + tag_len.0
}
/// Serialize into raw bytes
/// NOTE: assumes that there is exactly as much buffer as needed
pub fn serialize(
&self,
head_len: HeadLen,
_tag_len: TagLen,
out: &mut [u8],
) {
out[0..KeyID::len()]
.copy_from_slice(&self.client_key_id.0.to_le_bytes());
let start_data = KeyID::len() + head_len.0;
let end_data = start_data + self.data.len();
self.data.serialize(&mut out[start_data..end_data]);
}
}
/// Decrypted response data
#[derive(Debug, Clone, PartialEq)]
pub struct RespData {
/// Client nonce, copied from the request
pub client_nonce: Nonce,
/// Server Connection ID
pub id: ID,
/// Service Connection ID
pub service_connection_id: ID,
/// Service encryption key
pub service_key: Secret,
}
impl RespData {
/// Return the expected length for buffer allocation
pub fn len() -> usize {
Nonce::len() + ID::len() + ID::len() + Secret::len()
}
/// Serialize the data into a buffer
/// NOTE: assumes that there is exactly asa much buffer as needed
pub fn serialize(&self, out: &mut [u8]) {
let mut start = 0;
let mut end = Nonce::len();
out[start..end].copy_from_slice(&self.client_nonce.0);
start = end;
end = end + ID::len();
self.id.serialize(&mut out[start..end]);
start = end;
end = end + ID::len();
self.service_connection_id.serialize(&mut out[start..end]);
start = end;
end = end + Secret::len();
out[start..end].copy_from_slice(self.service_key.as_ref());
}
/// Parse the cleartext raw data
pub fn deserialize(raw: &[u8]) -> Result<Self, Error> {
let raw_sized: &[u8; 16] = raw[..Nonce::len()].try_into().unwrap();
let client_nonce: Nonce = raw_sized.into();
let end = Nonce::len() + ID::len();
let id: ID =
u64::from_le_bytes(raw[Nonce::len()..end].try_into().unwrap())
.into();
if id.is_handshake() {
return Err(Error::Parsing);
}
let parsed = end;
let end = parsed + ID::len();
let service_connection_id: ID =
u64::from_le_bytes(raw[parsed..end].try_into().unwrap()).into();
if service_connection_id.is_handshake() {
return Err(Error::Parsing);
}
let parsed = end;
let end = parsed + Secret::len();
let raw_secret: &[u8; 32] = raw[parsed..end].try_into().unwrap();
let service_key = raw_secret.into();
Ok(Self {
client_nonce,
id,
service_connection_id,
service_key,
})
}
}
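For readers tracking the byte layout, a worked sketch of Req::encrypted_offset() above; the one-byte ProtocolVersion and the 32-byte X25519 public key are assumptions, the other sizes follow from the deserialization code in this file:

// cleartext header of a DirSync request, field by field:
const REQ_CLEARTEXT_HEADER: usize = 1 // ProtocolVersion (assumed 1 byte)
    + 1   // handshake::ID
    + 2   // KeyID (u16, little endian)
    + 1   // KeyExchangeKind
    + 1   // hkdf::Kind
    + 1   // sym::Kind
    + 32; // ExchangePubKey for X25519 (assumed)
// => encrypted_offset() == 39 for an X25519 request; the per-packet nonce
// and the encrypted request Data follow this header.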

View File

@ -0,0 +1,189 @@
//! Directory synchronized handshake, Response parsing
use crate::{
connection::{
handshake::{
self,
dirsync::{DirSync, Nonce},
Error,
},
ProtocolVersion, ID,
},
enc::{
asym::KeyID,
sym::{NonceLen, TagLen},
Secret,
},
};
/// Server response in a directory synchronized handshake
#[derive(Debug, Clone, PartialEq)]
pub struct Resp {
/// Tells the client with which key the exchange was done
pub client_key_id: KeyID,
/// actual response data, might be encrypted
pub data: State,
}
impl handshake::Parsing for Resp {
fn deserialize(raw: &[u8]) -> Result<handshake::Data, Error> {
const MIN_PKT_LEN: usize = 68;
if raw.len() < MIN_PKT_LEN {
return Err(Error::NotEnoughData);
}
let client_key_id: KeyID =
KeyID(u16::from_le_bytes(raw[0..KeyID::len()].try_into().unwrap()));
Ok(handshake::Data::DirSync(DirSync::Resp(Self {
client_key_id,
data: State::CipherText(raw[KeyID::len()..].len()),
})))
}
}
impl Resp {
/// return the offset of the encrypted data
/// NOTE: starts from the beginning of the fenrir packet
pub fn encrypted_offset(&self) -> usize {
ProtocolVersion::len() + handshake::ID::len() + KeyID::len()
}
/// return the total length of the cleartext data
pub fn encrypted_length(
&self,
head_len: NonceLen,
tag_len: TagLen,
) -> usize {
match &self.data {
State::ClearText(_data) => Data::len() + head_len.0 + tag_len.0,
State::CipherText(len) => *len,
}
}
/// Total length of the response handshake
pub fn len(&self, head_len: NonceLen, tag_len: TagLen) -> usize {
KeyID::len() + head_len.0 + self.data.len() + tag_len.0
}
/// Serialize into raw bytes
/// NOTE: assumes that there is exactly as much buffer as needed
pub fn serialize(
&self,
head_len: NonceLen,
_tag_len: TagLen,
out: &mut [u8],
) {
out[0..KeyID::len()]
.copy_from_slice(&self.client_key_id.0.to_le_bytes());
let start_data = KeyID::len() + head_len.0;
let end_data = start_data + self.data.len();
self.data.serialize(&mut out[start_data..end_data]);
}
}
/// Quick way to avoid mixing cipher and clear text
#[derive(Debug, Clone, PartialEq)]
pub enum State {
/// Server data, still in ciphertext
CipherText(usize),
/// Parsed, cleartext server data
ClearText(Data),
}
impl State {
/// The length of the data
pub fn len(&self) -> usize {
match self {
State::CipherText(len) => *len,
State::ClearText(_) => Data::len(),
}
}
/// parse the cleartext
pub fn deserialize_as_cleartext(
&mut self,
raw: &[u8],
) -> Result<(), Error> {
let clear = match self {
State::CipherText(len) => {
assert!(
*len > raw.len(),
"DirSync::State::CipherText length mismatch"
);
match Data::deserialize(raw) {
Ok(clear) => clear,
Err(e) => return Err(e),
}
}
_ => return Err(Error::Parsing),
};
*self = State::ClearText(clear);
Ok(())
}
/// Serialize the still cleartext data
pub fn serialize(&self, out: &mut [u8]) {
if let State::ClearText(clear) = &self {
clear.serialize(out);
}
}
}
/// Decrypted response data
#[derive(Debug, Clone, PartialEq)]
pub struct Data {
/// Client nonce, copied from the request
pub client_nonce: Nonce,
/// Server Connection ID
pub id: ID,
/// Service Connection ID
pub service_connection_id: ID,
/// Service encryption key
pub service_key: Secret,
}
impl Data {
/// Return the expected length for buffer allocation
pub fn len() -> usize {
Nonce::len() + ID::len() + ID::len() + Secret::len()
}
/// Serialize the data into a buffer
/// NOTE: assumes that there is exactly as much buffer as needed
pub fn serialize(&self, out: &mut [u8]) {
let mut start = 0;
let mut end = Nonce::len();
out[start..end].copy_from_slice(&self.client_nonce.0);
start = end;
end = end + ID::len();
self.id.serialize(&mut out[start..end]);
start = end;
end = end + ID::len();
self.service_connection_id.serialize(&mut out[start..end]);
start = end;
end = end + Secret::len();
out[start..end].copy_from_slice(self.service_key.as_ref());
}
/// Parse the cleartext raw data
pub fn deserialize(raw: &[u8]) -> Result<Self, Error> {
let raw_sized: &[u8; 16] = raw[..Nonce::len()].try_into().unwrap();
let client_nonce: Nonce = raw_sized.into();
let end = Nonce::len() + ID::len();
let id: ID =
u64::from_le_bytes(raw[Nonce::len()..end].try_into().unwrap())
.into();
if id.is_handshake() {
return Err(Error::Parsing);
}
let parsed = end;
let end = parsed + ID::len();
let service_connection_id: ID =
u64::from_le_bytes(raw[parsed..end].try_into().unwrap()).into();
if service_connection_id.is_handshake() {
return Err(Error::Parsing);
}
let parsed = end;
let end = parsed + Secret::len();
let raw_secret: &[u8; 32] = raw[parsed..end].try_into().unwrap();
let service_key = raw_secret.into();
Ok(Self {
client_nonce,
id,
service_connection_id,
service_key,
})
}
}
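As a size sanity check for the fixed-length response payload (a sketch; the 32-byte Secret is inferred from the &[u8; 32] conversion in deserialize above):

// client_nonce (16) + id (8) + service_connection_id (8) + service_key (32)
assert_eq!(Data::len(), 16 + 8 + 8 + 32); // 64 bytes of cleartext response data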

View File

@ -4,10 +4,11 @@ pub mod dirsync;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
pub(crate) mod tracker; pub(crate) mod tracker;
pub(crate) use tracker::{Action, Tracker};
use crate::{ use crate::{
connection::ProtocolVersion, connection::ProtocolVersion,
enc::sym::{HeadLen, TagLen}, enc::sym::{NonceLen, TagLen},
}; };
use ::num_traits::FromPrimitive; use ::num_traits::FromPrimitive;
@ -56,7 +57,7 @@ pub enum Error {
::strum_macros::IntoStaticStr, ::strum_macros::IntoStaticStr,
)] )]
#[repr(u8)] #[repr(u8)]
pub enum HandshakeID { pub enum ID {
/// 1-RTT Directory synchronized handshake. Fast, no forward secrecy /// 1-RTT Directory synchronized handshake. Fast, no forward secrecy
#[strum(serialize = "directory_synchronized")] #[strum(serialize = "directory_synchronized")]
DirectorySynchronized = 0, DirectorySynchronized = 0,
@ -67,7 +68,7 @@ pub enum HandshakeID {
#[strum(serialize = "stateless")] #[strum(serialize = "stateless")]
Stateless, Stateless,
} }
impl HandshakeID { impl ID {
/// The length of the serialized field /// The length of the serialized field
pub const fn len() -> usize { pub const fn len() -> usize {
1 1
@ -75,28 +76,28 @@ impl HandshakeID {
} }
/// Parsed handshake /// Parsed handshake
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub enum HandshakeData { pub enum Data {
/// Directory synchronized handshake /// Directory synchronized handshake
DirSync(dirsync::DirSync), DirSync(dirsync::DirSync),
} }
impl HandshakeData { impl Data {
/// actual length of the handshake data /// actual length of the handshake data
pub fn len(&self, head_len: HeadLen, tag_len: TagLen) -> usize { pub fn len(&self, head_len: NonceLen, tag_len: TagLen) -> usize {
match self { match self {
HandshakeData::DirSync(d) => d.len(head_len, tag_len), Data::DirSync(d) => d.len(head_len, tag_len),
} }
} }
/// Serialize into raw bytes /// Serialize into raw bytes
/// NOTE: assumes that there is exactly asa much buffer as needed /// NOTE: assumes that there is exactly asa much buffer as needed
pub fn serialize( pub fn serialize(
&self, &self,
head_len: HeadLen, head_len: NonceLen,
tag_len: TagLen, tag_len: TagLen,
out: &mut [u8], out: &mut [u8],
) { ) {
match self { match self {
HandshakeData::DirSync(d) => d.serialize(head_len, tag_len, out), Data::DirSync(d) => d.serialize(head_len, tag_len, out),
} }
} }
} }
@ -133,19 +134,19 @@ pub struct Handshake {
/// Fenrir Protocol version /// Fenrir Protocol version
pub fenrir_version: ProtocolVersion, pub fenrir_version: ProtocolVersion,
/// enum for the parsed data /// enum for the parsed data
pub data: HandshakeData, pub data: Data,
} }
impl Handshake { impl Handshake {
/// Build new handshake from the data /// Build new handshake from the data
pub fn new(data: HandshakeData) -> Self { pub fn new(data: Data) -> Self {
Handshake { Handshake {
fenrir_version: ProtocolVersion::V0, fenrir_version: ProtocolVersion::V0,
data, data,
} }
} }
/// return the total length of the handshake /// return the total length of the handshake
pub fn len(&self, head_len: HeadLen, tag_len: TagLen) -> usize { pub fn len(&self, head_len: NonceLen, tag_len: TagLen) -> usize {
ProtocolVersion::len() ProtocolVersion::len()
+ HandshakeKind::len() + HandshakeKind::len()
+ self.data.len(head_len, tag_len) + self.data.len(head_len, tag_len)
@ -165,9 +166,11 @@ impl Handshake {
None => return Err(Error::Parsing), None => return Err(Error::Parsing),
}; };
let data = match handshake_kind { let data = match handshake_kind {
HandshakeKind::DirSyncReq => dirsync::Req::deserialize(&raw[2..])?, HandshakeKind::DirSyncReq => {
dirsync::req::Req::deserialize(&raw[2..])?
}
HandshakeKind::DirSyncResp => { HandshakeKind::DirSyncResp => {
dirsync::Resp::deserialize(&raw[2..])? dirsync::resp::Resp::deserialize(&raw[2..])?
} }
}; };
Ok(Self { Ok(Self {
@ -179,13 +182,13 @@ impl Handshake {
/// NOTE: assumes that there is exactly as much buffer as needed /// NOTE: assumes that there is exactly as much buffer as needed
pub fn serialize( pub fn serialize(
&self, &self,
head_len: HeadLen, head_len: NonceLen,
tag_len: TagLen, tag_len: TagLen,
out: &mut [u8], out: &mut [u8],
) { ) {
out[0] = self.fenrir_version as u8; out[0] = self.fenrir_version as u8;
out[1] = match &self.data { out[1] = match &self.data {
HandshakeData::DirSync(d) => match d { Data::DirSync(d) => match d {
dirsync::DirSync::Req(_) => HandshakeKind::DirSyncReq, dirsync::DirSync::Req(_) => HandshakeKind::DirSyncReq,
dirsync::DirSync::Resp(_) => HandshakeKind::DirSyncResp, dirsync::DirSync::Resp(_) => HandshakeKind::DirSyncResp,
}, },
@ -194,6 +197,6 @@ impl Handshake {
} }
} }
trait HandshakeParsing { trait Parsing {
fn deserialize(raw: &[u8]) -> Result<HandshakeData, Error>; fn deserialize(raw: &[u8]) -> Result<Data, Error>;
} }

View File

@ -1,13 +1,16 @@
use crate::{ use crate::{
auth, auth,
connection::{handshake::*, ID}, connection::{
handshake::{self, dirsync, Handshake},
ID,
},
enc::{self, asym::KeyID}, enc::{self, asym::KeyID},
}; };
#[test] #[test]
fn test_handshake_dirsync_req() { fn test_handshake_dirsync_req() {
let rand = enc::Random::new(); let rand = enc::Random::new();
let cipher = enc::sym::CipherKind::XChaCha20Poly1305; let cipher = enc::sym::Kind::XChaCha20Poly1305;
let (_, exchange_key) = let (_, exchange_key) =
match enc::asym::KeyExchangeKind::X25519DiffieHellman.new_keypair(&rand) match enc::asym::KeyExchangeKind::X25519DiffieHellman.new_keypair(&rand)
@ -19,11 +22,11 @@ fn test_handshake_dirsync_req() {
} }
}; };
let data = dirsync::ReqInner::ClearText(dirsync::ReqData { let data = dirsync::req::State::ClearText(dirsync::req::Data {
nonce: dirsync::Nonce::new(&rand), nonce: dirsync::Nonce::new(&rand),
client_key_id: KeyID(2424), client_key_id: KeyID(2424),
id: ID::ID(::core::num::NonZeroU64::new(424242).unwrap()), id: ID::ID(::core::num::NonZeroU64::new(424242).unwrap()),
auth: dirsync::AuthInfo { auth: dirsync::req::AuthInfo {
user: auth::UserID::new(&rand), user: auth::UserID::new(&rand),
token: auth::Token::new_anonymous(&rand), token: auth::Token::new_anonymous(&rand),
service_id: auth::SERVICEID_AUTH, service_id: auth::SERVICEID_AUTH,
@ -31,16 +34,16 @@ fn test_handshake_dirsync_req() {
}, },
}); });
let h_req = Handshake::new(HandshakeData::DirSync(dirsync::DirSync::Req( let h_req = Handshake::new(handshake::Data::DirSync(
dirsync::Req { dirsync::DirSync::Req(dirsync::req::Req {
key_id: KeyID(4224), key_id: KeyID(4224),
exchange: enc::asym::KeyExchangeKind::X25519DiffieHellman, exchange: enc::asym::KeyExchangeKind::X25519DiffieHellman,
hkdf: enc::hkdf::HkdfKind::Sha3, hkdf: enc::hkdf::Kind::Sha3,
cipher: enc::sym::CipherKind::XChaCha20Poly1305, cipher: enc::sym::Kind::XChaCha20Poly1305,
exchange_key, exchange_key,
data, data,
}, }),
))); ));
let mut bytes = Vec::<u8>::with_capacity( let mut bytes = Vec::<u8>::with_capacity(
h_req.len(cipher.nonce_len(), cipher.tag_len()), h_req.len(cipher.nonce_len(), cipher.tag_len()),
@ -55,7 +58,7 @@ fn test_handshake_dirsync_req() {
return; return;
} }
}; };
if let HandshakeData::DirSync(dirsync::DirSync::Req(r_a)) = if let handshake::Data::DirSync(dirsync::DirSync::Req(r_a)) =
&mut deserialized.data &mut deserialized.data
{ {
let enc_start = r_a.encrypted_offset() + cipher.nonce_len().0; let enc_start = r_a.encrypted_offset() + cipher.nonce_len().0;
@ -74,11 +77,11 @@ fn test_handshake_dirsync_req() {
#[test] #[test]
fn test_handshake_dirsync_reqsp() { fn test_handshake_dirsync_reqsp() {
let rand = enc::Random::new(); let rand = enc::Random::new();
let cipher = enc::sym::CipherKind::XChaCha20Poly1305; let cipher = enc::sym::Kind::XChaCha20Poly1305;
let service_key = enc::Secret::new_rand(&rand); let service_key = enc::Secret::new_rand(&rand);
let data = dirsync::RespInner::ClearText(dirsync::RespData { let data = dirsync::resp::State::ClearText(dirsync::resp::Data {
client_nonce: dirsync::Nonce::new(&rand), client_nonce: dirsync::Nonce::new(&rand),
id: ID::ID(::core::num::NonZeroU64::new(424242).unwrap()), id: ID::ID(::core::num::NonZeroU64::new(424242).unwrap()),
service_connection_id: ID::ID( service_connection_id: ID::ID(
@ -87,8 +90,8 @@ fn test_handshake_dirsync_reqsp() {
service_key, service_key,
}); });
let h_resp = Handshake::new(HandshakeData::DirSync( let h_resp = Handshake::new(handshake::Data::DirSync(
dirsync::DirSync::Resp(dirsync::Resp { dirsync::DirSync::Resp(dirsync::resp::Resp {
client_key_id: KeyID(4444), client_key_id: KeyID(4444),
data, data,
}), }),
@ -107,7 +110,7 @@ fn test_handshake_dirsync_reqsp() {
return; return;
} }
}; };
if let HandshakeData::DirSync(dirsync::DirSync::Resp(r_a)) = if let handshake::Data::DirSync(dirsync::DirSync::Resp(r_a)) =
&mut deserialized.data &mut deserialized.data
{ {
let enc_start = r_a.encrypted_offset() + cipher.nonce_len().0; let enc_start = r_a.encrypted_offset() + cipher.nonce_len().0;

View File

@ -3,22 +3,21 @@
use crate::{ use crate::{
auth::{Domain, ServiceID}, auth::{Domain, ServiceID},
connection::{ connection::{
self,
handshake::{self, Error, Handshake}, handshake::{self, Error, Handshake},
Connection, IDRecv, IDSend, Conn, IDRecv, IDSend,
}, },
enc::{ enc::{
self, self,
asym::{self, KeyID, PrivKey, PubKey}, asym::{self, KeyID, PrivKey, PubKey},
hkdf::{Hkdf, HkdfKind}, hkdf::{self, Hkdf},
sym::{CipherKind, CipherRecv}, sym::{self, CipherRecv},
}, },
inner::ThreadTracker, inner::ThreadTracker,
}; };
use ::tokio::sync::oneshot; use ::tokio::sync::oneshot;
pub(crate) struct HandshakeServer { pub(crate) struct Server {
pub id: KeyID, pub id: KeyID,
pub key: PrivKey, pub key: PrivKey,
pub domains: Vec<Domain>, pub domains: Vec<Domain>,
@ -26,10 +25,10 @@ pub(crate) struct HandshakeServer {
pub(crate) type ConnectAnswer = Result<(KeyID, IDSend), crate::Error>; pub(crate) type ConnectAnswer = Result<(KeyID, IDSend), crate::Error>;
pub(crate) struct HandshakeClient { pub(crate) struct Client {
pub service_id: ServiceID, pub service_id: ServiceID,
pub service_conn_id: IDRecv, pub service_conn_id: IDRecv,
pub connection: Connection, pub connection: Conn,
pub timeout: Option<::tokio::task::JoinHandle<()>>, pub timeout: Option<::tokio::task::JoinHandle<()>>,
pub answer: oneshot::Sender<ConnectAnswer>, pub answer: oneshot::Sender<ConnectAnswer>,
pub srv_key_id: KeyID, pub srv_key_id: KeyID,
@ -37,13 +36,13 @@ pub(crate) struct HandshakeClient {
/// Tracks the keys used by the client and the handshake /// Tracks the keys used by the client and the handshake
/// they are associated with /// they are associated with
pub(crate) struct HandshakeClientList { pub(crate) struct ClientList {
used: Vec<::bitmaps::Bitmap<1024>>, // index = KeyID used: Vec<::bitmaps::Bitmap<1024>>, // index = KeyID
keys: Vec<Option<(PrivKey, PubKey)>>, keys: Vec<Option<(PrivKey, PubKey)>>,
list: Vec<Option<HandshakeClient>>, list: Vec<Option<Client>>,
} }
impl HandshakeClientList { impl ClientList {
pub(crate) fn new() -> Self { pub(crate) fn new() -> Self {
Self { Self {
used: [::bitmaps::Bitmap::<1024>::new()].to_vec(), used: [::bitmaps::Bitmap::<1024>::new()].to_vec(),
@ -51,13 +50,13 @@ impl HandshakeClientList {
list: Vec::with_capacity(16), list: Vec::with_capacity(16),
} }
} }
pub(crate) fn get(&self, id: KeyID) -> Option<&HandshakeClient> { pub(crate) fn get(&self, id: KeyID) -> Option<&Client> {
if id.0 as usize >= self.list.len() { if id.0 as usize >= self.list.len() {
return None; return None;
} }
self.list[id.0 as usize].as_ref() self.list[id.0 as usize].as_ref()
} }
pub(crate) fn remove(&mut self, id: KeyID) -> Option<HandshakeClient> { pub(crate) fn remove(&mut self, id: KeyID) -> Option<Client> {
if id.0 as usize >= self.list.len() { if id.0 as usize >= self.list.len() {
return None; return None;
} }
@ -79,11 +78,10 @@ impl HandshakeClientList {
pub_key: PubKey, pub_key: PubKey,
service_id: ServiceID, service_id: ServiceID,
service_conn_id: IDRecv, service_conn_id: IDRecv,
connection: Connection, connection: Conn,
answer: oneshot::Sender<ConnectAnswer>, answer: oneshot::Sender<ConnectAnswer>,
srv_key_id: KeyID, srv_key_id: KeyID,
) -> Result<(KeyID, &mut HandshakeClient), oneshot::Sender<ConnectAnswer>> ) -> Result<(KeyID, &mut Client), oneshot::Sender<ConnectAnswer>> {
{
let maybe_free_key_idx = let maybe_free_key_idx =
self.used.iter().enumerate().find_map(|(idx, bmap)| { self.used.iter().enumerate().find_map(|(idx, bmap)| {
match bmap.first_false_index() { match bmap.first_false_index() {
@ -112,7 +110,7 @@ impl HandshakeClientList {
self.list.push(None); self.list.push(None);
} }
self.keys[free_key_idx] = Some((priv_key, pub_key)); self.keys[free_key_idx] = Some((priv_key, pub_key));
self.list[free_key_idx] = Some(HandshakeClient { self.list[free_key_idx] = Some(Client {
service_id, service_id,
service_conn_id, service_conn_id,
connection, connection,
@ -144,8 +142,8 @@ pub(crate) struct ClientConnectInfo {
pub service_connection_id: IDRecv, pub service_connection_id: IDRecv,
/// Parsed handshake packet /// Parsed handshake packet
pub handshake: Handshake, pub handshake: Handshake,
/// Connection /// Conn
pub connection: Connection, pub connection: Conn,
/// where to wake up the waiting client /// where to wake up the waiting client
pub answer: oneshot::Sender<ConnectAnswer>, pub answer: oneshot::Sender<ConnectAnswer>,
/// server public key id that we used on the handshake /// server public key id that we used on the handshake
@ -153,7 +151,7 @@ pub(crate) struct ClientConnectInfo {
} }
/// Intermediate actions to be taken while parsing the handshake /// Intermediate actions to be taken while parsing the handshake
#[derive(Debug)] #[derive(Debug)]
pub(crate) enum HandshakeAction { pub(crate) enum Action {
/// Parsing finished, all ok, nothing to do /// Parsing finished, all ok, nothing to do
Nothing, Nothing,
/// Packet parsed, now go perform authentication /// Packet parsed, now go perform authentication
@ -167,20 +165,20 @@ pub(crate) enum HandshakeAction {
/// Each of them will handle a subset of all handshakes. /// Each of them will handle a subset of all handshakes.
/// Each handshake is routed to a different tracker by checking /// Each handshake is routed to a different tracker by checking
/// core = (udp_src_sender_port % total_threads) - 1 /// core = (udp_src_sender_port % total_threads) - 1
pub(crate) struct HandshakeTracker { pub(crate) struct Tracker {
thread_id: ThreadTracker, thread_id: ThreadTracker,
key_exchanges: Vec<asym::KeyExchangeKind>, key_exchanges: Vec<asym::KeyExchangeKind>,
ciphers: Vec<CipherKind>, ciphers: Vec<sym::Kind>,
/// ephemeral keys used server side in key exchange /// ephemeral keys used server side in key exchange
keys_srv: Vec<HandshakeServer>, keys_srv: Vec<Server>,
/// ephemeral keys used client side in key exchange /// ephemeral keys used client side in key exchange
hshake_cli: HandshakeClientList, hshake_cli: ClientList,
} }
impl HandshakeTracker { impl Tracker {
pub(crate) fn new( pub(crate) fn new(
thread_id: ThreadTracker, thread_id: ThreadTracker,
ciphers: Vec<CipherKind>, ciphers: Vec<sym::Kind>,
key_exchanges: Vec<asym::KeyExchangeKind>, key_exchanges: Vec<asym::KeyExchangeKind>,
) -> Self { ) -> Self {
Self { Self {
@ -188,7 +186,7 @@ impl HandshakeTracker {
ciphers, ciphers,
key_exchanges, key_exchanges,
keys_srv: Vec::new(), keys_srv: Vec::new(),
hshake_cli: HandshakeClientList::new(), hshake_cli: ClientList::new(),
} }
} }
pub(crate) fn add_server_key( pub(crate) fn add_server_key(
@ -199,7 +197,7 @@ impl HandshakeTracker {
if self.keys_srv.iter().find(|&k| k.id == id).is_some() { if self.keys_srv.iter().find(|&k| k.id == id).is_some() {
return Err(()); return Err(());
} }
self.keys_srv.push(HandshakeServer { self.keys_srv.push(Server {
id, id,
key, key,
domains: Vec::new(), domains: Vec::new(),
@ -233,11 +231,10 @@ impl HandshakeTracker {
pub_key: PubKey, pub_key: PubKey,
service_id: ServiceID, service_id: ServiceID,
service_conn_id: IDRecv, service_conn_id: IDRecv,
connection: Connection, connection: Conn,
answer: oneshot::Sender<ConnectAnswer>, answer: oneshot::Sender<ConnectAnswer>,
srv_key_id: KeyID, srv_key_id: KeyID,
) -> Result<(KeyID, &mut HandshakeClient), oneshot::Sender<ConnectAnswer>> ) -> Result<(KeyID, &mut Client), oneshot::Sender<ConnectAnswer>> {
{
self.hshake_cli.add( self.hshake_cli.add(
priv_key, priv_key,
pub_key, pub_key,
@ -248,10 +245,7 @@ impl HandshakeTracker {
srv_key_id, srv_key_id,
) )
} }
pub(crate) fn remove_client( pub(crate) fn remove_client(&mut self, key_id: KeyID) -> Option<Client> {
&mut self,
key_id: KeyID,
) -> Option<HandshakeClient> {
self.hshake_cli.remove(key_id) self.hshake_cli.remove(key_id)
} }
pub(crate) fn timeout_client( pub(crate) fn timeout_client(
@ -269,10 +263,10 @@ impl HandshakeTracker {
&mut self, &mut self,
mut handshake: Handshake, mut handshake: Handshake,
handshake_raw: &mut [u8], handshake_raw: &mut [u8],
) -> Result<HandshakeAction, Error> { ) -> Result<Action, Error> {
use connection::handshake::{dirsync::DirSync, HandshakeData}; use handshake::dirsync::DirSync;
match handshake.data { match handshake.data {
HandshakeData::DirSync(ref mut ds) => match ds { handshake::Data::DirSync(ref mut ds) => match ds {
DirSync::Req(ref mut req) => { DirSync::Req(ref mut req) => {
if !self.key_exchanges.contains(&req.exchange) { if !self.key_exchanges.contains(&req.exchange) {
return Err(enc::Error::UnsupportedKeyExchange.into()); return Err(enc::Error::UnsupportedKeyExchange.into());
@ -310,7 +304,8 @@ impl HandshakeTracker {
Ok(shared_key) => shared_key, Ok(shared_key) => shared_key,
Err(e) => return Err(handshake::Error::Key(e).into()), Err(e) => return Err(handshake::Error::Key(e).into()),
}; };
let hkdf = Hkdf::new(HkdfKind::Sha3, b"fenrir", shared_key); let hkdf =
Hkdf::new(hkdf::Kind::Sha3, b"fenrir", shared_key);
let secret_recv = hkdf.get_secret(b"to_server"); let secret_recv = hkdf.get_secret(b"to_server");
let cipher_recv = CipherRecv::new(req.cipher, secret_recv); let cipher_recv = CipherRecv::new(req.cipher, secret_recv);
use crate::enc::sym::AAD; use crate::enc::sym::AAD;
@ -334,7 +329,7 @@ impl HandshakeTracker {
} }
} }
return Ok(HandshakeAction::AuthNeeded(AuthNeededInfo { return Ok(Action::AuthNeeded(AuthNeededInfo {
handshake, handshake,
hkdf, hkdf,
})); }));
@ -374,16 +369,14 @@ impl HandshakeTracker {
if let Some(timeout) = hshake.timeout { if let Some(timeout) = hshake.timeout {
timeout.abort(); timeout.abort();
} }
return Ok(HandshakeAction::ClientConnect( return Ok(Action::ClientConnect(ClientConnectInfo {
ClientConnectInfo {
service_id: hshake.service_id, service_id: hshake.service_id,
service_connection_id: hshake.service_conn_id, service_connection_id: hshake.service_conn_id,
handshake, handshake,
connection: hshake.connection, connection: hshake.connection,
answer: hshake.answer, answer: hshake.answer,
srv_key_id: hshake.srv_key_id, srv_key_id: hshake.srv_key_id,
}, }));
));
} }
}, },
} }
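The routing rule quoted in the Tracker doc comment above, worked through on concrete numbers (a sketch, not code from this diff):

// core = (udp_src_sender_port % total_threads) - 1
// e.g. with 4 threads, a handshake from source port 50002 goes to core 1:
let (udp_src_port, total_threads): (u16, u16) = (50002, 4);
let core = (udp_src_port % total_threads) - 1; // 50002 % 4 == 2
assert_eq!(core, 1);
// how the (port % total_threads) == 0 case is handled is not shown in this diff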

View File

@ -3,30 +3,109 @@
pub mod handshake; pub mod handshake;
pub mod packet; pub mod packet;
pub mod socket; pub mod socket;
pub mod stream;
use ::std::{rc::Rc, vec::Vec}; use ::std::{rc::Rc, vec::Vec};
pub use crate::connection::{ pub use crate::connection::{handshake::Handshake, packet::Packet};
handshake::Handshake,
packet::{ConnectionID as ID, Packet, PacketData},
};
use crate::{ use crate::{
dnssec, dnssec,
enc::{ enc::{
asym::PubKey, asym::PubKey,
hkdf::Hkdf, hkdf::Hkdf,
sym::{CipherKind, CipherRecv, CipherSend}, sym::{self, CipherRecv, CipherSend},
Random, Random,
}, },
inner::ThreadTracker, inner::ThreadTracker,
}; };
use ::std::rc;
/// Fenrir Connection ID
///
/// 0 is special as it represents the handshake
/// Connection IDs are to be considered u64 little endian
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum ID {
/// Connection id 0 represent the handshake
Handshake,
/// Non-zero id can represent any connection
ID(::core::num::NonZeroU64),
}
impl ID {
/// Set the connection id to handshake
pub fn new_handshake() -> Self {
Self::Handshake
}
/// New id from u64. PLZ NON ZERO
pub(crate) fn new_u64(raw: u64) -> Self {
#[allow(unsafe_code)]
unsafe {
ID::ID(::core::num::NonZeroU64::new_unchecked(raw))
}
}
pub(crate) fn as_u64(&self) -> u64 {
match self {
ID::Handshake => 0,
ID::ID(id) => id.get(),
}
}
/// New random service ID
pub fn new_rand(rand: &Random) -> Self {
let mut raw = [0; 8];
let mut num = 0;
while num == 0 {
rand.fill(&mut raw);
num = u64::from_le_bytes(raw);
}
#[allow(unsafe_code)]
unsafe {
ID::ID(::core::num::NonZeroU64::new_unchecked(num))
}
}
/// Quick check to know if this is a handshake
pub fn is_handshake(&self) -> bool {
*self == ID::Handshake
}
/// length of the connection ID in bytes
pub const fn len() -> usize {
8
}
/// write the ID to a buffer
pub fn serialize(&self, out: &mut [u8]) {
match self {
ID::Handshake => out[..8].copy_from_slice(&[0; 8]),
ID::ID(id) => out[..8].copy_from_slice(&id.get().to_le_bytes()),
}
}
}
impl From<u64> for ID {
fn from(raw: u64) -> Self {
if raw == 0 {
ID::Handshake
} else {
#[allow(unsafe_code)]
unsafe {
ID::ID(::core::num::NonZeroU64::new_unchecked(raw))
}
}
}
}
impl From<[u8; 8]> for ID {
fn from(raw: [u8; 8]) -> Self {
let raw_u64 = u64::from_le_bytes(raw);
raw_u64.into()
}
}
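A short usage sketch of the connection ID mapping defined above (0 is the handshake, anything else is a live connection):

let hs: ID = 0u64.into();
assert!(hs.is_handshake());
let conn: ID = 424242u64.into();
let mut buf = [0u8; ID::len()];
conn.serialize(&mut buf);
assert_eq!(u64::from_le_bytes(buf), 424242); // u64, little endian on the wire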
/// strong typedef for receiving connection id /// strong typedef for receiving connection id
#[derive(Debug, Copy, Clone, PartialEq)] #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct IDRecv(pub ID); pub struct IDRecv(pub ID);
/// strong typedef for sending connection id /// strong typedef for sending connection id
#[derive(Debug, Copy, Clone, PartialEq)] #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct IDSend(pub ID); pub struct IDSend(pub ID);
/// Version of the fenrir protocol in use /// Version of the fenrir protocol in use
@ -47,12 +126,16 @@ impl ProtocolVersion {
} }
} }
/// The connection, as seen from a user of libFenrir
#[derive(Debug)]
pub struct Connection(rc::Weak<Conn>);
/// A single connection and its data /// A single connection and its data
#[derive(Debug)] #[derive(Debug)]
pub struct Connection { pub(crate) struct Conn {
/// Receiving Connection ID /// Receiving Conn ID
pub id_recv: IDRecv, pub id_recv: IDRecv,
/// Sending Connection ID /// Sending Conn ID
pub id_send: IDSend, pub id_send: IDSend,
/// The main hkdf used for all secrets in this connection /// The main hkdf used for all secrets in this connection
pub hkdf: Hkdf, pub hkdf: Hkdf,
@ -62,9 +145,12 @@ pub struct Connection {
pub cipher_send: CipherSend, pub cipher_send: CipherSend,
} }
/// Role: used to set the correct secrets /// Role: track the connection direction
/// * Server: Connection is Incoming ///
/// * Client: Connection is Outgoing /// The Role is used to select the correct secrets, and track the direction
/// of the connection
/// * Server: Conn is Incoming
/// * Client: Conn is Outgoing
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
#[repr(u8)] #[repr(u8)]
pub enum Role { pub enum Role {
@ -74,10 +160,10 @@ pub enum Role {
Client, Client,
} }
impl Connection { impl Conn {
pub(crate) fn new( pub(crate) fn new(
hkdf: Hkdf, hkdf: Hkdf,
cipher: CipherKind, cipher: sym::Kind,
role: Role, role: Role,
rand: &Random, rand: &Random,
) -> Self { ) -> Self {
@ -102,11 +188,9 @@ impl Connection {
} }
} }
// PERF: Arc<RwLock<ConnList>> loks a bit too much, need to find
// faster ways to do this
pub(crate) struct ConnList { pub(crate) struct ConnList {
thread_id: ThreadTracker, thread_id: ThreadTracker,
connections: Vec<Option<Rc<Connection>>>, connections: Vec<Option<Rc<Conn>>>,
/// Bitmap to track which connection ids are used or free /// Bitmap to track which connection ids are used or free
ids_used: Vec<::bitmaps::Bitmap<1024>>, ids_used: Vec<::bitmaps::Bitmap<1024>>,
} }
@ -177,7 +261,7 @@ impl ConnList {
new_id new_id
} }
/// NOTE: does NOT check if the connection has been previously reserved! /// NOTE: does NOT check if the connection has been previously reserved!
pub(crate) fn track(&mut self, conn: Rc<Connection>) -> Result<(), ()> { pub(crate) fn track(&mut self, conn: Rc<Conn>) -> Result<(), ()> {
let conn_id = match conn.id_recv { let conn_id = match conn.id_recv {
IDRecv(ID::Handshake) => { IDRecv(ID::Handshake) => {
return Err(()); return Err(());

View File

@ -1,125 +1,44 @@
// //
//! Raw packet handling, encryption, decryption, parsing //! Raw packet handling, encryption, decryption, parsing
use crate::enc::{ use crate::{
sym::{HeadLen, TagLen}, connection,
Random, enc::sym::{NonceLen, TagLen},
}; };
/// Fenrir Connection id
/// 0 is special as it represents the handshake
/// Connection IDs are to be considered u64 little endian
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ConnectionID {
/// Connection id 0 represent the handshake
Handshake,
/// Non-zero id can represent any connection
ID(::core::num::NonZeroU64),
}
impl ConnectionID {
/// Set the conenction id to handshake
pub fn new_handshake() -> Self {
Self::Handshake
}
/// New id from u64. PLZ NON ZERO
pub(crate) fn new_u64(raw: u64) -> Self {
#[allow(unsafe_code)]
unsafe {
ConnectionID::ID(::core::num::NonZeroU64::new_unchecked(raw))
}
}
pub(crate) fn as_u64(&self) -> u64 {
match self {
ConnectionID::Handshake => 0,
ConnectionID::ID(id) => id.get(),
}
}
/// New random service ID
pub fn new_rand(rand: &Random) -> Self {
let mut raw = [0; 8];
let mut num = 0;
while num == 0 {
rand.fill(&mut raw);
num = u64::from_le_bytes(raw);
}
#[allow(unsafe_code)]
unsafe {
ConnectionID::ID(::core::num::NonZeroU64::new_unchecked(num))
}
}
/// Quick check to know if this is an handshake
pub fn is_handshake(&self) -> bool {
*self == ConnectionID::Handshake
}
/// length if the connection ID in bytes
pub const fn len() -> usize {
8
}
/// write the ID to a buffer
pub fn serialize(&self, out: &mut [u8]) {
match self {
ConnectionID::Handshake => out[..8].copy_from_slice(&[0; 8]),
ConnectionID::ID(id) => {
out[..8].copy_from_slice(&id.get().to_le_bytes())
}
}
}
}
impl From<u64> for ConnectionID {
fn from(raw: u64) -> Self {
if raw == 0 {
ConnectionID::Handshake
} else {
#[allow(unsafe_code)]
unsafe {
ConnectionID::ID(::core::num::NonZeroU64::new_unchecked(raw))
}
}
}
}
impl From<[u8; 8]> for ConnectionID {
fn from(raw: [u8; 8]) -> Self {
let raw_u64 = u64::from_le_bytes(raw);
raw_u64.into()
}
}
/// Enumerate the possible data in a fenrir packet /// Enumerate the possible data in a fenrir packet
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum PacketData { pub enum Data {
/// A parsed handshake packet /// A parsed handshake packet
Handshake(super::Handshake), Handshake(super::Handshake),
/// Raw packet. we only have the connection ID and packet length /// Raw packet. we only have the connection ID and packet length
Raw(usize), Raw(usize),
} }
impl PacketData { impl Data {
/// total length of the data in bytes /// total length of the data in bytes
pub fn len(&self, head_len: HeadLen, tag_len: TagLen) -> usize { pub fn len(&self, head_len: NonceLen, tag_len: TagLen) -> usize {
match self { match self {
PacketData::Handshake(h) => h.len(head_len, tag_len), Data::Handshake(h) => h.len(head_len, tag_len),
PacketData::Raw(len) => *len, Data::Raw(len) => *len,
} }
} }
/// serialize data into bytes /// serialize data into bytes
/// NOTE: assumes that there is exactly asa much buffer as needed /// NOTE: assumes that there is exactly asa much buffer as needed
pub fn serialize( pub fn serialize(
&self, &self,
head_len: HeadLen, head_len: NonceLen,
tag_len: TagLen, tag_len: TagLen,
out: &mut [u8], out: &mut [u8],
) { ) {
assert!( assert!(
self.len(head_len, tag_len) == out.len(), self.len(head_len, tag_len) == out.len(),
"PacketData: wrong buffer length" "Data: wrong buffer length"
); );
match self { match self {
PacketData::Handshake(h) => h.serialize(head_len, tag_len, out), Data::Handshake(h) => h.serialize(head_len, tag_len, out),
PacketData::Raw(_) => { Data::Raw(_) => {
::tracing::error!("Tried to serialize a raw PacketData!"); ::tracing::error!("Tried to serialize a raw Data!");
} }
} }
} }
@ -131,9 +50,9 @@ const MIN_PACKET_BYTES: usize = 16;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Packet { pub struct Packet {
/// Id of the Fenrir connection. /// Id of the Fenrir connection.
pub id: ConnectionID, pub id: connection::ID,
/// actual data inside the packet /// actual data inside the packet
pub data: PacketData, pub data: Data,
} }
impl Packet { impl Packet {
@ -146,27 +65,30 @@ impl Packet {
let raw_id: [u8; 8] = (raw[..8]).try_into().expect("unreachable"); let raw_id: [u8; 8] = (raw[..8]).try_into().expect("unreachable");
Ok(Packet { Ok(Packet {
id: raw_id.into(), id: raw_id.into(),
data: PacketData::Raw(raw.len()), data: Data::Raw(raw.len()),
}) })
} }
/// get the total length of the packet /// get the total length of the packet
pub fn len(&self, head_len: HeadLen, tag_len: TagLen) -> usize { pub fn len(&self, head_len: NonceLen, tag_len: TagLen) -> usize {
ConnectionID::len() + self.data.len(head_len, tag_len) connection::ID::len() + self.data.len(head_len, tag_len)
} }
/// serialize packet into buffer /// serialize packet into buffer
/// NOTE: assumes that there is exactly asa much buffer as needed /// NOTE: assumes that there is exactly asa much buffer as needed
pub fn serialize( pub fn serialize(
&self, &self,
head_len: HeadLen, head_len: NonceLen,
tag_len: TagLen, tag_len: TagLen,
out: &mut [u8], out: &mut [u8],
) { ) {
assert!( assert!(
out.len() > ConnectionID::len(), out.len() > connection::ID::len(),
"Packet: not enough buffer to serialize" "Packet: not enough buffer to serialize"
); );
self.id.serialize(&mut out[0..ConnectionID::len()]); self.id.serialize(&mut out[0..connection::ID::len()]);
self.data self.data.serialize(
.serialize(head_len, tag_len, &mut out[ConnectionID::len()..]); head_len,
tag_len,
&mut out[connection::ID::len()..],
);
} }
} }

View File

@ -0,0 +1,10 @@
//! Errors while parsing streams
/// Crypto errors
#[derive(::thiserror::Error, Debug, Copy, Clone)]
pub enum Error {
/// Error while parsing key material
#[error("Not enough data for stream chunk: {0}")]
NotEnoughData(usize),
}

View File

@ -0,0 +1,183 @@
//! Here we implement the multiplexing stream feature of Fenrir
//!
//! For now we will only have the TCP-like, reliable, in-order delivery
mod errors;
mod rob;
pub use errors::Error;
use crate::{connection::stream::rob::ReliableOrderedBytestream, enc::Random};
/// Kind of stream. any combination of:
/// reliable/unreliable ordered/unordered, bytestream/datagram
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
pub enum Kind {
/// ROB: Reliable, Ordered, Bytestream
/// AKA: TCP-like
ROB = 0,
}
/// Id of the stream
#[derive(Debug, Copy, Clone)]
pub struct ID(pub u16);
impl ID {
/// Length of the serialized field
pub const fn len() -> usize {
2
}
}
/// length of the chunk
#[derive(Debug, Copy, Clone)]
pub struct ChunkLen(pub u16);
impl ChunkLen {
/// Length of the serialized field
pub const fn len() -> usize {
2
}
}
/// Sequence number to rebuild the stream correctly
#[derive(Debug, Copy, Clone)]
pub struct Sequence(pub ::core::num::Wrapping<u32>);
impl Sequence {
const SEQ_NOFLAG: u32 = 0x3FFFFFFF;
/// return a new sequence number, starting at random
pub fn new(rand: &Random) -> Self {
// fill a real buffer, then build the u32: filling the temporary returned
// by to_le_bytes() would leave the sequence at zero
let mut raw = [0u8; 4]; // Sequence::len()
rand.fill(&mut raw);
let seq = u32::from_le_bytes(raw);
Self(::core::num::Wrapping(seq & Self::SEQ_NOFLAG))
}
/// Length of the serialized field
pub const fn len() -> usize {
4
}
}
/// Chunk of data representing a stream
/// Every chunk is as follows:
/// | id (2 bytes) | length (2 bytes) |
/// | flag_start (1 BIT) | flag_end (1 BIT) | sequence (30 bits) |
#[derive(Debug, Clone)]
pub struct Chunk<'a> {
/// Id of the stream this chunk is part of
pub id: ID,
/// Is this the beginning of a message?
pub flag_start: bool,
/// Is this the end of a message?
pub flag_end: bool,
/// Sequence number to reconstruct the Stream
pub sequence: Sequence,
data: &'a [u8],
}
impl<'a> Chunk<'a> {
const FLAGS_EXCLUDED_BITMASK: u8 = 0x3F;
const FLAG_START_BITMASK: u8 = 0x80;
const FLAG_END_BITMASK: u8 = 0x40;
/// Returns the total length of the chunk, including headers
pub fn len(&self) -> usize {
ID::len() + ChunkLen::len() + Sequence::len() + self.data.len()
}
/// deserialize a chunk of a stream
pub fn deserialize(raw: &'a [u8]) -> Result<Self, Error> {
if raw.len() <= ID::len() + ChunkLen::len() + Sequence::len() {
return Err(Error::NotEnoughData(0));
}
let id = ID(u16::from_le_bytes(raw[0..ID::len()].try_into().unwrap()));
let mut bytes_next = ID::len() + ChunkLen::len();
let length = ChunkLen(u16::from_le_bytes(
raw[ID::len()..bytes_next].try_into().unwrap(),
));
if ID::len() + ChunkLen::len() + Sequence::len() + length.0 as usize
> raw.len()
{
return Err(Error::NotEnoughData(4));
}
let flag_start = (raw[bytes_next] & Self::FLAG_START_BITMASK) != 0;
let flag_end = (raw[bytes_next] & Self::FLAG_END_BITMASK) != 0;
let bytes = bytes_next + 1;
bytes_next = bytes + Sequence::len();
let mut sequence_bytes: [u8; Sequence::len()] =
raw[bytes..bytes_next].try_into().unwrap();
sequence_bytes[0] = sequence_bytes[0] & Self::FLAGS_EXCLUDED_BITMASK;
let sequence =
Sequence(::core::num::Wrapping(u32::from_le_bytes(sequence_bytes)));
Ok(Self {
id,
flag_start,
flag_end,
sequence,
data: &raw[bytes_next..(bytes_next + length.0 as usize)],
})
}
/// serialize a chunk of a stream
pub fn serialize(&self, raw_out: &mut [u8]) {
raw_out[0..ID::len()].copy_from_slice(&self.id.0.to_le_bytes());
let mut bytes_next = ID::len() + ChunkLen::len();
raw_out[ID::len()..bytes_next]
.copy_from_slice(&(self.data.len() as u16).to_le_bytes());
let bytes = bytes_next;
bytes_next = bytes_next + Sequence::len();
raw_out[bytes..bytes_next]
.copy_from_slice(&self.sequence.0 .0.to_le_bytes());
let mut flag_byte = raw_out[bytes] & Self::FLAGS_EXCLUDED_BITMASK;
if self.flag_start {
flag_byte = flag_byte | Self::FLAG_START_BITMASK;
}
if self.flag_end {
flag_byte = flag_byte | Self::FLAG_END_BITMASK;
}
raw_out[bytes] = flag_byte;
let bytes = bytes_next;
bytes_next = bytes_next + self.data.len();
raw_out[bytes..bytes_next].copy_from_slice(&self.data);
}
}
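// A minimal round-trip sketch of the chunk wire format above: id, length,
// the two flag bits folded into the first sequence byte, then the payload.
// It lives in a child module only so it can reach the private `data` field.
#[cfg(test)]
mod chunk_format_sketch {
    use super::*;

    #[test]
    fn chunk_roundtrip() {
        let payload = [0xAAu8, 0xBB, 0xCC, 0xDD];
        let chunk = Chunk {
            id: ID(42),
            flag_start: true,
            flag_end: false,
            // keep the sequence small so the flag bits stay visible
            sequence: Sequence(::core::num::Wrapping(7)),
            data: &payload,
        };
        let mut buf = vec![0u8; chunk.len()];
        chunk.serialize(&mut buf);
        let parsed = Chunk::deserialize(&buf).expect("roundtrip failed");
        assert_eq!(parsed.id.0, 42);
        assert!(parsed.flag_start);
        assert!(!parsed.flag_end);
        assert_eq!(parsed.sequence.0 .0, 7);
        assert_eq!(parsed.data, &payload);
    }
}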
/// Kind of stream. Any combination of:
/// reliable/unreliable, ordered/unordered, bytestream/datagram
/// differences from Kind:
/// * not public
/// * has actual data
#[derive(Debug, Clone)]
pub(crate) enum Tracker {
/// ROB: Reliable, Ordered, Bytestream
/// AKA: TCP-like
ROB(ReliableOrderedBytestream),
}
impl Tracker {
pub(crate) fn new(kind: Kind, rand: &Random) -> Self {
match kind {
Kind::ROB => Tracker::ROB(ReliableOrderedBytestream::new(rand)),
}
}
}
/// Actual stream-tracking structure
#[derive(Debug, Clone)]
pub(crate) struct Stream {
id: ID,
data: Tracker,
}
impl Stream {
pub(crate) fn new(kind: Kind, rand: &Random) -> Self {
let mut id_bytes = [0u8; ID::len()];
rand.fill(&mut id_bytes);
let id = u16::from_le_bytes(id_bytes);
Self {
id: ID(id),
data: Tracker::new(kind, rand),
}
}
}

View File

@ -0,0 +1,29 @@
//! Implementation of the Reliable, Ordered, Bytestream transmission model
//! AKA: TCP-like
use crate::{
connection::stream::{Chunk, Error, Sequence},
enc::Random,
};
/// Reliable, Ordered, Bytestream stream tracker
/// AKA: TCP-like
#[derive(Debug, Clone)]
pub(crate) struct ReliableOrderedBytestream {
window_start: Sequence,
window_len: usize,
data: Vec<u8>,
}
impl ReliableOrderedBytestream {
pub(crate) fn new(rand: &Random) -> Self {
Self {
window_start: Sequence::new(rand),
window_len: 1048576, // 1MB. should be enough for anybody. (lol)
data: Vec::new(),
}
}
pub(crate) fn recv(&mut self, chunk: Chunk) -> Result<(), Error> {
todo!()
}
}
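// A minimal sketch of one way `recv` could fill the receive window (the
// method above is still `todo!()`, so this is only an assumption, not the
// final algorithm): copy the payload at the offset implied by the chunk's
// sequence number. It ignores the 30-bit wrap of the sequence space and any
// out-of-order bookkeeping, and reuses the only error variant as a placeholder.
#[cfg(test)]
impl ReliableOrderedBytestream {
    #[allow(dead_code)]
    fn recv_sketch(&mut self, chunk: Chunk) -> Result<(), Error> {
        if self.data.len() < self.window_len {
            // lazily allocate the receive window
            self.data.resize(self.window_len, 0);
        }
        // distance of this chunk from the start of the window
        let offset = (chunk.sequence.0 - self.window_start.0).0 as usize;
        if offset + chunk.data.len() > self.window_len {
            return Err(Error::NotEnoughData(offset));
        }
        self.data[offset..(offset + chunk.data.len())]
            .copy_from_slice(chunk.data);
        Ok(())
    }
}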

View File

@ -43,12 +43,11 @@
//! ] //! ]
use crate::{ use crate::{
connection::handshake::HandshakeID, connection::handshake,
enc::{ enc::{
self, self,
asym::{KeyExchangeKind, KeyID, PubKey}, asym::{KeyExchangeKind, KeyID, PubKey},
hkdf::HkdfKind, hkdf, sym,
sym::CipherKind,
}, },
}; };
use ::core::num::NonZeroU16; use ::core::num::NonZeroU16;
@ -180,7 +179,7 @@ pub struct Address {
/// Weight of this address in the priority group /// Weight of this address in the priority group
pub weight: AddressWeight, pub weight: AddressWeight,
/// List of supported handshakes /// List of supported handshakes
pub handshake_ids: Vec<HandshakeID>, pub handshake_ids: Vec<handshake::ID>,
/// Public key IDs used by this address /// Public key IDs used by this address
pub public_key_idx: Vec<PubKeyIdx>, pub public_key_idx: Vec<PubKeyIdx>,
} }
@ -331,7 +330,7 @@ impl Address {
for raw_handshake_id in for raw_handshake_id in
raw[bytes_parsed..(bytes_parsed + num_handshake_ids)].iter() raw[bytes_parsed..(bytes_parsed + num_handshake_ids)].iter()
{ {
match HandshakeID::from_u8(*raw_handshake_id) { match handshake::ID::from_u8(*raw_handshake_id) {
Some(h_id) => handshake_ids.push(h_id), Some(h_id) => handshake_ids.push(h_id),
None => { None => {
::tracing::warn!( ::tracing::warn!(
@ -392,9 +391,9 @@ pub struct Record {
/// List of supported key exchanges /// List of supported key exchanges
pub key_exchanges: Vec<KeyExchangeKind>, pub key_exchanges: Vec<KeyExchangeKind>,
/// List of supported HKDFs /// List of supported HKDFs
pub hkdfs: Vec<HkdfKind>, pub hkdfs: Vec<hkdf::Kind>,
/// List of supported ciphers /// List of supported ciphers
pub ciphers: Vec<CipherKind>, pub ciphers: Vec<sym::Kind>,
} }
impl Record { impl Record {
@ -597,7 +596,7 @@ impl Record {
num_key_exchanges = num_key_exchanges - 1; num_key_exchanges = num_key_exchanges - 1;
} }
while num_hkdfs > 0 { while num_hkdfs > 0 {
let hkdf = match HkdfKind::from_u8(raw[bytes_parsed]) { let hkdf = match hkdf::Kind::from_u8(raw[bytes_parsed]) {
Some(hkdf) => hkdf, Some(hkdf) => hkdf,
None => { None => {
// continue parsing. This could be a new hkdf type // continue parsing. This could be a new hkdf type
@ -615,7 +614,7 @@ impl Record {
num_hkdfs = num_hkdfs - 1; num_hkdfs = num_hkdfs - 1;
} }
while num_ciphers > 0 { while num_ciphers > 0 {
let cipher = match CipherKind::from_u8(raw[bytes_parsed]) { let cipher = match sym::Kind::from_u8(raw[bytes_parsed]) {
Some(cipher) => cipher, Some(cipher) => cipher,
None => { None => {
// continue parsing. This could be a new cipher type // continue parsing. This could be a new cipher type

View File

@ -12,7 +12,7 @@ fn test_dnssec_serialization() {
return; return;
} }
}; };
use crate::{connection::handshake::HandshakeID, enc}; use crate::{connection::handshake, enc};
let record = Record { let record = Record {
public_keys: [( public_keys: [(
@ -25,14 +25,14 @@ fn test_dnssec_serialization() {
port: Some(::core::num::NonZeroU16::new(31337).unwrap()), port: Some(::core::num::NonZeroU16::new(31337).unwrap()),
priority: record::AddressPriority::P1, priority: record::AddressPriority::P1,
weight: record::AddressWeight::W1, weight: record::AddressWeight::W1,
handshake_ids: [HandshakeID::DirectorySynchronized].to_vec(), handshake_ids: [handshake::ID::DirectorySynchronized].to_vec(),
public_key_idx: [record::PubKeyIdx(0)].to_vec(), public_key_idx: [record::PubKeyIdx(0)].to_vec(),
}] }]
.to_vec(), .to_vec(),
key_exchanges: [enc::asym::KeyExchangeKind::X25519DiffieHellman] key_exchanges: [enc::asym::KeyExchangeKind::X25519DiffieHellman]
.to_vec(), .to_vec(),
hkdfs: [enc::hkdf::HkdfKind::Sha3].to_vec(), hkdfs: [enc::hkdf::Kind::Sha3].to_vec(),
ciphers: [enc::sym::CipherKind::XChaCha20Poly1305].to_vec(), ciphers: [enc::sym::Kind::XChaCha20Poly1305].to_vec(),
}; };
let encoded = match record.encode() { let encoded = match record.encode() {
Ok(encoded) => encoded, Ok(encoded) => encoded,

View File

@ -45,7 +45,7 @@ impl ::std::fmt::Display for KeyID {
/// Capabilities of each key /// Capabilities of each key
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub enum KeyCapabilities { pub enum Capabilities {
/// signing *only* /// signing *only*
Sign, Sign,
/// encrypt *only* /// encrypt *only*
@ -61,13 +61,13 @@ pub enum KeyCapabilities {
/// All: sign, encrypt, Key Exchange /// All: sign, encrypt, Key Exchange
SignEncryptExchage, SignEncryptExchage,
} }
impl KeyCapabilities { impl Capabilities {
/// Check if this key supports key exchange /// Check if this key supports key exchange
pub fn has_exchange(&self) -> bool { pub fn has_exchange(&self) -> bool {
match self { match self {
KeyCapabilities::Exchange Capabilities::Exchange
| KeyCapabilities::SignExchange | Capabilities::SignExchange
| KeyCapabilities::SignEncryptExchage => true, | Capabilities::SignEncryptExchage => true,
_ => false, _ => false,
} }
} }
@ -85,7 +85,7 @@ impl KeyCapabilities {
)] )]
#[non_exhaustive] #[non_exhaustive]
#[repr(u8)] #[repr(u8)]
pub enum KeyKind { pub enum Kind {
/// Ed25519 Public key (sign only) /// Ed25519 Public key (sign only)
#[strum(serialize = "ed25519")] #[strum(serialize = "ed25519")]
Ed25519 = 0, Ed25519 = 0,
@ -93,25 +93,25 @@ pub enum KeyKind {
#[strum(serialize = "x25519")] #[strum(serialize = "x25519")]
X25519, X25519,
} }
impl KeyKind { impl Kind {
/// Length of the serialized field /// Length of the serialized field
pub const fn len() -> usize { pub const fn len() -> usize {
1 1
} }
/// return the expected length of the public key /// return the expected length of the public key
pub fn pub_len(&self) -> usize { pub fn pub_len(&self) -> usize {
KeyKind::len() Kind::len()
+ match self { + match self {
// FIXME: 99% wrong size // FIXME: 99% wrong size
KeyKind::Ed25519 => ::ring::signature::ED25519_PUBLIC_KEY_LEN, Kind::Ed25519 => ::ring::signature::ED25519_PUBLIC_KEY_LEN,
KeyKind::X25519 => 32, Kind::X25519 => 32,
} }
} }
/// Get the capabilities of this key type /// Get the capabilities of this key type
pub fn capabilities(&self) -> KeyCapabilities { pub fn capabilities(&self) -> Capabilities {
match self { match self {
KeyKind::Ed25519 => KeyCapabilities::Sign, Kind::Ed25519 => Capabilities::Sign,
KeyKind::X25519 => KeyCapabilities::Exchange, Kind::X25519 => Capabilities::Exchange,
} }
} }
/// Returns the key exchanges supported by this key /// Returns the key exchanges supported by this key
@ -120,8 +120,8 @@ impl KeyKind {
const X25519_KEY_EXCHANGES: [KeyExchangeKind; 1] = const X25519_KEY_EXCHANGES: [KeyExchangeKind; 1] =
[KeyExchangeKind::X25519DiffieHellman]; [KeyExchangeKind::X25519DiffieHellman];
match self { match self {
KeyKind::Ed25519 => &EMPTY, Kind::Ed25519 => &EMPTY,
KeyKind::X25519 => &X25519_KEY_EXCHANGES, Kind::X25519 => &X25519_KEY_EXCHANGES,
} }
} }
/// generate new keypair /// generate new keypair
@ -193,21 +193,21 @@ impl PubKey {
} }
} }
/// return the kind of public key /// return the kind of public key
pub fn kind(&self) -> KeyKind { pub fn kind(&self) -> Kind {
match self { match self {
// FIXME: lie, we don't fully support this // FIXME: lie, we don't fully support this
PubKey::Signing => KeyKind::Ed25519, PubKey::Signing => Kind::Ed25519,
PubKey::Exchange(ex) => ex.kind(), PubKey::Exchange(ex) => ex.kind(),
} }
} }
/// generate new keypair /// generate new keypair
fn new_keypair( fn new_keypair(
kind: KeyKind, kind: Kind,
rnd: &Random, rnd: &Random,
) -> Result<(PrivKey, PubKey), Error> { ) -> Result<(PrivKey, PubKey), Error> {
match kind { match kind {
KeyKind::Ed25519 => todo!(), Kind::Ed25519 => todo!(),
KeyKind::X25519 => { Kind::X25519 => {
let (priv_key, pub_key) = let (priv_key, pub_key) =
KeyExchangeKind::X25519DiffieHellman.new_keypair(rnd)?; KeyExchangeKind::X25519DiffieHellman.new_keypair(rnd)?;
Ok((PrivKey::Exchange(priv_key), PubKey::Exchange(pub_key))) Ok((PrivKey::Exchange(priv_key), PubKey::Exchange(pub_key)))
@ -231,7 +231,7 @@ impl PubKey {
if raw.len() < 1 { if raw.len() < 1 {
return Err(Error::NotEnoughData(0)); return Err(Error::NotEnoughData(0));
} }
let kind: KeyKind = match KeyKind::from_u8(raw[0]) { let kind: Kind = match Kind::from_u8(raw[0]) {
Some(kind) => kind, Some(kind) => kind,
None => return Err(Error::UnsupportedKey(1)), None => return Err(Error::UnsupportedKey(1)),
}; };
@ -239,11 +239,11 @@ impl PubKey {
return Err(Error::NotEnoughData(1)); return Err(Error::NotEnoughData(1));
} }
match kind { match kind {
KeyKind::Ed25519 => { Kind::Ed25519 => {
::tracing::error!("ed25519 keys are not yet supported"); ::tracing::error!("ed25519 keys are not yet supported");
return Err(Error::Parsing); return Err(Error::Parsing);
} }
KeyKind::X25519 => { Kind::X25519 => {
let pub_key: ::x25519_dalek::PublicKey = let pub_key: ::x25519_dalek::PublicKey =
//match ::bincode::deserialize(&raw[1..(1 + kind.pub_len())]) //match ::bincode::deserialize(&raw[1..(1 + kind.pub_len())])
match ::bincode::deserialize(&raw[1..]) match ::bincode::deserialize(&raw[1..])
@ -284,7 +284,7 @@ impl PrivKey {
} }
} }
/// return the kind of public key /// return the kind of public key
pub fn kind(&self) -> KeyKind { pub fn kind(&self) -> Kind {
match self { match self {
PrivKey::Signing => todo!(), PrivKey::Signing => todo!(),
PrivKey::Exchange(ex) => ex.kind(), PrivKey::Exchange(ex) => ex.kind(),
@ -322,13 +322,13 @@ impl ExchangePrivKey {
/// Get the serialized key length /// Get the serialized key length
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
match self { match self {
ExchangePrivKey::X25519(_) => KeyKind::X25519.pub_len(), ExchangePrivKey::X25519(_) => Kind::X25519.pub_len(),
} }
} }
/// Get the kind of key /// Get the kind of key
pub fn kind(&self) -> KeyKind { pub fn kind(&self) -> Kind {
match self { match self {
ExchangePrivKey::X25519(_) => KeyKind::X25519, ExchangePrivKey::X25519(_) => Kind::X25519,
} }
} }
/// Run the key exchange between two keys of the same kind /// Run the key exchange between two keys of the same kind
@ -372,13 +372,13 @@ impl ExchangePubKey {
/// Get the serialized key length /// Get the serialized key length
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
match self { match self {
ExchangePubKey::X25519(_) => KeyKind::X25519.pub_len(), ExchangePubKey::X25519(_) => Kind::X25519.pub_len(),
} }
} }
/// Get the kind of key /// Get the kind of key
pub fn kind(&self) -> KeyKind { pub fn kind(&self) -> Kind {
match self { match self {
ExchangePubKey::X25519(_) => KeyKind::X25519, ExchangePubKey::X25519(_) => Kind::X25519,
} }
} }
/// serialize the key into the buffer /// serialize the key into the buffer
@ -396,13 +396,13 @@ impl ExchangePubKey {
/// The result is "unparsed" since we don't verify /// The result is "unparsed" since we don't verify
/// the actual key /// the actual key
pub fn deserialize(raw: &[u8]) -> Result<(Self, usize), Error> { pub fn deserialize(raw: &[u8]) -> Result<(Self, usize), Error> {
match KeyKind::from_u8(raw[0]) { match Kind::from_u8(raw[0]) {
Some(kind) => match kind { Some(kind) => match kind {
KeyKind::Ed25519 => { Kind::Ed25519 => {
::tracing::error!("ed25519 keys are not yet supported"); ::tracing::error!("ed25519 keys are not yet supported");
return Err(Error::Parsing); return Err(Error::Parsing);
} }
KeyKind::X25519 => { Kind::X25519 => {
let pub_key: ::x25519_dalek::PublicKey = let pub_key: ::x25519_dalek::PublicKey =
match ::bincode::deserialize( match ::bincode::deserialize(
&raw[1..(1 + kind.pub_len())], &raw[1..(1 + kind.pub_len())],

View File

@ -18,12 +18,12 @@ use crate::{config::Config, enc::Secret};
)] )]
#[non_exhaustive] #[non_exhaustive]
#[repr(u8)] #[repr(u8)]
pub enum HkdfKind { pub enum Kind {
/// Sha3 /// Sha3
#[strum(serialize = "sha3")] #[strum(serialize = "sha3")]
Sha3 = 0, Sha3 = 0,
} }
impl HkdfKind { impl Kind {
/// Length of the serialized type /// Length of the serialized type
pub const fn len() -> usize { pub const fn len() -> usize {
1 1
@ -34,7 +34,7 @@ impl HkdfKind {
#[derive(Clone)] #[derive(Clone)]
pub enum Hkdf { pub enum Hkdf {
/// Sha3 based /// Sha3 based
Sha3(HkdfSha3), Sha3(Sha3),
} }
// Fake debug implementation to avoid leaking secrets // Fake debug implementation to avoid leaking secrets
@ -49,9 +49,9 @@ impl ::core::fmt::Debug for Hkdf {
impl Hkdf { impl Hkdf {
/// New Hkdf /// New Hkdf
pub fn new(kind: HkdfKind, salt: &[u8], key: Secret) -> Self { pub fn new(kind: Kind, salt: &[u8], key: Secret) -> Self {
match kind { match kind {
HkdfKind::Sha3 => Self::Sha3(HkdfSha3::new(salt, key)), Kind::Sha3 => Self::Sha3(Sha3::new(salt, key)),
} }
} }
/// Get a secret generated from the key and a given context /// Get a secret generated from the key and a given context
@ -61,9 +61,9 @@ impl Hkdf {
} }
} }
/// get the kind of this Hkdf /// get the kind of this Hkdf
pub fn kind(&self) -> HkdfKind { pub fn kind(&self) -> Kind {
match self { match self {
Hkdf::Sha3(_) => HkdfKind::Sha3, Hkdf::Sha3(_) => Kind::Sha3,
} }
} }
} }
@ -106,11 +106,11 @@ impl Clone for HkdfInner {
/// Sha3 based HKDF /// Sha3 based HKDF
#[derive(Clone)] #[derive(Clone)]
pub struct HkdfSha3 { pub struct Sha3 {
inner: HkdfInner, inner: HkdfInner,
} }
impl HkdfSha3 { impl Sha3 {
/// Instantiate a new HKDF with Sha3-256 /// Instantiate a new HKDF with Sha3-256
pub(crate) fn new(salt: &[u8], key: Secret) -> Self { pub(crate) fn new(salt: &[u8], key: Secret) -> Self {
let hkdf = ::hkdf::Hkdf::<Sha3_256>::new(Some(salt), key.as_ref()); let hkdf = ::hkdf::Hkdf::<Sha3_256>::new(Some(salt), key.as_ref());
@ -132,7 +132,7 @@ impl HkdfSha3 {
} }
// Fake debug implementation to avoid leaking secrets // Fake debug implementation to avoid leaking secrets
impl ::core::fmt::Debug for HkdfSha3 { impl ::core::fmt::Debug for Sha3 {
fn fmt( fn fmt(
&self, &self,
f: &mut core::fmt::Formatter<'_>, f: &mut core::fmt::Formatter<'_>,
@ -146,8 +146,8 @@ impl ::core::fmt::Debug for HkdfSha3 {
/// Give priority to our list /// Give priority to our list
pub fn server_select_hkdf( pub fn server_select_hkdf(
cfg: &Config, cfg: &Config,
client_supported: &Vec<HkdfKind>, client_supported: &Vec<Kind>,
) -> Option<HkdfKind> { ) -> Option<Kind> {
cfg.hkdfs cfg.hkdfs
.iter() .iter()
.find(|h| client_supported.contains(h)) .find(|h| client_supported.contains(h))
@ -159,8 +159,8 @@ pub fn server_select_hkdf(
/// this is used only in the directory synchronized handshake /// this is used only in the directory synchronized handshake
pub fn client_select_hkdf( pub fn client_select_hkdf(
cfg: &Config, cfg: &Config,
server_supported: &Vec<HkdfKind>, server_supported: &Vec<Kind>,
) -> Option<HkdfKind> { ) -> Option<Kind> {
server_supported server_supported
.iter() .iter()
.find(|h| cfg.hkdfs.contains(h)) .find(|h| cfg.hkdfs.contains(h))

View File

@ -17,20 +17,20 @@ use crate::{
::strum_macros::IntoStaticStr, ::strum_macros::IntoStaticStr,
)] )]
#[repr(u8)] #[repr(u8)]
pub enum CipherKind { pub enum Kind {
/// XChaCha20_Poly1305 /// XChaCha20_Poly1305
#[strum(serialize = "xchacha20poly1305")] #[strum(serialize = "xchacha20poly1305")]
XChaCha20Poly1305 = 0, XChaCha20Poly1305 = 0,
} }
impl CipherKind { impl Kind {
/// length of the serialized id for the cipher kind field /// length of the serialized id for the cipher kind field
pub const fn len() -> usize { pub const fn len() -> usize {
1 1
} }
/// required length of the nonce /// required length of the nonce
pub fn nonce_len(&self) -> HeadLen { pub fn nonce_len(&self) -> NonceLen {
HeadLen(Nonce::len()) Nonce::len()
} }
/// required length of the key /// required length of the key
pub fn key_len(&self) -> usize { pub fn key_len(&self) -> usize {
@ -48,21 +48,10 @@ impl CipherKind {
#[derive(Debug)] #[derive(Debug)]
pub struct AAD<'a>(pub &'a [u8]); pub struct AAD<'a>(pub &'a [u8]);
/// Cipher direction, to make sure we don't reuse the same cipher
/// for both decrypting and encrypting
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
pub enum CipherDirection {
/// Receive, to decrypt only
Recv = 0,
/// Send, to encrypt only
Send,
}
/// strong typedef for header length /// strong typedef for header length
/// aka: nonce length in the encrypted data /// aka: nonce length in the encrypted data
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct HeadLen(pub usize); pub struct NonceLen(pub usize);
/// strong typedef for the Tag length /// strong typedef for the Tag length
/// aka: cryptographic authentication tag length at the end /// aka: cryptographic authentication tag length at the end
/// of the encrypted data /// of the encrypted data
@ -77,21 +66,21 @@ enum Cipher {
impl Cipher { impl Cipher {
/// Build a new Cipher /// Build a new Cipher
fn new(kind: CipherKind, secret: Secret) -> Self { fn new(kind: Kind, secret: Secret) -> Self {
match kind { match kind {
CipherKind::XChaCha20Poly1305 => { Kind::XChaCha20Poly1305 => {
Self::XChaCha20Poly1305(XChaCha20Poly1305::new(secret)) Self::XChaCha20Poly1305(XChaCha20Poly1305::new(secret))
} }
} }
} }
pub fn kind(&self) -> CipherKind { pub fn kind(&self) -> Kind {
match self { match self {
Cipher::XChaCha20Poly1305(_) => CipherKind::XChaCha20Poly1305, Cipher::XChaCha20Poly1305(_) => Kind::XChaCha20Poly1305,
} }
} }
fn nonce_len(&self) -> HeadLen { fn nonce_len(&self) -> NonceLen {
match self { match self {
Cipher::XChaCha20Poly1305(_) => HeadLen(Nonce::len()), Cipher::XChaCha20Poly1305(_) => Nonce::len(),
} }
} }
fn tag_len(&self) -> TagLen { fn tag_len(&self) -> TagLen {
@ -117,7 +106,7 @@ impl Cipher {
return Err(Error::NotEnoughData(raw_data.len())); return Err(Error::NotEnoughData(raw_data.len()));
} }
let (nonce_bytes, data_and_tag) = let (nonce_bytes, data_and_tag) =
raw_data.split_at_mut(Nonce::len()); raw_data.split_at_mut(Nonce::len().0);
let (data_notag, tag_bytes) = data_and_tag.split_at_mut( let (data_notag, tag_bytes) = data_and_tag.split_at_mut(
data_and_tag.len() data_and_tag.len()
- ::ring::aead::CHACHA20_POLY1305.tag_len(), - ::ring::aead::CHACHA20_POLY1305.tag_len(),
@ -137,20 +126,20 @@ impl Cipher {
}; };
//data.drain(..Nonce::len()); //data.drain(..Nonce::len());
//data.truncate(final_len); //data.truncate(final_len);
Ok(&raw_data[Nonce::len()..Nonce::len() + final_len]) Ok(&raw_data[Nonce::len().0..Nonce::len().0 + final_len])
} }
} }
} }
fn overhead(&self) -> usize { fn overhead(&self) -> usize {
match self { match self {
Cipher::XChaCha20Poly1305(_) => { Cipher::XChaCha20Poly1305(_) => {
let cipher = CipherKind::XChaCha20Poly1305; let cipher = Kind::XChaCha20Poly1305;
cipher.nonce_len().0 + cipher.tag_len().0 cipher.nonce_len().0 + cipher.tag_len().0
} }
} }
} }
fn encrypt( fn encrypt(
&self, &mut self,
nonce: &Nonce, nonce: &Nonce,
aad: AAD, aad: AAD,
data: &mut [u8], data: &mut [u8],
@ -162,13 +151,13 @@ impl Cipher {
let tag_len: usize = ::ring::aead::CHACHA20_POLY1305.tag_len(); let tag_len: usize = ::ring::aead::CHACHA20_POLY1305.tag_len();
let data_len_notag = data.len() - tag_len; let data_len_notag = data.len() - tag_len;
// write nonce // write nonce
data[..Nonce::len()].copy_from_slice(nonce.as_bytes()); data[..Nonce::len().0].copy_from_slice(nonce.as_bytes());
// encrypt data // encrypt data
match cipher.cipher.encrypt_in_place_detached( match cipher.cipher.encrypt_in_place_detached(
nonce.as_bytes().into(), nonce.as_bytes().into(),
aad.0, aad.0,
&mut data[Nonce::len()..data_len_notag], &mut data[Nonce::len().0..data_len_notag],
) { ) {
Ok(tag) => { Ok(tag) => {
data[data_len_notag..].copy_from_slice(tag.as_slice()); data[data_len_notag..].copy_from_slice(tag.as_slice());
@ -194,11 +183,11 @@ impl ::core::fmt::Debug for CipherRecv {
impl CipherRecv { impl CipherRecv {
/// Build a new Cipher /// Build a new Cipher
pub fn new(kind: CipherKind, secret: Secret) -> Self { pub fn new(kind: Kind, secret: Secret) -> Self {
Self(Cipher::new(kind, secret)) Self(Cipher::new(kind, secret))
} }
/// Get the length of the nonce for this cipher /// Get the length of the nonce for this cipher
pub fn nonce_len(&self) -> HeadLen { pub fn nonce_len(&self) -> NonceLen {
self.0.nonce_len() self.0.nonce_len()
} }
/// Get the length of the nonce for this cipher /// Get the length of the nonce for this cipher
@ -215,14 +204,14 @@ impl CipherRecv {
self.0.decrypt(aad, data) self.0.decrypt(aad, data)
} }
/// return the underlying cipher id /// return the underlying cipher id
pub fn kind(&self) -> CipherKind { pub fn kind(&self) -> Kind {
self.0.kind() self.0.kind()
} }
} }
/// Send only cipher /// Send only cipher
pub struct CipherSend { pub struct CipherSend {
nonce: NonceSync, nonce: Nonce,
cipher: Cipher, cipher: Cipher,
} }
impl ::core::fmt::Debug for CipherSend { impl ::core::fmt::Debug for CipherSend {
@ -236,20 +225,20 @@ impl ::core::fmt::Debug for CipherSend {
impl CipherSend { impl CipherSend {
/// Build a new Cipher /// Build a new Cipher
pub fn new(kind: CipherKind, secret: Secret, rand: &Random) -> Self { pub fn new(kind: Kind, secret: Secret, rand: &Random) -> Self {
Self { Self {
nonce: NonceSync::new(rand), nonce: Nonce::new(rand),
cipher: Cipher::new(kind, secret), cipher: Cipher::new(kind, secret),
} }
} }
/// Encrypt the given data /// Encrypt the given data
pub fn encrypt(&self, aad: AAD, data: &mut [u8]) -> Result<(), Error> { pub fn encrypt(&mut self, aad: AAD, data: &mut [u8]) -> Result<(), Error> {
let old_nonce = self.nonce.advance(); let old_nonce = self.nonce.advance();
self.cipher.encrypt(&old_nonce, aad, data)?; self.cipher.encrypt(&old_nonce, aad, data)?;
Ok(()) Ok(())
} }
/// return the underlying cipher id /// return the underlying cipher id
pub fn kind(&self) -> CipherKind { pub fn kind(&self) -> Kind {
self.cipher.kind() self.cipher.kind()
} }
} }
@ -285,7 +274,7 @@ struct NonceNum {
#[repr(C)] #[repr(C)]
pub union Nonce { pub union Nonce {
num: NonceNum, num: NonceNum,
raw: [u8; Self::len()], raw: [u8; Self::len().0],
} }
impl ::core::fmt::Debug for Nonce { impl ::core::fmt::Debug for Nonce {
@ -303,17 +292,17 @@ impl ::core::fmt::Debug for Nonce {
impl Nonce { impl Nonce {
/// Generate a new random Nonce /// Generate a new random Nonce
pub fn new(rand: &Random) -> Self { pub fn new(rand: &Random) -> Self {
let mut raw = [0; Self::len()]; let mut raw = [0; Self::len().0];
rand.fill(&mut raw); rand.fill(&mut raw);
Self { raw } Self { raw }
} }
/// Length of this nonce in bytes /// Length of this nonce in bytes
pub const fn len() -> usize { pub const fn len() -> NonceLen {
// FIXME: was:12. xchacha20poly1305 requires 24. // FIXME: was:12. xchacha20poly1305 requires 24.
// but we should change keys much earlier than that, and our // but we should change keys much earlier than that, and our
// nonces are not random, but sequential. // nonces are not random, but sequential.
// we should change keys every 2^30 bytes to be sure (stream max window) // we should change keys every 2^30 bytes to be sure (stream max window)
return 24; return NonceLen(24);
} }
/// Get reference to the nonce bytes /// Get reference to the nonce bytes
pub fn as_bytes(&self) -> &[u8] { pub fn as_bytes(&self) -> &[u8] {
@ -323,11 +312,12 @@ impl Nonce {
} }
} }
/// Create Nonce from array /// Create Nonce from array
pub fn from_slice(raw: [u8; Self::len()]) -> Self { pub fn from_slice(raw: [u8; Self::len().0]) -> Self {
Self { raw } Self { raw }
} }
/// Go to the next nonce /// Go to the next nonce
pub fn advance(&mut self) { pub fn advance(&mut self) -> Self {
let old_nonce = self.clone();
#[allow(unsafe_code)] #[allow(unsafe_code)]
unsafe { unsafe {
let old_low = self.num.low; let old_low = self.num.low;
@ -336,40 +326,17 @@ impl Nonce {
self.num.high = self.num.high; self.num.high = self.num.high;
} }
} }
}
}
/// Synchronize the mutex acess with a nonce for multithread safety
// TODO: remove mutex, not needed anymore
#[derive(Debug)]
pub struct NonceSync {
nonce: ::std::sync::Mutex<Nonce>,
}
impl NonceSync {
/// Create a new thread safe nonce
pub fn new(rand: &Random) -> Self {
Self {
nonce: ::std::sync::Mutex::new(Nonce::new(rand)),
}
}
/// Advance the nonce and return the *old* value
pub fn advance(&self) -> Nonce {
let old_nonce: Nonce;
{
let mut nonce = self.nonce.lock().unwrap();
old_nonce = *nonce;
nonce.advance();
}
old_nonce old_nonce
} }
} }
/// Select the best cipher from our supported list /// Select the best cipher from our supported list
/// and the other endpoint supported list. /// and the other endpoint supported list.
/// Give priority to our list /// Give priority to our list
pub fn server_select_cipher( pub fn server_select_cipher(
cfg: &Config, cfg: &Config,
client_supported: &Vec<CipherKind>, client_supported: &Vec<Kind>,
) -> Option<CipherKind> { ) -> Option<Kind> {
cfg.ciphers cfg.ciphers
.iter() .iter()
.find(|c| client_supported.contains(c)) .find(|c| client_supported.contains(c))
@ -381,8 +348,8 @@ pub fn server_select_cipher(
/// This is used only in the Directory synchronized handshake /// This is used only in the Directory synchronized handshake
pub fn client_select_cipher( pub fn client_select_cipher(
cfg: &Config, cfg: &Config,
server_supported: &Vec<CipherKind>, server_supported: &Vec<Kind>,
) -> Option<CipherKind> { ) -> Option<Kind> {
server_supported server_supported
.iter() .iter()
.find(|c| cfg.ciphers.contains(c)) .find(|c| cfg.ciphers.contains(c))
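The two selection helpers resolve preference in opposite directions: the server walks its own configured list and picks the first cipher the client also supports, while the client walks the list the server advertised. A minimal sketch of that asymmetry, using plain vectors in place of a full Config (illustrative values only, assuming it runs inside the crate's own tests):
#[test]
fn cipher_negotiation_sketch() {
    use crate::enc::sym::Kind;
    let ours = vec![Kind::XChaCha20Poly1305]; // our Config.ciphers
    let theirs = vec![Kind::XChaCha20Poly1305]; // what the peer advertised
    // server side: priority follows *our* order
    let server_pick = ours.iter().find(|c| theirs.contains(c)).copied();
    // client side: priority follows the *server's* order
    let client_pick = theirs.iter().find(|c| ours.contains(c)).copied();
    assert!(server_pick == Some(Kind::XChaCha20Poly1305));
    assert!(client_pick == server_pick);
}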

View File

@ -1,24 +1,26 @@
use crate::{ use crate::{
auth, connection::{
connection::{handshake::*, ID}, handshake::{self, *},
ID,
},
enc::{self, asym::KeyID}, enc::{self, asym::KeyID},
}; };
#[test] #[test]
fn test_simple_encrypt_decrypt() { fn test_simple_encrypt_decrypt() {
let rand = enc::Random::new(); let rand = enc::Random::new();
let cipher = enc::sym::CipherKind::XChaCha20Poly1305; let cipher = enc::sym::Kind::XChaCha20Poly1305;
let secret = enc::Secret::new_rand(&rand); let secret = enc::Secret::new_rand(&rand);
let secret2 = secret.clone(); let secret2 = secret.clone();
let cipher_send = enc::sym::CipherSend::new(cipher, secret, &rand); let mut cipher_send = enc::sym::CipherSend::new(cipher, secret, &rand);
let cipher_recv = enc::sym::CipherRecv::new(cipher, secret2); let cipher_recv = enc::sym::CipherRecv::new(cipher, secret2);
let mut data = Vec::new(); let mut data = Vec::new();
let tot_len = cipher_recv.nonce_len().0 + 1234 + cipher_recv.tag_len().0; let tot_len = cipher_recv.nonce_len().0 + 1234 + cipher_recv.tag_len().0;
data.resize(tot_len, 0); data.resize(tot_len, 0);
rand.fill(&mut data); rand.fill(&mut data);
data[..enc::sym::Nonce::len()].copy_from_slice(&[0; 24]); data[..enc::sym::Nonce::len().0].copy_from_slice(&[0; 24]);
let last = data.len() - cipher_recv.tag_len().0; let last = data.len() - cipher_recv.tag_len().0;
data[last..].copy_from_slice(&[0; 16]); data[last..].copy_from_slice(&[0; 16]);
let orig = data.clone(); let orig = data.clone();
@ -31,7 +33,7 @@ fn test_simple_encrypt_decrypt() {
if cipher_recv.decrypt(aad2, &mut data).is_err() { if cipher_recv.decrypt(aad2, &mut data).is_err() {
assert!(false, "Decrypt failed"); assert!(false, "Decrypt failed");
} }
data[..enc::sym::Nonce::len()].copy_from_slice(&[0; 24]); data[..enc::sym::Nonce::len().0].copy_from_slice(&[0; 24]);
let last = data.len() - cipher_recv.tag_len().0; let last = data.len() - cipher_recv.tag_len().0;
data[last..].copy_from_slice(&[0; 16]); data[last..].copy_from_slice(&[0; 16]);
assert!(orig == data, "DIFFERENT!\n{:?}\n{:?}\n", orig, data); assert!(orig == data, "DIFFERENT!\n{:?}\n{:?}\n", orig, data);
@ -40,18 +42,18 @@ fn test_simple_encrypt_decrypt() {
#[test] #[test]
fn test_encrypt_decrypt() { fn test_encrypt_decrypt() {
let rand = enc::Random::new(); let rand = enc::Random::new();
let cipher = enc::sym::CipherKind::XChaCha20Poly1305; let cipher = enc::sym::Kind::XChaCha20Poly1305;
let secret = enc::Secret::new_rand(&rand); let secret = enc::Secret::new_rand(&rand);
let secret2 = secret.clone(); let secret2 = secret.clone();
let cipher_send = enc::sym::CipherSend::new(cipher, secret, &rand); let mut cipher_send = enc::sym::CipherSend::new(cipher, secret, &rand);
let cipher_recv = enc::sym::CipherRecv::new(cipher, secret2); let cipher_recv = enc::sym::CipherRecv::new(cipher, secret2);
let nonce_len = cipher_recv.nonce_len(); let nonce_len = cipher_recv.nonce_len();
let tag_len = cipher_recv.tag_len(); let tag_len = cipher_recv.tag_len();
let service_key = enc::Secret::new_rand(&rand); let service_key = enc::Secret::new_rand(&rand);
let data = dirsync::RespInner::ClearText(dirsync::RespData { let data = dirsync::resp::State::ClearText(dirsync::resp::Data {
client_nonce: dirsync::Nonce::new(&rand), client_nonce: dirsync::Nonce::new(&rand),
id: ID::ID(::core::num::NonZeroU64::new(424242).unwrap()), id: ID::ID(::core::num::NonZeroU64::new(424242).unwrap()),
service_connection_id: ID::ID( service_connection_id: ID::ID(
@ -60,7 +62,7 @@ fn test_encrypt_decrypt() {
service_key, service_key,
}); });
let resp = dirsync::Resp { let resp = dirsync::resp::Resp {
client_key_id: KeyID(4444), client_key_id: KeyID(4444),
data, data,
}; };
@ -68,7 +70,7 @@ fn test_encrypt_decrypt() {
let encrypt_to = encrypt_from + resp.encrypted_length(nonce_len, tag_len); let encrypt_to = encrypt_from + resp.encrypted_length(nonce_len, tag_len);
let h_resp = let h_resp =
Handshake::new(HandshakeData::DirSync(dirsync::DirSync::Resp(resp))); Handshake::new(handshake::Data::DirSync(dirsync::DirSync::Resp(resp)));
let mut bytes = Vec::<u8>::with_capacity( let mut bytes = Vec::<u8>::with_capacity(
h_resp.len(cipher.nonce_len(), cipher.tag_len()), h_resp.len(cipher.nonce_len(), cipher.tag_len()),
@ -117,7 +119,7 @@ fn test_encrypt_decrypt() {
} }
}; };
// reparse // reparse
if let HandshakeData::DirSync(dirsync::DirSync::Resp(r_a)) = if let handshake::Data::DirSync(dirsync::DirSync::Resp(r_a)) =
&mut deserialized.data &mut deserialized.data
{ {
let enc_start = r_a.encrypted_offset() + cipher.nonce_len().0; let enc_start = r_a.encrypted_offset() + cipher.nonce_len().0;

View File

@ -7,16 +7,16 @@ use crate::{
handshake::{ handshake::{
self, self,
dirsync::{self, DirSync}, dirsync::{self, DirSync},
tracker::{HandshakeAction, HandshakeTracker}, Handshake,
Handshake, HandshakeData,
}, },
packet::{self, Packet},
socket::{UdpClient, UdpServer}, socket::{UdpClient, UdpServer},
ConnList, Connection, IDSend, Packet, Conn, ConnList, IDSend,
}, },
dnssec, dnssec,
enc::{ enc::{
asym::{self, KeyID, PrivKey, PubKey}, asym::{self, KeyID, PrivKey, PubKey},
hkdf::{self, Hkdf, HkdfKind}, hkdf::{self, Hkdf},
sym, Random, Secret, sym, Random, Secret,
}, },
inner::ThreadTracker, inner::ThreadTracker,
@ -68,7 +68,7 @@ pub struct Worker {
queue_timeouts_send: mpsc::UnboundedSender<Work>, queue_timeouts_send: mpsc::UnboundedSender<Work>,
thread_channels: Vec<::async_channel::Sender<Work>>, thread_channels: Vec<::async_channel::Sender<Work>>,
connections: ConnList, connections: ConnList,
handshakes: HandshakeTracker, handshakes: handshake::Tracker,
} }
#[allow(unsafe_code)] #[allow(unsafe_code)]
@ -85,7 +85,7 @@ impl Worker {
) -> ::std::io::Result<Self> { ) -> ::std::io::Result<Self> {
let (queue_timeouts_send, queue_timeouts_recv) = let (queue_timeouts_send, queue_timeouts_recv) =
mpsc::unbounded_channel(); mpsc::unbounded_channel();
let mut handshakes = HandshakeTracker::new( let mut handshakes = handshake::Tracker::new(
thread_id, thread_id,
cfg.ciphers.clone(), cfg.ciphers.clone(),
cfg.key_exchanges.clone(), cfg.key_exchanges.clone(),
@ -125,6 +125,7 @@ impl Worker {
handshakes, handshakes,
}) })
} }
/// Continuously loop and process work as needed /// Continuously loop and process work as needed
pub async fn work_loop(&mut self) { pub async fn work_loop(&mut self) {
'mainloop: loop { 'mainloop: loop {
@ -292,7 +293,7 @@ impl Worker {
// are PubKey::Exchange // are PubKey::Exchange
unreachable!() unreachable!()
} }
let mut conn = Connection::new( let mut conn = Conn::new(
hkdf, hkdf,
cipher_selected, cipher_selected,
connection::Role::Client, connection::Role::Client,
@ -325,39 +326,39 @@ impl Worker {
}; };
// build request // build request
let auth_info = dirsync::AuthInfo { let auth_info = dirsync::req::AuthInfo {
user: UserID::new_anonymous(), user: UserID::new_anonymous(),
token: Token::new_anonymous(&self.rand), token: Token::new_anonymous(&self.rand),
service_id: conn_info.service_id, service_id: conn_info.service_id,
domain: conn_info.domain, domain: conn_info.domain,
}; };
let req_data = dirsync::ReqData { let req_data = dirsync::req::Data {
nonce: dirsync::Nonce::new(&self.rand), nonce: dirsync::Nonce::new(&self.rand),
client_key_id, client_key_id,
id: auth_recv_id.0, //FIXME: is zero id: auth_recv_id.0, //FIXME: is zero
auth: auth_info, auth: auth_info,
}; };
let req = dirsync::Req { let req = dirsync::req::Req {
key_id: key.0, key_id: key.0,
exchange, exchange,
hkdf: hkdf_selected, hkdf: hkdf_selected,
cipher: cipher_selected, cipher: cipher_selected,
exchange_key: pub_key, exchange_key: pub_key,
data: dirsync::ReqInner::ClearText(req_data), data: dirsync::req::State::ClearText(req_data),
}; };
let encrypt_start = ID::len() + req.encrypted_offset(); let encrypt_start =
connection::ID::len() + req.encrypted_offset();
let encrypt_end = encrypt_start let encrypt_end = encrypt_start
+ req.encrypted_length( + req.encrypted_length(
cipher_selected.nonce_len(), cipher_selected.nonce_len(),
cipher_selected.tag_len(), cipher_selected.tag_len(),
); );
let h_req = Handshake::new(HandshakeData::DirSync( let h_req = Handshake::new(handshake::Data::DirSync(
DirSync::Req(req), DirSync::Req(req),
)); ));
use connection::{PacketData, ID};
let packet = Packet { let packet = Packet {
id: ID::Handshake, id: connection::ID::Handshake,
data: PacketData::Handshake(h_req), data: packet::Data::Handshake(h_req),
}; };
let tot_len = packet.len( let tot_len = packet.len(
@ -448,9 +449,9 @@ impl Worker {
} }
}; };
match action { match action {
HandshakeAction::AuthNeeded(authinfo) => { handshake::Action::AuthNeeded(authinfo) => {
let req; let req;
if let HandshakeData::DirSync(DirSync::Req(r)) = if let handshake::Data::DirSync(DirSync::Req(r)) =
authinfo.handshake.data authinfo.handshake.data
{ {
req = r; req = r;
@ -458,9 +459,8 @@ impl Worker {
::tracing::error!("AuthInfo on non DS::Req"); ::tracing::error!("AuthInfo on non DS::Req");
return; return;
} }
use dirsync::ReqInner;
let req_data = match req.data { let req_data = match req.data {
ReqInner::ClearText(req_data) => req_data, dirsync::req::State::ClearText(req_data) => req_data,
_ => { _ => {
::tracing::error!("AuthNeeded: expected ClearText"); ::tracing::error!("AuthNeeded: expected ClearText");
assert!(false, "AuthNeeded: unreachable"); assert!(false, "AuthNeeded: unreachable");
@ -510,12 +510,12 @@ impl Worker {
// Client has correctly authenticated // Client has correctly authenticated
// TODO: contact the service, get the key and // TODO: contact the service, get the key and
// connection ID // connection ID
let srv_conn_id = ID::new_rand(&self.rand); let srv_conn_id = connection::ID::new_rand(&self.rand);
let srv_secret = Secret::new_rand(&self.rand); let srv_secret = Secret::new_rand(&self.rand);
let head_len = req.cipher.nonce_len(); let head_len = req.cipher.nonce_len();
let tag_len = req.cipher.tag_len(); let tag_len = req.cipher.tag_len();
let mut auth_conn = Connection::new( let mut auth_conn = Conn::new(
authinfo.hkdf, authinfo.hkdf,
req.cipher, req.cipher,
connection::Role::Server, connection::Role::Server,
@ -526,7 +526,7 @@ impl Worker {
let auth_id_recv = self.connections.reserve_first(); let auth_id_recv = self.connections.reserve_first();
auth_conn.id_recv = auth_id_recv; auth_conn.id_recv = auth_id_recv;
let resp_data = dirsync::RespData { let resp_data = dirsync::resp::Data {
client_nonce: req_data.nonce, client_nonce: req_data.nonce,
id: auth_conn.id_recv.0, id: auth_conn.id_recv.0,
service_connection_id: srv_conn_id, service_connection_id: srv_conn_id,
@ -536,21 +536,20 @@ impl Worker {
// no aad for now // no aad for now
let aad = AAD(&mut []); let aad = AAD(&mut []);
use dirsync::RespInner; let resp = dirsync::resp::Resp {
let resp = dirsync::Resp {
client_key_id: req_data.client_key_id, client_key_id: req_data.client_key_id,
data: RespInner::ClearText(resp_data), data: dirsync::resp::State::ClearText(resp_data),
}; };
let encrypt_from = ID::len() + resp.encrypted_offset(); let encrypt_from =
connection::ID::len() + resp.encrypted_offset();
let encrypt_until = let encrypt_until =
encrypt_from + resp.encrypted_length(head_len, tag_len); encrypt_from + resp.encrypted_length(head_len, tag_len);
let resp_handshake = Handshake::new( let resp_handshake = Handshake::new(
HandshakeData::DirSync(DirSync::Resp(resp)), handshake::Data::DirSync(DirSync::Resp(resp)),
); );
use connection::{PacketData, ID};
let packet = Packet { let packet = Packet {
id: ID::new_handshake(), id: connection::ID::new_handshake(),
data: PacketData::Handshake(resp_handshake), data: packet::Data::Handshake(resp_handshake),
}; };
let tot_len = packet.len(head_len, tag_len); let tot_len = packet.len(head_len, tag_len);
let mut raw_out = Vec::<u8>::with_capacity(tot_len); let mut raw_out = Vec::<u8>::with_capacity(tot_len);
@ -566,9 +565,9 @@ impl Worker {
} }
self.send_packet(raw_out, udp.src, udp.dst).await; self.send_packet(raw_out, udp.src, udp.dst).await;
} }
HandshakeAction::ClientConnect(cci) => { handshake::Action::ClientConnect(cci) => {
let ds_resp; let ds_resp;
if let HandshakeData::DirSync(DirSync::Resp(resp)) = if let handshake::Data::DirSync(DirSync::Resp(resp)) =
cci.handshake.data cci.handshake.data
{ {
ds_resp = resp; ds_resp = resp;
@ -578,7 +577,8 @@ impl Worker {
} }
// track connection // track connection
let resp_data; let resp_data;
if let dirsync::RespInner::ClearText(r_data) = ds_resp.data if let dirsync::resp::State::ClearText(r_data) =
ds_resp.data
{ {
resp_data = r_data; resp_data = r_data;
} else { } else {
@ -607,11 +607,11 @@ impl Worker {
//FIXME: the Secret should be XORed with the client //FIXME: the Secret should be XORed with the client
// stored secret (if any) // stored secret (if any)
let hkdf = Hkdf::new( let hkdf = Hkdf::new(
HkdfKind::Sha3, hkdf::Kind::Sha3,
cci.service_id.as_bytes(), cci.service_id.as_bytes(),
resp_data.service_key, resp_data.service_key,
); );
let mut service_connection = Connection::new( let mut service_connection = Conn::new(
hkdf, hkdf,
cipher, cipher,
connection::Role::Client, connection::Role::Client,
@ -626,7 +626,7 @@ impl Worker {
let _ = let _ =
cci.answer.send(Ok((cci.srv_key_id, auth_srv_conn))); cci.answer.send(Ok((cci.srv_key_id, auth_srv_conn)));
} }
HandshakeAction::Nothing => {} handshake::Action::Nothing => {}
}; };
} }
} }

View File

@ -39,6 +39,7 @@ use crate::{
}, },
}; };
pub use config::Config; pub use config::Config;
pub use connection::Connection;
/// Main fenrir library errors /// Main fenrir library errors
#[derive(::thiserror::Error, Debug)] #[derive(::thiserror::Error, Debug)]
@ -332,7 +333,7 @@ impl Fenrir {
let data: Vec<u8> = buffer[..bytes].to_vec(); let data: Vec<u8> = buffer[..bytes].to_vec();
// we very likely have multiple threads, pinned to different cpus. // we very likely have multiple threads, pinned to different cpus.
// use the ConnectionID to send the same connection // use the connection::ID to send the same connection
// to the same thread. // to the same thread.
// Handshakes have connection ID 0, so we use the sender's UDP port // Handshakes have connection ID 0, so we use the sender's UDP port
@ -341,13 +342,12 @@ impl Fenrir {
Err(_) => continue, // packet way too short, ignore. Err(_) => continue, // packet way too short, ignore.
}; };
let thread_idx: usize = { let thread_idx: usize = {
use connection::packet::ConnectionID;
match packet.id { match packet.id {
ConnectionID::Handshake => { connection::ID::Handshake => {
let send_port = sock_sender.0.port() as u64; let send_port = sock_sender.0.port() as u64;
(send_port % queues_num) as usize (send_port % queues_num) as usize
} }
ConnectionID::ID(id) => (id.get() % queues_num) as usize, connection::ID::ID(id) => (id.get() % queues_num) as usize,
} }
}; };
let _ = work_queues[thread_idx] let _ = work_queues[thread_idx]

View File

@ -62,10 +62,7 @@ async fn test_connection_dirsync() {
rt.block_on(local_thread); rt.block_on(local_thread);
}); });
use crate::{ use crate::dnssec::{record, Record};
connection::handshake::HandshakeID,
dnssec::{record, Record},
};
let port: u16 = server.addresses()[0].port(); let port: u16 = server.addresses()[0].port();
@ -76,14 +73,14 @@ async fn test_connection_dirsync() {
port: Some(::core::num::NonZeroU16::new(port).unwrap()), port: Some(::core::num::NonZeroU16::new(port).unwrap()),
priority: record::AddressPriority::P1, priority: record::AddressPriority::P1,
weight: record::AddressWeight::W1, weight: record::AddressWeight::W1,
handshake_ids: [HandshakeID::DirectorySynchronized].to_vec(), handshake_ids: [handshake::ID::DirectorySynchronized].to_vec(),
public_key_idx: [record::PubKeyIdx(0)].to_vec(), public_key_idx: [record::PubKeyIdx(0)].to_vec(),
}] }]
.to_vec(), .to_vec(),
key_exchanges: [enc::asym::KeyExchangeKind::X25519DiffieHellman] key_exchanges: [enc::asym::KeyExchangeKind::X25519DiffieHellman]
.to_vec(), .to_vec(),
hkdfs: [enc::hkdf::HkdfKind::Sha3].to_vec(), hkdfs: [enc::hkdf::Kind::Sha3].to_vec(),
ciphers: [enc::sym::CipherKind::XChaCha20Poly1305].to_vec(), ciphers: [enc::sym::Kind::XChaCha20Poly1305].to_vec(),
}; };
::tokio::time::sleep(::std::time::Duration::from_millis(500)).await; ::tokio::time::sleep(::std::time::Duration::from_millis(500)).await;