Compare commits

..

No commits in common. "main" and "43efb432435910373d6a35c849738c662b993f43" have entirely different histories.

92 changed files with 2589 additions and 6152 deletions

View File

@ -1,23 +0,0 @@
name: Rust
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
workflow_dispatch:
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Build
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose

5
.gitignore vendored
View File

@ -6,13 +6,8 @@
/ptth_relay.toml /ptth_relay.toml
/ptth_build_L6KLMVS6/ /ptth_build_L6KLMVS6/
/ptth_server_build_BIHWLQXQ/ /ptth_server_build_BIHWLQXQ/
/scope/untracked
/scraper-secret.txt /scraper-secret.txt
/target /target
/untracked
# TLS certs used for QUIC experiments # TLS certs used for QUIC experiments
*.crt *.crt
# Kate editor temp file
*.kate-swp

1562
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -7,7 +7,6 @@ edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
description = "An extroverted HTTP server that can run behind firewalls" description = "An extroverted HTTP server that can run behind firewalls"
repository = "https://six-five-six-four.com/git/reactor/ptth"
readme = "README.md" readme = "README.md"
keywords = ["http", "tunnel", "firewall"] keywords = ["http", "tunnel", "firewall"]
categories = ["command-line-utilities", "web-programming::http-server"] categories = ["command-line-utilities", "web-programming::http-server"]
@ -24,7 +23,7 @@ exclude = [
[dependencies] [dependencies]
anyhow = "1.0.38" anyhow = "1.0.38"
blake3 = "1.0.0" blake3 = "0.3.7"
tokio = { version = "1.4.0", features = ["full"] } tokio = { version = "1.4.0", features = ["full"] }
tracing-subscriber = "0.2.16" tracing-subscriber = "0.2.16"
tracing = "0.1.25" tracing = "0.1.25"
@ -47,5 +46,6 @@ chrono = {version = "0.4.19", features = ["serde"]}
members = [ members = [
"crates/*", "crates/*",
"prototypes/*",
"tools/*", "tools/*",
] ]

View File

@ -1,8 +1,8 @@
# https://whitfin.io/speeding-up-rust-docker-builds/ # https://whitfin.io/speeding-up-rust-docker-builds/
# TODO: https://stackoverflow.com/questions/57389547/how-to-define-the-context-for-a-docker-build-as-a-specific-commit-on-one-of-the # TODO: https://stackoverflow.com/questions/57389547/how-to-define-the-context-for-a-docker-build-as-a-specific-commit-on-one-of-the
# docker pull rust:1.75-slim-buster # rust:1.50-slim-buster
FROM rust@sha256:981dda194caa72aa466cb8789aa6d18ee1af22bc77f1c0b8dc9690f5d3e8fe82 as build FROM rust@sha256:5dd85eb0c60bbdea14a6ecba1f6fe4a0f5c878bcf06d2cdfae0aff3a19ed4b10 as build
WORKDIR / WORKDIR /
ENV USER root ENV USER root
@ -20,8 +20,7 @@ cargo new --bin crates/ptth_server && \
cargo new --bin crates/ptth_file_server_bin && \ cargo new --bin crates/ptth_file_server_bin && \
cargo new --bin tools/ptth_tail && \ cargo new --bin tools/ptth_tail && \
cargo new --bin crates/debug_proxy && \ cargo new --bin crates/debug_proxy && \
cargo new --bin crates/ptth_quic && \ cargo new --bin prototypes/quic_demo
cargo new --lib crates/udp_over_tcp
# copy over your manifests # copy over your manifests
COPY ./Cargo.lock ./ COPY ./Cargo.lock ./
@ -29,8 +28,7 @@ COPY ./Cargo.toml ./
COPY ./crates/always_equal/Cargo.toml ./crates/always_equal/ COPY ./crates/always_equal/Cargo.toml ./crates/always_equal/
COPY ./crates/ptth_core/Cargo.toml ./crates/ptth_core/ COPY ./crates/ptth_core/Cargo.toml ./crates/ptth_core/
COPY ./crates/ptth_relay/Cargo.toml ./crates/ptth_relay/ COPY ./crates/ptth_relay/Cargo.toml ./crates/ptth_relay/
COPY ./crates/ptth_quic/Cargo.toml ./crates/ptth_quic/ COPY ./prototypes/quic_demo/Cargo.toml ./prototypes/quic_demo/
COPY ./crates/udp_over_tcp/Cargo.toml ./crates/udp_over_tcp/
# this build step will cache your dependencies # this build step will cache your dependencies
RUN cargo build --release -p ptth_relay RUN cargo build --release -p ptth_relay
@ -41,7 +39,7 @@ src/*.rs \
crates/always_equal/src/*.rs \ crates/always_equal/src/*.rs \
crates/ptth_core/src/*.rs \ crates/ptth_core/src/*.rs \
crates/ptth_relay/src/*.rs \ crates/ptth_relay/src/*.rs \
crates/ptth_quic/src/*.rs prototypes/quic_demo/src/*.rs
# Copy source tree # Copy source tree
# Yes, I tried a few variations on the syntax. Dockerfiles are just rough. # Yes, I tried a few variations on the syntax. Dockerfiles are just rough.
@ -51,7 +49,7 @@ COPY ./crates/always_equal ./crates/always_equal
COPY ./crates/ptth_core ./crates/ptth_core COPY ./crates/ptth_core ./crates/ptth_core
COPY ./crates/ptth_relay ./crates/ptth_relay COPY ./crates/ptth_relay ./crates/ptth_relay
COPY ./handlebars/ ./handlebars COPY ./handlebars/ ./handlebars
COPY ./crates/ptth_quic ./crates/ptth_quic COPY ./prototypes/quic_demo ./prototypes/quic_demo
# Bug in cargo's incremental build logic, triggered by # Bug in cargo's incremental build logic, triggered by
# Docker doing something funny with mtimes? Maybe? # Docker doing something funny with mtimes? Maybe?

View File

@ -226,4 +226,4 @@ For now, either email me (if you know me personally) or make a pull request to a
PTTH is licensed under the PTTH is licensed under the
[GNU AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html) [GNU AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html)
Copyright 2020-2021 "Trish" Copyright 2020 "Trish"

View File

@ -15,14 +15,15 @@ export LC_ALL="C"
TEMP_GIBBERISH="ptth_server_build_BIHWLQXQ" TEMP_GIBBERISH="ptth_server_build_BIHWLQXQ"
DEST="$TEMP_GIBBERISH/ptth" DEST="$TEMP_GIBBERISH/ptth"
rm -rf "$TEMP_GIBBERISH"
mkdir "$TEMP_GIBBERISH" mkdir "$TEMP_GIBBERISH"
mkdir "$DEST" mkdir "$DEST"
cargo build --release -p ptth_multi_call_server cargo build --release -p ptth_server
cp target/release/ptth_multi_call_server "$DEST/" mkdir -p "$DEST/handlebars/server"
rsync -r handlebars/server/ "$DEST/handlebars/server/"
cp target/release/ptth_server "$DEST/ptth_server"
( (
cd "$TEMP_GIBBERISH" || exit cd "$TEMP_GIBBERISH" || exit

View File

@ -1,11 +1,10 @@
[package] [package]
name = "always_equal" name = "always_equal"
version = "1.0.3" version = "1.0.2"
authors = ["Trish"] authors = ["Trish"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
description = "A wrapper for types that can't implement Eq" description = "A wrapper for types that can't implement Eq"
repository = "https://six-five-six-four.com/git/reactor/ptth"
[dependencies] [dependencies]

View File

@ -7,11 +7,9 @@ edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
description = "Common code for the PTTH relay and server" description = "Common code for the PTTH relay and server"
repository = "https://six-five-six-four.com/git/reactor/ptth"
[dependencies] [dependencies]
anyhow = "1.0.38"
base64 = "0.13.0" base64 = "0.13.0"
ctrlc = { version = "3.1.8", features = [ "termination" ] } ctrlc = { version = "3.1.8", features = [ "termination" ] }
futures = "0.3.7" futures = "0.3.7"

View File

@ -1,14 +1,8 @@
pub use std::{ pub use std::{
ffi::OsString,
io::Write,
sync::Arc, sync::Arc,
time::{Duration, Instant}, time::{Duration, Instant},
}; };
pub use anyhow::{
Context,
bail,
};
pub use tracing::{ pub use tracing::{
debug, error, info, trace, warn, debug, error, info, trace, warn,
instrument, instrument,

View File

@ -1,10 +0,0 @@
[package]
description = "A diceware passphrase generator with 1,200 words, only depends on `rand`"
name = "ptth_diceware"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rand = "0.8.5"

File diff suppressed because it is too large Load Diff

View File

@ -1,44 +0,0 @@
use rand::Rng;
/// Entry point: prints one space-separated, 8-word diceware passphrase.
pub fn main () {
    println! ("{}", passphrase (" ", 8));
}
/// Builds a passphrase of `len` random diceware words joined by `separator`.
pub fn passphrase (separator: &str, len: usize) -> String {
    let diceware = Diceware::default ();
    let mut words = Vec::with_capacity (len);
    for _ in 0..len {
        words.push (diceware.random_word ());
    }
    words.join (separator)
}
/// A diceware word list (1,253 words) loaded from the embedded EFF short word list.
pub struct Diceware {
words: Vec <String>,
}
impl Default for Diceware {
// Loads and sanity-checks the compiled-in word list.
fn default () -> Self
{
// Word list ships inside the binary, one word per line.
let wordlist = include_str! ("eff_short_wordlist_1.txt");
let words: Vec <_> = wordlist.split ('\n').take (1253).map (str::to_string).collect ();
// Spot-check count plus first/middle/last entries so a corrupted or
// re-ordered word list fails loudly instead of producing weak phrases.
assert_eq! (words.len (), 1253);
assert_eq! (words [0], "acid");
assert_eq! (words [600], "large");
assert_eq! (words [1252], "zoom");
Self {
words,
}
}
}
impl Diceware {
/// Picks one word uniformly at random using the thread-local RNG.
pub fn random_word (&self) -> &str
{
&self.words [rand::thread_rng ().gen_range (0..self.words.len ())]
}
}

View File

@ -1,3 +0,0 @@
// Thin binary wrapper: delegates straight to the ptth_diceware library.
pub fn main () {
ptth_diceware::main ()
}

View File

@ -1,156 +0,0 @@
#![warn (clippy::pedantic)]
use std::{
collections::*,
ffi::OsString,
net::SocketAddr,
path::{PathBuf},
sync::Arc,
};
use arc_swap::ArcSwap;
use hyper::{
Body,
Request,
Response,
Server,
service::{
make_service_fn,
service_fn,
},
StatusCode,
};
use serde::Deserialize;
use tokio_stream::wrappers::ReceiverStream;
use tracing::debug;
use ptth_core::{
http_serde::RequestParts,
prelude::*,
};
use ptth_server::{
file_server::{
self,
FileServer,
},
load_toml,
};
/// Bridges a raw hyper request into the PTTH `FileServer` and converts the
/// PTTH response back into a hyper response (status, headers, streamed body).
async fn handle_all (req: Request <Body>, state: Arc <FileServer>)
-> anyhow::Result <Response <Body>>
{
use std::str::FromStr;
use hyper::header::HeaderName;
debug! ("req.uri () = {:?}", req.uri ());
// Prefer the full path-and-query; fall back to just the path.
let path_and_query = req.uri ().path_and_query ().map_or_else (|| req.uri ().path (), http::uri::PathAndQuery::as_str);
let path_and_query = path_and_query.into ();
let (parts, _) = req.into_parts ();
let ptth_req = RequestParts::from_hyper (parts.method, path_and_query, parts.headers)?;
let ptth_resp = state.serve_all (
ptth_req.method,
&ptth_req.uri,
&ptth_req.headers
).await?;
let mut resp = Response::builder ()
.status (StatusCode::from (ptth_resp.parts.status_code));
for (k, v) in ptth_resp.parts.headers {
// Header names come back as plain strings; re-validate them for hyper.
resp = resp.header (HeaderName::from_str (&k)?, v);
}
// `None` body means an empty response; otherwise stream the channel out.
let body = ptth_resp.body.map_or_else (Body::empty, |body| {
Body::wrap_stream (ReceiverStream::new (body))
});
Ok (resp.body (body)?)
}
/// Shape of `ptth_server.toml`; every field is optional and defaulted by
/// `main` when the file or a field is absent.
#[derive (Deserialize)]
struct ConfigFile {
// Single root directory served at the top level; defaults to ".".
file_server_root: Option <PathBuf>,
// Additional named roots, keyed by mount name.
file_server_roots: Option <BTreeMap <String, PathBuf>>,
// Display name; defaults to "PTTH File Server".
name: Option <String>,
}
/// Runs the standalone PTTH file server until graceful shutdown.
///
/// Loads optional config from `./config/ptth_server.toml` (falling back to
/// defaults when missing), starts the metrics monitor task, then serves
/// HTTP on 0.0.0.0:4000. `_args` is accepted for CLI symmetry with the
/// other subcommands but is currently unused.
pub async fn main (_args: &[OsString]) -> anyhow::Result <()> {
    let path = PathBuf::from ("./config/ptth_server.toml");
    // A missing / unreadable config file is not an error — use defaults.
    let (file_server_root, file_server_roots, name) =
    match load_toml::load::<ConfigFile, _> (&path) {
        Ok (config_file) => (
            config_file.file_server_root,
            config_file.file_server_roots,
            config_file.name,
        ),
        Err (_) => {
            info! ("No ptth_server.toml file, using default configs");
            (None, None, None)
        },
    };
    let file_server_root = file_server_root.unwrap_or_else (|| PathBuf::from ("."));
    let file_server_roots = file_server_roots.unwrap_or_default ();
    let name = name.unwrap_or_else (|| "PTTH File Server".to_string ());
    info! ("file_server_root: {:?}", file_server_root);
    let addr = SocketAddr::from(([0, 0, 0, 0], 4000));
    info! ("Serving at {:?}", addr);
    // Background task that periodically refreshes the metrics snapshot.
    let metrics_interval = Arc::new (ArcSwap::default ());
    let interval_writer = Arc::clone (&metrics_interval);
    tokio::spawn (async move {
        file_server::metrics::Interval::monitor (interval_writer).await;
    });
    let config = file_server::Config {
        file_server_root,
        file_server_roots,
    };
    let state = Arc::new (FileServer::new (
        config,
        &PathBuf::new (),
        name,
        metrics_interval,
        Some (path),
    )?);
    // One shared FileServer state, cloned per connection / per request.
    let make_svc = make_service_fn (|_conn| {
        let state = state.clone ();
        async {
            Ok::<_, String> (service_fn (move |req| {
                let state = state.clone ();
                handle_all (req, state)
            }))
        }
    });
    // Serve until the graceful-shutdown signal fires; the forced-shutdown
    // wrapper bails out if graceful shutdown hangs.
    let (shutdown_rx, forced_shutdown) = ptth_core::graceful_shutdown::init_with_force ();
    let server = Server::bind (&addr)
        .serve (make_svc)
        .with_graceful_shutdown (async move {
            shutdown_rx.await.ok ();
        });
    forced_shutdown.wrap_server (server).await??;
    Ok (())
}

View File

@ -1,12 +1,128 @@
#![warn (clippy::pedantic)]
use std::{ use std::{
iter::FromIterator, net::SocketAddr,
path::PathBuf,
sync::Arc,
}; };
use arc_swap::ArcSwap;
use hyper::{
Body,
Request,
Response,
Server,
service::{
make_service_fn,
service_fn,
},
StatusCode,
};
use serde::Deserialize;
use tokio_stream::wrappers::ReceiverStream;
use tracing::debug;
use ptth_core::{
http_serde::RequestParts,
prelude::*,
};
use ptth_server::{
file_server::{
self,
metrics,
FileServer,
},
load_toml,
};
async fn handle_all (req: Request <Body>, state: Arc <FileServer>)
-> anyhow::Result <Response <Body>>
{
use std::str::FromStr;
use hyper::header::HeaderName;
debug! ("req.uri () = {:?}", req.uri ());
let path_and_query = req.uri ().path_and_query ().map_or_else (|| req.uri ().path (), http::uri::PathAndQuery::as_str);
let path_and_query = path_and_query.into ();
let (parts, _) = req.into_parts ();
let ptth_req = RequestParts::from_hyper (parts.method, path_and_query, parts.headers)?;
let ptth_resp = state.serve_all (
ptth_req.method,
&ptth_req.uri,
&ptth_req.headers
).await?;
let mut resp = Response::builder ()
.status (StatusCode::from (ptth_resp.parts.status_code));
for (k, v) in ptth_resp.parts.headers {
resp = resp.header (HeaderName::from_str (&k)?, v);
}
let body = ptth_resp.body.map_or_else (Body::empty, |body| {
Body::wrap_stream (ReceiverStream::new (body))
});
Ok (resp.body (body)?)
}
#[derive (Deserialize)]
pub struct ConfigFile {
pub file_server_root: Option <PathBuf>,
pub name: Option <String>,
}
#[tokio::main] #[tokio::main]
async fn main () -> anyhow::Result <()> { async fn main () -> anyhow::Result <()> {
tracing_subscriber::fmt::init (); tracing_subscriber::fmt::init ();
let args = Vec::from_iter (std::env::args_os ()); let path = PathBuf::from ("./config/ptth_server.toml");
let config_file: ConfigFile = load_toml::load (&path)?;
info! ("file_server_root: {:?}", config_file.file_server_root);
ptth_file_server::main (&args).await let addr = SocketAddr::from(([0, 0, 0, 0], 4000));
let metrics_interval = Arc::new (ArcSwap::default ());
let interval_writer = Arc::clone (&metrics_interval);
tokio::spawn (async move {
file_server::metrics::Interval::monitor (interval_writer).await;
});
let state = Arc::new (FileServer::new (
config_file.file_server_root,
&PathBuf::new (),
config_file.name.unwrap_or_else (|| "PTTH File Server".to_string ()),
metrics_interval,
Some (path),
)?);
let make_svc = make_service_fn (|_conn| {
let state = state.clone ();
async {
Ok::<_, String> (service_fn (move |req| {
let state = state.clone ();
handle_all (req, state)
}))
}
});
let (shutdown_rx, forced_shutdown) = ptth_core::graceful_shutdown::init_with_force ();
let server = Server::bind (&addr)
.serve (make_svc)
.with_graceful_shutdown (async move {
shutdown_rx.await.ok ();
});
forced_shutdown.wrap_server (server).await??;
Ok (())
} }

12
crates/ptth_kv/Cargo.toml Normal file
View File

@ -0,0 +1,12 @@
[package]
name = "ptth_kv"
version = "0.1.0"
authors = ["Trish"]
edition = "2018"
[dependencies]
anyhow = "1.0.38"
base64 = "0.13.0"
hyper = { version = "0.14.4", features = ["full"] }
thiserror = "1.0.22"
tokio = { version = "1.8.1", features = ["full"] }

499
crates/ptth_kv/src/main.rs Normal file
View File

@ -0,0 +1,499 @@
use std::{
collections::{
HashMap,
},
iter::FromIterator,
sync::{
Arc,
},
};
use hyper::{
Body,
Request,
Response,
StatusCode,
};
use tokio::{
sync::Mutex,
};
/// HTTP front-end that exposes a shared `Store` over hyper.
pub struct HttpService {
// Shared with every request-handler task.
store: Arc <Store>,
}
/// Top-level key-value store: a fixed set of named status key directories.
/// The set is built once in `new`; all methods take `&self`.
pub struct Store {
status_dirs: HashMap <Vec <u8>, StatusKeyDirectory>,
}
/// Errors returned by `Store` / key-directory operations.
/// `PartialEq` is derived so tests can assert on exact error values.
#[derive (thiserror::Error, Debug, PartialEq)]
pub enum Error {
/// Key exceeds `StatusQuotas::max_key_bytes`.
#[error ("key too long")]
KeyTooLong,
/// The named key directory was never configured in `Store::new`.
#[error ("no such key dir")]
NoSuchKeyDir,
/// Value exceeds `StatusQuotas::max_value_bytes`.
#[error ("value too long")]
ValueTooLong,
}
/// Size limits for one status key directory; enforced on writes.
pub struct StatusQuotas {
/// Maximum number of distinct keys kept before the table is cleared.
pub max_keys: usize,
/// Maximum length of a single key, in bytes.
pub max_key_bytes: usize,
/// Maximum length of a single value, in bytes.
pub max_value_bytes: usize,
/// Maximum total bytes (keys + values) before the table is cleared.
pub max_payload_bytes: usize,
}
/// Result of `get_after`: the entries written after the requested sequence
/// number, plus the directory's current sequence number for the next poll.
pub struct GetAfter {
pub tuples: Vec <(Vec <u8>, Vec <u8>)>,
pub sequence: u64,
}
impl HttpService {
    /// Wraps a `Store` so it can be served over HTTP.
    pub fn new (s: Store) -> Self {
        Self {
            store: Arc::new (s),
        }
    }

    /// Borrows the underlying store.
    pub fn inner (&self) -> &Store {
        &self.store
    }

    /// Binds 127.0.0.1:`port` and serves requests until hyper stops.
    pub async fn serve (&self, port: u16) -> Result <(), hyper::Error> {
        use std::net::SocketAddr;
        use hyper::{
            Server,
            service::{
                make_service_fn,
                service_fn,
            },
        };
        // Each connection (and each request) gets its own Arc handle.
        let make_svc = make_service_fn (|_conn| {
            let state = Arc::clone (&self.store);
            async {
                Ok::<_, String> (service_fn (move |req| {
                    let state = Arc::clone (&state);
                    Self::handle_all (req, state)
                }))
            }
        });
        let addr = SocketAddr::from (([127, 0, 0, 1], port));
        Server::bind (&addr).serve (make_svc).await
    }
}
impl Store {
pub fn new <I> (status_dirs: I)
-> Self
where I: Iterator <Item = (Vec <u8>, StatusQuotas)>
{
let status_dirs = status_dirs
.map (|(name, quotas)| (name, StatusKeyDirectory::new (quotas)))
.collect ();
Self {
status_dirs,
}
}
pub fn list_key_dirs (&self) -> Vec <Vec <u8>> {
self.status_dirs.iter ()
.map (|(k, _)| k.clone ())
.collect ()
}
pub async fn set (&self, name: &[u8], key: &[u8], value: Vec <u8>)
-> Result <(), Error>
{
let dir = self.status_dirs.get (name)
.ok_or (Error::NoSuchKeyDir)?;
dir.set (key, value).await
}
async fn set_multi (&self, name: &[u8], tuples: Vec <(&[u8], Vec <u8>)>)
-> Result <(), Error>
{
let dir = self.status_dirs.get (name)
.ok_or (Error::NoSuchKeyDir)?;
dir.set_multi (tuples).await
}
pub async fn get_after (&self, name: &[u8], thresh: Option <u64>)
-> Result <GetAfter, Error>
{
let dir = self.status_dirs.get (name)
.ok_or (Error::NoSuchKeyDir)?;
dir.get_after (thresh).await
}
}
// End of public interface
// NOTE(review): `SET_BATCH_SIZE`, `StoreCommand`, and `SetStatusCommand`
// are not referenced anywhere in this file — presumably scaffolding for a
// future batched-command API; confirm before deleting.
const SET_BATCH_SIZE: usize = 32;
enum StoreCommand {
SetStatus (SetStatusCommand),
Multi (Vec <StoreCommand>),
}
/// One named key directory: its quotas plus the lock-guarded table.
struct StatusKeyDirectory {
quotas: StatusQuotas,
// This is tokio::sync::Mutex (see the imports), so the guard can be
// held across .await points.
table: Mutex <StatusTable>,
}
/// The actual map plus a monotonically increasing write counter.
#[derive (Default)]
struct StatusTable {
map: HashMap <Vec <u8>, StatusValue>,
sequence: u64,
}
/// A stored value tagged with the write sequence that produced it.
struct StatusValue {
value: Vec <u8>,
sequence: u64,
}
struct SetStatusCommand {
key_dir: Vec <u8>,
key: Vec <u8>,
value: Vec <u8>,
}
impl HttpService {
// Placeholder request handler: currently ignores `req` and `store` and
// answers every request with "hello\n". Routing to the Store API is
// presumably still to come — confirm before relying on this endpoint.
async fn handle_all (req: Request <Body>, store: Arc <Store>)
-> Result <Response <Body>, anyhow::Error>
{
Ok (Response::builder ()
.body (Body::from ("hello\n"))?)
}
}
impl StatusKeyDirectory {
fn new (quotas: StatusQuotas) -> Self {
Self {
quotas,
table: Mutex::new (Default::default ()),
}
}
async fn set (&self, key: &[u8], value: Vec <u8>) -> Result <(), Error>
{
if key.len () > self.quotas.max_key_bytes {
return Err (Error::KeyTooLong);
}
if value.len () > self.quotas.max_value_bytes {
return Err (Error::ValueTooLong);
}
{
let mut guard = self.table.lock ().await;
guard.set (&self.quotas, key, value);
}
Ok (())
}
async fn set_multi (&self, tuples: Vec <(&[u8], Vec <u8>)>) -> Result <(), Error>
{
{
let mut guard = self.table.lock ().await;
for (key, value) in tuples {
guard.set (&self.quotas, key, value);
}
}
Ok (())
}
async fn get_after (&self, thresh: Option <u64>) -> Result <GetAfter, Error> {
let guard = self.table.lock ().await;
Ok (guard.get_after (thresh))
}
}
impl StatusTable {
    /// Total bytes currently stored (keys + values), for quota accounting.
    fn payload_bytes (&self) -> usize {
        self.map.iter ().map (|(k, v)| k.len () + v.len ()).sum ()
    }

    /// Inserts or overwrites `key`, bumping the table's sequence number.
    /// When the key-count or payload-byte quota is exceeded, the whole
    /// table is cleared first — coarse but simple eviction.
    fn set (&mut self, quotas: &StatusQuotas, key: &[u8], value: Vec <u8>) {
        self.sequence += 1;
        if self.map.len () > quotas.max_keys {
            self.map.clear ();
        }
        let new_bytes = key.len () + value.len ();
        if self.payload_bytes () + new_bytes > quotas.max_payload_bytes {
            self.map.clear ();
        }
        let value = StatusValue {
            value,
            sequence: self.sequence,
        };
        // Overwrite in place when the key exists so the key isn't re-allocated.
        if let Some (slot) = self.map.get_mut (key) {
            *slot = value;
        } else {
            self.map.insert (key.to_vec (), value);
        }
    }

    /// Snapshot of all entries written after `thresh` (`None` = everything).
    fn get_after (&self, thresh: Option <u64>) -> GetAfter {
        let thresh = thresh.unwrap_or (0);
        let tuples = self.map.iter ()
            .filter (|(_, value)| value.sequence > thresh)
            .map (|(key, value)| (key.clone (), value.value.clone ()))
            .collect ();
        GetAfter {
            tuples,
            sequence: self.sequence,
        }
    }
}
impl StatusValue {
// Payload size in bytes; used by `StatusTable::payload_bytes` for quota
// accounting (the key's length is added by the caller).
fn len (&self) -> usize {
self.value.len ()
}
}
#[tokio::main]
async fn main () -> Result <(), hyper::Error> {
use std::time::Duration;
use tokio::{
spawn,
time::interval,
};
let service = HttpService::new (Store::new (vec! [
(b"key_dir".to_vec (), StatusQuotas {
max_keys: 4,
max_key_bytes: 16,
max_value_bytes: 16,
max_payload_bytes: 128,
}),
].into_iter ()));
service.serve (4003).await
}
#[cfg (test)]
mod tests {
use tokio::runtime::Runtime;
use super::*;
// Order-insensitive comparison of two GetAfter snapshots: tuples are
// compared as maps because HashMap iteration order is unspecified.
fn get_after_eq (a: &GetAfter, b: &GetAfter) {
assert_eq! (a.sequence, b.sequence);
let a = a.tuples.clone ();
let b = b.tuples.clone ();
let a = HashMap::<Vec <u8>, Vec <u8>>::from_iter (a.into_iter ());
let b = HashMap::from_iter (b.into_iter ());
assert_eq! (a, b);
}
// Functional test: quota errors, missing key dir, and sequence-based
// incremental reads via get_after.
#[test]
fn store () {
let rt = Runtime::new ().unwrap ();
rt.block_on (async {
let s = Store::new (vec! [
(b"key_dir".to_vec (), StatusQuotas {
max_keys: 4,
max_key_bytes: 16,
max_value_bytes: 16,
max_payload_bytes: 128,
}),
].into_iter ());
let mut expected_sequence = 0;
assert_eq! (s.list_key_dirs (), vec! [
b"key_dir".to_vec (),
]);
assert_eq! (
s.set (b"key_dir", b"this key is too long and will cause an error", b"bar".to_vec ()).await,
Err (Error::KeyTooLong)
);
assert_eq! (
s.set (b"key_dir", b"foo", b"this value is too long and will cause an error".to_vec ()).await,
Err (Error::ValueTooLong)
);
assert_eq! (
s.set (b"invalid_key_dir", b"foo", b"bar".to_vec ()).await,
Err (Error::NoSuchKeyDir)
);
// Failed sets must not advance the sequence number.
let ga = s.get_after (b"key_dir", None).await.unwrap ();
assert_eq! (ga.sequence, expected_sequence);
assert_eq! (ga.tuples, vec! []);
s.set (b"key_dir", b"foo_1", b"bar_1".to_vec ()).await.unwrap ();
expected_sequence += 1;
let ga = s.get_after (b"key_dir", None).await.unwrap ();
assert_eq! (ga.sequence, expected_sequence);
assert_eq! (ga.tuples, vec! [
(b"foo_1".to_vec (), b"bar_1".to_vec ()),
]);
get_after_eq (&ga, &GetAfter {
sequence: expected_sequence,
tuples: vec! [
(b"foo_1".to_vec (), b"bar_1".to_vec ()),
]
});
s.set (b"key_dir", b"foo_2", b"bar_2".to_vec ()).await.unwrap ();
expected_sequence += 1;
let ga = s.get_after (b"key_dir", None).await.unwrap ();
get_after_eq (&ga, &GetAfter {
sequence: expected_sequence,
tuples: vec! [
(b"foo_1".to_vec (), b"bar_1".to_vec ()),
(b"foo_2".to_vec (), b"bar_2".to_vec ()),
]
});
// Overwriting foo_1 bumps its per-entry sequence past foo_2's.
s.set (b"key_dir", b"foo_1", b"bar_3".to_vec ()).await.unwrap ();
expected_sequence += 1;
let ga = s.get_after (b"key_dir", None).await.unwrap ();
get_after_eq (&ga, &GetAfter {
sequence: expected_sequence,
tuples: vec! [
(b"foo_1".to_vec (), b"bar_3".to_vec ()),
(b"foo_2".to_vec (), b"bar_2".to_vec ()),
]
});
// Threshold 2 filters out foo_2 (written at sequence 2).
let ga = s.get_after (b"key_dir", Some (2)).await.unwrap ();
get_after_eq (&ga, &GetAfter {
sequence: expected_sequence,
tuples: vec! [
(b"foo_1".to_vec (), b"bar_3".to_vec ()),
]
});
let ga = s.get_after (b"key_dir", Some (3)).await.unwrap ();
get_after_eq (&ga, &GetAfter {
sequence: expected_sequence,
tuples: vec! []
});
});
}
// Timing-based smoke test; only built in release mode because debug
// builds are far slower and would trip the latency assertion.
#[test]
#[cfg (not (debug_assertions))]
fn perf () {
use std::time::Instant;
let rt = Runtime::new ().unwrap ();
rt.block_on (async {
let s = Store::new (vec! [
(b"key_dir".to_vec (), StatusQuotas {
max_keys: 4,
max_key_bytes: 16,
max_value_bytes: 16,
max_payload_bytes: 128,
}),
].into_iter ());
let num_iters = 1_000_000;
let key = b"foo";
let start_time = Instant::now ();
for i in 0..num_iters {
let value = format! ("{}", i);
s.set (b"key_dir", key, value.into ()).await.unwrap ();
}
let end_time = Instant::now ();
let total_dur = end_time - start_time;
let avg_nanos = total_dur.as_nanos () / num_iters;
// NOTE(review): non-string assert! message (dbg!) is deprecated in
// edition 2021 — fine on edition 2018, revisit on upgrade.
assert! (avg_nanos < 250, dbg! (avg_nanos));
});
}
// Same idea as `perf`, but exercising the batched set_multi path.
#[test]
#[cfg (not (debug_assertions))]
fn perf_multi () {
use std::time::Instant;
let rt = Runtime::new ().unwrap ();
rt.block_on (async {
let s = Store::new (vec! [
(b"key_dir".to_vec (), StatusQuotas {
max_keys: 8,
max_key_bytes: 16,
max_value_bytes: 16,
max_payload_bytes: 128,
}),
].into_iter ());
let num_iters = 1_000_000;
let start_time = Instant::now ();
for i in 0..num_iters {
let value = Vec::<u8>::from (format! ("{}", i));
let tuples = vec! [
(&b"foo_0"[..], value.clone ()),
(b"foo_1", value.clone ()),
(b"foo_2", value.clone ()),
(b"foo_3", value.clone ()),
(b"foo_4", value.clone ()),
(b"foo_5", value.clone ()),
(b"foo_6", value.clone ()),
(b"foo_7", value.clone ()),
];
s.set_multi (b"key_dir", tuples).await.unwrap ();
}
let end_time = Instant::now ();
let total_dur = end_time - start_time;
let avg_nanos = total_dur.as_nanos () / (num_iters * 8);
assert! (avg_nanos < 150, dbg! (avg_nanos));
});
}
}

View File

@ -1,43 +0,0 @@
[package]
name = "ptth_multi_call_server"
# Keep this synced with the Debian package name
# dpkg doesn't let us do side-by-side versioning, shrug
version = "1.1.1"
authors = ["Trish"]
edition = "2018"
license = "AGPL-3.0"
[dependencies]
# Cookie 01FYZ3SDP2XABT7W19ACQVYKXT
# Dependencies should be in sync because ptth_multi_call_server intentionally
# tries to re-use as much code as possible between all of its subcommands,
# including ptth_server and ptth_file_server.
anyhow = "1.0.38"
ctrlc = "3.2.1"
futures-util = "0.3.9"
hex = "0.4.3"
ptth_diceware = { path = "../ptth_diceware" }
ptth_file_server = { path = "../ptth_file_server_bin" }
ptth_server = { path = "../ptth_server" }
ptth_quic = { path = "../ptth_quic" }
rand = "0.8.4"
rusty_ulid = "0.10.1"
sha2 = "0.9.8"
tokio = { version = "1.8.1", features = ["full"] }
tracing-subscriber = "0.2.16"
tracing = "0.1.25"
[dependencies.reqwest]
version = "0.11.1"
default-features = false
features = ["stream", "rustls-tls", "hyper-rustls"]
[package.metadata.deb]
assets = [
["target/release/ptth_multi_call_server", "usr/bin/ptth_multi_call_server_1.1", "755"],
]
name = "ptth-multi-call-server-1.1"

View File

@ -1,121 +0,0 @@
use std::{
ffi::OsString,
io::{
self,
Write,
},
time::Duration,
};
use anyhow::{
anyhow,
bail,
};
use futures_util::StreamExt;
use reqwest::{
StatusCode,
};
use sha2::{
Digest,
Sha512,
};
use tokio::{
sync::mpsc,
task::{
spawn_blocking,
},
};
/// `download` subcommand: streams a URL to stdout while hashing it with
/// SHA-512, then prints the hash (or fails if `--expect-sha512` mismatches).
///
/// Args: `[--expect-sha512 <hex-prefix>] <url>` — the first non-flag
/// argument is taken as the URL and argument parsing stops there.
pub async fn main (args: &[OsString]) -> anyhow::Result <()> {
let mut url = None;
let mut expected_sha512 = None;
// Skip arg 0 (the subcommand name).
let mut args = args [1..].iter ();
loop {
let arg = match args.next () {
None => break,
Some (x) => x,
};
match arg.to_str ().ok_or_else (|| anyhow! ("All arguments must be valid UTF-8"))?
{
"--help" => println! ("For now, just look at the source code"),
"--expect-sha512" => {
let expected = args.next ().ok_or_else (|| anyhow! ("--expect-sha512 needs an argument"))?;
expected_sha512 = Some (expected.to_str ().ok_or_else (|| anyhow! ("--expect-sha512's argument must be valid Unicode"))?);
}
arg => {
url = Some (arg);
break;
},
}
}
let url = match url {
None => bail! ("URL argument is required"),
Some (x) => x,
};
// Cookie 01FYZ3W64SM6KYNP48J6EWSCEF
// Try to keep the Clients similar here
let client = reqwest::Client::builder ()
.connect_timeout (Duration::from_secs (30))
.build ()?;
let resp = client.get (url)
.send ().await?;
if resp.status () != StatusCode::OK {
bail! ("Expected 200 OK, got {}", resp.status ());
}
let mut resp_stream = resp.bytes_stream ();
// The hasher is owned by a task because it makes ownership simpler
// (channel capacity 1 provides backpressure against the download).
let (hash_tx, mut hash_rx) = mpsc::channel (1);
let hasher_task = spawn_blocking (move || {
let mut hasher = Sha512::new ();
// NOTE(review): Handle::block_on from inside spawn_blocking — runs the
// async recv on a blocking-pool thread; confirm this holds up under
// tokio runtime shutdown ordering.
while let Some (chunk) = tokio::runtime::Handle::current ().block_on (hash_rx.recv ()) {
hasher.update (&chunk);
}
anyhow::Result::<_>::Ok (hasher.finalize ())
});
while let Some (chunk) = resp_stream.next ().await {
let chunk = chunk?;
// Feed the hasher and write to stdout; chunks are Bytes, so clones
// are cheap reference-count bumps.
hash_tx.send (chunk.clone ()).await?;
{
let chunk = chunk.clone ();
spawn_blocking (move || {
io::stdout ().write_all (&chunk)?;
anyhow::Result::<_>::Ok (())
}).await??;
}
}
// Closing the channel lets the hasher task finish its loop.
drop (hash_tx);
let hash = hasher_task.await??;
let actual_sha512 = hex::encode (&hash);
match expected_sha512 {
None => eprintln! ("Actual SHA512 = {}", actual_sha512),
// Prefix match, so callers can paste a shortened hash.
Some (expected) => if ! actual_sha512.starts_with (&expected) {
bail! ("Expected SHA512 prefix {}, actual SHA512 {}", expected, actual_sha512);
},
}
Ok (())
}

View File

@ -1,172 +0,0 @@
use std::{
ffi::OsString,
iter::FromIterator,
};
use tokio::sync::watch;
mod download;
mod ulid;
/// Which embedded tool the multi-call binary should act as, selected from
/// the executable name (arg 0) or the first argument — see `parse_args`.
#[derive (Clone, Copy, Debug, PartialEq)]
enum Subcommand {
Diceware,
Download,
PtthServer,
PtthFileServer,
PtthQuicEndServer,
Ulid,
}
/// Multi-call dispatcher (busybox style): picks a subcommand from argv and
/// forwards the remaining arguments to that tool's own `main`.
#[tokio::main]
async fn main () -> anyhow::Result <()> {
use Subcommand::*;
tracing_subscriber::fmt::init ();
let args = Vec::from_iter (std::env::args_os ());
let (subcommand, args) = parse_args (&args)?;
match subcommand {
Diceware => {
ptth_diceware::main ();
Ok (())
},
Download => download::main (args).await,
PtthServer => ptth_server::executable::main (args).await,
PtthFileServer => ptth_file_server::main (args).await,
PtthQuicEndServer => {
// The QUIC end server takes an explicit shutdown channel, so wire
// Ctrl+C to it here rather than inside the library.
let (shutdown_tx, shutdown_rx) = watch::channel (false);
ctrlc::set_handler (move || {
shutdown_tx.send (true).expect ("Couldn't forward Ctrl+C signal");
})?;
tracing::trace! ("Set Ctrl+C handler");
ptth_quic::executable_end_server::main (args, Some (shutdown_rx)).await?;
Ok (())
}
Ulid => ulid::main (args).await,
}
}
/// Maps an executable or subcommand name to a `Subcommand`.
///
/// A trailing `.exe` (Windows) is stripped first, then known names are
/// matched as suffixes so full paths like `/usr/bin/ptth_server` work.
/// Returns `None` when nothing matches.
fn parse_subcommand (arg: &str) -> Option <Subcommand>
{
    use Subcommand::*;
    let map = [
        ("diceware", Diceware),
        ("download", Download),
        ("ptth_server", PtthServer),
        ("ptth_file_server", PtthFileServer),
        ("ptth_quic_end_server", PtthQuicEndServer),
        ("ulid", Ulid),
    ];
    let arg = arg.strip_suffix (".exe").unwrap_or (arg);
    map.iter ()
        .find (|(suffix, _)| arg.ends_with (*suffix))
        .map (|(_, subcommand)| *subcommand)
}
/// Determines the subcommand from arg 0 (the multi-call exe name) or, if
/// that doesn't match, from arg 1.
///
/// Returns the subcommand plus the slice the subcommand should treat as
/// its own argv (its name is at index 0 of the returned slice).
fn parse_args (args: &[OsString]) -> anyhow::Result <(Subcommand, &[OsString])>
{
    let arg_0 = match args.first () {
        Some (x) => x,
        None => anyhow::bail! ("arg 0 must be the exe name"),
    };
    let arg_0 = arg_0.to_str ().ok_or_else (|| anyhow::anyhow! ("arg 0 should be valid UTF-8"))?;
    // Exe name itself names the tool (e.g. a `ptth_server` symlink).
    if let Some (subcommand) = parse_subcommand (arg_0) {
        return Ok ((subcommand, args));
    }
    let arg_1 = match args.get (1) {
        Some (x) => x,
        None => anyhow::bail! ("arg 1 must be the subcommand if arg 0 is not"),
    };
    let arg_1 = arg_1.to_str ().ok_or_else (|| anyhow::anyhow! ("arg 1 subcommand should be valid UTF-8"))?;
    // Otherwise the first argument names the tool; drop arg 0 for it.
    if let Some (subcommand) = parse_subcommand (arg_1) {
        return Ok ((subcommand, &args [1..]));
    }
    anyhow::bail! ("Subcommand must be either arg 0 (exe name) or arg 1")
}
#[cfg (test)]
mod tests {
use super::*;
// Table-driven coverage of parse_args: inputs that must fail, then
// inputs paired with the expected (subcommand, forwarded-argv) result.
#[test]
fn multi_call () -> anyhow::Result <()> {
let negative_cases = vec! [
vec! [],
vec! ["invalid_exe_name"],
vec! ["ptth_multi_call_server"],
vec! ["ptth_server.ex"],
vec! ["ptth_multi_call_server", "invalid_subcommand"],
];
for input in &negative_cases {
let input: Vec <_> = input.iter ().map (OsString::from).collect ();
let actual = parse_args (&input);
assert! (actual.is_err ());
}
let positive_cases = vec! [
(vec! ["ptth_server.exe"], (Subcommand::PtthServer, vec! ["ptth_server.exe"])),
(vec! ["ptth_server"], (Subcommand::PtthServer, vec! ["ptth_server"])),
(vec! ["ptth_server", "--help"], (Subcommand::PtthServer, vec! ["ptth_server", "--help"])),
(vec! ["ptth_file_server"], (Subcommand::PtthFileServer, vec! ["ptth_file_server"])),
(vec! ["ptth_quic_end_server", "--help"], (Subcommand::PtthQuicEndServer, vec! ["ptth_quic_end_server", "--help"])),
(vec! ["ptth_multi_call_server", "ptth_server"], (Subcommand::PtthServer, vec! ["ptth_server"])),
(
vec! [
"ptth_multi_call_server",
"ptth_server",
"--help"
],
(
Subcommand::PtthServer,
vec! [
"ptth_server",
"--help"
]
)
),
(
vec! [
"invalid_exe_name",
"ptth_server",
"--help"
],
(
Subcommand::PtthServer,
vec! [
"ptth_server",
"--help"
]
)
),
];
for (input, (expected_subcommand, expected_args)) in &positive_cases {
let input: Vec <_> = input.iter ().map (OsString::from).collect ();
let (actual_subcommand, actual_args) = parse_args (&input)?;
assert_eq! (expected_subcommand, &actual_subcommand);
assert_eq! (expected_args, actual_args);
}
Ok (())
}
}

View File

@ -1,12 +0,0 @@
use std::{
ffi::OsString,
};
use anyhow::Result;
/// `ulid` subcommand: prints one freshly generated ULID to stdout.
/// All arguments are ignored.
pub async fn main (_args: &[OsString]) -> Result <()>
{
    let ulid = rusty_ulid::generate_ulid_string ();
    println! ("{}", ulid);
    Ok (())
}

View File

@ -1,37 +0,0 @@
[package]
name = "ptth_quic"
version = "0.1.0"
authors = ["Trish"]
edition = "2018"
license = "AGPL-3.0"
repository = "https://six-five-six-four.com/git/reactor/ptth"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.66"
arc-swap = "1.5.1"
base64 = "0.20.0"
ctrlc = "3.2.4"
futures-util = "0.3.25"
hyper = { version = "0.14.23", features = ["http1", "server", "stream", "tcp"] }
quinn = "0.9.3"
rand = "0.8.5"
rcgen = "0.10.0"
ring = "0.16.20"
rmp-serde = "1.1.1"
rustls = "0.20.7"
rusty_ulid = "1.0.0"
serde = "1.0.151"
serde_json = "1.0.89"
structopt = "0.3.26"
tokio = { version = "1.23.0", features = ["full"] }
tracing-subscriber = "0.3.16"
tracing = "0.1.37"
udp_over_tcp = { path = "../udp_over_tcp" }
[dependencies.reqwest]
version = "0.11.13"
default-features = false
features = ["stream", "rustls-tls", "hyper-rustls"]

View File

@ -1,136 +0,0 @@
use structopt::StructOpt;
use tokio::{
sync::watch,
};
use ptth_quic::{
client_proxy::{
ForwardingParams,
forward_port,
},
prelude::*,
};
use protocol::PeerId;
// CLI options for the P2 client proxy. Plain `//` comments (not `///`)
// so structopt's generated --help text is unchanged.
#[derive (Debug, StructOpt)]
struct Opt {
	// Relay (P3) address; defaults to 127.0.0.1:30380 in into_config
	#[structopt (long)]
	relay_addr: Option <String>,
	// Identifies this client to the relay
	#[structopt (long)]
	client_id: Option <PeerId>,
	// The end server (P4) to tunnel to
	#[structopt (long)]
	server_id: Option <PeerId>,
	// Local TCP port to accept P1 connections on; default 30381
	#[structopt (long)]
	client_tcp_port: Option <u16>,
	// TCP port on the end server's side; default 30382
	#[structopt (long)]
	server_tcp_port: Option <u16>,
}
/// Binary entry point: parse CLI options, resolve them into a full
/// config (loads the relay cert from disk), then run the P2 client
/// until its forwarding task finishes.
#[tokio::main]
async fn main () -> anyhow::Result <()> {
	tracing_subscriber::fmt::init ();
	let conf = Opt::from_args ().into_config ().await?;
	let client = P2Client::connect (conf)?;
	client.run ().await
}
/// A P2 client proxy: owns the local QUIC endpoint and the config
/// needed to reach the P3 relay.
pub struct P2Client {
	endpoint: quinn::Endpoint,
	conf: Arc <Config>,
}
impl P2Client {
	/// Creates the local QUIC endpoint, trusting the relay's certificate.
	/// No network traffic happens until `run`.
	pub fn connect (conf: Config) -> anyhow::Result <Self> {
		let endpoint = make_client_endpoint ("0.0.0.0:0".parse ()?, &[&conf.relay_cert])?;
		let conf = Arc::new (conf);
		Ok (Self {
			endpoint,
			conf,
		})
	}
	/// Connects to the P3 relay, then accepts local TCP connections on
	/// `client_tcp_port` and forwards each through the relay to the
	/// configured end server. Returns when the forwarding task ends.
	pub async fn run (&self) -> anyhow::Result <()> {
		debug! ("P2 client connecting to P3 relay server");
		let conf = Arc::clone (&self.conf);
		let connection = protocol::p2_connect_to_p3 (&self.endpoint, conf.relay_addr, &conf.client_id).await?;
		let client_tcp_port = conf.client_tcp_port;
		debug! ("Accepting local TCP connections from P1 at {}", client_tcp_port);
		// End of per-port stuff
		// Beginning of per-connection stuff
		// The flag is never flipped here; keeping the tx side alive just
		// holds the channel open for the forwarding task's lifetime.
		let (_shutdown_flag_tx, shutdown_flag_rx) = watch::channel (true);
		let task_tcp_server = {
			let connection = connection.clone ();
			let server_id = conf.server_id.clone ();
			let server_tcp_port = conf.server_tcp_port;
			let listener = TcpListener::bind (("127.0.0.1", client_tcp_port)).await?;
			trace! ("Accepting local TCP connections from P1 on {}", client_tcp_port);
			tokio::spawn (async move {
				forward_port (
					listener,
					connection,
					ForwardingParams {
						client_tcp_port,
						server_id,
						server_tcp_port,
					},
					shutdown_flag_rx,
				).await?;
				Ok::<_, anyhow::Error> (())
			})
		};
		task_tcp_server.await??;
		Ok (())
	}
}
/// A filled-out config for constructing a P2 client
#[derive (Clone)]
pub struct Config {
	// Local TCP port where P1 connections are accepted
	client_tcp_port: u16,
	// TCP port on the end server's side
	server_tcp_port: u16,
	client_id: String,
	server_id: String,
	// Address of the P3 relay's QUIC endpoint
	relay_addr: SocketAddr,
	// Certificate bytes (read from quic_server.crt) used to trust the relay
	relay_cert: Vec <u8>,
}
impl Opt {
pub async fn into_config (self) -> anyhow::Result <Config> {
let client_tcp_port = self.client_tcp_port.unwrap_or (30381);
let server_tcp_port = self.server_tcp_port.unwrap_or (30382);
let client_id = self.client_id.unwrap_or_else (|| "bogus_client".to_string ());
let server_id = self.server_id.unwrap_or_else (|| "bogus_server".to_string ());
let relay_addr = self.relay_addr.unwrap_or_else (|| String::from ("127.0.0.1:30380")).parse ()?;
// Begin I/O
let relay_cert = tokio::fs::read ("ptth_quic_output/quic_server.crt").await?;
Ok (Config {
client_tcp_port,
server_tcp_port,
client_id,
server_id,
relay_addr,
relay_cert,
})
}
}

View File

@ -1,20 +0,0 @@
use tokio::sync::watch;
use ptth_quic::prelude::*;
use ptth_quic::executable_end_server as server;
/// Entry point for the standalone P4 end server binary.
/// Wires Ctrl+C into a watch channel so the server can shut down
/// gracefully instead of being killed mid-connection.
#[tokio::main]
async fn main () -> anyhow::Result <()> {
	tracing_subscriber::fmt::init ();
	let args: Vec <_> = std::env::args_os ().collect ();
	let (shutdown_tx, shutdown_rx) = watch::channel (false);
	ctrlc::set_handler (move || {
		shutdown_tx.send (true).expect ("Couldn't forward Ctrl+C signal");
	})?;
	trace! ("Set Ctrl+C handler");
	server::main (&args, Some (shutdown_rx)).await
}

View File

@ -1,38 +0,0 @@
use tokio::sync::watch;
use ptth_quic::prelude::*;
use ptth_quic::executable_relay_server as relay;
/// Entry point for the standalone P3 relay server binary.
/// Runs the relay until it exits on its own or Ctrl+C is pressed.
#[tokio::main]
async fn main () -> anyhow::Result <()> {
	use structopt::StructOpt;
	tracing_subscriber::fmt::init ();
	let opt = relay::Opt::from_args ();
	// Ctrl+C flips `running` to false, which ends the select below
	let (running_tx, mut running_rx) = watch::channel (true);
	ctrlc::set_handler (move || {
		running_tx.send (false).expect ("Couldn't forward Ctrl+C signal");
	})?;
	trace! ("Set Ctrl+C handler");
	let app = relay::App::new (opt).await?;
	println! ("Base64 cert: {}", base64::encode (app.server_cert ()));
	println! ("Listening on {}", app.listen_addr ());
	// Publish the self-signed cert so clients and end servers can trust us
	tokio::fs::create_dir_all ("ptth_quic_output").await?;
	tokio::fs::write ("ptth_quic_output/quic_server.crt", app.server_cert ()).await?;
	tokio::select! {
		_val = app.run () => {
		},
		_val = running_rx.changed () => {
		},
	}
	Ok (())
}

View File

@ -1,32 +0,0 @@
#[cfg (test)]
mod test {
	/// Round-trips an Ed25519 sign/verify through `ring`, as a sanity
	/// check that the crypto dependency behaves as expected.
	#[test]
	fn signing () -> anyhow::Result <()> {
		use std::fs;
		use ring::{
			signature::{
				self,
				Ed25519KeyPair,
				KeyPair,
			},
		};
		fs::create_dir_all ("untracked")?;
		let rng = ring::rand::SystemRandom::new ();
		// ring's errors are opaque; map them to readable anyhow errors
		let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8 (&rng).map_err (|_| anyhow::anyhow! ("generate_pkcs8"))?;
		let key_pair = Ed25519KeyPair::from_pkcs8 (pkcs8_bytes.as_ref ()).map_err (|_| anyhow::anyhow! ("from_pkcs8"))?;
		const MESSAGE: &[u8] = b":V";
		let sig = key_pair.sign (MESSAGE);
		// Verify with only the public key, as a remote peer would
		let peer_public_key_bytes = key_pair.public_key ().as_ref ();
		let peer_public_key = signature::UnparsedPublicKey::new (&signature::ED25519, peer_public_key_bytes);
		peer_public_key.verify (MESSAGE, sig.as_ref ()).map_err (|_| anyhow::anyhow! ("verify"))?;
		Ok (())
	}
}

View File

@ -1,235 +0,0 @@
use structopt::StructOpt;
use tokio::{
net::TcpStream,
sync::watch,
};
use crate::prelude::*;
use protocol::PeerId;
/// A partially-filled-out config that structopt can deal with
/// Try to turn this into a Config as soon as possible.
#[derive (Debug, StructOpt)]
pub struct Opt {
	// Relay (P3) address; defaults to 127.0.0.1:30380 in into_config
	#[structopt (long)]
	relay_addr: Option <String>,
	// This end server's ID, as known to the relay
	#[structopt (long)]
	server_id: Option <PeerId>,
	// Echo bytes back instead of connecting to a local TCP service
	#[structopt (long)]
	debug_echo: bool,
	// If set, fetch the relay cert over HTTP instead of reading it from disk
	#[structopt (long)]
	cert_url: Option <String>,
	// Tunnel QUIC's UDP traffic over a TCP connection to the relay
	#[structopt (long)]
	use_udp_over_tcp: Option <bool>,
}
/// A filled-out config for constructing an end server
#[derive (Clone)]
pub (crate) struct Config {
	// Echo bytes back instead of connecting to a local TCP service
	pub debug_echo: bool,
	// This end server's ID, as known to the relay
	pub id: String,
	pub relay_addr: SocketAddr,
	// Certificate bytes used to trust the relay's QUIC endpoint
	pub relay_cert: Vec <u8>,
	// Tunnel QUIC's UDP traffic over a TCP connection to the relay
	pub use_udp_over_tcp: bool,
}
/// Runs an end server until `shutdown_rx` (if given) flips to true.
/// With no shutdown channel, this awaits the server task indefinitely.
/// `args` is the full argv, parsed with structopt.
pub async fn main (args: &[OsString], shutdown_rx: Option <watch::Receiver <bool>>) -> anyhow::Result <()> {
	trace! ("executable_end_server::main");
	let opt = Opt::from_iter (args);
	let conf = opt.into_config ().await?;
	let (end_server, shutdown_tx) = P4EndServer::connect (conf).await?;
	let run_task = {
		tokio::spawn (async move {
			end_server.run ().await?;
			Ok::<_, anyhow::Error> (())
		})
	};
	if let Some (mut shutdown_rx) = shutdown_rx {
		// Wait for the caller to signal shutdown, then forward it to the server
		while ! *shutdown_rx.borrow () {
			shutdown_rx.changed ().await?;
		}
		trace! ("P4 end server shutting down...");
		shutdown_tx.send (true)?
	}
	run_task.await??;
	trace! ("P4 end server shut down gracefully.");
	Ok (())
}
impl Opt {
	/// Converts self into a Config that the server can use.
	/// Performs I/O to load the relay cert from disk or from HTTP.
	/// Fails if arguments can't be parsed or if I/O fails.
	pub (crate) async fn into_config (self) -> anyhow::Result <Config> {
		let id = self.server_id.clone ().unwrap_or_else (|| "bogus_server".to_string ());
		let relay_addr: SocketAddr = self.relay_addr.clone ().unwrap_or_else (|| String::from ("127.0.0.1:30380")).parse ()?;
		// Do I/O after all parsing is done.
		// We don't want to waste a network request only to come back and error
		// out on like "127.oooo.1" not parsing into a relay address.
		let relay_cert: Vec <u8> = match self.cert_url.as_ref () {
			// The URL source wins when both are available
			Some (url) => reqwest::get (url).await?.bytes ().await?.into_iter ().collect (),
			None => tokio::fs::read ("ptth_quic_output/quic_server.crt").await?,
		};
		Ok (Config {
			debug_echo: self.debug_echo,
			id,
			relay_addr,
			relay_cert,
			use_udp_over_tcp: self.use_udp_over_tcp.unwrap_or (false),
		})
	}
}
/// A connected P4 end server: holds the QUIC connection to the relay
/// and the receiver half of the graceful-shutdown signal.
pub struct P4EndServer {
	conf: Config,
	conn: quinn::Connection,
	shutdown_rx: watch::Receiver <bool>,
}
impl P4EndServer {
	/// Connects to the P3 relay and returns the server plus a sender for
	/// requesting graceful shutdown.
	/// When `use_udp_over_tcp` is set, a local UDP<->TCP bridge task is
	/// spawned and the QUIC connection is redirected through it.
	pub (crate) async fn connect (conf: Config) -> anyhow::Result <(Self, watch::Sender <bool>)> {
		debug! ("P4 end server making its QUIC endpoint");
		let endpoint = make_client_endpoint ("0.0.0.0:0".parse ()?, &[&conf.relay_cert])?;
		let conf = if conf.use_udp_over_tcp {
			// Bridge: our QUIC packets -> local UDP socket -> TCP to the relay
			let udp_sock = UdpSocket::bind (SocketAddrV4::new (Ipv4Addr::UNSPECIFIED, 0)).await?;
			udp_sock.connect ((Ipv4Addr::LOCALHOST, endpoint.local_addr ()?.port ())).await?;
			let udp_local_server_port = udp_sock.local_addr ()?.port ();
			let tcp_sock = TcpSocket::new_v4 ()?;
			let tcp_conn = tcp_sock.connect (conf.relay_addr).await?;
			tokio::spawn (async move {
				udp_over_tcp::client::main_with_sockets (udp_sock, tcp_conn).await
			});
			// Point the QUIC connection at the local end of the tunnel
			Config {
				debug_echo: conf.debug_echo,
				id: conf.id,
				relay_addr: SocketAddr::V4 (SocketAddrV4::new (Ipv4Addr::LOCALHOST, udp_local_server_port)),
				relay_cert: conf.relay_cert,
				use_udp_over_tcp: true,
			}
		}
		else {
			conf
		};
		debug! ("P4 end server connecting to P3 relay server");
		let conn = protocol::p4_connect_to_p3 (
			&endpoint,
			conf.relay_addr,
			&conf.id
		).await?;
		debug! ("Connected to relay server");
		let (shutdown_tx, shutdown_rx) = watch::channel (false);
		Ok ((P4EndServer {
			conf,
			conn,
			shutdown_rx,
		}, shutdown_tx))
	}
	/// Accepts bi streams from the relay until shutdown is signalled;
	/// each stream is handled concurrently in its own task.
	pub (crate) async fn run (self) -> anyhow::Result <()> {
		trace! ("Accepting bi streams from P3");
		let mut shutdown_rx = self.shutdown_rx.clone ();
		let conf = Arc::new (self.conf);
		loop {
			tokio::select! {
				_ = shutdown_rx.changed () => {
					if *shutdown_rx.borrow () {
						trace! ("P4 incoming bi streams task caught graceful shutdown");
						break;
					}
				}
				stream_opt = self.conn.accept_bi () => {
					let (relay_send, relay_recv) = stream_opt?;
					tokio::spawn (handle_bi_stream (Arc::clone (&conf), relay_send, relay_recv));
				}
			};
		}
		Ok (())
	}
	/// True once graceful shutdown has been requested.
	pub fn shutting_down (&self) -> bool {
		*self.shutdown_rx.borrow ()
	}
}
/// Handles one incoming bi-directional stream from the relay.
/// The first message on the stream says what the relay wants; currently
/// the only variant is a new PTTH connection.
async fn handle_bi_stream (
	conf: Arc <Config>,
	relay_send: quinn::SendStream,
	mut relay_recv: quinn::RecvStream,
) -> anyhow::Result <()>
{
	match protocol::p4_accept_p3_stream (&mut relay_recv).await? {
		protocol::P3ToP4Stream::NewPtthConnection {
			client_id,
			..
		} => handle_new_ptth_connection (conf, relay_send, relay_recv, client_id).await?,
	}
	Ok (())
}
/// Services one PTTH connection the relay opened on behalf of a client:
/// authorizes it, reads the P5 request, then relays bytes between the
/// relay stream pair and either a local TCP connection or the built-in
/// echo server.
async fn handle_new_ptth_connection (
	conf: Arc <Config>,
	mut relay_send: quinn::SendStream,
	mut relay_recv: quinn::RecvStream,
	_client_id: String,
) -> anyhow::Result <()>
{
	// TODO: Check authorization for P2 --> P4
	protocol::p4_authorize_p2_connection (&mut relay_send).await?;
	let p4_to_p5_req = protocol::p4_expect_p5_request (&mut relay_recv).await?;
	// TODO: Check authorization for P1 --> P5
	protocol::p4_authorize_p1_connection (&mut relay_send).await?;
	debug! ("Started PTTH connection");
	if conf.debug_echo {
		// `write` may perform a partial write; `write_all` guarantees the
		// whole banner is sent before echoing begins.
		relay_send.write_all (b"Connected to P4=P5 debug echo server\n").await?;
		debug! ("Relaying bytes using internal debug echo server (P4=P5)");
		tokio::io::copy (&mut relay_recv, &mut relay_send).await?;
	}
	else {
		// Connect to the local (P5) service the client asked for
		let stream = TcpStream::connect (("127.0.0.1", p4_to_p5_req.port)).await?;
		let (local_recv, local_send) = stream.into_split ();
		trace! ("Relaying bytes...");
		let ptth_conn = crate::connection::NewConnection {
			local_send,
			local_recv,
			relay_send,
			relay_recv,
		}.build ();
		ptth_conn.wait_for_close ().await?;
	}
	Ok (())
}

View File

@ -1,568 +0,0 @@
use hyper::{
Body,
Request,
Response,
Server,
service::{
make_service_fn,
service_fn,
},
StatusCode,
};
use structopt::StructOpt;
use crate::prelude::*;
use protocol::PeerId;
// CLI options for the P3 relay server. Plain `//` comments keep
// structopt's --help output unchanged.
#[derive (Debug, StructOpt)]
pub struct Opt {
	// QUIC listen address; defaults to 0.0.0.0:30380 in App::new
	#[structopt (long)]
	pub (crate) listen_addr: Option <String>,
	// Optional TCP port for accepting UDP-over-TCP tunnels
	#[structopt (long)]
	pub (crate) tcp_listen_port: Option <u16>,
}
/// The relay server: its QUIC endpoint, the self-signed certificate
/// peers must trust, and an optional TCP listener for UDP-over-TCP.
pub struct App {
	endpoint: quinn::Endpoint,
	// Actual bound address (useful when the requested port was 0)
	listen_addr: SocketAddr,
	pub (crate) metrics: Arc <RwLock <Metrics>>,
	server_cert: Vec <u8>,
	tcp_listener: Option <udp_over_tcp::server::Listener>,
}
/// Counters exposed to tests and monitoring via `App::metrics`.
#[derive (Default)]
pub (crate) struct Metrics {
	pub (crate) connected_end_servers: usize,
}
impl App {
	/// Binds the QUIC endpoint (and the optional UDP-over-TCP listener)
	/// and generates the self-signed server certificate.
	pub async fn new (opt: Opt) -> anyhow::Result <Self> {
		// The config file is optional; fall back to CLI options alone
		let config = load_config ().await.ok ();
		let listen_addr = opt.listen_addr.unwrap_or_else (|| String::from ("0.0.0.0:30380")).parse ()?;
		let (endpoint, server_cert) = make_server_endpoint (listen_addr)?;
		// Re-read the address in case the OS picked the port (port 0)
		let listen_addr = endpoint.local_addr ()?;
		// The CLI option wins over the config file
		let tcp_port = opt.tcp_listen_port.or (config.map (|cfg| cfg.tcp_listen_port).flatten ());
		let tcp_listener = if let Some (tcp_port) = tcp_port {
			let cfg = udp_over_tcp::server::Config {
				tcp_port,
				udp_port: listen_addr.port (),
			};
			Some (udp_over_tcp::server::Listener::new (cfg).await?)
		}
		else {
			None
		};
		Ok (Self {
			endpoint,
			listen_addr,
			metrics: Default::default (),
			server_cert,
			tcp_listener,
		})
	}
	/// The actual bound QUIC address.
	pub fn listen_addr (&self) -> SocketAddr {
		self.listen_addr
	}
	/// The self-signed cert that clients and end servers must trust.
	pub fn server_cert (&self) -> &[u8] {
		&self.server_cert
	}
	/// The actual bound UDP-over-TCP port, if that listener is enabled.
	pub fn tcp_listen_port (&self) -> anyhow::Result <Option <u16>> {
		match self.tcp_listener.as_ref () {
			None => Ok (None),
			Some (tcp_listener) => Ok (tcp_listener.tcp_port ()?.into ()),
		}
	}
	/// Runs the relay until either the QUIC server or the debug HTTP
	/// server exits. Also spawns background tasks: config hot-reload
	/// every 60 s, the optional UDP-over-TCP listener, and a one-shot
	/// startup webhook POST.
	pub async fn run (self) -> anyhow::Result <()> {
		let Self {
			endpoint,
			listen_addr: _,
			metrics,
			server_cert: _,
			tcp_listener,
		} = self;
		let mut relay_state = RelayState::default ();
		relay_state.metrics = metrics;
		// A missing/bad config file is not fatal; log and continue
		if let Err (e) = relay_state.reload_config ().await {
			error! ("{:?}", e);
		}
		let relay_state = Arc::new (relay_state);
		let make_svc = {
			let relay_state = Arc::clone (&relay_state);
			make_service_fn (move |_conn| {
				let relay_state = Arc::clone (&relay_state);
				async move {
					Ok::<_, String> (service_fn (move |req| {
						let relay_state = Arc::clone (&relay_state);
						handle_http (req, relay_state)
					}))
				}
			})
		};
		// Debug/status HTTP endpoint on a fixed port
		let http_addr = SocketAddr::from (([0, 0, 0, 0], 4004));
		let http_server = Server::bind (&http_addr);
		let _task_reload_config = {
			let relay_state = Arc::clone (&relay_state);
			tokio::spawn (async move {
				let mut interval = tokio::time::interval (std::time::Duration::from_secs (60));
				interval.set_missed_tick_behavior (tokio::time::MissedTickBehavior::Skip);
				loop {
					interval.tick ().await;
					relay_state.reload_config ().await.ok ();
				}
			})
		};
		let task_quic_server = {
			let relay_state = Arc::clone (&relay_state);
			tokio::spawn (async move {
				while let Some (conn) = endpoint.accept ().await {
					let relay_state = Arc::clone (&relay_state);
					// Each new peer QUIC connection gets its own task
					tokio::spawn (async move {
						let active = relay_state.stats.quic.connect ();
						debug! ("QUIC connections: {}", active);
						match handle_quic_connection (Arc::clone (&relay_state), conn).await {
							Ok (_) => (),
							Err (e) => warn! ("handle_quic_connection `{:?}`", e),
						}
						let active = relay_state.stats.quic.disconnect ();
						debug! ("QUIC connections: {}", active);
					});
				}
				Ok::<_, anyhow::Error> (())
			})
		};
		let task_http_server = tokio::spawn (async move {
			http_server.serve (make_svc).await?;
			Ok::<_, anyhow::Error> (())
		});
		debug! ("Serving HTTP on {:?}", http_addr);
		if let Some (tcp_listener) = tcp_listener {
			tokio::spawn (async move {
				if let Err (e) = tcp_listener.run ().await {
					eprintln! ("udp_over_tcp::server::main exited with err {:?}", e);
				}
				Ok::<_, anyhow::Error> (())
			});
		}
		{
			// Fire-and-forget startup notification
			let config = relay_state.config.load ();
			// NOTE(review): leftover debug print — consider removing
			dbg! (&config.webhook_url);
			if let Some (webhook_url) = config.webhook_url.clone () {
				let j = json! ({
					"text": "Booting up",
				}).to_string ();
				let http_client = relay_state.http_client.clone ();
				tokio::spawn (async move {
					http_client.post (webhook_url).body (j).send ().await
				});
			}
		}
		tokio::select! {
			_val = task_quic_server => {
				eprintln! ("QUIC relay server exited, exiting");
			},
			_val = task_http_server => {
				eprintln! ("HTTP server exited, exiting");
			},
		}
		Ok (())
	}
}
/// Debug endpoint: answers every HTTP request with the list of
/// currently registered end server IDs, as plain text.
async fn handle_http (_req: Request <Body>, relay_state: Arc <RelayState>)
-> anyhow::Result <Response <Body>>
{
	// Hold the lock only long enough to snapshot the keys
	let debug_string = {
		let p4_server_proxies = relay_state.p4_server_proxies.lock ().await;
		let server_ids: Vec <_> = p4_server_proxies.keys ().collect ();
		format! ("{:#?}\n", server_ids)
	};
	Ok (
		Response::builder ()
		.status (StatusCode::OK)
		.header ("content-type", "text/plain")
		.body (Body::from (debug_string))?
	)
}
/// Shared mutable state for the whole relay process.
#[derive (Default)]
struct RelayState {
	// Hot-reloaded from disk once a minute
	config: arc_swap::ArcSwap <Config>,
	// One entry per registered P4 end server, keyed by server ID
	p4_server_proxies: Mutex <HashMap <PeerId, P4State>>,
	metrics: Arc <RwLock <Metrics>>,
	stats: Stats,
	http_client: reqwest::Client,
}
/// Runtime view of the on-disk config file.
#[derive (Default)]
struct Config {
	// Friendly names for known client IPv4 addresses, used in logs
	ip_nicknames: BTreeMap <[u8; 4], String>,
	// If set, connection events are POSTed here as JSON
	webhook_url: Option <String>,
}
impl From <ConfigFile> for Config {
fn from (x: ConfigFile) -> Self {
Self {
ip_nicknames: x.ip_nicknames.into_iter ().collect (),
webhook_url: x.webhook_url,
}
}
}
/// Serde layout of `config/ptth_quic_relay_server.json`.
#[derive (Deserialize)]
struct ConfigFile {
	ip_nicknames: Vec <([u8; 4], String)>,
	// Only used at startup (App::new); not part of the hot-reloaded Config
	tcp_listen_port: Option <u16>,
	webhook_url: Option <String>,
}
/// Process-lifetime connection counters.
#[derive (Default)]
struct Stats {
	quic: ConnectEvents,
}
/// Monotonic connect/disconnect counters; their difference is the
/// number of currently active connections.
#[derive (Default)]
struct ConnectEvents {
	connects: AtomicU64,
	disconnects: AtomicU64,
}
impl ConnectEvents {
	/// Records a connect and returns the resulting active count.
	fn connect (&self) -> u64 {
		let after_connect = self.connects.fetch_add (1, Ordering::Relaxed) + 1;
		after_connect - self.disconnects.load (Ordering::Relaxed)
	}
	/// Records a disconnect and returns the resulting active count.
	fn disconnect (&self) -> u64 {
		let after_disconnect = self.disconnects.fetch_add (1, Ordering::Relaxed) + 1;
		self.connects.load (Ordering::Relaxed) - after_disconnect
	}
	/// Current active count, without recording anything.
	fn _active (&self) -> u64 {
		self.connects.load (Ordering::Relaxed) - self.disconnects.load (Ordering::Relaxed)
	}
}
/// Relay-side handle for one registered P4 end server: P2 requests for
/// it are queued into this channel.
struct P4State {
	req_channel: mpsc::Sender <RequestP2ToP4>,
}
/// Reads and parses the relay server's JSON config file from disk.
async fn load_config () -> anyhow::Result <ConfigFile>
{
	let raw = tokio::fs::read_to_string ("config/ptth_quic_relay_server.json").await?;
	Ok (serde_json::from_str::<ConfigFile> (&raw)?)
}
impl RelayState {
	/// Re-reads the config file from disk and atomically swaps it in.
	/// Called at startup and then once a minute by the reload task.
	async fn reload_config (&self) -> anyhow::Result <()> {
		let config = load_config ().await?;
		let config = Arc::new (Config::from (config));
		self.config.store (config);
		Ok (())
	}
}
/// A P2 client's request to reach a P4 end server, carrying the client
/// half of the QUIC stream pair.
struct RequestP2ToP4 {
	client_send: quinn::SendStream,
	client_recv: quinn::RecvStream,
	client_id: String,
}
/// The four stream halves of a freshly matched P2<->P4 connection,
/// ready to be spliced together by `build`.
struct PtthNewConnection {
	client_send: quinn::SendStream,
	client_recv: quinn::RecvStream,
	server_send: quinn::SendStream,
	server_recv: quinn::RecvStream,
}
/// Handles for the two relay pump tasks; await both to wait for the
/// connection to finish.
struct PtthConnection {
	uplink_task: JoinHandle <anyhow::Result <()>>,
	downlink_task: JoinHandle <anyhow::Result <()>>,
}
impl PtthNewConnection {
	/// Spawns two tasks that copy bytes in each direction (64 KiB buffer
	/// each) until the source stream ends or an I/O error occurs.
	fn build (self) -> PtthConnection {
		let Self {
			mut client_send,
			mut client_recv,
			mut server_send,
			mut server_recv,
		} = self;
		let uplink_task = tokio::spawn (async move {
			// Uplink - Client to end server
			let mut buf = vec! [0u8; 65_536];
			while let Some (bytes_read) = client_recv.read (&mut buf).await? {
				if bytes_read == 0 {
					break;
				}
				let buf_slice = &buf [0..bytes_read];
				trace! ("Uplink relaying {} bytes", bytes_read);
				server_send.write_all (buf_slice).await?;
			}
			trace! ("Uplink closed");
			Ok::<_, anyhow::Error> (())
		});
		let downlink_task = tokio::spawn (async move {
			// Downlink - End server to client
			let mut buf = vec! [0u8; 65_536];
			while let Some (bytes_read) = server_recv.read (&mut buf).await? {
				let buf_slice = &buf [0..bytes_read];
				trace! ("Downlink relaying {} bytes", bytes_read);
				client_send.write_all (buf_slice).await?;
			}
			trace! ("Downlink closed");
			Ok::<_, anyhow::Error> (())
		});
		PtthConnection {
			uplink_task,
			downlink_task,
		}
	}
}
/// Accepts one peer's QUIC connection, identifies it as a P2 client or
/// a P4 end server via the first bi stream, and dispatches accordingly.
async fn handle_quic_connection (
	relay_state: Arc <RelayState>,
	conn: quinn::Connecting,
) -> anyhow::Result <()>
{
	// Connection ID used only for log correlation
	let id = Ulid::generate ();
	let config = relay_state.config.load ();
	let remote_addr = conn.remote_address ();
	// Map the caller's IPv4 address to a friendly name for the logs
	let ip_nickname = match remote_addr {
		SocketAddr::V4 (x) => {
			let ip = x.ip ().octets ();
			match config.ip_nicknames.get (&ip) {
				Some (nick) => nick.as_str (),
				_ => "Unknown",
			}
		},
		_ => "Unknown, not IPv4",
	};
	debug! ("EHG7NVUD Incoming QUIC connection {} from {:?} ({})", id, remote_addr, ip_nickname);
	// Optional fire-and-forget webhook notification
	if let Some (webhook_url) = config.webhook_url.clone () {
		let j = json! ({
			"text": format! ("Incoming QUIC connection from {:?} ({})", remote_addr, ip_nickname),
		}).to_string ();
		let http_client = relay_state.http_client.clone ();
		tokio::spawn (async move {
			http_client.post (webhook_url).body (j).send ().await
		});
	}
	let conn = conn.await?;
	// Everyone who connects must identify themselves with the first
	// bi stream
	// TODO: Timeout
	let (mut send, mut recv) = conn.accept_bi ().await?;
	let peer = protocol::p3_accept_peer (&mut recv).await?;
	match peer {
		protocol::P3Peer::P2ClientProxy (peer) => {
			trace! ("H36JTVE5 Handling connection {} as P2 client", id);
			// TODO: Check authorization for P2 peers
			protocol::p3_authorize_p2_peer (&mut send).await?;
			handle_p2_connection (relay_state, conn, peer).await?;
		},
		protocol::P3Peer::P4ServerProxy (peer) => {
			trace! ("LRHUKB7K Handling connection {} as P4 end server", id);
			// TODO: Check authorization for P4 peers
			protocol::p3_authorize_p4_peer (&mut send).await?;
			// Track the end server count for the duration of the connection
			let metrics = Arc::clone (&relay_state.metrics);
			{
				let mut m = metrics.write ().await;
				m.connected_end_servers += 1;
			}
			handle_p4_connection (relay_state, conn, peer).await?;
			{
				let mut m = metrics.write ().await;
				m.connected_end_servers -= 1;
			}
		},
	}
	Ok::<_, anyhow::Error> (())
}
/// Serves one P2 client connection: every new bi stream is one request,
/// handled concurrently in its own task. Returns when the client's
/// QUIC connection stops yielding streams.
async fn handle_p2_connection (
	relay_state: Arc <RelayState>,
	conn: quinn::Connection,
	peer: protocol::P2ClientProxy,
) -> anyhow::Result <()>
{
	let client_id = peer.id;
	while let Ok ((send, mut recv)) = conn.accept_bi ().await {
		let relay_state = Arc::clone (&relay_state);
		let client_id = client_id.clone ();
		tokio::spawn (async move {
			debug! ("Request started for P2");
			match protocol::p3_accept_p2_stream (&mut recv).await? {
				protocol::P2ToP3Stream::ConnectP2ToP4 {
					server_id,
				} => {
					handle_request_p2_to_p4 (
						relay_state,
						client_id,
						server_id,
						send,
						recv
					).await?
				},
				// Other stream kinds are silently ignored
				_ => (),
			}
			debug! ("Request ended for P2");
			Ok::<_, anyhow::Error> (())
		});
	}
	debug! ("P2 {} disconnected", client_id);
	Ok (())
}
/// Hands a P2 client's stream pair to the requested P4 end server's
/// queue. If that server isn't registered, the request is dropped with
/// only a warning log.
async fn handle_request_p2_to_p4 (
	relay_state: Arc <RelayState>,
	client_id: String,
	server_id: PeerId,
	mut client_send: quinn::SendStream,
	client_recv: quinn::RecvStream,
) -> anyhow::Result <()>
{
	trace! ("P2 {} wants to connect to P4 {}", client_id, server_id);
	// TODO: Check authorization for P2 to connect to P4
	protocol::p3_authorize_p2_to_p4_connection (&mut client_send).await?;
	{
		let p4_server_proxies = relay_state.p4_server_proxies.lock ().await;
		match p4_server_proxies.get (&server_id) {
			Some (p4_state) => {
				// Sending fails only if the P4 handler's receiver is gone
				p4_state.req_channel.send (RequestP2ToP4 {
					client_send,
					client_recv,
					client_id,
				}).await.map_err (|_| anyhow::anyhow! ("Can't send request to P4 server"))?;
			},
			None => warn! ("That server isn't connected"),
		}
	}
	Ok (())
}
/// Serves one P4 end server connection: registers it in the proxy map,
/// then, for each queued P2 request, opens a stream pair toward P4 and
/// splices the two stream pairs together.
async fn handle_p4_connection (
	relay_state: Arc <RelayState>,
	connection: quinn::Connection,
	peer: protocol::P4ServerProxy,
) -> anyhow::Result <()>
{
	let server_id = peer.id;
	let (tx, mut rx) = mpsc::channel (2);
	let p4_state = P4State {
		req_channel: tx,
	};
	{
		// NOTE(review): inserting under an existing ID replaces (and thus
		// closes) the previous registration, and entries are never removed
		// on disconnect — confirm this is intended.
		let mut p4_server_proxies = relay_state.p4_server_proxies.lock ().await;
		p4_server_proxies.insert (server_id.clone (), p4_state);
	}
	// Runs until every sender clone for this entry has been dropped
	while let Some (req) = rx.recv ().await {
		let connection = connection.clone ();
		let server_id = server_id.clone ();
		tokio::spawn (async move {
			let RequestP2ToP4 {
				client_send,
				client_recv,
				client_id,
			} = req;
			debug! ("P4 {} got a request from P2 {}", server_id, client_id);
			let (server_send, server_recv) = protocol::p3_connect_p2_to_p4 (&connection, &client_id).await?;
			trace! ("Relaying bytes...");
			let ptth_conn = PtthNewConnection {
				client_send,
				client_recv,
				server_send,
				server_recv,
			}.build ();
			ptth_conn.uplink_task.await??;
			ptth_conn.downlink_task.await??;
			debug! ("Request ended for P4");
			Ok::<_, anyhow::Error> (())
		});
	}
	debug! ("P4 {} disconnected", server_id);
	Ok (())
}

View File

@ -1,11 +0,0 @@
pub mod client_proxy;
pub mod connection;
pub mod crypto;
pub mod executable_end_server;
pub mod executable_relay_server;
pub mod prelude;
pub mod protocol;
pub mod quinn_utils;
#[cfg (test)]
mod tests;

View File

@ -1,110 +0,0 @@
use crate::prelude::*;
/// Synchronous wrapper so the end-to-end test can drive its own tokio
/// runtime.
#[test]
fn end_to_end () -> anyhow::Result <()> {
	let rt = tokio::runtime::Runtime::new ()?;
	rt.block_on (end_to_end_async ())?;
	Ok (())
}
/// Drives a relay and end servers end-to-end:
/// - connecting to the wrong port or with the wrong cert must fail,
/// - connecting over plain UDP and over UDP-over-TCP must succeed and
///   show up in the relay's `connected_end_servers` metric.
async fn end_to_end_async () -> anyhow::Result <()> {
	use crate::executable_end_server as server;
	use crate::executable_relay_server as relay;
	let relay_opt = relay::Opt {
		listen_addr: "127.0.0.1:0".to_string ().into (),
		tcp_listen_port: Some (0),
	};
	let relay_app = relay::App::new (relay_opt).await?;
	let relay_quic_port = relay_app.listen_addr ().port ();
	let relay_cert = Vec::from (relay_app.server_cert ());
	let relay_metrics = Arc::clone (&relay_app.metrics);
	let tcp_listen_port = relay_app.tcp_listen_port ()?.unwrap ();
	assert_ne! (tcp_listen_port, 0);
	// `_`-prefixed so the relay task isn't flagged as unused
	let _task_relay = tokio::spawn (async move {
		relay_app.run ().await
	});
	{
		let m = relay_metrics.read ().await;
		assert_eq! (m.connected_end_servers, 0);
	}
	// Connect with wrong port, should fail
	let server_conf = server::Config {
		debug_echo: false,
		id: "bogus".into (),
		relay_addr: "127.0.0.1:80".parse ()?,
		relay_cert: relay_cert.clone (),
		use_udp_over_tcp: false,
	};
	let server_err = server::P4EndServer::connect (server_conf).await;
	assert! (server_err.is_err ());
	// Connect with wrong cert, should fail
	let server_conf = server::Config {
		debug_echo: false,
		id: "bogus".into (),
		relay_addr: ([127, 0, 0, 1], relay_quic_port).into (),
		relay_cert: vec! [],
		use_udp_over_tcp: false,
	};
	let server_err = server::P4EndServer::connect (server_conf).await;
	assert! (server_err.is_err ());
	{
		let m = relay_metrics.read ().await;
		assert_eq! (m.connected_end_servers, 0);
	}
	// Connect over UDP
	let server_conf = server::Config {
		debug_echo: false,
		id: "bogus_VZBNRUA5".into (),
		relay_addr: ([127, 0, 0, 1], relay_quic_port).into (),
		relay_cert: relay_cert.clone (),
		use_udp_over_tcp: false,
	};
	let t = Instant::now ();
	// Named binding (not a bare `_`) keeps the server alive so it stays
	// counted in the relay's metrics, without an unused-variable warning
	let (_server_udp, _) = server::P4EndServer::connect (server_conf).await?;
	let dur = t.elapsed ();
	assert! (dur < Duration::from_millis (1_000), "{:?}", dur);
	{
		let m = relay_metrics.read ().await;
		assert_eq! (m.connected_end_servers, 1);
	}
	// Connect over TCP
	let server_conf = server::Config {
		debug_echo: false,
		id: "bogus_6E5CZIAI".into (),
		relay_addr: ([127, 0, 0, 1], tcp_listen_port).into (),
		relay_cert: relay_cert.clone (),
		use_udp_over_tcp: true,
	};
	let t = Instant::now ();
	let (_server_tcp, _) = server::P4EndServer::connect (server_conf).await?;
	let dur = t.elapsed ();
	assert! (dur < Duration::from_millis (1_000), "{:?}", dur);
	{
		let m = relay_metrics.read ().await;
		assert_eq! (m.connected_end_servers, 2);
	}
	Ok (())
}

View File

@ -1,407 +0,0 @@
use std::{
str::FromStr,
};
use fltk::{
app,
button::Button,
enums::CallbackTrigger,
frame::Frame,
group::Flex,
input::*,
prelude::*,
window::Window
};
use rand::{
Rng,
SeedableRng,
};
use structopt::StructOpt;
use tokio::runtime::Runtime;
use ptth_quic::{
client_proxy::*,
prelude::*,
protocol::PeerId,
};
// CLI options for the GUI client proxy. Plain `//` comments keep
// structopt's --help output unchanged.
#[derive (Debug, StructOpt)]
struct Opt {
	// Window title override
	#[structopt (long)]
	window_title: Option <String>,
	// Relay (P3) address; defaults to 127.0.0.1:30380
	#[structopt (long)]
	relay_addr: Option <String>,
	// Identifies this client to the relay
	#[structopt (long)]
	client_id: Option <PeerId>,
	// If set, fetch the relay cert over HTTP instead of reading it from disk
	#[structopt (long)]
	cert_url: Option <String>,
}
/// Messages sent from FLTK widget callbacks to the main event loop.
#[derive (Clone, Copy)]
enum Message {
	// Start forwarding for the port row at this index
	OpenPort (usize),
	// Stop forwarding for the port row at this index
	ClosePort (usize),
	// Append a new (idle) port row to the GUI
	AddPort,
}
/// Top-level GUI state: the tokio runtime plus one `Port` per row.
struct GuiClient <'a> {
	rt: &'a Runtime,
	// Status line at the top of the window
	frame_status: Frame,
	ports: Vec <Port>,
	but_add_port: Button,
}
/// One port-forwarding row: its widgets plus the live forwarding task,
/// if the port is currently open.
struct Port {
	gui: GuiPort,
	forwarding_instance: Option <ForwardingInstance>,
}
impl Port {
	/// Reads the row's parameters and starts forwarding over the given
	/// P2->P3 connection, switching the widgets to the "open" state.
	pub fn open_port (
		&mut self,
		rt: &Runtime,
		connection_p2_p3: quinn::Connection,
	) -> anyhow::Result <()>
	{
		let params = self.gui.get_params ()?;
		// Enter the runtime so tokio APIs work from the GUI thread
		let _guard = rt.enter ();
		let forwarding_instance = rt.block_on (ForwardingInstance::new (
			connection_p2_p3,
			params,
		))?;
		// Show the local port that was actually bound
		self.gui.input_client_port.set_value (&forwarding_instance.local_port ().to_string ());
		self.forwarding_instance.replace (forwarding_instance);
		self.gui.set_forwarding (true);
		Ok (())
	}
}
/// Widgets for one port-forwarding row.
struct GuiPort {
	row: fltk::group::Flex,
	// Read-only; filled in with the local port once forwarding starts
	input_client_port: Input,
	input_server_id: Input,
	input_server_port: Input,
	but_open: Button,
	but_close: Button,
}
impl GuiClient <'_> {
	/// Starts forwarding for one row and refreshes the status line.
	pub fn open_port (
		&mut self,
		connection_p2_p3: quinn::Connection,
		port_idx: usize,
	) -> anyhow::Result <()>
	{
		self.ports [port_idx].open_port (self.rt, connection_p2_p3)?;
		self.sync_status ();
		Ok (())
	}
	/// Stops forwarding for one row (if it was open) and refreshes the
	/// status line.
	pub fn close_port (&mut self, port_idx: usize) -> anyhow::Result <()> {
		if let Some (old_instance) = self.ports [port_idx].forwarding_instance.take () {
			self.rt.block_on (async {
				old_instance.close ()
				.await
				.context ("closing ForwardingInstance")?;
				Ok::<_, anyhow::Error> (())
			})?;
		}
		self.ports [port_idx].gui.set_forwarding (false);
		self.sync_status ();
		Ok (())
	}
	/// Number of rows currently forwarding.
	fn open_ports (&self) -> usize {
		self.ports.iter ()
		.map (|x| if x.forwarding_instance.is_some () { 1 } else { 0 })
		.sum ()
	}
	/// Updates the status line with the current open-port count.
	pub fn sync_status (&mut self) {
		let open_ports = self.open_ports ();
		self.frame_status.set_label (&format! ("Forwarding {} ports", open_ports));
	}
	/// Appends a new idle row, up to a fixed maximum; disables the "+"
	/// button once the maximum is reached.
	pub fn add_port (
		&mut self,
		ports_col: &mut Flex,
		fltk_tx: fltk::app::Sender <Message>
	) {
		const MAX_PORTS: usize = 15;
		if self.ports.len () >= MAX_PORTS {
			return;
		}
		let gui = GuiPort::new (fltk_tx, self.ports.len ());
		ports_col.add (&gui.row);
		ports_col.fixed (&gui.row, 30);
		let port = Port {
			gui,
			forwarding_instance: None,
		};
		self.ports.push (port);
		if self.ports.len () >= MAX_PORTS {
			self.but_add_port.deactivate ();
		}
	}
}
/// GUI entry point: builds the FLTK window, connects to the relay once,
/// then services widget messages (open/close/add port) until the window
/// is closed.
fn main () -> anyhow::Result <()> {
	tracing_subscriber::fmt::init ();
	let rt = Runtime::new ()?;
	let opt = Opt::from_args ();
	// Channel from widget callbacks into the event loop at the bottom
	let (fltk_tx, fltk_rx) = app::channel::<Message> ();
	let app = app::App::default ();
	let window_title = opt.window_title.clone ().unwrap_or_else (|| "PTTH client proxy".to_string ());
	let mut wind = Window::new (100, 100, 800, 600, None)
	.with_label (&window_title);
	wind.make_resizable (true);
	let mut col = Flex::default ().column ().size_of_parent ();
	let frame_status = Frame::default ();
	col.fixed (&frame_status, 30);
	{
		// Column headers for the port rows
		let mut row = Flex::default ().row ();
		let l = Frame::default ().with_label ("Server ID");
		row.fixed (&l, 120);
		let l = Frame::default ().with_label ("Server port");
		row.fixed (&l, 80);
		let l = Frame::default ().with_label ("Local port");
		row.fixed (&l, 80);
		row.end ();
		col.fixed (&row, 30);
	}
	let mut ports_col = Flex::default ().column ();
	ports_col.end ();
	let mut but_add_port = Button::default ().with_label ("+");
	but_add_port.set_trigger (CallbackTrigger::Release);
	but_add_port.emit (fltk_tx, Message::AddPort);
	col.fixed (&but_add_port, 30);
	col.end ();
	let relay_addr = opt.relay_addr.as_ref ()
	.map (|s| &s[..])
	.unwrap_or ("127.0.0.1:30380")
	.parse ()
	.context ("relay_addr should be like 127.0.0.1:30380")?;
	let mut gui_client = GuiClient {
		rt: &rt,
		frame_status,
		ports: Default::default (),
		but_add_port,
	};
	// Start with one empty row
	gui_client.add_port (&mut ports_col, fltk_tx);
	ports_col.recalc ();
	gui_client.sync_status ();
	wind.end ();
	wind.show ();
	// One QUIC connection to the relay is shared by all forwarded ports
	let connection_p2_p3 = rt.block_on (async move {
		let server_cert = match opt.cert_url.as_ref () {
			Some (url) => reqwest::get (url).await?.bytes ().await?,
			None => tokio::fs::read ("ptth_quic_output/quic_server.crt").await.context ("can't read quic_server.crt from disk")?.into (),
		};
		let endpoint = make_client_endpoint ("0.0.0.0:0".parse ()?, &[&server_cert])?;
		trace! ("Connecting to relay server");
		let client_id = opt.client_id.unwrap_or_else (|| "bogus_client".to_string ());
		let connection = protocol::p2_connect_to_p3 (&endpoint, relay_addr, &client_id).await
		.context ("P2 can't connect to P3")?;
		Ok::<_, anyhow::Error> (connection)
	})?;
	// FLTK event loop: block on messages from widget callbacks
	while app.wait () {
		match fltk_rx.recv () {
			Some (Message::OpenPort (port_idx)) => {
				// Opening can fail (e.g. bad port number); log and keep running
				if let Err (e) = gui_client.open_port (connection_p2_p3.clone (), port_idx)
				{
					error! ("{:?}", e);
				}
			},
			Some (Message::ClosePort (port_idx)) => {
				gui_client.close_port (port_idx)?;
			},
			Some (Message::AddPort) => {
				gui_client.add_port (&mut ports_col, fltk_tx);
				ports_col.recalc ();
				ports_col.redraw ();
			},
			None => (),
		}
	}
	Ok (())
}
/// Enables or disables an FLTK widget from a single boolean.
///
/// FLTK exposes separate `activate`/`deactivate` calls rather than a
/// boolean setter, so this wraps the pair behind one flag-driven helper.
fn set_active <W: WidgetExt> (w: &mut W, b: bool) {
	match b {
		true => w.activate (),
		false => w.deactivate (),
	}
}
impl GuiPort {
	/// Builds one row of port-forwarding widgets: editable server-ID and
	/// server-port inputs, a read-only client-port display, and Open/Close
	/// buttons that emit `Message::OpenPort`/`Message::ClosePort` tagged
	/// with `port_idx` over `fltk_tx` so the app loop knows which row fired.
	fn new (fltk_tx: fltk::app::Sender <Message>, port_idx: usize) -> Self {
		// NOTE: widgets constructed before `row.end ()` are auto-parented
		// into this Flex row (FLTK group semantics), so construction order
		// here determines the on-screen column order.
		let mut row = Flex::default ().row ();
		let mut input_server_id = Input::default ();
		let mut input_server_port = Input::default ();
		let mut input_client_port = Input::default ();
		let mut but_open = Button::default ().with_label ("Open");
		let mut but_close = Button::default ().with_label ("Close");
		// Fixed pixel widths per child; any remaining space stays flexible.
		row.fixed (&input_server_id, 120);
		row.fixed (&input_server_port, 80);
		row.fixed (&input_client_port, 80);
		row.fixed (&but_open, 80);
		row.fixed (&but_close, 80);
		// The client port is derived, not typed in (see `get_params`).
		input_client_port.set_value ("");
		input_client_port.set_readonly (true);
		// Placeholder defaults the user edits before opening a forward.
		input_server_id.set_value ("bogus_server");
		input_server_port.set_value ("5900");
		// Fire on button release and route the row index to the event loop.
		but_open.set_trigger (CallbackTrigger::Release);
		but_open.emit (fltk_tx, Message::OpenPort (port_idx));
		but_close.set_trigger (CallbackTrigger::Release);
		but_close.emit (fltk_tx, Message::ClosePort (port_idx));
		row.end ();
		let mut output = Self {
			row,
			input_client_port,
			input_server_id,
			input_server_port,
			but_open,
			but_close,
		};
		// Start idle: Open enabled, Close disabled.
		output.set_forwarding (false);
		output
	}
	/// Reads this row's inputs into a `ForwardingParams`, deriving the local
	/// client TCP port deterministically from (server ID, server port) via
	/// `PortInfo::random_eph_port`.
	///
	/// # Errors
	/// Fails if the server-port field does not parse as a `u16`.
	fn get_params (&self) -> anyhow::Result <ForwardingParams>
	{
		let server_tcp_port = u16::from_str (&self.input_server_port.value ())?;
		let server_id = self.input_server_id.value ();
		let client_tcp_port = PortInfo {
			server_id: &server_id,
			server_tcp_port,
		}.random_eph_port ();
		Ok (ForwardingParams {
			client_tcp_port,
			server_id,
			server_tcp_port,
		})
	}
	/// Switches the row between forwarding (`x == true`) and idle states.
	///
	/// While forwarding, the identifying fields are frozen and only "Close"
	/// is clickable; while idle, the reverse. The final two calls sync the
	/// buttons' toggle state to match.
	fn set_forwarding (&mut self, x: bool) {
		set_active (&mut self.input_client_port, x);
		set_active (&mut self.input_server_id, !x);
		set_active (&mut self.input_server_port, !x);
		set_active (&mut self.but_open, !x);
		set_active (&mut self.but_close, x);
		self.but_open.set (x);
		self.but_close.set (!x);
	}
}
// This can collide, but who cares
// It's not secure or anything - It's just supposed to pick a port somewhat
// deterministically based on the server and relay info.
/// Inputs hashed to derive a deterministic local ephemeral port for a
/// (server, port) pair; serialized with MessagePack and fed to BLAKE3 in
/// `random_eph_port`.
#[derive (serde::Serialize)]
struct PortInfo <'a> {
	// Deliberately excluded from the hash so the same local port is chosen
	// regardless of which relay the client connects through.
	// relay_addr: SocketAddr,
	server_id: &'a str,
	server_tcp_port: u16
}
impl PortInfo <'_> {
// https://en.wikipedia.org/wiki/TCP_ports#Dynamic,_private_or_ephemeral_ports
fn random_eph_port (&self) -> u16
{
let seed = blake3::hash (&rmp_serde::to_vec (self).expect ("Can't hash PortInfo - impossible error"));
let mut rng = rand_chacha::ChaCha20Rng::from_seed (*seed.as_bytes ());
let tcp_eph_range = 49152..=65535;
rng.gen_range (tcp_eph_range)
}
}
#[cfg (test)]
mod test {
	use blake3::Hasher;
	use super::*;
	/// Pins the BLAKE3-seeded ChaCha20 port derivation to known values so
	/// the algorithm can't silently change between releases (which would
	/// move every user's local port).
	#[test]
	fn prng () {
		// The empty-input BLAKE3 digest must seed the PRNG to a fixed
		// first draw from the ephemeral range.
		let seed = Hasher::default ().finalize ();
		let mut rng = rand_chacha::ChaCha20Rng::from_seed (*seed.as_bytes ());
		assert_eq! (rng.gen_range (49152..=65535), 49408);
		let cases = [
			(("127.0.0.1:4000", "bogus_server", 22), 51168),
			// The relay address is explicitly excluded from the eph port
			// computation in case I want to support connecting to a server
			// across multiple relays
			(("127.0.0.1:30380", "bogus_server", 22), 51168),
			(("127.0.0.1:4000", "real_server", 22), 53873),
			(("127.0.0.1:4000", "bogus_server", 5900), 53844),
		];
		for ((_relay_addr, server_id, server_tcp_port), expected) in cases {
			let info = PortInfo {
				server_id,
				server_tcp_port,
			};
			assert_eq! (expected, info.random_eph_port ());
		}
	}
}

View File

@ -7,39 +7,33 @@ edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
description = "The PTTH relay" description = "The PTTH relay"
repository = "https://six-five-six-four.com/git/reactor/ptth"
[dependencies] [dependencies]
anyhow = "1.0.66" anyhow = "1.0.38"
base64 = "0.13.0" base64 = "0.13.0"
blake3 = "1.0.0" blake3 = "0.3.7"
chrono = { version = "0.4.23", features = ["serde"] } chrono = { version = "0.4.19", features = ["serde"] }
clap = "2.33.3" clap = "2.33.3"
dashmap = "4.0.2" dashmap = "4.0.2"
futures = "0.3.7" futures = "0.3.7"
futures-util = "0.3.8" futures-util = "0.3.8"
handlebars = "3.5.3" handlebars = "3.5.3"
http = "0.2.3" http = "0.2.3"
hyper = { version = "0.14.23", features = ["http1", "http2", "server", "stream", "tcp"] } hyper = { version = "0.14.4", features = ["http1", "server", "stream", "tcp"] }
itertools = "0.9.0" itertools = "0.9.0"
rand = "0.8.5" rand = "0.8.3"
rmp-serde = "0.15.5" rmp-serde = "0.15.5"
rusty_ulid = "1.0.0" rusty_ulid = "0.10.1"
serde = { version = "1.0.150", features = ["derive"] } serde = { version = "1.0.117", features = ["derive"] }
serde_json = "1.0.89" serde_json = "1.0.60"
serde_urlencoded = "0.7.1" serde_urlencoded = "0.7.0"
thiserror = "1.0.37" thiserror = "1.0.22"
tokio = { version = "1.23.0", features = [] } tokio = { version = "1.8.1", features = [] }
tokio-stream = "0.1.11" tokio-stream = "0.1.3"
toml = "0.5.10" toml = "0.5.7"
tracing = "0.1.37" tracing = "0.1.25"
tracing-futures = "0.2.4" tracing-futures = "0.2.4"
tracing-subscriber = "0.2.15" tracing-subscriber = "0.2.15"
ptth_core = { path = "../ptth_core", version = "2.0.0" } ptth_core = { path = "../ptth_core", version = "2.0.0" }
[dependencies.reqwest]
version = "0.11.13"
default-features = false
features = ["stream", "rustls-tls", "hyper-rustls"]

View File

@ -15,6 +15,7 @@ use crate::{
errors::ConfigError, errors::ConfigError,
key_validity::{ key_validity::{
ScraperKey, ScraperKey,
Valid30Days,
}, },
}; };
@ -98,6 +99,7 @@ pub mod file {
use crate::key_validity::{ use crate::key_validity::{
BlakeHashWrapper, BlakeHashWrapper,
ScraperKey, ScraperKey,
Valid30Days,
}; };
#[derive (Clone, Debug, Deserialize, Serialize)] #[derive (Clone, Debug, Deserialize, Serialize)]
@ -140,12 +142,9 @@ pub mod file {
pub servers: Option <Vec <Server>>, pub servers: Option <Vec <Server>>,
// Adding a DB will take a while, so I'm moving these out of dev mode. // Adding a DB will take a while, so I'm moving these out of dev mode.
pub scraper_keys: Option <Vec <ScraperKey>>, pub scraper_keys: Option <Vec <ScraperKey <Valid30Days>>>,
pub news_url: Option <String>, pub news_url: Option <String>,
pub hide_audit_log: Option <bool>,
pub webhook_url: Option <String>,
pub webhook_interval_s: Option <u32>,
} }
} }
@ -157,11 +156,8 @@ pub struct Config {
pub address: IpAddr, pub address: IpAddr,
pub port: Option <u16>, pub port: Option <u16>,
pub servers: HashMap <String, file::Server>, pub servers: HashMap <String, file::Server>,
pub scraper_keys: HashMap <String, ScraperKey>, pub scraper_keys: HashMap <String, ScraperKey <Valid30Days>>,
pub news_url: Option <String>, pub news_url: Option <String>,
pub hide_audit_log: bool,
pub webhook_url: Option <String>,
pub webhook_interval_s: u32,
} }
impl Default for Config { impl Default for Config {
@ -173,9 +169,6 @@ impl Default for Config {
servers: Default::default (), servers: Default::default (),
scraper_keys: Default::default (), scraper_keys: Default::default (),
news_url: None, news_url: None,
hide_audit_log: false,
webhook_url: None,
webhook_interval_s: 7200,
} }
} }
} }
@ -204,9 +197,6 @@ impl TryFrom <file::Config> for Config {
servers, servers,
scraper_keys, scraper_keys,
news_url: f.news_url, news_url: f.news_url,
hide_audit_log: f.hide_audit_log.unwrap_or (false),
webhook_url: f.webhook_url,
webhook_interval_s: f.webhook_interval_s.unwrap_or (7200),
}) })
} }
} }

View File

@ -78,17 +78,36 @@ impl Serialize for BlakeHashWrapper {
} }
} }
pub struct Valid7Days;
pub struct Valid30Days;
//pub struct Valid90Days;
pub trait MaxValidDuration { pub trait MaxValidDuration {
fn dur () -> Duration; fn dur () -> Duration;
} }
impl MaxValidDuration for Valid7Days {
fn dur () -> Duration {
Duration::days (7)
}
}
impl MaxValidDuration for Valid30Days {
fn dur () -> Duration {
Duration::days (30)
}
}
#[derive (Deserialize)] #[derive (Deserialize)]
pub struct ScraperKey { pub struct ScraperKey <V: MaxValidDuration> {
pub name: String, name: String,
not_before: DateTime <Utc>, not_before: DateTime <Utc>,
not_after: DateTime <Utc>, not_after: DateTime <Utc>,
pub hash: BlakeHashWrapper, pub hash: BlakeHashWrapper,
#[serde (default)]
_phantom: std::marker::PhantomData <V>,
} }
#[derive (Copy, Clone, Debug, PartialEq)] #[derive (Copy, Clone, Debug, PartialEq)]
@ -102,20 +121,21 @@ pub enum KeyValidity {
DurationNegative, DurationNegative,
} }
impl ScraperKey { impl <V: MaxValidDuration> ScraperKey <V> {
pub fn new_30_day <S: Into <String>> (name: S, input: &[u8]) -> Self { pub fn new_30_day <S: Into <String>> (name: S, input: &[u8]) -> Self {
let now = Utc::now (); let now = Utc::now ();
Self { Self {
name: name.into (), name: name.into (),
not_before: now, not_before: now,
not_after: now + Duration::days (30), not_after: now + V::dur (),
hash: BlakeHashWrapper::from_key (input), hash: BlakeHashWrapper::from_key (input),
_phantom: Default::default (),
} }
} }
} }
impl ScraperKey { impl <V: MaxValidDuration> ScraperKey <V> {
#[must_use] #[must_use]
pub fn is_valid (&self, now: DateTime <Utc>, input: &[u8]) -> KeyValidity { pub fn is_valid (&self, now: DateTime <Utc>, input: &[u8]) -> KeyValidity {
use KeyValidity::*; use KeyValidity::*;
@ -132,6 +152,13 @@ impl ScraperKey {
return DurationNegative; return DurationNegative;
} }
let max_dur = V::dur ();
let actual_dur = self.not_after - self.not_before;
if actual_dur > max_dur {
return DurationTooLong (max_dur);
}
if now >= self.not_after { if now >= self.not_after {
return Expired; return Expired;
} }
@ -169,11 +196,12 @@ mod tests {
fn duration_negative () { fn duration_negative () {
let zero_time = Utc::now (); let zero_time = Utc::now ();
let key = ScraperKey { let key = ScraperKey::<Valid30Days> {
name: "automated testing".to_string (), name: "automated testing".to_string (),
not_before: zero_time + Duration::days (1 + 2), not_before: zero_time + Duration::days (1 + 2),
not_after: zero_time + Duration::days (1), not_after: zero_time + Duration::days (1),
hash: BlakeHashWrapper::from_key ("bad_password".as_bytes ()), hash: BlakeHashWrapper::from_key ("bad_password".as_bytes ()),
_phantom: Default::default (),
}; };
let err = DurationNegative; let err = DurationNegative;
@ -187,22 +215,46 @@ mod tests {
} }
} }
#[test]
fn key_valid_too_long () {
let zero_time = Utc::now ();
let key = ScraperKey::<Valid30Days> {
name: "automated testing".to_string (),
not_before: zero_time + Duration::days (1),
not_after: zero_time + Duration::days (1 + 31),
hash: BlakeHashWrapper::from_key ("bad_password".as_bytes ()),
_phantom: Default::default (),
};
let err = DurationTooLong (Duration::days (30));
for (input, expected) in &[
(zero_time + Duration::days (0), err),
(zero_time + Duration::days (2), err),
(zero_time + Duration::days (100), err),
] {
assert_eq! (key.is_valid (*input, "bad_password".as_bytes ()), *expected);
}
}
#[test] #[test]
fn normal_key () { fn normal_key () {
let zero_time = Utc::now (); let zero_time = Utc::now ();
let key = ScraperKey { let key = ScraperKey::<Valid30Days> {
name: "automated testing".to_string (), name: "automated testing".to_string (),
not_before: zero_time + Duration::days (1), not_before: zero_time + Duration::days (1),
not_after: zero_time + Duration::days (1 + 60), not_after: zero_time + Duration::days (1 + 30),
hash: BlakeHashWrapper::from_key ("bad_password".as_bytes ()), hash: BlakeHashWrapper::from_key ("bad_password".as_bytes ()),
_phantom: Default::default (),
}; };
for (input, expected) in &[ for (input, expected) in &[
(zero_time + Duration::days (0), ClockIsBehind), (zero_time + Duration::days (0), ClockIsBehind),
(zero_time + Duration::days (2), Valid), (zero_time + Duration::days (2), Valid),
(zero_time + Duration::days (60 - 1), Valid), (zero_time + Duration::days (29), Valid),
(zero_time + Duration::days (60 + 1), Expired), (zero_time + Duration::days (1 + 30), Expired),
(zero_time + Duration::days (100), Expired), (zero_time + Duration::days (100), Expired),
] { ] {
assert_eq! (key.is_valid (*input, "bad_password".as_bytes ()), *expected); assert_eq! (key.is_valid (*input, "bad_password".as_bytes ()), *expected);
@ -213,11 +265,12 @@ mod tests {
fn wrong_key () { fn wrong_key () {
let zero_time = Utc::now (); let zero_time = Utc::now ();
let key = ScraperKey { let key = ScraperKey::<Valid30Days> {
name: "automated testing".to_string (), name: "automated testing".to_string (),
not_before: zero_time + Duration::days (1), not_before: zero_time + Duration::days (1),
not_after: zero_time + Duration::days (1 + 30), not_after: zero_time + Duration::days (1 + 30),
hash: BlakeHashWrapper::from_key ("bad_password".as_bytes ()), hash: BlakeHashWrapper::from_key ("bad_password".as_bytes ()),
_phantom: Default::default (),
}; };
for input in &[ for input in &[

View File

@ -121,28 +121,27 @@ async fn handle_http_request (
{ {
use RequestError::*; use RequestError::*;
let req_id = rusty_ulid::generate_ulid_string ();
debug! ("Created request {}", req_id);
let req_method = req.method.clone (); let req_method = req.method.clone ();
if ! state.server_exists (server_name).await { if ! state.server_exists (server_name).await {
return Err (UnknownServer); return Err (UnknownServer);
} }
let user = get_user_name (&req);
let req = http_serde::RequestParts::from_hyper (req.method, uri.clone (), req.headers) let req = http_serde::RequestParts::from_hyper (req.method, uri.clone (), req.headers)
.map_err (|_| BadRequest)?; .map_err (|_| BadRequest)?;
let (tx, rx) = oneshot::channel (); let (tx, rx) = oneshot::channel ();
let tx = relay_state::ResponseRendezvous {
timeout: Instant::now () + Duration::from_secs (120),
tx,
};
let req_id = rusty_ulid::generate_ulid_string (); let req_id = rusty_ulid::generate_ulid_string ();
debug! ("Forwarding {}", req_id); state.audit_log.push (AuditEvent::new (AuditData::WebClientGet {
user,
server_name: server_name.to_string (),
uri,
})).await;
trace! ("Created request {}", req_id);
{ {
let response_rendezvous = state.response_rendezvous.read ().await; let response_rendezvous = state.response_rendezvous.read ().await;
@ -441,13 +440,6 @@ async fn handle_audit_log (
handlebars: Arc <Handlebars <'static>> handlebars: Arc <Handlebars <'static>>
) -> Result <Response <Body>, RequestError> ) -> Result <Response <Body>, RequestError>
{ {
{
let cfg = state.config.read ().await;
if cfg.hide_audit_log {
return Ok (error_reply (StatusCode::FORBIDDEN, "Forbidden")?);
}
}
let page = handle_audit_log_internal (state).await; let page = handle_audit_log_internal (state).await;
let s = handlebars.render ("audit_log", &page)?; let s = handlebars.render ("audit_log", &page)?;
@ -589,10 +581,6 @@ async fn handle_all (
use routing::Route::*; use routing::Route::*;
let state = &*state; let state = &*state;
{
let mut counters = state.webhook_counters.write ().await;
counters.requests_total += 1;
}
// The path is cloned here, so it's okay to consume the request // The path is cloned here, so it's okay to consume the request
// later. // later.
@ -622,13 +610,6 @@ async fn handle_all (
} => { } => {
let (parts, _) = req.into_parts (); let (parts, _) = req.into_parts ();
let user = get_user_name (&parts);
state.audit_log.push (AuditEvent::new (AuditData::WebClientGet {
user,
server_name: listen_code.to_string (),
uri: path.to_string (),
})).await;
handle_http_request (parts, path.to_string (), &state, listen_code).await? handle_http_request (parts, path.to_string (), &state, listen_code).await?
}, },
ClientServerList => handle_server_list (state, handlebars).await?, ClientServerList => handle_server_list (state, handlebars).await?,
@ -800,45 +781,6 @@ pub async fn run_relay (
}); });
} }
// Set a task to periodically sweep and time-out requests where the client
// and server are never going to rendezvous
let state_2 = Arc::clone (&state);
tokio::spawn (async move {
let mut interval = tokio::time::interval (Duration::from_secs (60));
interval.set_missed_tick_behavior (tokio::time::MissedTickBehavior::Skip);
loop {
use std::convert::TryFrom;
use rusty_ulid::Ulid;
interval.tick ().await;
{
let timeout_ms = Utc::now ().timestamp () - 120_000;
if let Ok (timeout_ms) = u64::try_from (timeout_ms) {
let timeout_ulid = Ulid::from_timestamp_with_rng (timeout_ms, &mut rand::thread_rng ()).to_string ();
let mut request_rendezvous = state_2.request_rendezvous.lock ().await;
request_rendezvous.iter_mut ()
.for_each (|(_k, v)| {
match v {
RequestRendezvous::ParkedServer (_) => (),
RequestRendezvous::ParkedClients (requests) => requests.retain (|req| req.id.as_str () >= timeout_ulid.as_str ()),
}
});
}
}
{
let now = Instant::now ();
let response_rendezvous = state_2.response_rendezvous.read ().await;
response_rendezvous.retain (|_, v| v.timeout >= now);
}
}
});
let make_svc = make_service_fn (|_conn| { let make_svc = make_service_fn (|_conn| {
let state = state.clone (); let state = state.clone ();
let handlebars = handlebars.clone (); let handlebars = handlebars.clone ();
@ -882,11 +824,6 @@ pub async fn run_relay (
state.audit_log.push (AuditEvent::new (AuditData::RelayStart)).await; state.audit_log.push (AuditEvent::new (AuditData::RelayStart)).await;
{
let state = state.clone ();
tokio::spawn (webhook_task (state));
}
trace! ("Serving relay on {:?}", addr); trace! ("Serving relay on {:?}", addr);
server.with_graceful_shutdown (async { server.with_graceful_shutdown (async {
@ -902,7 +839,7 @@ pub async fn run_relay (
std::mem::swap (&mut swapped, &mut response_rendezvous); std::mem::swap (&mut swapped, &mut response_rendezvous);
for (_, sender) in swapped { for (_, sender) in swapped {
sender.tx.send (Err (ShuttingDown)).ok (); sender.send (Err (ShuttingDown)).ok ();
} }
let mut request_rendezvous = state.request_rendezvous.lock ().await; let mut request_rendezvous = state.request_rendezvous.lock ().await;
@ -922,64 +859,5 @@ pub async fn run_relay (
Ok (()) Ok (())
} }
async fn webhook_task (state: Arc <Relay>) {
use crate::relay_state::MonitoringCounters;
let client = reqwest::Client::default ();
let webhook_interval_s = {
let config = state.config.read ().await;
config.webhook_interval_s
};
dbg! (webhook_interval_s);
let mut interval = tokio::time::interval (std::time::Duration::from_secs (webhook_interval_s.into ()));
interval.set_missed_tick_behavior (tokio::time::MissedTickBehavior::Skip);
let mut tick_seq = 1;
let mut last_counters_reported = (MonitoringCounters::default (), Utc::now (), 0);
loop {
interval.tick ().await;
let webhook_url = {
let config = state.config.read ().await;
config.webhook_url.clone ()
};
let webhook_url = match webhook_url {
Some (x) => x,
None => {
continue;
},
};
let now = Utc::now ();
let counters = {
state.webhook_counters.read ().await.clone ()
};
let requests_total_diff = counters.requests_total - last_counters_reported.0.requests_total;
let j = serde_json::json! ({
"text": format! ("From tick {} to {}: Handled {} requests", last_counters_reported.2, tick_seq, requests_total_diff),
}).to_string ();
match client.post (webhook_url).body (j).send ().await {
Ok (resp) => {
if resp.status () == StatusCode::OK {
last_counters_reported = (counters, now, tick_seq);
}
else {
dbg! (resp.status ());
}
},
Err (e) => { dbg! (e); },
}
tick_seq += 1;
}
}
#[cfg (test)] #[cfg (test)]
mod tests; mod tests;

View File

@ -2,11 +2,11 @@
use std::{ use std::{
convert::TryFrom, convert::TryFrom,
error::Error,
path::PathBuf, path::PathBuf,
sync::Arc, sync::Arc,
}; };
use anyhow::Context;
use clap::{App, SubCommand}; use clap::{App, SubCommand};
use tracing_subscriber::{ use tracing_subscriber::{
fmt, fmt,
@ -21,7 +21,7 @@ use ptth_relay::{
}; };
#[tokio::main] #[tokio::main]
async fn main () -> anyhow::Result<()> { async fn main () -> Result <(), Box <dyn Error>> {
fmt () fmt ()
.with_env_filter (EnvFilter::from_default_env ()) .with_env_filter (EnvFilter::from_default_env ())
.with_span_events (FmtSpan::CLOSE) .with_span_events (FmtSpan::CLOSE)
@ -48,7 +48,7 @@ async fn main () -> anyhow::Result<()> {
} }
let config_path = PathBuf::from ("config/ptth_relay.toml"); let config_path = PathBuf::from ("config/ptth_relay.toml");
let config = Config::from_file (&config_path).await.context("couldn't load ptth_relay.toml")?; let config = Config::from_file (&config_path).await?;
let (shutdown_rx, forced_shutdown) = ptth_core::graceful_shutdown::init_with_force (); let (shutdown_rx, forced_shutdown) = ptth_core::graceful_shutdown::init_with_force ();

View File

@ -61,10 +61,7 @@ pub enum RequestRendezvous {
ParkedServer (oneshot::Sender <Result <http_serde::WrappedRequest, ShuttingDownError>>), ParkedServer (oneshot::Sender <Result <http_serde::WrappedRequest, ShuttingDownError>>),
} }
pub (crate) struct ResponseRendezvous { type ResponseRendezvous = oneshot::Sender <Result <(http_serde::ResponseParts, Body), ShuttingDownError>>;
pub timeout: Instant,
pub tx: oneshot::Sender <Result <(http_serde::ResponseParts, Body), ShuttingDownError>>,
}
#[derive (Clone)] #[derive (Clone)]
pub struct ServerStatus { pub struct ServerStatus {
@ -101,14 +98,6 @@ pub struct Relay {
/// Memory backend for audit logging /// Memory backend for audit logging
// TODO: Add file / database / network server logging backend // TODO: Add file / database / network server logging backend
pub (crate) audit_log: BoundedVec <AuditEvent>, pub (crate) audit_log: BoundedVec <AuditEvent>,
/// Counters for webhook reporting
pub (crate) webhook_counters: RwLock <MonitoringCounters>,
}
#[derive (Clone, Default)]
pub (crate) struct MonitoringCounters {
pub (crate) requests_total: u64,
} }
#[derive (Clone)] #[derive (Clone)]
@ -120,6 +109,7 @@ pub struct RejectedServer {
#[derive (Clone, Debug)] #[derive (Clone, Debug)]
pub struct AuditEvent { pub struct AuditEvent {
time_monotonic: Instant,
pub time_utc: DateTime <Utc>, pub time_utc: DateTime <Utc>,
pub data: AuditData, pub data: AuditData,
} }
@ -131,10 +121,6 @@ pub enum AuditData {
server: crate::config::file::Server, server: crate::config::file::Server,
}, },
RelayStart, RelayStart,
ScraperGet {
key_name: String,
path: String,
},
WebClientGet { WebClientGet {
user: Option <String>, user: Option <String>,
server_name: String, server_name: String,
@ -145,6 +131,7 @@ pub enum AuditData {
impl AuditEvent { impl AuditEvent {
pub fn new (data: AuditData) -> Self { pub fn new (data: AuditData) -> Self {
Self { Self {
time_monotonic: Instant::now (),
time_utc: Utc::now (), time_utc: Utc::now (),
data, data,
} }
@ -210,7 +197,6 @@ impl TryFrom <Config> for Relay {
shutdown_watch_rx, shutdown_watch_rx,
unregistered_servers: BoundedVec::new (20), unregistered_servers: BoundedVec::new (20),
audit_log: BoundedVec::new (256), audit_log: BoundedVec::new (256),
webhook_counters: Default::default (),
}) })
} }
} }
@ -326,7 +312,7 @@ impl Builder {
self self
} }
pub fn scraper_key (mut self, key: crate::key_validity::ScraperKey) pub fn scraper_key (mut self, key: crate::key_validity::ScraperKey <crate::key_validity::Valid30Days>)
-> Self -> Self
{ {
self.config.scraper_keys.insert (key.hash.encode_base64 (), key); self.config.scraper_keys.insert (key.hash.encode_base64 (), key);

View File

@ -119,21 +119,6 @@ pub async fn v1_server_list (state: &Relay)
} }
} }
fn get_api_key (headers: &hyper::HeaderMap) -> Option <&str>
{
if let Some (key) = headers.get ("X-ApiKey").and_then (|v| v.to_str ().ok ()) {
return Some (key);
}
if let Some (s) = headers.get ("Authorization").and_then (|v| v.to_str ().ok ()) {
if let Some (key) = s.strip_prefix ("Bearer ") {
return Some (key);
}
}
None
}
#[instrument (level = "trace", skip (req, state))] #[instrument (level = "trace", skip (req, state))]
async fn api_v1 ( async fn api_v1 (
req: Request <Body>, req: Request <Body>,
@ -142,12 +127,7 @@ async fn api_v1 (
) )
-> Result <Response <Body>, RequestError> -> Result <Response <Body>, RequestError>
{ {
use crate::{ let api_key = req.headers ().get ("X-ApiKey");
AuditData,
AuditEvent,
};
let api_key = get_api_key (req.headers ());
let api_key = match api_key { let api_key = match api_key {
None => return Ok (error_reply (StatusCode::FORBIDDEN, strings::NO_API_KEY)?), None => return Ok (error_reply (StatusCode::FORBIDDEN, strings::NO_API_KEY)?),
@ -158,8 +138,6 @@ async fn api_v1 (
let bad_key = || error_reply (StatusCode::FORBIDDEN, strings::FORBIDDEN); let bad_key = || error_reply (StatusCode::FORBIDDEN, strings::FORBIDDEN);
let key_name;
{ {
let config = state.config.read ().await; let config = state.config.read ().await;
@ -182,19 +160,9 @@ async fn api_v1 (
return Ok (bad_key ()?); return Ok (bad_key ()?);
}, },
} }
key_name = expected_key.name.to_string ();
} }
state.audit_log.push (AuditEvent::new (AuditData::ScraperGet { if path_rest == "test" {
key_name,
path: path_rest.to_string (),
})).await;
if path_rest == "metrics" {
Ok (metrics (state).await?)
}
else if path_rest == "test" {
Ok (error_reply (StatusCode::OK, "You're valid!")?) Ok (error_reply (StatusCode::OK, "You're valid!")?)
} }
else if path_rest == "server_list" { else if path_rest == "server_list" {
@ -223,64 +191,6 @@ async fn api_v1 (
} }
} }
#[instrument (level = "trace", skip (state))]
async fn metrics (
state: &Relay,
)
-> Result <Response <Body>, RequestError>
{
let mut s = String::with_capacity (4 * 1_024);
let mut push_metric = |name, help: Option<&str>, kind, value| {
if let Some (help) = help {
s.push_str (format! ("# HELP {} {}\n", name, help).as_str ());
}
s.push_str (format! ("# TYPE {} {}\n", name, kind).as_str ());
s.push_str (format! ("{} {}\n", name, value).as_str ());
};
let request_rendezvous_count = {
let g = state.request_rendezvous.lock ().await;
g.len ()
};
let server_status_count;
let connected_server_count;
let now = Utc::now ();
{
let g = state.server_status.lock ().await;
server_status_count = g.len ();
connected_server_count = g.iter ()
.filter (|(_, s)| now - s.last_seen < chrono::Duration::seconds (60))
.count ();
}
let response_rendezvous_count = {
let g = state.response_rendezvous.read ().await;
g.len ()
};
push_metric ("request_rendezvous_count", None, "gauge", request_rendezvous_count.to_string ());
push_metric ("server_status_count", None, "gauge", server_status_count.to_string ());
push_metric ("connected_server_count", None, "gauge", connected_server_count.to_string ());
push_metric ("response_rendezvous_count", None, "gauge", response_rendezvous_count.to_string ());
#[cfg (target_os = "linux")]
{
if let Some (rss) = tokio::fs::read_to_string ("/proc/self/status").await
.ok ()
.and_then (|s| get_rss_from_status (s.as_str ()))
{
push_metric ("relay_vm_rss", Some ("VmRSS of the relay process, in kB"), "gauge", rss.to_string ());
}
}
Ok (Response::builder ()
.body (Body::from (s))?)
}
#[instrument (level = "trace", skip (req, state))] #[instrument (level = "trace", skip (req, state))]
pub async fn handle ( pub async fn handle (
req: Request <Body>, req: Request <Body>,
@ -306,20 +216,6 @@ pub async fn handle (
} }
} }
fn get_rss_from_status (proc_status: &str) -> Option <u64>
{
use std::str::FromStr;
for line in proc_status.lines () {
if let Some (rest) = line.strip_prefix ("VmRSS:\t").and_then (|s| s.strip_suffix (" kB"))
{
return u64::from_str (rest.trim_start ()).ok ();
}
}
None
}
#[cfg (test)] #[cfg (test)]
mod tests { mod tests {
use std::{ use std::{
@ -328,6 +224,7 @@ mod tests {
use tokio::runtime::Runtime; use tokio::runtime::Runtime;
use crate::{ use crate::{
config,
key_validity, key_validity,
}; };
use super::*; use super::*;
@ -336,9 +233,8 @@ mod tests {
struct TestCase { struct TestCase {
// Inputs // Inputs
path_rest: &'static str, path_rest: &'static str,
auth_header: Option <&'static str>,
valid_key: Option <&'static str>, valid_key: Option <&'static str>,
x_api_key: Option <&'static str>, input_key: Option <&'static str>,
// Expected // Expected
expected_status: StatusCode, expected_status: StatusCode,
@ -359,15 +255,9 @@ mod tests {
x x
} }
fn auth_header (&self, v: Option <&'static str>) -> Self { fn input_key (&self, v: Option <&'static str>) -> Self {
let mut x = self.clone (); let mut x = self.clone ();
x.auth_header = v; x.input_key = v;
x
}
fn x_api_key (&self, v: Option <&'static str>) -> Self {
let mut x = self.clone ();
x.x_api_key = v;
x x
} }
@ -395,16 +285,13 @@ mod tests {
.expected_body (format! ("{}\n", body)) .expected_body (format! ("{}\n", body))
} }
async fn test (&self, name: &str) { async fn test (&self) {
let mut input = Request::builder () let mut input = Request::builder ()
.method ("GET") .method ("GET")
.uri (format! ("http://127.0.0.1:4000/scraper/{}", self.path_rest)); .uri (format! ("http://127.0.0.1:4000/scraper/{}", self.path_rest));
if let Some (auth_header) = self.auth_header { if let Some (input_key) = self.input_key {
input = input.header ("Authorization", auth_header); input = input.header ("X-ApiKey", input_key);
}
if let Some (x_api_key) = self.x_api_key {
input = input.header ("X-ApiKey", x_api_key);
} }
let input = input.body (Body::empty ()).unwrap (); let input = input.body (Body::empty ()).unwrap ();
@ -431,15 +318,15 @@ mod tests {
expected_headers.insert (*key, (*value).try_into ().expect ("Couldn't convert header value")); expected_headers.insert (*key, (*value).try_into ().expect ("Couldn't convert header value"));
} }
assert_eq! (actual_head.status, self.expected_status, "{}", name); assert_eq! (actual_head.status, self.expected_status);
assert_eq! (actual_head.headers, expected_headers, "{}", name); assert_eq! (actual_head.headers, expected_headers);
let actual_body = hyper::body::to_bytes (actual_body).await; let actual_body = hyper::body::to_bytes (actual_body).await;
let actual_body = actual_body.expect ("Body should be convertible to bytes"); let actual_body = actual_body.expect ("Body should be convertible to bytes");
let actual_body = actual_body.to_vec (); let actual_body = actual_body.to_vec ();
let actual_body = String::from_utf8 (actual_body).expect ("Body should be UTF-8"); let actual_body = String::from_utf8 (actual_body).expect ("Body should be UTF-8");
assert_eq! (actual_body, self.expected_body, "{}", name); assert_eq! (actual_body, self.expected_body);
} }
} }
@ -451,8 +338,7 @@ mod tests {
let base_case = TestCase { let base_case = TestCase {
path_rest: "v1/test", path_rest: "v1/test",
valid_key: Some ("bogus"), valid_key: Some ("bogus"),
auth_header: None, input_key: Some ("bogus"),
x_api_key: Some ("bogus"),
expected_status: StatusCode::OK, expected_status: StatusCode::OK,
expected_headers: vec! [ expected_headers: vec! [
("content-type", "text/plain"), ("content-type", "text/plain"),
@ -460,47 +346,21 @@ mod tests {
expected_body: "You're valid!\n".to_string (), expected_body: "You're valid!\n".to_string (),
}; };
base_case for case in &[
.test ("00").await; base_case.clone (),
base_case.path_rest ("v9999/test")
base_case .expected (StatusCode::NOT_FOUND, strings::UNKNOWN_API_VERSION),
.path_rest ("v9999/test") base_case.valid_key (None)
.expected (StatusCode::NOT_FOUND, strings::UNKNOWN_API_VERSION) .expected (StatusCode::FORBIDDEN, strings::FORBIDDEN),
.test ("01").await; base_case.input_key (Some ("borgus"))
.expected (StatusCode::FORBIDDEN, strings::FORBIDDEN),
base_case base_case.path_rest ("v1/toast")
.valid_key (None) .expected (StatusCode::NOT_FOUND, strings::UNKNOWN_API_ENDPOINT),
.expected (StatusCode::FORBIDDEN, strings::FORBIDDEN) base_case.input_key (None)
.test ("02").await; .expected (StatusCode::FORBIDDEN, strings::NO_API_KEY),
] {
base_case case.test ().await;
.x_api_key (Some ("borgus")) }
.expected (StatusCode::FORBIDDEN, strings::FORBIDDEN)
.test ("03").await;
base_case
.path_rest ("v1/toast")
.expected (StatusCode::NOT_FOUND, strings::UNKNOWN_API_ENDPOINT)
.test ("04").await;
base_case
.x_api_key (None)
.expected (StatusCode::FORBIDDEN, strings::NO_API_KEY)
.test ("05").await;
base_case
.x_api_key (None)
.auth_header (Some ("Bearer bogus"))
.expected (StatusCode::OK, "You're valid!")
.test ("06").await;
}); });
} }
#[test]
fn rss () {
let input = "VmHWM: 584 kB\nVmRSS: 584 kB\nRssAnon: 68 kB\n";
assert_eq! (get_rss_from_status (input), Some (584));
}
} }

View File

@ -57,7 +57,7 @@ pub async fn handle_listen (
let mut server_status = state.server_status.lock ().await; let mut server_status = state.server_status.lock ().await;
let status = server_status.entry (watcher_code.clone ()).or_insert_with (Default::default); let mut status = server_status.entry (watcher_code.clone ()).or_insert_with (Default::default);
status.last_seen = now; status.last_seen = now;
} }
@ -129,7 +129,7 @@ pub async fn handle_response (
let magic_header = base64::decode (magic_header).map_err (PtthMagicHeaderNotBase64)?; let magic_header = base64::decode (magic_header).map_err (PtthMagicHeaderNotBase64)?;
let resp_parts: http_serde::ResponseParts = rmp_serde::from_slice (&magic_header).map_err (PtthMagicHeaderNotMsgPack)?; let resp_parts: http_serde::ResponseParts = rmp_serde::from_read_ref (&magic_header).map_err (PtthMagicHeaderNotMsgPack)?;
// Intercept the body packets here so we can check when the stream // Intercept the body packets here so we can check when the stream
// ends or errors out // ends or errors out
@ -188,7 +188,7 @@ pub async fn handle_response (
}; };
// UKAUFFY4 (Send half) // UKAUFFY4 (Send half)
if tx.tx.send (Ok ((resp_parts, body))).is_err () { if tx.send (Ok ((resp_parts, body))).is_err () {
let msg = "Failed to connect to client"; let msg = "Failed to connect to client";
error! (msg); error! (msg);
return Ok (error_reply (StatusCode::BAD_GATEWAY, msg)?); return Ok (error_reply (StatusCode::BAD_GATEWAY, msg)?);

View File

@ -8,22 +8,16 @@ license = "AGPL-3.0"
categories = ["command-line-utilities", "web-programming::http-server"] categories = ["command-line-utilities", "web-programming::http-server"]
description = "The server for PTTH" description = "The server for PTTH"
repository = "https://six-five-six-four.com/git/reactor/ptth"
documentation = "https://docs.rs/ptth_server/" documentation = "https://docs.rs/ptth_server/"
default-run = "ptth_server" default-run = "ptth_server"
[dependencies] [dependencies]
# Cookie 01FYZ3SDP2XABT7W19ACQVYKXT
# Dependencies should be in sync because ptth_multi_call_server intentionally
# tries to re-use as much code as possible between all of its subcommands,
# including ptth_server and ptth_file_server.
aho-corasick = "0.7.15" aho-corasick = "0.7.15"
anyhow = "1.0.38" anyhow = "1.0.38"
arc-swap = "1.2.0" arc-swap = "1.2.0"
base64 = "0.13.0" base64 = "0.13.0"
blake3 = "1.0.0" blake3 = "0.3.7"
chrono = {version = "0.4.19", features = ["serde"]} chrono = {version = "0.4.19", features = ["serde"]}
futures = "0.3.7" futures = "0.3.7"
handlebars = "3.5.1" handlebars = "3.5.1"
@ -34,13 +28,12 @@ pulldown-cmark = { version = "0.8.0", optional = true }
rand = "0.8.3" rand = "0.8.3"
regex = "1.4.1" regex = "1.4.1"
rmp-serde = "0.15.5" rmp-serde = "0.15.5"
rust-embed = "6.2.0"
rusty_ulid = "0.10.1" rusty_ulid = "0.10.1"
serde = {version = "1.0.117", features = ["derive"]} serde = {version = "1.0.117", features = ["derive"]}
serde_json = "1.0.60" serde_json = "1.0.60"
structopt = "0.3.20" structopt = "0.3.20"
thiserror = "1.0.24" thiserror = "1.0.24"
tokio = { version = "1.17.0", features = [] } tokio = { version = "1.8.1", features = [] }
tokio-stream = "0.1.3" tokio-stream = "0.1.3"
tracing = "0.1.25" tracing = "0.1.25"
tracing-futures = "0.2.5" tracing-futures = "0.2.5"
@ -51,7 +44,7 @@ always_equal = { path = "../always_equal", version = "1.0.0" }
ptth_core = { path = "../ptth_core", version = "2.0.0" } ptth_core = { path = "../ptth_core", version = "2.0.0" }
[dependencies.reqwest] [dependencies.reqwest]
version = "0.11.10" version = "0.11.1"
default-features = false default-features = false
features = ["stream", "rustls-tls", "hyper-rustls"] features = ["stream", "rustls-tls", "hyper-rustls"]

File diff suppressed because one or more lines are too long

View File

@ -1,12 +1,126 @@
#![warn (clippy::pedantic)]
use std::{ use std::{
iter::FromIterator, fs::File,
path::{Path, PathBuf},
}; };
use structopt::StructOpt;
use ptth_server::{
load_toml,
prelude::*,
run_server,
};
#[derive (Debug, StructOpt)]
struct Opt {
#[structopt (long)]
auto_gen_key: bool,
#[structopt (long)]
throttle_upload: bool,
#[structopt (long)]
file_server_root: Option <PathBuf>,
#[structopt (long)]
asset_root: Option <PathBuf>,
#[structopt (long)]
config_path: Option <PathBuf>,
#[structopt (long)]
name: Option <String>,
#[structopt (long)]
print_tripcode: bool,
#[structopt (long)]
relay_url: Option <String>,
}
#[derive (Default, serde::Deserialize)]
pub struct ConfigFile {
pub name: Option <String>,
pub api_key: String,
pub relay_url: Option <String>,
pub file_server_root: Option <PathBuf>,
}
fn gen_and_save_key (path: &Path) -> anyhow::Result <()> {
let api_key = ptth_core::gen_key ();
let mut f = File::create (path).with_context (|| format! ("Can't create config file `{:?}`", path))?;
#[cfg (unix)]
{
use std::os::unix::fs::PermissionsExt;
let metadata = f.metadata ()?;
let mut permissions = metadata.permissions ();
permissions.set_mode (0o600);
f.set_permissions (permissions)?;
}
#[cfg (not (unix))]
{
tracing::warn! ("Error VR6VW5QT: API keys aren't protected from clients on non-Unix OSes yet");
}
f.write_all (format! ("api_key = \"{}\"\n", api_key).as_bytes ())?;
Ok (())
}
#[tokio::main] #[tokio::main]
async fn main () -> anyhow::Result <()> { async fn main () -> Result <(), anyhow::Error> {
tracing_subscriber::fmt::init (); tracing_subscriber::fmt::init ();
let args = Vec::from_iter (std::env::args_os ()); let opt = Opt::from_args ();
let asset_root = opt.asset_root;
ptth_server::executable::main (&args).await let path = opt.config_path.clone ().unwrap_or_else (|| PathBuf::from ("./config/ptth_server.toml"));
let config_file: ConfigFile = if opt.auto_gen_key {
// If we're in autonomous mode, try harder to fix things
match load_toml::load (&path) {
Err (_) => {
gen_and_save_key (&path)?;
load_toml::load (&path)?
},
Ok (x) => x,
}
}
else {
match load_toml::load (&path) {
Err (ptth_server::errors::LoadTomlError::Io (_)) => bail! ("API key not provided in config file and auto-gen-key not provided"),
Ok (x) => x,
Err (e) => return Err (e.into ()),
}
};
let config_file = ptth_server::ConfigFile {
name: opt.name.or (config_file.name).ok_or (anyhow::anyhow! ("`name` must be provided in command line or config file"))?,
api_key: config_file.api_key,
relay_url: opt.relay_url.or (config_file.relay_url).ok_or (anyhow::anyhow! ("`--relay-url` must be provided in command line or `relay_url` in config file"))?,
file_server_root: opt.file_server_root.or (config_file.file_server_root),
throttle_upload: opt.throttle_upload,
};
if opt.print_tripcode {
println! (r#"name = "{}""#, config_file.name);
println! (r#"tripcode = "{}""#, config_file.tripcode ());
return Ok (());
}
run_server (
config_file,
ptth_core::graceful_shutdown::init (),
Some (path),
asset_root
).await?;
Ok (())
} }

View File

@ -63,15 +63,9 @@ pub enum ServerError {
#[error ("Can't build HTTP client")] #[error ("Can't build HTTP client")]
CantBuildHttpClient (reqwest::Error), CantBuildHttpClient (reqwest::Error),
#[error ("Can't get response from server in Step 3")]
Step3Response (reqwest::Error),
#[error ("Can't collect non-200 error response body in Step 3")] #[error ("Can't collect non-200 error response body in Step 3")]
Step3CollectBody (reqwest::Error), Step3CollectBody (reqwest::Error),
#[error ("Step 3 unknown error")]
Step3Unknown,
#[error ("Can't collect wrapped requests in Step 3")] #[error ("Can't collect wrapped requests in Step 3")]
CantCollectWrappedRequests (reqwest::Error), CantCollectWrappedRequests (reqwest::Error),

View File

@ -5,7 +5,7 @@
use std::{ use std::{
cmp::min, cmp::min,
collections::*, collections::HashMap,
convert::{Infallible, TryFrom}, convert::{Infallible, TryFrom},
io::SeekFrom, io::SeekFrom,
path::{ path::{
@ -32,6 +32,7 @@ use tokio::{
channel, channel,
}, },
}; };
use tracing::instrument;
use ptth_core::{ use ptth_core::{
http_serde::{ http_serde::{
@ -55,8 +56,7 @@ use errors::FileServerError;
#[derive (Default)] #[derive (Default)]
pub struct Config { pub struct Config {
pub file_server_root: PathBuf, pub file_server_root: Option <PathBuf>,
pub file_server_roots: BTreeMap <String, PathBuf>,
} }
pub struct FileServer { pub struct FileServer {
@ -69,7 +69,7 @@ pub struct FileServer {
impl FileServer { impl FileServer {
pub fn new ( pub fn new (
config: Config, file_server_root: Option <PathBuf>,
asset_root: &Path, asset_root: &Path,
name: String, name: String,
metrics_interval: Arc <ArcSwap <Option <metrics::Interval>>>, metrics_interval: Arc <ArcSwap <Option <metrics::Interval>>>,
@ -77,7 +77,9 @@ impl FileServer {
) -> Result <Self, FileServerError> ) -> Result <Self, FileServerError>
{ {
Ok (Self { Ok (Self {
config, config: Config {
file_server_root,
},
handlebars: load_templates (asset_root)?, handlebars: load_templates (asset_root)?,
metrics_startup: metrics::Startup::new (name), metrics_startup: metrics::Startup::new (name),
metrics_interval, metrics_interval,
@ -137,9 +139,8 @@ async fn serve_dir_json (
Ok (response) Ok (response)
} }
// #[instrument (level = "debug", skip (f))] #[instrument (level = "debug", skip (f))]
async fn serve_file ( async fn serve_file (
uri: &str,
mut f: File, mut f: File,
client_wants_body: bool, client_wants_body: bool,
range: range::ValidParsed, range: range::ValidParsed,
@ -164,26 +165,22 @@ async fn serve_file (
let (range, range_requested) = (range.range, range.range_requested); let (range, range_requested) = (range.range, range.range_requested);
info! ("Serving range {}-{}", range.start, range.end);
let content_length = range.end - range.start; let content_length = range.end - range.start;
let body = if decision.should_send_body { let body = if decision.should_send_body {
trace! ("Sending range {}-{}", range.start, range.end);
let seek = SeekFrom::Start (range.start); let seek = SeekFrom::Start (range.start);
f.seek (seek).await?; f.seek (seek).await?;
let (tx, rx) = channel (1); let (tx, rx) = channel (1);
tokio::spawn (async move { tokio::spawn (async move {
debug! ("stream_file task begin");
stream_file (f, content_length, tx).await; stream_file (f, content_length, tx).await;
debug! ("stream_file task end");
}); });
Some (rx) Some (rx)
} }
else { else {
debug! ("Not sending body");
None None
}; };
@ -200,12 +197,11 @@ async fn serve_file (
// - no-cache - Clients and the relay can store this, but should revalidate // - no-cache - Clients and the relay can store this, but should revalidate
// with the origin server (us) because only we can check if the file // with the origin server (us) because only we can check if the file
// changed on disk. // changed on disk.
// - max-age=5 - Caches can keep the file for 5 seconds. This is just enough // - max-age=0 - The file might change at any point during or after the
// to let a cache like Nginx or Varnish on a relay soak up some of a // request, so for proper invalidation, the client should immediately
// slashdotting for us, but not so much that a low-traffic site would // consider it stale.
// suffer from seeing stale data.
response.header ("cache-control".to_string (), b"max-age=2".to_vec ()); response.header ("cache-control".to_string (), b"no-cache,max-age=0".to_vec ());
if let Some (etag) = input.actual_etag { if let Some (etag) = input.actual_etag {
response.header ("etag".to_string (), etag); response.header ("etag".to_string (), etag);
}; };
@ -215,17 +211,6 @@ async fn serve_file (
response.header (String::from ("content-range"), format! ("bytes {}-{}/{}", range.start, range.end - 1, range.end).into_bytes ()); response.header (String::from ("content-range"), format! ("bytes {}-{}/{}", range.start, range.end - 1, range.end).into_bytes ());
} }
// Guess MIME type based on the URI so that we can serve web games
if uri.ends_with (".js") {
response.header ("content-type".into (), b"application/javascript".to_vec ());
}
else if uri.ends_with (".html") {
response.header ("content-type".into (), b"text/html; charset=UTF-8".to_vec ());
}
response.header ("content-length".into (), content_length.to_string ().into_bytes ());
response.content_length = Some (content_length); response.content_length = Some (content_length);
if let Some (body) = body { if let Some (body) = body {
@ -254,7 +239,6 @@ fn serve_file_decision (input: &ServeFileInput) -> ServeFileOutput
if let (Some (if_none_match), Some (actual_etag)) = (&input.if_none_match, &input.actual_etag) if let (Some (if_none_match), Some (actual_etag)) = (&input.if_none_match, &input.actual_etag)
{ {
if actual_etag == if_none_match { if actual_etag == if_none_match {
// info! ("Not Modified");
return ServeFileOutput { return ServeFileOutput {
status_code: StatusCode::NotModified, status_code: StatusCode::NotModified,
should_send_body: false, should_send_body: false,
@ -321,12 +305,12 @@ async fn stream_file (
break; break;
} }
buffer.truncate (bytes_read);
let bytes_read_64 = u64::try_from (bytes_read).expect ("Couldn't fit usize into u64"); let bytes_read_64 = u64::try_from (bytes_read).expect ("Couldn't fit usize into u64");
let bytes_read_64 = min (bytes_left, bytes_read_64); let bytes_read_64 = min (bytes_left, bytes_read_64);
buffer.truncate (bytes_read_64 as usize);
if tx.send (Ok::<_, Infallible> (buffer)).await.is_err () { if tx.send (Ok::<_, Infallible> (buffer)).await.is_err () {
warn! ("Cancelling file stream (Sent {} out of {} bytes)", bytes_sent, content_length); warn! ("Cancelling file stream (Sent {} out of {} bytes)", bytes_sent, content_length);
break; break;
@ -334,7 +318,7 @@ async fn stream_file (
bytes_left -= bytes_read_64; bytes_left -= bytes_read_64;
if bytes_left == 0 { if bytes_left == 0 {
// debug! ("Finished"); debug! ("Finished");
break; break;
} }
@ -354,7 +338,7 @@ impl FileServer {
/// Passes a request to the internal file server logic. /// Passes a request to the internal file server logic.
/// Returns an HTTP response as HTML or JSON, depending on the request. /// Returns an HTTP response as HTML or JSON, depending on the request.
// #[instrument (level = "debug", skip (self, headers))] #[instrument (level = "debug", skip (self, headers))]
pub async fn serve_all ( pub async fn serve_all (
&self, &self,
method: Method, method: Method,
@ -380,12 +364,12 @@ impl FileServer {
resp resp
} }
let roots = internal::FileRoots { let default_root = PathBuf::from ("./");
files: &self.config.file_server_root, let root: &std::path::Path = self.config.file_server_root
dirs: &self.config.file_server_roots, .as_ref ()
}; .unwrap_or (&default_root);
Ok (match internal::serve_all (roots, method, uri, headers, self.hidden_path.as_deref ()).await? { Ok (match internal::serve_all (root, method, uri, headers, self.hidden_path.as_deref ()).await? {
Favicon => serve_error (StatusCode::NotFound, "Not found\n"), Favicon => serve_error (StatusCode::NotFound, "Not found\n"),
Forbidden => serve_error (StatusCode::Forbidden, "403 Forbidden\n"), Forbidden => serve_error (StatusCode::Forbidden, "403 Forbidden\n"),
MethodNotAllowed => serve_error (StatusCode::MethodNotAllowed, "Unsupported method\n"), MethodNotAllowed => serve_error (StatusCode::MethodNotAllowed, "Unsupported method\n"),
@ -418,32 +402,47 @@ impl FileServer {
file, file,
send_body, send_body,
range, range,
}) => serve_file (uri, file.into_inner (), send_body, range, headers.get ("if-none-match").map (|v| &v[..])).await?, }) => serve_file (file.into_inner (), send_body, range, headers.get ("if-none-match").map (|v| &v[..])).await?,
MarkdownErr (e) => {
#[cfg (feature = "markdown")]
{
use markdown::Error::*;
let e = e.inner;
let code = match &e {
TooBig => StatusCode::InternalServerError,
//NotMarkdown => serve_error (StatusCode::BadRequest, "File is not Markdown"),
NotUtf8 => StatusCode::BadRequest,
};
return Ok (serve_error (code, e.to_string ()));
}
#[cfg (not (feature = "markdown"))]
{
let _e = e;
serve_error (StatusCode::BadRequest, "Markdown feature is disabled")
}
},
MarkdownPreview (s) => html::serve (s),
}) })
} }
} }
fn load_templates ( fn load_templates (
_asset_root: &Path asset_root: &Path
) )
-> anyhow::Result <Handlebars <'static>> -> Result <Handlebars <'static>, anyhow::Error>
{ {
use rust_embed::RustEmbed;
#[derive (RustEmbed)]
#[folder = "../../handlebars/server"]
struct HandlebarsServer;
let mut handlebars = Handlebars::new (); let mut handlebars = Handlebars::new ();
handlebars.set_strict_mode (true); handlebars.set_strict_mode (true);
let asset_root = asset_root.join ("handlebars/server");
for (k, v) in &[ for (k, v) in &[
("file_server_dir", "file_server_dir.html"), ("file_server_dir", "file_server_dir.html"),
("file_server_root", "file_server_root.html"), ("file_server_root", "file_server_root.html"),
] { ] {
let asset_file = HandlebarsServer::get (v) handlebars.register_template_file (k, asset_root.join (v))?;
.ok_or_else (|| anyhow::anyhow! ("failed to load handlebars template file"))?;
let s = std::str::from_utf8 (asset_file.data.as_ref ())?;
handlebars.register_template_string (k, s)?;
} }
Ok (handlebars) Ok (handlebars)

View File

@ -4,7 +4,7 @@
// human-readable HTML // human-readable HTML
use std::{ use std::{
collections::*, collections::HashMap,
path::{Path, PathBuf}, path::{Path, PathBuf},
}; };
@ -76,6 +76,9 @@ pub enum Response {
Root, Root,
ServeDir (ServeDirParams), ServeDir (ServeDirParams),
ServeFile (ServeFileParams), ServeFile (ServeFileParams),
MarkdownErr (MarkdownErrWrapper),
MarkdownPreview (String),
} }
#[cfg (feature = "markdown")] #[cfg (feature = "markdown")]
@ -128,7 +131,7 @@ fn serve_dir (
async fn serve_file ( async fn serve_file (
file: tokio::fs::File, file: tokio::fs::File,
_uri: &http::Uri, uri: &http::Uri,
send_body: bool, send_body: bool,
headers: &HashMap <String, Vec <u8>> headers: &HashMap <String, Vec <u8>>
) )
@ -241,59 +244,11 @@ async fn serve_api (
Ok (NotFound) Ok (NotFound)
} }
#[derive (Clone, Copy)]
pub struct FileRoots <'a> {
pub files: &'a Path,
pub dirs: &'a BTreeMap <String, PathBuf>,
}
struct RoutedPath <'a> {
root: &'a Path,
path_s: std::borrow::Cow <'a, str>,
}
impl <'a> FileRoots <'a> {
fn route (self, input: &'a str) -> Result <Option <RoutedPath>, FileServerError> {
// TODO: There is totally a dir traversal attack in here somewhere
if let Some (path) = input.strip_prefix ("/dirs/") {
if let Some ((dir, path)) = path.split_once ('/') {
let root = match self.dirs.get (dir) {
None => return Ok (None),
Some (x) => x,
};
let path_s = percent_decode (path.as_bytes ()).decode_utf8 ().map_err (FileServerError::PathNotUtf8)?;
return Ok (Some (RoutedPath {
root,
path_s,
}));
}
else {
return Ok (None);
}
}
if let Some (path) = input.strip_prefix ("/files/") {
let encoded_path = &path [0..];
let path_s = percent_decode (encoded_path.as_bytes ()).decode_utf8 ().map_err (FileServerError::PathNotUtf8)?;
return Ok (Some (RoutedPath {
root: self.files,
path_s,
}));
}
return Ok (None);
}
}
// Handle the requests internally without knowing anything about PTTH or // Handle the requests internally without knowing anything about PTTH or
// HTML / handlebars // HTML / handlebars
pub async fn serve_all ( pub async fn serve_all (
roots: FileRoots <'_>, root: &Path,
method: Method, method: Method,
uri: &str, uri: &str,
headers: &HashMap <String, Vec <u8>>, headers: &HashMap <String, Vec <u8>>,
@ -304,7 +259,7 @@ pub async fn serve_all (
use std::str::FromStr; use std::str::FromStr;
use Response::*; use Response::*;
trace! ("Client requested {}", uri); info! ("Client requested {}", uri);
let uri = http::Uri::from_str (uri).map_err (FileServerError::InvalidUri)?; let uri = http::Uri::from_str (uri).map_err (FileServerError::InvalidUri)?;
@ -328,22 +283,24 @@ pub async fn serve_all (
} }
if let Some (path) = path.strip_prefix ("/api") { if let Some (path) = path.strip_prefix ("/api") {
return serve_api (roots.files, &uri, hidden_path, path).await; return serve_api (root, &uri, hidden_path, path).await;
} }
let RoutedPath { let path = match path.strip_prefix ("/files/") {
root,
path_s,
} = match roots.route (path)? {
None => return Ok (NotFound),
Some (x) => x, Some (x) => x,
None => return Ok (NotFound),
}; };
// TODO: There is totally a dir traversal attack in here somewhere
let encoded_path = &path [0..];
let path_s = percent_decode (encoded_path.as_bytes ()).decode_utf8 ().map_err (FileServerError::PathNotUtf8)?;
let path = Path::new (&*path_s); let path = Path::new (&*path_s);
let full_path = root.join (path); let full_path = root.join (path);
trace! ("full_path = {:?}", full_path); debug! ("full_path = {:?}", full_path);
if let Some (hidden_path) = hidden_path { if let Some (hidden_path) = hidden_path {
if full_path == hidden_path { if full_path == hidden_path {

View File

@ -67,25 +67,29 @@ fn file_server () {
let rt = Runtime::new ().expect ("Can't create runtime"); let rt = Runtime::new ().expect ("Can't create runtime");
rt.block_on (async { rt.block_on (async {
let files_root = PathBuf::from ("./"); let file_server_root = PathBuf::from ("./");
let dirs_roots = Default::default ();
let roots = internal::FileRoots {
files: &files_root,
dirs: &dirs_roots,
};
let headers = Default::default (); let headers = Default::default ();
{ {
use internal::Response::*; use internal::Response::*;
use crate::file_server::FileServerError; use crate::file_server::FileServerError;
let bad_passwords_path = "/files/src/bad_passwords.txt";
for (uri_path, expected) in vec! [ for (uri_path, expected) in vec! [
("/", Root), ("/", Root),
("/files", NotFound), ("/files", NotFound),
("/files/?", InvalidQuery), ("/files/?", InvalidQuery),
("/files/src", Redirect ("src/".to_string ())), ("/files/src", Redirect ("src/".to_string ())),
("/files/src/?", InvalidQuery), ("/files/src/?", InvalidQuery),
(bad_passwords_path, ServeFile (internal::ServeFileParams {
send_body: true,
range: range::ValidParsed {
range: 0..1_048_576,
range_requested: false,
},
file: AlwaysEqual::testing_blank (),
})),
("/files/test/test.md", ServeFile (internal::ServeFileParams { ("/files/test/test.md", ServeFile (internal::ServeFileParams {
send_body: true, send_body: true,
range: range::ValidParsed { range: range::ValidParsed {
@ -96,7 +100,7 @@ fn file_server () {
})), })),
] { ] {
let resp = internal::serve_all ( let resp = internal::serve_all (
roots, &file_server_root,
Method::Get, Method::Get,
uri_path, uri_path,
&headers, &headers,
@ -113,7 +117,7 @@ fn file_server () {
}), }),
] { ] {
let resp = internal::serve_all ( let resp = internal::serve_all (
roots, &file_server_root,
Method::Get, Method::Get,
uri_path, uri_path,
&headers, &headers,
@ -122,6 +126,35 @@ fn file_server () {
checker (resp.unwrap_err ()); checker (resp.unwrap_err ());
} }
let resp = internal::serve_all (
&file_server_root,
Method::Get,
bad_passwords_path,
&hashmap! {
"range".into () => b"bytes=0-2000000".to_vec (),
},
None
).await;
assert_eq! (resp.expect ("Should be Ok (_)"), RangeNotSatisfiable (1_048_576));
let resp = internal::serve_all (
&file_server_root,
Method::Head,
bad_passwords_path,
&headers,
None
).await;
assert_eq! (resp.expect ("Should be Ok (_)"), ServeFile (internal::ServeFileParams {
send_body: false,
range: range::ValidParsed {
range: 0..1_048_576,
range_requested: false,
},
file: AlwaysEqual::testing_blank (),
}));
} }
}); });
} }

View File

@ -39,19 +39,7 @@
// False positive on futures::select! macro // False positive on futures::select! macro
#![allow (clippy::mut_mut)] #![allow (clippy::mut_mut)]
pub mod errors;
/// In-process file server module with byte range and ETag support
pub mod file_server;
/// Load and de-serialize structures from TOML, with a size limit
/// and checking permissions (On Unix)
pub mod load_toml;
pub mod prelude;
use std::{ use std::{
collections::*,
future::Future, future::Future,
path::PathBuf, path::PathBuf,
sync::Arc, sync::Arc,
@ -70,11 +58,21 @@ use tokio_stream::wrappers::ReceiverStream;
use ptth_core::{ use ptth_core::{
http_serde, http_serde,
prelude::*,
}; };
// use crate::key_validity::BlakeHashWrapper;
pub mod errors;
/// In-process file server module with byte range and ETag support
pub mod file_server;
/// Load and de-serialize structures from TOML, with a size limit
/// and checking permissions (On Unix)
pub mod load_toml;
pub mod prelude;
use errors::ServerError; use errors::ServerError;
use prelude::*;
pub struct State { pub struct State {
// file_server: file_server::FileServer, // file_server: file_server::FileServer,
@ -91,18 +89,14 @@ async fn handle_one_req (
response: http_serde::Response, response: http_serde::Response,
) -> Result <(), ServerError> ) -> Result <(), ServerError>
{ {
let url = format! ("{}/http_response/{}", state.config.relay_url, req_id);
let mut resp_req = state.client let mut resp_req = state.client
.post (&url) .post (&format! ("{}/http_response/{}", state.config.relay_url, req_id))
.header (ptth_core::PTTH_MAGIC_HEADER, base64::encode (rmp_serde::to_vec (&response.parts).map_err (ServerError::MessagePackEncodeResponse)?)) .header (ptth_core::PTTH_MAGIC_HEADER, base64::encode (rmp_serde::to_vec (&response.parts).map_err (ServerError::MessagePackEncodeResponse)?))
.header ("X-PTTH-SERVER-NAME", &state.config.name); .header ("X-PTTH-SERVER-NAME", &state.config.name);
if response.parts.status_code != ptth_core::http_serde::StatusCode::NotModified { if let Some (length) = response.content_length {
if let Some (length) = response.content_length { resp_req = resp_req.header ("Content-Length", length.to_string ());
resp_req = resp_req.header ("Content-Length", length.to_string ());
}
} }
if let Some (mut body) = response.body { if let Some (mut body) = response.body {
if state.config.throttle_upload { if state.config.throttle_upload {
// Spawn another task to throttle the chunks // Spawn another task to throttle the chunks
@ -133,14 +127,14 @@ async fn handle_one_req (
let req = resp_req.build ().map_err (ServerError::Step5Responding)?; let req = resp_req.build ().map_err (ServerError::Step5Responding)?;
trace! ("{:?}", req.headers ()); debug! ("{:?}", req.headers ());
//println! ("Step 6"); //println! ("Step 6");
match state.client.execute (req).await { match state.client.execute (req).await {
Ok (r) => { Ok (r) => {
let status = r.status (); let status = r.status ();
let text = r.text ().await.map_err (ServerError::Step7AfterResponse)?; let text = r.text ().await.map_err (ServerError::Step7AfterResponse)?;
debug! ("http_response {} {:?} {:?}", req_id, status, text); debug! ("{:?} {:?}", status, text);
}, },
Err (e) => { Err (e) => {
if e.is_request () { if e.is_request () {
@ -156,7 +150,7 @@ async fn handle_one_req (
async fn handle_requests <F, H, SH> ( async fn handle_requests <F, H, SH> (
state: &Arc <State>, state: &Arc <State>,
wrapped_reqs: Vec <http_serde::WrappedRequest>, req_resp: reqwest::Response,
spawn_handler: &mut SH, spawn_handler: &mut SH,
) -> Result <(), ServerError> ) -> Result <(), ServerError>
where where
@ -166,6 +160,18 @@ SH: Send + FnMut () -> H
{ {
//println! ("Step 1"); //println! ("Step 1");
let body = req_resp.bytes ().await.map_err (ServerError::CantCollectWrappedRequests)?;
let wrapped_reqs: Vec <http_serde::WrappedRequest> = match rmp_serde::from_read_ref (&body)
{
Ok (x) => x,
Err (e) => {
error! ("Can't parse wrapped requests: {:?}", e);
return Err (ServerError::CantParseWrappedRequests (e));
},
};
debug! ("Unwrapped {} requests", wrapped_reqs.len ());
for wrapped_req in wrapped_reqs { for wrapped_req in wrapped_reqs {
let state = Arc::clone (&state); let state = Arc::clone (&state);
let handler = spawn_handler (); let handler = spawn_handler ();
@ -175,15 +181,12 @@ SH: Send + FnMut () -> H
tokio::spawn (async move { tokio::spawn (async move {
let (req_id, parts) = (wrapped_req.id, wrapped_req.req); let (req_id, parts) = (wrapped_req.id, wrapped_req.req);
info! ("Req {} {}", req_id, parts.uri); debug! ("Handling request {}", req_id);
let f = handler (parts); let f = handler (parts);
let response = f.await?; let response = f.await?;
let output = handle_one_req (&state, req_id.clone (), response).await; handle_one_req (&state, req_id, response).await
debug! ("Req {} task exiting", req_id);
output
}); });
} }
@ -210,22 +213,11 @@ pub struct ConfigFile {
pub relay_url: String, pub relay_url: String,
/// Directory that the file server module will expose to clients /// Directory that the file server module will expose to clients
/// over the relay, under `/files`. If None, the current working dir is used. /// over the relay. If None, the current working dir is used.
pub file_server_root: PathBuf, pub file_server_root: Option <PathBuf>,
/// The file server module will expose these directories to clients under
/// `/dirs`. If symlinks can't be used (like on Windows), this allows PTTH
/// to serve multiple directories easily.
pub file_server_roots: BTreeMap <String, PathBuf>,
/// For debugging. /// For debugging.
pub throttle_upload: bool, pub throttle_upload: bool,
pub client_keys: HashSet <String>,
pub allow_any_client: bool,
pub index_directories: bool,
} }
impl ConfigFile { impl ConfigFile {
@ -235,12 +227,8 @@ impl ConfigFile {
name, name,
api_key, api_key,
relay_url, relay_url,
file_server_root: PathBuf::from ("."), file_server_root: None,
file_server_roots: Default::default (),
throttle_upload: false, throttle_upload: false,
client_keys: Default::default (),
allow_any_client: true,
index_directories: true,
} }
} }
@ -266,6 +254,8 @@ pub struct Config {
pub struct Builder { pub struct Builder {
config_file: ConfigFile, config_file: ConfigFile,
hidden_path: Option <PathBuf>,
asset_root: Option <PathBuf>,
} }
impl Builder { impl Builder {
@ -277,23 +267,23 @@ impl Builder {
name, name,
api_key: ptth_core::gen_key (), api_key: ptth_core::gen_key (),
relay_url, relay_url,
file_server_root: PathBuf::from ("."), file_server_root: None,
file_server_roots: Default::default (),
throttle_upload: false, throttle_upload: false,
client_keys: Default::default (),
allow_any_client: true,
index_directories: true,
}; };
Self { Self {
config_file, config_file,
hidden_path: None,
asset_root: None,
} }
} }
pub fn build (self) -> Result <State, ServerError> pub fn build (self) -> Result <State, ServerError>
{ {
State::new ( State::new (
self.config_file self.config_file,
self.hidden_path,
self.asset_root
) )
} }
@ -309,8 +299,7 @@ pub async fn run_server (
config_file: ConfigFile, config_file: ConfigFile,
shutdown_oneshot: oneshot::Receiver <()>, shutdown_oneshot: oneshot::Receiver <()>,
hidden_path: Option <PathBuf>, hidden_path: Option <PathBuf>,
asset_root: Option <PathBuf>, asset_root: Option <PathBuf>
hit_counter: Option <mpsc::Sender <()>>,
) )
-> Result <(), ServerError> -> Result <(), ServerError>
{ {
@ -321,11 +310,8 @@ pub async fn run_server (
}); });
let file_server = file_server::FileServer::new ( let file_server = file_server::FileServer::new (
file_server::Config { config_file.file_server_root.clone (),
file_server_root: config_file.file_server_root.clone (), &asset_root.clone ().unwrap_or_else (PathBuf::new),
file_server_roots: config_file.file_server_roots.clone (),
},
&asset_root.clone ().unwrap_or_else (|| PathBuf::from (".")),
config_file.name.clone (), config_file.name.clone (),
metrics_interval, metrics_interval,
hidden_path.clone (), hidden_path.clone (),
@ -334,17 +320,16 @@ pub async fn run_server (
let state = Arc::new (State::new ( let state = Arc::new (State::new (
config_file, config_file,
hidden_path,
asset_root,
)?); )?);
let file_server_2 = Arc::clone (&file_server);
let mut spawn_handler = || { let mut spawn_handler = || {
let file_server = Arc::clone (&file_server); let file_server = Arc::clone (&file_server);
let hit_counter = hit_counter.clone ();
|req: http_serde::RequestParts| async move { |req: http_serde::RequestParts| async move {
if let Some (hit_tx) = &hit_counter {
eprintln! ("hit_tx.send");
hit_tx.send (()).await.ok ();
}
Ok (file_server.serve_all (req.method, &req.uri, &req.headers).await?) Ok (file_server.serve_all (req.method, &req.uri, &req.headers).await?)
} }
}; };
@ -359,23 +344,23 @@ pub async fn run_server (
impl State { impl State {
pub fn new ( pub fn new (
config_file: ConfigFile, config_file: ConfigFile,
hidden_path: Option <PathBuf>,
asset_root: Option <PathBuf>
) )
-> Result <Self, ServerError> -> Result <Self, ServerError>
{ {
use std::convert::TryInto; use std::convert::TryInto;
let asset_root = asset_root.unwrap_or_else (PathBuf::new);
info! ("Server name is {}", config_file.name); info! ("Server name is {}", config_file.name);
info! ("Tripcode is {}", config_file.tripcode ()); info! ("Tripcode is {}", config_file.tripcode ());
let mut headers = reqwest::header::HeaderMap::new (); let mut headers = reqwest::header::HeaderMap::new ();
headers.insert ("X-ApiKey", config_file.api_key.try_into ().map_err (ServerError::ApiKeyInvalid)?); headers.insert ("X-ApiKey", config_file.api_key.try_into ().map_err (ServerError::ApiKeyInvalid)?);
// Cookie 01FYZ3W64SM6KYNP48J6EWSCEF
// Try to keep the Clients similar here
let client = Client::builder () let client = Client::builder ()
.default_headers (headers) .default_headers (headers)
.timeout (Duration::from_secs (7 * 86400))
.connect_timeout (Duration::from_secs (30)) .connect_timeout (Duration::from_secs (30))
.build ().map_err (ServerError::CantBuildHttpClient)?; .build ().map_err (ServerError::CantBuildHttpClient)?;
@ -390,35 +375,6 @@ impl State {
Ok (state) Ok (state)
} }
async fn http_listen (
state: &Arc <Self>,
) -> Result <Vec <http_serde::WrappedRequest>, ServerError>
{
use http::status::StatusCode;
let req_resp = state.client.get (&format! ("{}/http_listen/{}", state.config.relay_url, state.config.name))
.timeout (Duration::from_secs (30))
.send ().await.map_err (ServerError::Step3Response)?;
if req_resp.status () == StatusCode::NO_CONTENT {
return Ok (Vec::new ());
}
if req_resp.status () != StatusCode::OK {
error! ("{}", req_resp.status ());
let body = req_resp.bytes ().await.map_err (ServerError::Step3CollectBody)?;
let body = String::from_utf8 (body.to_vec ()).map_err (ServerError::Step3ErrorResponseNotUtf8)?;
error! ("{}", body);
return Err (ServerError::Step3Unknown);
}
let body = req_resp.bytes ().await.map_err (ServerError::CantCollectWrappedRequests)?;
let wrapped_reqs: Vec <http_serde::WrappedRequest> = rmp_serde::from_slice (&body)
.map_err (ServerError::CantParseWrappedRequests)?;
Ok (wrapped_reqs)
}
pub async fn run <F, H, SH> ( pub async fn run <F, H, SH> (
state: &Arc <Self>, state: &Arc <Self>,
@ -430,10 +386,12 @@ impl State {
H: Send + 'static + FnOnce (http_serde::RequestParts) -> F, H: Send + 'static + FnOnce (http_serde::RequestParts) -> F,
SH: Send + FnMut () -> H SH: Send + FnMut () -> H
{ {
use http::status::StatusCode;
let mut backoff_delay = 0; let mut backoff_delay = 0;
let mut shutdown_oneshot = shutdown_oneshot.fuse (); let mut shutdown_oneshot = shutdown_oneshot.fuse ();
for i in 0u64.. { loop {
// TODO: Extract loop body to function? // TODO: Extract loop body to function?
if backoff_delay > 0 { if backoff_delay > 0 {
@ -449,37 +407,61 @@ impl State {
} }
} }
trace! ("http_listen {}...", i); debug! ("http_listen");
let http_listen_fut = Self::http_listen (state); let req_req = state.client.get (&format! ("{}/http_listen/{}", state.config.relay_url, state.config.name))
.timeout (Duration::from_secs (30))
.send ();
let http_listen = futures::select! { let err_backoff_delay = std::cmp::min (30_000, backoff_delay * 2 + 500);
r = http_listen_fut.fuse () => r,
let req_req = futures::select! {
r = req_req.fuse () => r,
_ = shutdown_oneshot => { _ = shutdown_oneshot => {
info! ("Received graceful shutdown"); info! ("Received graceful shutdown");
break; break;
}, },
}; };
let err_backoff_delay = std::cmp::min (30_000, backoff_delay * 2 + 500); let req_resp = match req_req {
let reqs = match http_listen {
Err (e) => { Err (e) => {
backoff_delay = err_backoff_delay; if e.is_timeout () {
error! ("http_listen {} error, backing off... {:?}", i, e); error! ("Client-side timeout. Is an overly-aggressive firewall closing long-lived connections? Is the network flakey?");
}
else {
error! ("Err: {:?}", e);
if backoff_delay != err_backoff_delay {
error! ("Non-timeout issue, increasing backoff_delay");
backoff_delay = err_backoff_delay;
}
}
continue; continue;
}, },
Ok (x) => x, Ok (x) => x,
}; };
trace! ("http_listen {} unwrapped {} requests", i, reqs.len ()); if req_resp.status () == StatusCode::NO_CONTENT {
debug! ("http_listen long poll timed out on the server, good.");
continue;
}
else if req_resp.status () != StatusCode::OK {
error! ("{}", req_resp.status ());
let body = req_resp.bytes ().await.map_err (ServerError::Step3CollectBody)?;
let body = String::from_utf8 (body.to_vec ()).map_err (ServerError::Step3ErrorResponseNotUtf8)?;
error! ("{}", body);
if backoff_delay != err_backoff_delay {
error! ("Non-timeout issue, increasing backoff_delay");
backoff_delay = err_backoff_delay;
}
continue;
}
// Unpack the requests, spawn them into new tasks, then loop back // Unpack the requests, spawn them into new tasks, then loop back
// around. // around.
if handle_requests ( if handle_requests (
&state, &state,
reqs, req_resp,
spawn_handler, spawn_handler,
).await.is_err () { ).await.is_err () {
backoff_delay = err_backoff_delay; backoff_delay = err_backoff_delay;
@ -498,141 +480,6 @@ impl State {
} }
} }
pub mod executable {
use std::{
collections::*,
path::{Path, PathBuf},
};
use structopt::StructOpt;
use super::{
load_toml,
prelude::*,
};
pub async fn main (args: &[OsString]) -> anyhow::Result <()> {
let opt = Opt::from_iter (args);
let asset_root = opt.asset_root;
let path = opt.config_path.clone ().unwrap_or_else (|| PathBuf::from ("./config/ptth_server.toml"));
let config_file: ConfigFile = if opt.auto_gen_key {
// If we're in autonomous mode, try harder to fix things
match load_toml::load (&path) {
Err (_) => {
gen_and_save_key (&path)?;
load_toml::load (&path)?
},
Ok (x) => x,
}
}
else {
match load_toml::load (&path) {
Err (super::errors::LoadTomlError::Io (_)) => bail! ("API key not provided in config file and auto-gen-key not provided"),
Ok (x) => x,
Err (e) => return Err (e.into ()),
}
};
let file_server_roots = config_file.file_server_roots
.unwrap_or_else (|| Default::default ());
// `git grep JRY5NXZU` # duplicated code?
let config_file = super::ConfigFile {
name: opt.name.or (config_file.name).ok_or (anyhow::anyhow! ("`name` must be provided in command line or config file"))?,
api_key: config_file.api_key,
relay_url: opt.relay_url.or (config_file.relay_url).ok_or (anyhow::anyhow! ("`--relay-url` must be provided in command line or `relay_url` in config file"))?,
file_server_root: opt.file_server_root.or (config_file.file_server_root).unwrap_or_else (|| PathBuf::from (".")),
file_server_roots,
throttle_upload: opt.throttle_upload,
allow_any_client: true,
client_keys: Default::default (),
index_directories: config_file.index_directories.unwrap_or (true),
};
if opt.print_tripcode {
println! (r#"name = "{}""#, config_file.name);
println! (r#"tripcode = "{}""#, config_file.tripcode ());
return Ok (());
}
super::run_server (
config_file,
ptth_core::graceful_shutdown::init (),
Some (path),
asset_root,
None,
).await?;
Ok (())
}
#[derive (Debug, StructOpt)]
struct Opt {
#[structopt (long)]
auto_gen_key: bool,
#[structopt (long)]
throttle_upload: bool,
#[structopt (long)]
file_server_root: Option <PathBuf>,
#[structopt (long)]
asset_root: Option <PathBuf>,
#[structopt (long)]
config_path: Option <PathBuf>,
#[structopt (long)]
name: Option <String>,
#[structopt (long)]
print_tripcode: bool,
#[structopt (long)]
relay_url: Option <String>,
}
#[derive (Default, serde::Deserialize)]
struct ConfigFile {
pub name: Option <String>,
pub api_key: String,
pub relay_url: Option <String>,
pub file_server_root: Option <PathBuf>,
pub file_server_roots: Option <BTreeMap <String, PathBuf>>,
pub index_directories: Option <bool>,
}
fn gen_and_save_key (path: &Path) -> anyhow::Result <()> {
use std::fs::File;
let api_key = ptth_core::gen_key ();
let mut f = File::create (path).with_context (|| format! ("Can't create config file `{:?}`", path))?;
#[cfg (unix)]
{
use std::os::unix::fs::PermissionsExt;
let metadata = f.metadata ()?;
let mut permissions = metadata.permissions ();
permissions.set_mode (0o600);
f.set_permissions (permissions)?;
}
#[cfg (not (unix))]
{
tracing::warn! ("Error VR6VW5QT: API keys aren't protected from clients on non-Unix OSes yet");
}
f.write_all (format! ("api_key = \"{}\"\n", api_key).as_bytes ())?;
Ok (())
}
}
#[cfg (test)] #[cfg (test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -24,6 +24,18 @@ fn load_inner <
Ok (toml::from_str (&config_s)?) Ok (toml::from_str (&config_s)?)
} }
/// For files that contain public-viewable information
fn load_public <
T: DeserializeOwned,
P: AsRef <Path> + Debug
> (
config_file_path: P
) -> Result <T, LoadTomlError> {
let f = File::open (&config_file_path)?;
load_inner (f)
}
/// For files that may contain secrets and should have permissions or other /// For files that may contain secrets and should have permissions or other
/// safeties checked /// safeties checked
@ -55,6 +67,5 @@ pub fn load <
> ( > (
config_file_path: P config_file_path: P
) -> Result <T, LoadTomlError> { ) -> Result <T, LoadTomlError> {
let f = File::open (&config_file_path)?; load_public (config_file_path)
load_inner (f)
} }

View File

@ -1 +1,8 @@
pub use ptth_core::prelude::*; pub use std::{
io::Write,
};
pub use anyhow::{
Context,
bail,
};

View File

@ -4,13 +4,11 @@ version = "0.1.0"
authors = ["Trish"] authors = ["Trish"]
edition = "2018" edition = "2018"
repository = "https://six-five-six-four.com/git/reactor/ptth"
[dependencies] [dependencies]
anyhow = "1.0.38" anyhow = "1.0.38"
fltk = "1.3.1" fltk = "1.1.0"
serde = {version = "1.0.117", features = ["derive"]} serde = {version = "1.0.117", features = ["derive"]}
tokio = "1.17.0" tokio = "1.4.0"
tracing = "0.1.25" tracing = "0.1.25"
tracing-subscriber = "0.2.16" tracing-subscriber = "0.2.16"

View File

@ -1,5 +1,4 @@
use std::{ use std::{
collections::*,
path::PathBuf, path::PathBuf,
}; };
@ -14,10 +13,7 @@ use fltk::{
}; };
use tokio::{ use tokio::{
sync::{ sync::oneshot,
mpsc,
oneshot,
},
}; };
struct ServerInstance { struct ServerInstance {
@ -27,7 +23,6 @@ struct ServerInstance {
#[derive (Clone, Copy)] #[derive (Clone, Copy)]
enum Message { enum Message {
Hit,
RunServer, RunServer,
StopServer, StopServer,
} }
@ -47,26 +42,7 @@ fn main ()
let app = app::App::default(); let app = app::App::default();
let mut wind = Window::new (100, 100, 500, 180, "PTTH server"); let mut wind = Window::new (100, 100, 500, 180, "PTTH server");
let config_file_opt = match ptth_server::load_toml::load::<ConfigFile, _> ("./config/ptth_server.toml") { let mut gui = Gui::new (fltk_tx);
Ok (x) => Some (x),
Err (e) => {
eprintln! ("Error in `./config/ptth_server.toml`: {:?}", e);
None
},
};
let (hit_tx, mut hit_rx) = mpsc::channel (1);
{
let fltk_tx = fltk_tx;
rt.spawn (async move {
while hit_rx.recv ().await.is_some () {
fltk_tx.send (Message::Hit);
}
});
}
let mut gui = Gui::new (fltk_tx, config_file_opt.as_ref ());
gui.set_server_running (false); gui.set_server_running (false);
wind.end (); wind.end ();
@ -74,43 +50,23 @@ fn main ()
while app.wait () { while app.wait () {
match fltk_rx.recv () { match fltk_rx.recv () {
Some (Message::Hit) => {
gui.hits += 1;
gui.refresh_label ();
},
Some (Message::RunServer) => { Some (Message::RunServer) => {
let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel (); let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel ();
let roots = match &config_file_opt {
None => Default::default (),
Some (cf) => match &cf.file_server_roots {
None => Default::default (),
Some (x) => x.clone (),
},
};
// `git grep JRY5NXZU` # duplicated code?
let config_file = ptth_server::ConfigFile { let config_file = ptth_server::ConfigFile {
name: gui.input_name.value ().to_string (), name: gui.input_name.value ().to_string (),
api_key: gui.input_api_key.value ().to_string (), api_key: gui.input_api_key.value ().to_string (),
relay_url: gui.input_relay_url.value ().to_string (), relay_url: gui.input_relay_url.value ().to_string (),
file_server_root: PathBuf::from (gui.input_file_server_root.value ()), file_server_root: Some (std::path::PathBuf::from (gui.input_file_server_root.value ())),
file_server_roots: roots,
throttle_upload: false, throttle_upload: false,
client_keys: Default::default (),
allow_any_client: true,
index_directories: true,
}; };
let hit_tx = hit_tx.clone ();
let task = rt.spawn (async { let task = rt.spawn (async {
if let Err (e) = ptth_server::run_server ( if let Err (e) = ptth_server::run_server (
config_file, config_file,
shutdown_rx, shutdown_rx,
None, None,
None, None
Some (hit_tx),
).await ).await
{ {
tracing::error! ("ptth_server crashed: {}", e); tracing::error! ("ptth_server crashed: {}", e);
@ -142,9 +98,6 @@ struct Gui {
input_relay_url: Input, input_relay_url: Input,
input_file_server_root: Input, input_file_server_root: Input,
input_api_key: SecretInput, input_api_key: SecretInput,
server_is_running: bool,
hits: u64,
} }
#[derive (Default, serde::Deserialize)] #[derive (Default, serde::Deserialize)]
@ -153,16 +106,10 @@ pub struct ConfigFile {
pub api_key: String, pub api_key: String,
pub relay_url: Option <String>, pub relay_url: Option <String>,
pub file_server_root: Option <PathBuf>, pub file_server_root: Option <PathBuf>,
pub file_server_roots: Option <BTreeMap <String, PathBuf>>,
} }
impl Gui { impl Gui {
fn new ( fn new (fltk_tx: app::Sender <Message>) -> Self {
fltk_tx: app::Sender <Message>,
config_file_opt: Option <&ConfigFile>,
)
-> Self
{
let mut input_name = Input::new (200, 10, 290, 20, "name"); let mut input_name = Input::new (200, 10, 290, 20, "name");
input_name.set_value ("my_ptth_server"); input_name.set_value ("my_ptth_server");
@ -185,7 +132,7 @@ impl Gui {
but_stop.set_trigger (CallbackTrigger::Changed); but_stop.set_trigger (CallbackTrigger::Changed);
but_stop.emit (fltk_tx, Message::StopServer); but_stop.emit (fltk_tx, Message::StopServer);
if let Some (config_file) = config_file_opt if let Ok (config_file) = ptth_server::load_toml::load::<ConfigFile, _> ("./config/ptth_server.toml")
{ {
if let Some (v) = config_file.name.as_ref () { if let Some (v) = config_file.name.as_ref () {
input_name.set_value (v); input_name.set_value (v);
@ -209,16 +156,16 @@ impl Gui {
input_relay_url, input_relay_url,
input_file_server_root, input_file_server_root,
input_api_key, input_api_key,
server_is_running: false,
hits: 0,
} }
} }
fn set_server_running (&mut self, b: bool) { fn set_server_running (&mut self, b: bool) {
self.server_is_running = b; self.frame.set_label (if b {
"Running"
self.refresh_label (); }
else {
"Stopped"
});
set_active (&mut self.but_run, ! b); set_active (&mut self.but_run, ! b);
set_active (&mut self.but_stop, b); set_active (&mut self.but_stop, b);
@ -230,18 +177,6 @@ impl Gui {
set_active (&mut self.input_file_server_root, ! b); set_active (&mut self.input_file_server_root, ! b);
set_active (&mut self.input_api_key, ! b); set_active (&mut self.input_api_key, ! b);
} }
fn refresh_label (&mut self) {
let s =
if self.server_is_running {
format! ("Running. Requests: {}", self.hits)
}
else {
"Stopped".to_string ()
};
self.frame.set_label (&s);
}
} }
fn set_active <W: WidgetExt> (w: &mut W, b: bool) { fn set_active <W: WidgetExt> (w: &mut W, b: bool) {

View File

@ -1,11 +0,0 @@
[package]
name = "udp_over_tcp"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.66"
tokio = { version = "1.23.0", features = ["full"] }
tracing = "0.1.37"

View File

@ -1,59 +0,0 @@
use std::{
net::{
Ipv4Addr,
SocketAddr,
SocketAddrV4,
},
sync::Arc,
};
use tokio::{
net::{
TcpSocket,
TcpStream,
UdpSocket,
},
spawn,
};
use crate::loops;
/// Configuration for the UDP-over-TCP client side.
pub struct Config {
    /// Ephemeral UDP port that the local application (the PTTH_QUIC client) binds.
    pub udp_eph_port: u16,
    /// Well-known local UDP port that this client binds; the application
    /// connects to this instead of the real relay address.
    pub udp_local_server_port: u16,
    /// Address of the remote UDP-over-TCP server, reached over TCP.
    pub tcp_server_addr: SocketAddr,
}
/// Client entry point: binds the local UDP socket, dials the remote
/// UDP-over-TCP server, and hands both sockets to `main_with_sockets`.
pub async fn main (cfg: Config) -> anyhow::Result <()> {
    // Bind the well-known local UDP port and lock it onto the application's
    // ephemeral port so plain send/recv can be used from here on.
    let bind_addr = SocketAddrV4::new (Ipv4Addr::UNSPECIFIED, cfg.udp_local_server_port);
    let udp_sock = UdpSocket::bind (bind_addr).await?;
    udp_sock.connect ((Ipv4Addr::LOCALHOST, cfg.udp_eph_port)).await?;

    // Dial the remote UDP-over-TCP server.
    let tcp_conn = TcpSocket::new_v4 ()?.connect (cfg.tcp_server_addr).await?;

    main_with_sockets (udp_sock, tcp_conn).await
}
/// Pumps datagrams between an already-connected UDP socket and an
/// already-connected TCP stream until either direction shuts down.
pub async fn main_with_sockets (udp_sock: UdpSocket, tcp_conn: TcpStream) -> anyhow::Result <()> {
    let (tcp_read, tcp_write) = tcp_conn.into_split ();

    // Both pump loops need the UDP socket, so share it behind an Arc.
    let udp_sock = Arc::new (udp_sock);
    let rx_task = spawn (loops::rx (Arc::clone (&udp_sock), tcp_read));
    let tx_task = spawn (loops::tx (udp_sock, tcp_write));

    // Whichever pump loop finishes first ends the bridge.
    tokio::select! {
        _val = tx_task => {
            println! ("client_main: tx_task exited, exiting");
        }
        _val = rx_task => {
            println! ("client_main: rx_task exited, exiting");
        }
    }

    Ok (())
}

View File

@ -1,4 +0,0 @@
/// UDP-over-TCP client: bridges a local UDP socket to a remote TCP server.
pub mod client;
/// UDP-over-TCP server: accepts TCP connections and relays each to a local UDP port.
pub mod server;

/// Shared rx/tx pump loops used by both client and server.
mod loops;

View File

@ -1,84 +0,0 @@
use std::{
sync::Arc,
};
use anyhow::bail;
use tokio::{
io::{
AsyncReadExt,
AsyncWriteExt,
},
net::{
UdpSocket,
tcp,
}
};
/// Receives framed packets from the TCP side and forwards each payload as
/// one UDP datagram.
///
/// Wire format per packet: tag `[1, 0, 0, 0]`, a little-endian `u32` payload
/// length, then the payload bytes.
pub async fn rx (
    udp_sock: Arc <UdpSocket>,
    mut tcp_read: tcp::OwnedReadHalf,
) -> anyhow::Result <()> {
    for i in 0u64.. {
        // Optimizes down to a bitwise AND
        if i % 8_192 == 0 {
            tracing::trace! ("rx loop");
        }

        // TCP is a byte stream, so short reads are normal. The original bare
        // `read` treated a partial read as a fatal error and could mis-frame
        // the stream; `read_exact` loops until the whole field arrives and
        // surfaces a mid-frame EOF as io::ErrorKind::UnexpectedEof.
        let mut tag = [0u8, 0, 0, 0];
        tcp_read.read_exact (&mut tag).await?;
        if tag != [1, 0, 0, 0] {
            bail! ("loops::rx: unexpected tag in framing");
        }

        let mut length = [0u8, 0, 0, 0];
        tcp_read.read_exact (&mut length).await?;

        let length = usize::try_from (u32::from_le_bytes (length))?;
        // Sanity bound: the tx side never sends payloads >= 8 KiB.
        if length >= 8_192 {
            bail! ("loops::rx: Length too big for UDP packets");
        }

        let mut buf = vec! [0u8; length];
        tcp_read.read_exact (&mut buf).await?;

        // Forward the reassembled payload as a single datagram.
        udp_sock.send (&buf).await?;
    }

    Ok (())
}
/// Receives UDP datagrams and forwards each one over TCP, framed as:
/// tag `[1, 0, 0, 0]`, little-endian `u32` payload length, payload bytes.
pub async fn tx (
    udp_sock: Arc <UdpSocket>,
    mut tcp_write: tcp::OwnedWriteHalf,
) -> anyhow::Result <()>
{
    // One receive buffer, reused across iterations instead of allocating
    // 8 KiB per packet.
    let mut buf = vec! [0u8; 8_192];

    for i in 0u64.. {
        // Optimizes down to a bitwise AND
        if i % 8_192 == 0 {
            tracing::trace! ("tx loop");
        }

        // recv returns one whole datagram (truncated if it exceeds the buffer).
        let bytes_read = udp_sock.recv (&mut buf).await?;
        let payload = &buf [..bytes_read];

        let tag = [1u8, 0, 0, 0];
        let length = u32::try_from (bytes_read)?.to_le_bytes ();

        tcp_write.write_all (&tag).await?;
        tcp_write.write_all (&length).await?;
        tcp_write.write_all (payload).await?;
    }

    Ok (())
}

View File

@ -1,76 +0,0 @@
/*
To test manually, run this 3 commands:
- Terminal A: `nc -l -u -p 9502`
- Terminal B: `cargo run -p udp_over_tcp`
- Terminal C: `nc -p 9500 -u 127.0.0.1 9501`
Terminals A and C should be connected through the UDP-over-TCP connection
*/
use std::{
net::{
Ipv4Addr,
SocketAddr,
SocketAddrV4,
},
};
use tokio::{
runtime,
spawn,
};
mod client;
mod loops;
mod server;

// The ephemeral UDP port that the PTTH_QUIC client will bind
const PORT_0: u16 = 9500;

// The well-known UDP port that the UDP-over-TCP client will bind
// The PTTH_QUIC client must connect to this instead of the real relay address
const PORT_1: u16 = 9501;

// The well-known TCP port that the UDP-over-TCP server will bind
const PORT_2: u16 = 9502;

// The well-known UDP port that the PTTH_QUIC relay will bind
// (TCP and UDP port namespaces are separate, so sharing 9502 with PORT_2 is fine)
const PORT_3: u16 = 9502;
/// Synchronous entry point: builds a Tokio runtime and drives `async_main`
/// to completion.
fn main () -> anyhow::Result <()> {
    runtime::Runtime::new ()?.block_on (async_main ())
}
/// Brings up a loopback demo: one UDP-over-TCP server and one client, then
/// waits for either task to finish.
async fn async_main () -> anyhow::Result <()> {
    // Bind the TCP listener before the client tries to dial it.
    let listener = server::Listener::new (server::Config {
        tcp_port: PORT_2,
        udp_port: PORT_3,
    }).await?;
    let server_join = spawn (listener.run ());

    let client_join = spawn (client::main (client::Config {
        udp_eph_port: PORT_0,
        udp_local_server_port: PORT_1,
        tcp_server_addr: SocketAddr::V4 (SocketAddrV4::new (Ipv4Addr::LOCALHOST, PORT_2)),
    }));

    tokio::select! {
        _val = client_join => {
            println! ("Client exited, exiting");
        },
        _val = server_join => {
            println! ("Server exited, exiting");
        },
    }

    Ok (())
}

View File

@ -1,88 +0,0 @@
use std::{
net::{
Ipv4Addr,
SocketAddrV4,
},
sync::Arc,
};
use tokio::{
net::{
TcpListener,
TcpStream,
UdpSocket,
},
spawn,
};
use crate::loops;
/// Configuration for the UDP-over-TCP server side.
#[derive (Clone)]
pub struct Config {
    /// The well-known TCP port that the UDP-over-TCP server will bind
    pub tcp_port: u16,
    /// The well-known UDP port that the PTTH_QUIC relay will bind
    pub udp_port: u16,
}
/// Accepts TCP connections and relays each one to the local UDP port
/// given in `cfg`.
pub struct Listener {
    cfg: Config,
    tcp_listener: TcpListener,
}
impl Listener {
    /// Binds the TCP listening socket described by `cfg`.
    pub async fn new (cfg: Config) -> anyhow::Result <Self> {
        let tcp_listener = TcpListener::bind ((Ipv4Addr::UNSPECIFIED, cfg.tcp_port)).await?;
        Ok (Self {
            cfg,
            tcp_listener,
        })
    }

    /// Returns the TCP port actually bound.
    pub fn tcp_port (&self) -> anyhow::Result <u16> {
        let local_addr = self.tcp_listener.local_addr ()?;
        Ok (local_addr.port ())
    }

    /// Accept loop: every incoming TCP connection gets its own relay task.
    pub async fn run (self) -> anyhow::Result <()> {
        loop {
            let (conn, _peer_addr) = self.tcp_listener.accept ().await?;
            spawn (handle_connection (self.cfg.clone (), conn));
        }
    }
}
/// Relays one TCP connection: opens a fresh ephemeral UDP socket aimed at
/// the local relay port and pumps bytes both ways until either side stops.
async fn handle_connection (cfg: Config, conn: TcpStream) -> anyhow::Result <()> {
    // One ephemeral UDP socket per TCP connection.
    let udp_sock = UdpSocket::bind (SocketAddrV4::new (Ipv4Addr::UNSPECIFIED, 0)).await?;
    udp_sock.connect ((Ipv4Addr::LOCALHOST, cfg.udp_port)).await?;

    let (tcp_read, tcp_write) = conn.into_split ();

    // Both pump loops share the UDP socket behind an Arc.
    let udp_sock = Arc::new (udp_sock);
    let rx_task = spawn (loops::rx (Arc::clone (&udp_sock), tcp_read));
    let tx_task = spawn (loops::tx (udp_sock, tcp_write));

    tokio::select! {
        _val = tx_task => {
            println! ("server_handle_connection: tx_task exited, exiting");
        }
        _val = rx_task => {
            println! ("server_handle_connection: rx_task exited, exiting");
        }
    }

    Ok (())
}

View File

@ -1,73 +0,0 @@
# PTTH_DIREC - Direct P2P connections
_It could work, even!_
To keep each ridiculous new feature simple, we'll rely on bootstrapping:
1. PTTH is just HTTPS connections
2. PTTH_QUIC uses a PTTH relay to download the QUIC cert and bootstrap
3. PTTH_DIREC will use a PTTH_QUIC relay to bootstrap
# Overview
Given that:
- P2 is connected to P3
- P4 is connected to P3
Steps:
- S1. P2 starts a bi stream to P3
- S2.0. P2 says, "I want to initiate a PTTH_DIREC connection..."
- "... And I'll send you cookie X to do hole-punching..."
- "... And I want to connect to end server Y..."
- S3.0. P3 creates an ID for this connection
- S3.1. P3 replies "go ahead" to P2
- S4. P3 starts a bi stream to P4 (end server Y)
- S5. P3 says, "I want you to accept a PTTH_DIREC connection..."
- "... And you should send me cookie Z to do hole-punching..."
- "... And the client will be client W..."
- S6. P3 waits for P4 to accept the offer
- S7. P3 waits for both cookies to arrive
- S8. When the cookies arrive, P3 learns the WAN addresses of P2 and P4
- S9. P3 sends the WAN addresses of P2 and P4 to each other (on the existing bi streams)
- S10. P4 tries to connect directly to P2
- S11. P2 does the same to P4
- S12. When P4 sees round-tripped data, it attempts to upgrade to QUIC
- S13. When P2 sees round-tripped data, it attempts to upgrade to QUIC
- Cool stuff happens over QUIC
- ReactorScram implements the rest of the protocol
P2's PoV:
- S1. Start a bi stream to P3
- S2.0. Send cookie and server ID
- S2.1. Wait for go-ahead signal (this may contain the hole-punch address and a new cookie for P4)
- S2.2. Send cookie to hole-punch address repeatedly
- S2.3. While you're sending the cookie, wait to hear P4's WAN address
- S9. Learn P4's WAN address
- S10. Send the new cookie to P4's address
- S12. When you see round-tripped data, upgrade to QUIC
P4's PoV:
- S4. Accept a bi stream from P3
- S5. Receive cookie and client ID
- S6. Reply "OK"
- S7.0. Send cookie to hole-punch address repeatedly
- S7.1. While sending the cookie, wait to hear P2's WAN address
- S9. Learn P2's WAN address
- S10. Try to connect directly to P2
- S12. When you see round-tripped data, upgrade to QUIC
Commands needed:
- ???
# Decisions
I'll add a delay between giving P2's address to P4, and giving P4's address to P2.
This miiiight change things slightly if P4's firewall is suspicious of packets
coming in too early, but I doubt it.
The delay is easy to remove relay-side if it doesn't help.

View File

@ -1,24 +0,0 @@
# How-to: Test PTTH_QUIC
## Initial setup
- Open 3 terminals in `crates/ptth_quic`
- Use `export RUST_LOG=ptth_quic_relay_server=debug` to enable debug logging
for the terminal that will run the relay server (P3)
- Use `export RUST_LOG=ptth_quic=debug` for the terminal that
will run the end server (P4)
- Use `export RUST_LOG=ptth_quic_client=debug` for the terminal that
will run the client (P2)
When the relay server is running, use curl to get the list of connected
end servers: `curl 127.0.0.1:4004`
## Test loop - Happy path
- Start a relay `cargo run --bin ptth_quic_relay_server`
- Verify that the relay has no end servers connected
- Start an end server `cargo run --bin ptth_quic_end_server -- --debug-echo`
- Verify that the end server connected
- Start a client `cargo run --bin ptth_quic_client`
- Connect to the client and verify that the debug echo server is running
`nc 127.0.0.1 30381`

View File

@ -1,31 +0,0 @@
# How scraper keys work
Come up with a random passphrase:
`not this, this is a bogus passphrase for documentation`
Run that through the `hash-api-key` subcommand of any `ptth_relay` instance:
`ptth_relay hash-api-key`
You'll get a hash like this:
`RUWt1hQQuHIRjftOdgeZf0PG/DtAmIaMqot/nwBAZXQ=`
Make sure that gets into the relay's config file, `ptth_relay.toml`:
```
[[scraper_keys]]
name = "shudder_mummy"
not_before = "2021-08-27T19:20:25-05:00"
not_after = "2031-08-27T19:20:25-05:00"
hash = "RUWt1hQQuHIRjftOdgeZf0PG/DtAmIaMqot/nwBAZXQ="
```
Use curl to try it out:
```
curl \
--header "X-ApiKey: not this, this is a bogus passphrase for documentation" \
http://localhost:4000/scraper/v1/test
```

View File

@ -52,6 +52,8 @@ Lorem ipsum dolor set amet
<p> <p>
<a href="endless_sink">Data sink (POST only)</a> <a href="endless_sink">Data sink (POST only)</a>
<p>Persistent toggle is <pre>{{persistent_toggle}}</pre></p>
<form action="toggle" method="post"> <form action="toggle" method="post">
<input type="submit" value="Toggle"> <input type="submit" value="Toggle">
</form> </form>

View File

@ -14,10 +14,6 @@
.entry_list div:nth-child(odd) { .entry_list div:nth-child(odd) {
background-color: #ddd; background-color: #ddd;
} }
.fine {
color: #444;
font-size: 12px;
}
</style> </style>
<title>PTTH relay</title> <title>PTTH relay</title>
</head> </head>
@ -37,12 +33,5 @@
</div> </div>
<p class="fine">
Rendered by PTTH relay server.
Copyright 2020-2024 "Trish" ReactorScram. Licensed under the
<a href="https://www.gnu.org/licenses/agpl-3.0.html">GNU AGPLv3.</a>
<a href="https://six-five-six-four.com/git/reactor/ptth">Download source code</a>
</p>
</body> </body>
</html> </html>

View File

@ -95,13 +95,6 @@ Git version: {{git_version}}
{{else}} {{else}}
Not built from Git Not built from Git
{{/if}} {{/if}}
<p>
Rendered by PTTH relay server.
Copyright 2020-2024 "Trish" ReactorScram. Licensed under the
<a href="https://www.gnu.org/licenses/agpl-3.0.html">GNU AGPLv3.</a>
<a href="https://six-five-six-four.com/git/reactor/ptth">Download source code</a>
</p>
</div> </div>
</body> </body>

View File

@ -27,10 +27,6 @@ AIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAA" rel="icon" type="image/x-icon" />
transition: var(--dark-mode-transition); transition: var(--dark-mode-transition);
} }
.fine {
color: #444;
font-size: 12px;
}
.light_switch { .light_switch {
display: none; display: none;
} }
@ -44,7 +40,7 @@ AIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAA" rel="icon" type="image/x-icon" />
.light_switch_label::before { .light_switch_label::before {
content: "[ ] "; content: "[ ] ";
} }
.app a { a {
color: var(--main-link-color); color: var(--main-link-color);
transition: var(--dark-mode-transition); transition: var(--dark-mode-transition);
} }
@ -115,7 +111,7 @@ AIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAA" rel="icon" type="image/x-icon" />
{{#each entries}} {{#each entries}}
<tr> <tr>
<td><a class="entry" href="./{{this.encoded_file_name}}{{this.trailing_slash}}"> <td><a class="entry" href="{{this.encoded_file_name}}{{this.trailing_slash}}">
{{this.icon}} {{this.file_name}}{{this.trailing_slash}}</a></td> {{this.icon}} {{this.file_name}}{{this.trailing_slash}}</a></td>
<td><span class="grey">{{this.size}}</span></td> <td><span class="grey">{{this.size}}</span></td>
</tr> </tr>
@ -126,15 +122,6 @@ AIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAA" rel="icon" type="image/x-icon" />
<!-- Doesn't work perfectly yet --> <!-- Doesn't work perfectly yet -->
<!--<label for="light_switch" class="light_switch_label">Dark mode</label>--> <!--<label for="light_switch" class="light_switch_label">Dark mode</label>-->
</page>
</div>
<p class="fine">
Rendered by PTTH end server.
Copyright 2020-2024 "Trish" ReactorScram. Licensed under the
<a href="https://www.gnu.org/licenses/agpl-3.0.html">GNU AGPLv3.</a>
<a href="https://six-five-six-four.com/git/reactor/ptth">Download source code</a>
</p>
</body> </body>
</html> </html>

View File

@ -15,10 +15,6 @@
.entry_list div:nth-child(even) { .entry_list div:nth-child(even) {
background-color: #ddd; background-color: #ddd;
} }
.fine {
color: #444;
font-size: 12px;
}
</style> </style>
<title>{{metrics_startup.server_name}}</title> <title>{{metrics_startup.server_name}}</title>
</head> </head>
@ -26,6 +22,10 @@
<h1>{{metrics_startup.server_name}}</h1> <h1>{{metrics_startup.server_name}}</h1>
<h2>Gauges</h2>
<p>RSS MiB: {{metrics_interval.rss_mib}}</p>
<div class="entry_list"> <div class="entry_list">
<div> <div>
@ -40,12 +40,5 @@
</div> </div>
<p class="fine">
Rendered by PTTH end server.
Copyright 2020-2024 "Trish" ReactorScram. Licensed under the
<a href="https://www.gnu.org/licenses/agpl-3.0.html">GNU AGPLv3.</a>
<a href="https://six-five-six-four.com/git/reactor/ptth">Download source code</a>
</p>
</body> </body>
</html> </html>

View File

@ -5,26 +5,14 @@ authors = ["Trish"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
repository = "https://six-five-six-four.com/git/reactor/ptth"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
anyhow = "1.0.38" anyhow = "1.0.38"
blake3 = "1.0.0" fltk = "1.1.1"
fltk = "1.4.22" quic_demo = { path = "../quic_demo" }
ptth_quic = { path = "../ptth_quic" } quinn = "0.7.2"
quinn = "0.9.3"
rand = "0.8.4"
rand_chacha = "0.3.1"
rmp-serde = "0.15.5"
serde = "1.0.130"
structopt = "0.3.20" structopt = "0.3.20"
tokio = { version = "1.8.1", features = ["full"] } tokio = { version = "1.8.1", features = ["full"] }
tracing-subscriber = "0.2.16" tracing-subscriber = "0.2.16"
tracing = "0.1.25" tracing = "0.1.25"
[dependencies.reqwest]
version = "0.11.4"
default-features = false
features = ["stream", "rustls-tls", "hyper-rustls"]

View File

@ -0,0 +1,240 @@
use std::str::FromStr;
use fltk::{
app,
button::Button,
enums::CallbackTrigger,
frame::Frame,
input::*,
prelude::*,
window::Window
};
use structopt::StructOpt;
use quic_demo::{
client_proxy::*,
prelude::*,
protocol::PeerId,
};
// Command-line options for the client proxy GUI.
// (Plain `//` comments on purpose: `///` doc comments on structopt fields
// would change the generated --help text.)
#[derive (Debug, StructOpt)]
struct Opt {
    // Title for the main window; defaults to "PTTH client proxy"
    #[structopt (long)]
    window_title: Option <String>,
    // Relay server address; defaults to 127.0.0.1:30380
    #[structopt (long)]
    relay_addr: Option <String>,
    // Our client ID as presented to the relay; defaults to "bogus_client"
    #[structopt (long)]
    client_id: Option <PeerId>,
}
/// Messages sent from FLTK widget callbacks to the main event loop.
/// The payload is the index of the port row the request applies to.
#[derive (Clone, Copy)]
enum Message {
    /// Start forwarding on the indexed port row.
    OpenPort (usize),
    /// Stop forwarding on the indexed port row.
    ClosePort (usize),
}
/// GUI client proxy: connects to the relay once at startup, then lets the
/// user open/close up to three forwarded ports from the window.
fn main () -> anyhow::Result <()> {
    tracing_subscriber::fmt::init ();
    // Tokio runtime for all QUIC/networking work; the FLTK event loop stays
    // on this thread and hops into the runtime as needed.
    let rt = tokio::runtime::Runtime::new ()?;
    let opt = Opt::from_args ();

    // Channel from widget callbacks into the event loop below.
    let (fltk_tx, fltk_rx) = app::channel::<Message> ();

    let app = app::App::default ();
    let window_title = opt.window_title.clone ().unwrap_or_else (|| "PTTH client proxy".to_string ());
    let mut wind = Window::new (100, 100, 800, 600, None)
    .with_label (&window_title);

    // Manual layout: widgets are placed on a grid advanced by (x, y).
    let margin = 10;
    let h = 30;

    let mut x = margin;
    let mut y = margin;

    let mut frame_status = Frame::new (x, y, 800 - 20, h, "Forwarding 0 ports");

    y += h + margin;
    x = margin;

    // Column headers for the port rows.
    {
        let w = 80;
        Frame::new (x, y, w, h, "Local port");
        x += w + margin;

        let w = 120;
        Frame::new (x, y, w, h, "Server ID");
        x += w + margin;

        let w = 80;
        Frame::new (x, y, w, h, "Server port");
        // x += w + margin;
    }

    y += h + margin;
    x = margin;

    let gui_port_0 = GuiPort::new (fltk_tx, &mut x, y, 0);

    y += h + margin;
    x = margin;

    let gui_port_1 = GuiPort::new (fltk_tx, &mut x, y, 1);

    y += h + margin;
    x = margin;

    let gui_port_2 = GuiPort::new (fltk_tx, &mut x, y, 2);

    // y += h + margin;
    // x = margin;

    // gui_ports [i] and forwarding_instances [i] describe the same row:
    // the widgets, and the live forwarder for that row (if any).
    let mut gui_ports = vec! [
        gui_port_0,
        gui_port_1,
        gui_port_2,
    ];

    let mut forwarding_instances = vec! [
        None,
        None,
        None,
    ];

    // y += h + margin;

    wind.end ();
    wind.show ();

    // Connect to the relay (P3) once, up front; the GUI blocks until the
    // QUIC connection is established or fails.
    let connection_p2_p3 = rt.block_on (async move {
        let server_cert = tokio::fs::read ("quic_server.crt").await?;
        let relay_addr = opt.relay_addr.unwrap_or_else (|| String::from ("127.0.0.1:30380")).parse ()?;
        let endpoint = make_client_endpoint ("0.0.0.0:0".parse ()?, &[&server_cert])?;
        trace! ("Connecting to relay server");
        let client_id = opt.client_id.unwrap_or_else (|| "bogus_client".to_string ());
        let quinn::NewConnection {
            connection,
            ..
        } = protocol::p2_connect_to_p3 (&endpoint, &relay_addr, &client_id).await?;

        Ok::<_, anyhow::Error> (connection)
    })?;

    // FLTK event loop: react to open/close requests from the port rows.
    while app.wait () {
        match fltk_rx.recv () {
            Some (Message::OpenPort (port_idx)) => {
                // Rows with invalid input are ignored (get_params returns Err).
                if let Ok (params) = gui_ports [port_idx].get_params () {
                    let connection_p2_p3 = connection_p2_p3.clone ();
                    // Enter the runtime so ForwardingInstance::new can spawn tasks.
                    let _guard = rt.enter ();
                    forwarding_instances [port_idx].replace (ForwardingInstance::new (
                        connection_p2_p3,
                        params,
                    ));

                    gui_ports [port_idx].set_forwarding (true);
                    // NOTE(review): the status label is hard-coded to
                    // "1 port" / "0 ports" — it does not count open rows, so
                    // it's wrong when 2+ ports are forwarding at once.
                    frame_status.set_label ("Forwarding 1 port");
                }
            },
            Some (Message::ClosePort (port_idx)) => {
                if let Some (old_instance) = forwarding_instances [port_idx].take () {
                    rt.block_on (old_instance.close ())?;
                }
                gui_ports [port_idx].set_forwarding (false);
                frame_status.set_label ("Forwarding 0 ports");
            },
            None => (),
        }
    }

    Ok (())
}
/// Enables (activates) or disables (deactivates) any FLTK widget
/// according to `b`.
fn set_active <W: WidgetExt> (w: &mut W, b: bool) {
	match b {
		true => w.activate (),
		false => w.deactivate (),
	}
}
/// One row of widgets controlling a single forwarded port.
struct GuiPort {
	// Local TCP port to listen on (P1 side).
	input_client_port: Input,
	// ID of the end server (P4) to forward to.
	input_server_id: Input,
	// TCP port on the end-server side (P5).
	input_server_port: Input,
	// Starts forwarding for this row.
	but_open: Button,
	// Stops forwarding for this row.
	but_close: Button,
}
impl GuiPort {
	/// Builds the widget row at vertical position `y`, advancing `x`
	/// past each widget. `port_idx` is baked into the button callbacks
	/// so the main event loop knows which row fired.
	fn new (fltk_tx: fltk::app::Sender <Message>, x: &mut i32, y: i32, port_idx: usize) -> Self {
		let margin = 10;
		let h = 30;
		let w = 80;
		let mut input_client_port = Input::new (*x, y, w, h, "");
		*x += w + margin;
		let w = 120;
		let mut input_server_id = Input::new (*x, y, w, h, "");
		*x += w + margin;
		let w = 80;
		let mut input_server_port = Input::new (*x, y, w, h, "");
		*x += w + margin;
		let w = 80;
		let mut but_open = Button::new (*x, y, w, h, "Open");
		*x += w + margin;
		let w = 80;
		let mut but_close = Button::new (*x, y, w, h, "Close");
		// *x += w + margin;
		// Pre-fill with demo defaults (VNC-ish ports, placeholder server ID).
		input_client_port.set_value ("5901");
		input_server_id.set_value ("bogus_server");
		input_server_port.set_value ("5900");
		// Route button presses through the app-level channel as Messages.
		but_open.set_trigger (CallbackTrigger::Changed);
		but_open.emit (fltk_tx, Message::OpenPort (port_idx));
		but_close.set_trigger (CallbackTrigger::Changed);
		but_close.emit (fltk_tx, Message::ClosePort (port_idx));
		// Start in the idle state: only Open is clickable.
		set_active (&mut but_open, true);
		set_active (&mut but_close, false);
		Self {
			input_client_port,
			input_server_id,
			input_server_port,
			but_open,
			but_close,
		}
	}
	/// Parses the three text fields into forwarding parameters.
	/// Fails if either port field is not a valid u16.
	fn get_params (&self) -> anyhow::Result <ForwardingParams>
	{
		let client_tcp_port = u16::from_str (&self.input_client_port.value ())?;
		let server_id = self.input_server_id.value ();
		let server_tcp_port = u16::from_str (&self.input_server_port.value ())?;
		Ok (ForwardingParams {
			client_tcp_port,
			server_id,
			server_tcp_port,
		})
	}
	/// Toggles the row between forwarding (inputs locked, Close enabled)
	/// and idle (inputs editable, Open enabled).
	fn set_forwarding (&mut self, x: bool) {
		set_active (&mut self.input_client_port, !x);
		set_active (&mut self.input_server_id, !x);
		set_active (&mut self.input_server_port, !x);
		set_active (&mut self.but_open, !x);
		set_active (&mut self.but_close, x);
		// Mirror the state in the buttons' toggle value as well.
		self.but_open.set (x);
		self.but_close.set (!x);
	}
}

View File

@ -0,0 +1,20 @@
[package]
name = "quic_demo"
version = "0.1.0"
authors = ["Trish"]
edition = "2018"
license = "AGPL-3.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.38"
base64 = "0.13.0"
futures-util = "0.3.9"
quinn = "0.7.2"
rcgen = "0.8.11"
rmp-serde = "0.15.5"
structopt = "0.3.20"
tokio = { version = "1.8.1", features = ["full"] }
tracing-subscriber = "0.2.16"
tracing = "0.1.25"

View File

@ -1,8 +1,8 @@
# https://whitfin.io/speeding-up-rust-docker-builds/ # https://whitfin.io/speeding-up-rust-docker-builds/
# TODO: https://stackoverflow.com/questions/57389547/how-to-define-the-context-for-a-docker-build-as-a-specific-commit-on-one-of-the # TODO: https://stackoverflow.com/questions/57389547/how-to-define-the-context-for-a-docker-build-as-a-specific-commit-on-one-of-the
# docker pull rust:1.66-slim-buster # rust:1.50-slim-buster
FROM rust@sha256:98c9b1fca0c9a6183369daf9efadb57c634340ae877bb027aeadf72afdd086a3 as build FROM rust@sha256:5dd85eb0c60bbdea14a6ecba1f6fe4a0f5c878bcf06d2cdfae0aff3a19ed4b10 as build
WORKDIR / WORKDIR /
ENV USER root ENV USER root
@ -20,8 +20,7 @@ cargo new --bin crates/ptth_server && \
cargo new --bin crates/ptth_file_server_bin && \ cargo new --bin crates/ptth_file_server_bin && \
cargo new --bin tools/ptth_tail && \ cargo new --bin tools/ptth_tail && \
cargo new --bin crates/debug_proxy && \ cargo new --bin crates/debug_proxy && \
cargo new --bin crates/ptth_quic && \ cargo new --bin prototypes/quic_demo
cargo new --lib crates/udp_over_tcp
# copy over your manifests # copy over your manifests
COPY ./Cargo.lock ./ COPY ./Cargo.lock ./
@ -29,11 +28,10 @@ COPY ./Cargo.toml ./
COPY ./crates/always_equal/Cargo.toml ./crates/always_equal/ COPY ./crates/always_equal/Cargo.toml ./crates/always_equal/
COPY ./crates/ptth_core/Cargo.toml ./crates/ptth_core/ COPY ./crates/ptth_core/Cargo.toml ./crates/ptth_core/
COPY ./crates/ptth_relay/Cargo.toml ./crates/ptth_relay/ COPY ./crates/ptth_relay/Cargo.toml ./crates/ptth_relay/
COPY ./crates/ptth_quic/Cargo.toml ./crates/ptth_quic/ COPY ./prototypes/quic_demo/Cargo.toml ./prototypes/quic_demo/
COPY ./crates/udp_over_tcp/Cargo.toml ./crates/udp_over_tcp/
# this build step will cache your dependencies # this build step will cache your dependencies
RUN cargo build --release -p ptth_quic RUN cargo build --release -p quic_demo
RUN \ RUN \
rm \ rm \
@ -41,8 +39,7 @@ src/*.rs \
crates/always_equal/src/*.rs \ crates/always_equal/src/*.rs \
crates/ptth_core/src/*.rs \ crates/ptth_core/src/*.rs \
crates/ptth_relay/src/*.rs \ crates/ptth_relay/src/*.rs \
crates/ptth_quic/src/*.rs \ prototypes/quic_demo/src/*.rs
crates/udp_over_tcp/src/*.rs
# Copy source tree # Copy source tree
# Yes, I tried a few variations on the syntax. Dockerfiles are just rough. # Yes, I tried a few variations on the syntax. Dockerfiles are just rough.
@ -52,8 +49,7 @@ COPY ./crates/always_equal ./crates/always_equal
COPY ./crates/ptth_core ./crates/ptth_core COPY ./crates/ptth_core ./crates/ptth_core
COPY ./crates/ptth_relay ./crates/ptth_relay COPY ./crates/ptth_relay ./crates/ptth_relay
COPY ./handlebars/ ./handlebars COPY ./handlebars/ ./handlebars
COPY ./crates/ptth_quic ./crates/ptth_quic COPY ./prototypes/quic_demo ./prototypes/quic_demo
COPY ./crates/udp_over_tcp ./crates/udp_over_tcp
# Bug in cargo's incremental build logic, triggered by # Bug in cargo's incremental build logic, triggered by
# Docker doing something funny with mtimes? Maybe? # Docker doing something funny with mtimes? Maybe?
@ -62,9 +58,8 @@ RUN touch crates/ptth_core/src/lib.rs
# build for release # build for release
# gate only on ptth_relay tests for now # gate only on ptth_relay tests for now
RUN \ RUN \
find . && \ cargo build --release -p quic_demo --bin quic_demo_relay_server && \
cargo build --release -p ptth_quic --bin ptth_quic_relay_server && \ cargo test --release -p quic_demo --bin quic_demo_relay_server
cargo test --release -p ptth_quic --bin ptth_quic_relay_server
# debian:buster-slim # debian:buster-slim
FROM debian@sha256:13f0764262a064b2dd9f8a828bbaab29bdb1a1a0ac6adc8610a0a5f37e514955 FROM debian@sha256:13f0764262a064b2dd9f8a828bbaab29bdb1a1a0ac6adc8610a0a5f37e514955
@ -78,11 +73,11 @@ RUN addgroup --gid 10001 ptth_user && adduser --system --uid 10000 --gid 10001 p
USER ptth_user USER ptth_user
WORKDIR /home/ptth_user WORKDIR /home/ptth_user
COPY --from=build /ptth/target/release/ptth_quic_relay_server ./ COPY --from=build /ptth/target/release/quic_demo_relay_server ./
ARG git_version ARG git_version
RUN \ RUN \
echo -n "$git_version" > ./git_version.txt && \ echo -n "$git_version" > ./git_version.txt && \
ln -s ptth_quic_relay_server app ln -s quic_demo_relay_server app
CMD ["/usr/bin/tini", "--", "./ptth_quic_relay_server"] CMD ["/usr/bin/tini", "--", "./quic_demo_relay_server"]

View File

@ -3,9 +3,9 @@
There are 5 processes, so you'll need 5 terminal windows or screen / tmux There are 5 processes, so you'll need 5 terminal windows or screen / tmux
sessions. Run the processes in this order: sessions. Run the processes in this order:
1. QUIC relay server: `RUST_LOG=ptth_quic_relay_server=debug cargo run --bin ptth_quic_relay_server` 1. QUIC relay server: `RUST_LOG=quic_demo_relay_server=debug cargo run --bin quic_demo_relay_server`
2. Server-side proxy: `RUST_LOG=ptth_quic_end_server=debug cargo run --bin ptth_quic_end_server` 2. Server-side proxy: `RUST_LOG=quic_demo_end_server=debug cargo run --bin quic_demo_end_server`
3. Client-side proxy: `RUST_LOG=ptth_quic_client cargo run --bin ptth_quic_client` 3. Client-side proxy: `RUST_LOG=quic_demo_client cargo run --bin quic_demo_client`
4. TCP end server: `nc -l -p 30382` 4. TCP end server: `nc -l -p 30382`
5. TCP end client: `nc 127.0.0.1 30381` 5. TCP end client: `nc 127.0.0.1 30381`

View File

@ -1,5 +1,4 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Run from `ptth/`
set -euo pipefail set -euo pipefail
@ -9,12 +8,14 @@ DOCKER_TAG="ptth_quic:latest"
mkdir -p app_packages mkdir -p app_packages
git archive --format=tar "$GIT_COMMIT" | docker build -f crates/ptth_quic/Dockerfile -t "$DOCKER_TAG" --build-arg "git_version=$GIT_COMMIT" - pushd ../../
git archive --format=tar "$GIT_COMMIT" | sudo docker build -f prototypes/quic_demo/Dockerfile -t "$DOCKER_TAG" --build-arg "git_version=$GIT_COMMIT" -
popd
docker run --rm "$DOCKER_TAG" \ sudo docker run --rm "$DOCKER_TAG" \
tar -c \ tar -c \
app \ app \
ptth_quic_relay_server \ quic_demo_relay_server \
| gzip > "app_packages/ptth_quic_relay_$GIT_COMMIT_SHORT.tar.gz" | gzip > "app_packages/ptth_quic_relay_$GIT_COMMIT_SHORT.tar.gz"
# sudo docker build -f app_package_Dockerfile -t ptth_app_host:latest . # sudo docker build -f app_package_Dockerfile -t ptth_app_host:latest .

View File

@ -0,0 +1,82 @@
use structopt::StructOpt;
use tokio::net::TcpListener;
use quic_demo::prelude::*;
use protocol::PeerId;
/// Command-line options for the headless client proxy (P2).
#[derive (Debug, StructOpt)]
struct Opt {
	/// Relay server (P3) address; defaults to 127.0.0.1:30380.
	#[structopt (long)]
	relay_addr: Option <String>,
	/// ID this client identifies itself with; defaults to "bogus_client".
	#[structopt (long)]
	client_id: Option <PeerId>,
	/// End server (P4) to forward to; defaults to "bogus_server".
	#[structopt (long)]
	server_id: Option <PeerId>,
	/// Local TCP port to listen on; defaults to 30381.
	#[structopt (long)]
	client_tcp_port: Option <u16>,
	/// TCP port on the end-server side; defaults to 30382.
	#[structopt (long)]
	server_tcp_port: Option <u16>,
}
/// Entry point for the headless client proxy (P2): connects to the
/// relay (P3), listens on a local TCP port, and forwards each accepted
/// TCP connection to the configured end server over QUIC.
#[tokio::main]
async fn main () -> anyhow::Result <()> {
	tracing_subscriber::fmt::init ();
	let opt = Opt::from_args ();
	// The relay's self-signed cert, written out by the relay at startup.
	let server_cert = tokio::fs::read ("quic_server.crt").await?;
	let relay_addr = opt.relay_addr.unwrap_or_else (|| String::from ("127.0.0.1:30380")).parse ()?;
	let endpoint = make_client_endpoint ("0.0.0.0:0".parse ()?, &[&server_cert])?;
	debug! ("Connecting to relay server");
	let client_id = opt.client_id.unwrap_or_else (|| "bogus_client".to_string ());
	let quinn::NewConnection {
		connection,
		..
	} = protocol::p2_connect_to_p3 (&endpoint, &relay_addr, &client_id).await?;
	// End of per-client stuff
	// Beginning of per-port stuff
	let server_id = opt.server_id.unwrap_or_else (|| "bogus_server".to_string ());
	let client_tcp_port = opt.client_tcp_port.unwrap_or (30381);
	let server_tcp_port = opt.server_tcp_port.unwrap_or (30382);
	// Loopback only - this proxy is not meant to accept remote peers.
	let listener = TcpListener::bind (("127.0.0.1", client_tcp_port)).await?;
	debug! ("Accepting local TCP connections from P1");
	// End of per-port stuff
	// Beginning of per-connection stuff
	loop {
		let (tcp_socket, _) = listener.accept ().await?;
		// Each accepted TCP connection gets its own task and its own
		// QUIC bi stream through the relay.
		let connection = connection.clone ();
		let server_id = server_id.clone ();
		tokio::spawn (async move {
			let (local_recv, local_send) = tcp_socket.into_split ();
			debug! ("Starting PTTH connection");
			let (relay_send, relay_recv) = protocol::p2_connect_to_p5 (&connection, &server_id, server_tcp_port).await?;
			trace! ("Relaying bytes...");
			let ptth_conn = quic_demo::connection::NewConnection {
				local_send,
				local_recv,
				relay_send,
				relay_recv,
			}.build ();
			ptth_conn.wait_for_close ().await?;
			debug! ("Ended PTTH connection");
			Ok::<_, anyhow::Error> (())
		});
	}
}

View File

@ -0,0 +1,90 @@
use structopt::StructOpt;
use tokio::net::TcpStream;
use quic_demo::prelude::*;
use protocol::PeerId;
/// Command-line options for the end-server-side proxy (P4).
#[derive (Debug, StructOpt)]
struct Opt {
	/// Relay server (P3) address; defaults to 127.0.0.1:30380.
	#[structopt (long)]
	relay_addr: Option <String>,
	/// ID this server registers under; defaults to "bogus_server".
	#[structopt (long)]
	server_id: Option <PeerId>,
}
/// Entry point for the end-server-side proxy (P4): connects out to the
/// relay (P3), then services the bi streams the relay opens towards us,
/// one per incoming client connection.
#[tokio::main]
async fn main () -> anyhow::Result <()> {
	tracing_subscriber::fmt::init ();
	let opt = Opt::from_args ();
	// The relay's self-signed cert, written out by the relay at startup.
	let server_cert = tokio::fs::read ("quic_server.crt").await?;
	let relay_addr = opt.relay_addr.unwrap_or_else (|| String::from ("127.0.0.1:30380")).parse ()?;
	let endpoint = make_client_endpoint ("0.0.0.0:0".parse ()?, &[&server_cert])?;
	trace! ("Connecting to relay server");
	let server_id = opt.server_id.unwrap_or_else (|| "bogus_server".to_string ());
	let quinn::NewConnection {
		mut bi_streams,
		..
	} = protocol::p4_connect_to_p3 (&endpoint, &relay_addr, &server_id).await?;
	trace! ("Accepting bi streams from P3");
	// Handle each relay-initiated stream concurrently; a None from the
	// stream iterator means the relay connection closed, which is an error.
	loop {
		let (relay_send, relay_recv) = bi_streams.next ().await.ok_or_else (|| anyhow::anyhow! ("Relay server didn't open a bi stream"))??;
		tokio::spawn (handle_bi_stream (relay_send, relay_recv));
	}
}
/// Dispatches one incoming bi stream from the relay (P3).
///
/// The only stream type P3 currently opens towards P4 is a new PTTH
/// connection, so this unwraps that variant and hands the stream pair
/// to the connection handler.
async fn handle_bi_stream (
	relay_send: quinn::SendStream,
	mut relay_recv: quinn::RecvStream,
) -> anyhow::Result <()>
{
	let stream_type = protocol::p4_accept_p3_stream (&mut relay_recv).await?;
	let protocol::P3ToP4Stream::NewPtthConnection {
		client_id,
		..
	} = stream_type;
	handle_new_ptth_connection (relay_send, relay_recv, client_id).await?;
	Ok (())
}
/// Services one PTTH connection on the end-server side (P4):
/// authorizes the stream, learns which local TCP port the client wants
/// (P5), dials it, and relays bytes until either side closes.
async fn handle_new_ptth_connection (
	mut relay_send: quinn::SendStream,
	mut relay_recv: quinn::RecvStream,
	_client_id: String,
) -> anyhow::Result <()>
{
	// TODO: Check authorization for P2 --> P4
	protocol::p4_authorize_p2_connection (&mut relay_send).await?;
	let p4_to_p5_req = protocol::p4_expect_p5_request (&mut relay_recv).await?;
	// TODO: Check authorization for P1 --> P5
	protocol::p4_authorize_p1_connection (&mut relay_send).await?;
	debug! ("Started PTTH connection");
	// Dial the requested port on loopback only - this proxy never
	// connects out to other hosts.
	let stream = TcpStream::connect (("127.0.0.1", p4_to_p5_req.port)).await?;
	let (local_recv, local_send) = stream.into_split ();
	trace! ("Relaying bytes...");
	let ptth_conn = quic_demo::connection::NewConnection {
		local_send,
		local_recv,
		relay_send,
		relay_recv,
	}.build ();
	ptth_conn.wait_for_close ().await?;
	Ok (())
}

View File

@ -0,0 +1,317 @@
use structopt::StructOpt;
use quic_demo::prelude::*;
use protocol::PeerId;
/// Command-line options for the relay server (P3).
#[derive (Debug, StructOpt)]
struct Opt {
	/// Address to bind the QUIC endpoint to; defaults to 0.0.0.0:30380.
	#[structopt (long)]
	listen_addr: Option <String>,
}
/// Entry point for the relay server (P3): binds the QUIC endpoint,
/// writes its self-signed cert to `quic_server.crt` (which the proxies
/// read and trust), then accepts peer connections forever.
#[tokio::main]
async fn main () -> anyhow::Result <()> {
	tracing_subscriber::fmt::init ();
	let opt = Opt::from_args ();
	let listen_addr = opt.listen_addr.unwrap_or_else (|| String::from ("0.0.0.0:30380")).parse ()?;
	let (mut incoming, server_cert) = make_server_endpoint (listen_addr)?;
	// Also print the cert so it can be copy-pasted to remote peers.
	println! ("Base64 cert: {}", base64::encode (&server_cert));
	tokio::fs::write ("quic_server.crt", &server_cert).await?;
	let relay_state = RelayState::default ();
	let relay_state = Arc::new (relay_state);
	while let Some (conn) = incoming.next ().await {
		let relay_state = Arc::clone (&relay_state);
		// Each new peer QUIC connection gets its own task
		tokio::spawn (async move {
			// Bracket the handler with connect/disconnect stats so the
			// active count stays right even when the handler errors.
			let active = relay_state.stats.quic.connect ();
			debug! ("QUIC connections: {}", active);
			match handle_quic_connection (Arc::clone (&relay_state), conn).await {
				Ok (_) => (),
				Err (e) => warn! ("handle_quic_connection {:?}", e),
			}
			let active = relay_state.stats.quic.disconnect ();
			debug! ("QUIC connections: {}", active);
		});
	}
	Ok (())
}
/// Shared state for the relay server (P3).
#[derive (Default)]
struct RelayState {
	// Connected end-server proxies (P4), keyed by their self-reported ID.
	p4_server_proxies: Mutex <HashMap <PeerId, P4State>>,
	stats: Stats,
}
/// Diagnostic counters for the relay.
#[derive (Default)]
struct Stats {
	// Connect / disconnect totals for QUIC peer connections.
	quic: ConnectEvents,
}
/// Monotonic connect / disconnect counters. The active count is derived
/// as `connects - disconnects`, so the fields only ever increase and can
/// be plain atomics with no lock.
#[derive (Default)]
struct ConnectEvents {
	connects: AtomicU64,
	disconnects: AtomicU64,
}
impl ConnectEvents {
	/// Records one connection event; returns how many connections are
	/// active afterwards.
	fn connect (&self) -> u64 {
		let total_connects = 1 + self.connects.fetch_add (1, Ordering::Relaxed);
		total_connects - self.disconnects.load (Ordering::Relaxed)
	}
	/// Records one disconnection event; returns how many connections are
	/// active afterwards.
	fn disconnect (&self) -> u64 {
		let total_disconnects = 1 + self.disconnects.fetch_add (1, Ordering::Relaxed);
		self.connects.load (Ordering::Relaxed) - total_disconnects
	}
	/// Number of currently-active connections (connects minus disconnects).
	fn _active (&self) -> u64 {
		self.connects.load (Ordering::Relaxed) - self.disconnects.load (Ordering::Relaxed)
	}
}
/// Relay-side handle for one connected P4 server proxy: the channel
/// used to push pending P2 connection requests at it.
struct P4State {
	req_channel: mpsc::Sender <RequestP2ToP4>,
}
// Currently no inherent methods; the connection handlers below
// manipulate the state fields directly.
impl RelayState {
}
/// A P2 client's connection request, queued for a P4 server proxy:
/// the client's stream pair plus its ID for logging.
struct RequestP2ToP4 {
	client_send: quinn::SendStream,
	client_recv: quinn::RecvStream,
	client_id: String,
}
/// The four stream halves of one client <-> server relay pairing,
/// before the pump tasks are spawned by `build`.
struct PtthNewConnection {
	client_send: quinn::SendStream,
	client_recv: quinn::RecvStream,
	server_send: quinn::SendStream,
	server_recv: quinn::RecvStream,
}
/// Join handles for the two relay pump tasks of an active connection.
struct PtthConnection {
	uplink_task: JoinHandle <anyhow::Result <()>>,
	downlink_task: JoinHandle <anyhow::Result <()>>,
}
impl PtthNewConnection {
	/// Spawns the uplink and downlink pump tasks for one PTTH
	/// connection. Each task copies bytes in one direction until its
	/// source stream reports end-of-stream, then exits.
	fn build (self) -> PtthConnection {
		let Self {
			mut client_send,
			mut client_recv,
			mut server_send,
			mut server_recv,
		} = self;
		let uplink_task = tokio::spawn (async move {
			// Uplink - Client to end server
			let mut buf = vec! [0u8; 65_536];
			while let Some (bytes_read) = client_recv.read (&mut buf).await? {
				if bytes_read == 0 {
					break;
				}
				let buf_slice = &buf [0..bytes_read];
				trace! ("Uplink relaying {} bytes", bytes_read);
				server_send.write_all (buf_slice).await?;
			}
			trace! ("Uplink closed");
			Ok::<_, anyhow::Error> (())
		});
		let downlink_task = tokio::spawn (async move {
			// Downlink - End server to client
			let mut buf = vec! [0u8; 65_536];
			while let Some (bytes_read) = server_recv.read (&mut buf).await? {
				// Treat a zero-byte read as end-of-stream, the same as the
				// uplink does. (Previously only the uplink had this check;
				// the two directions should behave identically.)
				if bytes_read == 0 {
					break;
				}
				let buf_slice = &buf [0..bytes_read];
				trace! ("Downlink relaying {} bytes", bytes_read);
				client_send.write_all (buf_slice).await?;
			}
			trace! ("Downlink closed");
			Ok::<_, anyhow::Error> (())
		});
		PtthConnection {
			uplink_task,
			downlink_task,
		}
	}
}
/// Handles one freshly-accepted QUIC connection: waits for the peer to
/// identify itself (P2 client proxy or P4 server proxy) on its first
/// bi stream, then dispatches to the matching handler.
async fn handle_quic_connection (
	relay_state: Arc <RelayState>,
	conn: quinn::Connecting,
) -> anyhow::Result <()>
{
	let mut conn = conn.await?;
	// Everyone who connects must identify themselves with the first
	// bi stream
	// TODO: Timeout
	let (mut send, mut recv) = conn.bi_streams.next ().await.ok_or_else (|| anyhow::anyhow! ("QUIC client didn't identify itself"))??;
	let peer = protocol::p3_accept_peer (&mut recv).await?;
	match peer {
		protocol::P3Peer::P2ClientProxy (peer) => {
			// TODO: Check authorization for P2 peers
			protocol::p3_authorize_p2_peer (&mut send).await?;
			handle_p2_connection (relay_state, conn, peer).await?;
		},
		protocol::P3Peer::P4ServerProxy (peer) => {
			// TODO: Check authorization for P4 peers
			protocol::p3_authorize_p4_peer (&mut send).await?;
			handle_p4_connection (relay_state, conn, peer).await?;
		},
	}
	Ok::<_, anyhow::Error> (())
}
/// Services a connected P2 client proxy: every bi stream it opens is
/// one request, handled concurrently in its own task.
async fn handle_p2_connection (
	relay_state: Arc <RelayState>,
	conn: quinn::NewConnection,
	peer: protocol::P2ClientProxy,
) -> anyhow::Result <()>
{
	let client_id = peer.id;
	let quinn::NewConnection {
		mut bi_streams,
		..
	} = conn;
	// The stream iterator ends when the P2 connection closes.
	while let Some (bi_stream) = bi_streams.next ().await {
		let (send, mut recv) = bi_stream?;
		let relay_state = Arc::clone (&relay_state);
		let client_id = client_id.clone ();
		tokio::spawn (async move {
			debug! ("Request started for P2");
			match protocol::p3_accept_p2_stream (&mut recv).await? {
				protocol::P2ToP3Stream::ConnectP2ToP4 {
					server_id,
				} => handle_request_p2_to_p4 (relay_state, client_id, server_id, send, recv).await?,
			}
			debug! ("Request ended for P2");
			Ok::<_, anyhow::Error> (())
		});
	}
	debug! ("P2 {} disconnected", client_id);
	Ok (())
}
/// Forwards a P2 "connect me to P4" request: authorizes it, then hands
/// the client's stream pair to the target P4's request channel.
async fn handle_request_p2_to_p4 (
	relay_state: Arc <RelayState>,
	client_id: String,
	server_id: PeerId,
	mut client_send: quinn::SendStream,
	client_recv: quinn::RecvStream,
) -> anyhow::Result <()>
{
	trace! ("P2 {} wants to connect to P4 {}", client_id, server_id);
	// TODO: Check authorization for P2 to connect to P4
	protocol::p3_authorize_p2_to_p4_connection (&mut client_send).await?;
	{
		let p4_server_proxies = relay_state.p4_server_proxies.lock ().await;
		match p4_server_proxies.get (&server_id) {
			Some (p4_state) => {
				p4_state.req_channel.send (RequestP2ToP4 {
					client_send,
					client_recv,
					client_id,
				}).await.map_err (|_| anyhow::anyhow! ("Can't send request to P4 server"))?;
			},
			// NOTE(review): when the server isn't connected we only log;
			// the client's streams are dropped and P2 gets no error reply.
			// Consider sending an explicit failure response instead.
			None => warn! ("That server isn't connected"),
		}
	}
	Ok (())
}
/// Services a connected P4 server proxy: registers it in the shared
/// map, then pumps queued P2 requests to it, opening one bi stream
/// towards P4 per request.
async fn handle_p4_connection (
	relay_state: Arc <RelayState>,
	conn: quinn::NewConnection,
	peer: protocol::P4ServerProxy,
) -> anyhow::Result <()>
{
	let server_id = peer.id;
	let quinn::NewConnection {
		connection,
		..
	} = conn;
	// Small buffer: requests queue here while P4 streams are set up.
	let (tx, mut rx) = mpsc::channel (2);
	let p4_state = P4State {
		req_channel: tx,
	};
	{
		let mut p4_server_proxies = relay_state.p4_server_proxies.lock ().await;
		p4_server_proxies.insert (server_id.clone (), p4_state);
	}
	// NOTE(review): this loop ends only when every sender is dropped,
	// i.e. when a new P4 with the same ID replaces our map entry. The
	// entry is never removed on QUIC disconnect, so stale P4 state
	// accumulates in `p4_server_proxies` - needs a cleanup path that
	// doesn't race with a reconnecting P4. TODO confirm intended
	// lifecycle.
	while let Some (req) = rx.recv ().await {
		let connection = connection.clone ();
		let server_id = server_id.clone ();
		tokio::spawn (async move {
			let RequestP2ToP4 {
				client_send,
				client_recv,
				client_id,
			} = req;
			debug! ("P4 {} got a request from P2 {}", server_id, client_id);
			let (server_send, server_recv) = protocol::p3_connect_p2_to_p4 (&connection, &client_id).await?;
			trace! ("Relaying bytes...");
			let ptth_conn = PtthNewConnection {
				client_send,
				client_recv,
				server_send,
				server_recv,
			}.build ();
			// Await both pumps so their errors surface in this task.
			ptth_conn.uplink_task.await??;
			ptth_conn.downlink_task.await??;
			debug! ("Request ended for P4");
			Ok::<_, anyhow::Error> (())
		});
	}
	debug! ("P4 {} disconnected", server_id);
	Ok (())
}

View File

@ -9,48 +9,33 @@ use crate::prelude::*;
pub struct ForwardingInstance { pub struct ForwardingInstance {
task: JoinHandle <anyhow::Result <()>>, task: JoinHandle <anyhow::Result <()>>,
shutdown_flag: watch::Sender <bool>, shutdown_flag: watch::Sender <bool>,
local_port: u16,
} }
impl ForwardingInstance { impl ForwardingInstance {
pub async fn new ( pub fn new (
connection_p2_p3: quinn::Connection, connection_p2_p3: quinn::Connection,
params: ForwardingParams, params: ForwardingParams,
) -> anyhow::Result <Self> ) -> Self
{ {
let (shutdown_flag, shutdown_flag_rx) = tokio::sync::watch::channel (true); let (shutdown_flag, shutdown_flag_rx) = tokio::sync::watch::channel (true);
let listener = TcpListener::bind (("127.0.0.1", params.client_tcp_port)).await?;
let local_port = listener.local_addr ()?.port ();
trace! ("Accepting local TCP connections from P1 on {}", local_port);
let task = tokio::spawn (forward_port ( let task = tokio::spawn (forward_port (
listener,
connection_p2_p3, connection_p2_p3,
params, params,
shutdown_flag_rx shutdown_flag_rx
)); ));
Ok (Self { Self {
task, task,
shutdown_flag, shutdown_flag,
local_port, }
})
} }
pub async fn close (self) -> anyhow::Result <()> { pub async fn close (self) -> anyhow::Result <()> {
if self.shutdown_flag.send (false).is_err () { self.shutdown_flag.send (false)?;
warn! ("Trying to gracefully shutdown forwarding task but it appears to already be shut down"); self.task.await??;
}
self.task.await
.context ("awaiting ForwardingInstance task")?
.context ("inside ForwardingInstance task")?;
Ok (()) Ok (())
} }
pub fn local_port (&self) -> u16 {
self.local_port
}
} }
pub struct ForwardingParams { pub struct ForwardingParams {
@ -59,23 +44,25 @@ pub struct ForwardingParams {
pub server_tcp_port: u16, pub server_tcp_port: u16,
} }
/// Starts a TCP listener that can forward any number of TCP streams to async fn forward_port (
/// the same client:server port combination
pub async fn forward_port (
listener: TcpListener,
connection_p2_p3: quinn::Connection, connection_p2_p3: quinn::Connection,
params: ForwardingParams, params: ForwardingParams,
mut shutdown_flag_rx: tokio::sync::watch::Receiver <bool>, shutdown_flag_rx: tokio::sync::watch::Receiver <bool>,
) -> anyhow::Result <()> ) -> anyhow::Result <()>
{ {
let ForwardingParams { let ForwardingParams {
client_tcp_port,
server_id, server_id,
server_tcp_port, server_tcp_port,
..
} = params; } = params;
let listener = TcpListener::bind (("127.0.0.1", client_tcp_port)).await?;
trace! ("Accepting local TCP connections from P1 on {}", client_tcp_port);
while *shutdown_flag_rx.borrow () { while *shutdown_flag_rx.borrow () {
let mut shutdown_flag_rx_2 = shutdown_flag_rx.clone ();
tokio::select! { tokio::select! {
x = listener.accept () => { x = listener.accept () => {
let (tcp_socket, _) = x?; let (tcp_socket, _) = x?;
@ -85,7 +72,7 @@ pub async fn forward_port (
tokio::spawn (handle_p1 (connection, server_id, server_tcp_port, tcp_socket, shutdown_flag_rx)); tokio::spawn (handle_p1 (connection, server_id, server_tcp_port, tcp_socket, shutdown_flag_rx));
}, },
_ = shutdown_flag_rx.changed () => (), _ = shutdown_flag_rx_2.changed () => (),
}; };
} }

View File

@ -0,0 +1,5 @@
pub mod client_proxy;
pub mod connection;
pub mod prelude;
pub mod protocol;
pub mod quinn_utils;

View File

@ -1,12 +1,5 @@
pub use std::{ pub use std::{
collections::*, collections::*,
ffi::OsString,
iter::FromIterator,
net::{
Ipv4Addr,
SocketAddr,
SocketAddrV4,
},
sync::{ sync::{
Arc, Arc,
atomic::{ atomic::{
@ -14,10 +7,7 @@ pub use std::{
Ordering, Ordering,
}, },
}, },
time::{ time::Duration,
Duration,
Instant,
},
}; };
pub use anyhow::{ pub use anyhow::{
@ -30,25 +20,12 @@ pub use tokio::{
AsyncReadExt, AsyncReadExt,
AsyncWriteExt, AsyncWriteExt,
}, },
net::{
TcpListener,
TcpSocket,
UdpSocket,
},
sync::{ sync::{
Mutex, Mutex,
RwLock,
mpsc, mpsc,
}, },
task::JoinHandle, task::JoinHandle,
}; };
pub use rand::{
Rng,
RngCore,
};
pub use rusty_ulid::Ulid;
pub use serde::Deserialize;
pub use serde_json::json;
pub use tracing::{ pub use tracing::{
debug, debug,
error, error,

View File

@ -16,9 +16,6 @@ const MAX_ID_LENGTH: usize = 128;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Command (pub u8); pub struct Command (pub u8);
// I can't remember how I picked the numbers. Just increment I guess,
// and then switch to a variable-length format around 200.
impl Command { impl Command {
pub const CONNECT_P2_TO_P3: Command = Command (2); pub const CONNECT_P2_TO_P3: Command = Command (2);
pub const CONNECT_P4_TO_P3: Command = Command (4); pub const CONNECT_P4_TO_P3: Command = Command (4);
@ -26,21 +23,20 @@ impl Command {
pub const CONNECT_P2_TO_P4_STEP_2: Command = Command (11); pub const CONNECT_P2_TO_P4_STEP_2: Command = Command (11);
pub const CONNECT_P2_TO_P5: Command = Command (12); pub const CONNECT_P2_TO_P5: Command = Command (12);
pub const OKAY: Command = Command (20); pub const OKAY: Command = Command (20);
pub const DIREC_P2_TO_P3: Command = Command (21);
} }
pub async fn p2_connect_to_p3 ( pub async fn p2_connect_to_p3 (
endpoint: &quinn::Endpoint, endpoint: &quinn::Endpoint,
relay_addr: std::net::SocketAddr, relay_addr: &std::net::SocketAddr,
client_id: &str, client_id: &str,
) -> Result <quinn::Connection> ) -> Result <quinn::NewConnection>
{ {
if client_id.as_bytes ().len () > MAX_ID_LENGTH { if client_id.as_bytes ().len () > MAX_ID_LENGTH {
bail! ("Client ID is longer than MAX_ID_LENGTH"); bail! ("Client ID is longer than MAX_ID_LENGTH");
} }
let new_conn = endpoint.connect (relay_addr, "localhost")?.await?; let new_conn = endpoint.connect (relay_addr, "localhost")?.await?;
let (mut send, mut recv) = new_conn.open_bi ().await?; let (mut send, mut recv) = new_conn.connection.open_bi ().await?;
let cmd_type = Command::CONNECT_P2_TO_P3.0; let cmd_type = Command::CONNECT_P2_TO_P3.0;
send.write_all (&[cmd_type, 0, 0, 0]).await?; send.write_all (&[cmd_type, 0, 0, 0]).await?;
@ -65,7 +61,7 @@ pub async fn p2_connect_to_p5 (
let cmd_type = Command::CONNECT_P2_TO_P4.0; let cmd_type = Command::CONNECT_P2_TO_P4.0;
send.write_all (&[cmd_type, 0, 0, 0]).await?; send.write_all (&[cmd_type, 0, 0, 0]).await?;
send_lv_string (&mut send, server_id).await?; send_lv_string (&mut send, &server_id).await?;
expect_exact_response (&mut recv, [Command::OKAY.0, cmd_type, 0, 0]).await expect_exact_response (&mut recv, [Command::OKAY.0, cmd_type, 0, 0]).await
.context ("P2 didn't get OK response when asking P3 to connect P2 to P4")?; .context ("P2 didn't get OK response when asking P3 to connect P2 to P4")?;
@ -83,30 +79,6 @@ pub async fn p2_connect_to_p5 (
Ok ((send, recv)) Ok ((send, recv))
} }
pub async fn p2_direc_to_p4 (
connection: &quinn::Connection,
server_id: &str,
) -> Result <Vec <u8>>
{
let (mut send, mut recv) = connection.open_bi ().await?;
let cmd_type = Command::DIREC_P2_TO_P3.0;
let mut cookie = vec! [0u8; 32];
rand::thread_rng ().fill_bytes (&mut cookie [..]);
let cookie = cookie;
send.write_all (&[cmd_type, 0, 0, 0]).await?;
send_lv_string (&mut send, server_id).await?;
send_lv_u16 (&mut send, &cookie).await?;
debug! ("Waiting for OK response for DIREC");
expect_exact_response (&mut recv, [Command::OKAY.0, cmd_type, 0, 0]).await?;
Ok (cookie)
}
pub enum P3Peer { pub enum P3Peer {
P2ClientProxy (P2ClientProxy), P2ClientProxy (P2ClientProxy),
P4ServerProxy (P4ServerProxy), P4ServerProxy (P4ServerProxy),
@ -191,14 +163,6 @@ pub enum P2ToP3Stream {
ConnectP2ToP4 { ConnectP2ToP4 {
server_id: PeerId, server_id: PeerId,
}, },
DirecP2ToP4 {
/// P2 wants a P2P connection to this P4
server_id: PeerId,
/// P2 will send this cookie over plain UDP to P3
/// P3 will learn P2's WAN address from that.
cookie: Vec <u8>,
},
} }
pub async fn p3_accept_p2_stream ( pub async fn p3_accept_p2_stream (
@ -218,15 +182,6 @@ pub async fn p3_accept_p2_stream (
server_id, server_id,
} }
}, },
Command::DIREC_P2_TO_P3 => {
let server_id = recv_lv_string (recv, MAX_ID_LENGTH).await?;
let cookie = recv_lv_u16 (recv, 64).await?;
P2ToP3Stream::DirecP2ToP4 {
server_id,
cookie,
}
},
_ => bail! ("Invalid command type while P3 was accepting a new bi stream from P2"), _ => bail! ("Invalid command type while P3 was accepting a new bi stream from P2"),
}) })
} }
@ -239,33 +194,25 @@ pub async fn p3_authorize_p2_to_p4_connection (
Ok (()) Ok (())
} }
pub async fn p3_authorize_p2_to_p4_direc (
send: &mut SendStream,
) -> Result <()>
{
send.write_all (&[Command::OKAY.0, Command::DIREC_P2_TO_P3.0, 0, 0]).await?;
Ok (())
}
pub async fn p4_connect_to_p3 ( pub async fn p4_connect_to_p3 (
endpoint: &quinn::Endpoint, endpoint: &quinn::Endpoint,
relay_addr: std::net::SocketAddr, relay_addr: &std::net::SocketAddr,
server_id: &str, server_id: &str,
) -> Result <quinn::Connection> ) -> Result <quinn::NewConnection>
{ {
if server_id.as_bytes ().len () > MAX_ID_LENGTH { if server_id.as_bytes ().len () > MAX_ID_LENGTH {
bail! ("Server ID is longer than MAX_ID_LENGTH"); bail! ("Server ID is longer than MAX_ID_LENGTH");
} }
let new_conn = endpoint.connect (relay_addr, "localhost")?.await.context ("UXTDVL2V quinn::Endpoint::connect")?; let new_conn = endpoint.connect (relay_addr, "localhost")?.await?;
let (mut send, mut recv) = new_conn.open_bi ().await?; let (mut send, mut recv) = new_conn.connection.open_bi ().await?;
let cmd_type = Command::CONNECT_P4_TO_P3.0; let cmd_type = Command::CONNECT_P4_TO_P3.0;
send.write_all (&[cmd_type, 0, 0, 0]).await?; send.write_all (&[cmd_type, 0, 0, 0]).await?;
send_lv_string (&mut send, server_id).await?; send_lv_string (&mut send, server_id).await?;
expect_exact_response (&mut recv, [Command::OKAY.0, cmd_type, 0, 0]).await expect_exact_response (&mut recv, [Command::OKAY.0, cmd_type, 0, 0]).await
.context ("WMGW2RXU P4 didn't get OK response when connecting to P3")?; .context ("P4 didn't get OK response when connecting to P3")?;
Ok (new_conn) Ok (new_conn)
} }

View File

@ -8,8 +8,8 @@ use std::{
}; };
use quinn::{ use quinn::{
ClientConfig, Endpoint, Certificate, CertificateChain, ClientConfig, ClientConfigBuilder, Endpoint, Incoming,
ServerConfig, PrivateKey, ServerConfig, ServerConfigBuilder, TransportConfig,
}; };
/// Constructs a QUIC endpoint configured for use a client only. /// Constructs a QUIC endpoint configured for use a client only.
@ -26,10 +26,11 @@ pub fn make_client_endpoint(
let mut transport = quinn::TransportConfig::default (); let mut transport = quinn::TransportConfig::default ();
transport.keep_alive_interval (Some (Duration::from_millis (5_000))); transport.keep_alive_interval (Some (Duration::from_millis (5_000)));
client_cfg.transport_config (Arc::new (transport)); client_cfg.transport = Arc::new (transport);
let mut endpoint = Endpoint::client (bind_addr)?; let mut endpoint_builder = Endpoint::builder ();
endpoint.set_default_client_config (client_cfg); endpoint_builder.default_client_config (client_cfg);
let (endpoint, _incoming) = endpoint_builder.bind(&bind_addr)?;
Ok(endpoint) Ok(endpoint)
} }
@ -41,10 +42,12 @@ pub fn make_client_endpoint(
/// - a stream of incoming QUIC connections /// - a stream of incoming QUIC connections
/// - server certificate serialized into DER format /// - server certificate serialized into DER format
#[allow(unused)] #[allow(unused)]
pub fn make_server_endpoint(bind_addr: SocketAddr) -> anyhow::Result<(Endpoint, Vec<u8>)> { pub fn make_server_endpoint(bind_addr: SocketAddr) -> anyhow::Result<(Incoming, Vec<u8>)> {
let (server_config, server_cert) = configure_server()?; let (server_config, server_cert) = configure_server()?;
let endpoint = Endpoint::server (server_config, bind_addr)?; let mut endpoint_builder = Endpoint::builder();
Ok((endpoint, server_cert)) endpoint_builder.listen(server_config);
let (_endpoint, incoming) = endpoint_builder.bind(&bind_addr)?;
Ok((incoming, server_cert))
} }
/// Builds default quinn client config and trusts given certificates. /// Builds default quinn client config and trusts given certificates.
@ -52,28 +55,29 @@ pub fn make_server_endpoint(bind_addr: SocketAddr) -> anyhow::Result<(Endpoint,
/// ## Args /// ## Args
/// ///
/// - server_certs: a list of trusted certificates in DER format. /// - server_certs: a list of trusted certificates in DER format.
fn configure_client (server_certs: &[&[u8]]) -> anyhow::Result<ClientConfig> { fn configure_client(server_certs: &[&[u8]]) -> anyhow::Result<ClientConfig> {
let mut certs = rustls::RootCertStore::empty (); let mut cfg_builder = ClientConfigBuilder::default();
for cert in server_certs { for cert in server_certs {
certs.add (&rustls::Certificate (cert.to_vec ()))?; cfg_builder.add_certificate_authority(Certificate::from_der(&cert)?)?;
} }
Ok(cfg_builder.build())
Ok (ClientConfig::with_root_certificates (certs))
} }
/// Returns default server configuration along with its certificate. /// Returns default server configuration along with its certificate.
#[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527 #[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527
fn configure_server () -> anyhow::Result<(ServerConfig, Vec<u8>)> { fn configure_server() -> anyhow::Result<(ServerConfig, Vec<u8>)> {
let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap();
let cert_der = cert.serialize_der().unwrap(); let cert_der = cert.serialize_der().unwrap();
let priv_key = cert.serialize_private_key_der(); let priv_key = cert.serialize_private_key_der();
let priv_key = rustls::PrivateKey (priv_key); let priv_key = PrivateKey::from_der(&priv_key)?;
let cert_chain = vec! [rustls::Certificate (cert_der.clone ())];
let mut transport_config = TransportConfig::default();
let mut server_config = ServerConfig::with_single_cert (cert_chain, priv_key)?; transport_config.max_concurrent_uni_streams(0).unwrap();
Arc::get_mut (&mut server_config.transport) let mut server_config = ServerConfig::default();
.unwrap () server_config.transport = Arc::new(transport_config);
.max_concurrent_uni_streams (0_u8.into ()); let mut cfg_builder = ServerConfigBuilder::new(server_config);
let cert = Certificate::from_der(&cert_der)?;
Ok ((server_config, cert_der)) cfg_builder.certificate(CertificateChain::from_certs(vec![cert]), priv_key)?;
Ok((cfg_builder.build(), cert_der))
} }

View File

@ -1,46 +1,36 @@
![The PTTH logo, a green box sitting on a black conveyor belt. The box has an arrow pointing left, and the text "PTTH", in white. The conveyor belt has an arrow pointing right, in white.](assets/logo-128-pixel.png) ![The PTTH logo, a green box sitting on a black conveyor belt. The box has an arrow pointing left, and the text "PTTH", in white. The conveyor belt has an arrow pointing right, in white.](assets/logo-128-pixel.png)
TODO: "Splitting a server in half" diagram
# PTTH # PTTH
PTTH is a web server. PTTH lets you run file servers from behind NAT / firewalls.
A web server has:
1. A public host
2. Files to serve
**With PTTH, the files can live outside of the public host.**
If you want to host 1 TB of files, normally you'd put them on the
cloud server:
``` ```
Cloud HTTP clients ptth_server instances
HTTP server ----> 1 TB of files
^ Firefox ---> | | <--- Server 1
Not cloud | Chromium --> | | <--- Server 2
HTTP client Wget ------> | ptth_relay | <--- Server 3
Curl ------> | | <--- Server 4
Reqwest ---> | | <--- Server 5
``` ```
But uploading 1 TB to the cloud is expensive and slow, even if # Setup
you're sending it to S3-like blob storage.
With PTTH, the files can live outside of the cloud: Pick a relay computer and a server computer.
``` The relay must be able to accept incoming HTTP connections. If the relay
Cloud will be exposed to the Internet, you should use Nginx, Caddy, or Apache as a
+-----> ptth_relay <------+ reverse proxy to terminate TLS in front of ptth_relay. Relays on the Internet
| | will typically have a domain name so that clients and servers can find them.
Not cloud | |
HTTP client ptth_server
1 TB of files
```
The PTTH server lives where the files live. The server must have read access to the files you want to serve, and it must
The cloud host runs a relay which streams files from the end be able to make outgoing HTTP(S) connections to the relay. The server
server as the client demands. does not need a static IP, since it won't accept any incoming HTTP connections.
For home users, this can save you money - The relay server Begin installing PTTH. Run `cargo install ptth_relay` on the relay and
doesn't need to store any of your files. `cargo install ptth_server` on the server. ptth_server is manually tested on
Windows and Linux. ptth_relay is manually tested on Linux only.
- Run ptth_relay on cloud host
- Configure ptth_server for relay, with auto keygen
- Add tripcode to relay config

1
rust-toolchain Normal file
View File

@ -0,0 +1 @@
1.50.0

View File

@ -1,30 +0,0 @@
<html>
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<!-- Inline data-URI favicon so the page needs no extra HTTP request. -->
<link href="data:image/x-icon;base64,AAABAAEAEBAQAAEABAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAlJSUAAGIAAP///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAIiIiIgAAAAAAAAAgAAAAAAAAAAAAADIiIiIi
IiIjMhERERERESMyERERERERIzIREREREREjMhERIRERESMyERIiIiERIzIRESEREREjMhERERER
ESMyERERERERIzIREREREREjMiIiIiIiIiMAAAAAAAAAAAAAAAAAAAAAAAAAAIABAACAAQAAgAEA
AIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAA" rel="icon" type="image/x-icon" />
<style>
body {
font-family: sans-serif;
}
#JNG5U2EX {
border: solid black 1px;
}
</style>
<title>Scope</title>
</head>
<body>
<!-- Main text viewport; filled in and repainted by scope.js. -->
<pre id="JNG5U2EX"></pre>
<!-- Cursor navigation controls; handlers are defined in scope.js. -->
<button onclick="cursor_left ()">Left</button>
<button onclick="cursor_right ()">Right</button>
<button onclick="cursor_up ()">Up</button>
<button onclick="cursor_down ()">Down</button>
<!-- Debug readout (current offset), updated on every repaint. -->
<pre id="XDHSIT76"></pre>
<script src="scope.js"></script>
</body>
</html>

View File

@ -1,126 +0,0 @@
// Maximum number of characters scanned when searching for a line boundary.
const sync_max = 512;
// Main text viewport and the debug readout (IDs match scope/index.html).
const textarea = document.getElementById ("JNG5U2EX");
const debug_text = document.getElementById ("XDHSIT76");
textarea.innerText = "Loading...";
// Window of the remote file's contents, and the cursor offset into it.
let page = "";
let offset = 0;
// Returns the offset just past the nearest '\n' at or before `offset`,
// looking back at most sync_max characters. Returns null when no newline
// falls inside that window (or when `offset` is before the window).
function line_start_before (page, offset) {
	const sync_start = Math.max (sync_max, offset) - sync_max;
	// A negative offset sits before the search window: nothing to find.
	if (offset < sync_start) {
		return null;
	}
	// Search backward from `offset` for the closest newline.
	const idx = page.lastIndexOf ('\n', offset);
	return (idx >= sync_start) ? idx + 1 : null;
}
// Returns the offset just past the first '\n' at or after `offset`,
// scanning at most sync_max characters ahead. Returns null when no
// newline falls inside that window.
function line_start_after (page, offset)
{
	const idx = page.indexOf ('\n', offset);
	if (idx !== -1 && idx < offset + sync_max) {
		return idx + 1;
	}
	return null;
}
// Lays out `page` as a fixed-size character grid starting from the line
// containing `offset`, hard-wrapping long lines. Returns the visible
// region as one newline-joined string.
function layout_text (page, offset) {
	// Fixed viewport dimensions, in characters.
	const width = 120;
	const height = 24;
	// Snap back to the start of the current line so rendering never
	// begins mid-line; fall back to 0 when no newline is found nearby.
	const line_start = line_start_before (page, offset) || 0;
	// Compute layout
	let lines = Array ();
	let line = "";
	for (let i = line_start; i < page.length; i++) {
		// Stop once the viewport is full.
		if (lines.length >= height) {
			break;
		}
		// Hard-wrap: flush the line once it reaches the viewport width;
		// the current character then begins the next line.
		if (line.length >= width) {
			lines.push (line);
			line = "";
		}
		const c = page.charAt (i);
		if (c == '\n') {
			lines.push (line);
			line = "";
		}
		else {
			line = line + page.charAt (i);
		}
	}
	// NOTE(review): a trailing partial line (text after the last '\n')
	// is discarded rather than displayed — confirm this is intended.
	return lines.join ('\n');
}
// Redraws the viewport for the given cursor offset and updates the
// debug readout with the current offset.
function repaint (offset)
{
	textarea.innerText = layout_text (page, offset);
	debug_text.innerText = "Offset: " + offset + "\n";
}
// Fetches the first 64 KiB of log.txt from the server and renders it.
async function async_main ()
{
	// Query the total file size via a HEAD request.
	// NOTE(review): file_length is captured but never used afterwards —
	// presumably intended for paging/scroll bounds; confirm or remove.
	let file_length = null;
	{
		const resp = await fetch ("log.txt", {
			"method": "HEAD",
		});
		file_length = resp.headers.get ("content-length");
	}
	// Fetch only the first 64 KiB; navigation operates within this window.
	const resp = await fetch ("log.txt", {
		"headers": {
			"range": "bytes=0-65535",
		},
	});
	page = await resp.text ();
	repaint (offset);
}
// Kick off the initial load. Both outcomes are deliberately ignored here;
// NOTE(review): surfacing fetch failures in the UI would aid debugging.
async_main ().then (
	function (val) {},
	function (err) {}
);
// Moves the cursor one character left, clamping at the start of the file.
function cursor_left ()
{
	offset = Math.max (0, offset - 1);
	repaint (offset);
}
// Moves the cursor one character right (no upper bound is enforced).
function cursor_right ()
{
	offset = offset + 1;
	repaint (offset);
}
// Moves the cursor to the start of the previous line, or to the start
// of the file when no earlier newline is found within the window.
function cursor_up ()
{
	// offset - 2 steps over the newline terminating the previous line.
	const prev = line_start_before (page, offset - 2);
	offset = prev || 0;
	repaint (offset);
}
// Moves the cursor to the start of the next line.
//
// Fix: line_start_after returns null when no newline is found within
// its search window (e.g. at end of the loaded page). Previously that
// null was assigned straight into `offset`, corrupting the navigation
// state; now the cursor simply stays in place.
function cursor_down ()
{
	const next = line_start_after (page, offset);
	if (next !== null) {
		offset = next;
	}
	repaint (offset);
}

View File

@ -1,7 +0,0 @@
#!/usr/bin/env bash
# Cross-target sanity check for the ptth and ptth_quic crates:
# type-check for Windows and Linux, then run the test suites.
# -e: abort on error; -u: unset vars are errors; -o pipefail: fail pipelines.
set -euo pipefail
cargo check --target x86_64-pc-windows-gnu -p ptth -p ptth_quic
cargo check --target x86_64-unknown-linux-gnu -p ptth -p ptth_quic
cargo test -p ptth -p ptth_quic

View File

@ -77,7 +77,7 @@ async fn main () -> anyhow::Result <()> {
let (stop_server_tx, stop_server_rx) = oneshot::channel (); let (stop_server_tx, stop_server_rx) = oneshot::channel ();
let task_server = { let task_server = {
spawn (async move { spawn (async move {
ptth_server::run_server (config_file, stop_server_rx, None, None, None).await ptth_server::run_server (config_file, stop_server_rx, None, None).await
}) })
}; };
@ -99,7 +99,7 @@ async fn main () -> anyhow::Result <()> {
let resp = client.get (&format! ("{}/frontend/servers/{}/files/COPYING", relay_url, server_name)) let resp = client.get (&format! ("{}/frontend/servers/{}/files/COPYING", relay_url, server_name))
.send ().await.expect ("Couldn't find license").bytes ().await.expect ("Couldn't find license"); .send ().await.expect ("Couldn't find license").bytes ().await.expect ("Couldn't find license");
if blake3::hash (&resp) != [ if blake3::hash (&resp) != blake3::Hash::from ([
0xca, 0x02, 0x92, 0x78, 0xca, 0x02, 0x92, 0x78,
0x9c, 0x0a, 0x0e, 0xcb, 0x9c, 0x0a, 0x0e, 0xcb,
0xa7, 0x06, 0xf4, 0xb3, 0xa7, 0x06, 0xf4, 0xb3,
@ -109,7 +109,7 @@ async fn main () -> anyhow::Result <()> {
0xc1, 0xd4, 0x32, 0xc5, 0xc1, 0xd4, 0x32, 0xc5,
0x2c, 0x4a, 0xac, 0x1f, 0x2c, 0x4a, 0xac, 0x1f,
0x1a, 0xbb, 0xa8, 0xef, 0x1a, 0xbb, 0xa8, 0xef,
] { ]) {
panic! ("{}", String::from_utf8 (resp.to_vec ()).expect ("???")); panic! ("{}", String::from_utf8 (resp.to_vec ()).expect ("???"));
} }

View File

@ -45,7 +45,7 @@ async fn testing_client_checks (
.send (); .send ();
let resp = tokio::time::timeout (Duration::from_secs (2), req).await.expect ("Request timed out").expect ("Couldn't find license").bytes ().await.expect ("Couldn't find license"); let resp = tokio::time::timeout (Duration::from_secs (2), req).await.expect ("Request timed out").expect ("Couldn't find license").bytes ().await.expect ("Couldn't find license");
if blake3::hash (&resp) != [ if blake3::hash (&resp) != blake3::Hash::from ([
0xca, 0x02, 0x92, 0x78, 0xca, 0x02, 0x92, 0x78,
0x9c, 0x0a, 0x0e, 0xcb, 0x9c, 0x0a, 0x0e, 0xcb,
0xa7, 0x06, 0xf4, 0xb3, 0xa7, 0x06, 0xf4, 0xb3,
@ -55,7 +55,7 @@ async fn testing_client_checks (
0xc1, 0xd4, 0x32, 0xc5, 0xc1, 0xd4, 0x32, 0xc5,
0x2c, 0x4a, 0xac, 0x1f, 0x2c, 0x4a, 0xac, 0x1f,
0x1a, 0xbb, 0xa8, 0xef, 0x1a, 0xbb, 0xa8, 0xef,
] { ]) {
panic! ("{}", String::from_utf8 (resp.to_vec ()).expect ("???")); panic! ("{}", String::from_utf8 (resp.to_vec ()).expect ("???"));
} }
@ -151,7 +151,7 @@ impl TestingServer {
let (stop_tx, stop_rx) = oneshot::channel (); let (stop_tx, stop_rx) = oneshot::channel ();
let task = { let task = {
spawn (async move { spawn (async move {
ptth_server::run_server (config_file, stop_rx, None, None, None).await ptth_server::run_server (config_file, stop_rx, None, None).await
}) })
}; };
@ -175,7 +175,7 @@ async fn end_to_end () {
//tracing_subscriber::fmt ().try_init ().ok (); //tracing_subscriber::fmt ().try_init ().ok ();
let relay_port = 40000; let relay_port = 4000;
// No proxy // No proxy
let proxy_port = relay_port; let proxy_port = relay_port;
let server_name = "aliens_wildland"; let server_name = "aliens_wildland";

View File

@ -1,7 +1,6 @@
Interesting issues will get a unique ID with Interesting issues will get a unique ID with
`dd if=/dev/urandom bs=5 count=1 | base32` `dd if=/dev/urandom bs=5 count=1 | base32`
- PTTH_QUIC: Report errors on client GUI (At least whether it's P2 or P4)
- Fix long-running downloads restarting in the middle - Fix long-running downloads restarting in the middle
- [DMX6CO4G](issues/2021-01Jan/status-DMX6CO4G.md) fire-and-forget logs / key-value status data - [DMX6CO4G](issues/2021-01Jan/status-DMX6CO4G.md) fire-and-forget logs / key-value status data
- ptth_tail - ptth_tail