🚧 I'm gonna try to fix the rendezvous problem

main
_ 2020-11-01 18:13:12 -06:00
parent c056118798
commit 067e240ff4
2 changed files with 54 additions and 2 deletions

View File

@ -45,8 +45,6 @@ mod tests {
let server_name_2 = server_name.into ();
spawn (async move {
tokio::time::delay_for (std::time::Duration::from_secs (5)).await;
let opt = server::Opt {
relay_url: relay_url_2,
server_name: server_name_2,
@ -56,6 +54,8 @@ mod tests {
server::main (opt).await.unwrap ();
});
tokio::time::delay_for (std::time::Duration::from_secs (1)).await;
let client = Client::new ();
let resp = client.get (&format! ("{}/relay_up_check", relay_url))

View File

@ -36,7 +36,14 @@ use watcher::*;
struct ServerState {
handlebars: Arc <Handlebars <'static>>,
// Holds clients that are waiting for a response to come
// back from a server.
client_watchers: Arc <Mutex <Watchers <(http_serde::ResponseParts, Body)>>>,
// Holds servers that are waiting for a request to come in
// from a client.
server_watchers: Arc <Mutex <Watchers <http_serde::WrappedRequest>>>,
}
@ -284,3 +291,48 @@ pub async fn main () -> Result <(), Box <dyn Error>> {
Ok (())
}
#[cfg (test)]
mod tests {
// Toy model of a relay for a single server
// with one consumer thread.
// To scale this up, we can just put a bunch into a
// concurrent hash map inside of mutexes or something
struct RelayStateMachine {
}
enum RequestStateMachine {
WaitForServerAccept, // Client has connected
WaitForServerResponse, // Server has accepted request
}
/*
Here's what we need to handle:
When a request comes in:
- Look up the server
- If the server is parked, unpark it
- Park the client
When a server comes to listen:
- Look up the server
- Either return all pending requests, or park the server
When a server comes to respond:
- Look up the parked client
- Begin a stream, unparking the client
So we need these lookups to be fast:
- Server IDs, where 0 or 1 servers and 0 or many clients
can be parked
- Request IDs, where 1 client is parked
*/
}