flodgatt/src/main.rs

use flodgatt::config;
use flodgatt::err::FatalErr;
use flodgatt::event::Event;
use flodgatt::request::{Handler, Subscription, Timeline};
use flodgatt::response::redis;
use flodgatt::response::stream;

use futures::{future::lazy, stream::Stream as _};
use std::fs;
use std::net::SocketAddr;
use std::os::unix::fs::PermissionsExt;
use std::time::Instant;
use tokio::net::UnixListener;
use tokio::sync::{mpsc, watch};
use tokio::timer::Interval;
use warp::ws::Ws2;
use warp::Filter;

fn main() -> Result<(), FatalErr> {
    config::merge_dotenv()?;
    pretty_env_logger::try_init()?;
    let (postgres_cfg, redis_cfg, cfg) = config::from_env(dotenv::vars().collect())?;
    let poll_freq = *redis_cfg.polling_interval;

    // Create channels to communicate between threads
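    // The Redis manager broadcasts every incoming event on a single watch channel
    // shared by all client threads; each client thread filters out the events that
    // are not relevant to its own subscription (see #128).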
    let (event_tx, event_rx) = watch::channel((Timeline::empty(), Event::Ping));
    let (cmd_tx, cmd_rx) = mpsc::unbounded_channel();

    let request = Handler::new(&postgres_cfg, *cfg.whitelist_mode)?;
    let shared_manager = redis::Manager::try_from(&redis_cfg, event_tx, cmd_rx)?.into_arc();

    // Server Sent Events
    let sse_manager = shared_manager.clone();
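    // Each SSE connection registers its timeline with the shared Redis manager and
    // then streams events from its own clone of the watch channel receiver.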
    let (sse_rx, sse_cmd_tx) = (event_rx.clone(), cmd_tx.clone());
    let sse = request
        .sse_subscription()
        .and(warp::sse())
        .map(move |subscription: Subscription, sse: warp::sse::Sse| {
            log::info!("Incoming SSE request for {:?}", subscription.timeline);
            let mut manager = sse_manager.lock().unwrap_or_else(redis::Manager::recover);
            manager.subscribe(&subscription);
            stream::Sse::send_events(sse, sse_cmd_tx.clone(), subscription, sse_rx.clone())
        })
        .with(warp::reply::with::header("Connection", "keep-alive"));

    // WebSocket
    let ws_manager = shared_manager.clone();
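    // WebSocket connections work like the SSE route, but additionally echo the
    // client's access token back in the `sec-websocket-protocol` response header.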
    let ws = request
        .ws_subscription()
        .and(warp::ws::ws2())
        .map(move |subscription: Subscription, ws: Ws2| {
            log::info!("Incoming websocket request for {:?}", subscription.timeline);
            let mut manager = ws_manager.lock().unwrap_or_else(redis::Manager::recover);
            manager.subscribe(&subscription);
            let token = subscription.access_token.clone().unwrap_or_default(); // token sent for security
            let ws_stream = stream::Ws::new(cmd_tx.clone(), event_rx.clone(), subscription);
            (ws.on_upgrade(move |ws| ws_stream.send_to(ws)), token)
        })
        .map(|(reply, token)| warp::reply::with_header(reply, "sec-websocket-protocol", token));
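    // Health and status endpoints; the instrumented status routes are only compiled
    // in when the `stub_status` feature is enabled.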
    #[cfg(feature = "stub_status")]
    #[rustfmt::skip]
    let status = {
        let (r1, r3) = (shared_manager.clone(), shared_manager.clone());
        request.health().map(|| "OK")
            .or(request.status()
                    .map(move || r1.lock().unwrap_or_else(redis::Manager::recover).count()))
            .or(request.status_per_timeline()
                    .map(move || r3.lock().unwrap_or_else(redis::Manager::recover).list()))
    };
    #[cfg(not(feature = "stub_status"))]
    let status = request.health().map(|| "OK");
    let cors = warp::cors()
        .allow_any_origin()
        .allow_methods(cfg.cors.allowed_methods)
        .allow_headers(cfg.cors.allowed_headers);

    // use futures::future::Future;
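    // Spawn a task that polls Redis every `poll_freq` and broadcasts each new event
    // to the client threads over the watch channel, then build the warp server.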
    let streaming_server = move || {
        let manager = shared_manager.clone();
        let stream = Interval::new(Instant::now(), poll_freq)
            // .take(1200)
            .map_err(|e| log::error!("{}", e))
            .for_each(
                move |_| {
                    let mut manager = manager.lock().unwrap_or_else(redis::Manager::recover);
                    manager.poll_broadcast().map_err(FatalErr::log)
                }, // ).and_then(|_| {
                   //     log::info!("shutting down!");
                   //     std::process::exit(0);
                   //     futures::future::ok(())
                   // }
            );
        warp::spawn(lazy(move || stream));
        warp::serve(ws.or(sse).with(cors).or(status).recover(Handler::err))
    };
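    // Serve over a Unix socket when one is configured; otherwise bind to a TCP address.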
    if let Some(socket) = &*cfg.unix_socket {
        log::info!("Using Unix socket {}", socket);
        // Remove any stale socket file from a previous run, then make the new socket
        // connectable by other processes (e.g., the reverse proxy).
        fs::remove_file(socket).unwrap_or_default();
        let incoming = UnixListener::bind(socket)?.incoming();
        fs::set_permissions(socket, PermissionsExt::from_mode(0o666))?;
        tokio::run(lazy(|| streaming_server().serve_incoming(incoming)));
    } else {
        let server_addr = SocketAddr::new(*cfg.address, *cfg.port);
        tokio::run(lazy(move || streaming_server().bind(server_addr)));
    }

    Err(FatalErr::Unrecoverable) // only get here if there's an unrecoverable error in poll_broadcast.
}