Reorganize files

This commit is contained in:
Daniel Sockwell 2020-04-12 16:36:45 -04:00
parent 0eec8f6f7b
commit bdb402798c
35 changed files with 287 additions and 323 deletions

75
Cargo.lock generated
View File

@ -57,28 +57,6 @@ name = "autocfg"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "backtrace"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"backtrace-sys 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-demangle 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "backtrace-sys"
version = "0.1.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "base64"
version = "0.10.1"
@ -386,13 +364,8 @@ dependencies = [
[[package]]
name = "dotenv"
version = "0.14.0"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "dtoa"
@ -416,26 +389,6 @@ dependencies = [
"termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "failure"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"backtrace 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "failure_derive"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.15.34 (registry+https://github.com/rust-lang/crates.io-index)",
"synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fake-simd"
version = "0.1.2"
@ -456,7 +409,7 @@ name = "flodgatt"
version = "0.8.2"
dependencies = [
"criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"dotenv 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)",
"dotenv 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)",
"hashbrown 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1607,11 +1560,6 @@ name = "rent_to_own"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc-demangle"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc_version"
version = "0.2.3"
@ -1830,17 +1778,6 @@ dependencies = [
"unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "synstructure"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.15.34 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tempfile"
version = "3.1.0"
@ -2399,8 +2336,6 @@ dependencies = [
"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652"
"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
"checksum backtrace 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "f106c02a3604afcdc0df5d36cc47b44b55917dbaf3d808f71c163a0ddba64637"
"checksum backtrace-sys 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "797c830ac25ccc92a7f8a7b9862bde440715531514594a6154e3d4a54dd769b6"
"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7"
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
@ -2435,12 +2370,10 @@ dependencies = [
"checksum darling_macro 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)" = "244e8987bd4e174385240cde20a3657f607fb0797563c28255c353b5819a07b1"
"checksum derive_state_machine_future 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1220ad071cb8996454c20adf547a34ba3ac793759dab793d9dc04996a373ac83"
"checksum digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05f47366984d3ad862010e22c7ce81a7dbcaebbdfb37241a620f8b6596ee135c"
"checksum dotenv 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7bdb5b956a911106b6b479cdc6bc1364d359a32299f17b49994f5327132e18d9"
"checksum dotenv 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e"
"checksum either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5527cfe0d098f36e3f8839852688e63c8fff1c90b2b405aef730615f9a7bcf7b"
"checksum env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3"
"checksum failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2"
"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1"
"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
"checksum fallible-iterator 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
"checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33"
@ -2573,7 +2506,6 @@ dependencies = [
"checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90"
"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
"checksum rent_to_own 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05a51ad2b1c5c710fa89e6b1631068dab84ed687bc6a5fe061ad65da3d0c25b2"
"checksum rustc-demangle 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "ccc78bfd5acd7bf3e89cffcf899e5cb1a52d6fafa8dec2739ad70c9577a57288"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997"
"checksum safemem 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d2b08423011dae9a5ca23f07cf57dac3857f5c885d352b76f6d95f4aea9434d0"
@ -2604,7 +2536,6 @@ dependencies = [
"checksum subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee"
"checksum syn 0.15.34 (registry+https://github.com/rust-lang/crates.io-index)" = "a1393e4a97a19c01e900df2aec855a29f71cf02c402e2f443b8d2747c25c5dbe"
"checksum syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "66850e97125af79138385e9b88339cbcd037e3f28ceab8c5ad98e64f0f1f80bf"
"checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015"
"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
"checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e"
"checksum termion 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a8fb22f7cde82c8220e5aeacb3258ed7ce996142c77cba193f203515e26c330"

View File

@ -15,7 +15,7 @@ serde_json = "1.0.50"
serde_derive = "1.0.90"
pretty_env_logger = "0.3.0"
postgres = "0.17.0"
dotenv = "0.14.0"
dotenv = "0.15.0"
postgres-openssl = { git = "https://github.com/sfackler/rust-postgres.git"}
url = "2.1.0"
strum = "0.16.0"

33
src/config.rs Normal file
View File

@ -0,0 +1,33 @@
pub use {deployment_cfg::Deployment, postgres_cfg::Postgres, redis_cfg::Redis};
use self::environmental_variables::EnvVar;
use super::err;
use hashbrown::HashMap;
use std::env;
mod deployment_cfg;
mod deployment_cfg_types;
mod environmental_variables;
mod postgres_cfg;
mod postgres_cfg_types;
mod redis_cfg;
mod redis_cfg_types;
pub fn merge_dotenv() -> Result<(), err::FatalErr> {
dotenv::from_filename(match env::var("ENV").ok().as_deref() {
Some("production") => ".env.production",
Some("development") | None => ".env",
Some(_unsupported) => Err(err::FatalErr::Unknown)?, // TODO make more specific
})?;
Ok(())
}
pub fn from_env<'a>(env_vars: HashMap<String, String>) -> (Postgres, Redis, Deployment<'a>) {
let env_vars = EnvVar::new(env_vars);
log::info!("Environmental variables Flodgatt received: {}", &env_vars);
(
Postgres::from_env(env_vars.clone()),
Redis::from_env(env_vars.clone()),
Deployment::from_env(env_vars.clone()),
)
}

View File

@ -1,7 +1,7 @@
use super::{deployment_cfg_types::*, EnvVar};
#[derive(Debug, Default)]
pub struct DeploymentConfig<'a> {
pub struct Deployment<'a> {
pub env: Env,
pub log_level: LogLevel,
pub address: FlodgattAddr,
@ -13,7 +13,7 @@ pub struct DeploymentConfig<'a> {
pub whitelist_mode: WhitelistMode,
}
impl DeploymentConfig<'_> {
impl Deployment<'_> {
pub fn from_env(env: EnvVar) -> Self {
let mut cfg = Self {
env: Env::default().maybe_update(env.get("NODE_ENV")),

View File

@ -39,7 +39,7 @@ impl EnvVar {
impl fmt::Display for EnvVar {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut result = String::new();
for env_var in [
for env_var in &[
"NODE_ENV",
"RUST_LOG",
"BIND",
@ -62,9 +62,7 @@ impl fmt::Display for EnvVar {
"REDIS_USER",
"REDIS_DB",
"REDIS_FREQ",
]
.iter()
{
] {
if let Some(value) = self.get(&(*env_var).to_string()) {
result = format!("{}\n {}: {}", result, env_var, value)
}
@ -125,14 +123,6 @@ macro_rules! from_env_var {
})),
None => self,
}
// if let Some(value) = var {
// Self(Self::inner_from_str(value).unwrap_or_else(|| {
// crate::err::env_var_fatal($env_var, value, $allowed_values)
// }))
// } else {
// self
// }
}
}
};

View File

@ -1,10 +0,0 @@
mod deployment_cfg;
mod deployment_cfg_types;
mod postgres_cfg;
mod postgres_cfg_types;
mod redis_cfg;
mod redis_cfg_types;
mod environmental_variables;
pub use {deployment_cfg::DeploymentConfig, postgres_cfg::PostgresConfig, redis_cfg::RedisConfig, environmental_variables::EnvVar};

View File

@ -3,7 +3,7 @@ use url::Url;
use urlencoding;
#[derive(Debug)]
pub struct PostgresConfig {
pub struct Postgres {
pub user: PgUser,
pub host: PgHost,
pub password: PgPass,
@ -46,7 +46,7 @@ impl EnvVar {
}
}
impl PostgresConfig {
impl Postgres {
/// Configure Postgres and return a connection
pub fn from_env(env: EnvVar) -> Self {

View File

@ -3,7 +3,7 @@ use crate::config::EnvVar;
use url::Url;
#[derive(Debug, Default)]
pub struct RedisConfig {
pub struct Redis {
pub user: RedisUser,
pub password: RedisPass,
pub port: RedisPort,
@ -40,7 +40,7 @@ impl EnvVar {
}
}
impl RedisConfig {
impl Redis {
const USER_SET_WARNING: &'static str =
"Redis user specified, but Redis did not ask for a username. Ignoring it.";
const DB_SET_WARNING: &'static str = r"Redis database specified, but PubSub connections do not use databases.
@ -52,7 +52,7 @@ For similar functionality, you may wish to set a REDIS_NAMESPACE";
None => env,
};
let cfg = RedisConfig {
let cfg = Redis {
user: RedisUser::default().maybe_update(env.get("REDIS_USER")),
password: RedisPass::default().maybe_update(env.get("REDIS_PASSWORD")),
port: RedisPort::default().maybe_update(env.get("REDIS_PORT")),

67
src/err.rs Normal file
View File

@ -0,0 +1,67 @@
mod timeline;
pub use timeline::TimelineErr;
use crate::response::ManagerErr;
use std::fmt;
pub enum FatalErr {
Unknown,
ReceiverErr(ManagerErr),
DotEnv(dotenv::Error),
Logger(log::SetLoggerError),
}
impl FatalErr {
pub fn exit(msg: impl fmt::Display) {
eprintln!("{}", msg);
std::process::exit(1);
}
}
impl std::error::Error for FatalErr {}
impl fmt::Debug for FatalErr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self)
}
}
impl fmt::Display for FatalErr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use FatalErr::*;
write!(
f,
"{}",
match self {
Unknown => "Flodgatt encountered an unknown, unrecoverable error".into(),
ReceiverErr(e) => format!("{}", e),
Logger(e) => format!("{}", e),
DotEnv(e) => format!("Could not load specified environmental file: {}", e),
}
)
}
}
impl From<dotenv::Error> for FatalErr {
fn from(e: dotenv::Error) -> Self {
Self::DotEnv(e)
}
}
impl From<ManagerErr> for FatalErr {
fn from(e: ManagerErr) -> Self {
Self::ReceiverErr(e)
}
}
impl From<log::SetLoggerError> for FatalErr {
fn from(e: log::SetLoggerError) -> Self {
Self::Logger(e)
}
}
// TODO delete vvvv when postgres_cfg.rs has better error handling
pub fn die_with_msg(msg: impl fmt::Display) -> ! {
eprintln!("FATAL ERROR: {}", msg);
std::process::exit(1);
}

View File

@ -1,43 +0,0 @@
mod timeline;
pub use timeline::TimelineErr;
use crate::redis_to_client_stream::ReceiverErr;
use std::fmt;
pub enum FatalErr {
Err,
ReceiverErr(ReceiverErr),
}
impl FatalErr {
pub fn exit(msg: impl fmt::Display) {
eprintln!("{}", msg);
std::process::exit(1);
}
}
impl std::error::Error for FatalErr {}
impl fmt::Debug for FatalErr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self)
}
}
impl fmt::Display for FatalErr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "Error message")
}
}
impl From<ReceiverErr> for FatalErr {
fn from(e: ReceiverErr) -> Self {
Self::ReceiverErr(e)
}
}
// TODO delete vvvv when postgres_cfg.rs has better error handling
pub fn die_with_msg(msg: impl fmt::Display) -> ! {
eprintln!("FATAL ERROR: {}", msg);
std::process::exit(1);
}

View File

@ -35,10 +35,11 @@
//! polls the `Receiver` and the frequency with which the `Receiver` polls Redis.
//!
#![warn(clippy::pedantic)]
#![allow(clippy::try_err, clippy::match_bool)]
pub mod config;
pub mod err;
pub mod messages;
pub mod parse_client_request;
pub mod redis_to_client_stream;
pub mod request;
pub mod response;

View File

@ -1,73 +1,67 @@
use flodgatt::{
config::{DeploymentConfig, EnvVar, PostgresConfig, RedisConfig},
err::FatalErr,
messages::Event,
parse_client_request::{PgPool, Subscription, Timeline},
redis_to_client_stream::{Receiver, SseStream, WsStream},
};
use std::{env, fs, net::SocketAddr, os::unix::fs::PermissionsExt};
use tokio::{
net::UnixListener,
sync::{mpsc, watch},
};
use warp::{http::StatusCode, path, ws::Ws2, Filter, Rejection};
use flodgatt::config;
use flodgatt::err::FatalErr;
use flodgatt::messages::Event;
use flodgatt::request::{PgPool, Subscription, Timeline};
use flodgatt::response::redis;
use flodgatt::response::stream;
use std::fs;
use std::net::SocketAddr;
use std::os::unix::fs::PermissionsExt;
use tokio::net::UnixListener;
use tokio::sync::{mpsc, watch};
use warp::http::StatusCode;
use warp::path;
use warp::ws::Ws2;
use warp::{Filter, Rejection};
fn main() -> Result<(), FatalErr> {
dotenv::from_filename(match env::var("ENV").ok().as_deref() {
Some("production") => ".env.production",
Some("development") | None => ".env",
Some(unsupported) => EnvVar::err("ENV", unsupported, "`production` or `development`"),
})
.ok();
let env_vars = EnvVar::new(dotenv::vars().collect());
pretty_env_logger::init();
log::info!("Environmental variables Flodgatt received: {}", &env_vars);
config::merge_dotenv()?;
pretty_env_logger::try_init()?;
let postgres_cfg = PostgresConfig::from_env(env_vars.clone());
let redis_cfg = RedisConfig::from_env(env_vars.clone());
let cfg = DeploymentConfig::from_env(env_vars);
let pg_pool = PgPool::new(postgres_cfg);
let (postgres_cfg, redis_cfg, cfg) = config::from_env(dotenv::vars().collect());
let (event_tx, event_rx) = watch::channel((Timeline::empty(), Event::Ping));
let (cmd_tx, cmd_rx) = mpsc::unbounded_channel();
let shared_pg_conn = PgPool::new(postgres_cfg, *cfg.whitelist_mode);
let poll_freq = *redis_cfg.polling_interval;
let receiver = Receiver::try_from(redis_cfg, event_tx, cmd_rx)?.into_arc();
log::info!("Streaming server initialized and ready to accept connections");
let manager = redis::Manager::try_from(redis_cfg, event_tx, cmd_rx)?.into_arc();
// Server Sent Events
let sse_receiver = receiver.clone();
let sse_manager = manager.clone();
let (sse_rx, sse_cmd_tx) = (event_rx.clone(), cmd_tx.clone());
let whitelist_mode = *cfg.whitelist_mode;
let sse_routes = Subscription::from_sse_query(pg_pool.clone(), whitelist_mode)
let sse_routes = Subscription::from_sse_request(shared_pg_conn.clone())
.and(warp::sse())
.map(
move |subscription: Subscription, sse_connection_to_client: warp::sse::Sse| {
move |subscription: Subscription, client_conn: warp::sse::Sse| {
log::info!("Incoming SSE request for {:?}", subscription.timeline);
{
let mut receiver = sse_receiver.lock().unwrap_or_else(Receiver::recover);
receiver.subscribe(&subscription).unwrap_or_else(|e| {
let mut manager = sse_manager.lock().unwrap_or_else(redis::Manager::recover);
manager.subscribe(&subscription).unwrap_or_else(|e| {
log::error!("Could not subscribe to the Redis channel: {}", e)
});
}
let cmd_tx = sse_cmd_tx.clone();
let sse_rx = sse_rx.clone();
// send the updates through the SSE connection
SseStream::send_events(sse_connection_to_client, cmd_tx, subscription, sse_rx)
stream::Sse::send_events(
client_conn,
sse_cmd_tx.clone(),
subscription,
sse_rx.clone(),
)
},
)
.with(warp::reply::with::header("Connection", "keep-alive"));
// WebSocket
let ws_receiver = receiver.clone();
let whitelist_mode = *cfg.whitelist_mode;
let ws_routes = Subscription::from_ws_request(pg_pool, whitelist_mode)
let ws_manager = manager.clone();
let ws_routes = Subscription::from_ws_request(shared_pg_conn)
.and(warp::ws::ws2())
.map(move |subscription: Subscription, ws: Ws2| {
log::info!("Incoming websocket request for {:?}", subscription.timeline);
{
let mut receiver = ws_receiver.lock().unwrap_or_else(Receiver::recover);
let mut manager = ws_manager.lock().unwrap_or_else(redis::Manager::recover);
receiver.subscribe(&subscription).unwrap_or_else(|e| {
manager.subscribe(&subscription).unwrap_or_else(|e| {
log::error!("Could not subscribe to the Redis channel: {}", e)
});
}
@ -78,11 +72,10 @@ fn main() -> Result<(), FatalErr> {
.access_token
.unwrap_or_else(String::new);
// send the updates through the WS connection (along with the access_token, for security)
(
ws.on_upgrade(move |ws| WsStream::new(ws, cmd_tx, subscription).send_events(ws_rx)),
token,
)
let ws_response_stream = ws
.on_upgrade(move |ws| stream::Ws::new(ws, cmd_tx, subscription).send_events(ws_rx));
(ws_response_stream, token)
})
.map(|(reply, token)| warp::reply::with_header(reply, "sec-websocket-protocol", token));
@ -93,15 +86,15 @@ fn main() -> Result<(), FatalErr> {
#[cfg(feature = "stub_status")]
let status_endpoints = {
let (r1, r3) = (receiver.clone(), receiver.clone());
let (r1, r3) = (manager.clone(), manager.clone());
warp::path!("api" / "v1" / "streaming" / "health")
.map(|| "OK")
.or(warp::path!("api" / "v1" / "streaming" / "status")
.and(warp::path::end())
.map(move || r1.lock().unwrap_or_else(Receiver::recover).count()))
.map(move || r1.lock().unwrap_or_else(redis::Manager::recover).count()))
.or(
warp::path!("api" / "v1" / "streaming" / "status" / "per_timeline")
.map(move || r3.lock().unwrap_or_else(Receiver::recover).list()),
.map(move || r3.lock().unwrap_or_else(redis::Manager::recover).list()),
)
};
#[cfg(not(feature = "stub_status"))]
@ -139,13 +132,13 @@ fn main() -> Result<(), FatalErr> {
let server_addr = SocketAddr::new(*cfg.address, *cfg.port);
tokio::run(lazy(move || {
let receiver = receiver.clone();
let receiver = manager.clone();
warp::spawn(lazy(move || {
tokio::timer::Interval::new(Instant::now(), poll_freq)
.map_err(|e| log::error!("{}", e))
.for_each(move |_| {
let mut receiver = receiver.lock().unwrap_or_else(Receiver::recover);
let mut receiver = receiver.lock().unwrap_or_else(redis::Manager::recover);
receiver.poll_broadcast().unwrap_or_else(FatalErr::exit);
Ok(())
})

View File

@ -8,7 +8,7 @@ use super::{
};
use {application::Application, attachment::Attachment, card::Card, poll::Poll};
use crate::parse_client_request::Blocks;
use crate::request::Blocks;
use hashbrown::HashSet;
use serde::{Deserialize, Serialize};

View File

@ -1,5 +1,5 @@
use super::{EventErr, Id};
use crate::parse_client_request::Blocks;
use crate::request::Blocks;
use std::convert::TryFrom;

View File

@ -1,12 +0,0 @@
//! Stream the updates appropriate for a given `User`/`timeline` pair from Redis.
mod event_stream;
mod receiver;
mod redis;
pub use {
event_stream::{SseStream, WsStream},
receiver::{Receiver, ReceiverErr},
};
#[cfg(feature = "bench")]
pub use redis::redis_msg::{RedisMsg, RedisParseOutput};

View File

@ -1,5 +0,0 @@
pub mod redis_connection;
pub mod redis_msg;
pub use redis_connection::{RedisConn, RedisConnErr};
pub use redis_msg::RedisParseErr;

View File

@ -2,7 +2,7 @@
use crate::{
config,
messages::Id,
parse_client_request::subscription::{Scope, UserData},
request::subscription::{Scope, UserData},
};
use ::postgres;
use hashbrown::HashSet;
@ -10,9 +10,12 @@ use r2d2_postgres::PostgresConnectionManager;
use warp::reject::Rejection;
#[derive(Clone, Debug)]
pub struct PgPool(pub r2d2::Pool<PostgresConnectionManager<postgres::NoTls>>);
pub struct PgPool {
pub conn: r2d2::Pool<PostgresConnectionManager<postgres::NoTls>>,
whitelist_mode: bool,
}
impl PgPool {
pub fn new(pg_cfg: config::PostgresConfig) -> Self {
pub fn new(pg_cfg: config::Postgres, whitelist_mode: bool) -> Self {
let mut cfg = postgres::Config::new();
cfg.user(&pg_cfg.user)
.host(&*pg_cfg.host.to_string())
@ -27,12 +30,16 @@ impl PgPool {
.max_size(10)
.build(manager)
.expect("Can connect to local postgres");
Self(pool)
Self {
conn: pool,
whitelist_mode,
}
}
pub fn select_user(self, token: &str) -> Result<UserData, Rejection> {
let mut conn = self.0.get().unwrap();
let query_rows = conn
pub fn select_user(self, token: &Option<String>) -> Result<UserData, Rejection> {
let mut conn = self.conn.get().unwrap();
if let Some(token) = token {
let query_rows = conn
.query(
"
SELECT oauth_access_tokens.resource_owner_id, users.account_id, users.chosen_languages, oauth_access_tokens.scopes
@ -46,47 +53,52 @@ LIMIT 1",
&[&token.to_owned()],
)
.expect("Hard-coded query will return Some([0 or more rows])");
if let Some(result_columns) = query_rows.get(0) {
let id = Id(result_columns.get(1));
let allowed_langs = result_columns
.try_get::<_, Vec<_>>(2)
.unwrap_or_else(|_| Vec::new())
.into_iter()
.collect();
let mut scopes: HashSet<Scope> = result_columns
.get::<_, String>(3)
.split(' ')
.filter_map(|scope| match scope {
"read" => Some(Scope::Read),
"read:statuses" => Some(Scope::Statuses),
"read:notifications" => Some(Scope::Notifications),
"read:lists" => Some(Scope::Lists),
"write" | "follow" => None, // ignore write scopes
unexpected => {
log::warn!("Ignoring unknown scope `{}`", unexpected);
None
}
})
.collect();
// We don't need to separately track read auth - it's just all three others
if scopes.remove(&Scope::Read) {
scopes.insert(Scope::Statuses);
scopes.insert(Scope::Notifications);
scopes.insert(Scope::Lists);
}
if let Some(result_columns) = query_rows.get(0) {
let id = Id(result_columns.get(1));
let allowed_langs = result_columns
.try_get::<_, Vec<_>>(2)
.unwrap_or_else(|_| Vec::new())
.into_iter()
.collect();
let mut scopes: HashSet<Scope> = result_columns
.get::<_, String>(3)
.split(' ')
.filter_map(|scope| match scope {
"read" => Some(Scope::Read),
"read:statuses" => Some(Scope::Statuses),
"read:notifications" => Some(Scope::Notifications),
"read:lists" => Some(Scope::Lists),
"write" | "follow" => None, // ignore write scopes
unexpected => {
log::warn!("Ignoring unknown scope `{}`", unexpected);
None
}
})
.collect();
// We don't need to separately track read auth - it's just all three others
if scopes.remove(&Scope::Read) {
scopes.insert(Scope::Statuses);
scopes.insert(Scope::Notifications);
scopes.insert(Scope::Lists);
}
Ok(UserData {
id,
allowed_langs,
scopes,
})
} else {
Ok(UserData {
id,
allowed_langs,
scopes,
})
} else {
Err(warp::reject::custom("Error: Invalid access token"))
}
} else if self.whitelist_mode {
Err(warp::reject::custom("Error: Invalid access token"))
} else {
Ok(UserData::public())
}
}
pub fn select_hashtag_id(self, tag_name: &str) -> Result<i64, Rejection> {
let mut conn = self.0.get().unwrap();
let mut conn = self.conn.get().unwrap();
let rows = &conn
.query(
"
@ -108,7 +120,7 @@ LIMIT 1",
/// **NOTE**: because we check this when the user connects, it will not include any blocks
/// the user adds until they refresh/reconnect.
pub fn select_blocked_users(self, user_id: Id) -> HashSet<Id> {
self.0
self.conn
.get()
.unwrap()
.query(
@ -131,7 +143,7 @@ UNION SELECT target_account_id
/// **NOTE**: because we check this when the user connects, it will not include any blocks
/// the user adds until they refresh/reconnect.
pub fn select_blocking_users(self, user_id: Id) -> HashSet<Id> {
self.0
self.conn
.get()
.unwrap()
.query(
@ -152,7 +164,7 @@ SELECT account_id
/// **NOTE**: because we check this when the user connects, it will not include any blocks
/// the user adds until they refresh/reconnect.
pub fn select_blocked_domains(self, user_id: Id) -> HashSet<String> {
self.0
self.conn
.get()
.unwrap()
.query(
@ -167,7 +179,7 @@ SELECT account_id
/// Test whether a user owns a list
pub fn user_owns_list(self, user_id: Id, list_id: i64) -> bool {
let mut conn = self.0.get().unwrap();
let mut conn = self.conn.get().unwrap();
// For the Postgres query, `id` = list number; `account_id` = user.id
let rows = &conn
.query(

View File

@ -77,15 +77,15 @@ impl Default for Subscription {
}
impl Subscription {
pub fn from_ws_request(pg_pool: PgPool, whitelist_mode: bool) -> BoxedFilter<(Subscription,)> {
pub fn from_ws_request(pg_pool: PgPool) -> BoxedFilter<(Subscription,)> {
parse_ws_query()
.and(query::OptionalAccessToken::from_ws_header())
.and_then(Query::update_access_token)
.and_then(move |q| Subscription::from_query(q, pg_pool.clone(), whitelist_mode))
.and_then(move |q| Subscription::from_query(q, pg_pool.clone()))
.boxed()
}
pub fn from_sse_query(pg_pool: PgPool, whitelist_mode: bool) -> BoxedFilter<(Subscription,)> {
pub fn from_sse_request(pg_pool: PgPool) -> BoxedFilter<(Subscription,)> {
any_of!(
parse_sse_query!(
path => "api" / "v1" / "streaming" / "user" / "notification"
@ -113,16 +113,12 @@ impl Subscription {
// parameter, we need to update our Query if the header has a token
.and(query::OptionalAccessToken::from_sse_header())
.and_then(Query::update_access_token)
.and_then(move |q| Subscription::from_query(q, pg_pool.clone(), whitelist_mode))
.and_then(move |q| Subscription::from_query(q, pg_pool.clone()))
.boxed()
}
fn from_query(q: Query, pool: PgPool, whitelist_mode: bool) -> Result<Self, Rejection> {
let user = match q.access_token.clone() {
Some(token) => pool.clone().select_user(&token)?,
None if whitelist_mode => Err(warp::reject::custom("Error: Invalid access token"))?,
None => UserData::public(),
};
fn from_query(q: Query, pool: PgPool) -> Result<Self, Rejection> {
let user = pool.clone().select_user(&q.access_token)?;
let timeline = Timeline::from_query_and_user(&q, &user, pool.clone())?;
let hashtag_name = match timeline {
Timeline(Stream::Hashtag(_), _, _) => Some(q.hashtag),
@ -185,7 +181,7 @@ impl Timeline {
Timeline(Public, Local, All) => "timeline:public:local".into(),
Timeline(Public, Federated, Media) => "timeline:public:media".into(),
Timeline(Public, Local, Media) => "timeline:public:local:media".into(),
// TODO -- would `.push_str` be faster here?
Timeline(Hashtag(_id), Federated, All) => format!(
"timeline:hashtag:{}",
hashtag.ok_or_else(|| TimelineErr::MissingHashtag)?
@ -310,7 +306,7 @@ pub struct UserData {
}
impl UserData {
fn public() -> Self {
pub fn public() -> Self {
Self {
id: Id(-1),
allowed_langs: HashSet::new(),

9
src/response.rs Normal file
View File

@ -0,0 +1,9 @@
//! Stream the updates appropriate for a given `User`/`timeline` pair from Redis.
pub mod redis;
pub mod stream;
pub use redis::{Manager, ManagerErr};
#[cfg(feature = "bench")]
pub use redis::msg::{RedisMsg, RedisParseOutput};

7
src/response/redis.rs Normal file
View File

@ -0,0 +1,7 @@
pub mod connection;
mod manager;
pub mod msg;
pub use connection::{RedisConn, RedisConnErr};
pub use manager::{Manager, ManagerErr};
pub use msg::RedisParseErr;

View File

@ -1,12 +1,12 @@
mod err;
pub use err::RedisConnErr;
use super::super::receiver::ReceiverErr;
use super::redis_msg::{RedisParseErr, RedisParseOutput};
use super::msg::{RedisParseErr, RedisParseOutput};
use super::ManagerErr;
use crate::{
config::RedisConfig,
config::Redis,
messages::Event,
parse_client_request::{Stream, Timeline},
request::{Stream, Timeline},
};
use std::{
@ -33,7 +33,7 @@ pub struct RedisConn {
}
impl RedisConn {
pub fn new(redis_cfg: RedisConfig) -> Result<Self> {
pub fn new(redis_cfg: Redis) -> Result<Self> {
let addr = format!("{}:{}", *redis_cfg.host, *redis_cfg.port);
let conn = Self::new_connection(&addr, redis_cfg.password.as_ref())?;
conn.set_nonblocking(true)
@ -52,7 +52,7 @@ impl RedisConn {
Ok(redis_conn)
}
pub fn poll_redis(&mut self) -> Poll<Option<(Timeline, Event)>, ReceiverErr> {
pub fn poll_redis(&mut self) -> Poll<Option<(Timeline, Event)>, ManagerErr> {
let mut size = 100; // large enough to handle subscribe/unsubscribe notice
let (mut buffer, mut first_read) = (vec![0u8; size], true);
loop {
@ -105,7 +105,7 @@ impl RedisConn {
},
Ok(NonMsg(leftover)) => (Ok(Ready(None)), leftover),
Err(RedisParseErr::Incomplete) => (Ok(NotReady), input),
Err(other_parse_err) => (Err(ReceiverErr::RedisParseErr(other_parse_err)), input),
Err(other_parse_err) => (Err(ManagerErr::RedisParseErr(other_parse_err)), input),
};
self.redis_input.extend_from_slice(leftover.as_bytes());
self.redis_input.extend_from_slice(invalid_bytes);

View File

@ -2,31 +2,24 @@
//! polled by the correct `ClientAgent`. Also manages subscriptions and
//! unsubscriptions to/from Redis.
mod err;
pub use err::ReceiverErr;
pub use err::ManagerErr;
use super::redis::{redis_connection::RedisCmd, RedisConn};
use crate::{
config,
messages::Event,
parse_client_request::{Stream, Subscription, Timeline},
};
use super::{connection::RedisCmd, RedisConn};
use crate::config;
use crate::messages::Event;
use crate::request::{Stream, Subscription, Timeline};
use futures::{Async, Stream as _Stream};
use hashbrown::HashMap;
use std::sync::{Arc, Mutex, MutexGuard, PoisonError};
use std::time::{Duration, Instant};
use tokio::sync::{mpsc, watch};
use std::{
result,
sync::{Arc, Mutex, MutexGuard, PoisonError},
time::{Duration, Instant},
};
type Result<T> = result::Result<T, ReceiverErr>;
type Result<T> = std::result::Result<T, ManagerErr>;
/// The item that streams from Redis and is polled by the `ClientAgent`
#[derive(Debug)]
pub struct Receiver {
pub struct Manager {
redis_connection: RedisConn,
clients_per_timeline: HashMap<Timeline, i32>,
tx: watch::Sender<(Timeline, Event)>,
@ -34,12 +27,12 @@ pub struct Receiver {
ping_time: Instant,
}
impl Receiver {
impl Manager {
/// Create a new `Receiver`, with its own Redis connections (but, as yet, no
/// active subscriptions).
pub fn try_from(
redis_cfg: config::RedisConfig,
redis_cfg: config::Redis,
tx: watch::Sender<(Timeline, Event)>,
rx: mpsc::UnboundedReceiver<Timeline>,
) -> Result<Self> {

View File

@ -1,11 +1,11 @@
use super::super::redis::{RedisConnErr, RedisParseErr};
use super::super::{RedisConnErr, RedisParseErr};
use crate::err::TimelineErr;
use crate::messages::{Event, EventErr};
use crate::parse_client_request::Timeline;
use crate::request::Timeline;
use std::fmt;
#[derive(Debug)]
pub enum ReceiverErr {
pub enum ManagerErr {
InvalidId,
TimelineErr(TimelineErr),
EventErr(EventErr),
@ -14,11 +14,11 @@ pub enum ReceiverErr {
ChannelSendErr(tokio::sync::watch::error::SendError<(Timeline, Event)>),
}
impl std::error::Error for ReceiverErr {}
impl std::error::Error for ManagerErr {}
impl fmt::Display for ReceiverErr {
impl fmt::Display for ManagerErr {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use ReceiverErr::*;
use ManagerErr::*;
match self {
InvalidId => write!(
f,
@ -34,31 +34,31 @@ impl fmt::Display for ReceiverErr {
}
}
impl From<tokio::sync::watch::error::SendError<(Timeline, Event)>> for ReceiverErr {
impl From<tokio::sync::watch::error::SendError<(Timeline, Event)>> for ManagerErr {
fn from(error: tokio::sync::watch::error::SendError<(Timeline, Event)>) -> Self {
Self::ChannelSendErr(error)
}
}
impl From<EventErr> for ReceiverErr {
impl From<EventErr> for ManagerErr {
fn from(error: EventErr) -> Self {
Self::EventErr(error)
}
}
impl From<RedisConnErr> for ReceiverErr {
impl From<RedisConnErr> for ManagerErr {
fn from(e: RedisConnErr) -> Self {
Self::RedisConnErr(e)
}
}
impl From<TimelineErr> for ReceiverErr {
impl From<TimelineErr> for ManagerErr {
fn from(e: TimelineErr) -> Self {
Self::TimelineErr(e)
}
}
impl From<RedisParseErr> for ReceiverErr {
impl From<RedisParseErr> for ManagerErr {
fn from(e: RedisParseErr) -> Self {
Self::RedisParseErr(e)
}

View File

@ -36,6 +36,8 @@ pub enum RedisParseOutput<'a> {
NonMsg(&'a str),
}
// TODO -- should this impl Iterator?
#[derive(Debug, Clone, PartialEq)]
pub struct RedisMsg<'a> {
pub timeline_txt: &'a str,

View File

@ -1,5 +1,5 @@
use crate::messages::Event;
use crate::parse_client_request::{Subscription, Timeline};
use crate::request::{Subscription, Timeline};
use futures::{future::Future, stream::Stream};
use log;
@ -7,17 +7,17 @@ use std::time::Duration;
use tokio::sync::{mpsc, watch};
use warp::{
reply::Reply,
sse::{ServerSentEvent, Sse},
sse::{ServerSentEvent, Sse as WarpSse},
ws::{Message, WebSocket},
};
pub struct WsStream {
pub struct Ws {
ws_tx: mpsc::UnboundedSender<Message>,
unsubscribe_tx: mpsc::UnboundedSender<Timeline>,
subscription: Subscription,
}
impl WsStream {
impl Ws {
pub fn new(
ws: WebSocket,
unsubscribe_tx: mpsc::UnboundedSender<Timeline>,
@ -57,7 +57,7 @@ impl WsStream {
self.send_ping()
} else if target_timeline == tl {
use crate::messages::{CheckedEvent::Update, Event::*, EventKind};
use crate::parse_client_request::Stream::Public;
use crate::request::Stream::Public;
let blocks = &self.subscription.blocks;
let allowed_langs = &self.subscription.allowed_langs;
@ -109,9 +109,9 @@ impl WsStream {
}
}
pub struct SseStream {}
pub struct Sse;
impl SseStream {
impl Sse {
fn reply_with(event: Event) -> Option<(impl ServerSentEvent, impl ServerSentEvent)> {
Some((
warp::sse::event(event.event_name()),
@ -120,7 +120,7 @@ impl SseStream {
}
pub fn send_events(
sse: Sse,
sse: WarpSse,
mut unsubscribe_tx: mpsc::UnboundedSender<Timeline>,
subscription: Subscription,
sse_rx: watch::Receiver<(Timeline, Event)>,
@ -136,7 +136,7 @@ impl SseStream {
CheckedEvent, CheckedEvent::Update, DynEvent, Event::*, EventKind,
};
use crate::parse_client_request::Stream::Public;
use crate::request::Stream::Public;
match event {
TypeSafe(Update { payload, queued_at }) => match timeline {
Timeline(Public, _, _) if payload.language_not(&allowed_langs) => None,