Add additional logging for postgres connection/server status

This commit is contained in:
Daniel Sockwell 2019-07-09 22:20:11 -04:00
parent 00cf62cd09
commit 9ec245ccdb
2 changed files with 20 additions and 13 deletions

View File

@ -19,19 +19,20 @@ const DEFAULT_REDIS_POLL_INTERVAL: u64 = 100;
lazy_static! {
static ref POSTGRES_ADDR: String = env::var("POSTGRES_ADDR").unwrap_or_else(|_| {
let mut postgres_addr = DEFAULT_POSTGRES_ADDR.to_string();
postgres_addr.insert_str(11,
match &env::var("USER") {
Err(_) => {
warn!("No USER env variable set. Connecting to Postgress with default `postgres` user");
"postgres"
},
Ok(user) => {
warn!("No POSTGRES_ADDR env variable set. Connecting to Postgress with the current user: {}", &user);
user
}
});
postgres_addr
warn!("No POSTGRES_ADDR env variable set; using default postgres address.");
match &env::var("USER") {
Err(_) => {
let addr = DEFAULT_POSTGRES_ADDR.replace("@", format!("{}@", "postgres").as_str());
warn!("No USER env variable set; using default `postgres` user.\n Using postgres address: {}\n", addr);
addr
},
Ok(user) => {
let addr = DEFAULT_POSTGRES_ADDR.replace("@", format!("{}@", user).as_str());
warn!("Connecting to postgres with current user.\n Using postgres address: {}\n", addr);
addr
}
}
});
static ref REDIS_ADDR: String = env::var("REDIS_ADDR").unwrap_or_else(|_| DEFAULT_REDIS_ADDR.to_owned());
@ -71,6 +72,7 @@ pub fn cross_origin_resource_sharing() -> warp::filters::cors::Cors {
/// One-time startup configuration: load `.env` variables, initialize the
/// logger, and eagerly evaluate the lazily-initialized Postgres address so
/// its configuration warnings are emitted immediately at boot.
pub fn logging_and_env() {
    // Load environment variables from a `.env` file if one exists; a missing
    // file is fine, so the Result is deliberately discarded with `.ok()`.
    dotenv().ok();
    pretty_env_logger::init();
    // Dereferencing POSTGRES_ADDR forces its lazy_static initializer to run
    // now — after the logger is ready — so the warn!() messages it may emit
    // (missing POSTGRES_ADDR / USER env vars) appear at startup instead of on
    // first use. The returned String is intentionally unused.
    POSTGRES_ADDR.to_string();
}
/// Configure Postgres and return a connection

View File

@ -1,3 +1,4 @@
use log::{log_enabled, Level};
use ragequit::{
any_of, config,
parse_client_request::{sse, user, ws},
@ -12,6 +13,10 @@ fn main() {
let client_agent_sse = ClientAgent::blank();
let client_agent_ws = client_agent_sse.clone_with_shared_receiver();
if log_enabled!(Level::Warn) {
println!("Streaming server initialized and ready to accept connections");
};
// Server Sent Events
//
// For SSE, the API requires users to use different endpoints, so we first filter based on