Merge pull request #215 from sebadob/encrypted-backups-to-s3

Encrypted backups to S3

sebadob authored Dec 22, 2023
2 parents f75cb50 + c5a7682 · commit fa0e496
Showing 8 changed files with 189 additions and 14 deletions.
1 change: 1 addition & 0 deletions Cargo.lock


1 change: 1 addition & 0 deletions Cargo.toml
@@ -62,6 +62,7 @@ reqwest = { version = "0.11", default-features = false, features = ["json", "rus
ring = "0.17"
rio_api = "0.8.4"
rio_turtle = "0.8.4"
rusty-s3 = "0.5.0"
semver = { version = "1.0.19", features = ["serde"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
14 changes: 14 additions & 0 deletions rauthy-book/src/config/config.md
@@ -183,6 +183,20 @@ extract these values, create Kubernetes Secrets and provide them as environment
# Disables the housekeeping schedulers (default: false)
#SCHED_DISABLE=true
# The following section will only be taken into account when
# SQLite is used as the main database. If you use Postgres, you
# should use Postgres-native tooling such as `pgbackrest`
# to manage your backups.
# If S3 access is configured, your SQLite backups will be encrypted
# and pushed into the configured bucket.
#S3_URL=
#S3_REGION=
#S3_PATH_STYLE=false
#S3_BUCKET=my_s3_bucket_name
#S3_ACCESS_KEY=
#S3_ACCESS_SECRET=
#S3_DANGER_ACCEPT_INVALID_CERTS=false
#####################################
############# E-MAIL ################
#####################################
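As an illustration of the S3 section above, a filled-in configuration for a self-hosted, path-style endpoint such as MinIO might look like this (all values are placeholders, not taken from this commit; S3_PATH_STYLE=false keeps the virtual-host style used by AWS):

S3_URL=https://s3.example.com
S3_REGION=home
S3_PATH_STYLE=true
S3_BUCKET=rauthy-backups
S3_ACCESS_KEY=minio-access-key
S3_ACCESS_SECRET=minio-access-secret
S3_DANGER_ACCEPT_INVALID_CERTS=false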
2 changes: 1 addition & 1 deletion rauthy-main/src/logging.rs
@@ -20,7 +20,7 @@ pub fn setup_logging() -> tracing::Level {
_ => panic!("Log Level must be one of the following: error, warn, info, debug, trace"),
};
let filter = format!(
"{},hyper=info,matrix_sdk_crypto=error,matrix_sdk_base=error,matrix_sdk::encryption=error",
"{},cryptr=info,hyper=info,matrix_sdk_crypto=error,matrix_sdk_base=error,matrix_sdk::encryption=error",
log_level.as_str()
);
env::set_var("RUST_LOG", &filter);
21 changes: 12 additions & 9 deletions rauthy-main/src/schedulers.rs
@@ -13,7 +13,7 @@ use rauthy_models::entity::refresh_tokens::RefreshToken;
use rauthy_models::entity::sessions::Session;
use rauthy_models::entity::users::User;
use rauthy_models::events::event::Event;
use rauthy_models::migration::backup_db;
use rauthy_models::migration::{backup_db, s3_backup_init_test};
use rauthy_service::auth;
use redhac::{cache_del, QuorumHealthState, QuorumState};
use semver::Version;
@@ -31,6 +31,9 @@ pub async fn scheduler_main(data: web::Data<AppState>) {

let rx_health = data.caches.ha_cache_config.rx_health_state.clone();

// initialize and possibly panic early if anything is mis-configured regarding the s3 storage
s3_backup_init_test().await;

tokio::spawn(db_backup(data.db.clone()));
tokio::spawn(events_cleanup(data.db.clone(), rx_health.clone()));
tokio::spawn(magic_link_cleanup(data.db.clone(), rx_health.clone()));
@@ -53,14 +56,14 @@ pub async fn db_backup(db: DbPool) {
let mut cron_task = env::var("BACKUP_TASK").unwrap_or_else(|_| "0 0 4 * * * *".to_string());

// sec min hour day_of_month month day_of_week year
let schedule = match cron::Schedule::from_str(&cron_task) {
Ok(sched) => sched,
Err(err) => {
error!("Error creating a cron scheduler with the given BACKUP_TASK input: {} - using default \"0 0 4 * * * *\": {}", cron_task, err);
cron_task = "0 0 4 * * * *".to_string();
cron::Schedule::from_str(&cron_task).unwrap()
}
};
let schedule = cron::Schedule::from_str(&cron_task).unwrap_or_else(|err| {
error!(
"Error creating a cron scheduler with the given BACKUP_TASK input: {} - using default \"0 0 4 * * * *\": {}",
cron_task, err
);
cron_task = "0 0 4 * * * *".to_string();
cron::Schedule::from_str(&cron_task).unwrap()
});

info!("Database backups are scheduled for: {}", cron_task);

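BACKUP_TASK uses the seven-field `sec min hour day_of_month month day_of_week year` format of the cron crate, as the comment above notes. A minimal standalone sketch of how such an expression resolves to concrete run times (the chrono crate is assumed for the timezone argument):

use std::str::FromStr;

fn main() {
    // "0 0 4 * * * *": at second 0, minute 0, hour 4, every day, every year
    let schedule = cron::Schedule::from_str("0 0 4 * * * *").unwrap();

    // print the next three scheduled backup times in UTC
    for ts in schedule.upcoming(chrono::Utc).take(3) {
        println!("next backup at: {}", ts);
    }
}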
1 change: 1 addition & 0 deletions rauthy-models/Cargo.toml
@@ -57,6 +57,7 @@ ring = { workspace = true }
rio_api = { workspace = true }
rio_turtle = { workspace = true }
rsa = { version = "0.9.3", features = ["serde", "sha2"] }
rusty-s3 = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
142 changes: 140 additions & 2 deletions rauthy-models/src/migration/mod.rs
@@ -1,15 +1,27 @@
use crate::app_state::DbPool;
use rauthy_common::constants::{DATABASE_URL, RAUTHY_VERSION};
use chrono::Utc;
use cryptr::stream::writer::s3_writer::{Bucket, Credentials, UrlStyle};
use cryptr::{EncValue, FileReader, S3Writer, StreamReader, StreamWriter};
use rauthy_common::constants::{DATABASE_URL, DB_TYPE, RAUTHY_VERSION};
use rauthy_common::error_response::{ErrorResponse, ErrorResponseType};
use rauthy_common::DbType;
use rusty_s3::actions::ListObjectsV2;
use rusty_s3::S3Action;
use std::env;
use std::path::Path;
use std::sync::OnceLock;
use std::time::Duration;
use time::OffsetDateTime;
use tokio::time::Instant;
use tracing::{debug, error, info};
use tracing::{debug, error, info, warn};

pub mod db_migrate;
pub mod db_migrate_dev;

static BUCKET: OnceLock<Bucket> = OnceLock::new();
static CREDENTIALS: OnceLock<Credentials> = OnceLock::new();
static ACCEPT_INVALID_CERTS: OnceLock<bool> = OnceLock::new();

pub async fn backup_db(db: &DbPool) -> Result<(), ErrorResponse> {
let start = Instant::now();
info!("Starting database backup");
@@ -52,6 +64,7 @@ pub async fn backup_db(db: &DbPool) -> Result<(), ErrorResponse> {
}

// TODO encrypt and push backup to S3 storage
s3_backup(&backup_file_path).await?;

// cleanup old backups
let path_base = "data/backup/";
@@ -95,6 +108,131 @@ pub async fn backup_db(db: &DbPool) -> Result<(), ErrorResponse> {
Ok(())
}

async fn s3_backup(file_path: &str) -> Result<(), ErrorResponse> {
let bucket = match BUCKET.get() {
None => {
return Ok(());
}
Some(b) => b,
};
let credentials = CREDENTIALS
.get()
.expect("CREDENTIALS to be set up correctly");
let danger_accept_invalid_certs = ACCEPT_INVALID_CERTS
.get()
.expect("ACCEPT_INVALID_CERTS to be set up correctly");

// execute backup
let reader = StreamReader::File(FileReader {
path: file_path,
print_progress: false,
});

let object = format!(
"rauthy-{}-{}.cryptr",
RAUTHY_VERSION,
Utc::now().timestamp()
);
let writer = StreamWriter::S3(S3Writer {
credentials: Some(credentials),
bucket,
object: &object,
danger_accept_invalid_certs: *danger_accept_invalid_certs,
});

info!("Pushing backup to S3 storage {}", bucket.region());
EncValue::encrypt_stream(reader, writer).await?;
info!("S3 backup push successful");

Ok(())
}

/// Initializes and tests the connection for S3 backups, if configured.
/// This will panic if anything is not configured correctly to avoid unexpected behavior at runtime.
pub async fn s3_backup_init_test() {
let s3_url = match env::var("S3_URL") {
Ok(url) => url,
Err(_) => {
if *DB_TYPE == DbType::Sqlite {
info!("S3 backups are not configured, 'S3_URL' not found");
}
return;
}
};
if *DB_TYPE == DbType::Postgres {
warn!(
r#"
Found S3 config. This will be ignored, since you are using Postgres.
Postgres backups must be managed in a Postgres-native way with proper tooling such as pgbackrest
"#
);
return;
}

// read env vars
let region = env::var("S3_REGION").expect("Found S3_URL but no S3_REGION\n");
let use_path_style = env::var("S3_PATH_STYLE")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Cannot parse S3_PATH_STYLE to bool\n");
let bucket = env::var("S3_BUCKET").expect("Found S3_URL but no S3_BUCKET\n");
let access_key = env::var("S3_ACCESS_KEY").expect("Found S3_URL but no S3_ACCESS_KEY\n");
let secret = env::var("S3_ACCESS_SECRET").expect("Found S3_URL but no S3_ACCESS_SECRET\n");
let danger_accept_invalid_certs = env::var("S3_DANGER_ACCEPT_INVALID_CERTS")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Cannot parse S3_DANGER_ACCEPT_INVALID_CERTS to bool\n");

let credentials = Credentials::new(access_key, secret);
let path_style = if use_path_style {
UrlStyle::Path
} else {
UrlStyle::VirtualHost
};
info!("S3 backups are configured for '{}'", region);
let bucket = Bucket::new(
s3_url.parse().expect("Invalid format for S3_URL"),
path_style,
bucket,
region,
)
.expect("Cannot build S3 Bucket object from given configuration");

// test the connection to be able to panic early
let action = ListObjectsV2::new(&bucket, Some(&credentials)).sign(Duration::from_secs(10));
let client = if danger_accept_invalid_certs {
cryptr::stream::http_client_insecure()
} else {
cryptr::stream::http_client()
};
match client.get(action).send().await {
Ok(resp) => {
if resp.status().is_success() {
info!("S3 connection test was successful");
} else {
let body = resp.text().await.unwrap_or_default();
panic!(
"\nCannot connect to S3 storage - check your configuration and access rights\n\n{}\n",
body
);
}
}
Err(err) => {
panic!("Cannot connect to S3 storage: {}", err);
}
}

// save values for backups
BUCKET.set(bucket).expect("to set BUCKET only once");
CREDENTIALS
.set(credentials)
.expect("to set CREDENTIALS only once");
ACCEPT_INVALID_CERTS
.set(danger_accept_invalid_certs)
.expect("to set ACCEPT_INVALID_CERTS only once");
}

// Important: This must be executed BEFORE the rauthy store has been opened in `main`
pub async fn restore_local_backup() -> Result<(), ErrorResponse> {
error!("Backup restores are not yet adopted for the new database drivers - exiting");
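The pushed objects are named `rauthy-{RAUTHY_VERSION}-{unix_timestamp}.cryptr` and can be listed with the same rusty-s3 presigned-request pattern the connection test uses. A minimal sketch under stated assumptions: endpoint, bucket name and credentials are placeholders, reqwest performs the HTTP call, and rusty-s3's `parse_response` helper is assumed for decoding the XML listing:

use std::time::Duration;

use rusty_s3::actions::ListObjectsV2;
use rusty_s3::{Bucket, Credentials, S3Action, UrlStyle};

// List previously pushed backup objects; for illustration only.
async fn list_backups() -> Result<(), Box<dyn std::error::Error>> {
    let bucket = Bucket::new(
        "https://s3.example.com".parse()?,
        UrlStyle::Path,
        "rauthy-backups",
        "home",
    )?;
    let credentials = Credentials::new("access_key", "access_secret");

    // presign a ListObjectsV2 request, valid for 10 seconds
    let url = ListObjectsV2::new(&bucket, Some(&credentials)).sign(Duration::from_secs(10));

    let text = reqwest::get(url).await?.text().await?;
    let list = ListObjectsV2::parse_response(&text)?;
    for object in list.contents {
        // objects are named rauthy-{RAUTHY_VERSION}-{unix_timestamp}.cryptr
        println!("backup object: {}", object.key);
    }
    Ok(())
}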
21 changes: 19 additions & 2 deletions rauthy.cfg
@@ -66,7 +66,8 @@ UNSAFE_NO_RESET_BINDING=true

# Cron job for automatic data store backups (default: "0 0 4 * * * *")
# sec min hour day_of_month month day_of_week year
BACKUP_TASK="0 0 4 * * * *"
#BACKUP_TASK="0 0 4 * * * *"
BACKUP_TASK="0 4 12 * * * *"

# The name for the data store backups. The current timestamp will always be appended automatically. (default: rauthy-backup-)
BACKUP_NAME="rauthy-backup-"
@@ -167,6 +168,18 @@ CACHE_RECONNECT_TIMEOUT_UPPER=5000
# default: disabled / not set
#SCHED_USER_EXP_DELETE_MINS=7200

# The following section will only be taken into account when SQLite is used as the main database.
# If you use Postgres, you should use Postgres-native tooling such as `pgbackrest` to manage
# your backups.
# If S3 access is configured, your SQLite backups will be encrypted and pushed into the configured bucket.
#S3_URL=
#S3_REGION=
#S3_PATH_STYLE=true
#S3_BUCKET=
#S3_ACCESS_KEY=
#S3_ACCESS_SECRET=
#S3_DANGER_ACCEPT_INVALID_CERTS=true

#####################################
############# DEPOP #################
#####################################
@@ -202,7 +215,11 @@ EMAIL_SUB_PREFIX="Rauthy IAM"
# Format: "key_id/enc_key another_key_id/another_enc_key" - the enc_key itself must be exactly 32 characters long
# and should not contain special characters.
# The ID must match '[a-zA-Z0-9]{2,20}'
ENC_KEYS="bVCyTsGaggVy5yqQ/S9n7oCen53xSJLzcsmfdnBDvNrqQ63r4 q6u26onRvXVG4427/3CEC8RJWBcMkrBMkRXgx65AmJsNTghSA"
#ENC_KEYS="bVCyTsGaggVy5yqQ/S9n7oCen53xSJLzcsmfdnBDvNrqQ63r4 q6u26onRvXVG4427/3CEC8RJWBcMkrBMkRXgx65AmJsNTghSA"
ENC_KEYS="
q6u26onRvXVG4427/M0NFQzhSSldCY01rckJNa1JYZ3g2NUFtSnNOVGdoU0E=
bVCyTsGaggVy5yqQ/UzluN29DZW41M3hTSkx6Y3NtZmRuQkR2TnJxUTYzcjQ=
"
ENC_KEY_ACTIVE=bVCyTsGaggVy5yqQ

# M_COST should never be below 32768 in production
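The new multi-line ENC_KEYS format above pairs each key ID with a base64 encoding of its 32-character secret. As a sketch of how such an entry could be generated (the rand and base64 crates and the helper name are illustrative assumptions, not part of this commit):

use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use rand::distributions::Alphanumeric;
use rand::Rng;

// build one `key_id/base64(secret)` entry for the multi-line ENC_KEYS format
fn new_enc_key_entry() -> String {
    let mut rng = rand::thread_rng();
    // key ID: must match '[a-zA-Z0-9]{2,20}'
    let id: String = (0..16).map(|_| rng.sample(Alphanumeric) as char).collect();
    // secret: exactly 32 characters, no special characters
    let secret: String = (0..32).map(|_| rng.sample(Alphanumeric) as char).collect();
    format!("{}/{}", id, STANDARD.encode(secret.as_bytes()))
}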
