42 Commits

Author SHA1 Message Date
Ara Sadoyan
f135106a44 Changes in authentication 2026-04-08 19:05:19 +02:00
Ara Sadoyan
389c12119a code cleanup and improvements. 2026-04-08 17:00:06 +02:00
Ara Sadoyan
93a8661281 Cargo cleanup, dependency merge 2026-04-08 15:14:46 +02:00
Ara Sadoyan
0505ce2849 split upstreams.yaml file 2026-03-30 19:04:32 +02:00
Ara Sadoyan
72ed870538 split upstreams.yaml file 2026-03-27 19:24:30 +01:00
Ara Sadoyan
68140d0cf0 type changes, optimization 2026-03-26 17:40:22 +01:00
Ara Sadoyan
7b9b206c13 optimization & cleanup 2026-03-26 16:58:53 +01:00
Ara Sadoyan
4706b281bc cleanup 2026-03-26 14:17:59 +01:00
Ara Sadoyan
1f8efc6af7 FUNDING.yml 2026-03-25 15:16:47 +01:00
Ara Sadoyan
9f595b2709 example config file update 2026-03-25 11:15:55 +01:00
Ara Sadoyan
ed44516015 added redirect_to directive for upstreams 2026-03-24 16:08:14 +01:00
Ara Sadoyan
17da7862e3 upstreams ID hashing update 2026-03-18 20:06:50 +01:00
Ara Sadoyan
24d00da855 performance improvement, sticky session minor bug fix 2026-03-17 19:21:05 +01:00
Ara Sadoyan
c9422759aa Minor performance improvement 2026-03-17 13:54:42 +01:00
Ara Sadoyan
94b1f77734 Type changes, auth override policy 2026-03-04 12:35:45 +01:00
Ara Sadoyan
9d986f9a28 Path level authentication 2026-03-03 19:35:16 +01:00
Ara Sadoyan
3afa2f209f pingora 0.8.0 upgrade 2026-03-03 13:54:53 +01:00
Ara Sadoyan
c151fdf58b moving to boringssl 2026-02-19 18:11:54 +01:00
Ara Sadoyan
438426153f removed unwrap 2026-02-18 12:00:33 +01:00
Ara Sadoyan
9bb01fd1b0 minor improvements 2026-02-17 18:22:46 +01:00
Ara Sadoyan
abb5fef1d6 minor improvements 2026-02-17 17:03:52 +01:00
Ara Sadoyan
3618687ad5 Memory allocation improvements for proxyhttp, fix issue with sticky session . 2026-02-10 19:07:43 +01:00
Ara Sadoyan
a893b3c301 Memory allocation improvements for metrics collector . 2026-02-05 13:57:39 +01:00
Ara Sadoyan
3ff262c7f4 Merge pull request #13 from yerke/patch-1
Fix grammar and formatting in README.md
2026-02-04 14:41:50 +01:00
Yerkebulan Tulibergenov
062f02259f Fix grammar and formatting in README.md 2026-01-30 23:59:10 -08:00
Ara Sadoyan
1a4c9b7d55 Performance optimization in headers 2026-01-28 16:07:45 +01:00
Ara Sadoyan
6ef7f23823 Performance optimization v2 2026-01-28 13:20:31 +01:00
Ara Sadoyan
2b437c65fb Performance improvement. String removal from hot paths. 2026-01-27 16:19:51 +01:00
Ara Sadoyan
38055ae94e added new metric aralez_requests_by_upstream 2026-01-25 18:08:15 +01:00
Ara Sadoyan
703de9e909 updates on API server https://sadoyan.github.io/aralez-docs/assets/api/ 2026-01-22 16:50:51 +01:00
Ara Sadoyan
2c8b01295c Minor subfunction removal 2026-01-21 20:01:16 +01:00
Ara Sadoyan
baebe1c00f Async apply of config via API 2026-01-20 19:16:27 +01:00
Ara Sadoyan
6c1d3c5ef8 Error handling on API server 2026-01-09 18:44:36 +01:00
Ara Sadoyan
2d1a827007 Removed unneeded loop 2025-12-14 12:09:11 +01:00
Ara Sadoyan
a2a5250711 Performance improvements on data types . 2025-12-11 15:21:34 +01:00
Ara Sadoyan
985e923342 to https redirect bug fix 2025-12-11 13:37:40 +01:00
Ara Sadoyan
0fc79c022f perf: optimize header handling and concurrent access patterns 2025-12-10 19:09:04 +01:00
Ara Sadoyan
a43bccdfb8 minor, performance improvements 2025-11-28 13:13:15 +01:00
Ara Sadoyan
5b87391fbb some more type changes, performance improvements 2025-11-27 18:47:04 +01:00
Ara Sadoyan
c68a4ad83d Type changes, performance improvements 2025-11-27 18:03:34 +01:00
Ara Sadoyan
8ba8d32df1 Performance improvements, type changes 2025-11-26 12:12:41 +01:00
Ara Sadoyan
7a839065e6 update on kubernetes web client 2025-11-24 17:57:44 +01:00
28 changed files with 2732 additions and 1551 deletions

13
.cargo/config.toml Normal file
View File

@@ -0,0 +1,13 @@
# Extra linker flags for the static (musl) build targets.
# Each --defsym aliases a 64-bit large-file-support symbol
# (fopen64 / fseeko64 / ftello64) to its plain counterpart at link time.
# NOTE(review): presumably required because musl only exports the plain
# entry points while some linked C code references the *64 names — confirm
# against the C dependencies that pull these symbols in.
[target.aarch64-unknown-linux-musl]
rustflags = [
"-C", "link-arg=-Wl,--defsym=fopen64=fopen",
"-C", "link-arg=-Wl,--defsym=fseeko64=fseeko",
"-C", "link-arg=-Wl,--defsym=ftello64=ftello"
]
# Same symbol aliasing for the x86_64 musl target.
[target.x86_64-unknown-linux-musl]
rustflags = [
"-C", "link-arg=-Wl,--defsym=fopen64=fopen",
"-C", "link-arg=-Wl,--defsym=fseeko64=fseeko",
"-C", "link-arg=-Wl,--defsym=ftello64=ftello"
]

2
.github/FUNDING.yml vendored
View File

@@ -1,6 +1,6 @@
# These are supported funding model platforms # These are supported funding model platforms
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] github: sadoyan
patreon: # Replace with a single Patreon username patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username ko_fi: # Replace with a single Ko-fi username

2762
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -11,45 +11,36 @@ panic = "abort"
strip = true strip = true
[dependencies] [dependencies]
tokio = { version = "1.45.1", features = ["full"] } tokio = { version = "1.51.1", features = ["full"] }
pingora = { version = "0.6.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl pingora = { version = "0.8.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl
serde = { version = "1.0.219", features = ["derive"] } serde = { version = "1.0.228", features = ["derive"] }
dashmap = "7.0.0-rc2" dashmap = "7.0.0-rc2"
pingora-core = "0.6.0" pingora-core = "0.8.0"
pingora-proxy = "0.6.0" pingora-proxy = "0.8.0"
pingora-http = "0.6.0" pingora-http = "0.8.0"
pingora-limits = "0.6.0" pingora-limits = "0.8.0"
async-trait = "0.1.89" async-trait = "0.1.89"
env_logger = "0.11.8" env_logger = "0.11.10"
log = "0.4.28" log = "0.4.29"
futures = "0.3.31" futures = "0.3.32"
notify = "8.2.0" notify = "9.0.0-rc.2"
axum = { version = "0.8.4" } axum = { version = "0.8.8" }
axum-server = { version = "0.7.2", features = ["tls-openssl"] } reqwest = { version = "0.13.2", features = ["json", "stream"] }
reqwest = { version = "0.12.23", features = ["json", "native-tls-alpn", "stream"] } serde_yml = "0.0.12"
#reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] } rand = "0.10.0"
#reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] }
serde_yaml = "0.9.34-deprecated"
rand = "0.9.2"
base64 = "0.22.1" base64 = "0.22.1"
jsonwebtoken = "9.3.1" jsonwebtoken = { version = "10.3.0", default-features = false, features = ["use_pem", "rust_crypto"] }
tonic = "0.14.2" tonic = "0.14.5"
sha2 = { version = "0.11.0-rc.2", default-features = false } sha2 = { version = "0.11.0-rc.5", default-features = false }
base16ct = { version = "0.3.0", features = ["alloc"] } base16ct = { version = "1.0.0", features = ["alloc"] }
urlencoding = "2.1.3" urlencoding = "2.1.3"
arc-swap = "1.7.1" arc-swap = "1.9.1"
mimalloc = { version = "0.1.48", default-features = false } mimalloc = { version = "0.1.48", default-features = false }
prometheus = "0.14.0" prometheus = "0.14.0"
lazy_static = "1.5.0" x509-parser = "0.18.1"
x509-parser = "0.18.0"
rustls-pemfile = "2.2.0" rustls-pemfile = "2.2.0"
tower-http = { version = "0.6.6", features = ["fs"] } tower-http = { version = "0.6.8", features = ["fs"] }
once_cell = "1.21.3"
privdrop = "0.5.6" privdrop = "0.5.6"
ctrlc = "3.5.0" ctrlc = "3.5.2"
port_check = "0.3.0" serde_json = "1.0.149"
#moka = { version = "0.12.10", features = ["sync"] } subtle = "2.6.1"
#rustls = { version = "0.23.27", features = ["ring"] }
#hickory-client = { version = "0.25.2" }
#openssl = "0.10.73"

View File

@@ -11,7 +11,7 @@ featuring Consul and Kubernetes integration for dynamic pod discovery and health
--- ---
What Aralez means ? What Aralez means ?
**Aralez = Արալեզ** <ins>.Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them.</ins>. **Aralez = Արալեզ** <ins>Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them</ins>.
Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box. Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box.
@@ -23,7 +23,7 @@ Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers
- **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required. - **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required.
- **TLS Termination** — Built-in OpenSSL support. - **TLS Termination** — Built-in OpenSSL support.
- **Automatic load of certificates** — Automatically reads and loads certificates from a folder, without a restart. - **Automatic loading of certificates** — Automatically reads and loads certificates from a folder, without a restart.
- **Upstreams TLS detection** — Aralez will automatically detect if upstreams uses secure connection. - **Upstreams TLS detection** — Aralez will automatically detect if upstreams uses secure connection.
- **Built in rate limiter** — Limit requests to server, by setting up upper limit for requests per seconds, per virtualhost. - **Built in rate limiter** — Limit requests to server, by setting up upper limit for requests per seconds, per virtualhost.
- **Global rate limiter** — Set rate limit for all virtualhosts. - **Global rate limiter** — Set rate limit for all virtualhosts.
@@ -122,13 +122,15 @@ Make the binary executable `chmod 755 ./aralez-VERSION` and run.
File names: File names:
| File Name | Description | | File Name | Description |
|---------------------------|--------------------------------------------------------------------------| |---------------------------------|--------------------------------------------------------------------------|
| `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency | | `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
| `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies | | `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
| `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency | | `aralez-x86_64-compat-musl.gz` | Static Linux x86_64 binary, compatible with old pre Haswell CPUs |
| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies | | `aralez-x86_64-compat-glibc.gz` | Dynamic Linux x86_64 binary, compatible with old pre Haswell CPUs |
| `sadoyan/aralez` | Docker image on Debian 13 slim (https://hub.docker.com/r/sadoyan/aralez) | | `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
| `sadoyan/aralez` | Docker image on Debian 13 slim (https://hub.docker.com/r/sadoyan/aralez) |
**Via docker** **Via docker**

BIN
assets/bench2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

View File

@@ -43,7 +43,7 @@ kubernetes:
path: "/" path: "/"
upstream: "webapi-service" upstream: "webapi-service"
- hostname: "webapi-service" - hostname: "webapi-service"
upstream: "vt-console-service" upstream: "console-service"
path: "/one" path: "/one"
client_headers: client_headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa" - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
@@ -51,7 +51,7 @@ kubernetes:
rate_limit: 100 rate_limit: 100
to_https: false to_https: false
- hostname: "webapi-service" - hostname: "webapi-service"
upstream: "vt-rambulik-service" upstream: "rambul-service"
path: "/two" path: "/two"
- hostname: "websocket-service" - hostname: "websocket-service"
upstream: "websocket-service" upstream: "websocket-service"
@@ -72,6 +72,9 @@ upstreams:
- "127.0.0.4:8000" - "127.0.0.4:8000"
- "127.0.0.5:8000" - "127.0.0.5:8000"
"/ping": "/ping":
authorization: # Will be ignored if global authentication is enabled.
type: "basic"
creds: "admin:admin"
to_https: false to_https: false
server_headers: server_headers:
- "X-Forwarded-Proto:https" - "X-Forwarded-Proto:https"
@@ -107,9 +110,10 @@ upstreams:
healthcheck: false healthcheck: false
servers: servers:
- "127.0.0.1:8001" - "127.0.0.1:8001"
localpost: rdr.mydomain.com:
paths: paths:
"/": "/":
to_https: false redirect_to: "https://som.other.domain:6194"
healthcheck: false
servers: servers:
- "127.0.0.1:9000" - "127.0.0.1:8080"

View File

@@ -3,6 +3,7 @@ mod web;
#[global_allocator] #[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
// pub static A: CountingAllocator = CountingAllocator;
fn main() { fn main() {
web::start::run(); web::start::run();

View File

@@ -2,6 +2,7 @@ pub mod auth;
pub mod discovery; pub mod discovery;
pub mod dnsclient; pub mod dnsclient;
mod filewatch; mod filewatch;
pub mod fordebug;
pub mod healthcheck; pub mod healthcheck;
pub mod httpclient; pub mod httpclient;
pub mod jwt; pub mod jwt;

View File

@@ -3,6 +3,8 @@ use base64::engine::general_purpose::STANDARD;
use base64::Engine; use base64::Engine;
use pingora_proxy::Session; use pingora_proxy::Session;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc;
use subtle::ConstantTimeEq;
use urlencoding::decode; use urlencoding::decode;
trait AuthValidator { trait AuthValidator {
@@ -15,10 +17,14 @@ struct JwtAuth<'a>(&'a str);
impl AuthValidator for BasicAuth<'_> { impl AuthValidator for BasicAuth<'_> {
fn validate(&self, session: &Session) -> bool { fn validate(&self, session: &Session) -> bool {
if let Some(header) = session.get_header("authorization") { if let Some(header) = session.get_header("authorization") {
if let Some((_, val)) = header.to_str().ok().unwrap().split_once(' ') { if let Some(h) = header.to_str().ok() {
let decoded = STANDARD.decode(val).ok().unwrap(); if let Some((_, val)) = h.split_once(' ') {
let decoded_str = String::from_utf8(decoded).ok().unwrap(); if let Some(decoded) = STANDARD.decode(val).ok() {
return decoded_str == self.0; if decoded.as_slice().ct_eq(self.0.as_bytes()).into() {
return true;
}
}
}
} }
} }
false false
@@ -28,7 +34,9 @@ impl AuthValidator for BasicAuth<'_> {
impl AuthValidator for ApiKeyAuth<'_> { impl AuthValidator for ApiKeyAuth<'_> {
fn validate(&self, session: &Session) -> bool { fn validate(&self, session: &Session) -> bool {
if let Some(header) = session.get_header("x-api-key") { if let Some(header) = session.get_header("x-api-key") {
return header.to_str().ok().unwrap() == self.0; if let Some(h) = header.to_str().ok() {
return h.as_bytes().ct_eq(self.0.as_bytes()).into();
}
} }
false false
} }
@@ -52,26 +60,14 @@ impl AuthValidator for JwtAuth<'_> {
false false
} }
} }
fn validate(auth: &dyn AuthValidator, session: &Session) -> bool {
auth.validate(session)
}
pub fn authenticate(c: &[String], session: &Session) -> bool { pub fn authenticate(auth_type: &Arc<str>, credentials: &Arc<str>, session: &Session) -> bool {
match c[0].as_str() { match &**auth_type {
"basic" => { "basic" => BasicAuth(credentials).validate(session),
let auth = BasicAuth(c[1].as_str().into()); "apikey" => ApiKeyAuth(credentials).validate(session),
validate(&auth, session) "jwt" => JwtAuth(credentials).validate(session),
}
"apikey" => {
let auth = ApiKeyAuth(c[1].as_str().into());
validate(&auth, session)
}
"jwt" => {
let auth = JwtAuth(c[1].as_str().into());
validate(&auth, session)
}
_ => { _ => {
println!("Unsupported authentication mechanism : {}", c[0]); log::warn!("Unsupported authentication mechanism : {}", auth_type);
false false
} }
} }
@@ -89,6 +85,5 @@ pub fn get_query_param(session: &Session, key: &str) -> Option<String> {
Some((k, v)) Some((k, v))
}) })
.collect(); .collect();
params.get(key).and_then(|v| decode(v).ok()).map(|s| s.to_string())
params.get(key).map(|v| decode(v).ok()).flatten().map(|s| s.to_string())
} }

View File

@@ -1,6 +1,6 @@
use crate::utils::filewatch; use crate::utils::filewatch;
use crate::utils::kuberconsul::{ConsulDiscovery, KubernetesDiscovery, ServiceDiscovery}; use crate::utils::kuberconsul::{ConsulDiscovery, KubernetesDiscovery, ServiceDiscovery};
use crate::utils::structs::Configuration; use crate::utils::structs::{Configuration, UpstreamsDashMap};
use crate::web::webserver; use crate::web::webserver;
use async_trait::async_trait; use async_trait::async_trait;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
@@ -10,18 +10,13 @@ pub struct APIUpstreamProvider {
pub config_api_enabled: bool, pub config_api_enabled: bool,
pub address: String, pub address: String,
pub masterkey: String, pub masterkey: String,
pub tls_address: Option<String>, // pub tls_address: Option<String>,
pub tls_certificate: Option<String>, // pub tls_certificate: Option<String>,
pub tls_key_file: Option<String>, // pub tls_key_file: Option<String>,
pub file_server_address: Option<String>, pub file_server_address: Option<String>,
pub file_server_folder: Option<String>, pub file_server_folder: Option<String>,
} pub current_upstreams: Arc<UpstreamsDashMap>,
pub full_upstreams: Arc<UpstreamsDashMap>,
#[async_trait]
impl Discovery for APIUpstreamProvider {
async fn start(&self, toreturn: Sender<Configuration>) {
webserver::run_server(self, toreturn).await;
}
} }
pub struct FromFileProvider { pub struct FromFileProvider {
@@ -41,6 +36,13 @@ pub trait Discovery {
async fn start(&self, tx: Sender<Configuration>); async fn start(&self, tx: Sender<Configuration>);
} }
#[async_trait]
impl Discovery for APIUpstreamProvider {
async fn start(&self, toreturn: Sender<Configuration>) {
webserver::run_server(self, toreturn, self.current_upstreams.clone(), self.full_upstreams.clone()).await;
}
}
#[async_trait] #[async_trait]
impl Discovery for FromFileProvider { impl Discovery for FromFileProvider {
async fn start(&self, tx: Sender<Configuration>) { async fn start(&self, tx: Sender<Configuration>) {

View File

@@ -103,6 +103,7 @@ impl DnsClient {
is_ssl: false, is_ssl: false,
is_http2: false, is_http2: false,
to_https: false, to_https: false,
sticky_sessions: false,
rate_limit: None, rate_limit: None,
}; };
values.push(to_add); values.push(to_add);

View File

@@ -41,7 +41,7 @@ pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
if start.elapsed() > Duration::from_secs(2) { if start.elapsed() > Duration::from_secs(2) {
start = Instant::now(); start = Instant::now();
// info!("Config File changed :=> {:?}", e); // info!("Config File changed :=> {:?}", e);
let snd = load_configuration(file_path, "filepath").await; let snd = load_configuration(file_path, "filepath").await.0;
match snd { match snd {
Some(snd) => { Some(snd) => {
toreturn.send(snd).await.unwrap(); toreturn.send(snd).await.unwrap();

31
src/utils/fordebug.rs Normal file
View File

@@ -0,0 +1,31 @@
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};
/// Instrumented allocator that forwards every request to the system
/// allocator while tallying allocation activity in global atomic counters.
/// Intended as an opt-in debugging aid (see the commented-out
/// `#[global_allocator]` hookup below).
pub struct CountingAllocator;

/// Number of `alloc` calls observed so far.
pub static ALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
/// Number of `dealloc` calls observed so far.
pub static DEALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
/// Total bytes requested across all `alloc` calls (never decremented).
pub static ALLOC_BYTES: AtomicUsize = AtomicUsize::new(0);

#[allow(dead_code)]
unsafe impl GlobalAlloc for CountingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Relaxed suffices: the counters are independent statistics and
        // impose no ordering on the allocation itself.
        ALLOC_BYTES.fetch_add(layout.size(), Ordering::Relaxed);
        ALLOC_COUNT.fetch_add(1, Ordering::Relaxed);
        System.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        DEALLOC_COUNT.fetch_add(1, Ordering::Relaxed);
        System.dealloc(ptr, layout)
    }
}
// Uncomment following lines and comment allocator in main.rs
// #[global_allocator]
// pub static A: CountingAllocator = CountingAllocator;
#[allow(dead_code)]
fn for_example() {
let before = crate::utils::fordebug::ALLOC_COUNT.load(Ordering::Relaxed);
let after = crate::utils::fordebug::ALLOC_COUNT.load(Ordering::Relaxed);
println!("Allocations : {}", after - before);
}

View File

@@ -15,12 +15,18 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
loop { loop {
tokio::select! { tokio::select! {
_ = period.tick() => { _ = period.tick() => {
populate_upstreams(&upslist, &fullist, &idlist, params, &client).await; // populate_upstreams(&upslist, &fullist, &idlist, params, &client).await;
let totest = build_upstreams(&fullist, params.0, &client).await;
if !compare_dashmaps(&totest, &upslist) {
clone_dashmap_into(&totest, &upslist);
clone_idmap_into(&totest, &idlist);
}
} }
} }
} }
} }
/*
pub async fn populate_upstreams(upslist: &Arc<UpstreamsDashMap>, fullist: &Arc<UpstreamsDashMap>, idlist: &Arc<UpstreamsIdMap>, params: (&str, u64), client: &Client) { pub async fn populate_upstreams(upslist: &Arc<UpstreamsDashMap>, fullist: &Arc<UpstreamsDashMap>, idlist: &Arc<UpstreamsIdMap>, params: (&str, u64), client: &Client) {
let totest = build_upstreams(fullist, params.0, client).await; let totest = build_upstreams(fullist, params.0, client).await;
if !compare_dashmaps(&totest, upslist) { if !compare_dashmaps(&totest, upslist) {
@@ -28,6 +34,7 @@ pub async fn populate_upstreams(upslist: &Arc<UpstreamsDashMap>, fullist: &Arc<U
clone_idmap_into(&totest, idlist); clone_idmap_into(&totest, idlist);
} }
} }
*/
pub async fn initiate_upstreams(fullist: UpstreamsDashMap) -> UpstreamsDashMap { pub async fn initiate_upstreams(fullist: UpstreamsDashMap) -> UpstreamsDashMap {
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap(); let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap();
@@ -46,7 +53,7 @@ async fn build_upstreams(fullist: &UpstreamsDashMap, method: &str, client: &Clie
let mut innervec = Vec::new(); let mut innervec = Vec::new();
for (_, upstream) in path_entry.value().0.iter().enumerate() { for (_, upstream) in path_entry.value().0.iter().enumerate() {
let tls = detect_tls(upstream.address.as_str(), &upstream.port, &client).await; let tls = detect_tls(&upstream.address.to_string(), &upstream.port, &client).await;
let is_h2 = matches!(tls.1, Some(Version::HTTP_2)); let is_h2 = matches!(tls.1, Some(Version::HTTP_2));
let link = if tls.0 { let link = if tls.0 {
@@ -63,6 +70,8 @@ async fn build_upstreams(fullist: &UpstreamsDashMap, method: &str, client: &Clie
to_https: upstream.to_https, to_https: upstream.to_https,
rate_limit: upstream.rate_limit, rate_limit: upstream.rate_limit,
healthcheck: upstream.healthcheck, healthcheck: upstream.healthcheck,
redirect_to: upstream.redirect_to.clone(),
authorization: upstream.authorization.clone(),
}; };
if scheme.healthcheck.unwrap_or(true) { if scheme.healthcheck.unwrap_or(true) {
@@ -71,23 +80,13 @@ async fn build_upstreams(fullist: &UpstreamsDashMap, method: &str, client: &Clie
if resp.1 { if resp.1 {
scheme.is_http2 = is_h2; // could be adjusted further scheme.is_http2 = is_h2; // could be adjusted further
} }
innervec.push(scheme); innervec.push(Arc::from(scheme));
} else { } else {
warn!("Dead Upstream : {}", link); warn!("Dead Upstream : {}", link);
} }
} else { } else {
innervec.push(scheme); innervec.push(Arc::from(scheme));
} }
// let resp = http_request(&link, method, "", &client).await;
// if resp.0 {
// if resp.1 {
// scheme.is_http2 = is_h2; // could be adjusted further
// }
// innervec.push(scheme);
// } else {
// warn!("Dead Upstream : {}", link);
// }
} }
inner.insert(path.clone(), (innervec, AtomicUsize::new(0))); inner.insert(path.clone(), (innervec, AtomicUsize::new(0)));
} }
@@ -120,18 +119,11 @@ async fn http_request(url: &str, method: &str, payload: &str, client: &Client) -
} }
pub async fn ping_grpc(addr: &str) -> bool { pub async fn ping_grpc(addr: &str) -> bool {
let endpoint_result = Endpoint::from_shared(addr.to_owned()); let endpoint = match Endpoint::from_shared(addr.to_owned()) {
Ok(e) => e.timeout(Duration::from_secs(2)),
if let Ok(endpoint) = endpoint_result { Err(_) => return false,
let endpoint = endpoint.timeout(Duration::from_secs(2)); };
tokio::time::timeout(Duration::from_secs(3), endpoint.connect()).await.ok().and_then(Result::ok).is_some()
match tokio::time::timeout(Duration::from_secs(3), endpoint.connect()).await {
Ok(Ok(_channel)) => true,
_ => false,
}
} else {
false
}
} }
async fn detect_tls(ip: &str, port: &u16, client: &Client) -> (bool, Option<Version>) { async fn detect_tls(ip: &str, port: &u16, client: &Client) -> (bool, Option<Version>) {

View File

@@ -1,12 +1,13 @@
use crate::utils::kuberconsul::{match_path, ConsulService, KubeEndpoints}; use crate::utils::kuberconsul::{match_path, ConsulService, KubeEndpoints};
use crate::utils::structs::{InnerMap, ServiceMapping}; use crate::utils::structs::{GlobalServiceMapping, InnerMap};
use axum::http::{HeaderMap, HeaderValue}; use axum::http::{HeaderMap, HeaderValue};
use dashmap::DashMap; use dashmap::DashMap;
use reqwest::Client; use reqwest::Client;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
pub async fn for_consul(url: String, token: Option<String>, conf: &ServiceMapping) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> { pub async fn for_consul(url: String, token: Option<String>, conf: &GlobalServiceMapping) -> Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>> {
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().ok()?; let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().ok()?;
let mut headers = HeaderMap::new(); let mut headers = HeaderMap::new();
if let Some(token) = token { if let Some(token) = token {
@@ -19,27 +20,32 @@ pub async fn for_consul(url: String, token: Option<String>, conf: &ServiceMappin
return None; return None;
} }
let mut inner_vec = Vec::new(); let mut inner_vec = Vec::new();
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new(); let upstreams: DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)> = DashMap::new();
let endpoints: Vec<ConsulService> = resp.json().await.ok()?; let endpoints: Vec<ConsulService> = resp.json().await.ok()?;
for subsets in endpoints { for subsets in endpoints {
// let addr = subsets.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
// let prt = subsets.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
let addr = subsets.tagged_addresses.get("lan_ipv4").unwrap().address.clone(); let addr = subsets.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
let prt = subsets.tagged_addresses.get("lan_ipv4").unwrap().port.clone(); let prt = subsets.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
let to_add = InnerMap { // let redirect_link = conf.redirect_to.as_ref().map(|www| Arc::from(www.as_str()));
address: addr, let to_add = Arc::from(InnerMap {
address: Arc::from(&*addr),
port: prt, port: prt,
is_ssl: false, is_ssl: false,
is_http2: false, is_http2: false,
to_https: conf.to_https.unwrap_or(false), to_https: conf.to_https.unwrap_or(false),
rate_limit: conf.rate_limit, rate_limit: conf.rate_limit,
redirect_to: None,
healthcheck: None, healthcheck: None,
}; authorization: None,
});
inner_vec.push(to_add); inner_vec.push(to_add);
} }
match_path(&conf, &upstreams, inner_vec.clone()); match_path(&conf, &upstreams, inner_vec.clone());
Some(upstreams) Some(upstreams)
} }
pub async fn for_kuber(url: &str, token: &str, conf: &ServiceMapping) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> { pub async fn for_kuber(url: &str, token: &str, conf: &GlobalServiceMapping) -> Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>> {
let to = Duration::from_secs(10); let to = Duration::from_secs(10);
let client = Client::builder().timeout(Duration::from_secs(10)).danger_accept_invalid_certs(true).build().ok()?; let client = Client::builder().timeout(Duration::from_secs(10)).danger_accept_invalid_certs(true).build().ok()?;
let resp = client.get(url).timeout(to).bearer_auth(token).send().await.ok()?; let resp = client.get(url).timeout(to).bearer_auth(token).send().await.ok()?;
@@ -48,22 +54,27 @@ pub async fn for_kuber(url: &str, token: &str, conf: &ServiceMapping) -> Option<
return None; return None;
} }
let endpoints: KubeEndpoints = resp.json().await.ok()?; let endpoints: KubeEndpoints = resp.json().await.ok()?;
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
let upstreams: DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)> = DashMap::new();
if let Some(subsets) = endpoints.subsets { if let Some(subsets) = endpoints.subsets {
for subset in subsets { for subset in subsets {
if let (Some(addresses), Some(ports)) = (subset.addresses, subset.ports) { if let (Some(addresses), Some(ports)) = (subset.addresses, subset.ports) {
let mut inner_vec = Vec::new(); let mut inner_vec = Vec::new();
for addr in addresses { for addr in addresses {
for port in &ports { for port in &ports {
let to_add = InnerMap { // let redirect_link = conf.redirect_to.as_ref().map(|www| Arc::from(www.as_str()));
address: addr.ip.clone(), let to_add = Arc::from(InnerMap {
address: Arc::from(addr.ip.clone()),
port: port.port.clone(), port: port.port.clone(),
is_ssl: false, is_ssl: false,
is_http2: false, is_http2: false,
to_https: conf.to_https.unwrap_or(false), to_https: conf.to_https.unwrap_or(false),
rate_limit: conf.rate_limit, rate_limit: conf.rate_limit,
healthcheck: None, healthcheck: None,
}; redirect_to: None,
authorization: None,
});
inner_vec.push(to_add); inner_vec.push(to_add);
} }
} }

View File

@@ -1,16 +1,18 @@
use crate::utils::httpclient; use crate::utils::httpclient;
use crate::utils::parceyaml::build_headers; use crate::utils::parceyaml::build_headers;
use crate::utils::structs::{Configuration, InnerMap, ServiceMapping, UpstreamsDashMap}; use crate::utils::structs::{Configuration, GlobalServiceMapping, InnerMap, UpstreamsDashMap};
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams}; use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams};
use async_trait::async_trait; use async_trait::async_trait;
use dashmap::DashMap; use dashmap::DashMap;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use futures::SinkExt; use futures::SinkExt;
use pingora::prelude::sleep; use pingora::prelude::sleep;
use rand::Rng; use rand::RngExt;
use serde::Deserialize; use serde::Deserialize;
use std::collections::HashMap; use std::collections::HashMap;
use std::env; use std::env;
use std::fs;
use std::path::Path;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
@@ -50,28 +52,28 @@ pub struct ConsulTaggedAddress {
#[serde(rename = "Port")] #[serde(rename = "Port")]
pub port: u16, pub port: u16,
} }
pub fn list_to_upstreams(lt: Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>>, upstreams: &UpstreamsDashMap, i: &ServiceMapping) { pub fn list_to_upstreams(lt: Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>>, upstreams: &UpstreamsDashMap, i: &GlobalServiceMapping) {
if let Some(list) = lt { if let Some(list) = lt {
match upstreams.get(&i.hostname.clone()) { match upstreams.get(&*i.hostname.clone()) {
Some(upstr) => { Some(upstr) => {
for (k, v) in list { for (k, v) in list {
upstr.value().insert(k, v); upstr.value().insert(Arc::from(k.to_owned()), v);
} }
} }
None => { None => {
upstreams.insert(i.hostname.clone(), list); upstreams.insert(Arc::from(i.hostname.clone()), list);
} }
}; };
} }
} }
pub fn match_path(conf: &ServiceMapping, upstreams: &DashMap<String, (Vec<InnerMap>, AtomicUsize)>, values: Vec<InnerMap>) { pub fn match_path(conf: &GlobalServiceMapping, upstreams: &DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>, values: Vec<Arc<InnerMap>>) {
match conf.path { match conf.path {
Some(ref p) => { Some(ref p) => {
upstreams.insert(p.to_string(), (values, AtomicUsize::new(0))); upstreams.insert(Arc::from(p.clone()), (values, AtomicUsize::new(0)));
} }
None => { None => {
upstreams.insert("/".to_string(), (values, AtomicUsize::new(0))); upstreams.insert(Arc::from("/"), (values, AtomicUsize::new(0)));
} }
} }
} }
@@ -106,25 +108,35 @@ impl ServiceDiscovery for KubernetesDiscovery {
let num = if end > 0 { rand::rng().random_range(0..end) } else { 0 }; let num = if end > 0 { rand::rng().random_range(0..end) } else { 0 };
let server = servers.get(num).unwrap().to_string(); let server = servers.get(num).unwrap().to_string();
let path = kuber.tokenpath.unwrap_or("/var/run/secrets/kubernetes.io/serviceaccount/token".to_string()); let path = kuber.tokenpath.unwrap_or("/var/run/secrets/kubernetes.io/serviceaccount/token".to_string());
let namespace = get_current_namespace().unwrap_or_else(|| "default".to_string());
let token = read_token(path.as_str()).await; let token = read_token(path.as_str()).await;
// let mut oldcrt: HashMap<String, String> = HashMap::new();
loop { loop {
// crate::utils::watchksecret::watch_secret("ar-tls", "staging", server.clone(), token.clone(), &mut oldcrt).await;
let upstreams = UpstreamsDashMap::new(); let upstreams = UpstreamsDashMap::new();
if let Some(kuber) = config.kubernetes.clone() { if let Some(kuber) = config.kubernetes.clone() {
if let Some(svc) = kuber.services { if let Some(svc) = kuber.services {
for i in svc { for service in svc {
let header_list = DashMap::new(); let header_list: DashMap<Arc<str>, Vec<(String, Arc<str>)>> = DashMap::new();
let mut hl = Vec::new(); let mut hl = Vec::new();
build_headers(&i.client_headers, config.as_ref(), &mut hl); build_headers(&service.client_headers, config.as_ref(), &mut hl);
if !hl.is_empty() { if !hl.is_empty() {
header_list.insert(i.path.clone().unwrap_or("/".to_string()), hl); match service.path.clone() {
config.client_headers.insert(i.hostname.clone(), header_list); Some(path) => {
header_list.insert(Arc::from(path.as_str()), hl);
}
None => {
header_list.insert(Arc::from("/"), hl);
}
}
// header_list.insert(Arc::from(path.as_str()), hl);
// header_list.insert(Arc::from(i.path).unwrap_or(Arc::from("/")).as_str(), hl);
config.client_headers.insert(Arc::from(service.hostname.clone()), header_list);
} }
let url = format!("https://{}/api/v1/namespaces/staging/endpoints/{}", server, i.hostname); let url = format!("https://{}/api/v1/namespaces/{}/endpoints/{}", server, namespace, service.hostname);
let list = httpclient::for_kuber(&*url, &*token, &i).await; // let url = format!("https://{}/api/v1/namespaces/{}/endpoints?labelSelector=app", server, namespace);
list_to_upstreams(list, &upstreams, &i); let list = httpclient::for_kuber(&*url, &*token, &service).await;
// println!("{:?}", list);
list_to_upstreams(list, &upstreams, &service);
} }
} }
if let Some(lt) = clone_compare(&upstreams, &prev_upstreams, &config).await { if let Some(lt) = clone_compare(&upstreams, &prev_upstreams, &config).await {
@@ -137,6 +149,16 @@ impl ServiceDiscovery for KubernetesDiscovery {
} }
} }
fn get_current_namespace() -> Option<String> {
let ns_path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
if Path::new(ns_path).exists() {
if let Ok(contents) = fs::read_to_string(ns_path) {
return Some(contents.trim().to_string());
}
}
std::env::var("POD_NAMESPACE").ok()
}
#[async_trait] #[async_trait]
impl ServiceDiscovery for ConsulDiscovery { impl ServiceDiscovery for ConsulDiscovery {
async fn fetch_upstreams(&self, config: Arc<Configuration>, mut toreturn: Sender<Configuration>) { async fn fetch_upstreams(&self, config: Arc<Configuration>, mut toreturn: Sender<Configuration>) {
@@ -161,8 +183,16 @@ impl ServiceDiscovery for ConsulDiscovery {
let mut hl = Vec::new(); let mut hl = Vec::new();
build_headers(&i.client_headers, config.as_ref(), &mut hl); build_headers(&i.client_headers, config.as_ref(), &mut hl);
if !hl.is_empty() { if !hl.is_empty() {
header_list.insert(i.path.clone().unwrap_or("/".to_string()), hl); match i.path.clone() {
config.client_headers.insert(i.hostname.clone(), header_list); Some(path) => {
header_list.insert(Arc::from(path.as_str()), hl);
}
None => {
header_list.insert(Arc::from("/"), hl);
}
}
// header_list.insert(i.path.clone().unwrap_or("/".to_string()), hl);
config.client_headers.insert(Arc::from(i.hostname.clone()), header_list);
} }
let pref = ss.clone() + &i.upstream; let pref = ss.clone() + &i.upstream;

View File

@@ -1,48 +1,51 @@
use pingora_http::Method;
use pingora_http::StatusCode;
use pingora_http::Version; use pingora_http::Version;
use prometheus::{register_histogram, register_int_counter, register_int_counter_vec, Histogram, IntCounter, IntCounterVec}; use prometheus::{register_histogram, register_int_counter, register_int_counter_vec, Histogram, IntCounter, IntCounterVec};
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
pub struct MetricTypes { pub struct MetricTypes {
pub method: String, pub method: Method,
pub code: String, pub upstream: Arc<str>,
pub code: Option<StatusCode>,
pub latency: Duration, pub latency: Duration,
pub version: Version, pub version: Version,
} }
lazy_static::lazy_static! {
pub static ref REQUEST_COUNT: IntCounter = register_int_counter!( use std::sync::LazyLock;
"aralez_requests_total",
"Total number of requests handled by Aralez" pub static REQUEST_COUNT: LazyLock<IntCounter> = LazyLock::new(|| register_int_counter!("aralez_requests_total", "Total number of requests handled by Aralez").unwrap());
).unwrap();
pub static ref RESPONSE_CODES: IntCounterVec = register_int_counter_vec!( pub static RESPONSE_CODES: LazyLock<IntCounterVec> =
"aralez_responses_total", LazyLock::new(|| register_int_counter_vec!("aralez_responses_total", "Responses grouped by status code", &["status"]).unwrap());
"Responses grouped by status code",
&["status"] pub static REQUEST_LATENCY: LazyLock<Histogram> = LazyLock::new(|| {
).unwrap(); register_histogram!(
pub static ref REQUEST_LATENCY: Histogram = register_histogram!(
"aralez_request_latency_seconds", "aralez_request_latency_seconds",
"Request latency in seconds", "Request latency in seconds",
vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0] vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0]
).unwrap(); )
pub static ref RESPONSE_LATENCY: Histogram = register_histogram!( .unwrap()
});
pub static RESPONSE_LATENCY: LazyLock<Histogram> = LazyLock::new(|| {
register_histogram!(
"aralez_response_latency_seconds", "aralez_response_latency_seconds",
"Response latency in seconds", "Response latency in seconds",
vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0] vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0]
).unwrap(); )
pub static ref REQUESTS_BY_METHOD: IntCounterVec = register_int_counter_vec!( .unwrap()
"aralez_requests_by_method_total", });
"Number of requests by HTTP method",
&["method"] pub static REQUESTS_BY_METHOD: LazyLock<IntCounterVec> =
).unwrap(); LazyLock::new(|| register_int_counter_vec!("aralez_requests_by_method_total", "Number of requests by HTTP method", &["method"]).unwrap());
pub static ref REQUESTS_BY_VERSION: IntCounterVec = register_int_counter_vec!(
"aralez_requests_by_version_total", pub static REQUESTS_BY_UPSTREAM: LazyLock<IntCounterVec> =
"Number of requests by HTTP versions", LazyLock::new(|| register_int_counter_vec!("aralez_requests_by_upstream", "Number of requests by UPSTREAM server", &["upstream"]).unwrap());
&["version"]
).unwrap(); pub static REQUESTS_BY_VERSION: LazyLock<IntCounterVec> =
pub static ref ERROR_COUNT: IntCounter = register_int_counter!( LazyLock::new(|| register_int_counter_vec!("aralez_requests_by_version_total", "Number of requests by HTTP versions", &["version"]).unwrap());
"aralez_errors_total",
"Total number of errors"
).unwrap();
}
pub fn calc_metrics(metric_types: &MetricTypes) { pub fn calc_metrics(metric_types: &MetricTypes) {
REQUEST_COUNT.inc(); REQUEST_COUNT.inc();
@@ -57,7 +60,8 @@ pub fn calc_metrics(metric_types: &MetricTypes) {
_ => "Unknown", _ => "Unknown",
}; };
REQUESTS_BY_VERSION.with_label_values(&[&version_str]).inc(); REQUESTS_BY_VERSION.with_label_values(&[&version_str]).inc();
RESPONSE_CODES.with_label_values(&[&metric_types.code.to_string()]).inc(); RESPONSE_CODES.with_label_values(&[metric_types.code.unwrap_or(StatusCode::GONE).as_str()]).inc();
REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc(); REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc();
REQUESTS_BY_UPSTREAM.with_label_values(&[metric_types.upstream.as_ref()]).inc();
RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64()); RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64());
} }

View File

@@ -5,22 +5,46 @@ use crate::utils::tools::{clone_dashmap, clone_dashmap_into, print_upstreams};
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info, warn}; use log::{error, info, warn};
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
// use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Arc;
use std::{env, fs}; use std::{env, fs};
// use tokio::sync::oneshot::{Receiver, Sender};
pub async fn load_configuration(d: &str, kind: &str) -> Option<Configuration> { pub async fn load_configuration(d: &str, kind: &str) -> (Option<Configuration>, String) {
let mut conf_files = Vec::new();
let yaml_data = match kind { let yaml_data = match kind {
"filepath" => match fs::read_to_string(d) { "filepath" => match fs::read_to_string(d) {
Ok(data) => { Ok(data) => {
let mut confdir = Path::new(d).parent().unwrap().to_path_buf();
confdir.push("conf.d");
if let Ok(entries) = fs::read_dir(&confdir) {
let mut paths: Vec<_> = entries
.flatten()
.map(|e| e.path())
.filter(|p| p.extension().and_then(|e| e.to_str()) == Some("yaml"))
.collect();
paths.sort();
for path in paths {
let content = fs::read_to_string(&path);
match content {
Ok(content) => {
conf_files.push(content);
}
Err(e) => {
error!("Reading: {}: {:?}", path.display(), e)
}
};
}
}
info!("Reading upstreams from {}", d); info!("Reading upstreams from {}", d);
data data
} }
Err(e) => { Err(e) => {
error!("Reading: {}: {:?}", d, e); error!("Reading: {}: {:?}", d, e);
warn!("Running with empty upstreams list, update it via API"); warn!("Running with empty upstreams list, update it via API");
return None; return (None, e.to_string());
} }
}, },
"content" => { "content" => {
@@ -29,86 +53,93 @@ pub async fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
} }
_ => { _ => {
error!("Mismatched parameter, only filepath|content is allowed"); error!("Mismatched parameter, only filepath|content is allowed");
return None; return (None, "Mismatched parameter, only filepath|content is allowed".to_string());
} }
}; };
let parsed: Config = match serde_yaml::from_str(&yaml_data) { let mut parsed: Config = match serde_yml::from_str(&yaml_data) {
Ok(cfg) => cfg, Ok(cfg) => cfg,
Err(e) => { Err(e) => {
error!("Failed to parse upstreams file: {}", e); error!("Failed to parse upstreams file: {}", e);
return None; return (None, e.to_string());
} }
}; };
let mut toreturn = Configuration::default(); if let Some(ref mut upstreams) = parsed.upstreams {
for uconf in conf_files {
let p: HashMap<String, HostConfig> = match serde_yml::from_str(&uconf) {
Ok(ucfg) => ucfg,
Err(e) => {
error!("Failed to parse upstreams file: {}", e);
return (None, e.to_string());
}
};
upstreams.extend(p);
}
}
let mut toreturn = Configuration::default();
populate_headers_and_auth(&mut toreturn, &parsed).await; populate_headers_and_auth(&mut toreturn, &parsed).await;
toreturn.typecfg = parsed.provider.clone(); toreturn.typecfg = parsed.provider.clone();
match parsed.provider.as_str() { match parsed.provider.as_str() {
"file" => { "file" => {
populate_file_upstreams(&mut toreturn, &parsed).await; populate_file_upstreams(&mut toreturn, &parsed).await;
Some(toreturn) (Some(toreturn), "Ok".to_string())
} }
"consul" => { "consul" => {
toreturn.consul = parsed.consul; toreturn.consul = parsed.consul;
toreturn.consul.is_some().then_some(toreturn) (toreturn.consul.is_some().then_some(toreturn), "Ok".to_string())
} }
"kubernetes" => { "kubernetes" => {
toreturn.kubernetes = parsed.kubernetes; toreturn.kubernetes = parsed.kubernetes;
toreturn.kubernetes.is_some().then_some(toreturn) (toreturn.kubernetes.is_some().then_some(toreturn), "Ok".to_string())
} }
_ => { _ => {
warn!("Unknown provider {}", parsed.provider); warn!("Unknown provider {}", parsed.provider);
None (None, "Unknown provider".to_string())
} }
} }
} }
async fn populate_headers_and_auth(config: &mut Configuration, parsed: &Config) { async fn populate_headers_and_auth(config: &mut Configuration, parsed: &Config) {
let mut ch = Vec::new(); let mut ch: Vec<(String, Arc<str>)> = Vec::new();
ch.push(("Server".to_string(), "Aralez".to_string()));
// println!("{:?}", &parsed.client_headers);
if let Some(headers) = &parsed.client_headers { if let Some(headers) = &parsed.client_headers {
for header in headers { for header in headers {
if let Some((key, val)) = header.split_once(':') { if let Some((key, val)) = header.split_once(':') {
println!("{}:{}", key.trim().to_string(), val.trim().to_string()); ch.push((key.to_string(), Arc::from(val)));
ch.push((key.trim().to_string(), val.trim().to_string()));
} }
} }
} }
let global_headers = DashMap::new(); let global_headers: DashMap<Arc<str>, Vec<(String, Arc<str>)>> = DashMap::new();
global_headers.insert("/".to_string(), ch); global_headers.insert(Arc::from("/"), ch);
config.client_headers.insert("GLOBAL_CLIENT_HEADERS".to_string(), global_headers); config.client_headers.insert(Arc::from("GLOBAL_CLIENT_HEADERS"), global_headers);
let mut sh = Vec::new(); let mut sh: Vec<(String, Arc<str>)> = Vec::new();
sh.push(("X-Proxy-Server".to_string(), "Aralez".to_string()));
if let Some(headers) = &parsed.server_headers { if let Some(headers) = &parsed.server_headers {
for header in headers { for header in headers {
if let Some((key, val)) = header.split_once(':') { if let Some((key, val)) = header.split_once(':') {
sh.push((key.trim().to_string(), val.trim().to_string())); sh.push((key.to_string(), Arc::from(val.trim())));
} }
} }
} }
let server_global_headers = DashMap::new(); let server_global_headers: DashMap<Arc<str>, Vec<(String, Arc<str>)>> = DashMap::new();
server_global_headers.insert("/".to_string(), sh); server_global_headers.insert(Arc::from("/"), sh);
config.server_headers.insert("GLOBAL_SERVER_HEADERS".to_string(), server_global_headers); config.server_headers.insert(Arc::from("GLOBAL_SERVER_HEADERS"), server_global_headers);
config.extraparams.sticky_sessions = parsed.sticky_sessions;
config.extraparams.to_https = parsed.to_https; config.extraparams.to_https = parsed.to_https;
config.extraparams.sticky_sessions = parsed.sticky_sessions;
config.extraparams.rate_limit = parsed.rate_limit; config.extraparams.rate_limit = parsed.rate_limit;
if let Some(rate) = &parsed.rate_limit { if let Some(rate) = &parsed.rate_limit {
info!("Applied Global Rate Limit : {} request per second", rate); info!("Applied Global Rate Limit : {} request per second", rate);
} }
if let Some(auth) = &parsed.authorization { if let Some(pa) = &parsed.authorization {
let name = auth.get("type").unwrap_or(&"".to_string()).to_string(); let y: InnerAuth = InnerAuth {
let creds = auth.get("creds").unwrap_or(&"".to_string()).to_string(); auth_type: Arc::from(pa.auth_type.clone()),
config.extraparams.authentication.insert("authorization".to_string(), vec![name, creds]); auth_cred: Arc::from(pa.auth_cred.clone()),
} else { };
config.extraparams.authentication = DashMap::new(); config.extraparams.authentication = Some(Arc::from(y));
} }
} }
@@ -123,35 +154,44 @@ async fn populate_file_upstreams(config: &mut Configuration, parsed: &Config) {
if let Some(rate) = &path_config.rate_limit { if let Some(rate) = &path_config.rate_limit {
info!("Applied Rate Limit for {} : {} request per second", hostname, rate); info!("Applied Rate Limit for {} : {} request per second", hostname, rate);
} }
let mut hl: Vec<(String, Arc<str>)> = Vec::new();
let mut hl: Vec<(String, String)> = Vec::new(); let mut sl: Vec<(String, Arc<str>)> = Vec::new();
let mut sl: Vec<(String, String)> = Vec::new();
build_headers(&path_config.client_headers, config, &mut hl); build_headers(&path_config.client_headers, config, &mut hl);
build_headers(&path_config.server_headers, config, &mut sl); build_headers(&path_config.server_headers, config, &mut sl);
client_header_list.insert(path.clone(), hl); client_header_list.insert(Arc::from(path.as_str()), hl);
server_header_list.insert(path.clone(), sl); server_header_list.insert(Arc::from(path.as_str()), sl);
let mut server_list = Vec::new(); let mut server_list = Vec::new();
for server in &path_config.servers { for server in &path_config.servers {
let mut path_auth: Option<Arc<InnerAuth>> = None;
if let Some(pa) = &path_config.authorization {
let y: InnerAuth = InnerAuth {
auth_type: Arc::from(pa.auth_type.clone()),
auth_cred: Arc::from(pa.auth_cred.clone()),
};
path_auth = Some(Arc::from(y));
}
let redirect_link = path_config.redirect_to.as_ref().map(|www| Arc::from(www.as_str()));
if let Some((ip, port_str)) = server.split_once(':') { if let Some((ip, port_str)) = server.split_once(':') {
if let Ok(port) = port_str.parse::<u16>() { if let Ok(port) = port_str.parse::<u16>() {
server_list.push(InnerMap { server_list.push(Arc::from(InnerMap {
address: ip.trim().to_string(), address: Arc::from(ip),
port, port,
is_ssl: true, is_ssl: false,
is_http2: false, is_http2: false,
to_https: path_config.to_https.unwrap_or(false), to_https: path_config.to_https.unwrap_or(false),
rate_limit: path_config.rate_limit, rate_limit: path_config.rate_limit,
healthcheck: path_config.healthcheck, healthcheck: path_config.healthcheck,
}); redirect_to: redirect_link,
authorization: path_auth,
}));
} }
} }
} }
path_map.insert(path.clone(), (server_list, AtomicUsize::new(0))); path_map.insert(Arc::from(path.clone()), (server_list, AtomicUsize::new(0)));
} }
config.client_headers.insert(hostname.clone(), client_header_list); config.client_headers.insert(Arc::from(hostname.clone()), client_header_list);
config.server_headers.insert(hostname.clone(), server_header_list); config.server_headers.insert(Arc::from(hostname.clone()), server_header_list);
imtdashmap.insert(hostname.clone(), path_map); imtdashmap.insert(Arc::from(hostname.clone()), path_map);
} }
if is_first_run() { if is_first_run() {
@@ -169,8 +209,8 @@ async fn populate_file_upstreams(config: &mut Configuration, parsed: &Config) {
pub fn parce_main_config(path: &str) -> AppConfig { pub fn parce_main_config(path: &str) -> AppConfig {
let data = fs::read_to_string(path).unwrap(); let data = fs::read_to_string(path).unwrap();
let reply = DashMap::new(); let reply = DashMap::new();
let cfg: HashMap<String, String> = serde_yaml::from_str(&*data).expect("Failed to parse main config file"); let cfg: HashMap<String, String> = serde_yml::from_str(&*data).expect("Failed to parse main config file");
let mut cfo: AppConfig = serde_yaml::from_str(&*data).expect("Failed to parse main config file"); let mut cfo: AppConfig = serde_yml::from_str(&*data).expect("Failed to parse main config file");
log_builder(&cfo); log_builder(&cfo);
cfo.hc_method = cfo.hc_method.to_uppercase(); cfo.hc_method = cfo.hc_method.to_uppercase();
for (k, v) in cfg { for (k, v) in cfg {
@@ -181,13 +221,24 @@ pub fn parce_main_config(path: &str) -> AppConfig {
cfo.local_server = Option::from((ip.to_string(), port)); cfo.local_server = Option::from((ip.to_string(), port));
} }
} }
// if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() {
// if let Some((_, port_str)) = tlsport_cfg.split_once(':') {
// if let Ok(port) = port_str.parse::<u16>() {
// cfo.proxy_port_tls = Some(port);
// }
// }
// };
if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() { if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() {
if let Some((_, port_str)) = tlsport_cfg.split_once(':') { if let Some((_, port_str)) = tlsport_cfg.split_once(':') {
if let Ok(port) = port_str.parse::<u16>() { cfo.proxy_port_tls = Some(port_str.to_string());
cfo.proxy_port_tls = Some(port);
}
} }
}; };
if let Some((_, port_str)) = cfo.proxy_address_http.split_once(':') {
cfo.proxy_port = Some(port_str.to_string());
}
cfo.proxy_tls_grade = parce_tls_grades(cfo.proxy_tls_grade.clone()); cfo.proxy_tls_grade = parce_tls_grades(cfo.proxy_tls_grade.clone());
cfo cfo
} }
@@ -214,7 +265,7 @@ fn parce_tls_grades(what: Option<String>) -> Option<String> {
}, },
None => { None => {
warn!("TLS grade not set, defaulting to: medium"); warn!("TLS grade not set, defaulting to: medium");
Some("b".to_string()) Some("medium".to_string())
} }
} }
} }
@@ -238,19 +289,12 @@ fn log_builder(conf: &AppConfig) {
env_logger::builder().init(); env_logger::builder().init();
} }
pub fn build_headers(path_config: &Option<Vec<String>>, _config: &Configuration, hl: &mut Vec<(String, String)>) { pub fn build_headers(path_config: &Option<Vec<String>>, _config: &Configuration, hl: &mut Vec<(String, Arc<str>)>) {
if let Some(headers) = &path_config { if let Some(headers) = &path_config {
for header in headers { for header in headers {
if let Some((key, val)) = header.split_once(':') { if let Some((key, val)) = header.split_once(':') {
hl.push((key.trim().to_string(), val.trim().to_string())); hl.push((key.trim().to_string(), Arc::from(val.trim())));
} }
} }
// if let Some(push) = config.client_headers.get("GLOBAL_HEADERS") {
// for k in push.iter() {
// for x in k.value() {
// hl.push(x.to_owned());
// }
// }
// }
} }
} }

View File

@@ -1,12 +1,11 @@
use once_cell::sync::Lazy; use std::sync::{LazyLock, RwLock};
use std::sync::RwLock;
#[derive(Debug)] #[derive(Debug)]
pub struct SharedState { pub struct SharedState {
pub first_run: bool, pub first_run: bool,
} }
pub static GLOBAL_STATE: Lazy<RwLock<SharedState>> = Lazy::new(|| RwLock::new(SharedState { first_run: true })); pub static GLOBAL_STATE: LazyLock<RwLock<SharedState>> = LazyLock::new(|| RwLock::new(SharedState { first_run: true }));
pub fn mark_not_first_run() { pub fn mark_not_first_run() {
let mut state = GLOBAL_STATE.write().unwrap(); let mut state = GLOBAL_STATE.write().unwrap();

View File

@@ -2,50 +2,53 @@ use dashmap::DashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<InnerMap>, AtomicUsize)>>; pub type UpstreamsDashMap = DashMap<Arc<str>, DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>>;
pub type UpstreamsIdMap = DashMap<String, InnerMap>; pub type UpstreamsIdMap = DashMap<String, Arc<InnerMap>>;
pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>; pub type Headers = DashMap<Arc<str>, DashMap<Arc<str>, Vec<(String, Arc<str>)>>>;
// pub type UpstreamsSerDde = Option<HashMap<String, HostConfig>>;
// pub type UpstreamsSerDe = HashMap<String, HostConfig>;
#[derive(Clone, Debug, Default)]
pub struct Extraparams {
pub to_https: Option<bool>,
pub sticky_sessions: bool,
pub authentication: Option<Arc<InnerAuth>>,
pub rate_limit: Option<isize>,
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)] #[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ServiceMapping { pub struct GlobalServiceMapping {
pub upstream: String, pub upstream: String,
pub hostname: String, pub hostname: String,
pub path: Option<String>, pub path: Option<String>,
pub to_https: Option<bool>, pub to_https: Option<bool>,
pub sticky_sessions: Option<bool>,
pub rate_limit: Option<isize>, pub rate_limit: Option<isize>,
pub client_headers: Option<Vec<String>>, pub client_headers: Option<Vec<String>>,
pub server_headers: Option<Vec<String>>, pub server_headers: Option<Vec<String>>,
} }
// pub type Services = DashMap<String, Vec<(String, Option<String>)>>;
#[derive(Clone, Debug, Default)]
pub struct Extraparams {
pub sticky_sessions: bool,
pub to_https: Option<bool>,
pub authentication: DashMap<String, Vec<String>>,
pub rate_limit: Option<isize>,
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)] #[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Kubernetes { pub struct Kubernetes {
pub servers: Option<Vec<String>>, pub servers: Option<Vec<String>>,
pub services: Option<Vec<ServiceMapping>>, pub services: Option<Vec<GlobalServiceMapping>>,
pub tokenpath: Option<String>, pub tokenpath: Option<String>,
} }
#[derive(Clone, Default, Debug, Serialize, Deserialize)] #[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Consul { pub struct Consul {
pub servers: Option<Vec<String>>, pub servers: Option<Vec<String>>,
pub services: Option<Vec<ServiceMapping>>, pub services: Option<Vec<GlobalServiceMapping>>,
pub token: Option<String>, pub token: Option<String>,
} }
#[derive(Debug, Default, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct Config { pub struct Config {
pub provider: String, pub provider: String,
pub sticky_sessions: bool,
pub to_https: Option<bool>, pub to_https: Option<bool>,
pub sticky_sessions: bool,
#[serde(default)] #[serde(default)]
pub upstreams: Option<HashMap<String, HostConfig>>, pub upstreams: Option<HashMap<String, HostConfig>>,
#[serde(default)] #[serde(default)]
@@ -55,7 +58,7 @@ pub struct Config {
#[serde(default)] #[serde(default)]
pub server_headers: Option<Vec<String>>, pub server_headers: Option<Vec<String>>,
#[serde(default)] #[serde(default)]
pub authorization: Option<HashMap<String, String>>, pub authorization: Option<Auth>,
#[serde(default)] #[serde(default)]
pub consul: Option<Consul>, pub consul: Option<Consul>,
#[serde(default)] #[serde(default)]
@@ -69,15 +72,24 @@ pub struct HostConfig {
pub paths: HashMap<String, PathConfig>, pub paths: HashMap<String, PathConfig>,
pub rate_limit: Option<isize>, pub rate_limit: Option<isize>,
} }
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct Auth {
#[serde(rename = "type")]
pub auth_type: String,
#[serde(rename = "creds")]
pub auth_cred: String,
}
#[derive(Debug, Default, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct PathConfig { pub struct PathConfig {
pub servers: Vec<String>, pub servers: Vec<String>,
pub to_https: Option<bool>, pub to_https: Option<bool>,
pub sticky_sessions: Option<bool>,
pub client_headers: Option<Vec<String>>, pub client_headers: Option<Vec<String>>,
pub server_headers: Option<Vec<String>>, pub server_headers: Option<Vec<String>>,
pub rate_limit: Option<isize>, pub rate_limit: Option<isize>,
pub healthcheck: Option<bool>, pub healthcheck: Option<bool>,
pub redirect_to: Option<String>,
pub authorization: Option<Auth>,
} }
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct Configuration { pub struct Configuration {
@@ -104,7 +116,8 @@ pub struct AppConfig {
pub config_tls_certificate: Option<String>, pub config_tls_certificate: Option<String>,
pub config_tls_key_file: Option<String>, pub config_tls_key_file: Option<String>,
pub proxy_address_tls: Option<String>, pub proxy_address_tls: Option<String>,
pub proxy_port_tls: Option<u16>, pub proxy_port_tls: Option<String>,
pub proxy_port: Option<String>,
pub local_server: Option<(String, u16)>, pub local_server: Option<(String, u16)>,
pub proxy_certificates: Option<String>, pub proxy_certificates: Option<String>,
pub proxy_tls_grade: Option<String>, pub proxy_tls_grade: Option<String>,
@@ -114,8 +127,44 @@ pub struct AppConfig {
pub rungroup: Option<String>, pub rungroup: Option<String>,
} }
#[derive(Debug, Clone, PartialEq, Eq, Hash)] #[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct InnerAuth {
pub auth_type: Arc<str>,
pub auth_cred: Arc<str>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InnerMap { pub struct InnerMap {
pub address: Arc<str>,
pub port: u16,
pub is_ssl: bool,
pub is_http2: bool,
pub to_https: bool,
pub rate_limit: Option<isize>,
pub healthcheck: Option<bool>,
pub redirect_to: Option<Arc<str>>,
pub authorization: Option<Arc<InnerAuth>>,
}
#[allow(dead_code)]
impl InnerMap {
pub fn new() -> Self {
Self {
address: Arc::from("127.0.0.1"),
port: Default::default(),
is_ssl: Default::default(),
is_http2: Default::default(),
to_https: Default::default(),
rate_limit: Default::default(),
healthcheck: Default::default(),
redirect_to: Default::default(),
authorization: Default::default(),
}
}
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct InnerMapForJson {
pub address: String, pub address: String,
pub port: u16, pub port: u16,
pub is_ssl: bool, pub is_ssl: bool,
@@ -124,18 +173,8 @@ pub struct InnerMap {
pub rate_limit: Option<isize>, pub rate_limit: Option<isize>,
pub healthcheck: Option<bool>, pub healthcheck: Option<bool>,
} }
#[derive(Debug, Default, Serialize, Deserialize)]
#[allow(dead_code)] pub struct UpstreamSnapshotForJson {
impl InnerMap { pub backends: Vec<InnerMapForJson>,
pub fn new() -> Self { pub requests: usize,
Self {
address: Default::default(),
port: Default::default(),
is_ssl: Default::default(),
is_http2: Default::default(),
to_https: Default::default(),
rate_limit: Default::default(),
healthcheck: Default::default(),
}
}
} }

View File

@@ -228,26 +228,30 @@ pub fn set_tsl_grade(tls_settings: &mut TlsSettings, grade: &str) {
let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1_2)); let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1_2));
// let _ = tls_settings.set_max_proto_version(Some(SslVersion::TLS1_3)); // let _ = tls_settings.set_max_proto_version(Some(SslVersion::TLS1_3));
let _ = tls_settings.set_cipher_list(CIPHERS.high); let _ = tls_settings.set_cipher_list(CIPHERS.high);
let _ = tls_settings.set_ciphersuites(CIPHERS.high); // let _ = tls_settings.set_ciphersuites(CIPHERS.high);
let _ = tls_settings.set_cipher_list(CIPHERS.high);
info!("TLS grade: {:?}, => HIGH", tls_settings.options()); info!("TLS grade: {:?}, => HIGH", tls_settings.options());
} }
Some(TlsGrade::MEDIUM) => { Some(TlsGrade::MEDIUM) => {
let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1)); let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
let _ = tls_settings.set_cipher_list(CIPHERS.medium); let _ = tls_settings.set_cipher_list(CIPHERS.medium);
let _ = tls_settings.set_ciphersuites(CIPHERS.medium); // let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
let _ = tls_settings.set_cipher_list(CIPHERS.medium);
info!("TLS grade: {:?}, => MEDIUM", tls_settings.options()); info!("TLS grade: {:?}, => MEDIUM", tls_settings.options());
} }
Some(TlsGrade::LEGACY) => { Some(TlsGrade::LEGACY) => {
let _ = tls_settings.set_min_proto_version(Some(SslVersion::SSL3)); let _ = tls_settings.set_min_proto_version(Some(SslVersion::SSL3));
let _ = tls_settings.set_cipher_list(CIPHERS.legacy); let _ = tls_settings.set_cipher_list(CIPHERS.legacy);
let _ = tls_settings.set_ciphersuites(CIPHERS.legacy); // let _ = tls_settings.set_ciphersuites(CIPHERS.legacy);
let _ = tls_settings.set_cipher_list(CIPHERS.legacy);
warn!("TLS grade: {:?}, => UNSAFE", tls_settings.options()); warn!("TLS grade: {:?}, => UNSAFE", tls_settings.options());
} }
None => { None => {
// Defaults to MEDIUM // Defaults to MEDIUM
let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1)); let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
let _ = tls_settings.set_cipher_list(CIPHERS.medium); let _ = tls_settings.set_cipher_list(CIPHERS.medium);
let _ = tls_settings.set_ciphersuites(CIPHERS.medium); // let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
let _ = tls_settings.set_cipher_list(CIPHERS.medium);
warn!("TLS grade is not detected defaulting top MEDIUM"); warn!("TLS grade is not detected defaulting top MEDIUM");
} }
} }

View File

@@ -1,20 +1,22 @@
use crate::utils::structs::{InnerMap, UpstreamsDashMap, UpstreamsIdMap}; use crate::utils::structs::{InnerMap, InnerMapForJson, UpstreamSnapshotForJson, UpstreamsDashMap, UpstreamsIdMap};
use crate::utils::tls; use crate::utils::tls;
use crate::utils::tls::CertificateConfig; use crate::utils::tls::CertificateConfig;
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info}; use log::{error, info};
use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher}; use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use port_check::is_port_reachable;
use privdrop::PrivDrop; use privdrop::PrivDrop;
use serde_json::{json, Value};
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use std::any::type_name; use std::any::type_name;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::fmt::Write; use std::fmt::Write;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::net::TcpListener;
use std::os::unix::fs::MetadataExt; use std::os::unix::fs::MetadataExt;
use std::str::FromStr; use std::str::FromStr;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Sender}; use std::sync::mpsc::{channel, Sender};
use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use std::{fs, process, thread, time}; use std::{fs, process, thread, time};
@@ -122,17 +124,29 @@ pub fn compare_dashmaps(map1: &UpstreamsDashMap, map2: &UpstreamsDashMap) -> boo
return false; // Path exists in map1 but not in map2 return false; // Path exists in map1 but not in map2
}; };
let (vec2, _counter2) = entry2.value(); let (vec2, _counter2) = entry2.value();
let set1: HashSet<_> = vec1.iter().collect();
let set2: HashSet<_> = vec2.iter().collect(); if vec1.len() != vec2.len() {
if set1 != set2 {
return false; return false;
} }
for item in vec1.iter() {
let count1 = vec1.iter().filter(|&x| x == item).count();
let count2 = vec2.iter().filter(|&x| x == item).count();
if count1 != count2 {
return false;
}
}
// let set1: HashSet<_> = vec1.iter().collect();
// let set2: HashSet<_> = vec2.iter().collect();
// if set1 != set2 {
// return false;
// }
} }
} }
true true
} }
pub fn merge_headers(target: &DashMap<String, Vec<(String, String)>>, source: &DashMap<String, Vec<(String, String)>>) { pub fn merge_headers(target: &DashMap<Arc<str>, Vec<(String, Arc<str>)>>, source: &DashMap<Arc<str>, Vec<(String, Arc<str>)>>) {
for entry in source.iter() { for entry in source.iter() {
let global_key = entry.key().clone(); let global_key = entry.key().clone();
let global_values = entry.value().clone(); let global_values = entry.value().clone();
@@ -152,27 +166,45 @@ pub fn clone_idmap_into(original: &UpstreamsDashMap, cloned: &UpstreamsIdMap) {
let new_vec = vec.clone(); let new_vec = vec.clone();
for x in vec.iter() { for x in vec.iter() {
let mut id = String::new(); let mut id = String::new();
write!(&mut id, "{}:{}:{}", x.address, x.port, x.is_ssl).unwrap(); write!(
&mut id,
"{}:{}:{}:{}:{}:{}:{}:{:?}",
outer_entry.key(),
x.address,
x.port,
x.is_http2,
x.to_https,
x.rate_limit.unwrap_or_default(),
x.healthcheck.unwrap_or_default(),
x.authorization
)
.unwrap_or(());
let mut hasher = Sha256::new(); let mut hasher = Sha256::new();
// address: "127.0.0.3", port: 8000, is_ssl: false, is_http2: false, to_https: false, rate_limit: Some(200), healthcheck: None, authorization: None } }
hasher.update(id.clone().into_bytes()); hasher.update(id.clone().into_bytes());
let hash = hasher.finalize(); let hash = hasher.finalize();
let hex_hash = base16ct::lower::encode_string(&hash); let hex_hash = base16ct::lower::encode_string(&hash);
let hh = hex_hash[0..50].to_string(); let hh = hex_hash[0..50].to_string();
let to_add = InnerMap { let to_add = InnerMap {
address: hh.clone(), address: Arc::from("127.0.0.1"),
port: 0, port: 0,
is_ssl: false, is_ssl: false,
is_http2: false, is_http2: false,
to_https: false, to_https: false,
rate_limit: None, rate_limit: None,
healthcheck: None, healthcheck: None,
redirect_to: None,
authorization: None,
}; };
cloned.insert(id, to_add); cloned.insert(id, Arc::from(to_add));
cloned.insert(hh, x.to_owned()); cloned.insert(hh, Arc::from(x.to_owned()));
// println!("CLONNED :===========> {:?}", cloned);
} }
new_inner_map.insert(path.clone(), new_vec); new_inner_map.insert(path.clone(), new_vec);
} }
} }
info!("Upstreams are fully populated. Ready to server requests");
} }
pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> { pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
@@ -195,13 +227,13 @@ pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
certificate_configs.push(y); certificate_configs.push(y);
} }
} }
for (_, v) in f.iter() { // for (_, v) in f.iter() {
let y = CertificateConfig { // let y = CertificateConfig {
cert_path: v[0].clone(), // cert_path: v[0].clone(),
key_path: v[1].clone(), // key_path: v[1].clone(),
}; // };
certificate_configs.push(y); // certificate_configs.push(y);
} // }
certificate_configs certificate_configs
} }
@@ -236,14 +268,14 @@ pub fn drop_priv(user: String, group: String, http_addr: String, tls_addr: Optio
thread::sleep(time::Duration::from_millis(10)); thread::sleep(time::Duration::from_millis(10));
loop { loop {
thread::sleep(time::Duration::from_millis(10)); thread::sleep(time::Duration::from_millis(10));
if is_port_reachable(http_addr.clone()) { if port_is_available(http_addr.clone()) {
break; break;
} }
} }
if let Some(tls_addr) = tls_addr { if let Some(tls_addr) = tls_addr {
loop { loop {
thread::sleep(time::Duration::from_millis(10)); thread::sleep(time::Duration::from_millis(10));
if is_port_reachable(tls_addr.clone()) { if port_is_available(tls_addr.clone()) {
break; break;
} }
} }
@@ -255,6 +287,13 @@ pub fn drop_priv(user: String, group: String, http_addr: String, tls_addr: Optio
} }
} }
fn port_is_available(addr: String) -> bool {
match TcpListener::bind(addr) {
Ok(_) => false,
Err(_) => true,
}
}
pub fn check_priv(addr: &str) { pub fn check_priv(addr: &str) {
let port = SocketAddr::from_str(addr).map(|sa| sa.port()).unwrap(); let port = SocketAddr::from_str(addr).map(|sa| sa.port()).unwrap();
match port < 1024 { match port < 1024 {
@@ -268,3 +307,99 @@ pub fn check_priv(addr: &str) {
false => {} false => {}
} }
} }
#[allow(dead_code)]
pub fn upstreams_to_json(upstreams: &UpstreamsDashMap) -> serde_json::Result<String> {
let mut outer = HashMap::new();
for outer_entry in upstreams.iter() {
let mut inner_map = HashMap::new();
for inner_entry in outer_entry.value().iter() {
let (backends, counter) = inner_entry.value();
inner_map.insert(
inner_entry.key().to_string(),
UpstreamSnapshotForJson {
backends: backends
.iter()
.map(|a| InnerMapForJson {
address: a.address.to_string(),
port: a.port,
is_ssl: a.is_ssl,
is_http2: a.is_http2,
to_https: a.to_https,
rate_limit: a.rate_limit,
healthcheck: a.healthcheck,
})
.collect(),
requests: counter.load(Ordering::Relaxed),
},
);
}
outer.insert(outer_entry.key().to_string(), inner_map);
}
// serde_json::to_string_pretty(&outer)
serde_json::to_string(&outer)
}
pub fn upstreams_liveness_json(configured: &UpstreamsDashMap, current: &UpstreamsDashMap) -> Value {
let mut result = serde_json::Map::new();
for host_entry in configured.iter() {
let hostname = host_entry.key().to_string();
let configured_paths = host_entry.value();
let mut paths_json = serde_json::Map::new();
for path_entry in configured_paths.iter() {
let path = path_entry.key().clone();
let (configured_backends, _) = path_entry.value();
let backends_json: Vec<Value> = configured_backends
.iter()
.map(|backend| {
let alive = if let Some(host_map) = current.get(&*hostname) {
if let Some(path_entry) = host_map.get(&*path) {
let list = &path_entry.value().0; // Vec<Arc<InnerMap>>
list.iter().any(|b| b.address == backend.address && b.port == backend.port)
} else {
false
}
} else {
false
};
json!({
"address": &*backend.address,
"port": backend.port,
"alive": alive
})
})
.collect();
paths_json.insert(
path.to_string(),
json!({
"backends": backends_json
}),
);
}
result.insert(hostname, Value::Object(paths_json));
}
Value::Object(result)
}
#[allow(dead_code)]
pub fn prepend(prefix: &str, val: &Option<Arc<str>>, uri: &str, port: &str) -> Option<String> {
val.as_ref().map(|s| {
let mut buf = String::with_capacity(32);
buf.push_str(prefix);
buf.push_str(s);
buf.push_str(":");
buf.push_str(port);
buf.push_str(uri);
buf
})
}

View File

@@ -21,6 +21,7 @@ impl BackgroundService for LB {
let tx_api = tx.clone(); let tx_api = tx.clone();
let config = load_configuration(self.config.upstreams_conf.clone().as_str(), "filepath") let config = load_configuration(self.config.upstreams_conf.clone().as_str(), "filepath")
.await .await
.0
.expect("Failed to load configuration"); .expect("Failed to load configuration");
match config.typecfg.as_str() { match config.typecfg.as_str() {
@@ -53,11 +54,13 @@ impl BackgroundService for LB {
address: self.config.config_address.clone(), address: self.config.config_address.clone(),
masterkey: self.config.master_key.clone(), masterkey: self.config.master_key.clone(),
config_api_enabled: self.config.config_api_enabled.clone(), config_api_enabled: self.config.config_api_enabled.clone(),
tls_address: self.config.config_tls_address.clone(), // tls_address: self.config.config_tls_address.clone(),
tls_certificate: self.config.config_tls_certificate.clone(), // tls_certificate: self.config.config_tls_certificate.clone(),
tls_key_file: self.config.config_tls_key_file.clone(), // tls_key_file: self.config.config_tls_key_file.clone(),
file_server_address: self.config.file_server_address.clone(), file_server_address: self.config.file_server_address.clone(),
file_server_folder: self.config.file_server_folder.clone(), file_server_folder: self.config.file_server_folder.clone(),
current_upstreams: self.ump_upst.clone(),
full_upstreams: self.ump_full.clone(),
}; };
// let tx_api = tx.clone(); // let tx_api = tx.clone();
let _ = tokio::spawn(async move { api_load.start(tx_api).await }); let _ = tokio::spawn(async move { api_load.start(tx_api).await });
@@ -80,8 +83,8 @@ impl BackgroundService for LB {
clone_dashmap_into(&ss.upstreams, &self.ump_upst); clone_dashmap_into(&ss.upstreams, &self.ump_upst);
let current = self.extraparams.load_full(); let current = self.extraparams.load_full();
let mut new = (*current).clone(); let mut new = (*current).clone();
new.sticky_sessions = ss.extraparams.sticky_sessions;
new.to_https = ss.extraparams.to_https; new.to_https = ss.extraparams.to_https;
new.sticky_sessions = ss.extraparams.sticky_sessions;
new.authentication = ss.extraparams.authentication.clone(); new.authentication = ss.extraparams.authentication.clone();
new.rate_limit = ss.extraparams.rate_limit; new.rate_limit = ss.extraparams.rate_limit;
self.extraparams.store(Arc::new(new)); self.extraparams.store(Arc::new(new));

View File

@@ -2,102 +2,120 @@ use crate::utils::structs::InnerMap;
use crate::web::proxyhttp::LB; use crate::web::proxyhttp::LB;
use async_trait::async_trait; use async_trait::async_trait;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use std::sync::Arc;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct GetHostsReturHeaders { pub struct GetHostsReturHeaders {
pub client_headers: Option<Vec<(String, String)>>, pub client_headers: Option<Vec<(String, Arc<str>)>>,
pub server_headers: Option<Vec<(String, String)>>, pub server_headers: Option<Vec<(String, Arc<str>)>>,
} }
#[async_trait] #[async_trait]
pub trait GetHost { pub trait GetHost {
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap>; fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<Arc<InnerMap>>;
fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders>; fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders>;
// fn get_upstreams(&self) -> Arc<UpstreamsDashMap>;
} }
#[async_trait] #[async_trait]
impl GetHost for LB { impl GetHost for LB {
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap> { // fn get_upstreams(&self) -> Arc<UpstreamsDashMap> {
// self.ump_full.clone()
// }
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<Arc<InnerMap>> {
if let Some(b) = backend_id { if let Some(b) = backend_id {
if let Some(bb) = self.ump_byid.get(b) { if let Some(bb) = self.ump_byid.get(b) {
return Some(bb.value().clone()); return Some(bb.value().clone());
} }
} }
let host_entry = self.ump_upst.get(peer)?; let host_entry = self.ump_upst.get(peer)?;
let mut current_path = path.to_string(); let mut end = path.len();
let mut best_match: Option<InnerMap> = None;
loop { loop {
if let Some(entry) = host_entry.get(&current_path) { let slice = &path[..end];
if let Some(entry) = host_entry.get(slice) {
let (servers, index) = entry.value(); let (servers, index) = entry.value();
if !servers.is_empty() { if !servers.is_empty() {
let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len(); let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len();
best_match = Some(servers[idx].clone()); return Some(servers[idx].clone());
break;
} }
} }
if let Some(pos) = current_path.rfind('/') { if let Some(pos) = slice.rfind('/') {
current_path.truncate(pos); end = pos;
} else { } else {
break; break;
} }
} }
if best_match.is_none() { if let Some(entry) = host_entry.get("/") {
if let Some(entry) = host_entry.get("/") { let (servers, index) = entry.value();
let (servers, index) = entry.value(); if !servers.is_empty() {
if !servers.is_empty() { let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len();
let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len(); return Some(servers[idx].clone());
best_match = Some(servers[idx].clone());
}
} }
} }
best_match None
} }
fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders> { fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders> {
let client_entry = self.client_headers.get(peer)?; let client_entry = self.client_headers.get(peer);
let server_entry = self.server_headers.get(peer)?; let server_entry = self.server_headers.get(peer);
if client_entry.is_none() && server_entry.is_none() {
return None;
}
let mut current_path = path; let mut current_path = path;
let mut best_match = None; let mut clnt_match = None;
loop { if let Some(client_entry) = client_entry {
if let Some(entry) = client_entry.get(current_path) { loop {
if !entry.value().is_empty() { if let Some(entry) = client_entry.get(current_path) {
best_match = Some(entry.value().clone()); if !entry.value().is_empty() {
clnt_match = Some(entry.value().clone());
break;
}
}
if current_path == "/" {
break;
}
if let Some(pos) = current_path.rfind('/') {
current_path = if pos == 0 { "/" } else { &current_path[..pos] };
} else {
break; break;
} }
}
if let Some(pos) = current_path.rfind('/') {
current_path = if pos == 0 { "/" } else { &current_path[..pos] };
} else {
break;
} }
} }
current_path = path; current_path = path;
let mut serv_match = None; let mut serv_match = None;
loop { if let Some(server_entry) = server_entry {
if let Some(entry) = server_entry.get(current_path) { loop {
if !entry.value().is_empty() { if let Some(entry) = server_entry.get(current_path) {
serv_match = Some(entry.value().clone());
break;
}
}
if let Some(pos) = current_path.rfind('/') {
current_path = if pos == 0 { "/" } else { &current_path[..pos] };
} else {
break;
}
if best_match.is_none() {
if let Some(entry) = server_entry.get("/") {
if !entry.value().is_empty() { if !entry.value().is_empty() {
best_match = Some(entry.value().clone()); serv_match = Some(entry.value().clone());
break; break;
} }
} }
if current_path == "/" {
if let Some(entry) = server_entry.get("/") {
if !entry.value().is_empty() {
serv_match = Some(entry.value().clone());
break;
}
}
break;
}
if let Some(pos) = current_path.rfind('/') {
current_path = if pos == 0 { "/" } else { &current_path[..pos] };
} else {
break;
}
} }
} }
let result = GetHostsReturHeaders { let result = GetHostsReturHeaders {
client_headers: best_match, client_headers: clnt_match,
server_headers: serv_match, server_headers: serv_match,
}; };
Some(result)
if result.client_headers.is_some() || result.server_headers.is_some() {
Some(result)
} else {
None
}
} }
} }

View File

@@ -1,24 +1,33 @@
use crate::utils::auth::authenticate; use crate::utils::auth::authenticate;
use crate::utils::metrics::*; use crate::utils::metrics::*;
use crate::utils::structs::{AppConfig, Extraparams, Headers, InnerMap, UpstreamsDashMap, UpstreamsIdMap}; use crate::utils::structs::{AppConfig, Extraparams, Headers, InnerMap, UpstreamsDashMap, UpstreamsIdMap};
use crate::web::gethosts::GetHost; use crate::web::gethosts::{GetHost, GetHostsReturHeaders};
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use async_trait::async_trait; use async_trait::async_trait;
use axum::body::Bytes; use axum::body::Bytes;
use dashmap::DashMap;
use log::{debug, error, warn}; use log::{debug, error, warn};
use once_cell::sync::Lazy;
use pingora::http::{RequestHeader, ResponseHeader, StatusCode}; use pingora::http::{RequestHeader, ResponseHeader, StatusCode};
use pingora::prelude::*; use pingora::prelude::*;
use pingora::ErrorSource::Upstream; use pingora::ErrorSource::Upstream;
use pingora_core::listeners::ALPN; use pingora_core::listeners::ALPN;
use pingora_core::prelude::HttpPeer; use pingora_core::prelude::HttpPeer;
// use pingora_core::protocols::TcpKeepalive;
use pingora_limits::rate::Rate; use pingora_limits::rate::Rate;
use pingora_proxy::{ProxyHttp, Session}; use pingora_proxy::{ProxyHttp, Session};
use std::sync::Arc; // use prometheus::{register_int_counter, IntCounter};
use sha2::{Digest, Sha256};
use std::cell::RefCell;
use std::fmt::Write;
use std::sync::{Arc, LazyLock};
use std::time::Duration; use std::time::Duration;
use tokio::time::Instant; use tokio::time::Instant;
static RATE_LIMITER: Lazy<Rate> = Lazy::new(|| Rate::new(Duration::from_secs(1))); // static RATE_LIMITER: Lazy<Rate> = Lazy::new(|| Rate::new(Duration::from_secs(1)));
// static REVERSE_STORE: Lazy<DashMap<String, String>> = Lazy::new(|| DashMap::new());
static REVERSE_STORE: LazyLock<DashMap<String, String>> = LazyLock::new(|| DashMap::new());
thread_local! {static IP_BUFFER: RefCell<String> = RefCell::new(String::with_capacity(50));}
pub static RATE_LIMITER: LazyLock<Rate> = LazyLock::new(|| Rate::new(Duration::from_secs(1)));
#[derive(Clone)] #[derive(Clone)]
pub struct LB { pub struct LB {
@@ -32,13 +41,14 @@ pub struct LB {
} }
pub struct Context { pub struct Context {
backend_id: String, backend_id: Option<String>,
to_https: bool, sticky_sessions: bool,
redirect_to: String, // redirect_to: Option<String>,
start_time: Instant, start_time: Instant,
hostname: Option<String>, hostname: Option<Arc<str>>,
upstream_peer: Option<InnerMap>, upstream_peer: Option<Arc<InnerMap>>,
extraparams: arc_swap::Guard<Arc<Extraparams>>, extraparams: arc_swap::Guard<Arc<Extraparams>>,
client_headers: Option<Vec<(String, Arc<str>)>>,
} }
#[async_trait] #[async_trait]
@@ -46,69 +56,95 @@ impl ProxyHttp for LB {
type CTX = Context; type CTX = Context;
fn new_ctx(&self) -> Self::CTX { fn new_ctx(&self) -> Self::CTX {
Context { Context {
backend_id: String::new(), backend_id: None,
to_https: false, sticky_sessions: false,
redirect_to: String::new(), // redirect_to: None,
start_time: Instant::now(), start_time: Instant::now(),
hostname: None, hostname: None,
upstream_peer: None, upstream_peer: None,
extraparams: self.extraparams.load(), extraparams: self.extraparams.load(),
client_headers: None,
} }
} }
async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> { async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
let ep = _ctx.extraparams.clone(); let hostname = return_header_host_from_upstream(session, &self.ump_upst);
if let Some(auth) = ep.authentication.get("authorization") {
let authenticated = authenticate(&auth.value(), &session);
if !authenticated {
let _ = session.respond_error(401).await;
warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path());
return Ok(true);
}
};
let hostname = return_header_host(&session);
_ctx.hostname = hostname; _ctx.hostname = hostname;
let mut backend_id = None; let mut backend_id = None;
if _ctx.extraparams.sticky_sessions {
if ep.sticky_sessions {
if let Some(cookies) = session.req_header().headers.get("cookie") { if let Some(cookies) = session.req_header().headers.get("cookie") {
if let Ok(cookie_str) = cookies.to_str() { if let Ok(cookie_str) = cookies.to_str() {
for cookie in cookie_str.split(';') { if let Some(pos) = cookie_str.find("backend_id=") {
let trimmed = cookie.trim(); let value = &cookie_str[pos + "backend_id=".len()..];
if let Some(value) = trimmed.strip_prefix("backend_id=") { let end = value.find(';').unwrap_or(value.len());
backend_id = Some(value); backend_id = Some(&value[..end]);
break;
}
} }
} }
} }
} }
match _ctx.hostname.as_ref() { match _ctx.hostname.as_ref() {
None => return Ok(false), None => return Ok(false),
Some(host) => { Some(host) => {
// let optioninnermap = self.get_host(host.as_str(), host.as_str(), backend_id); let optioninnermap = self.get_host(host, session.req_header().uri.path(), backend_id);
let optioninnermap = self.get_host(host.as_str(), session.req_header().uri.path(), backend_id);
match optioninnermap { match optioninnermap {
None => return Ok(false), None => return Ok(false),
Some(ref innermap) => { Some(ref innermap) => {
if let Some(rate) = innermap.rate_limit.or(ep.rate_limit) { if let Some(auth) = _ctx.extraparams.authentication.as_ref().or(innermap.authorization.as_ref()) {
// let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip().to_string()).unwrap_or_else(|| host.to_string()); if !authenticate(&auth.auth_type, &auth.auth_cred, &session) {
let _ = session.respond_error(401).await;
warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path());
return Ok(true);
}
}
if let Some(rate) = innermap.rate_limit.or(_ctx.extraparams.rate_limit) {
let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip()); let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip());
let curr_window_requests = RATE_LIMITER.observe(&rate_key, 1); let curr_window_requests = RATE_LIMITER.observe(&rate_key, 1);
if curr_window_requests > rate { if curr_window_requests > rate {
let mut header = ResponseHeader::build(429, None).unwrap(); let header = ResponseHeader::build(429, None)?;
header.insert_header("X-Rate-Limit-Limit", rate.to_string()).unwrap();
header.insert_header("X-Rate-Limit-Remaining", "0").unwrap();
header.insert_header("X-Rate-Limit-Reset", "1").unwrap();
session.set_keepalive(None); session.set_keepalive(None);
session.write_response_header(Box::new(header), true).await?; session.write_response_header(Box::new(header), true).await?;
debug!("Rate limited: {:?}, {}", rate_key, rate); debug!("Rate limited: {:?}, {}", rate_key, rate);
return Ok(true); return Ok(true);
} }
} }
if let Some(redirect_to) = &innermap.redirect_to {
let uri = session.req_header().uri.path();
let capacity = redirect_to.len() + uri.len();
let mut s = String::with_capacity(capacity);
s.push_str(redirect_to);
s.push_str(uri);
let mut resp = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
resp.insert_header("Location", s)?;
resp.insert_header("Content-Length", "0")?;
session.write_response_header(Box::new(resp), true).await?;
return Ok(true);
}
if _ctx.extraparams.to_https.unwrap_or(false) || innermap.to_https {
if let Some(stream) = session.stream() {
if stream.get_ssl().is_none() {
if let Some(host) = _ctx.hostname.as_ref() {
let port = self.config.proxy_port_tls.as_deref().unwrap_or("443");
let uri = session.req_header().uri.path();
let capacity = host.len() + uri.len() + 8;
let mut s = String::with_capacity(capacity);
s.push_str("https://");
s.push_str(host);
if port != "443" {
s.push_str(":");
s.push_str(&port);
}
s.push_str(uri);
let mut resp = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
resp.insert_header("Location", s)?;
resp.insert_header("Content-Length", "0")?;
session.write_response_header(Box::new(resp), true).await?;
return Ok(true);
}
}
}
}
} }
} }
_ctx.upstream_peer = optioninnermap; _ctx.upstream_peer = optioninnermap;
@@ -117,56 +153,67 @@ impl ProxyHttp for LB {
Ok(false) Ok(false)
} }
async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> { async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
// let host_name = return_header_host(&session);
match ctx.hostname.as_ref() { match ctx.hostname.as_ref() {
Some(hostname) => { Some(hostname) => match ctx.upstream_peer.as_ref() {
match ctx.upstream_peer.as_ref() { Some(innermap) => {
// Some((address, port, ssl, is_h2, to_https)) => { let mut peer = Box::new(HttpPeer::new((&*innermap.address, innermap.port), innermap.is_ssl, hostname.to_string()));
Some(innermap) => {
let mut peer = Box::new(HttpPeer::new((innermap.address.clone(), innermap.port.clone()), innermap.is_ssl, String::new()));
// if session.is_http2() {
if innermap.is_http2 {
peer.options.alpn = ALPN::H2;
}
if innermap.is_ssl {
peer.sni = hostname.clone();
peer.options.verify_cert = false;
peer.options.verify_hostname = false;
}
if ctx.to_https || innermap.to_https {
if let Some(stream) = session.stream() {
if stream.get_ssl().is_none() {
if let Some(addr) = session.server_addr() {
if let Some((host, _)) = addr.to_string().split_once(':') {
let uri = session.req_header().uri.path_and_query().map_or("/", |pq| pq.as_str());
let port = self.config.proxy_port_tls.unwrap_or(403);
ctx.to_https = true;
ctx.redirect_to = format!("https://{}:{}{}", host, port, uri);
}
}
}
}
}
ctx.backend_id = format!("{}:{}:{}", innermap.address.clone(), innermap.port.clone(), innermap.is_ssl); if innermap.is_http2 {
Ok(peer) peer.options.alpn = ALPN::H2;
} }
None => { if innermap.is_ssl {
if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await { peer.options.verify_cert = false;
error!("Failed to send error response: {:?}", e); peer.options.verify_hostname = false;
}
Err(Box::new(Error {
etype: HTTPStatus(502),
esource: Upstream,
retry: RetryType::Decided(false),
cause: None,
context: Option::from(ImmutStr::Static("Upstream not found")),
}))
} }
/*
Experimental optionsv
The following TCP optimizations were tested but caused performance degrade under heavy load:
peer.options.tcp_keepalive = Some(TcpKeepalive {
idle: Duration::from_secs(60),
interval: Duration::from_secs(10),
count: 5,
user_timeout: Duration::from_secs(30),
});
peer.options.idle_timeout = Some(Duration::from_secs(300));
peer.options.tcp_recv_buf = Some(128 * 1024);
End of experimental options
*/
if ctx.extraparams.sticky_sessions {
let mut s = String::with_capacity(64);
write!(
&mut s,
"{}:{}:{}:{}:{}:{}:{}:{:?}",
hostname,
innermap.address,
innermap.port,
innermap.is_http2,
innermap.to_https,
innermap.rate_limit.unwrap_or_default(),
innermap.healthcheck.unwrap_or_default(),
innermap.authorization
)
.unwrap_or(());
ctx.backend_id = Some(s);
ctx.sticky_sessions = true;
}
Ok(peer)
} }
} None => {
if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await {
error!("Failed to send error response: {:?}", e);
}
Err(Box::new(Error {
etype: HTTPStatus(502),
esource: Upstream,
retry: RetryType::Decided(false),
cause: None,
context: Option::from(ImmutStr::Static("Upstream not found")),
}))
}
},
None => { None => {
// session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error");
if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await { if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await {
error!("Failed to send error response: {:?}", e); error!("Failed to send error response: {:?}", e);
} }
@@ -182,91 +229,68 @@ impl ProxyHttp for LB {
} }
async fn upstream_request_filter(&self, session: &mut Session, upstream_request: &mut RequestHeader, ctx: &mut Self::CTX) -> Result<()> { async fn upstream_request_filter(&self, session: &mut Session, upstream_request: &mut RequestHeader, ctx: &mut Self::CTX) -> Result<()> {
if let Some(hostname) = ctx.hostname.as_ref() { // if let Some(hostname) = ctx.hostname.as_deref() {
upstream_request.insert_header("Host", hostname)?; // upstream_request.insert_header("Host", hostname)?;
} // }
if let Some(peer) = ctx.upstream_peer.as_ref() {
upstream_request.insert_header("X-Forwarded-For", peer.address.as_str())?; if let Some(client_ip) = session.client_addr() {
IP_BUFFER.with(|buffer| {
let mut buf = buffer.borrow_mut();
buf.clear();
write!(buf, "{}", client_ip).unwrap_or(());
upstream_request.append_header("X-Forwarded-For", buf.as_str()).unwrap_or(false);
});
} }
if let Some(headers) = self.get_header(ctx.hostname.as_ref().unwrap_or(&"localhost".to_string()), session.req_header().uri.path()) { let hostname = ctx.hostname.as_deref().unwrap_or("localhost");
if let Some(client_headers) = headers.server_headers { let path = session.req_header().uri.path();
for k in client_headers { let GetHostsReturHeaders { server_headers, client_headers } = match self.get_header(hostname, path) {
upstream_request.insert_header(k.0, k.1)?; Some(h) => h,
} None => return Ok(()),
};
if let Some(sh) = server_headers {
for (k, v) in sh {
upstream_request.insert_header(k, v.as_ref())?;
} }
} }
if let Some(ch) = client_headers {
ctx.client_headers = Some(ch);
}
Ok(()) Ok(())
} }
async fn response_filter(&self, _session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> {
// async fn request_body_filter(&self, _session: &mut Session, _body: &mut Option<Bytes>, _end_of_stream: bool, _ctx: &mut Self::CTX) -> Result<()> if ctx.sticky_sessions {
// where if let Some(bid) = &ctx.backend_id {
// Self::CTX: Send + Sync, let tt = if let Some(existing) = REVERSE_STORE.get(bid) {
// { existing.value().clone()
// Ok(()) } else {
// } let mut hasher = Sha256::new();
async fn response_filter(&self, session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> { hasher.update(bid.as_bytes());
// _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap(); let hash = hasher.finalize();
if ctx.extraparams.sticky_sessions { let hex_hash = base16ct::lower::encode_string(&hash);
let backend_id = ctx.backend_id.clone(); let hh = hex_hash[0..50].to_string();
if let Some(bid) = self.ump_byid.get(&backend_id) { REVERSE_STORE.insert(bid.clone(), hh.clone());
let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.address)); REVERSE_STORE.insert(hh.clone(), bid.clone());
hh
};
// let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", tt));
let mut buf = String::with_capacity(80);
buf.push_str("backend_id=");
buf.push_str(&tt);
buf.push_str("; Path=/; Max-Age=600; HttpOnly; SameSite=Lax");
let _ = _upstream_response.insert_header("set-cookie", buf.as_str());
} }
} }
if ctx.to_https {
let mut redirect_response = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
redirect_response.insert_header("Location", ctx.redirect_to.clone())?;
redirect_response.insert_header("Content-Length", "0")?;
session.write_response_header(Box::new(redirect_response), false).await?;
}
match ctx.hostname.as_ref() {
Some(host) => {
let path = session.req_header().uri.path();
let split_header = host.split_once(':');
match split_header {
Some((host, _port)) => {
if let Some(headers) = self.get_header(host, path) {
if let Some(server_headers) = headers.client_headers {
for k in server_headers {
_upstream_response.insert_header(k.0, k.1).unwrap();
}
}
}
}
None => {
if let Some(headers) = self.get_header(host, path) {
if let Some(server_headers) = headers.client_headers {
for k in server_headers {
_upstream_response.insert_header(k.0, k.1).unwrap();
}
}
}
}
}
// match split_header { if let Some(client_headers) = &ctx.client_headers {
// Some(sh) => { for (k, v) in client_headers.iter() {
// let client_header = self.get_header(sh.0, path); _upstream_response.append_header(k.clone(), v.as_ref())?;
// for k in client_header.iter() {
// for t in k.iter() {
// _upstream_response.insert_header(t.0.clone(), t.1.clone()).unwrap();
// }
// }
// }
// None => {
// let client_header = self.get_header(host_header, path);
// for k in client_header.iter() {
// for t in k.iter() {
// _upstream_response.insert_header(t.0.clone(), t.1.clone()).unwrap();
// }
// }
// }
// }
} }
None => {}
} }
session.set_keepalive(Some(300));
// session.set_keepalive(Some(300));
// println!("session.get_keepalive: {:?}", session.get_keepalive());
Ok(()) Ok(())
} }
@@ -274,28 +298,23 @@ impl ProxyHttp for LB {
let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16()); let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16());
debug!("{}, response code: {response_code}", self.request_summary(session, ctx)); debug!("{}, response code: {response_code}", self.request_summary(session, ctx));
let m = &MetricTypes { let m = &MetricTypes {
method: session.req_header().method.to_string(), method: session.req_header().method.clone(),
code: session.response_written().map(|resp| resp.status.as_str().to_owned()).unwrap_or("0".to_string()), code: session.response_written().map(|resp| resp.status),
latency: ctx.start_time.elapsed(), latency: ctx.start_time.elapsed(),
version: session.req_header().version, version: session.req_header().version,
// upstream: ctx.hostname.clone().unwrap_or(Arc::from("localhost")),
upstream: ctx.hostname.take().unwrap_or_else(|| Arc::from("localhost")),
}; };
calc_metrics(m); calc_metrics(m);
} }
} }
fn return_header_host(session: &Session) -> Option<String> { fn return_header_host_from_upstream(session: &Session, ump_upst: &UpstreamsDashMap) -> Option<Arc<str>> {
if session.is_http2() { let host_str = if session.is_http2() {
match session.req_header().uri.host() { session.req_header().uri.host()?
Some(host) => Option::from(host.to_string()),
None => None,
}
} else { } else {
match session.req_header().headers.get("host") { let h = session.req_header().headers.get("host")?.to_str().ok()?;
Some(host) => { h.split_once(':').map_or(h, |(host, _)| host)
let header_host = host.to_str().unwrap().splitn(2, ':').collect::<Vec<&str>>(); };
Option::from(header_host[0].to_string()) ump_upst.get(host_str).map(|entry| entry.key().clone())
}
None => None,
}
}
} }

View File

@@ -31,9 +31,9 @@ pub fn run() {
let sh_config = Arc::new(DashMap::new()); let sh_config = Arc::new(DashMap::new());
let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams { let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams {
sticky_sessions: false,
to_https: None, to_https: None,
authentication: DashMap::new(), sticky_sessions: false,
authentication: None,
rate_limit: None, rate_limit: None,
})); }));

View File

@@ -1,12 +1,13 @@
use crate::utils::discovery::APIUpstreamProvider; use crate::utils::discovery::APIUpstreamProvider;
use crate::utils::structs::Configuration; use crate::utils::structs::{Config, Configuration, UpstreamsDashMap};
use crate::utils::tools::{upstreams_liveness_json, upstreams_to_json};
use axum::body::Body; use axum::body::Body;
use axum::extract::{Query, State}; use axum::extract::{Query, State};
use axum::http::{Response, StatusCode}; use axum::http::{header::HeaderMap, Response, StatusCode};
use axum::response::IntoResponse; use axum::response::IntoResponse;
use axum::routing::{get, post}; use axum::routing::{get, post};
use axum::{Json, Router}; use axum::{Json, Router};
use axum_server::tls_openssl::OpenSSLConfig; // use axum_server::tls_openssl::OpenSSLConfig;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use futures::SinkExt; use futures::SinkExt;
use jsonwebtoken::{encode, EncodingKey, Header}; use jsonwebtoken::{encode, EncodingKey, Header};
@@ -14,8 +15,10 @@ use log::{error, info, warn};
use prometheus::{gather, Encoder, TextEncoder}; use prometheus::{gather, Encoder, TextEncoder};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
use std::net::SocketAddr; // use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, SystemTime, UNIX_EPOCH};
use subtle::ConstantTimeEq;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use tower_http::services::ServeDir; use tower_http::services::ServeDir;
@@ -36,16 +39,19 @@ struct AppState {
master_key: String, master_key: String,
config_sender: Sender<Configuration>, config_sender: Sender<Configuration>,
config_api_enabled: bool, config_api_enabled: bool,
current_upstreams: Arc<UpstreamsDashMap>,
full_upstreams: Arc<UpstreamsDashMap>,
} }
#[allow(unused_mut)] #[allow(unused_mut)]
pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>) { pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>, upstreams_curr: Arc<UpstreamsDashMap>, upstreams_full: Arc<UpstreamsDashMap>) {
let app_state = AppState { let app_state = AppState {
master_key: config.masterkey.clone(), master_key: config.masterkey.clone(),
config_sender: to_return.clone(), config_sender: to_return.clone(),
config_api_enabled: config.config_api_enabled.clone(), config_api_enabled: config.config_api_enabled.clone(),
current_upstreams: upstreams_curr,
full_upstreams: upstreams_full,
}; };
let app = Router::new() let app = Router::new()
// .route("/{*wildcard}", get(senderror)) // .route("/{*wildcard}", get(senderror))
// .route("/{*wildcard}", post(senderror)) // .route("/{*wildcard}", post(senderror))
@@ -56,19 +62,20 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
.route("/jwt", post(jwt_gen)) .route("/jwt", post(jwt_gen))
.route("/conf", post(conf)) .route("/conf", post(conf))
.route("/metrics", get(metrics)) .route("/metrics", get(metrics))
.route("/status", get(status))
.with_state(app_state); .with_state(app_state);
if let Some(value) = &config.tls_address { // if let Some(value) = &config.tls_address {
let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap(); // let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap();
let addr: SocketAddr = value.parse().expect("Unable to parse socket address"); // let addr: SocketAddr = value.parse().expect("Unable to parse socket address");
let tls_app = app.clone(); // let tls_app = app.clone();
tokio::spawn(async move { // tokio::spawn(async move {
if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await { // if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await {
eprintln!("TLS server failed: {}", e); // eprintln!("TLS server failed: {}", e);
} // }
}); // });
info!("Starting the TLS API server on: {}", value); // info!("Starting the TLS API server on: {}", value);
} // }
if let (Some(address), Some(folder)) = (&config.file_server_address, &config.file_server_folder) { if let (Some(address), Some(folder)) = (&config.file_server_address, &config.file_server_folder) {
let static_files = ServeDir::new(folder); let static_files = ServeDir::new(folder);
@@ -82,27 +89,36 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
axum::serve(listener, app).await.unwrap(); axum::serve(listener, app).await.unwrap();
} }
async fn conf(State(mut st): State<AppState>, Query(params): Query<HashMap<String, String>>, content: String) -> impl IntoResponse { async fn conf(State(st): State<AppState>, Query(params): Query<HashMap<String, String>>, headers: HeaderMap, content: String) -> impl IntoResponse {
if !st.config_api_enabled { if !st.config_api_enabled {
return Response::builder() return Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Config API is disabled !\n")).unwrap();
.status(StatusCode::FORBIDDEN)
.body(Body::from("Config remote API is disabled !\n"))
.unwrap();
} }
if let Some(s) = headers.get("x-api-key").and_then(|v| v.to_str().ok()).or(params.get("key").map(|s| s.as_str())) {
if let Some(s) = params.get("key") { if s.as_bytes().ct_eq(st.master_key.as_bytes()).into() {
if s.to_owned() == st.master_key { let strcontent = content.as_str();
if let Some(serverlist) = crate::utils::parceyaml::load_configuration(content.as_str(), "content").await { let parsed = serde_yml::from_str::<Config>(strcontent);
st.config_sender.send(serverlist).await.unwrap(); match parsed {
return Response::builder().status(StatusCode::OK).body(Body::from("Config, conf file, updated !\n")).unwrap(); Ok(_) => {
} else { let _ = tokio::spawn(async move { apply_config(content.as_str(), st).await });
return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("Failed to parse config!\n")).unwrap(); return Response::builder().status(StatusCode::OK).body(Body::from("Accepted! Applying in background\n")).unwrap();
}; }
Err(err) => {
error!("Failed to parse upstreams file: {}", err);
return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from(format!("Failed: {}\n", err))).unwrap();
}
}
} }
} }
Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap() Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap()
} }
async fn apply_config(content: &str, mut st: AppState) {
let sl = crate::utils::parceyaml::load_configuration(content, "content").await;
if let Some(serverlist) = sl.0 {
let _ = st.config_sender.send(serverlist).await;
}
}
async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) { async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) {
if payload.master_key == state.master_key { if payload.master_key == state.master_key {
let now = SystemTime::now() + Duration::from_secs(payload.valid * 60); let now = SystemTime::now() + Duration::from_secs(payload.valid * 60);
@@ -132,7 +148,6 @@ async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -
async fn metrics() -> impl IntoResponse { async fn metrics() -> impl IntoResponse {
let metric_families = gather(); let metric_families = gather();
let encoder = TextEncoder::new(); let encoder = TextEncoder::new();
let mut buffer = Vec::new(); let mut buffer = Vec::new();
if let Err(e) = encoder.encode(&metric_families, &mut buffer) { if let Err(e) = encoder.encode(&metric_families, &mut buffer) {
// encoding error fallback // encoding error fallback
@@ -141,7 +156,6 @@ async fn metrics() -> impl IntoResponse {
.body(Body::from(format!("Failed to encode metrics: {}", e))) .body(Body::from(format!("Failed to encode metrics: {}", e)))
.unwrap(); .unwrap();
} }
Response::builder() Response::builder()
.status(StatusCode::OK) .status(StatusCode::OK)
.header("Content-Type", encoder.format_type()) .header("Content-Type", encoder.format_type())
@@ -149,7 +163,35 @@ async fn metrics() -> impl IntoResponse {
.unwrap() .unwrap()
} }
// #[allow(dead_code)] async fn status(State(st): State<AppState>, Query(params): Query<HashMap<String, String>>) -> impl IntoResponse {
// async fn senderror() -> impl IntoResponse { if let Some(_) = params.get("live") {
// Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("No live upstream found!\n")).unwrap() let r = upstreams_liveness_json(&st.full_upstreams, &st.current_upstreams);
// } return Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(Body::from(format!("{}", r)))
.unwrap();
}
if let Some(_) = params.get("all") {
let resp = upstreams_to_json(&st.current_upstreams);
match resp {
Ok(j) => {
return Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(Body::from(j))
.unwrap()
}
Err(e) => {
return Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::from(format!("Failed to get status: {}", e)))
.unwrap();
}
}
}
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::from(format!("Parameter mismatch")))
.unwrap()
}