33 Commits

Author SHA1 Message Date
Ara Sadoyan
ece4fa20af README 2025-07-24 13:50:15 +02:00
Ara Sadoyan
2ad3a059ab Per path rate limiter 2025-07-24 13:34:15 +02:00
Ara Sadoyan
6f012cee69 Code cleanup 2025-07-22 17:40:58 +02:00
Ara Sadoyan
51c88c8f7c Some structural changes and improvements 2025-07-12 16:17:45 +02:00
Ara Sadoyan
f91bc41103 benchmark image 2025-07-10 17:46:05 +02:00
Ara Sadoyan
21e1276ff5 Readme update 2025-07-09 15:22:38 +02:00
Ara Sadoyan
8463cdabbc Added configurable rate limiter 2025-07-09 15:01:20 +02:00
Ara Sadoyan
d0e4b52ce6 Enable/Disable config API from config 2025-07-04 15:06:05 +02:00
Ara Sadoyan
b552d24497 README 2025-07-02 19:00:05 +02:00
Ara Sadoyan
2e33d692bb Added optional minimal file server 2025-07-02 18:29:14 +02:00
Ara Sadoyan
e586967830 Code cleanup, nothing special 2025-06-30 18:24:25 +02:00
Ara Sadoyan
8d4e434d6a Dynamic load of SSL certificates from disk. 2025-06-19 18:32:44 +02:00
Ara Sadoyan
60b7b3aa7a README 2025-06-16 13:42:30 +02:00
Ara Sadoyan
569db8e18d Project rename. Load multiple certificates from folder. 2025-06-16 13:32:05 +02:00
Ara Sadoyan
4126249bcd Project rename. Load multiple certificates from folder. 2025-06-16 13:29:13 +02:00
Ara Sadoyan
0779f97277 README Update 2025-06-09 18:12:25 +02:00
Ara Sadoyan
b047331e6a README Update 2025-06-09 18:11:44 +02:00
Ara Sadoyan
a341fa30db Add TLS to API server 2025-06-09 18:06:16 +02:00
Ara Sadoyan
9d604d62e7 METRICS.md update 2025-06-07 15:51:23 +02:00
Ara Sadoyan
4a21700552 README update 2025-06-07 11:47:29 +02:00
Ara Sadoyan
f0157b6e8f README update 2025-06-07 11:38:07 +02:00
Ara Sadoyan
1370396ae8 README update 2025-06-07 10:56:31 +02:00
Ara Sadoyan
64ef4e14af README update 2025-06-07 10:11:39 +02:00
Ara Sadoyan
ffc2bab79f API server changes, improvements 2025-06-06 19:30:51 +02:00
Ara Sadoyan
8e05794784 Metrics exporter for Prometheus 2025-05-28 21:24:22 +02:00
Ara Sadoyan
423c7afa90 Metrics exporter for Prometheus 2025-05-28 21:23:10 +02:00
Ara Sadoyan
78a084380a Name and config changes 2025-05-28 14:54:01 +02:00
Ara Sadoyan
ada2032732 http to https redirect cleanup 2025-05-26 18:30:42 +02:00
Ara Sadoyan
a89592bd07 http to https redirect cleanup 2025-05-26 16:24:15 +02:00
Ara Sadoyan
2a93bc2cd6 http to https redirect cleanup 2025-05-26 12:42:01 +02:00
Ara Sadoyan
d38588a299 http to https redirect 2025-05-25 11:19:28 +02:00
Ara Sadoyan
3e93920a0d Some type changes 2025-05-21 16:49:37 +02:00
Ara Sadoyan
fce25b8d15 Turned back to OpenSSL, with static link 2025-05-21 15:54:38 +02:00
24 changed files with 1884 additions and 1012 deletions

1
.gitignore vendored
View File

@@ -8,6 +8,7 @@
/target/ /target/
*.iml *.iml
.idea/ .idea/
.etc/
*.ipr *.ipr
*.iws *.iws
/out/ /out/

919
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,41 +1,55 @@
[package] [package]
name = "gazan" name = "aralez"
version = "0.1.0" version = "0.9.1"
edition = "2021" edition = "2021"
[profile.release] [profile.release]
opt-level = 3
lto = true lto = true
codegen-units = 1 codegen-units = 1
opt-level = 3 panic = "abort"
strip = "symbols" strip = true
[dependencies] [dependencies]
tokio = { version = "1.45.0", features = ["full"] } tokio = { version = "1.45.1", features = ["full"] }
pingora = { version = "0.5.0", features = ["lb", "rustls"] } # openssl, rustls, boringssl #pingora = { version = "0.5.0", features = ["lb", "rustls"] } # openssl, rustls, boringssl
pingora = { version = "0.5.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl
serde = { version = "1.0.219", features = ["derive"] } serde = { version = "1.0.219", features = ["derive"] }
dashmap = "7.0.0-rc2" dashmap = "7.0.0-rc2"
pingora-core = "0.5.0" pingora-core = "0.5.0"
pingora-proxy = "0.5.0" pingora-proxy = "0.5.0"
pingora-http = "0.5.0" pingora-http = "0.5.0"
pingora-limits = "0.5.0"
#pingora-pool = "0.5.0"
async-trait = "0.1.88" async-trait = "0.1.88"
env_logger = "0.11.8" env_logger = "0.11.8"
log = "0.4.27" log = "0.4.27"
futures = "0.3.31" futures = "0.3.31"
notify = "8.0.0" notify = "8.0.0"
axum = { version = "0.8.4" } axum = { version = "0.8.4" }
#reqwest = { version = "0.12.15", features = ["json", "native-tls-alpn"] } axum-server = { version = "0.7.2", features = ["tls-openssl"] }
reqwest = { version = "0.12.20", features = ["json", "native-tls-alpn"] }
#reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] } #reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] }
reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] } #reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] }
serde_yaml = "0.9.34-deprecated" serde_yaml = "0.9.34-deprecated"
rand = "0.9.0" rand = "0.9.0"
base64 = "0.22.1" base64 = "0.22.1"
jsonwebtoken = "9.3.1" jsonwebtoken = "9.3.1"
tonic = "0.13.0" tonic = "0.13.1"
sha2 = { version = "0.11.0-pre.5", default-features = false } sha2 = { version = "0.11.0-rc.0", default-features = false }
base16ct = { version = "0.2.0", features = ["alloc"] } base16ct = { version = "0.2.0", features = ["alloc"] }
urlencoding = "2.1.3" urlencoding = "2.1.3"
arc-swap = "1.7.1" arc-swap = "1.7.1"
rustls = { version = "0.23.27", features = ["ring"] } #rustls = { version = "0.23.27", features = ["ring"] }
mimalloc = { version = "0.1.46", default-features = false } mimalloc = { version = "0.1.47", default-features = false }
prometheus = "0.14.0"
lazy_static = "1.5.0"
#openssl = "0.10.73"
x509-parser = "0.17.0"
rustls-pemfile = "2.2.0"
tower-http = { version = "0.6.6", features = ["fs"] }
once_cell = "1.20.2"
#moka = { version = "0.12.10", features = ["sync"] }

120
METRICS.md Normal file
View File

@@ -0,0 +1,120 @@
# 📈 Aralez Prometheus Metrics Reference
This document outlines Prometheus metrics for the [Aralez](https://github.com/sadoyan/aralez) reverse proxy.
These metrics can be used for monitoring, alerting and performance analysis.
Exposed to `http://config_address/metrics`
By default `http://127.0.0.1:3000/metrics`
# 📊 Example Grafana dashboard during stress test :
![Aralez](https://netangels.net/utils/dash.png)
---
## 🛠️ Prometheus Metrics
### 1. `aralez_requests_total`
- **Type**: `Counter`
- **Purpose**: Total number of requests served by Aralez.
**PromQL example:**
```promql
rate(aralez_requests_total[5m])
```
---
### 2. `aralez_errors_total`
- **Type**: `Counter`
- **Purpose**: Count of requests that resulted in an error.
**PromQL example:**
```promql
rate(aralez_errors_total[5m])
```
---
### 3. `aralez_responses_total{status="200"}`
- **Type**: `CounterVec`
- **Purpose**: Count of responses by HTTP status code.
**PromQL example:**
```promql
rate(aralez_responses_total{status=~"5.."}[5m]) > 0
```
> Useful for alerting on 5xx errors.
---
### 4. `aralez_response_latency_seconds`
- **Type**: `Histogram`
- **Purpose**: Tracks the latency of responses in seconds.
**Example bucket output:**
```prometheus
aralez_response_latency_seconds_bucket{le="0.01"} 15
aralez_response_latency_seconds_bucket{le="0.1"} 120
aralez_response_latency_seconds_bucket{le="0.25"} 245
aralez_response_latency_seconds_bucket{le="0.5"} 500
...
aralez_response_latency_seconds_count 1023
aralez_response_latency_seconds_sum 42.6
```
| Metric | Meaning |
|-------------------------|---------------------------------------------------------------|
| `bucket{le="0.1"} 120` | 120 requests were ≤ 100ms |
| `bucket{le="0.25"} 245` | 245 requests were ≤ 250ms |
| `count` | Total number of observations (i.e., total responses measured) |
| `sum` | Total time of all responses, in seconds |
### 🔍 How to interpret:
- `le` means “less than or equal to”.
- `count` is total amount of observations.
- `sum` is the total time (in seconds) of all responses.
**PromQL examples:**
🔹 **95th percentile latency**
```promql
histogram_quantile(0.95, rate(aralez_response_latency_seconds_bucket[5m]))
```
🔹 **Average latency**
```promql
rate(aralez_response_latency_seconds_sum[5m]) / rate(aralez_response_latency_seconds_count[5m])
```
---
## ✅ Notes
- Metrics are registered after the first served request.
---
✅ Summary of key metrics
| Metric Name | Type | What it Tells You |
|---------------------------------------|------------|---------------------------|
| `aralez_requests_total` | Counter | Total requests served |
| `aralez_errors_total` | Counter | Number of failed requests |
| `aralez_responses_total{status="200"}` | CounterVec | Response status breakdown |
| `aralez_response_latency_seconds` | Histogram | How fast responses are |
📘 *Last updated: May 2025*

358
README.md
View File

@@ -1,38 +1,47 @@
![Gazan](https://netangels.net/utils/gazan-white.jpg) ![Aralez](https://netangels.net/utils/aralez-white.jpg)
# Gazan - The beast-mode reverse proxy. # Aralez (Արալեզ), Reverse proxy and service mesh built on top of Cloudflare's Pingora
Gazan is a Reverse proxy, service mesh based on Cloudflare's Pingora What Aralez means ?
**Aralez = Արալեզ** <ins>.Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them.</ins>.
**What Gazan means?** Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box.
<ins>Gazan = Գազան = beast / wild animal in Armenian / Often used as a synonym to something great.</ins>.
Built on Rust, on top of **Cloudflares Pingora engine**, **Gazan** delivers world-class performance, security and scalability — right out of the box.
--- ---
## 🔧 Key Features ## 🔧 Key Features
- **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required - **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required.
- **TLS Termination** — Built-in OpenSSL support - **TLS Termination** — Built-in OpenSSL support.
- **Upstreams TLS detection** — Gazan will automatically detect if upstreams uses secure connection - **Automatic load of certificates** — Automatically reads and loads certificates from a folder, without a restart.
- **Authentication** — Supports Basic Auth, API tokens, and JWT verification - **Upstreams TLS detection** — Aralez will automatically detect if upstreams uses secure connection.
- **Built in rate limiter** — Limit requests to the server, by setting an upper limit for requests per second, per virtualhost.
- **Global rate limiter** — Set rate limit for all virtualhosts.
- **Per path rate limiter** — Set rate limit for specific paths. Path limits will override global limits.
- **Authentication** — Supports Basic Auth, API tokens, and JWT verification.
- **Basic Auth**
- **API Key** via `x-api-key` header
- **JWT Auth**, with tokens issued by Aralez itself via `/jwt` API
- ⬇️ See below for examples and implementation details.
- **Load Balancing Strategies** - **Load Balancing Strategies**
- Round-robin - Round-robin
- Failover with health checks - Failover with health checks
- Sticky sessions via cookies - Sticky sessions via cookies
- **Unified Port** — Serve HTTP and WebSocket traffic over the same connection - **Unified Port** — Serve HTTP and WebSocket traffic over the same connection.
- **Memory Safe** — Created purely on Rust - **Built in file server** — Build in minimalistic file server for serving static files, should be added as upstreams for public access.
- **High Performance** — Built with [Pingora](https://github.com/cloudflare/pingora) and tokio for async I/O - **Memory Safe** — Created purely on Rust.
- **High Performance** — Built with [Pingora](https://github.com/cloudflare/pingora) and tokio for async I/O.
## 🌍 Highlights ## 🌍 Highlights
- ⚙️ **Upstream Providers:** Supports `file`-based static upstreams, dynamic service discovery via `Consul`. - ⚙️ **Upstream Providers:**
- `file` Upstreams are declared in config file.
- `consul` Upstreams are dynamically updated from Hashicorp Consul.
- 🔁 **Hot Reloading:** Modify upstreams on the fly via `upstreams.yaml` — no restart needed. - 🔁 **Hot Reloading:** Modify upstreams on the fly via `upstreams.yaml` — no restart needed.
- 🔮 **Automatic WebSocket Support:** Zero config — connection upgrades are handled seamlessly. - 🔮 **Automatic WebSocket Support:** Zero config — connection upgrades are handled seamlessly.
- 🔮 **Automatic GRPC Support:** Zero config, Requires `ssl` to proxy, gRPC is handled seamlessly. - 🔮 **Automatic GRPC Support:** Zero config, Requires `ssl` to proxy, gRPC handled seamlessly.
- 🔮 **Upstreams Session Stickiness:** Enable/Disable Sticky sessions. - 🔮 **Upstreams Session Stickiness:** Enable/Disable Sticky sessions globally.
- 🔐 **TLS Termination:** Fully supports TLS for incoming and upstream traffic. - 🔐 **TLS Termination:** Fully supports TLS for upstreams and downstreams.
- 🛡️ **Built-in Authentication** Basic Auth, JWT, API key. - 🛡️ **Built-in Authentication** Basic Auth, JWT, API key.
- 🧠 **Header Injection:** Global and per-route header configuration. - 🧠 **Header Injection:** Global and per-route header configuration.
- 🧪 **Health Checks:** Pluggable health check methods for upstreams. - 🧪 **Health Checks:** Pluggable health check methods for upstreams.
@@ -57,15 +66,31 @@ Built on Rust, on top of **Cloudflares Pingora engine**, **Gazan** delivers w
### 🔧 `main.yaml` ### 🔧 `main.yaml`
- `proxy_address_http`: `0.0.0.0:6193` (HTTP listener) | Key | Example Value | Description |
- `proxy_address_tls`: `0.0.0.0:6194` (TLS listener, optional) |----------------------------------|--------------------------------------|--------------------------------------------------------------------------------------------------|
- `config_address`: `0.0.0.0:3000` (HTTP API for remote config push) | **threads** | 12 | Number of running daemon threads. Optional, defaults to 1 |
- `upstreams_conf`: `etc/upstreams.yaml` (location of upstreams config) | **user** | aralez | Optional, Username for running aralez after dropping root privileges, requires to launch as root |
- `log_level`: `info` (verbosity of logs) | **group** | aralez | Optional, Group for running aralez after dropping root privileges, requires to launch as root |
- `hc_method`: `HEAD`, `hc_interval`: `2s` (upstream health checks) | **daemon** | false | Run in background (boolean) |
- `user` Optional. Drop privileges to regular user. To bind to privileged ports. Requires to start as root. | **upstream_keepalive_pool_size** | 500 | Pool size for upstream keepalive connections |
- `group` Optional. Drop privileges to regular group | **pid_file** | /tmp/aralez.pid | Path to PID file |
- Other defaults: thread count, keep-alive pool size, etc. | **error_log** | /tmp/aralez_err.log | Path to error log file |
| **upgrade_sock** | /tmp/aralez.sock | Path to live upgrade socket file |
| **config_address** | 0.0.0.0:3000 | HTTP API address for pushing upstreams.yaml from remote location |
| **config_tls_address** | 0.0.0.0:3001 | HTTPS API address for pushing upstreams.yaml from remote location |
| **config_tls_certificate** | etc/server.crt | Certificate file path for API. Mandatory if proxy_address_tls is set, else optional |
| **config_tls_key_file** | etc/key.pem | Private Key file path. Mandatory if proxy_address_tls is set, else optional |
| **proxy_address_http** | 0.0.0.0:6193 | Aralez HTTP bind address |
| **proxy_address_tls** | 0.0.0.0:6194 | Aralez HTTPS bind address (Optional) |
| **proxy_certificates** | etc/certs/ | The directory containing certificate and key files. In a format {NAME}.crt, {NAME}.key. |
| **upstreams_conf** | etc/upstreams.yaml | The location of upstreams file |
| **log_level** | info | Log level , possible values : info, warn, error, debug, trace, off |
| **hc_method** | HEAD | Healthcheck method (HEAD, GET, POST are supported) UPPERCASE |
| **hc_interval** | 2 | Interval for health checks in seconds |
| **master_key** | 5aeff7f9-7b94-447c-af60-e8c488544a3e | Master key for working with API server and JWT Secret generation |
| **file_server_folder** | /some/local/folder | Optional, local folder to serve |
| **file_server_address** | 127.0.0.1:3002 | Optional, Local address for file server. Can set as upstream for public access |
| **config_api_enabled** | true | Boolean to enable/disable remote config push capability |
### 🌐 `upstreams.yaml` ### 🌐 `upstreams.yaml`
@@ -81,40 +106,58 @@ Built on Rust, on top of **Cloudflares Pingora engine**, **Gazan** delivers w
## 🛠 Installation ## 🛠 Installation
Download the prebuilt binary for your architecture from releases section of [GitHub](https://github.com/sadoyan/gazan/releases) repo Download the prebuilt binary for your architecture from releases section of [GitHub](https://github.com/sadoyan/aralez/releases) repo
Make the binary executable `chmod 755 ./gazan-VERSION` and run. Make the binary executable `chmod 755 ./aralez-VERSION` and run.
File names: File names:
| File Name | Description | | File Name | Description |
|--------------------------|---------------------------------------------------------------| |---------------------------|---------------------------------------------------------------|
| `gazan-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency | | `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
| `gazan-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies | | `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
| `gazan-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency | | `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
| `gazan-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies | | `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
## 💡 Note
In general **glibc** builds are working faster, but have few, basic, system dependencies for example :
```
linux-vdso.so.1 (0x00007ffeea33b000)
libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007f09e7377000)
libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f09e6320000)
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f09e613f000)
/lib64/ld-linux-x86-64.so.2 (0x00007f09e73b1000)
```
These are common to any Linux systems, so the binary should work on almost any Linux system.
**musl** builds are 100% portable, statically compiled binaries and have zero system dependencies.
In general musl builds have a little less performance.
The most intensive tests shows 107k-110k requests per second on **Glibc** binaries against 97k-100k **Musl** ones.
## 🔌 Running the Proxy ## 🔌 Running the Proxy
```bash ```bash
./gazan -c path/to/main.yaml ./aralez -c path/to/main.yaml
``` ```
## 🔌 Systemd integration ## 🔌 Systemd integration
```bash ```bash
cat > /etc/systemd/system/gazan.service <<EOF cat > /etc/systemd/system/aralez.service <<EOF
[Service] [Service]
Type=forking Type=forking
PIDFile=/run/gazan.pid PIDFile=/run/aralez.pid
ExecStart=/bin/gazan -d -c /etc/gazan.conf ExecStart=/bin/aralez -d -c /etc/aralez.conf
ExecReload=kill -QUIT $MAINPID ExecReload=kill -QUIT $MAINPID
ExecReload=/bin/gazan -u -d -c /etc/gazan.conf ExecReload=/bin/aralez -u -d -c /etc/aralez.conf
EOF EOF
``` ```
```bash ```bash
systemctl enable gazan.service. systemctl enable aralez.service.
systemctl restart gazan.service. systemctl restart aralez.service.
``` ```
## 💡 Example ## 💡 Example
@@ -123,18 +166,21 @@ A sample `upstreams.yaml` entry:
```yaml ```yaml
provider: "file" provider: "file"
stickysessions: false sticky_sessions: false
globals: to_https: false
headers: rate_limit: 10
- "Access-Control-Allow-Origin:*" headers:
- "Access-Control-Allow-Methods:POST, GET, OPTIONS" - "Access-Control-Allow-Origin:*"
- "Access-Control-Max-Age:86400" - "Access-Control-Allow-Methods:POST, GET, OPTIONS"
authorization: - "Access-Control-Max-Age:86400"
- "jwt" authorization:
- "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774" type: "jwt"
creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
myhost.mydomain.com: myhost.mydomain.com:
paths: paths:
"/": "/":
rate_limit: 20
to_https: false
headers: headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa" - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
- "X-Proxy-From:Hopaaaaaaaaaaaar" - "X-Proxy-From:Hopaaaaaaaaaaaar"
@@ -142,6 +188,7 @@ myhost.mydomain.com:
- "127.0.0.1:8000" - "127.0.0.1:8000"
- "127.0.0.2:8000" - "127.0.0.2:8000"
"/foo": "/foo":
to_https: true
headers: headers:
- "X-Another-Header:Hohohohoho" - "X-Another-Header:Hohohohoho"
servers: servers:
@@ -151,15 +198,22 @@ myhost.mydomain.com:
**This means:** **This means:**
- Sticky sessions are disabled globally. This setting applies to all upstreams. - Sticky sessions are disabled globally. This setting applies to all upstreams. If enabled all requests will be 301 redirected to HTTPS.
- HTTP to HTTPS redirect disabled globally, but can be overridden by `to_https` setting per upstream.
- Requests to each hosted domains will be limited to 10 requests per second per virtualhost.
- Requests limits are calculated per requester ip plus requested virtualhost.
- If the requester exceeds the limit it will receive `429 Too Many Requests` error.
- Optional. Rate limiter will be disabled if the parameter is entirely removed from config.
- Requests to `myhost.mydomain.com/` will be limited to 20 requests per second.
- Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`. - Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`.
- Plain HTTP to `myhost.mydomain.com/foo` will get 301 redirect to configured TLS port of Aralez.
- Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`. - Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`.
- SSL/TLS for upstreams is detected automatically, no need to set any config parameter. - SSL/TLS for upstreams is detected automatically, no need to set any config parameter.
- Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS. - Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS.
- Self signed certificates are silently accepted. - Self-signed certificates are silently accepted.
- Global headers (CORS for this case) will be injected to all upstreams - Global headers (CORS for this case) will be injected to all upstreams.
- Additional headers will be injected into the request for `myhost.mydomain.com`. - Additional headers will be injected into the request for `myhost.mydomain.com`.
- You can choose any path, deep nested paths are supported, the best match is chosen. - You can choose any path, deep nested paths are supported, the best match is chosen.
- All requests to servers will require JWT token authentication (You can comment out the authorization to disable it), - All requests to servers will require JWT token authentication (You can comment out the authorization to disable it),
- First parameter specifies the mechanism of authorisation `jwt` - First parameter specifies the mechanism of authorisation `jwt`
- Second is the secret key for validating `jwt` tokens - Second is the secret key for validating `jwt` tokens
@@ -185,10 +239,11 @@ To enable TLS for A proxy server: Currently only OpenSSL is supported, working o
## 📡 Remote Config API ## 📡 Remote Config API
You can push new `upstreams.yaml` over HTTP to `config_address` (`:3000` by default). Useful for CI/CD automation or remote config updates. Push new `upstreams.yaml` over HTTP to `config_address` (`:3000` by default). Useful for CI/CD automation or remote config updates.
URL parameter. `key=MASTERKEY` is required. `MASTERKEY` is the value of `master_key` in the `main.yaml`
```bash ```bash
curl -XPOST --data-binary @./etc/upstreams.txt 127.0.0.1:3000/conf curl -XPOST --data-binary @./etc/upstreams.txt 127.0.0.1:3000/conf?key=${MASTERKEY}
``` ```
--- ---
@@ -199,18 +254,18 @@ curl -XPOST --data-binary @./etc/upstreams.txt 127.0.0.1:3000/conf
- Only one method can be active at a time. - Only one method can be active at a time.
- `basic` : Standard HTTP Basic Authentication requests. - `basic` : Standard HTTP Basic Authentication requests.
- `apikey` : Authentication via `x-api-key` header, which should match the value in config. - `apikey` : Authentication via `x-api-key` header, which should match the value in config.
- `jwt`: JWT authentication implemented via `gazantoken=` url parameter. `/some/url?gazantoken=TOKEN` - `jwt`: JWT authentication implemented via `araleztoken=` url parameter. `/some/url?araleztoken=TOKEN`
- `jwt`: JWT authentication implemented via `Authorization: Bearer <token>` header. - `jwt`: JWT authentication implemented via `Authorization: Bearer <token>` header.
- To obtain JWT token, you should send **generate** request to built in api server's `/jwt` endpoint. - To obtain a JWT token, you should send a **generate** request to the built-in api server's `/jwt` endpoint.
- `masterkey`: should match configured `masterkey` in `main.yaml` and `upstreams.yaml`. - `master_key`: should match configured `masterkey` in `main.yaml` and `upstreams.yaml`.
- `owner` : Just a placeholder, can be anything. - `owner` : Just a placeholder, can be anything.
- `valid` : Time in minutes during which the generated token will be valid. - `valid` : Time in minutes during which the generated token will be valid.
**Example JWT token generateion request** **Example JWT token generation request**
```bash ```bash
PAYLOAD='{ PAYLOAD='{
"masterkey": "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774", "master_key": "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774",
"owner": "valod", "owner": "valod",
"valid": 10 "valid": 10
}' }'
@@ -230,7 +285,7 @@ curl -H "Authorization: Bearer ${TOK}" -H 'Host: myip.mydomain.com' http://127.0
With URL parameter (Very useful if you want to generate and share temporary links) With URL parameter (Very useful if you want to generate and share temporary links)
```bash ```bash
curl -H 'Host: myip.mydomain.com' "http://127.0.0.1:6193/?gazantoken=${TOK}`" curl -H 'Host: myip.mydomain.com' "http://127.0.0.1:6193/?araleztoken=${TOK}`"
``` ```
**Example Request with API Key** **Example Request with API Key**
@@ -261,3 +316,180 @@ curl -u username:password -H 'Host: myip.mydomain.com' http://127.0.0.1:6193/
- Transparent, fully automatic gRPC proxy. - Transparent, fully automatic gRPC proxy.
- Sticky session support. - Sticky session support.
- HTTP2 ready. - HTTP2 ready.
📊 Why Choose Aralez? Feature Comparison
| Feature | **Aralez** | **Nginx** | **HAProxy** | **Traefik** |
|----------------------------|----------------------------------------------------------------------|--------------------------|-------------------------|-----------------|
| **Hot Reload** | ✅ Yes (live, API/file) | ⚠️ Reloads config | ⚠️ Reloads config | ✅ Yes (dynamic) |
| **JWT Auth** | ✅ Built-in | ❌ External scripts | ❌ External Lua or agent | ⚠️ With plugins |
| **WebSocket Support** | ✅ Automatic | ⚠️ Manual config | ✅ Yes | ✅ Yes |
| **gRPC Support** | ✅ Automatic (no config) | ⚠️ Manual + HTTP/2 + TLS | ⚠️ Complex setup | ✅ Native |
| **TLS Termination** | ✅ Built-in (OpenSSL) | ✅ Yes | ✅ Yes | ✅ Yes |
| **TLS Upstream Detection** | ✅ Automatic | ❌ | ❌ | ❌ |
| **HTTP/2 Support** | ✅ Automatic | ⚠️ Requires extra config | ⚠️ Requires build flags | ✅ Native |
| **Sticky Sessions** | ✅ Cookie-based | ⚠️ In plus version only | ✅ | ✅ |
| **Prometheus Metrics** | ✅ [Built in](https://github.com/sadoyan/aralez/blob/main/METRICS.md) | ⚠️ With Lua or exporter | ⚠️ With external script | ✅ Native |
| **Built With** | 🦀 Rust | C | C | Go |
## 💡 Simple benchmark by [Oha](https://github.com/hatoo/oha)
⚠️ These benchmarks use :
- 3 async Rust echo servers on a local network with 1Gbit as upstreams.
- A dedicated server for running **Aralez**
- A dedicated server for running **Oha**
- The following upstreams configuration.
- 9 test URLs from simple `/` to nested up to 7 subpaths.
```yaml
myhost.mydomain.com:
paths:
"/":
to_https: false
headers:
- "X-Proxy-From:Aralez"
servers:
- "192.168.211.211:8000"
- "192.168.211.212:8000"
- "192.168.211.213:8000"
"/ping":
to_https: false
headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
- "X-Proxy-From:Aralez"
servers:
- "192.168.211.211:8000"
- "192.168.211.212:8000"
```
## 💡 Results reflect synthetic performance under optimal conditions.
- CPU : Intel(R) Xeon(R) CPU E3-1270 v6 @ 3.80GHz
- 300 : simultaneous connections
- Duration : 10 Minutes
- Binary : aralez-x86_64-glibc
```
Summary:
Success rate: 100.00%
Total: 600.0027 secs
Slowest: 0.2138 secs
Fastest: 0.0002 secs
Average: 0.0023 secs
Requests/sec: 129777.3838
Total data: 0 B
Size/request: 0 B
Size/sec: 0 B
Response time histogram:
0.000 [1] |
0.022 [77668026] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.043 [190362] |
0.064 [7908] |
0.086 [319] |
0.107 [4] |
0.128 [0] |
0.150 [0] |
0.171 [0] |
0.192 [0] |
0.214 [4] |
Response time distribution:
10.00% in 0.0012 secs
25.00% in 0.0016 secs
50.00% in 0.0020 secs
75.00% in 0.0026 secs
90.00% in 0.0033 secs
95.00% in 0.0040 secs
99.00% in 0.0078 secs
99.90% in 0.0278 secs
99.99% in 0.0434 secs
Details (average, fastest, slowest):
DNS+dialup: 0.0161 secs, 0.0002 secs, 0.0316 secs
DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
Status code distribution:
[200] 77866624 responses
Error distribution:
[158] aborted due to deadline
```
![Aralez](https://netangels.net/utils/glibc10.png)
- CPU : Intel(R) Xeon(R) CPU E3-1270 v6 @ 3.80GHz
- 300 : simultaneous connections
- Duration : 10 Minutes
- Binary : aralez-x86_64-musl
```
Summary:
Success rate: 100.00%
Total: 600.0021 secs
Slowest: 0.2182 secs
Fastest: 0.0002 secs
Average: 0.0024 secs
Requests/sec: 123870.5820
Total data: 0 B
Size/request: 0 B
Size/sec: 0 B
Response time histogram:
0.000 [1] |
0.022 [74254679] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
0.044 [61400] |
0.066 [5911] |
0.087 [385] |
0.109 [0] |
0.131 [0] |
0.153 [0] |
0.175 [0] |
0.196 [0] |
0.218 [1] |
Response time distribution:
10.00% in 0.0012 secs
25.00% in 0.0016 secs
50.00% in 0.0021 secs
75.00% in 0.0028 secs
90.00% in 0.0037 secs
95.00% in 0.0045 secs
99.00% in 0.0077 secs
99.90% in 0.0214 secs
99.99% in 0.0424 secs
Details (average, fastest, slowest):
DNS+dialup: 0.0066 secs, 0.0002 secs, 0.0210 secs
DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
Status code distribution:
[200] 74322377 responses
Error distribution:
[228] aborted due to deadline
```
![Aralez](https://netangels.net/utils/musl10.png)
## 🚀 Aralez, Nginx, Traefik performance benchmark
This benchmark is done on 4 servers. With CPU Intel(R) Xeon(R) E-2174G CPU @ 3.80GHz, 64 GB RAM.
1. Server runs Aralez, Traefik, Nginx on different ports. Tuned as much as I could.
2. 3x Upstreams servers, running Nginx. Replying with dummy json hardcoded in config file for max performance.
All servers are connected to the same switch with a 1GB port in a datacenter, not a home lab. The results:
![Aralez](https://raw.githubusercontent.com/sadoyan/aralez/refs/heads/main/assets/bench.png)
The results show requests per second performed by Load balancer. You can see 3 batches with 800 concurrent users.
1. Requests via http1.1 to plain text endpoint.
2. Requests via http2 to SSL endpoint.
3. Mixed workload with plain http1.1 and http2 SSL.

BIN
assets/bench.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 160 KiB

View File

@@ -1,18 +1,23 @@
# Main configuration file , applied on startup # Main configuration file, applied on startup
threads: 12 # Nubber of daemon threads default setting threads: 12 # Number of daemon threads default setting
#user: pastor # Username for running gazan after dropping root privileges, requires program to start as root #user: pastor # Username for running aralez after dropping root privileges, requires program to start as root
#group: pastor # Group for running gazan after dropping root privileges, requires program to start as root #group: pastor # Group for running aralez after dropping root privileges, requires program to start as root
daemon: false # Run in background daemon: false # Run in background
upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections
pid_file: /tmp/gazan.pid # Path to PID file pid_file: /tmp/aralez.pid # Path to PID file
error_log: /tmp/gazan_err.log # Path to error log error_log: /tmp/aralez_err.log # Path to error log
upgrade_sock: /tmp/gazan.sock # Path to socket file upgrade_sock: /tmp/aralez.sock # Path to socket file
config_api_enabled: true # Boolean to enable/disable remote config push capability.
config_address: 0.0.0.0:3000 # HTTP API address for pushing upstreams.yaml from remote location config_address: 0.0.0.0:3000 # HTTP API address for pushing upstreams.yaml from remote location
config_tls_address: 0.0.0.0:3001 # HTTP TLS API address for pushing upstreams.yaml from remote location
config_tls_certificate: /opt/Rust/Projects/asyncweb/etc/server.crt # Mandatory if config_tls_address is set
config_tls_key_file: /opt/Rust/Projects/asyncweb/etc/key.pem # Mandatory if config_tls_address is set
proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address
proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address
tls_certificate: etc/server.crt # Mandatory if proxy_address_tls is set proxy_certificates: /opt/Rust/Projects/asyncweb/etc/yoyo # Mandatory if proxy_address_tls set, should contain certificate and key files strictly in a format {NAME}.crt, {NAME}.key.
tls_key_file: etc/key.pem # Mandatory if proxy_address_tls is set upstreams_conf: /opt/Rust/Projects/asyncweb/etc/upstreams.yaml # the location of upstreams file
upstreams_conf: etc/upstreams.yaml # the location of upstreams file file_server_folder: /opt/storage # Optional, local folder to serve
file_server_address: 127.0.0.1:3002 # Optional, Local address for file server. Can set as upstream for public access.
log_level: info # info, warn, error, debug, trace, off log_level: info # info, warn, error, debug, trace, off
hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE
hc_interval: 2 #Interval for health checks in seconds hc_interval: 2 #Interval for health checks in seconds

View File

@@ -11,7 +11,7 @@ upstreams:
"/": "/":
ssl: false ssl: false
headers: headers:
- "X-Proxy-From:Gazan" - "X-Proxy-From:Aralez"
servers: servers:
- "192.168.221.213:8000" - "192.168.221.213:8000"
- "192.168.221.214:8000" - "192.168.221.214:8000"

View File

@@ -1,54 +1,54 @@
# The file is under watch and hot reload , changes are applied immediately, no need to restart or reload # The file under watch and hot reload, changes are applied immediately, no need to restart or reload.
provider: "file" # consul provider: "file" # consul
stickysessions: true sticky_sessions: false
globals: to_ssl: false
headers: # Global headers, appended for all upstreams and all paths. #rate_limit: 100
- "Access-Control-Allow-Origin:*" headers:
- "Access-Control-Allow-Methods:POST, GET, OPTIONS" - "Access-Control-Allow-Origin:*"
- "Access-Control-Max-Age:86400" - "Access-Control-Allow-Methods:POST, GET, OPTIONS"
- "X-Custom-Header:Something Special" - "Access-Control-Max-Age:86400"
# authorization: # Optional, only one of auth methods below can be active at a time - "X-Custom-Header:Something Special"
# - "basic" authorization:
# - "gazan:Gazanpass1234" type: "jwt"
# - "apikey" creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
# - "5a28cc4c-ce10-4ff1-824e-743c38835f5c" # type: "basic"
# - "jwt" # creds: "user:Passw0rd"
# - "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774" # type: "apikey"
consul: # If the provider is consul. Otherwise ignored # creds: "5ecbf799-1343-4e94-a9b5-e278af5cd313-56b45249-1839-4008-a450-a60dc76d2bae"
consul: # If the provider is consul. Otherwise, ignored.
servers: servers:
- "http://master1:8500" - "http://consul1:8500"
- "http://192.168.22.1:8500" - "http://consul2:8500"
- "http://master1.digitai.local:8500" - "http://consul3:8500"
services: # proxy: The hostname to access proxy server, real : The real service name in Consul services: # proxy: The hostname to access the proxy server, real : The real service name in Consul database.
- proxy: "proxy-frontend-dev-frontend-srv" - proxy: "proxy-frontend-dev-frontend-srv"
real: "frontend-dev-frontend-srv" real: "frontend-dev-frontend-srv"
# - proxy: "proxy-gateway-test-gateway-srv"
# real: "gateway-test-gateway-srv"
# - proxy: "proxy-backoffice-dev-backoffice-srv"
# real: "backoffice-dev-backoffice-srv"
token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled
upstreams: # If provider is files. Otherwise ignored upstreams:
myip.netangels.net: # Hostname, or header host to access the upstream myip.mydomain.com:
paths: # URL path(s) for current upstream, closest match wins paths:
rate_limit: 10 # Per path rate limit have higher priority than global rate limit. If not set, the global rate limit will be used
"/": "/":
headers: # Custom headers, set only for this Host and Path to_https: false
- "X-Proxy-From:Gazan" headers:
- "X-Proxy-From:Aralez"
servers: # List of upstreams HOST:PORT servers: # List of upstreams HOST:PORT
- "127.0.0.1:8000" - "127.0.0.1:8000"
- "127.0.0.2:8000" - "127.0.0.2:8000"
- "127.0.0.3:8000" - "127.0.0.3:8000"
- "127.0.0.4:8000" - "127.0.0.4:8000"
"/ping": "/ping":
to_https: true
headers: headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa" - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
- "X-Proxy-From:Gazan" - "X-Proxy-From:Aralez"
servers: servers:
- "127.0.0.1:8000" - "127.0.0.1:8000"
- "127.0.0.2:8000" - "127.0.0.2:8000"
"/draw": "/draw":
servers: servers:
- "192.168.1.1:8000" - "192.168.1.1:8000"
polo.netangels.net: polo.mydomain.com:
paths: paths:
"/": "/":
headers: headers:
@@ -60,36 +60,3 @@ upstreams: # If provider is files. Otherwise ignored
- "127.0.0.2:8000" - "127.0.0.2:8000"
- "127.0.0.3:8000" - "127.0.0.3:8000"
- "127.0.0.4:8000" - "127.0.0.4:8000"
glop.netangels.net:
paths:
"/":
headers:
- "X-Hopar-From:Hopaaaaaaaaaaaar"
servers:
- "192.168.1.10:8000"
- "192.168.1.1:8000"
apt.netangels.net:
paths:
"/":
servers:
- "apt.netangels.net:443"
test.netangels.net:
paths:
"/":
servers:
- "myip.netangels.net:80"
127.0.0.1:
paths:
"/":
servers:
- "192.168.1.5:8080"
127.0.0.2:
paths:
"/":
servers:
- "10.0.55.171:3000"
localpost:
paths:
"/":
servers:
- "127.0.0.1:9000"

View File

@@ -4,6 +4,8 @@ pub mod discovery;
mod filewatch; mod filewatch;
pub mod healthcheck; pub mod healthcheck;
pub mod jwt; pub mod jwt;
pub mod metrics;
pub mod parceyaml; pub mod parceyaml;
pub mod structs; pub mod structs;
pub mod tls;
pub mod tools; pub mod tools;

View File

@@ -37,7 +37,7 @@ impl AuthValidator for ApiKeyAuth<'_> {
impl AuthValidator for JwtAuth<'_> { impl AuthValidator for JwtAuth<'_> {
fn validate(&self, session: &Session) -> bool { fn validate(&self, session: &Session) -> bool {
let jwtsecret = self.0; let jwtsecret = self.0;
if let Some(tok) = get_query_param(session, "gazantoken") { if let Some(tok) = get_query_param(session, "araleztoken") {
return check_jwt(tok.as_str(), jwtsecret); return check_jwt(tok.as_str(), jwtsecret);
} }

View File

@@ -1,5 +1,5 @@
use crate::utils::parceyaml::load_configuration; use crate::utils::parceyaml::load_configuration;
use crate::utils::structs::{Configuration, ServiceMapping, UpstreamsDashMap}; use crate::utils::structs::{Configuration, InnerMap, ServiceMapping, UpstreamsDashMap};
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps}; use crate::utils::tools::{clone_dashmap_into, compare_dashmaps};
use dashmap::DashMap; use dashmap::DashMap;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
@@ -109,7 +109,7 @@ async fn consul_request(url: String, whitelist: Option<Vec<ServiceMapping>>, tok
Some(upstreams) Some(upstreams)
} }
async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<(String, u16, bool, bool)>, AtomicUsize)>> { async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let mut headers = HeaderMap::new(); let mut headers = HeaderMap::new();
if let Some(token) = token { if let Some(token) = token {
@@ -118,7 +118,7 @@ async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<Strin
let to = Duration::from_secs(1); let to = Duration::from_secs(1);
let u = client.get(url).timeout(to).send(); let u = client.get(url).timeout(to).send();
let mut values = Vec::new(); let mut values = Vec::new();
let upstreams: DashMap<String, (Vec<(String, u16, bool, bool)>, AtomicUsize)> = DashMap::new(); let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
match u.await { match u.await {
Ok(r) => { Ok(r) => {
let jason = r.json::<Vec<Service>>().await; let jason = r.json::<Vec<Service>>().await;
@@ -127,7 +127,14 @@ async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<Strin
for service in whitelist { for service in whitelist {
let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone(); let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone(); let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
let to_add = (addr, prt, false, false); let to_add = InnerMap {
address: addr,
port: prt,
is_ssl: false,
is_http2: false,
to_https: false,
rate_limit: None,
};
values.push(to_add); values.push(to_add);
} }
} }

View File

@@ -9,8 +9,14 @@ pub struct FromFileProvider {
pub path: String, pub path: String,
} }
pub struct APIUpstreamProvider { pub struct APIUpstreamProvider {
pub config_api_enabled: bool,
pub address: String, pub address: String,
pub masterkey: String, pub masterkey: String,
pub tls_address: Option<String>,
pub tls_certificate: Option<String>,
pub tls_key_file: Option<String>,
pub file_server_address: Option<String>,
pub file_server_folder: Option<String>,
} }
pub struct ConsulProvider { pub struct ConsulProvider {
@@ -25,7 +31,7 @@ pub trait Discovery {
#[async_trait] #[async_trait]
impl Discovery for APIUpstreamProvider { impl Discovery for APIUpstreamProvider {
async fn start(&self, toreturn: Sender<Configuration>) { async fn start(&self, toreturn: Sender<Configuration>) {
webserver::run_server(self.address.clone(), self.masterkey.clone(), toreturn).await; webserver::run_server(self, toreturn).await;
} }
} }

View File

@@ -1,4 +1,4 @@
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap}; use crate::utils::structs::{InnerMap, UpstreamsDashMap, UpstreamsIdMap};
use crate::utils::tools::*; use crate::utils::tools::*;
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info, warn}; use log::{error, info, warn};
@@ -9,9 +9,11 @@ use std::time::Duration;
use tokio::time::interval; use tokio::time::interval;
use tonic::transport::Endpoint; use tonic::transport::Endpoint;
#[allow(unused_assignments)]
pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>, idlist: Arc<UpstreamsIdMap>, params: (&str, u64)) { pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>, idlist: Arc<UpstreamsIdMap>, params: (&str, u64)) {
let mut period = interval(Duration::from_secs(params.1)); let mut period = interval(Duration::from_secs(params.1));
let mut first_run = 0; let mut first_run = 0;
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap();
loop { loop {
tokio::select! { tokio::select! {
_ = period.tick() => { _ = period.tick() => {
@@ -20,47 +22,44 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
for val in fclone.iter() { for val in fclone.iter() {
let host = val.key(); let host = val.key();
let inner = DashMap::new(); let inner = DashMap::new();
let mut _scheme: (String, u16, bool, bool) = ("".to_string(), 0, false, false); let mut scheme = InnerMap::new();
for path_entry in val.value().iter() { for path_entry in val.value().iter() {
// let inner = DashMap::new();
let path = path_entry.key(); let path = path_entry.key();
let mut innervec= Vec::new(); let mut innervec= Vec::new();
for k in path_entry.value().0 .iter().enumerate() { for k in path_entry.value().0 .iter().enumerate() {
let (ip, port, _ssl, _version) = k.1;
let mut _link = String::new(); let mut _link = String::new();
let tls = detect_tls(ip, port).await; let tls = detect_tls(k.1.address.as_str(), &k.1.port, &client).await;
let mut is_h2 = false; let mut is_h2 = false;
// if tls.1 == Some(Version::HTTP_11) {
// println!(" V1: ==> {:?}", tls.1)
// }else if tls.1 == Some(Version::HTTP_2) {
// is_h2 = true;
// println!(" V2: ==> {:?}", tls.1)
// }
if tls.1 == Some(Version::HTTP_2) { if tls.1 == Some(Version::HTTP_2) {
is_h2 = true; is_h2 = true;
// println!(" V2: ==> {} ==> {:?}", tls.0, tls.1)
} }
match tls.0 { match tls.0 {
true => _link = format!("https://{}:{}{}", ip, port, path), true => _link = format!("https://{}:{}{}", k.1.address, k.1.port, path),
false => _link = format!("http://{}:{}{}", ip, port, path), false => _link = format!("http://{}:{}{}", k.1.address, k.1.port, path),
} }
// if _pref == "https://" { scheme = InnerMap {
// _scheme = (ip.to_string(), *port, true); address: k.1.address.clone(),
// }else { port: k.1.port,
// _scheme = (ip.to_string(), *port, false); is_ssl: tls.0,
// } is_http2: is_h2,
_scheme = (ip.to_string(), *port, tls.0, is_h2); to_https: k.1.to_https,
// let link = format!("{}{}:{}{}", _pref, ip, port, path); rate_limit: k.1.rate_limit,
let resp = http_request(_link.as_str(), params.0, "").await; };
let resp = http_request(_link.as_str(), params.0, "", &client).await;
match resp.0 { match resp.0 {
true => { true => {
if resp.1 { if resp.1 {
_scheme = (ip.to_string(), *port, tls.0, true); scheme = InnerMap {
address: k.1.address.clone(),
port: k.1.port,
is_ssl: tls.0,
is_http2: is_h2,
to_https: k.1.to_https,
rate_limit: k.1.rate_limit,
};
} }
innervec.push(_scheme.clone()); innervec.push(scheme);
} }
false => { false => {
warn!("Dead Upstream : {}", _link); warn!("Dead Upstream : {}", _link);
@@ -75,7 +74,8 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
if first_run == 1 { if first_run == 1 {
info!("Performing initial hatchecks and upstreams ssl detection"); info!("Performing initial hatchecks and upstreams ssl detection");
clone_idmap_into(&totest, &idlist); clone_idmap_into(&totest, &idlist);
info!("Gazan is up and ready to serve requests"); info!("Aralez is up and ready to serve requests, the upstreams list is:");
print_upstreams(&totest)
} }
first_run+=1; first_run+=1;
@@ -90,33 +90,26 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
} }
} }
#[allow(dead_code)] async fn http_request(url: &str, method: &str, payload: &str, client: &Client) -> (bool, bool) {
async fn http_request(url: &str, method: &str, payload: &str) -> (bool, bool) {
let client = Client::builder().danger_accept_invalid_certs(true).build().unwrap();
let timeout = Duration::from_secs(1);
if !["POST", "GET", "HEAD"].contains(&method) { if !["POST", "GET", "HEAD"].contains(&method) {
error!("Method {} not supported. Only GET|POST|HEAD are supported ", method); error!("Method {} not supported. Only GET|POST|HEAD are supported ", method);
return (false, false); return (false, false);
} }
async fn send_request(client: &Client, method: &str, url: &str, payload: &str, timeout: Duration) -> Option<reqwest::Response> { async fn send_request(client: &Client, method: &str, url: &str, payload: &str) -> Option<reqwest::Response> {
match method { match method {
"POST" => client.post(url).body(payload.to_owned()).timeout(timeout).send().await.ok(), "POST" => client.post(url).body(payload.to_owned()).send().await.ok(),
"GET" => client.get(url).timeout(timeout).send().await.ok(), "GET" => client.get(url).send().await.ok(),
"HEAD" => client.head(url).timeout(timeout).send().await.ok(), "HEAD" => client.head(url).send().await.ok(),
_ => None, _ => None,
} }
} }
match send_request(&client, method, url, payload, timeout).await { match send_request(&client, method, url, payload).await {
Some(response) => { Some(response) => {
let status = response.status().as_u16(); let status = response.status().as_u16();
((99..499).contains(&status), false) ((99..499).contains(&status), false)
} }
None => { None => (ping_grpc(&url).await, true),
// let fallback_url = url.replace("https", "http");
// ping_grpc(&fallback_url).await
(ping_grpc(&url).await, true)
}
} }
} }
@@ -127,10 +120,7 @@ pub async fn ping_grpc(addr: &str) -> bool {
let endpoint = endpoint.timeout(Duration::from_secs(2)); let endpoint = endpoint.timeout(Duration::from_secs(2));
match tokio::time::timeout(Duration::from_secs(3), endpoint.connect()).await { match tokio::time::timeout(Duration::from_secs(3), endpoint.connect()).await {
Ok(Ok(_channel)) => { Ok(Ok(_channel)) => true,
// println!("{:?} ==> {:?} ==> {}", endpoint, _channel, addr);
true
}
_ => false, _ => false,
} }
} else { } else {
@@ -138,15 +128,24 @@ pub async fn ping_grpc(addr: &str) -> bool {
} }
} }
async fn detect_tls(ip: &str, port: &u16) -> (bool, Option<Version>) { async fn detect_tls(ip: &str, port: &u16, client: &Client) -> (bool, Option<Version>) {
let url = format!("https://{}:{}", ip, port); let https_url = format!("https://{}:{}", ip, port);
// let url = format!("{}:{}", ip, port); match client.get(&https_url).send().await {
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap(); Ok(response) => {
match client.get(&url).send().await { // println!("{} => {:?} (HTTPS)", https_url, response.version());
Ok(response) => (true, Some(response.version())), return (true, Some(response.version()));
Err(e) => { }
if e.is_builder() || e.is_connect() || e.to_string().contains("tls") { _ => {}
(false, None) }
let http_url = format!("http://{}:{}", ip, port);
match client.get(&http_url).send().await {
Ok(response) => {
// println!("{} => {:?} (HTTP)", http_url, response.version());
(false, Some(response.version()))
}
Err(_) => {
if ping_grpc(&http_url).await {
(false, Some(Version::HTTP_2))
} else { } else {
(false, None) (false, None)
} }

87
src/utils/metrics.rs Normal file
View File

@@ -0,0 +1,87 @@
use pingora_http::Version;
use prometheus::{register_histogram, register_int_counter, register_int_counter_vec, Histogram, IntCounter, IntCounterVec};
use std::time::Duration;
pub struct MetricTypes {
pub method: String,
pub code: String,
pub latency: Duration,
pub version: Version,
}
lazy_static::lazy_static! {
pub static ref REQUEST_COUNT: IntCounter = register_int_counter!(
"aralez_requests_total",
"Total number of requests handled by Aralez"
).unwrap();
pub static ref RESPONSE_CODES: IntCounterVec = register_int_counter_vec!(
"aralez_responses_total",
"Responses grouped by status code",
&["status"]
).unwrap();
pub static ref REQUEST_LATENCY: Histogram = register_histogram!(
"aralez_request_latency_seconds",
"Request latency in seconds",
vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0]
).unwrap();
pub static ref RESPONSE_LATENCY: Histogram = register_histogram!(
"aralez_response_latency_seconds",
"Response latency in seconds",
vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0]
).unwrap();
pub static ref REQUESTS_BY_METHOD: IntCounterVec = register_int_counter_vec!(
"aralez_requests_by_method_total",
"Number of requests by HTTP method",
&["method"]
).unwrap();
pub static ref REQUESTS_BY_VERSION: IntCounterVec = register_int_counter_vec!(
"aralez_requests_by_version_total",
"Number of requests by HTTP versions",
&["version"]
).unwrap();
pub static ref ERROR_COUNT: IntCounter = register_int_counter!(
"aralez_errors_total",
"Total number of errors"
).unwrap();
}
pub fn calc_metrics(metric_types: &MetricTypes) {
REQUEST_COUNT.inc();
let timer = REQUEST_LATENCY.start_timer();
timer.observe_duration();
let version_str = match &metric_types.version {
&Version::HTTP_11 => "HTTP/1.1",
&Version::HTTP_2 => "HTTP/2.0",
&Version::HTTP_3 => "HTTP/3.0",
&Version::HTTP_10 => "HTTP/1.0",
_ => "Unknown",
};
REQUESTS_BY_VERSION.with_label_values(&[&version_str]).inc();
RESPONSE_CODES.with_label_values(&[&metric_types.code.to_string()]).inc();
REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc();
RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64());
}
/*
pub fn calc_metrics(method: String, code: u16, latency: Duration) {
REQUEST_COUNT.inc();
let timer = REQUEST_LATENCY.start_timer();
timer.observe_duration();
RESPONSE_CODES.with_label_values(&[&code.to_string()]).inc();
REQUESTS_BY_METHOD.with_label_values(&[&method]).inc();
RESPONSE_LATENCY.observe(latency.as_secs_f64());
}
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(5));
loop {
interval.tick().await;
// read Pingora stats
let stats = pingora.get_stats();
// update Prometheus metrics accordingly
REQUEST_COUNT.set(stats.requests_total);
// ... etc
}
});
*/

View File

@@ -1,129 +1,153 @@
use crate::utils::structs::*; use crate::utils::structs::*;
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info, warn}; use log::{error, info, warn};
use serde_yaml::Error;
use std::collections::HashMap; use std::collections::HashMap;
use std::fs; use std::fs;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
pub fn load_configuration(d: &str, kind: &str) -> Option<Configuration> { pub fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
let mut toreturn: Configuration = Configuration { let yaml_data = match kind {
upstreams: Default::default(), "filepath" => match fs::read_to_string(d) {
headers: Default::default(), Ok(data) => {
consul: None, info!("Reading upstreams from {}", d);
typecfg: "".to_string(), data
extraparams: Extraparams { }
stickysessions: false, Err(e) => {
authentication: DashMap::new(), error!("Reading: {}: {:?}", d, e);
warn!("Running with empty upstreams list, update it via API");
return None;
}
}, },
};
toreturn.upstreams = UpstreamsDashMap::new();
toreturn.headers = Headers::new();
let mut yaml_data = d.to_string();
match kind {
"filepath" => {
let _ = match fs::read_to_string(d) {
Ok(data) => {
info!("Reading upstreams from {}", d);
yaml_data = data
}
Err(e) => {
error!("Reading: {}: {:?}", d, e.to_string());
warn!("Running with empty upstreams list, update it via API");
return None;
}
};
}
"content" => { "content" => {
info!("Reading upstreams from API post body"); info!("Reading upstreams from API post body");
d.to_string()
} }
_ => error!("Mismatched parameter, only filepath|content is allowed "), _ => {
} error!("Mismatched parameter, only filepath|content is allowed");
return None;
}
};
let p: Result<Config, Error> = serde_yaml::from_str(&yaml_data); let parsed: Config = match serde_yaml::from_str(&yaml_data) {
match p { Ok(cfg) => cfg,
Ok(parsed) => {
let global_headers = DashMap::new();
let mut hl = Vec::new();
if let Some(globals) = &parsed.globals {
for headers in globals.get("headers").iter().by_ref() {
for header in headers.iter() {
if let Some((key, val)) = header.split_once(':') {
hl.push((key.to_string(), val.to_string()));
}
}
}
global_headers.insert("/".to_string(), hl);
toreturn.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
toreturn.extraparams.stickysessions = parsed.stickysessions;
let cfg = DashMap::new();
if let Some(k) = globals.get("authorization") {
cfg.insert("authorization".to_string(), k.to_owned());
toreturn.extraparams.authentication = cfg;
} else {
toreturn.extraparams.authentication = DashMap::new();
}
}
match parsed.provider.as_str() {
"file" => {
toreturn.typecfg = "file".to_string();
if let Some(upstream) = parsed.upstreams {
for (hostname, host_config) in upstream {
let path_map = DashMap::new();
let header_list = DashMap::new();
for (path, path_config) in host_config.paths {
let mut server_list = Vec::new();
let mut hl = Vec::new();
if let Some(headers) = &path_config.headers {
for header in headers.iter().by_ref() {
if let Some((key, val)) = header.split_once(':') {
hl.push((key.to_string(), val.to_string()));
}
}
}
header_list.insert(path.clone(), hl);
for server in path_config.servers {
if let Some((ip, port_str)) = server.split_once(':') {
if let Ok(port) = port_str.parse::<u16>() {
// server_list.push((ip.to_string(), port, path_config.ssl));
server_list.push((ip.to_string(), port, true, false));
}
}
}
path_map.insert(path, (server_list, AtomicUsize::new(0)));
}
toreturn.headers.insert(hostname.clone(), header_list);
toreturn.upstreams.insert(hostname, path_map);
}
}
Some(toreturn)
}
"consul" => {
toreturn.typecfg = "consul".to_string();
let consul = parsed.consul;
match consul {
Some(consul) => {
toreturn.consul = Some(consul);
Some(toreturn)
}
None => None,
}
}
"kubernetes" => None,
_ => {
warn!("Unknown provider {}", parsed.provider);
None
}
}
}
Err(e) => { Err(e) => {
error!("Failed to parse upstreams file: {}", e); error!("Failed to parse upstreams file: {}", e);
return None;
}
};
let mut toreturn = Configuration::default();
populate_headers_and_auth(&mut toreturn, &parsed);
toreturn.typecfg = parsed.provider.clone();
match parsed.provider.as_str() {
"file" => {
populate_file_upstreams(&mut toreturn, &parsed);
Some(toreturn)
}
"consul" => {
toreturn.consul = parsed.consul;
if toreturn.consul.is_some() {
Some(toreturn)
} else {
None
}
}
"kubernetes" => None,
_ => {
warn!("Unknown provider {}", parsed.provider);
None None
} }
} }
} }
fn populate_headers_and_auth(config: &mut Configuration, parsed: &Config) {
if let Some(headers) = &parsed.headers {
let mut hl = Vec::new();
for header in headers {
if let Some((key, val)) = header.split_once(':') {
hl.push((key.trim().to_string(), val.trim().to_string()));
}
}
let global_headers = DashMap::new();
global_headers.insert("/".to_string(), hl);
config.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
}
config.extraparams.sticky_sessions = parsed.sticky_sessions;
config.extraparams.to_https = parsed.to_https;
config.extraparams.rate_limit = parsed.rate_limit;
if let Some(rate) = &parsed.rate_limit {
info!("Applied Global Rate Limit : {} request per second", rate);
}
if let Some(auth) = &parsed.authorization {
let name = auth.get("type").unwrap_or(&"".to_string()).to_string();
let creds = auth.get("creds").unwrap_or(&"".to_string()).to_string();
config.extraparams.authentication.insert("authorization".to_string(), vec![name, creds]);
} else {
config.extraparams.authentication = DashMap::new();
}
}
fn populate_file_upstreams(config: &mut Configuration, parsed: &Config) {
if let Some(upstreams) = &parsed.upstreams {
for (hostname, host_config) in upstreams {
let path_map = DashMap::new();
let header_list = DashMap::new();
for (path, path_config) in &host_config.paths {
if let Some(rate) = &path_config.rate_limit {
info!("Applied Rate Limit for {} : {} request per second", hostname, rate);
}
let mut server_list = Vec::new();
let mut hl = Vec::new();
if let Some(headers) = &path_config.headers {
for header in headers {
if let Some((key, val)) = header.split_once(':') {
hl.push((key.trim().to_string(), val.trim().to_string()));
}
}
}
header_list.insert(path.clone(), hl);
for server in &path_config.servers {
// let mut rate: Option<isize> = None;
// let size: isize = path_config.servers.len() as isize;
// if let Some(limit) = &path_config.rate_limit {
// if size > 0 {
// rate = Some(limit / size);
// }
// }
if let Some((ip, port_str)) = server.split_once(':') {
if let Ok(port) = port_str.parse::<u16>() {
server_list.push(InnerMap {
address: ip.trim().to_string(),
port,
is_ssl: true,
is_http2: false,
to_https: path_config.to_https.unwrap_or(false),
// rate_limit: rate,
rate_limit: path_config.rate_limit,
});
}
}
}
path_map.insert(path.clone(), (server_list, AtomicUsize::new(0)));
}
config.headers.insert(hostname.clone(), header_list);
config.upstreams.insert(hostname.clone(), path_map);
}
}
}
pub fn parce_main_config(path: &str) -> AppConfig { pub fn parce_main_config(path: &str) -> AppConfig {
info!("Parsing configuration"); info!("Parsing configuration");
let data = fs::read_to_string(path).unwrap(); let data = fs::read_to_string(path).unwrap();
@@ -139,5 +163,12 @@ pub fn parce_main_config(path: &str) -> AppConfig {
cfo.local_server = Option::from((ip.to_string(), port)); cfo.local_server = Option::from((ip.to_string(), port));
} }
} }
if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() {
if let Some((_, port_str)) = tlsport_cfg.split_once(':') {
if let Ok(port) = port_str.parse::<u16>() {
cfo.proxy_port_tls = Some(port);
}
}
};
cfo cfo
} }

View File

@@ -3,48 +3,81 @@ use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<(String, u16, bool, bool)>, AtomicUsize)>>; // pub type InnerMap = BackendConfig;
pub type UpstreamsIdMap = DashMap<String, (String, u16, bool, bool)>; pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<InnerMap>, AtomicUsize)>>;
// #[derive(Debug, Default)]
// pub struct UpstreamsMap {
// pub upstreams: DashMap<String, DashMap<String, (Vec<InnerMap>, AtomicUsize)>>,
// pub ratelimit: DashMap<String, Option<isize>>,
// }
// impl UpstreamsMap {
// pub fn new() -> Self {
// Self {
// upstreams: Default::default(),
// ratelimit: Default::default(),
// }
// }
// }
//
// pub type XUpstreamsDashMap = DashMap<String, UpstreamsMap>;
pub type UpstreamsIdMap = DashMap<String, InnerMap>;
pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>; pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>;
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ServiceMapping { pub struct ServiceMapping {
pub proxy: String, pub proxy: String,
pub real: String, pub real: String,
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug, Default)]
pub struct Extraparams { pub struct Extraparams {
pub stickysessions: bool, pub sticky_sessions: bool,
pub to_https: Option<bool>,
pub authentication: DashMap<String, Vec<String>>, pub authentication: DashMap<String, Vec<String>>,
pub rate_limit: Option<isize>,
} }
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Consul { pub struct Consul {
pub servers: Option<Vec<String>>, pub servers: Option<Vec<String>>,
pub services: Option<Vec<ServiceMapping>>, pub services: Option<Vec<ServiceMapping>>,
pub token: Option<String>, pub token: Option<String>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct Config { pub struct Config {
pub provider: String, pub provider: String,
pub stickysessions: bool, pub sticky_sessions: bool,
pub to_https: Option<bool>,
#[serde(default)]
pub upstreams: Option<HashMap<String, HostConfig>>, pub upstreams: Option<HashMap<String, HostConfig>>,
#[serde(default)]
pub globals: Option<HashMap<String, Vec<String>>>, pub globals: Option<HashMap<String, Vec<String>>>,
#[serde(default)]
pub headers: Option<Vec<String>>,
#[serde(default)]
pub authorization: Option<HashMap<String, String>>,
#[serde(default)]
pub consul: Option<Consul>, pub consul: Option<Consul>,
#[serde(default)]
pub rate_limit: Option<isize>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct HostConfig { pub struct HostConfig {
pub paths: HashMap<String, PathConfig>, pub paths: HashMap<String, PathConfig>,
pub rate_limit: Option<isize>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct PathConfig { pub struct PathConfig {
pub servers: Vec<String>, pub servers: Vec<String>,
pub to_https: Option<bool>,
pub headers: Option<Vec<String>>, pub headers: Option<Vec<String>>,
pub rate_limit: Option<isize>,
} }
#[derive(Debug)] #[derive(Debug, Default)]
pub struct Configuration { pub struct Configuration {
pub upstreams: UpstreamsDashMap, pub upstreams: UpstreamsDashMap,
pub headers: Headers, pub headers: Headers,
@@ -53,17 +86,46 @@ pub struct Configuration {
pub extraparams: Extraparams, pub extraparams: Extraparams,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct AppConfig { pub struct AppConfig {
pub hc_interval: u16, pub hc_interval: u16,
pub hc_method: String, pub hc_method: String,
pub upstreams_conf: String, pub upstreams_conf: String,
pub log_level: String, pub log_level: String,
pub master_key: String,
pub config_address: String, pub config_address: String,
pub proxy_address_http: String, pub proxy_address_http: String,
pub master_key: String, pub config_api_enabled: bool,
pub config_tls_address: Option<String>,
pub config_tls_certificate: Option<String>,
pub config_tls_key_file: Option<String>,
pub proxy_address_tls: Option<String>, pub proxy_address_tls: Option<String>,
pub tls_certificate: Option<String>, pub proxy_port_tls: Option<u16>,
pub tls_key_file: Option<String>,
pub local_server: Option<(String, u16)>, pub local_server: Option<(String, u16)>,
pub proxy_certificates: Option<String>,
pub file_server_address: Option<String>,
pub file_server_folder: Option<String>,
}
/// A single upstream backend entry: where to connect and how to speak to it.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)]
pub struct InnerMap {
    /// Backend host (IP or name) to connect to.
    pub address: String,
    /// Backend TCP port.
    pub port: u16,
    /// Connect to the backend over TLS.
    pub is_ssl: bool,
    /// Negotiate HTTP/2 (ALPN h2) with the backend.
    pub is_http2: bool,
    /// Redirect plain-HTTP clients to HTTPS for this backend.
    pub to_https: bool,
    /// Per-backend request rate limit; `None` means no per-backend limit.
    pub rate_limit: Option<isize>,
}

impl InnerMap {
    /// Creates an empty entry; identical to `InnerMap::default()`.
    ///
    /// All fields implement `Default`, so the derive replaces the previous
    /// field-by-field `Default::default()` boilerplate.
    pub fn new() -> Self {
        Self::default()
    }
}

193
src/utils/tls.rs Normal file
View File

@@ -0,0 +1,193 @@
use dashmap::DashMap;
use log::error;
use pingora::tls::ssl::{select_next_proto, AlpnError, NameType, SniError, SslAlert, SslContext, SslFiletype, SslMethod, SslRef};
use rustls_pemfile::{read_one, Item};
use serde::Deserialize;
use std::collections::HashSet;
use std::fs::File;
use std::io::BufReader;
// use tokio::time::Instant;
use x509_parser::extensions::GeneralName;
use x509_parser::nom::Err as NomErr;
use x509_parser::prelude::*;
/// Paths to one PEM certificate chain and its matching private key file.
#[derive(Clone, Deserialize, Debug)]
pub struct CertificateConfig {
    // Path to the PEM certificate (chain) file, e.g. "certs/example.com.crt".
    pub cert_path: String,
    // Path to the PEM private key file for the certificate above.
    pub key_path: String,
}
/// A loaded certificate: the DNS names it covers plus a ready-to-use SSL context.
#[derive(Debug)]
struct CertificateInfo {
    // Subject common names (CN) extracted from the leaf certificate.
    common_names: Vec<String>,
    // Subject-alternative DNS names that are not already listed as a CN.
    alt_names: Vec<String>,
    // Context pre-built from this cert/key pair, swapped in on SNI match.
    ssl_context: SslContext,
    #[allow(dead_code)]
    cert_path: String, // Only used for logging
    #[allow(dead_code)]
    key_path: String, // Only used for logging
}
/// All loaded certificates plus an exact-name index for SNI lookup.
#[derive(Debug)]
pub struct Certificates {
    // Full list, scanned linearly for wildcard ("*.domain") matches.
    configs: Vec<CertificateInfo>,
    // Exact server-name -> context index (covers both CNs and SANs).
    name_map: DashMap<String, SslContext>,
    // Cert/key presented when the client sends no SNI (first configured pair).
    pub default_cert_path: String,
    pub default_key_path: String,
}
impl Certificates {
pub fn new(configs: &Vec<CertificateConfig>) -> Option<Self> {
let default_cert = configs.first().expect("At least one TLS certificate required");
let mut cert_infos = Vec::new();
let name_map: DashMap<String, SslContext> = DashMap::new();
for config in configs {
let cert_info = load_cert_info(&config.cert_path, &config.key_path);
match cert_info {
Some(cert) => {
for name in &cert.common_names {
name_map.insert(name.clone(), cert.ssl_context.clone());
}
for name in &cert.alt_names {
name_map.insert(name.clone(), cert.ssl_context.clone());
}
cert_infos.push(cert)
}
None => {
error!("Unable to load certificate info | public: {}, private: {}", &config.cert_path, &config.key_path);
return None;
}
}
}
Some(Self {
name_map: name_map,
configs: cert_infos,
default_cert_path: default_cert.cert_path.clone(),
default_key_path: default_cert.key_path.clone(),
})
}
fn find_ssl_context(&self, server_name: &str) -> Option<SslContext> {
if let Some(ctx) = self.name_map.get(server_name) {
return Some(ctx.clone());
}
for config in &self.configs {
for name in &config.common_names {
if name.starts_with("*.") && server_name.ends_with(&name[1..]) {
return Some(config.ssl_context.clone());
}
}
for name in &config.alt_names {
if name.starts_with("*.") && server_name.ends_with(&name[1..]) {
return Some(config.ssl_context.clone());
}
}
}
None
}
pub fn server_name_callback(&self, ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert) -> Result<(), SniError> {
let server_name = ssl_ref.servername(NameType::HOST_NAME);
log::debug!("TLS connect: server_name = {:?}, ssl_ref = {:?}, ssl_alert = {:?}", server_name, ssl_ref, ssl_alert);
// let start_time = Instant::now();
if let Some(name) = server_name {
match self.find_ssl_context(name) {
Some(ctx) => {
ssl_ref.set_ssl_context(&*ctx).map_err(|_| SniError::ALERT_FATAL)?;
}
None => {
log::debug!("No matching server name found");
}
}
}
// println!("Context ==> {:?} <==", start_time.elapsed());
Ok(())
}
}
/// Reads the leaf certificate at `cert_path`, extracts its subject common
/// names and SAN DNS names, and builds an SSL context from the cert/key pair.
/// Returns `None` (after logging) on any open/decode/parse/context failure.
fn load_cert_info(cert_path: &str, key_path: &str) -> Option<CertificateInfo> {
    // Open the certificate file and decode the first PEM item.
    let file = match File::open(cert_path) {
        Ok(f) => f,
        Err(e) => {
            log::error!("Failed to open certificate file: {:?}", e);
            return None;
        }
    };
    let mut reader = BufReader::new(file);
    let leaf = match read_one(&mut reader) {
        Ok(item) => item,
        Err(e) => {
            log::error!("Failed to decode PEM from certificate file: {:?}", e);
            return None;
        }
    };
    // Only an X.509 certificate item is acceptable as the first PEM entry.
    let der = match leaf {
        Some(Item::X509Certificate(cert)) => cert,
        _ => {
            log::error!("Failed to read certificate");
            return None;
        }
    };
    let x509 = match X509Certificate::from_der(&der) {
        Ok((_, parsed)) => parsed,
        Err(NomErr::Error(e)) | Err(NomErr::Failure(e)) => {
            log::error!("Failed to parse certificate: {:?}", e);
            return None;
        }
        Err(_) => {
            log::error!("Unknown error while parsing certificate");
            return None;
        }
    };
    // Collect subject common names.
    let mut common_names = HashSet::new();
    for attr in x509.subject().iter_common_name() {
        if let Ok(cn) = attr.as_str() {
            common_names.insert(cn.to_string());
        }
    }
    // Collect SAN DNS names, skipping ones already present as a CN.
    let mut alt_names = HashSet::new();
    if let Ok(Some(san)) = x509.subject_alternative_name() {
        for general_name in san.value.general_names.iter() {
            if let GeneralName::DNSName(dns) = general_name {
                let dns_string = dns.to_string();
                if !common_names.contains(&dns_string) {
                    alt_names.insert(dns_string);
                }
            }
        }
    }
    match create_ssl_context(cert_path, key_path) {
        Ok(ssl_context) => Some(CertificateInfo {
            cert_path: cert_path.to_string(),
            key_path: key_path.to_string(),
            common_names: common_names.into_iter().collect(),
            alt_names: alt_names.into_iter().collect(),
            ssl_context,
        }),
        Err(_) => {
            log::error!("Failed to create SSL context from cert paths");
            None
        }
    }
}
/// Builds an `SslContext` from a PEM certificate chain and private key,
/// with ALPN selection preferring HTTP/2 (see `prefer_h2`).
fn create_ssl_context(cert_path: &str, key_path: &str) -> Result<SslContext, Box<dyn std::error::Error>> {
    let mut builder = SslContext::builder(SslMethod::tls())?;
    builder.set_certificate_chain_file(cert_path)?;
    builder.set_private_key_file(key_path, SslFiletype::PEM)?;
    builder.set_alpn_select_callback(prefer_h2);
    Ok(builder.build())
}
/// ALPN select callback: prefers "h2", falls back to "http/1.1".
/// The server list is in wire format (length-prefixed protocol names);
/// returns `NOACK` when the client offers neither protocol.
pub fn prefer_h2<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
    select_next_proto(b"\x02h2\x08http/1.1", alpn_in).ok_or(AlpnError::NOACK)
}

View File

@@ -1,10 +1,17 @@
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap}; use crate::utils::structs::{InnerMap, UpstreamsDashMap, UpstreamsIdMap};
use crate::utils::tls;
use crate::utils::tls::CertificateConfig;
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info};
use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use std::any::type_name; use std::any::type_name;
use std::collections::HashSet; use std::collections::{HashMap, HashSet};
use std::fmt::Write; use std::fmt::Write;
use std::fs;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Sender};
use std::time::{Duration, Instant};
#[allow(dead_code)] #[allow(dead_code)]
pub fn print_upstreams(upstreams: &UpstreamsDashMap) { pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
@@ -14,10 +21,12 @@ pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
for path_entry in host_entry.value().iter() { for path_entry in host_entry.value().iter() {
let path = path_entry.key(); let path = path_entry.key();
println!(" Path: {}", path); println!(" Path: {}", path);
for f in path_entry.value().0.clone() {
for (ip, port, ssl, vers) in path_entry.value().0.clone() { println!(
println!(" ===> IP: {}, Port: {}, SSL: {}, H2: {}", ip, port, ssl, vers); " IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}",
f.address, f.port, f.is_ssl, f.is_http2, f.to_https
);
} }
} }
} }
@@ -133,16 +142,81 @@ pub fn clone_idmap_into(original: &UpstreamsDashMap, cloned: &UpstreamsIdMap) {
let new_vec = vec.clone(); let new_vec = vec.clone();
for x in vec.iter() { for x in vec.iter() {
let mut id = String::new(); let mut id = String::new();
write!(&mut id, "{}:{}:{}", x.0, x.1, x.2).unwrap(); write!(&mut id, "{}:{}:{}", x.address, x.port, x.is_ssl).unwrap();
let mut hasher = Sha256::new(); let mut hasher = Sha256::new();
hasher.update(id.clone().into_bytes()); hasher.update(id.clone().into_bytes());
let hash = hasher.finalize(); let hash = hasher.finalize();
let hex_hash = base16ct::lower::encode_string(&hash); let hex_hash = base16ct::lower::encode_string(&hash);
let hh = hex_hash[0..50].to_string(); let hh = hex_hash[0..50].to_string();
cloned.insert(id, (hh.clone(), 0000, false, false)); let to_add = InnerMap {
address: hh.clone(),
port: 0,
is_ssl: false,
is_http2: false,
to_https: false,
rate_limit: None,
};
cloned.insert(id, to_add);
cloned.insert(hh, x.to_owned()); cloned.insert(hh, x.to_owned());
} }
new_inner_map.insert(path.clone(), new_vec); new_inner_map.insert(path.clone(), new_vec);
} }
} }
} }
pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
let mut f = HashMap::new();
let mut certificate_configs: Vec<tls::CertificateConfig> = vec![];
let paths = fs::read_dir(dir).unwrap();
for path in paths {
let path_str = path.unwrap().path().to_str().unwrap().to_owned();
if path_str.ends_with(".crt") {
let name = path_str.replace(".crt", "");
let mut inner = vec![];
let domain = name.split("/").collect::<Vec<&str>>();
inner.push(name.clone() + ".crt");
inner.push(name.clone() + ".key");
f.insert(domain[domain.len() - 1].to_owned(), inner);
let y = CertificateConfig {
cert_path: name.clone() + ".crt",
key_path: name.clone() + ".key",
};
certificate_configs.push(y);
}
}
for (_, v) in f.iter() {
let y = CertificateConfig {
cert_path: v[0].clone(),
key_path: v[1].clone(),
};
certificate_configs.push(y);
}
certificate_configs
}
/// Watches the certificate folder at `path` and pushes refreshed certificate
/// lists through `sender`.
///
/// Sends the initial directory listing immediately so the consumer has
/// certificates before any filesystem event, then loops forever: on a
/// data-modify, create, or remove event it re-lists the folder and sends the
/// new configs, throttled to at most one reload per second. In practice this
/// never returns `Ok` (the loop is infinite); errors propagate only from the
/// watcher setup or a closed `sender`.
pub fn watch_folder(path: String, sender: Sender<Vec<CertificateConfig>>) -> notify::Result<()> {
    let (tx, rx) = channel();
    let mut watcher = RecommendedWatcher::new(tx, Config::default())?;
    watcher.watch(path.as_ref(), RecursiveMode::Recursive)?;
    info!("Watching for certificates in : {}", path);
    // Initial load: publish the current certificate set right away.
    let certificate_configs = listdir(path.clone());
    sender.send(certificate_configs)?;
    let mut start = Instant::now();
    loop {
        // 1s recv timeout keeps the loop responsive; a timeout lands in the
        // final Err(_) arm below and simply polls again.
        match rx.recv_timeout(Duration::from_secs(1)) {
            Ok(Ok(event)) => match &event.kind {
                EventKind::Modify(ModifyKind::Data(_)) | EventKind::Create(_) | EventKind::Remove(_) => {
                    // Throttle: collapse bursts of FS events (e.g. cert + key
                    // written back-to-back) into a single reload per second.
                    if start.elapsed() > Duration::from_secs(1) {
                        start = Instant::now();
                        let certificate_configs = listdir(path.clone());
                        sender.send(certificate_configs)?;
                        info!("Certificate changed: {:?}, {:?}", event.kind, event.paths);
                    }
                }
                _ => {}
            },
            Ok(Err(e)) => error!("Watch error: {:?}", e),
            Err(_) => {}
        }
    }
}

View File

@@ -30,10 +30,18 @@ impl BackgroundService for LB {
let _ = tokio::spawn(async move { file_load.start(tx_file).await }); let _ = tokio::spawn(async move { file_load.start(tx_file).await });
let _ = tokio::spawn(async move { consul_load.start(tx_consul).await }); let _ = tokio::spawn(async move { consul_load.start(tx_consul).await });
// let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone()));
// let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone())).await;
let api_load = APIUpstreamProvider { let api_load = APIUpstreamProvider {
address: self.config.config_address.clone(), address: self.config.config_address.clone(),
masterkey: self.config.master_key.clone(), masterkey: self.config.master_key.clone(),
config_api_enabled: self.config.config_api_enabled.clone(),
tls_address: self.config.config_tls_address.clone(),
tls_certificate: self.config.config_tls_certificate.clone(),
tls_key_file: self.config.config_tls_key_file.clone(),
file_server_address: self.config.file_server_address.clone(),
file_server_folder: self.config.file_server_folder.clone(),
}; };
let tx_api = tx.clone(); let tx_api = tx.clone();
let _ = tokio::spawn(async move { api_load.start(tx_api).await }); let _ = tokio::spawn(async move { api_load.start(tx_api).await });
@@ -56,8 +64,10 @@ impl BackgroundService for LB {
clone_dashmap_into(&ss.upstreams, &self.ump_upst); clone_dashmap_into(&ss.upstreams, &self.ump_upst);
let current = self.extraparams.load_full(); let current = self.extraparams.load_full();
let mut new = (*current).clone(); let mut new = (*current).clone();
new.stickysessions = ss.extraparams.stickysessions; new.sticky_sessions = ss.extraparams.sticky_sessions;
new.to_https = ss.extraparams.to_https;
new.authentication = ss.extraparams.authentication.clone(); new.authentication = ss.extraparams.authentication.clone();
new.rate_limit = ss.extraparams.rate_limit;
self.extraparams.store(Arc::new(new)); self.extraparams.store(Arc::new(new));
self.headers.clear(); self.headers.clear();
@@ -79,8 +89,8 @@ impl BackgroundService for LB {
} }
} }
} }
info!("Upstreams list is changed, updating to:"); // info!("Upstreams list is changed, updating to:");
print_upstreams(&self.ump_full); // print_upstreams(&self.ump_full);
} }
None => {} None => {}
} }

View File

@@ -1,15 +1,16 @@
use crate::utils::structs::InnerMap;
use crate::web::proxyhttp::LB; use crate::web::proxyhttp::LB;
use async_trait::async_trait; use async_trait::async_trait;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
#[async_trait] #[async_trait]
pub trait GetHost { pub trait GetHost {
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<(String, u16, bool, bool)>; fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap>;
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>>; fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>>;
} }
#[async_trait] #[async_trait]
impl GetHost for LB { impl GetHost for LB {
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<(String, u16, bool, bool)> { fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap> {
if let Some(b) = backend_id { if let Some(b) = backend_id {
if let Some(bb) = self.ump_byid.get(b) { if let Some(bb) = self.ump_byid.get(b) {
// println!("BIB :===> {:?}", Some(bb.value())); // println!("BIB :===> {:?}", Some(bb.value()));
@@ -19,7 +20,7 @@ impl GetHost for LB {
let host_entry = self.ump_upst.get(peer)?; let host_entry = self.ump_upst.get(peer)?;
let mut current_path = path.to_string(); let mut current_path = path.to_string();
let mut best_match: Option<(String, u16, bool, bool)> = None; let mut best_match: Option<InnerMap> = None;
loop { loop {
if let Some(entry) = host_entry.get(&current_path) { if let Some(entry) = host_entry.get(&current_path) {
let (servers, index) = entry.value(); let (servers, index) = entry.value();
@@ -44,7 +45,6 @@ impl GetHost for LB {
} }
} }
} }
// println!("BMT :===> {:?}", best_match);
best_match best_match
} }
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>> { fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>> {

View File

@@ -1,17 +1,26 @@
use crate::utils::auth::authenticate; use crate::utils::auth::authenticate;
use crate::utils::structs::{AppConfig, Extraparams, Headers, UpstreamsDashMap, UpstreamsIdMap}; use crate::utils::metrics::*;
use crate::utils::structs::{AppConfig, Extraparams, Headers, InnerMap, UpstreamsDashMap, UpstreamsIdMap};
use crate::web::gethosts::GetHost; use crate::web::gethosts::GetHost;
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use async_trait::async_trait; use async_trait::async_trait;
use axum::body::Bytes;
use log::{debug, warn}; use log::{debug, warn};
use pingora::http::RequestHeader; use once_cell::sync::Lazy;
use pingora::http::{RequestHeader, ResponseHeader, StatusCode};
use pingora::prelude::*; use pingora::prelude::*;
use pingora::ErrorSource::Upstream;
use pingora_core::listeners::ALPN; use pingora_core::listeners::ALPN;
use pingora_core::prelude::HttpPeer; use pingora_core::prelude::HttpPeer;
use pingora_http::ResponseHeader; use pingora_limits::rate::Rate;
use pingora_proxy::{ProxyHttp, Session}; use pingora_proxy::{ProxyHttp, Session};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use tokio::time::Instant;
static RATE_LIMITER: Lazy<Rate> = Lazy::new(|| Rate::new(Duration::from_secs(1)));
#[derive(Clone)]
pub struct LB { pub struct LB {
pub ump_upst: Arc<UpstreamsDashMap>, pub ump_upst: Arc<UpstreamsDashMap>,
pub ump_full: Arc<UpstreamsDashMap>, pub ump_full: Arc<UpstreamsDashMap>,
@@ -23,87 +32,150 @@ pub struct LB {
pub struct Context { pub struct Context {
backend_id: String, backend_id: String,
to_https: bool,
redirect_to: String,
start_time: Instant,
hostname: Option<String>,
upstream_peer: Option<InnerMap>,
extraparams: arc_swap::Guard<Arc<Extraparams>>,
} }
#[async_trait] #[async_trait]
impl ProxyHttp for LB { impl ProxyHttp for LB {
// type CTX = ();
// fn new_ctx(&self) -> Self::CTX {}
type CTX = Context; type CTX = Context;
fn new_ctx(&self) -> Self::CTX { fn new_ctx(&self) -> Self::CTX {
Context { backend_id: String::new() } Context {
backend_id: String::new(),
to_https: false,
redirect_to: String::new(),
start_time: Instant::now(),
hostname: None,
upstream_peer: None,
extraparams: self.extraparams.load(),
}
} }
async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> { async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
if let Some(auth) = self.extraparams.load().authentication.get("authorization") { let ep = _ctx.extraparams.clone();
if let Some(auth) = ep.authentication.get("authorization") {
let authenticated = authenticate(&auth.value(), &session); let authenticated = authenticate(&auth.value(), &session);
if !authenticated { if !authenticated {
let _ = session.respond_error(401).await; let _ = session.respond_error(401).await;
warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path().to_string()); warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path());
return Ok(true); return Ok(true);
} }
}; };
// if session.req_header().uri.path().starts_with("/denied") {
// let _ = session.respond_error(403).await;
// warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path().to_string());
// return Ok(true);
// };
Ok(false)
}
async fn upstream_peer(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
let host_name = return_header_host(&session);
match host_name { let hostname = return_header_host(&session);
Some(hostname) => { _ctx.hostname = hostname.clone();
// session.req_header_mut().headers.insert("X-Host-Name", host.to_string().parse().unwrap());
let mut backend_id = None; let mut backend_id = None;
if self.extraparams.load().stickysessions {
if let Some(cookies) = session.req_header().headers.get("cookie") { if ep.sticky_sessions {
if let Ok(cookie_str) = cookies.to_str() { if let Some(cookies) = session.req_header().headers.get("cookie") {
for cookie in cookie_str.split(';') { if let Ok(cookie_str) = cookies.to_str() {
let trimmed = cookie.trim(); for cookie in cookie_str.split(';') {
if let Some(value) = trimmed.strip_prefix("backend_id=") { let trimmed = cookie.trim();
backend_id = Some(value); if let Some(value) = trimmed.strip_prefix("backend_id=") {
break; backend_id = Some(value);
} break;
}
}
}
}
}
match hostname {
None => return Ok(false),
Some(host) => {
let optioninnermap = self.get_host(host.as_str(), host.as_str(), backend_id);
match optioninnermap {
None => return Ok(false),
Some(ref innermap) => {
if let Some(rate) = innermap.rate_limit.or(ep.rate_limit) {
// let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip().to_string()).unwrap_or_else(|| host.to_string());
let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip());
let curr_window_requests = RATE_LIMITER.observe(&rate_key, 1);
if curr_window_requests > rate {
let mut header = ResponseHeader::build(429, None).unwrap();
header.insert_header("X-Rate-Limit-Limit", rate.to_string()).unwrap();
header.insert_header("X-Rate-Limit-Remaining", "0").unwrap();
header.insert_header("X-Rate-Limit-Reset", "1").unwrap();
session.set_keepalive(None);
session.write_response_header(Box::new(header), true).await?;
debug!("Rate limited: {:?}, {}", rate_key, rate);
return Ok(true);
} }
} }
} }
} }
_ctx.upstream_peer = optioninnermap;
let ddr = self.get_host(hostname, hostname, backend_id); }
}
match ddr { Ok(false)
Some((address, port, ssl, is_h2)) => { }
let mut peer = Box::new(HttpPeer::new((address.clone(), port.clone()), ssl, String::new())); async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
// let host_name = return_header_host(&session);
match ctx.hostname.as_ref() {
Some(hostname) => {
match ctx.upstream_peer.as_ref() {
// Some((address, port, ssl, is_h2, to_https)) => {
Some(innermap) => {
let mut peer = Box::new(HttpPeer::new((innermap.address.clone(), innermap.port.clone()), innermap.is_ssl, String::new()));
// if session.is_http2() { // if session.is_http2() {
if is_h2 { if innermap.is_http2 {
peer.options.alpn = ALPN::H2; peer.options.alpn = ALPN::H2;
} }
if ssl { if innermap.is_ssl {
peer.sni = hostname.to_string(); peer.sni = hostname.clone();
peer.options.verify_cert = false; peer.options.verify_cert = false;
peer.options.verify_hostname = false; peer.options.verify_hostname = false;
} }
// println!(" ==> {} ==> {} => {} => {:?}", hostname, address.as_str(), peer.options.alpn, is_h2); if ctx.to_https || innermap.to_https {
_ctx.backend_id = format!("{}:{}:{}", address.clone(), port.clone(), ssl); if let Some(stream) = session.stream() {
if stream.get_ssl().is_none() {
if let Some(addr) = session.server_addr() {
if let Some((host, _)) = addr.to_string().split_once(':') {
let uri = session.req_header().uri.path_and_query().map_or("/", |pq| pq.as_str());
let port = self.config.proxy_port_tls.unwrap_or(403);
ctx.to_https = true;
ctx.redirect_to = format!("https://{}:{}{}", host, port, uri);
}
}
}
}
}
ctx.backend_id = format!("{}:{}:{}", innermap.address.clone(), innermap.port.clone(), innermap.is_ssl);
Ok(peer) Ok(peer)
} }
None => { None => {
warn!("Upstream not found. Host: {:?}, Path: {}", hostname, session.req_header().uri); session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error");
Ok(return_no_host(&self.config.local_server)) Err(Box::new(Error {
etype: HTTPStatus(502),
esource: Upstream,
retry: RetryType::Decided(false),
cause: None,
context: Option::from(ImmutStr::Static("Upstream not found")),
}))
} }
} }
} }
None => { None => {
warn!("Upstream not found. Host: {:?}, Path: {}", host_name, session.req_header().uri); session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error");
Ok(return_no_host(&self.config.local_server)) Err(Box::new(Error {
etype: HTTPStatus(502),
esource: Upstream,
retry: RetryType::Decided(false),
cause: None,
context: None,
}))
} }
} }
} }
async fn upstream_request_filter(&self, _session: &mut Session, _upstream_request: &mut RequestHeader, _ctx: &mut Self::CTX) -> Result<()> { async fn upstream_request_filter(&self, session: &mut Session, _upstream_request: &mut RequestHeader, _ctx: &mut Self::CTX) -> Result<()> {
let clientip = _session.client_addr(); match session.client_addr() {
match clientip {
Some(ip) => { Some(ip) => {
let inet = ip.as_inet(); let inet = ip.as_inet();
match inet { match inet {
@@ -122,20 +194,30 @@ impl ProxyHttp for LB {
Ok(()) Ok(())
} }
async fn response_filter(&self, _session: &mut Session, _upstream_response: &mut ResponseHeader, _ctx: &mut Self::CTX) -> Result<()> { // async fn request_body_filter(&self, _session: &mut Session, _body: &mut Option<Bytes>, _end_of_stream: bool, _ctx: &mut Self::CTX) -> Result<()>
// where
// Self::CTX: Send + Sync,
// {
// Ok(())
// }
async fn response_filter(&self, session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> {
// _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap(); // _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap();
if self.extraparams.load().stickysessions { if ctx.extraparams.sticky_sessions {
let backend_id = _ctx.backend_id.clone(); let backend_id = ctx.backend_id.clone();
if let Some(bid) = self.ump_byid.get(&backend_id) { if let Some(bid) = self.ump_byid.get(&backend_id) {
// let _ = _upstream_response.insert_header("set-cookie", format!("backend {}", bid.0)); let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.address));
let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.0));
} }
} }
if ctx.to_https {
let host_name = return_header_host(&_session); let mut redirect_response = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
match host_name { redirect_response.insert_header("Location", ctx.redirect_to.clone())?;
redirect_response.insert_header("Content-Length", "0")?;
session.write_response_header(Box::new(redirect_response), false).await?;
}
// match return_header_host(&session) {
match ctx.hostname.as_ref() {
Some(host) => { Some(host) => {
let path = _session.req_header().uri.path(); let path = session.req_header().uri.path();
let host_header = host; let host_header = host;
let split_header = host_header.split_once(':'); let split_header = host_header.split_once(':');
match split_header { match split_header {
@@ -159,35 +241,36 @@ impl ProxyHttp for LB {
} }
None => {} None => {}
} }
session.set_keepalive(Some(300));
Ok(()) Ok(())
} }
async fn logging(&self, session: &mut Session, _e: Option<&pingora::Error>, ctx: &mut Self::CTX) { async fn logging(&self, session: &mut Session, _e: Option<&pingora::Error>, ctx: &mut Self::CTX) {
let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16()); let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16());
debug!("{}, response code: {response_code}", self.request_summary(session, ctx)); debug!("{}, response code: {response_code}", self.request_summary(session, ctx));
let m = &MetricTypes {
method: session.req_header().method.to_string(),
code: session.response_written().map(|resp| resp.status.as_str().to_owned()).unwrap_or("0".to_string()),
latency: ctx.start_time.elapsed(),
version: session.req_header().version,
};
calc_metrics(m);
} }
} }
fn return_header_host(session: &Session) -> Option<&str> { fn return_header_host(session: &Session) -> Option<String> {
if session.is_http2() { if session.is_http2() {
match session.req_header().uri.host() { match session.req_header().uri.host() {
Some(host) => Option::from(host), Some(host) => Option::from(host.to_string()),
None => None, None => None,
} }
} else { } else {
match session.req_header().headers.get("host") { match session.req_header().headers.get("host") {
Some(host) => { Some(host) => {
let header_host = host.to_str().unwrap().splitn(2, ':').collect::<Vec<&str>>(); let header_host = host.to_str().unwrap().splitn(2, ':').collect::<Vec<&str>>();
Option::from(header_host[0]) Option::from(header_host[0].to_string())
} }
None => None, None => None,
} }
} }
} }
fn return_no_host(inp: &Option<(String, u16)>) -> Box<HttpPeer> {
match inp {
Some(t) => Box::new(HttpPeer::new(t, false, String::new())),
None => Box::new(HttpPeer::new(("0.0.0.0", 0), false, String::new())),
}
}

View File

@@ -1,16 +1,22 @@
// use rustls::crypto::ring::default_provider;
use crate::utils::structs::Extraparams; use crate::utils::structs::Extraparams;
use crate::utils::tls;
use crate::utils::tls::CertificateConfig;
use crate::utils::tools::*;
use crate::web::proxyhttp::LB; use crate::web::proxyhttp::LB;
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use dashmap::DashMap; use dashmap::DashMap;
use log::info; use log::info;
use pingora::tls::ssl::{SslAlert, SslRef};
use pingora_core::listeners::tls::TlsSettings;
use pingora_core::prelude::{background_service, Opt}; use pingora_core::prelude::{background_service, Opt};
use pingora_core::server::Server; use pingora_core::server::Server;
use rustls::crypto::ring::default_provider; use std::sync::mpsc::{channel, Receiver, Sender};
use std::env;
use std::sync::Arc; use std::sync::Arc;
use std::{env, thread};
pub fn run() { pub fn run() {
default_provider().install_default().expect("Failed to install rustls crypto provider"); // default_provider().install_default().expect("Failed to install rustls crypto provider");
let parameters = Some(Opt::parse_args()).unwrap(); let parameters = Some(Opt::parse_args()).unwrap();
let file = parameters.conf.clone().unwrap(); let file = parameters.conf.clone().unwrap();
let maincfg = crate::utils::parceyaml::parce_main_config(file.as_str()); let maincfg = crate::utils::parceyaml::parce_main_config(file.as_str());
@@ -22,66 +28,83 @@ pub fn run() {
let ff_config = Arc::new(DashMap::new()); let ff_config = Arc::new(DashMap::new());
let im_config = Arc::new(DashMap::new()); let im_config = Arc::new(DashMap::new());
let hh_config = Arc::new(DashMap::new()); let hh_config = Arc::new(DashMap::new());
let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams { let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams {
stickysessions: false, sticky_sessions: false,
to_https: None,
authentication: DashMap::new(), authentication: DashMap::new(),
rate_limit: None,
})); }));
let cfg = Arc::new(maincfg); let cfg = Arc::new(maincfg);
let lb = LB { let lb = LB {
ump_upst: uf_config.clone(), ump_upst: uf_config,
ump_full: ff_config.clone(), ump_full: ff_config,
ump_byid: im_config.clone(), ump_byid: im_config,
config: cfg.clone(), config: cfg.clone(),
headers: hh_config.clone(), headers: hh_config,
extraparams: ec_config.clone(), extraparams: ec_config,
}; };
let bg = LB {
ump_upst: uf_config.clone(),
ump_full: ff_config.clone(),
ump_byid: im_config.clone(),
config: cfg.clone(),
headers: hh_config.clone(),
extraparams: ec_config.clone(),
};
// env_logger::Env::new();
// env_logger::init();
let log_level = cfg.log_level.clone(); let log_level = cfg.log_level.clone();
match log_level.as_str() { unsafe {
"info" => env::set_var("RUST_LOG", "info"), match log_level.as_str() {
"error" => env::set_var("RUST_LOG", "error"), "info" => env::set_var("RUST_LOG", "info"),
"warn" => env::set_var("RUST_LOG", "warn"), "error" => env::set_var("RUST_LOG", "error"),
"debug" => env::set_var("RUST_LOG", "debug"), "warn" => env::set_var("RUST_LOG", "warn"),
"trace" => env::set_var("RUST_LOG", "trace"), "debug" => env::set_var("RUST_LOG", "debug"),
"off" => env::set_var("RUST_LOG", "off"), "trace" => env::set_var("RUST_LOG", "trace"),
_ => { "off" => env::set_var("RUST_LOG", "off"),
println!("Error reading log level, defaulting to: INFO"); _ => {
env::set_var("RUST_LOG", "info") println!("Error reading log level, defaulting to: INFO");
env::set_var("RUST_LOG", "info")
}
} }
} }
env_logger::builder() env_logger::builder().init();
// .format_timestamp(None)
// .format_module_path(false)
// .format_source_path(false)
// .format_target(false)
.init();
let bg_srvc = background_service("bgsrvc", bg); let bg_srvc = background_service("bgsrvc", lb.clone());
let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb); let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb.clone());
let bind_address_http = cfg.proxy_address_http.clone(); let bind_address_http = cfg.proxy_address_http.clone();
let bind_address_tls = cfg.proxy_address_tls.clone(); let bind_address_tls = cfg.proxy_address_tls.clone();
match bind_address_tls { match bind_address_tls {
Some(bind_address_tls) => { Some(bind_address_tls) => {
info!("Running TLS listener on :{}", bind_address_tls); let (tx, rx): (Sender<Vec<CertificateConfig>>, Receiver<Vec<CertificateConfig>>) = channel();
let cert_path = cfg.tls_certificate.clone().unwrap(); let certs_path = cfg.proxy_certificates.clone().unwrap();
let key_path = cfg.tls_key_file.clone().unwrap(); thread::spawn(move || {
let mut tls_settings = pingora_core::listeners::tls::TlsSettings::intermediate(&cert_path, &key_path).unwrap(); watch_folder(certs_path, tx).unwrap();
tls_settings.enable_h2(); });
let certificate_configs = rx.recv().unwrap();
let first_set = tls::Certificates::new(&certificate_configs).unwrap_or_else(|| panic!("Unable to load initial certificate info"));
let certificates = Arc::new(ArcSwap::from_pointee(first_set));
let certs_for_callback = certificates.clone();
let certs_for_watcher = certificates.clone();
let new_certs = tls::Certificates::new(&certificate_configs);
certs_for_watcher.store(Arc::new(new_certs.unwrap()));
let mut tls_settings =
TlsSettings::intermediate(&certs_for_callback.load().default_cert_path, &certs_for_callback.load().default_key_path).expect("unable to load or parse cert/key");
tls_settings.set_servername_callback(move |ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert| certs_for_callback.load().server_name_callback(ssl_ref, ssl_alert));
tls_settings.set_alpn_select_callback(tls::prefer_h2);
proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings); proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings);
let certs_for_watcher = certificates.clone();
thread::spawn(move || {
while let Ok(new_configs) = rx.recv() {
let new_certs = tls::Certificates::new(&new_configs);
match new_certs {
Some(new_certs) => {
certs_for_watcher.store(Arc::new(new_certs));
info!("Reload TLS certificates from {}", cfg.proxy_certificates.clone().unwrap())
}
None => {}
};
}
});
} }
None => {} None => {}
} }
@@ -89,8 +112,5 @@ pub fn run() {
proxy.add_tcp(bind_address_http.as_str()); proxy.add_tcp(bind_address_http.as_str());
server.add_service(proxy); server.add_service(proxy);
server.add_service(bg_srvc); server.add_service(bg_srvc);
// let mut prometheus_service_http = Service::prometheus_http_service();
// prometheus_service_http.add_tcp("0.0.0.0:1234");
// server.add_service(prometheus_service_http);
server.run_forever(); server.run_forever();
} }

View File

@@ -1,21 +1,27 @@
use crate::utils::discovery::APIUpstreamProvider;
use crate::utils::structs::Configuration; use crate::utils::structs::Configuration;
use axum::body::Body; use axum::body::Body;
use axum::extract::State; use axum::extract::{Query, State};
use axum::http::{Response, StatusCode}; use axum::http::{Response, StatusCode};
use axum::response::IntoResponse; use axum::response::IntoResponse;
use axum::routing::{delete, get, head, post, put}; use axum::routing::{get, post};
use axum::{Json, Router}; use axum::{Json, Router};
use axum_server::tls_openssl::OpenSSLConfig;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use futures::SinkExt; use futures::SinkExt;
use jsonwebtoken::{encode, EncodingKey, Header}; use jsonwebtoken::{encode, EncodingKey, Header};
use log::{error, info, warn}; use log::{error, info, warn};
use prometheus::{gather, Encoder, TextEncoder};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::net::SocketAddr;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::net::TcpListener; use tokio::net::TcpListener;
use tower_http::services::ServeDir;
#[derive(Deserialize)] #[derive(Deserialize)]
struct InputKey { struct InputKey {
masterkey: String, master_key: String,
owner: String, owner: String,
valid: u64, valid: u64,
} }
@@ -25,51 +31,84 @@ struct OutToken {
token: String, token: String,
} }
#[allow(unused_mut)] #[derive(Clone)]
pub async fn run_server(bindaddress: String, masterkey: String, mut toreturn: Sender<Configuration>) { struct AppState {
let mut tr = toreturn.clone(); master_key: String,
let app = Router::new() config_sender: Sender<Configuration>,
.route("/{*wildcard}", get(senderror)) config_api_enabled: bool,
.route("/{*wildcard}", post(senderror)) }
.route("/{*wildcard}", put(senderror))
.route("/{*wildcard}", head(senderror))
.route("/{*wildcard}", delete(senderror))
.route("/jwt", post(jwt_gen))
.with_state(masterkey.clone())
.route(
"/conf",
post(|up: String| async move {
let serverlist = crate::utils::parceyaml::load_configuration(up.as_str(), "content");
match serverlist { #[allow(unused_mut)]
Some(serverlist) => { pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>) {
let _ = tr.send(serverlist).await.unwrap(); let app_state = AppState {
Response::builder().status(StatusCode::CREATED).body(Body::from("Config, conf file, updated!\n")).unwrap() master_key: config.masterkey.clone(),
} config_sender: to_return.clone(),
None => Response::builder() config_api_enabled: config.config_api_enabled.clone(),
.status(StatusCode::INTERNAL_SERVER_ERROR) };
.body(Body::from("Failed to parce config file!\n"))
.unwrap(), let app = Router::new()
} // .route("/{*wildcard}", get(senderror))
}) // .route("/{*wildcard}", post(senderror))
.with_state("state"), // .route("/{*wildcard}", put(senderror))
); // .route("/{*wildcard}", head(senderror))
let listener = TcpListener::bind(bindaddress.clone()).await.unwrap(); // .route("/{*wildcard}", delete(senderror))
info!("Starting the API server on: {}", bindaddress); // .nest_service("/static", static_files)
.route("/jwt", post(jwt_gen))
.route("/conf", post(conf))
.route("/metrics", get(metrics))
.with_state(app_state);
if let Some(value) = &config.tls_address {
let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap();
let addr: SocketAddr = value.parse().expect("Unable to parse socket address");
let tls_app = app.clone();
tokio::spawn(async move {
if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await {
eprintln!("TLS server failed: {}", e);
}
});
info!("Starting the TLS API server on: {}", value);
}
if let (Some(address), Some(folder)) = (&config.file_server_address, &config.file_server_folder) {
let static_files = ServeDir::new(folder);
let static_serve: Router = Router::new().fallback_service(static_files);
let static_listen = TcpListener::bind(address).await.unwrap();
let _ = tokio::spawn(async move { axum::serve(static_listen, static_serve).await.unwrap() });
}
let listener = TcpListener::bind(config.address.clone()).await.unwrap();
info!("Starting the API server on: {}", config.address);
axum::serve(listener, app).await.unwrap(); axum::serve(listener, app).await.unwrap();
} }
#[allow(dead_code)] async fn conf(State(mut st): State<AppState>, Query(params): Query<HashMap<String, String>>, content: String) -> impl IntoResponse {
async fn senderror() -> impl IntoResponse { if !st.config_api_enabled {
Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("No live upstream found!\n")).unwrap() return Response::builder()
.status(StatusCode::FORBIDDEN)
.body(Body::from("Config remote API is disabled !\n"))
.unwrap();
}
if let Some(s) = params.get("key") {
if s.to_owned() == st.master_key {
if let Some(serverlist) = crate::utils::parceyaml::load_configuration(content.as_str(), "content") {
st.config_sender.send(serverlist).await.unwrap();
return Response::builder().status(StatusCode::OK).body(Body::from("Config, conf file, updated !\n")).unwrap();
} else {
return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("Failed to parse config!\n")).unwrap();
};
}
}
Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap()
} }
async fn jwt_gen(State(masterkey): State<String>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) { async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) {
if payload.masterkey == masterkey { if payload.master_key == state.master_key {
let now = SystemTime::now() + Duration::from_secs(payload.valid * 60); let now = SystemTime::now() + Duration::from_secs(payload.valid * 60);
let a = now.duration_since(UNIX_EPOCH).unwrap().as_secs(); let a = now.duration_since(UNIX_EPOCH).unwrap().as_secs();
let claim = crate::utils::jwt::Claims { user: payload.owner, exp: a }; let claim = crate::utils::jwt::Claims { user: payload.owner, exp: a };
match encode(&Header::default(), &claim, &EncodingKey::from_secret(payload.masterkey.as_ref())) { match encode(&Header::default(), &claim, &EncodingKey::from_secret(payload.master_key.as_ref())) {
Ok(t) => { Ok(t) => {
let tok = OutToken { token: t }; let tok = OutToken { token: t };
info!("Generating token: {:?}", tok); info!("Generating token: {:?}", tok);
@@ -89,3 +128,28 @@ async fn jwt_gen(State(masterkey): State<String>, Json(payload): Json<InputKey>)
(StatusCode::FORBIDDEN, Json(tok)) (StatusCode::FORBIDDEN, Json(tok))
} }
} }
async fn metrics() -> impl IntoResponse {
let metric_families = gather();
let encoder = TextEncoder::new();
let mut buffer = Vec::new();
if let Err(e) = encoder.encode(&metric_families, &mut buffer) {
// encoding error fallback
return Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::from(format!("Failed to encode metrics: {}", e)))
.unwrap();
}
Response::builder()
.status(StatusCode::OK)
.header("Content-Type", encoder.format_type())
.body(Body::from(buffer))
.unwrap()
}
// #[allow(dead_code)]
// async fn senderror() -> impl IntoResponse {
// Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("No live upstream found!\n")).unwrap()
// }