mirror of
https://github.com/sadoyan/aralez.git
synced 2026-04-30 23:08:40 +08:00
Compare commits
22 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8d4e434d6a | ||
|
|
60b7b3aa7a | ||
|
|
569db8e18d | ||
|
|
4126249bcd | ||
|
|
0779f97277 | ||
|
|
b047331e6a | ||
|
|
a341fa30db | ||
|
|
9d604d62e7 | ||
|
|
4a21700552 | ||
|
|
f0157b6e8f | ||
|
|
1370396ae8 | ||
|
|
64ef4e14af | ||
|
|
ffc2bab79f | ||
|
|
8e05794784 | ||
|
|
423c7afa90 | ||
|
|
78a084380a | ||
|
|
ada2032732 | ||
|
|
a89592bd07 | ||
|
|
2a93bc2cd6 | ||
|
|
d38588a299 | ||
|
|
3e93920a0d | ||
|
|
fce25b8d15 |
881
Cargo.lock
generated
881
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
32
Cargo.toml
32
Cargo.toml
@@ -1,17 +1,19 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "gazan"
|
name = "aralez"
|
||||||
version = "0.1.0"
|
version = "0.9.1"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
|
opt-level = 3
|
||||||
lto = true
|
lto = true
|
||||||
codegen-units = 1
|
codegen-units = 1
|
||||||
opt-level = 3
|
panic = "abort"
|
||||||
strip = "symbols"
|
strip = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tokio = { version = "1.45.0", features = ["full"] }
|
tokio = { version = "1.45.1", features = ["full"] }
|
||||||
pingora = { version = "0.5.0", features = ["lb", "rustls"] } # openssl, rustls, boringssl
|
#pingora = { version = "0.5.0", features = ["lb", "rustls"] } # openssl, rustls, boringssl
|
||||||
|
pingora = { version = "0.5.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl
|
||||||
serde = { version = "1.0.219", features = ["derive"] }
|
serde = { version = "1.0.219", features = ["derive"] }
|
||||||
dashmap = "7.0.0-rc2"
|
dashmap = "7.0.0-rc2"
|
||||||
pingora-core = "0.5.0"
|
pingora-core = "0.5.0"
|
||||||
@@ -23,19 +25,25 @@ log = "0.4.27"
|
|||||||
futures = "0.3.31"
|
futures = "0.3.31"
|
||||||
notify = "8.0.0"
|
notify = "8.0.0"
|
||||||
axum = { version = "0.8.4" }
|
axum = { version = "0.8.4" }
|
||||||
#reqwest = { version = "0.12.15", features = ["json", "native-tls-alpn"] }
|
axum-server = { version = "0.7.2", features = ["tls-openssl"] }
|
||||||
|
reqwest = { version = "0.12.20", features = ["json", "native-tls-alpn"] }
|
||||||
#reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] }
|
#reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] }
|
||||||
reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] }
|
#reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] }
|
||||||
|
|
||||||
serde_yaml = "0.9.34-deprecated"
|
serde_yaml = "0.9.34-deprecated"
|
||||||
rand = "0.9.0"
|
rand = "0.9.0"
|
||||||
base64 = "0.22.1"
|
base64 = "0.22.1"
|
||||||
jsonwebtoken = "9.3.1"
|
jsonwebtoken = "9.3.1"
|
||||||
tonic = "0.13.0"
|
tonic = "0.13.1"
|
||||||
sha2 = { version = "0.11.0-pre.5", default-features = false }
|
sha2 = { version = "0.11.0-rc.0", default-features = false }
|
||||||
base16ct = { version = "0.2.0", features = ["alloc"] }
|
base16ct = { version = "0.2.0", features = ["alloc"] }
|
||||||
urlencoding = "2.1.3"
|
urlencoding = "2.1.3"
|
||||||
arc-swap = "1.7.1"
|
arc-swap = "1.7.1"
|
||||||
rustls = { version = "0.23.27", features = ["ring"] }
|
#rustls = { version = "0.23.27", features = ["ring"] }
|
||||||
mimalloc = { version = "0.1.46", default-features = false }
|
mimalloc = { version = "0.1.47", default-features = false }
|
||||||
|
prometheus = "0.14.0"
|
||||||
|
lazy_static = "1.5.0"
|
||||||
|
#openssl = "0.10.73"
|
||||||
|
x509-parser = "0.17.0"
|
||||||
|
rustls-pemfile = "2.2.0"
|
||||||
|
|
||||||
|
|||||||
120
METRICS.md
Normal file
120
METRICS.md
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
# 📈 Aralez Prometheus Metrics Reference
|
||||||
|
|
||||||
|
This document outlines Prometheus metrics for the [Aralez](https://github.com/sadoyan/aralez) reverse proxy.
|
||||||
|
These metrics can be used for monitoring, alerting and performance analysis.
|
||||||
|
|
||||||
|
Exposed to `http://config_address/metrics`
|
||||||
|
|
||||||
|
By default `http://127.0.0.1:3000/metrics`
|
||||||
|
|
||||||
|
# 📊 Example Grafana dashboard during stress test :
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🛠️ Prometheus Metrics
|
||||||
|
|
||||||
|
### 1. `aralez_requests_total`
|
||||||
|
|
||||||
|
- **Type**: `Counter`
|
||||||
|
- **Purpose**: Total number of requests served by Aralez.
|
||||||
|
|
||||||
|
**PromQL example:**
|
||||||
|
|
||||||
|
```promql
|
||||||
|
rate(aralez_requests_total[5m])
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. `aralez_errors_total`
|
||||||
|
|
||||||
|
- **Type**: `Counter`
|
||||||
|
- **Purpose**: Count of requests that resulted in an error.
|
||||||
|
|
||||||
|
**PromQL example:**
|
||||||
|
|
||||||
|
```promql
|
||||||
|
rate(aralez_errors_total[5m])
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. `aralez_responses_total{status="200"}`
|
||||||
|
|
||||||
|
- **Type**: `CounterVec`
|
||||||
|
- **Purpose**: Count of responses by HTTP status code.
|
||||||
|
|
||||||
|
**PromQL example:**
|
||||||
|
|
||||||
|
```promql
|
||||||
|
rate(aralez_responses_total{status=~"5.."}[5m]) > 0
|
||||||
|
```
|
||||||
|
|
||||||
|
> Useful for alerting on 5xx errors.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4. `aralez_response_latency_seconds`
|
||||||
|
|
||||||
|
- **Type**: `Histogram`
|
||||||
|
- **Purpose**: Tracks the latency of responses in seconds.
|
||||||
|
|
||||||
|
**Example bucket output:**
|
||||||
|
|
||||||
|
```prometheus
|
||||||
|
aralez_response_latency_seconds_bucket{le="0.01"} 15
|
||||||
|
aralez_response_latency_seconds_bucket{le="0.1"} 120
|
||||||
|
aralez_response_latency_seconds_bucket{le="0.25"} 245
|
||||||
|
aralez_response_latency_seconds_bucket{le="0.5"} 500
|
||||||
|
...
|
||||||
|
aralez_response_latency_seconds_count 1023
|
||||||
|
aralez_response_latency_seconds_sum 42.6
|
||||||
|
```
|
||||||
|
|
||||||
|
| Metric | Meaning |
|
||||||
|
|-------------------------|---------------------------------------------------------------|
|
||||||
|
| `bucket{le="0.1"} 120` | 120 requests were ≤ 100ms |
|
||||||
|
| `bucket{le="0.25"} 245` | 245 requests were ≤ 250ms |
|
||||||
|
| `count` | Total number of observations (i.e., total responses measured) |
|
||||||
|
| `sum` | Total time of all responses, in seconds |
|
||||||
|
|
||||||
|
### 🔍 How to interpret:
|
||||||
|
|
||||||
|
- `le` means “less than or equal to”.
|
||||||
|
- `count` is total amount of observations.
|
||||||
|
- `sum` is the total time (in seconds) of all responses.
|
||||||
|
|
||||||
|
**PromQL examples:**
|
||||||
|
|
||||||
|
🔹 **95th percentile latency**
|
||||||
|
|
||||||
|
```promql
|
||||||
|
histogram_quantile(0.95, rate(aralez_response_latency_seconds_bucket[5m]))
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
🔹 **Average latency**
|
||||||
|
|
||||||
|
```promql
|
||||||
|
rate(aralez_response_latency_seconds_sum[5m]) / rate(aralez_response_latency_seconds_count[5m])
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ✅ Notes
|
||||||
|
|
||||||
|
- Metrics are registered after the first served request.
|
||||||
|
|
||||||
|
---
|
||||||
|
✅ Summary of key metrics
|
||||||
|
|
||||||
|
| Metric Name | Type | What it Tells You |
|
||||||
|
|---------------------------------------|------------|---------------------------|
|
||||||
|
| `aralez_requests_total` | Counter | Total requests served |
|
||||||
|
| `aralez_errors_total` | Counter | Number of failed requests |
|
||||||
|
| `aralez_responses_total{status="200"}` | CounterVec | Response status breakdown |
|
||||||
|
| `aralez_response_latency_seconds` | Histogram | How fast responses are |
|
||||||
|
|
||||||
|
📘 *Last updated: May 2025*
|
||||||
306
README.md
306
README.md
@@ -1,38 +1,42 @@
|
|||||||

|

|
||||||
|
|
||||||
# Gazan - The beast-mode reverse proxy.
|
# Aralez (Արալեզ), Reverse proxy and service mesh built on top of Cloudflare's Pingora
|
||||||
|
|
||||||
Gazan is a Reverse proxy, service mesh based on Cloudflare's Pingora
|
What Aralez means ?
|
||||||
|
**Aralez = Արալեզ** <ins>.Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them.</ins>.
|
||||||
|
|
||||||
**What Gazan means?**
|
Built on Rust, on top of **Cloudflare’s Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box.
|
||||||
<ins>Gazan = Գազան = beast / wild animal in Armenian / Often used as a synonym to something great.</ins>.
|
|
||||||
|
|
||||||
Built on Rust, on top of **Cloudflare’s Pingora engine**, **Gazan** delivers world-class performance, security and scalability — right out of the box.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🔧 Key Features
|
## 🔧 Key Features
|
||||||
|
|
||||||
- **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required
|
- **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required.
|
||||||
- **TLS Termination** — Built-in OpenSSL support
|
- **TLS Termination** — Built-in OpenSSL support.
|
||||||
- **Upstreams TLS detection** — Gazan will automatically detect if upstreams uses secure connection
|
- **Upstreams TLS detection** — Aralez will automatically detect if upstreams use a secure connection.
|
||||||
- **Authentication** — Supports Basic Auth, API tokens, and JWT verification
|
- **Authentication** — Supports Basic Auth, API tokens, and JWT verification.
|
||||||
|
- **Basic Auth**
|
||||||
|
- **API Key** via `x-api-key` header
|
||||||
|
- **JWT Auth**, with tokens issued by Aralez itself via `/jwt` API
|
||||||
|
- ⬇️ See below for examples and implementation details.
|
||||||
- **Load Balancing Strategies**
|
- **Load Balancing Strategies**
|
||||||
- Round-robin
|
- Round-robin
|
||||||
- Failover with health checks
|
- Failover with health checks
|
||||||
- Sticky sessions via cookies
|
- Sticky sessions via cookies
|
||||||
- **Unified Port** — Serve HTTP and WebSocket traffic over the same connection
|
- **Unified Port** — Serve HTTP and WebSocket traffic over the same connection.
|
||||||
- **Memory Safe** — Created purely on Rust
|
- **Memory Safe** — Created purely on Rust.
|
||||||
- **High Performance** — Built with [Pingora](https://github.com/cloudflare/pingora) and tokio for async I/O
|
- **High Performance** — Built with [Pingora](https://github.com/cloudflare/pingora) and tokio for async I/O.
|
||||||
|
|
||||||
## 🌍 Highlights
|
## 🌍 Highlights
|
||||||
|
|
||||||
- ⚙️ **Upstream Providers:** Supports `file`-based static upstreams, dynamic service discovery via `Consul`.
|
- ⚙️ **Upstream Providers:**
|
||||||
|
- `file` Upstreams are declared in config file.
|
||||||
|
- `consul` Upstreams are dynamically updated from Hashicorp Consul.
|
||||||
- 🔁 **Hot Reloading:** Modify upstreams on the fly via `upstreams.yaml` — no restart needed.
|
- 🔁 **Hot Reloading:** Modify upstreams on the fly via `upstreams.yaml` — no restart needed.
|
||||||
- 🔮 **Automatic WebSocket Support:** Zero config — connection upgrades are handled seamlessly.
|
- 🔮 **Automatic WebSocket Support:** Zero config — connection upgrades are handled seamlessly.
|
||||||
- 🔮 **Automatic GRPC Support:** Zero config, Requires `ssl` to proxy, gRPC is handled seamlessly.
|
- 🔮 **Automatic GRPC Support:** Zero config, Requires `ssl` to proxy, gRPC handled seamlessly.
|
||||||
- 🔮 **Upstreams Session Stickiness:** Enable/Disable Sticky sessions.
|
- 🔮 **Upstreams Session Stickiness:** Enable/Disable Sticky sessions globally.
|
||||||
- 🔐 **TLS Termination:** Fully supports TLS for incoming and upstream traffic.
|
- 🔐 **TLS Termination:** Fully supports TLS for upstreams and downstreams.
|
||||||
- 🛡️ **Built-in Authentication** Basic Auth, JWT, API key.
|
- 🛡️ **Built-in Authentication** Basic Auth, JWT, API key.
|
||||||
- 🧠 **Header Injection:** Global and per-route header configuration.
|
- 🧠 **Header Injection:** Global and per-route header configuration.
|
||||||
- 🧪 **Health Checks:** Pluggable health check methods for upstreams.
|
- 🧪 **Health Checks:** Pluggable health check methods for upstreams.
|
||||||
@@ -57,15 +61,28 @@ Built on Rust, on top of **Cloudflare’s Pingora engine**, **Gazan** delivers w
|
|||||||
|
|
||||||
### 🔧 `main.yaml`
|
### 🔧 `main.yaml`
|
||||||
|
|
||||||
- `proxy_address_http`: `0.0.0.0:6193` (HTTP listener)
|
| Key | Example Value | Description |
|
||||||
- `proxy_address_tls`: `0.0.0.0:6194` (TLS listener, optional)
|
|----------------------------------|--------------------------------------|--------------------------------------------------------------------------------------------------|
|
||||||
- `config_address`: `0.0.0.0:3000` (HTTP API for remote config push)
|
| **threads** | 12 | Number of running daemon threads. Optional, defaults to 1 |
|
||||||
- `upstreams_conf`: `etc/upstreams.yaml` (location of upstreams config)
|
| **user**                         | aralez                               | Optional, Username for running aralez after dropping root privileges; requires launching as root  |
|
||||||
- `log_level`: `info` (verbosity of logs)
|
| **group**                        | aralez                               | Optional, Group for running aralez after dropping root privileges; requires launching as root     |
|
||||||
- `hc_method`: `HEAD`, `hc_interval`: `2s` (upstream health checks)
|
| **daemon** | false | Run in background (boolean) |
|
||||||
- `user` Optional. Drop privileges to regular user. To bind to privileged ports. Requires to start as root.
|
| **upstream_keepalive_pool_size** | 500 | Pool size for upstream keepalive connections |
|
||||||
- `group` Optional. Drop privileges to regular group
|
| **pid_file** | /tmp/aralez.pid | Path to PID file |
|
||||||
- Other defaults: thread count, keep-alive pool size, etc.
|
| **error_log** | /tmp/aralez_err.log | Path to error log file |
|
||||||
|
| **upgrade_sock** | /tmp/aralez.sock | Path to live upgrade socket file |
|
||||||
|
| **config_address** | 0.0.0.0:3000 | HTTP API address for pushing upstreams.yaml from remote location |
|
||||||
|
| **config_tls_address** | 0.0.0.0:3001 | HTTPS API address for pushing upstreams.yaml from remote location |
|
||||||
|
| **config_tls_certificate** | etc/server.crt | Certificate file path for API. Mandatory if proxy_address_tls is set, else optional |
|
||||||
|
| **config_tls_key_file** | etc/key.pem | Private Key file path. Mandatory if proxy_address_tls is set, else optional |
|
||||||
|
| **proxy_address_http** | 0.0.0.0:6193 | Aralez HTTP bind address |
|
||||||
|
| **proxy_address_tls** | 0.0.0.0:6194 | Aralez HTTPS bind address (Optional) |
|
||||||
|
| **proxy_certificates** | etc/certs/ | The directory containing certificate and key files. In a format {NAME}.crt, {NAME}.key. |
|
||||||
|
| **upstreams_conf** | etc/upstreams.yaml | The location of upstreams file |
|
||||||
|
| **log_level** | info | Log level , possible values : info, warn, error, debug, trace, off |
|
||||||
|
| **hc_method** | HEAD | Healthcheck method (HEAD, GET, POST are supported) UPPERCASE |
|
||||||
|
| **hc_interval** | 2 | Interval for health checks in seconds |
|
||||||
|
| **master_key** | 5aeff7f9-7b94-447c-af60-e8c488544a3e | Master key for working with API server and JWT Secret generation |
|
||||||
|
|
||||||
### 🌐 `upstreams.yaml`
|
### 🌐 `upstreams.yaml`
|
||||||
|
|
||||||
@@ -81,40 +98,40 @@ Built on Rust, on top of **Cloudflare’s Pingora engine**, **Gazan** delivers w
|
|||||||
|
|
||||||
## 🛠 Installation
|
## 🛠 Installation
|
||||||
|
|
||||||
Download the prebuilt binary for your architecture from releases section of [GitHub](https://github.com/sadoyan/gazan/releases) repo
|
Download the prebuilt binary for your architecture from releases section of [GitHub](https://github.com/sadoyan/aralez/releases) repo
|
||||||
Make the binary executable `chmod 755 ./gazan-VERSION` and run.
|
Make the binary executable `chmod 755 ./aralez-VERSION` and run.
|
||||||
|
|
||||||
File names:
|
File names:
|
||||||
|
|
||||||
| File Name | Description |
|
| File Name | Description |
|
||||||
|--------------------------|---------------------------------------------------------------|
|
|---------------------------|---------------------------------------------------------------|
|
||||||
| `gazan-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
|
| `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
|
||||||
| `gazan-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
|
| `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
|
||||||
| `gazan-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
|
| `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
|
||||||
| `gazan-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
|
| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
|
||||||
|
|
||||||
## 🔌 Running the Proxy
|
## 🔌 Running the Proxy
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./gazan -c path/to/main.yaml
|
./aralez -c path/to/main.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔌 Systemd integration
|
## 🔌 Systemd integration
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cat > /etc/systemd/system/gazan.service <<EOF
|
cat > /etc/systemd/system/aralez.service <<EOF
|
||||||
[Service]
|
[Service]
|
||||||
Type=forking
|
Type=forking
|
||||||
PIDFile=/run/gazan.pid
|
PIDFile=/run/aralez.pid
|
||||||
ExecStart=/bin/gazan -d -c /etc/gazan.conf
|
ExecStart=/bin/aralez -d -c /etc/aralez.conf
|
||||||
ExecReload=kill -QUIT $MAINPID
|
ExecReload=kill -QUIT $MAINPID
|
||||||
ExecReload=/bin/gazan -u -d -c /etc/gazan.conf
|
ExecReload=/bin/aralez -u -d -c /etc/aralez.conf
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
systemctl enable gazan.service.
|
systemctl enable aralez.service.
|
||||||
systemctl restart gazan.service.
|
systemctl restart aralez.service.
|
||||||
```
|
```
|
||||||
|
|
||||||
## 💡 Example
|
## 💡 Example
|
||||||
@@ -123,18 +140,19 @@ A sample `upstreams.yaml` entry:
|
|||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
provider: "file"
|
provider: "file"
|
||||||
stickysessions: false
|
sticky_sessions: false
|
||||||
globals:
|
to_https: false
|
||||||
headers:
|
headers:
|
||||||
- "Access-Control-Allow-Origin:*"
|
- "Access-Control-Allow-Origin:*"
|
||||||
- "Access-Control-Allow-Methods:POST, GET, OPTIONS"
|
- "Access-Control-Allow-Methods:POST, GET, OPTIONS"
|
||||||
- "Access-Control-Max-Age:86400"
|
- "Access-Control-Max-Age:86400"
|
||||||
authorization:
|
authorization:
|
||||||
- "jwt"
|
type: "jwt"
|
||||||
- "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
|
creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
|
||||||
myhost.mydomain.com:
|
myhost.mydomain.com:
|
||||||
paths:
|
paths:
|
||||||
"/":
|
"/":
|
||||||
|
to_https: false
|
||||||
headers:
|
headers:
|
||||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||||
- "X-Proxy-From:Hopaaaaaaaaaaaar"
|
- "X-Proxy-From:Hopaaaaaaaaaaaar"
|
||||||
@@ -142,6 +160,7 @@ myhost.mydomain.com:
|
|||||||
- "127.0.0.1:8000"
|
- "127.0.0.1:8000"
|
||||||
- "127.0.0.2:8000"
|
- "127.0.0.2:8000"
|
||||||
"/foo":
|
"/foo":
|
||||||
|
to_https: true
|
||||||
headers:
|
headers:
|
||||||
- "X-Another-Header:Hohohohoho"
|
- "X-Another-Header:Hohohohoho"
|
||||||
servers:
|
servers:
|
||||||
@@ -151,15 +170,17 @@ myhost.mydomain.com:
|
|||||||
|
|
||||||
**This means:**
|
**This means:**
|
||||||
|
|
||||||
- Sticky sessions are disabled globally. This setting applies to all upstreams.
|
- Sticky sessions are disabled globally. This setting applies to all upstreams. If enabled all requests will be 301 redirected to HTTPS.
|
||||||
|
- HTTP to HTTPS redirect disabled globally, but can be overridden by `to_https` setting per upstream.
|
||||||
- Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`.
|
- Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`.
|
||||||
|
- Plain HTTP to `myhost.mydomain.com/foo` will get 301 redirect to configured TLS port of Aralez.
|
||||||
- Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`.
|
- Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`.
|
||||||
- SSL/TLS for upstreams is detected automatically, no need to set any config parameter.
|
- SSL/TLS for upstreams is detected automatically, no need to set any config parameter.
|
||||||
- Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS.
|
- Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS.
|
||||||
- Self signed certificates are silently accepted.
|
- Self signed certificates are silently accepted.
|
||||||
- Global headers (CORS for this case) will be injected to all upstreams
|
- Global headers (CORS for this case) will be injected to all upstreams
|
||||||
- Additional headers will be injected into the request for `myhost.mydomain.com`.
|
- Additional headers will be injected into the request for `myhost.mydomain.com`.
|
||||||
- You can choose any path, deep nested paths are supported, the best match is chosen.
|
- You can choose any path, deep nested paths are supported, the best match chosen.
|
||||||
- All requests to servers will require JWT token authentication (You can comment out the authorization to disable it),
|
- All requests to servers will require JWT token authentication (You can comment out the authorization to disable it),
|
||||||
- Firs parameter specifies the mechanism of authorisation `jwt`
|
- First parameter specifies the mechanism of authorisation `jwt`
|
||||||
- Second is the secret key for validating `jwt` tokens
|
- Second is the secret key for validating `jwt` tokens
|
||||||
@@ -185,10 +206,11 @@ To enable TLS for A proxy server: Currently only OpenSSL is supported, working o
|
|||||||
|
|
||||||
## 📡 Remote Config API
|
## 📡 Remote Config API
|
||||||
|
|
||||||
You can push new `upstreams.yaml` over HTTP to `config_address` (`:3000` by default). Useful for CI/CD automation or remote config updates.
|
Push new `upstreams.yaml` over HTTP to `config_address` (`:3000` by default). Useful for CI/CD automation or remote config updates.
|
||||||
|
URL parameter. `key=MASTERKEY` is required. `MASTERKEY` is the value of `master_key` in the `main.yaml`
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -XPOST --data-binary @./etc/upstreams.txt 127.0.0.1:3000/conf
|
curl -XPOST --data-binary @./etc/upstreams.txt 127.0.0.1:3000/conf?key=${MASTERKEY}
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -199,18 +221,18 @@ curl -XPOST --data-binary @./etc/upstreams.txt 127.0.0.1:3000/conf
|
|||||||
- Only one method can be active at a time.
|
- Only one method can be active at a time.
|
||||||
- `basic` : Standard HTTP Basic Authentication requests.
|
- `basic` : Standard HTTP Basic Authentication requests.
|
||||||
- `apikey` : Authentication via `x-api-key` header, which should match the value in config.
|
- `apikey` : Authentication via `x-api-key` header, which should match the value in config.
|
||||||
- `jwt`: JWT authentication implemented via `gazantoken=` url parameter. `/some/url?gazantoken=TOKEN`
|
- `jwt`: JWT authentication implemented via `araleztoken=` url parameter. `/some/url?araleztoken=TOKEN`
|
||||||
- `jwt`: JWT authentication implemented via `Authorization: Bearer <token>` header.
|
- `jwt`: JWT authentication implemented via `Authorization: Bearer <token>` header.
|
||||||
- To obtain JWT token, you should send **generate** request to built in api server's `/jwt` endpoint.
|
- To obtain a JWT token, you should send a **generate** request to the built-in API server's `/jwt` endpoint.
|
||||||
- `masterkey`: should match configured `masterkey` in `main.yaml` and `upstreams.yaml`.
|
- `master_key`: should match configured `masterkey` in `main.yaml` and `upstreams.yaml`.
|
||||||
- `owner` : Just a placeholder, can be anything.
|
- `owner` : Just a placeholder, can be anything.
|
||||||
- `valid` : Time in minutes during which the generated token will be valid.
|
- `valid` : Time in minutes during which the generated token will be valid.
|
||||||
|
|
||||||
**Example JWT token generateion request**
|
**Example JWT token generation request**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
PAYLOAD='{
|
PAYLOAD='{
|
||||||
"masterkey": "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774",
|
"master_key": "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774",
|
||||||
"owner": "valod",
|
"owner": "valod",
|
||||||
"valid": 10
|
"valid": 10
|
||||||
}'
|
}'
|
||||||
@@ -230,7 +252,7 @@ curl -H "Authorization: Bearer ${TOK}" -H 'Host: myip.mydomain.com' http://127.0
|
|||||||
With URL parameter (Very useful if you want to generate and share temporary links)
|
With URL parameter (Very useful if you want to generate and share temporary links)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -H 'Host: myip.mydomain.com' "http://127.0.0.1:6193/?gazantoken=${TOK}`"
|
curl -H 'Host: myip.mydomain.com' "http://127.0.0.1:6193/?araleztoken=${TOK}"
|
||||||
```
|
```
|
||||||
|
|
||||||
**Example Request with API Key**
|
**Example Request with API Key**
|
||||||
@@ -260,4 +282,164 @@ curl -u username:password -H 'Host: myip.mydomain.com' http://127.0.0.1:6193/
|
|||||||
- Transparent, fully automatic WebSocket upgrade support.
|
- Transparent, fully automatic WebSocket upgrade support.
|
||||||
- Transparent, fully automatic gRPC proxy.
|
- Transparent, fully automatic gRPC proxy.
|
||||||
- Sticky session support.
|
- Sticky session support.
|
||||||
- HTTP2 ready.
|
- HTTP2 ready.
|
||||||
|
|
||||||
|
📊 Why Choose Aralez? – Feature Comparison
|
||||||
|
|
||||||
|
| Feature | **Aralez** | **Nginx** | **HAProxy** | **Traefik** |
|
||||||
|
|----------------------------|----------------------------------------------------------------------|--------------------------|-------------------------|-----------------|
|
||||||
|
| **Hot Reload** | ✅ Yes (live, API/file) | ⚠️ Reloads config | ⚠️ Reloads config | ✅ Yes (dynamic) |
|
||||||
|
| **JWT Auth** | ✅ Built-in | ❌ External scripts | ❌ External Lua or agent | ⚠️ With plugins |
|
||||||
|
| **WebSocket Support** | ✅ Automatic | ⚠️ Manual config | ✅ Yes | ✅ Yes |
|
||||||
|
| **gRPC Support** | ✅ Automatic (no config) | ⚠️ Manual + HTTP/2 + TLS | ⚠️ Complex setup | ✅ Native |
|
||||||
|
| **TLS Termination** | ✅ Built-in (OpenSSL) | ✅ Yes | ✅ Yes | ✅ Yes |
|
||||||
|
| **TLS Upstream Detection** | ✅ Automatic | ❌ | ❌ | ❌ |
|
||||||
|
| **HTTP/2 Support** | ✅ Automatic | ⚠️ Requires extra config | ⚠️ Requires build flags | ✅ Native |
|
||||||
|
| **Sticky Sessions** | ✅ Cookie-based | ⚠️ In plus version only | ✅ | ✅ |
|
||||||
|
| **Prometheus Metrics** | ✅ [Built in](https://github.com/sadoyan/aralez/blob/main/METRICS.md) | ⚠️ With Lua or exporter | ⚠️ With external script | ✅ Native |
|
||||||
|
| **Built With** | 🦀 Rust | C | C | Go |
|
||||||
|
|
||||||
|
## 💡 Simple benchmark by [Oha](https://github.com/hatoo/oha)
|
||||||
|
|
||||||
|
⚠️ These benchmarks use :
|
||||||
|
|
||||||
|
- 3 async Rust echo servers on a local network with 1Gbit as upstreams.
|
||||||
|
- A dedicated server for running **Aralez**
|
||||||
|
- A dedicated server for running **Oha**
|
||||||
|
- The following upstreams configuration.
|
||||||
|
- 9 test URLs from simple `/` to nested up to 7 subpaths.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
myhost.mydomain.com:
|
||||||
|
paths:
|
||||||
|
"/":
|
||||||
|
to_https: false
|
||||||
|
headers:
|
||||||
|
- "X-Proxy-From:Aralez"
|
||||||
|
servers:
|
||||||
|
- "192.168.211.211:8000"
|
||||||
|
- "192.168.211.212:8000"
|
||||||
|
- "192.168.211.213:8000"
|
||||||
|
"/ping":
|
||||||
|
to_https: false
|
||||||
|
headers:
|
||||||
|
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||||
|
- "X-Proxy-From:Aralez"
|
||||||
|
servers:
|
||||||
|
- "192.168.211.211:8000"
|
||||||
|
- "192.168.211.212:8000"
|
||||||
|
```
|
||||||
|
|
||||||
|
## 💡 Results reflect synthetic performance under optimal conditions.
|
||||||
|
|
||||||
|
- CPU : Intel(R) Xeon(R) CPU E3-1270 v6 @ 3.80GHz
|
||||||
|
- 300 : simultaneous connections
|
||||||
|
- Duration : 10 Minutes
|
||||||
|
- Binary : aralez-x86_64-glibc
|
||||||
|
|
||||||
|
```
|
||||||
|
Summary:
|
||||||
|
Success rate: 100.00%
|
||||||
|
Total: 600.0027 secs
|
||||||
|
Slowest: 0.2138 secs
|
||||||
|
Fastest: 0.0002 secs
|
||||||
|
Average: 0.0023 secs
|
||||||
|
Requests/sec: 129777.3838
|
||||||
|
|
||||||
|
Total data: 0 B
|
||||||
|
Size/request: 0 B
|
||||||
|
Size/sec: 0 B
|
||||||
|
|
||||||
|
Response time histogram:
|
||||||
|
0.000 [1] |
|
||||||
|
0.022 [77668026] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
|
||||||
|
0.043 [190362] |
|
||||||
|
0.064 [7908] |
|
||||||
|
0.086 [319] |
|
||||||
|
0.107 [4] |
|
||||||
|
0.128 [0] |
|
||||||
|
0.150 [0] |
|
||||||
|
0.171 [0] |
|
||||||
|
0.192 [0] |
|
||||||
|
0.214 [4] |
|
||||||
|
|
||||||
|
Response time distribution:
|
||||||
|
10.00% in 0.0012 secs
|
||||||
|
25.00% in 0.0016 secs
|
||||||
|
50.00% in 0.0020 secs
|
||||||
|
75.00% in 0.0026 secs
|
||||||
|
90.00% in 0.0033 secs
|
||||||
|
95.00% in 0.0040 secs
|
||||||
|
99.00% in 0.0078 secs
|
||||||
|
99.90% in 0.0278 secs
|
||||||
|
99.99% in 0.0434 secs
|
||||||
|
|
||||||
|
|
||||||
|
Details (average, fastest, slowest):
|
||||||
|
DNS+dialup: 0.0161 secs, 0.0002 secs, 0.0316 secs
|
||||||
|
DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
|
||||||
|
|
||||||
|
Status code distribution:
|
||||||
|
[200] 77866624 responses
|
||||||
|
|
||||||
|
Error distribution:
|
||||||
|
[158] aborted due to deadline
|
||||||
|
```
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
- CPU : Intel(R) Xeon(R) CPU E3-1270 v6 @ 3.80GHz
|
||||||
|
- 300 : simultaneous connections
|
||||||
|
- Duration : 10 Minutes
|
||||||
|
- Binary : aralez-x86_64-musl
|
||||||
|
|
||||||
|
```
|
||||||
|
Summary:
|
||||||
|
Success rate: 100.00%
|
||||||
|
Total: 600.0021 secs
|
||||||
|
Slowest: 0.2182 secs
|
||||||
|
Fastest: 0.0002 secs
|
||||||
|
Average: 0.0024 secs
|
||||||
|
Requests/sec: 123870.5820
|
||||||
|
|
||||||
|
Total data: 0 B
|
||||||
|
Size/request: 0 B
|
||||||
|
Size/sec: 0 B
|
||||||
|
|
||||||
|
Response time histogram:
|
||||||
|
0.000 [1] |
|
||||||
|
0.022 [74254679] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
|
||||||
|
0.044 [61400] |
|
||||||
|
0.066 [5911] |
|
||||||
|
0.087 [385] |
|
||||||
|
0.109 [0] |
|
||||||
|
0.131 [0] |
|
||||||
|
0.153 [0] |
|
||||||
|
0.175 [0] |
|
||||||
|
0.196 [0] |
|
||||||
|
0.218 [1] |
|
||||||
|
|
||||||
|
Response time distribution:
|
||||||
|
10.00% in 0.0012 secs
|
||||||
|
25.00% in 0.0016 secs
|
||||||
|
50.00% in 0.0021 secs
|
||||||
|
75.00% in 0.0028 secs
|
||||||
|
90.00% in 0.0037 secs
|
||||||
|
95.00% in 0.0045 secs
|
||||||
|
99.00% in 0.0077 secs
|
||||||
|
99.90% in 0.0214 secs
|
||||||
|
99.99% in 0.0424 secs
|
||||||
|
|
||||||
|
|
||||||
|
Details (average, fastest, slowest):
|
||||||
|
DNS+dialup: 0.0066 secs, 0.0002 secs, 0.0210 secs
|
||||||
|
DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
|
||||||
|
|
||||||
|
Status code distribution:
|
||||||
|
[200] 74322377 responses
|
||||||
|
|
||||||
|
Error distribution:
|
||||||
|
[228] aborted due to deadline
|
||||||
|
```
|
||||||
|
|
||||||
|

|
||||||
@@ -1,17 +1,19 @@
|
|||||||
# Main configuration file , applied on startup
|
# Main configuration file , applied on startup
|
||||||
threads: 12 # Nubber of daemon threads default setting
|
threads: 12 # Number of daemon threads, default setting
|
||||||
#user: pastor # Username for running gazan after dropping root privileges, requires program to start as root
|
#user: pastor # Username for running aralez after dropping root privileges, requires program to start as root
|
||||||
#group: pastor # Group for running gazan after dropping root privileges, requires program to start as root
|
#group: pastor # Group for running aralez after dropping root privileges, requires program to start as root
|
||||||
daemon: false # Run in background
|
daemon: false # Run in background
|
||||||
upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections
|
upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections
|
||||||
pid_file: /tmp/gazan.pid # Path to PID file
|
pid_file: /tmp/aralez.pid # Path to PID file
|
||||||
error_log: /tmp/gazan_err.log # Path to error log
|
error_log: /tmp/aralez_err.log # Path to error log
|
||||||
upgrade_sock: /tmp/gazan.sock # Path to socket file
|
upgrade_sock: /tmp/aralez.sock # Path to socket file
|
||||||
config_address: 0.0.0.0:3000 # HTTP API address for pushing upstreams.yaml from remote location
|
config_address: 0.0.0.0:3000 # HTTP API address for pushing upstreams.yaml from remote location
|
||||||
|
config_tls_address: 0.0.0.0:3001 # HTTP TLS API address for pushing upstreams.yaml from remote location
|
||||||
|
config_tls_certificate: etc/server.crt # Mandatory if config_tls_address is set
|
||||||
|
config_tls_key_file: etc/key.pem # Mandatory if config_tls_address is set
|
||||||
proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address
|
proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address
|
||||||
proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address
|
proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address
|
||||||
tls_certificate: etc/server.crt # Mandatory if proxy_address_tls is set
|
proxy_certificates: etc/yoyo # Mandatory if proxy_address_tls set, should contain certificate and key files strictly in a format {NAME}.crt, {NAME}.key.
|
||||||
tls_key_file: etc/key.pem # Mandatory if proxy_address_tls is set
|
|
||||||
upstreams_conf: etc/upstreams.yaml # the location of upstreams file
|
upstreams_conf: etc/upstreams.yaml # the location of upstreams file
|
||||||
log_level: info # info, warn, error, debug, trace, off
|
log_level: info # info, warn, error, debug, trace, off
|
||||||
hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE
|
hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ upstreams:
|
|||||||
"/":
|
"/":
|
||||||
ssl: false
|
ssl: false
|
||||||
headers:
|
headers:
|
||||||
- "X-Proxy-From:Gazan"
|
- "X-Proxy-From:Aralez"
|
||||||
servers:
|
servers:
|
||||||
- "192.168.221.213:8000"
|
- "192.168.221.213:8000"
|
||||||
- "192.168.221.214:8000"
|
- "192.168.221.214:8000"
|
||||||
|
|||||||
@@ -1,37 +1,34 @@
|
|||||||
# The file is under watch and hot reload , changes are applied immediately, no need to restart or reload
|
# The file under watch and hot reload, changes are applied immediately, no need to restart or reload.
|
||||||
provider: "file" # consul
|
provider: "file" # consul
|
||||||
stickysessions: true
|
sticky_sessions: false
|
||||||
globals:
|
to_ssl: false
|
||||||
headers: # Global headers, appended for all upstreams and all paths.
|
headers:
|
||||||
- "Access-Control-Allow-Origin:*"
|
- "Access-Control-Allow-Origin:*"
|
||||||
- "Access-Control-Allow-Methods:POST, GET, OPTIONS"
|
- "Access-Control-Allow-Methods:POST, GET, OPTIONS"
|
||||||
- "Access-Control-Max-Age:86400"
|
- "Access-Control-Max-Age:86400"
|
||||||
- "X-Custom-Header:Something Special"
|
- "X-Custom-Header:Something Special"
|
||||||
# authorization: # Optional, only one of auth methods below can be active at a time
|
authorization:
|
||||||
# - "basic"
|
type: "jwt"
|
||||||
# - "gazan:Gazanpass1234"
|
creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
|
||||||
# - "apikey"
|
# type: "basic"
|
||||||
# - "5a28cc4c-ce10-4ff1-824e-743c38835f5c"
|
# creds: "user:Passw0rd"
|
||||||
# - "jwt"
|
# type: "apikey"
|
||||||
# - "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
|
# creds: "5ecbf799-1343-4e94-a9b5-e278af5cd313-56b45249-1839-4008-a450-a60dc76d2bae"
|
||||||
consul: # If the provider is consul. Otherwise ignored
|
consul: # If the provider is consul. Otherwise, ignored.
|
||||||
servers:
|
servers:
|
||||||
- "http://master1:8500"
|
- "http://master1:8500"
|
||||||
- "http://192.168.22.1:8500"
|
- "http://192.168.22.1:8500"
|
||||||
- "http://master1.digitai.local:8500"
|
- "http://master1.foo.local:8500"
|
||||||
services: # proxy: The hostname to access proxy server, real : The real service name in Consul
|
services: # proxy: The hostname to access the proxy server, real : The real service name in Consul database.
|
||||||
- proxy: "proxy-frontend-dev-frontend-srv"
|
- proxy: "proxy-frontend-dev-frontend-srv"
|
||||||
real: "frontend-dev-frontend-srv"
|
real: "frontend-dev-frontend-srv"
|
||||||
# - proxy: "proxy-gateway-test-gateway-srv"
|
|
||||||
# real: "gateway-test-gateway-srv"
|
|
||||||
# - proxy: "proxy-backoffice-dev-backoffice-srv"
|
|
||||||
# real: "backoffice-dev-backoffice-srv"
|
|
||||||
token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled
|
token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled
|
||||||
upstreams: # If provider is files. Otherwise ignored
|
upstreams:
|
||||||
myip.netangels.net: # Hostname, or header host to access the upstream
|
myip.mydomain.com:
|
||||||
paths: # URL path(s) for current upstream, closest match wins
|
paths:
|
||||||
"/":
|
"/":
|
||||||
headers: # Custom headers, set only for this Host and Path
|
to_https: false
|
||||||
|
headers:
|
||||||
- "X-Proxy-From:Gazan"
|
- "X-Proxy-From:Gazan"
|
||||||
servers: # List of upstreams HOST:PORT
|
servers: # List of upstreams HOST:PORT
|
||||||
- "127.0.0.1:8000"
|
- "127.0.0.1:8000"
|
||||||
@@ -39,6 +36,7 @@ upstreams: # If provider is files. Otherwise ignored
|
|||||||
- "127.0.0.3:8000"
|
- "127.0.0.3:8000"
|
||||||
- "127.0.0.4:8000"
|
- "127.0.0.4:8000"
|
||||||
"/ping":
|
"/ping":
|
||||||
|
to_https: true
|
||||||
headers:
|
headers:
|
||||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||||
- "X-Proxy-From:Gazan"
|
- "X-Proxy-From:Gazan"
|
||||||
@@ -48,7 +46,7 @@ upstreams: # If provider is files. Otherwise ignored
|
|||||||
"/draw":
|
"/draw":
|
||||||
servers:
|
servers:
|
||||||
- "192.168.1.1:8000"
|
- "192.168.1.1:8000"
|
||||||
polo.netangels.net:
|
polo.mydomain.com:
|
||||||
paths:
|
paths:
|
||||||
"/":
|
"/":
|
||||||
headers:
|
headers:
|
||||||
@@ -59,37 +57,4 @@ upstreams: # If provider is files. Otherwise ignored
|
|||||||
- "127.0.0.1:8000"
|
- "127.0.0.1:8000"
|
||||||
- "127.0.0.2:8000"
|
- "127.0.0.2:8000"
|
||||||
- "127.0.0.3:8000"
|
- "127.0.0.3:8000"
|
||||||
- "127.0.0.4:8000"
|
- "127.0.0.4:8000"
|
||||||
glop.netangels.net:
|
|
||||||
paths:
|
|
||||||
"/":
|
|
||||||
headers:
|
|
||||||
- "X-Hopar-From:Hopaaaaaaaaaaaar"
|
|
||||||
servers:
|
|
||||||
- "192.168.1.10:8000"
|
|
||||||
- "192.168.1.1:8000"
|
|
||||||
apt.netangels.net:
|
|
||||||
paths:
|
|
||||||
"/":
|
|
||||||
servers:
|
|
||||||
- "apt.netangels.net:443"
|
|
||||||
test.netangels.net:
|
|
||||||
paths:
|
|
||||||
"/":
|
|
||||||
servers:
|
|
||||||
- "myip.netangels.net:80"
|
|
||||||
127.0.0.1:
|
|
||||||
paths:
|
|
||||||
"/":
|
|
||||||
servers:
|
|
||||||
- "192.168.1.5:8080"
|
|
||||||
127.0.0.2:
|
|
||||||
paths:
|
|
||||||
"/":
|
|
||||||
servers:
|
|
||||||
- "10.0.55.171:3000"
|
|
||||||
localpost:
|
|
||||||
paths:
|
|
||||||
"/":
|
|
||||||
servers:
|
|
||||||
- "127.0.0.1:9000"
|
|
||||||
@@ -4,6 +4,8 @@ pub mod discovery;
|
|||||||
mod filewatch;
|
mod filewatch;
|
||||||
pub mod healthcheck;
|
pub mod healthcheck;
|
||||||
pub mod jwt;
|
pub mod jwt;
|
||||||
|
pub mod metrics;
|
||||||
pub mod parceyaml;
|
pub mod parceyaml;
|
||||||
pub mod structs;
|
pub mod structs;
|
||||||
|
pub mod tls;
|
||||||
pub mod tools;
|
pub mod tools;
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ impl AuthValidator for ApiKeyAuth<'_> {
|
|||||||
impl AuthValidator for JwtAuth<'_> {
|
impl AuthValidator for JwtAuth<'_> {
|
||||||
fn validate(&self, session: &Session) -> bool {
|
fn validate(&self, session: &Session) -> bool {
|
||||||
let jwtsecret = self.0;
|
let jwtsecret = self.0;
|
||||||
if let Some(tok) = get_query_param(session, "gazantoken") {
|
if let Some(tok) = get_query_param(session, "araleztoken") {
|
||||||
return check_jwt(tok.as_str(), jwtsecret);
|
return check_jwt(tok.as_str(), jwtsecret);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -109,7 +109,7 @@ async fn consul_request(url: String, whitelist: Option<Vec<ServiceMapping>>, tok
|
|||||||
Some(upstreams)
|
Some(upstreams)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<(String, u16, bool, bool)>, AtomicUsize)>> {
|
async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<(String, u16, bool, bool, bool)>, AtomicUsize)>> {
|
||||||
let client = reqwest::Client::new();
|
let client = reqwest::Client::new();
|
||||||
let mut headers = HeaderMap::new();
|
let mut headers = HeaderMap::new();
|
||||||
if let Some(token) = token {
|
if let Some(token) = token {
|
||||||
@@ -118,7 +118,7 @@ async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<Strin
|
|||||||
let to = Duration::from_secs(1);
|
let to = Duration::from_secs(1);
|
||||||
let u = client.get(url).timeout(to).send();
|
let u = client.get(url).timeout(to).send();
|
||||||
let mut values = Vec::new();
|
let mut values = Vec::new();
|
||||||
let upstreams: DashMap<String, (Vec<(String, u16, bool, bool)>, AtomicUsize)> = DashMap::new();
|
let upstreams: DashMap<String, (Vec<(String, u16, bool, bool, bool)>, AtomicUsize)> = DashMap::new();
|
||||||
match u.await {
|
match u.await {
|
||||||
Ok(r) => {
|
Ok(r) => {
|
||||||
let jason = r.json::<Vec<Service>>().await;
|
let jason = r.json::<Vec<Service>>().await;
|
||||||
@@ -127,7 +127,7 @@ async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<Strin
|
|||||||
for service in whitelist {
|
for service in whitelist {
|
||||||
let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
|
let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
|
||||||
let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
|
let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
|
||||||
let to_add = (addr, prt, false, false);
|
let to_add = (addr, prt, false, false, false);
|
||||||
values.push(to_add);
|
values.push(to_add);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,6 +11,9 @@ pub struct FromFileProvider {
|
|||||||
pub struct APIUpstreamProvider {
|
pub struct APIUpstreamProvider {
|
||||||
pub address: String,
|
pub address: String,
|
||||||
pub masterkey: String,
|
pub masterkey: String,
|
||||||
|
pub tls_address: Option<String>,
|
||||||
|
pub tls_certificate: Option<String>,
|
||||||
|
pub tls_key_file: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct ConsulProvider {
|
pub struct ConsulProvider {
|
||||||
@@ -25,7 +28,7 @@ pub trait Discovery {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl Discovery for APIUpstreamProvider {
|
impl Discovery for APIUpstreamProvider {
|
||||||
async fn start(&self, toreturn: Sender<Configuration>) {
|
async fn start(&self, toreturn: Sender<Configuration>) {
|
||||||
webserver::run_server(self.address.clone(), self.masterkey.clone(), toreturn).await;
|
webserver::run_server(self, toreturn).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -20,13 +20,13 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
|
|||||||
for val in fclone.iter() {
|
for val in fclone.iter() {
|
||||||
let host = val.key();
|
let host = val.key();
|
||||||
let inner = DashMap::new();
|
let inner = DashMap::new();
|
||||||
let mut _scheme: (String, u16, bool, bool) = ("".to_string(), 0, false, false);
|
let mut _scheme: (String, u16, bool, bool, bool) = ("".to_string(), 0, false, false, false);
|
||||||
for path_entry in val.value().iter() {
|
for path_entry in val.value().iter() {
|
||||||
// let inner = DashMap::new();
|
// let inner = DashMap::new();
|
||||||
let path = path_entry.key();
|
let path = path_entry.key();
|
||||||
let mut innervec= Vec::new();
|
let mut innervec= Vec::new();
|
||||||
for k in path_entry.value().0 .iter().enumerate() {
|
for k in path_entry.value().0 .iter().enumerate() {
|
||||||
let (ip, port, _ssl, _version) = k.1;
|
let (ip, port, _ssl, _version, _redir) = k.1;
|
||||||
let mut _link = String::new();
|
let mut _link = String::new();
|
||||||
let tls = detect_tls(ip, port).await;
|
let tls = detect_tls(ip, port).await;
|
||||||
let mut is_h2 = false;
|
let mut is_h2 = false;
|
||||||
@@ -52,13 +52,13 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
|
|||||||
// }else {
|
// }else {
|
||||||
// _scheme = (ip.to_string(), *port, false);
|
// _scheme = (ip.to_string(), *port, false);
|
||||||
// }
|
// }
|
||||||
_scheme = (ip.to_string(), *port, tls.0, is_h2);
|
_scheme = (ip.to_string(), *port, tls.0, is_h2, *_redir);
|
||||||
// let link = format!("{}{}:{}{}", _pref, ip, port, path);
|
// let link = format!("{}{}:{}{}", _pref, ip, port, path);
|
||||||
let resp = http_request(_link.as_str(), params.0, "").await;
|
let resp = http_request(_link.as_str(), params.0, "").await;
|
||||||
match resp.0 {
|
match resp.0 {
|
||||||
true => {
|
true => {
|
||||||
if resp.1 {
|
if resp.1 {
|
||||||
_scheme = (ip.to_string(), *port, tls.0, true);
|
_scheme = (ip.to_string(), *port, tls.0, true, *_redir);
|
||||||
}
|
}
|
||||||
innervec.push(_scheme.clone());
|
innervec.push(_scheme.clone());
|
||||||
}
|
}
|
||||||
@@ -75,7 +75,8 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
|
|||||||
if first_run == 1 {
|
if first_run == 1 {
|
||||||
info!("Performing initial hatchecks and upstreams ssl detection");
|
info!("Performing initial hatchecks and upstreams ssl detection");
|
||||||
clone_idmap_into(&totest, &idlist);
|
clone_idmap_into(&totest, &idlist);
|
||||||
info!("Gazan is up and ready to serve requests");
|
info!("Aralez is up and ready to serve requests, the upstreams list is:");
|
||||||
|
print_upstreams(&totest)
|
||||||
}
|
}
|
||||||
|
|
||||||
first_run+=1;
|
first_run+=1;
|
||||||
|
|||||||
87
src/utils/metrics.rs
Normal file
87
src/utils/metrics.rs
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
use pingora_http::Version;
|
||||||
|
use prometheus::{register_histogram, register_int_counter, register_int_counter_vec, Histogram, IntCounter, IntCounterVec};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
pub struct MetricTypes {
|
||||||
|
pub method: String,
|
||||||
|
pub code: String,
|
||||||
|
pub latency: Duration,
|
||||||
|
pub version: Version,
|
||||||
|
}
|
||||||
|
lazy_static::lazy_static! {
|
||||||
|
pub static ref REQUEST_COUNT: IntCounter = register_int_counter!(
|
||||||
|
"aralez_requests_total",
|
||||||
|
"Total number of requests handled by Aralez"
|
||||||
|
).unwrap();
|
||||||
|
pub static ref RESPONSE_CODES: IntCounterVec = register_int_counter_vec!(
|
||||||
|
"aralez_responses_total",
|
||||||
|
"Responses grouped by status code",
|
||||||
|
&["status"]
|
||||||
|
).unwrap();
|
||||||
|
pub static ref REQUEST_LATENCY: Histogram = register_histogram!(
|
||||||
|
"aralez_request_latency_seconds",
|
||||||
|
"Request latency in seconds",
|
||||||
|
vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0]
|
||||||
|
).unwrap();
|
||||||
|
pub static ref RESPONSE_LATENCY: Histogram = register_histogram!(
|
||||||
|
"aralez_response_latency_seconds",
|
||||||
|
"Response latency in seconds",
|
||||||
|
vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0]
|
||||||
|
).unwrap();
|
||||||
|
pub static ref REQUESTS_BY_METHOD: IntCounterVec = register_int_counter_vec!(
|
||||||
|
"aralez_requests_by_method_total",
|
||||||
|
"Number of requests by HTTP method",
|
||||||
|
&["method"]
|
||||||
|
).unwrap();
|
||||||
|
pub static ref REQUESTS_BY_VERSION: IntCounterVec = register_int_counter_vec!(
|
||||||
|
"aralez_requests_by_version_total",
|
||||||
|
"Number of requests by HTTP versions",
|
||||||
|
&["version"]
|
||||||
|
).unwrap();
|
||||||
|
pub static ref ERROR_COUNT: IntCounter = register_int_counter!(
|
||||||
|
"aralez_errors_total",
|
||||||
|
"Total number of errors"
|
||||||
|
).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn calc_metrics(metric_types: &MetricTypes) {
|
||||||
|
REQUEST_COUNT.inc();
|
||||||
|
let timer = REQUEST_LATENCY.start_timer();
|
||||||
|
timer.observe_duration();
|
||||||
|
|
||||||
|
let version_str = match &metric_types.version {
|
||||||
|
&Version::HTTP_11 => "HTTP/1.1",
|
||||||
|
&Version::HTTP_2 => "HTTP/2.0",
|
||||||
|
&Version::HTTP_3 => "HTTP/3.0",
|
||||||
|
&Version::HTTP_10 => "HTTP/1.0",
|
||||||
|
_ => "Unknown",
|
||||||
|
};
|
||||||
|
REQUESTS_BY_VERSION.with_label_values(&[&version_str]).inc();
|
||||||
|
RESPONSE_CODES.with_label_values(&[&metric_types.code.to_string()]).inc();
|
||||||
|
REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc();
|
||||||
|
RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64());
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
pub fn calc_metrics(method: String, code: u16, latency: Duration) {
|
||||||
|
REQUEST_COUNT.inc();
|
||||||
|
let timer = REQUEST_LATENCY.start_timer();
|
||||||
|
timer.observe_duration();
|
||||||
|
RESPONSE_CODES.with_label_values(&[&code.to_string()]).inc();
|
||||||
|
REQUESTS_BY_METHOD.with_label_values(&[&method]).inc();
|
||||||
|
RESPONSE_LATENCY.observe(latency.as_secs_f64());
|
||||||
|
}
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut interval = tokio::time::interval(std::time::Duration::from_secs(5));
|
||||||
|
loop {
|
||||||
|
interval.tick().await;
|
||||||
|
|
||||||
|
// read Pingora stats
|
||||||
|
let stats = pingora.get_stats();
|
||||||
|
|
||||||
|
// update Prometheus metrics accordingly
|
||||||
|
REQUEST_COUNT.set(stats.requests_total);
|
||||||
|
// ... etc
|
||||||
|
}
|
||||||
|
});
|
||||||
|
*/
|
||||||
@@ -13,7 +13,8 @@ pub fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
|
|||||||
consul: None,
|
consul: None,
|
||||||
typecfg: "".to_string(),
|
typecfg: "".to_string(),
|
||||||
extraparams: Extraparams {
|
extraparams: Extraparams {
|
||||||
stickysessions: false,
|
sticky_sessions: false,
|
||||||
|
to_https: None,
|
||||||
authentication: DashMap::new(),
|
authentication: DashMap::new(),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@@ -46,25 +47,27 @@ pub fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
|
|||||||
Ok(parsed) => {
|
Ok(parsed) => {
|
||||||
let global_headers = DashMap::new();
|
let global_headers = DashMap::new();
|
||||||
let mut hl = Vec::new();
|
let mut hl = Vec::new();
|
||||||
if let Some(globals) = &parsed.globals {
|
if let Some(headers) = &parsed.headers {
|
||||||
for headers in globals.get("headers").iter().by_ref() {
|
for header in headers.iter() {
|
||||||
for header in headers.iter() {
|
if let Some((key, val)) = header.split_once(':') {
|
||||||
if let Some((key, val)) = header.split_once(':') {
|
hl.push((key.to_string(), val.to_string()));
|
||||||
hl.push((key.to_string(), val.to_string()));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
global_headers.insert("/".to_string(), hl);
|
global_headers.insert("/".to_string(), hl);
|
||||||
toreturn.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
|
toreturn.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
|
||||||
toreturn.extraparams.stickysessions = parsed.stickysessions;
|
|
||||||
let cfg = DashMap::new();
|
toreturn.extraparams.sticky_sessions = parsed.sticky_sessions;
|
||||||
if let Some(k) = globals.get("authorization") {
|
toreturn.extraparams.to_https = parsed.to_https;
|
||||||
cfg.insert("authorization".to_string(), k.to_owned());
|
|
||||||
toreturn.extraparams.authentication = cfg;
|
|
||||||
} else {
|
|
||||||
toreturn.extraparams.authentication = DashMap::new();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
if let Some(auth) = &parsed.authorization {
|
||||||
|
let name = auth.get("type").unwrap().to_string();
|
||||||
|
let creds = auth.get("creds").unwrap().to_string();
|
||||||
|
let val: Vec<String> = vec![name, creds];
|
||||||
|
toreturn.extraparams.authentication.insert("authorization".to_string(), val);
|
||||||
|
} else {
|
||||||
|
toreturn.extraparams.authentication = DashMap::new();
|
||||||
|
}
|
||||||
|
|
||||||
match parsed.provider.as_str() {
|
match parsed.provider.as_str() {
|
||||||
"file" => {
|
"file" => {
|
||||||
toreturn.typecfg = "file".to_string();
|
toreturn.typecfg = "file".to_string();
|
||||||
@@ -86,8 +89,9 @@ pub fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
|
|||||||
for server in path_config.servers {
|
for server in path_config.servers {
|
||||||
if let Some((ip, port_str)) = server.split_once(':') {
|
if let Some((ip, port_str)) = server.split_once(':') {
|
||||||
if let Ok(port) = port_str.parse::<u16>() {
|
if let Ok(port) = port_str.parse::<u16>() {
|
||||||
// server_list.push((ip.to_string(), port, path_config.ssl));
|
// let to_https = matches!(path_config.to_https, Some(true));
|
||||||
server_list.push((ip.to_string(), port, true, false));
|
let to_https = path_config.to_https.unwrap_or(false);
|
||||||
|
server_list.push((ip.to_string(), port, true, false, to_https));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -139,5 +143,22 @@ pub fn parce_main_config(path: &str) -> AppConfig {
|
|||||||
cfo.local_server = Option::from((ip.to_string(), port));
|
cfo.local_server = Option::from((ip.to_string(), port));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() {
|
||||||
|
if let Some((_, port_str)) = tlsport_cfg.split_once(':') {
|
||||||
|
if let Ok(port) = port_str.parse::<u16>() {
|
||||||
|
cfo.proxy_port_tls = Some(port);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
// match cfo.config_tls_address.clone() {
|
||||||
|
// Some(tls_cert) => {
|
||||||
|
// if let Some((ip, port_str)) = tls_cert.split_once(':') {
|
||||||
|
// if let Ok(port) = port_str.parse::<u16>() {
|
||||||
|
// cfo.local_tls_server = Option::from((ip.to_string(), port));
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// None => {}
|
||||||
|
// };
|
||||||
cfo
|
cfo
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,8 +3,9 @@ use serde::{Deserialize, Serialize};
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::atomic::AtomicUsize;
|
use std::sync::atomic::AtomicUsize;
|
||||||
|
|
||||||
pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<(String, u16, bool, bool)>, AtomicUsize)>>;
|
pub type InnerMap = (String, u16, bool, bool, bool);
|
||||||
pub type UpstreamsIdMap = DashMap<String, (String, u16, bool, bool)>;
|
pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<InnerMap>, AtomicUsize)>>;
|
||||||
|
pub type UpstreamsIdMap = DashMap<String, InnerMap>;
|
||||||
pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>;
|
pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
@@ -15,7 +16,8 @@ pub struct ServiceMapping {
|
|||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct Extraparams {
|
pub struct Extraparams {
|
||||||
pub stickysessions: bool,
|
pub sticky_sessions: bool,
|
||||||
|
pub to_https: Option<bool>,
|
||||||
pub authentication: DashMap<String, Vec<String>>,
|
pub authentication: DashMap<String, Vec<String>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -28,9 +30,12 @@ pub struct Consul {
|
|||||||
#[derive(Debug, Serialize, Deserialize)]
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
pub provider: String,
|
pub provider: String,
|
||||||
pub stickysessions: bool,
|
pub sticky_sessions: bool,
|
||||||
|
pub to_https: Option<bool>,
|
||||||
pub upstreams: Option<HashMap<String, HostConfig>>,
|
pub upstreams: Option<HashMap<String, HostConfig>>,
|
||||||
pub globals: Option<HashMap<String, Vec<String>>>,
|
pub globals: Option<HashMap<String, Vec<String>>>,
|
||||||
|
pub headers: Option<Vec<String>>,
|
||||||
|
pub authorization: Option<HashMap<String, String>>,
|
||||||
pub consul: Option<Consul>,
|
pub consul: Option<Consul>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -42,6 +47,7 @@ pub struct HostConfig {
|
|||||||
#[derive(Debug, Serialize, Deserialize)]
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
pub struct PathConfig {
|
pub struct PathConfig {
|
||||||
pub servers: Vec<String>,
|
pub servers: Vec<String>,
|
||||||
|
pub to_https: Option<bool>,
|
||||||
pub headers: Option<Vec<String>>,
|
pub headers: Option<Vec<String>>,
|
||||||
}
|
}
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
@@ -59,11 +65,22 @@ pub struct AppConfig {
|
|||||||
pub hc_method: String,
|
pub hc_method: String,
|
||||||
pub upstreams_conf: String,
|
pub upstreams_conf: String,
|
||||||
pub log_level: String,
|
pub log_level: String,
|
||||||
|
pub master_key: String,
|
||||||
pub config_address: String,
|
pub config_address: String,
|
||||||
pub proxy_address_http: String,
|
pub proxy_address_http: String,
|
||||||
pub master_key: String,
|
pub config_tls_address: Option<String>,
|
||||||
|
pub config_tls_certificate: Option<String>,
|
||||||
|
pub config_tls_key_file: Option<String>,
|
||||||
pub proxy_address_tls: Option<String>,
|
pub proxy_address_tls: Option<String>,
|
||||||
pub tls_certificate: Option<String>,
|
pub proxy_port_tls: Option<u16>,
|
||||||
pub tls_key_file: Option<String>,
|
// pub tls_certificate: Option<String>,
|
||||||
|
// pub tls_key_file: Option<String>,
|
||||||
pub local_server: Option<(String, u16)>,
|
pub local_server: Option<(String, u16)>,
|
||||||
|
pub proxy_certificates: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// #[derive(Debug)]
|
||||||
|
// pub struct CertificateMove {
|
||||||
|
// pub cert_tx: Sender<CertificateConfig>,
|
||||||
|
// pub cert_rx: Receiver<CertificateConfig>,
|
||||||
|
// }
|
||||||
|
|||||||
193
src/utils/tls.rs
Normal file
193
src/utils/tls.rs
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
use dashmap::DashMap;
|
||||||
|
use log::error;
|
||||||
|
use pingora::tls::ssl::{select_next_proto, AlpnError, NameType, SniError, SslAlert, SslContext, SslFiletype, SslMethod, SslRef};
|
||||||
|
use rustls_pemfile::{read_one, Item};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use std::collections::HashSet;
|
||||||
|
use std::fs::File;
|
||||||
|
use std::io::BufReader;
|
||||||
|
// use tokio::time::Instant;
|
||||||
|
use x509_parser::extensions::GeneralName;
|
||||||
|
use x509_parser::nom::Err as NomErr;
|
||||||
|
use x509_parser::prelude::*;
|
||||||
|
|
||||||
|
#[derive(Clone, Deserialize, Debug)]
|
||||||
|
pub struct CertificateConfig {
|
||||||
|
pub cert_path: String,
|
||||||
|
pub key_path: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct CertificateInfo {
|
||||||
|
common_names: Vec<String>,
|
||||||
|
alt_names: Vec<String>,
|
||||||
|
ssl_context: SslContext,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
cert_path: String, // Only used for logging
|
||||||
|
#[allow(dead_code)]
|
||||||
|
key_path: String, // Only used for logging
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Certificates {
|
||||||
|
configs: Vec<CertificateInfo>,
|
||||||
|
name_map: DashMap<String, SslContext>,
|
||||||
|
pub default_cert_path: String,
|
||||||
|
pub default_key_path: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Certificates {
|
||||||
|
pub fn new(configs: &Vec<CertificateConfig>) -> Option<Self> {
|
||||||
|
let default_cert = configs.first().expect("At least one TLS certificate required");
|
||||||
|
let mut cert_infos = Vec::new();
|
||||||
|
let name_map: DashMap<String, SslContext> = DashMap::new();
|
||||||
|
for config in configs {
|
||||||
|
let cert_info = load_cert_info(&config.cert_path, &config.key_path);
|
||||||
|
match cert_info {
|
||||||
|
Some(cert) => {
|
||||||
|
for name in &cert.common_names {
|
||||||
|
name_map.insert(name.clone(), cert.ssl_context.clone());
|
||||||
|
}
|
||||||
|
for name in &cert.alt_names {
|
||||||
|
name_map.insert(name.clone(), cert.ssl_context.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
cert_infos.push(cert)
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
error!("Unable to load certificate info | public: {}, private: {}", &config.cert_path, &config.key_path);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Some(Self {
|
||||||
|
name_map: name_map,
|
||||||
|
configs: cert_infos,
|
||||||
|
default_cert_path: default_cert.cert_path.clone(),
|
||||||
|
default_key_path: default_cert.key_path.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn find_ssl_context(&self, server_name: &str) -> Option<SslContext> {
|
||||||
|
if let Some(ctx) = self.name_map.get(server_name) {
|
||||||
|
return Some(ctx.clone());
|
||||||
|
}
|
||||||
|
for config in &self.configs {
|
||||||
|
for name in &config.common_names {
|
||||||
|
if name.starts_with("*.") && server_name.ends_with(&name[1..]) {
|
||||||
|
return Some(config.ssl_context.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for name in &config.alt_names {
|
||||||
|
if name.starts_with("*.") && server_name.ends_with(&name[1..]) {
|
||||||
|
return Some(config.ssl_context.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn server_name_callback(&self, ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert) -> Result<(), SniError> {
|
||||||
|
let server_name = ssl_ref.servername(NameType::HOST_NAME);
|
||||||
|
log::debug!("TLS connect: server_name = {:?}, ssl_ref = {:?}, ssl_alert = {:?}", server_name, ssl_ref, ssl_alert);
|
||||||
|
// let start_time = Instant::now();
|
||||||
|
if let Some(name) = server_name {
|
||||||
|
match self.find_ssl_context(name) {
|
||||||
|
Some(ctx) => {
|
||||||
|
ssl_ref.set_ssl_context(&*ctx).map_err(|_| SniError::ALERT_FATAL)?;
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
log::debug!("No matching server name found");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// println!("Context ==> {:?} <==", start_time.elapsed());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn load_cert_info(cert_path: &str, key_path: &str) -> Option<CertificateInfo> {
|
||||||
|
let mut common_names = HashSet::new();
|
||||||
|
let mut alt_names = HashSet::new();
|
||||||
|
|
||||||
|
let file = File::open(cert_path);
|
||||||
|
match file {
|
||||||
|
Err(e) => {
|
||||||
|
log::error!("Failed to open certificate file: {:?}", e);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Ok(file) => {
|
||||||
|
let mut reader = BufReader::new(file);
|
||||||
|
match read_one(&mut reader) {
|
||||||
|
Err(e) => {
|
||||||
|
log::error!("Failed to decode PEM from certificate file: {:?}", e);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Ok(leaf) => match leaf {
|
||||||
|
Some(Item::X509Certificate(cert)) => match X509Certificate::from_der(&cert) {
|
||||||
|
Err(NomErr::Error(e)) | Err(NomErr::Failure(e)) => {
|
||||||
|
log::error!("Failed to parse certificate: {:?}", e);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
log::error!("Unknown error while parsing certificate");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Ok((_, x509)) => {
|
||||||
|
let subject = x509.subject();
|
||||||
|
for attr in subject.iter_common_name() {
|
||||||
|
if let Ok(cn) = attr.as_str() {
|
||||||
|
common_names.insert(cn.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Ok(Some(san)) = x509.subject_alternative_name() {
|
||||||
|
for name in san.value.general_names.iter() {
|
||||||
|
if let GeneralName::DNSName(dns) = name {
|
||||||
|
let dns_string = dns.to_string();
|
||||||
|
if !common_names.contains(&dns_string) {
|
||||||
|
alt_names.insert(dns_string);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
_ => {
|
||||||
|
log::error!("Failed to read certificate");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Ok(ssl_context) = create_ssl_context(cert_path, key_path) {
|
||||||
|
Some(CertificateInfo {
|
||||||
|
cert_path: cert_path.to_string(),
|
||||||
|
key_path: key_path.to_string(),
|
||||||
|
common_names: common_names.into_iter().collect(),
|
||||||
|
alt_names: alt_names.into_iter().collect(),
|
||||||
|
ssl_context,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
log::error!("Failed to create SSL context from cert paths");
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_ssl_context(cert_path: &str, key_path: &str) -> Result<SslContext, Box<dyn std::error::Error>> {
|
||||||
|
let mut ctx = SslContext::builder(SslMethod::tls())?;
|
||||||
|
ctx.set_certificate_chain_file(cert_path)?;
|
||||||
|
ctx.set_private_key_file(key_path, SslFiletype::PEM)?;
|
||||||
|
ctx.set_alpn_select_callback(prefer_h2);
|
||||||
|
let built = ctx.build();
|
||||||
|
Ok(built)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn prefer_h2<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
|
||||||
|
match select_next_proto("\x02h2\x08http/1.1".as_bytes(), alpn_in) {
|
||||||
|
Some(p) => Ok(p),
|
||||||
|
_ => Err(AlpnError::NOACK),
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,10 +1,17 @@
|
|||||||
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap};
|
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap};
|
||||||
|
use crate::utils::tls;
|
||||||
|
use crate::utils::tls::CertificateConfig;
|
||||||
use dashmap::DashMap;
|
use dashmap::DashMap;
|
||||||
|
use log::{error, info};
|
||||||
|
use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
|
||||||
use sha2::{Digest, Sha256};
|
use sha2::{Digest, Sha256};
|
||||||
use std::any::type_name;
|
use std::any::type_name;
|
||||||
use std::collections::HashSet;
|
use std::collections::{HashMap, HashSet};
|
||||||
use std::fmt::Write;
|
use std::fmt::Write;
|
||||||
|
use std::fs;
|
||||||
use std::sync::atomic::AtomicUsize;
|
use std::sync::atomic::AtomicUsize;
|
||||||
|
use std::sync::mpsc::{channel, Sender};
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
|
pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
|
||||||
@@ -16,8 +23,8 @@ pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
|
|||||||
let path = path_entry.key();
|
let path = path_entry.key();
|
||||||
println!(" Path: {}", path);
|
println!(" Path: {}", path);
|
||||||
|
|
||||||
for (ip, port, ssl, vers) in path_entry.value().0.clone() {
|
for (ip, port, ssl, vers, to_https) in path_entry.value().0.clone() {
|
||||||
println!(" ===> IP: {}, Port: {}, SSL: {}, H2: {}", ip, port, ssl, vers);
|
println!(" ===> IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}", ip, port, ssl, vers, to_https);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -139,10 +146,67 @@ pub fn clone_idmap_into(original: &UpstreamsDashMap, cloned: &UpstreamsIdMap) {
|
|||||||
let hash = hasher.finalize();
|
let hash = hasher.finalize();
|
||||||
let hex_hash = base16ct::lower::encode_string(&hash);
|
let hex_hash = base16ct::lower::encode_string(&hash);
|
||||||
let hh = hex_hash[0..50].to_string();
|
let hh = hex_hash[0..50].to_string();
|
||||||
cloned.insert(id, (hh.clone(), 0000, false, false));
|
cloned.insert(id, (hh.clone(), 0000, false, false, false));
|
||||||
cloned.insert(hh, x.to_owned());
|
cloned.insert(hh, x.to_owned());
|
||||||
}
|
}
|
||||||
new_inner_map.insert(path.clone(), new_vec);
|
new_inner_map.insert(path.clone(), new_vec);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
|
||||||
|
let mut f = HashMap::new();
|
||||||
|
let mut certificate_configs: Vec<tls::CertificateConfig> = vec![];
|
||||||
|
let paths = fs::read_dir(dir).unwrap();
|
||||||
|
for path in paths {
|
||||||
|
let path_str = path.unwrap().path().to_str().unwrap().to_owned();
|
||||||
|
if path_str.ends_with(".crt") {
|
||||||
|
let name = path_str.replace(".crt", "");
|
||||||
|
let mut inner = vec![];
|
||||||
|
let domain = name.split("/").collect::<Vec<&str>>();
|
||||||
|
inner.push(name.clone() + ".crt");
|
||||||
|
inner.push(name.clone() + ".key");
|
||||||
|
f.insert(domain[domain.len() - 1].to_owned(), inner);
|
||||||
|
let y = CertificateConfig {
|
||||||
|
cert_path: name.clone() + ".crt",
|
||||||
|
key_path: name.clone() + ".key",
|
||||||
|
};
|
||||||
|
certificate_configs.push(y);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (_, v) in f.iter() {
|
||||||
|
let y = CertificateConfig {
|
||||||
|
cert_path: v[0].clone(),
|
||||||
|
key_path: v[1].clone(),
|
||||||
|
};
|
||||||
|
certificate_configs.push(y);
|
||||||
|
}
|
||||||
|
certificate_configs
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn watch_folder(path: String, sender: Sender<Vec<CertificateConfig>>) -> notify::Result<()> {
|
||||||
|
let (tx, rx) = channel();
|
||||||
|
let mut watcher = RecommendedWatcher::new(tx, Config::default())?;
|
||||||
|
watcher.watch(path.as_ref(), RecursiveMode::Recursive)?;
|
||||||
|
info!("Watching for certificates in : {}", path);
|
||||||
|
let certificate_configs = listdir(path.clone());
|
||||||
|
sender.send(certificate_configs)?;
|
||||||
|
let mut start = Instant::now();
|
||||||
|
loop {
|
||||||
|
match rx.recv_timeout(Duration::from_secs(1)) {
|
||||||
|
Ok(Ok(event)) => match &event.kind {
|
||||||
|
EventKind::Modify(ModifyKind::Data(_)) | EventKind::Create(_) | EventKind::Remove(_) => {
|
||||||
|
if start.elapsed() > Duration::from_secs(1) {
|
||||||
|
start = Instant::now();
|
||||||
|
let certificate_configs = listdir(path.clone());
|
||||||
|
sender.send(certificate_configs)?;
|
||||||
|
info!("Certificate changed: {:?}, {:?}", event.kind, event.paths);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
},
|
||||||
|
Ok(Err(e)) => error!("Watch error: {:?}", e),
|
||||||
|
Err(_) => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -30,10 +30,15 @@ impl BackgroundService for LB {
|
|||||||
|
|
||||||
let _ = tokio::spawn(async move { file_load.start(tx_file).await });
|
let _ = tokio::spawn(async move { file_load.start(tx_file).await });
|
||||||
let _ = tokio::spawn(async move { consul_load.start(tx_consul).await });
|
let _ = tokio::spawn(async move { consul_load.start(tx_consul).await });
|
||||||
|
// let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone()));
|
||||||
|
// let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone())).await;
|
||||||
|
|
||||||
let api_load = APIUpstreamProvider {
|
let api_load = APIUpstreamProvider {
|
||||||
address: self.config.config_address.clone(),
|
address: self.config.config_address.clone(),
|
||||||
masterkey: self.config.master_key.clone(),
|
masterkey: self.config.master_key.clone(),
|
||||||
|
tls_address: self.config.config_tls_address.clone(),
|
||||||
|
tls_certificate: self.config.config_tls_certificate.clone(),
|
||||||
|
tls_key_file: self.config.config_tls_key_file.clone(),
|
||||||
};
|
};
|
||||||
let tx_api = tx.clone();
|
let tx_api = tx.clone();
|
||||||
let _ = tokio::spawn(async move { api_load.start(tx_api).await });
|
let _ = tokio::spawn(async move { api_load.start(tx_api).await });
|
||||||
@@ -56,7 +61,8 @@ impl BackgroundService for LB {
|
|||||||
clone_dashmap_into(&ss.upstreams, &self.ump_upst);
|
clone_dashmap_into(&ss.upstreams, &self.ump_upst);
|
||||||
let current = self.extraparams.load_full();
|
let current = self.extraparams.load_full();
|
||||||
let mut new = (*current).clone();
|
let mut new = (*current).clone();
|
||||||
new.stickysessions = ss.extraparams.stickysessions;
|
new.sticky_sessions = ss.extraparams.sticky_sessions;
|
||||||
|
new.to_https = ss.extraparams.to_https;
|
||||||
new.authentication = ss.extraparams.authentication.clone();
|
new.authentication = ss.extraparams.authentication.clone();
|
||||||
self.extraparams.store(Arc::new(new));
|
self.extraparams.store(Arc::new(new));
|
||||||
self.headers.clear();
|
self.headers.clear();
|
||||||
@@ -79,8 +85,8 @@ impl BackgroundService for LB {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
info!("Upstreams list is changed, updating to:");
|
// info!("Upstreams list is changed, updating to:");
|
||||||
print_upstreams(&self.ump_full);
|
// print_upstreams(&self.ump_full);
|
||||||
}
|
}
|
||||||
None => {}
|
None => {}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,15 +1,16 @@
|
|||||||
|
use crate::utils::structs::InnerMap;
|
||||||
use crate::web::proxyhttp::LB;
|
use crate::web::proxyhttp::LB;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use std::sync::atomic::Ordering;
|
use std::sync::atomic::Ordering;
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub trait GetHost {
|
pub trait GetHost {
|
||||||
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<(String, u16, bool, bool)>;
|
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap>;
|
||||||
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>>;
|
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>>;
|
||||||
}
|
}
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl GetHost for LB {
|
impl GetHost for LB {
|
||||||
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<(String, u16, bool, bool)> {
|
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap> {
|
||||||
if let Some(b) = backend_id {
|
if let Some(b) = backend_id {
|
||||||
if let Some(bb) = self.ump_byid.get(b) {
|
if let Some(bb) = self.ump_byid.get(b) {
|
||||||
// println!("BIB :===> {:?}", Some(bb.value()));
|
// println!("BIB :===> {:?}", Some(bb.value()));
|
||||||
@@ -19,7 +20,7 @@ impl GetHost for LB {
|
|||||||
|
|
||||||
let host_entry = self.ump_upst.get(peer)?;
|
let host_entry = self.ump_upst.get(peer)?;
|
||||||
let mut current_path = path.to_string();
|
let mut current_path = path.to_string();
|
||||||
let mut best_match: Option<(String, u16, bool, bool)> = None;
|
let mut best_match: Option<InnerMap> = None;
|
||||||
loop {
|
loop {
|
||||||
if let Some(entry) = host_entry.get(¤t_path) {
|
if let Some(entry) = host_entry.get(¤t_path) {
|
||||||
let (servers, index) = entry.value();
|
let (servers, index) = entry.value();
|
||||||
|
|||||||
@@ -1,17 +1,21 @@
|
|||||||
use crate::utils::auth::authenticate;
|
use crate::utils::auth::authenticate;
|
||||||
|
use crate::utils::metrics::*;
|
||||||
use crate::utils::structs::{AppConfig, Extraparams, Headers, UpstreamsDashMap, UpstreamsIdMap};
|
use crate::utils::structs::{AppConfig, Extraparams, Headers, UpstreamsDashMap, UpstreamsIdMap};
|
||||||
use crate::web::gethosts::GetHost;
|
use crate::web::gethosts::GetHost;
|
||||||
use arc_swap::ArcSwap;
|
use arc_swap::ArcSwap;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
use axum::body::Bytes;
|
||||||
use log::{debug, warn};
|
use log::{debug, warn};
|
||||||
use pingora::http::RequestHeader;
|
use pingora::http::{RequestHeader, ResponseHeader, StatusCode};
|
||||||
use pingora::prelude::*;
|
use pingora::prelude::*;
|
||||||
|
use pingora::ErrorSource::Upstream;
|
||||||
use pingora_core::listeners::ALPN;
|
use pingora_core::listeners::ALPN;
|
||||||
use pingora_core::prelude::HttpPeer;
|
use pingora_core::prelude::HttpPeer;
|
||||||
use pingora_http::ResponseHeader;
|
|
||||||
use pingora_proxy::{ProxyHttp, Session};
|
use pingora_proxy::{ProxyHttp, Session};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use tokio::time::Instant;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
pub struct LB {
|
pub struct LB {
|
||||||
pub ump_upst: Arc<UpstreamsDashMap>,
|
pub ump_upst: Arc<UpstreamsDashMap>,
|
||||||
pub ump_full: Arc<UpstreamsDashMap>,
|
pub ump_full: Arc<UpstreamsDashMap>,
|
||||||
@@ -23,6 +27,9 @@ pub struct LB {
|
|||||||
|
|
||||||
pub struct Context {
|
pub struct Context {
|
||||||
backend_id: String,
|
backend_id: String,
|
||||||
|
to_https: bool,
|
||||||
|
redirect_to: String,
|
||||||
|
start_time: Instant,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
@@ -31,7 +38,12 @@ impl ProxyHttp for LB {
|
|||||||
// fn new_ctx(&self) -> Self::CTX {}
|
// fn new_ctx(&self) -> Self::CTX {}
|
||||||
type CTX = Context;
|
type CTX = Context;
|
||||||
fn new_ctx(&self) -> Self::CTX {
|
fn new_ctx(&self) -> Self::CTX {
|
||||||
Context { backend_id: String::new() }
|
Context {
|
||||||
|
backend_id: String::new(),
|
||||||
|
to_https: false,
|
||||||
|
redirect_to: String::new(),
|
||||||
|
start_time: Instant::now(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
|
async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
|
||||||
if let Some(auth) = self.extraparams.load().authentication.get("authorization") {
|
if let Some(auth) = self.extraparams.load().authentication.get("authorization") {
|
||||||
@@ -42,21 +54,16 @@ impl ProxyHttp for LB {
|
|||||||
return Ok(true);
|
return Ok(true);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
// if session.req_header().uri.path().starts_with("/denied") {
|
|
||||||
// let _ = session.respond_error(403).await;
|
|
||||||
// warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path().to_string());
|
|
||||||
// return Ok(true);
|
|
||||||
// };
|
|
||||||
Ok(false)
|
Ok(false)
|
||||||
}
|
}
|
||||||
async fn upstream_peer(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
|
async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
|
||||||
let host_name = return_header_host(&session);
|
let host_name = return_header_host(&session);
|
||||||
|
|
||||||
match host_name {
|
match host_name {
|
||||||
Some(hostname) => {
|
Some(hostname) => {
|
||||||
// session.req_header_mut().headers.insert("X-Host-Name", host.to_string().parse().unwrap());
|
// session.req_header_mut().headers.insert("X-Host-Name", host.to_string().parse().unwrap());
|
||||||
let mut backend_id = None;
|
let mut backend_id = None;
|
||||||
if self.extraparams.load().stickysessions {
|
|
||||||
|
if self.extraparams.load().sticky_sessions {
|
||||||
if let Some(cookies) = session.req_header().headers.get("cookie") {
|
if let Some(cookies) = session.req_header().headers.get("cookie") {
|
||||||
if let Ok(cookie_str) = cookies.to_str() {
|
if let Ok(cookie_str) = cookies.to_str() {
|
||||||
for cookie in cookie_str.split(';') {
|
for cookie in cookie_str.split(';') {
|
||||||
@@ -73,7 +80,7 @@ impl ProxyHttp for LB {
|
|||||||
let ddr = self.get_host(hostname, hostname, backend_id);
|
let ddr = self.get_host(hostname, hostname, backend_id);
|
||||||
|
|
||||||
match ddr {
|
match ddr {
|
||||||
Some((address, port, ssl, is_h2)) => {
|
Some((address, port, ssl, is_h2, to_https)) => {
|
||||||
let mut peer = Box::new(HttpPeer::new((address.clone(), port.clone()), ssl, String::new()));
|
let mut peer = Box::new(HttpPeer::new((address.clone(), port.clone()), ssl, String::new()));
|
||||||
// if session.is_http2() {
|
// if session.is_http2() {
|
||||||
if is_h2 {
|
if is_h2 {
|
||||||
@@ -84,26 +91,52 @@ impl ProxyHttp for LB {
|
|||||||
peer.options.verify_cert = false;
|
peer.options.verify_cert = false;
|
||||||
peer.options.verify_hostname = false;
|
peer.options.verify_hostname = false;
|
||||||
}
|
}
|
||||||
// println!(" ==> {} ==> {} => {} => {:?}", hostname, address.as_str(), peer.options.alpn, is_h2);
|
// println!("{}, {}, alpn {}, h2 {:?}, to_https {}", hostname, address.as_str(), peer.options.alpn, is_h2, _to_https);
|
||||||
_ctx.backend_id = format!("{}:{}:{}", address.clone(), port.clone(), ssl);
|
if self.extraparams.load().to_https.unwrap_or(false) || to_https {
|
||||||
|
if let Some(stream) = session.stream() {
|
||||||
|
if stream.get_ssl().is_none() {
|
||||||
|
if let Some(addr) = session.server_addr() {
|
||||||
|
if let Some((host, _)) = addr.to_string().split_once(':') {
|
||||||
|
let uri = session.req_header().uri.path_and_query().map_or("/", |pq| pq.as_str());
|
||||||
|
let port = self.config.proxy_port_tls.unwrap_or(403);
|
||||||
|
ctx.to_https = true;
|
||||||
|
ctx.redirect_to = format!("https://{}:{}{}", host, port, uri);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.backend_id = format!("{}:{}:{}", address.clone(), port.clone(), ssl);
|
||||||
Ok(peer)
|
Ok(peer)
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
warn!("Upstream not found. Host: {:?}, Path: {}", hostname, session.req_header().uri);
|
session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error");
|
||||||
Ok(return_no_host(&self.config.local_server))
|
Err(Box::new(Error {
|
||||||
|
etype: HTTPStatus(502),
|
||||||
|
esource: Upstream,
|
||||||
|
retry: RetryType::Decided(false),
|
||||||
|
cause: None,
|
||||||
|
context: Option::from(ImmutStr::Static("Upstream not found")),
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
warn!("Upstream not found. Host: {:?}, Path: {}", host_name, session.req_header().uri);
|
session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error");
|
||||||
Ok(return_no_host(&self.config.local_server))
|
Err(Box::new(Error {
|
||||||
|
etype: HTTPStatus(502),
|
||||||
|
esource: Upstream,
|
||||||
|
retry: RetryType::Decided(false),
|
||||||
|
cause: None,
|
||||||
|
context: None,
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn upstream_request_filter(&self, _session: &mut Session, _upstream_request: &mut RequestHeader, _ctx: &mut Self::CTX) -> Result<()> {
|
async fn upstream_request_filter(&self, session: &mut Session, _upstream_request: &mut RequestHeader, _ctx: &mut Self::CTX) -> Result<()> {
|
||||||
let clientip = _session.client_addr();
|
match session.client_addr() {
|
||||||
match clientip {
|
|
||||||
Some(ip) => {
|
Some(ip) => {
|
||||||
let inet = ip.as_inet();
|
let inet = ip.as_inet();
|
||||||
match inet {
|
match inet {
|
||||||
@@ -122,20 +155,23 @@ impl ProxyHttp for LB {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn response_filter(&self, _session: &mut Session, _upstream_response: &mut ResponseHeader, _ctx: &mut Self::CTX) -> Result<()> {
|
async fn response_filter(&self, session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> {
|
||||||
// _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap();
|
// _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap();
|
||||||
if self.extraparams.load().stickysessions {
|
if self.extraparams.load().sticky_sessions {
|
||||||
let backend_id = _ctx.backend_id.clone();
|
let backend_id = ctx.backend_id.clone();
|
||||||
if let Some(bid) = self.ump_byid.get(&backend_id) {
|
if let Some(bid) = self.ump_byid.get(&backend_id) {
|
||||||
// let _ = _upstream_response.insert_header("set-cookie", format!("backend {}", bid.0));
|
|
||||||
let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.0));
|
let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.0));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ctx.to_https {
|
||||||
let host_name = return_header_host(&_session);
|
let mut redirect_response = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
|
||||||
match host_name {
|
redirect_response.insert_header("Location", ctx.redirect_to.clone())?;
|
||||||
|
redirect_response.insert_header("Content-Length", "0")?;
|
||||||
|
session.write_response_header(Box::new(redirect_response), false).await?;
|
||||||
|
}
|
||||||
|
match return_header_host(&session) {
|
||||||
Some(host) => {
|
Some(host) => {
|
||||||
let path = _session.req_header().uri.path();
|
let path = session.req_header().uri.path();
|
||||||
let host_header = host;
|
let host_header = host;
|
||||||
let split_header = host_header.split_once(':');
|
let split_header = host_header.split_once(':');
|
||||||
match split_header {
|
match split_header {
|
||||||
@@ -165,6 +201,13 @@ impl ProxyHttp for LB {
|
|||||||
async fn logging(&self, session: &mut Session, _e: Option<&pingora::Error>, ctx: &mut Self::CTX) {
|
async fn logging(&self, session: &mut Session, _e: Option<&pingora::Error>, ctx: &mut Self::CTX) {
|
||||||
let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16());
|
let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16());
|
||||||
debug!("{}, response code: {response_code}", self.request_summary(session, ctx));
|
debug!("{}, response code: {response_code}", self.request_summary(session, ctx));
|
||||||
|
let m = &MetricTypes {
|
||||||
|
method: session.req_header().method.to_string(),
|
||||||
|
code: session.response_written().map(|resp| resp.status.as_str().to_owned()).unwrap_or("0".to_string()),
|
||||||
|
latency: ctx.start_time.elapsed(),
|
||||||
|
version: session.req_header().version,
|
||||||
|
};
|
||||||
|
calc_metrics(m);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -185,9 +228,9 @@ fn return_header_host(session: &Session) -> Option<&str> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn return_no_host(inp: &Option<(String, u16)>) -> Box<HttpPeer> {
|
// fn return_no_host(inp: &Option<(String, u16)>) -> Box<HttpPeer> {
|
||||||
match inp {
|
// match inp {
|
||||||
Some(t) => Box::new(HttpPeer::new(t, false, String::new())),
|
// Some(t) => Box::new(HttpPeer::new(t, false, String::new())),
|
||||||
None => Box::new(HttpPeer::new(("0.0.0.0", 0), false, String::new())),
|
// None => Box::new(HttpPeer::new(("0.0.0.0", 0), false, String::new())),
|
||||||
}
|
// }
|
||||||
}
|
// }
|
||||||
|
|||||||
106
src/web/start.rs
106
src/web/start.rs
@@ -1,16 +1,22 @@
|
|||||||
|
// use rustls::crypto::ring::default_provider;
|
||||||
use crate::utils::structs::Extraparams;
|
use crate::utils::structs::Extraparams;
|
||||||
|
use crate::utils::tls;
|
||||||
|
use crate::utils::tls::CertificateConfig;
|
||||||
|
use crate::utils::tools::*;
|
||||||
use crate::web::proxyhttp::LB;
|
use crate::web::proxyhttp::LB;
|
||||||
use arc_swap::ArcSwap;
|
use arc_swap::ArcSwap;
|
||||||
use dashmap::DashMap;
|
use dashmap::DashMap;
|
||||||
use log::info;
|
use log::info;
|
||||||
|
use pingora::tls::ssl::{SslAlert, SslRef};
|
||||||
|
use pingora_core::listeners::tls::TlsSettings;
|
||||||
use pingora_core::prelude::{background_service, Opt};
|
use pingora_core::prelude::{background_service, Opt};
|
||||||
use pingora_core::server::Server;
|
use pingora_core::server::Server;
|
||||||
use rustls::crypto::ring::default_provider;
|
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||||
use std::env;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::{env, thread};
|
||||||
|
|
||||||
pub fn run() {
|
pub fn run() {
|
||||||
default_provider().install_default().expect("Failed to install rustls crypto provider");
|
// default_provider().install_default().expect("Failed to install rustls crypto provider");
|
||||||
let parameters = Some(Opt::parse_args()).unwrap();
|
let parameters = Some(Opt::parse_args()).unwrap();
|
||||||
let file = parameters.conf.clone().unwrap();
|
let file = parameters.conf.clone().unwrap();
|
||||||
let maincfg = crate::utils::parceyaml::parce_main_config(file.as_str());
|
let maincfg = crate::utils::parceyaml::parce_main_config(file.as_str());
|
||||||
@@ -23,7 +29,8 @@ pub fn run() {
|
|||||||
let im_config = Arc::new(DashMap::new());
|
let im_config = Arc::new(DashMap::new());
|
||||||
let hh_config = Arc::new(DashMap::new());
|
let hh_config = Arc::new(DashMap::new());
|
||||||
let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams {
|
let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams {
|
||||||
stickysessions: false,
|
sticky_sessions: false,
|
||||||
|
to_https: None,
|
||||||
authentication: DashMap::new(),
|
authentication: DashMap::new(),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
@@ -37,51 +44,77 @@ pub fn run() {
|
|||||||
headers: hh_config.clone(),
|
headers: hh_config.clone(),
|
||||||
extraparams: ec_config.clone(),
|
extraparams: ec_config.clone(),
|
||||||
};
|
};
|
||||||
let bg = LB {
|
// let bg = LB {
|
||||||
ump_upst: uf_config.clone(),
|
// ump_upst: uf_config.clone(),
|
||||||
ump_full: ff_config.clone(),
|
// ump_full: ff_config.clone(),
|
||||||
ump_byid: im_config.clone(),
|
// ump_byid: im_config.clone(),
|
||||||
config: cfg.clone(),
|
// config: cfg.clone(),
|
||||||
headers: hh_config.clone(),
|
// headers: hh_config.clone(),
|
||||||
extraparams: ec_config.clone(),
|
// extraparams: ec_config.clone(),
|
||||||
};
|
// config_rx: Arc::from(Mutex::new(rx)),
|
||||||
|
// };
|
||||||
|
|
||||||
// env_logger::Env::new();
|
// env_logger::Env::new();
|
||||||
// env_logger::init();
|
// env_logger::init();
|
||||||
|
|
||||||
let log_level = cfg.log_level.clone();
|
let log_level = cfg.log_level.clone();
|
||||||
match log_level.as_str() {
|
unsafe {
|
||||||
"info" => env::set_var("RUST_LOG", "info"),
|
match log_level.as_str() {
|
||||||
"error" => env::set_var("RUST_LOG", "error"),
|
"info" => env::set_var("RUST_LOG", "info"),
|
||||||
"warn" => env::set_var("RUST_LOG", "warn"),
|
"error" => env::set_var("RUST_LOG", "error"),
|
||||||
"debug" => env::set_var("RUST_LOG", "debug"),
|
"warn" => env::set_var("RUST_LOG", "warn"),
|
||||||
"trace" => env::set_var("RUST_LOG", "trace"),
|
"debug" => env::set_var("RUST_LOG", "debug"),
|
||||||
"off" => env::set_var("RUST_LOG", "off"),
|
"trace" => env::set_var("RUST_LOG", "trace"),
|
||||||
_ => {
|
"off" => env::set_var("RUST_LOG", "off"),
|
||||||
println!("Error reading log level, defaulting to: INFO");
|
_ => {
|
||||||
env::set_var("RUST_LOG", "info")
|
println!("Error reading log level, defaulting to: INFO");
|
||||||
|
env::set_var("RUST_LOG", "info")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
env_logger::builder()
|
env_logger::builder().init();
|
||||||
// .format_timestamp(None)
|
|
||||||
// .format_module_path(false)
|
|
||||||
// .format_source_path(false)
|
|
||||||
// .format_target(false)
|
|
||||||
.init();
|
|
||||||
|
|
||||||
let bg_srvc = background_service("bgsrvc", bg);
|
let bg_srvc = background_service("bgsrvc", lb.clone());
|
||||||
let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb);
|
let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb.clone());
|
||||||
let bind_address_http = cfg.proxy_address_http.clone();
|
let bind_address_http = cfg.proxy_address_http.clone();
|
||||||
|
|
||||||
let bind_address_tls = cfg.proxy_address_tls.clone();
|
let bind_address_tls = cfg.proxy_address_tls.clone();
|
||||||
match bind_address_tls {
|
match bind_address_tls {
|
||||||
Some(bind_address_tls) => {
|
Some(bind_address_tls) => {
|
||||||
info!("Running TLS listener on :{}", bind_address_tls);
|
let (tx, rx): (Sender<Vec<CertificateConfig>>, Receiver<Vec<CertificateConfig>>) = channel();
|
||||||
let cert_path = cfg.tls_certificate.clone().unwrap();
|
let certs_path = cfg.proxy_certificates.clone().unwrap();
|
||||||
let key_path = cfg.tls_key_file.clone().unwrap();
|
thread::spawn(move || {
|
||||||
let mut tls_settings = pingora_core::listeners::tls::TlsSettings::intermediate(&cert_path, &key_path).unwrap();
|
watch_folder(certs_path, tx).unwrap();
|
||||||
tls_settings.enable_h2();
|
});
|
||||||
|
let certificate_configs = rx.recv().unwrap();
|
||||||
|
let first_set = tls::Certificates::new(&certificate_configs).unwrap_or_else(|| panic!("Unable to load initial certificate info"));
|
||||||
|
let certificates = Arc::new(ArcSwap::from_pointee(first_set));
|
||||||
|
let certs_for_callback = certificates.clone();
|
||||||
|
|
||||||
|
let certs_for_watcher = certificates.clone();
|
||||||
|
let new_certs = tls::Certificates::new(&certificate_configs);
|
||||||
|
certs_for_watcher.store(Arc::new(new_certs.unwrap()));
|
||||||
|
|
||||||
|
let mut tls_settings =
|
||||||
|
TlsSettings::intermediate(&certs_for_callback.load().default_cert_path, &certs_for_callback.load().default_key_path).expect("unable to load or parse cert/key");
|
||||||
|
|
||||||
|
tls_settings.set_servername_callback(move |ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert| certs_for_callback.load().server_name_callback(ssl_ref, ssl_alert));
|
||||||
|
tls_settings.set_alpn_select_callback(tls::prefer_h2);
|
||||||
proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings);
|
proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings);
|
||||||
|
|
||||||
|
let certs_for_watcher = certificates.clone();
|
||||||
|
thread::spawn(move || {
|
||||||
|
while let Ok(new_configs) = rx.recv() {
|
||||||
|
let new_certs = tls::Certificates::new(&new_configs);
|
||||||
|
match new_certs {
|
||||||
|
Some(new_certs) => {
|
||||||
|
certs_for_watcher.store(Arc::new(new_certs));
|
||||||
|
info!("Reload TLS certificates from {}", cfg.proxy_certificates.clone().unwrap())
|
||||||
|
}
|
||||||
|
None => {}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
None => {}
|
None => {}
|
||||||
}
|
}
|
||||||
@@ -89,8 +122,5 @@ pub fn run() {
|
|||||||
proxy.add_tcp(bind_address_http.as_str());
|
proxy.add_tcp(bind_address_http.as_str());
|
||||||
server.add_service(proxy);
|
server.add_service(proxy);
|
||||||
server.add_service(bg_srvc);
|
server.add_service(bg_srvc);
|
||||||
// let mut prometheus_service_http = Service::prometheus_http_service();
|
|
||||||
// prometheus_service_http.add_tcp("0.0.0.0:1234");
|
|
||||||
// server.add_service(prometheus_service_http);
|
|
||||||
server.run_forever();
|
server.run_forever();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,21 +1,26 @@
|
|||||||
|
use crate::utils::discovery::APIUpstreamProvider;
|
||||||
use crate::utils::structs::Configuration;
|
use crate::utils::structs::Configuration;
|
||||||
use axum::body::Body;
|
use axum::body::Body;
|
||||||
use axum::extract::State;
|
use axum::extract::{Query, State};
|
||||||
use axum::http::{Response, StatusCode};
|
use axum::http::{Response, StatusCode};
|
||||||
use axum::response::IntoResponse;
|
use axum::response::IntoResponse;
|
||||||
use axum::routing::{delete, get, head, post, put};
|
use axum::routing::{get, post};
|
||||||
use axum::{Json, Router};
|
use axum::{Json, Router};
|
||||||
|
use axum_server::tls_openssl::OpenSSLConfig;
|
||||||
use futures::channel::mpsc::Sender;
|
use futures::channel::mpsc::Sender;
|
||||||
use futures::SinkExt;
|
use futures::SinkExt;
|
||||||
use jsonwebtoken::{encode, EncodingKey, Header};
|
use jsonwebtoken::{encode, EncodingKey, Header};
|
||||||
use log::{error, info, warn};
|
use log::{error, info, warn};
|
||||||
|
use prometheus::{gather, Encoder, TextEncoder};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::SocketAddr;
|
||||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||||
use tokio::net::TcpListener;
|
use tokio::net::TcpListener;
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
struct InputKey {
|
struct InputKey {
|
||||||
masterkey: String,
|
master_key: String,
|
||||||
owner: String,
|
owner: String,
|
||||||
valid: u64,
|
valid: u64,
|
||||||
}
|
}
|
||||||
@@ -25,51 +30,66 @@ struct OutToken {
|
|||||||
token: String,
|
token: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(unused_mut)]
|
#[derive(Clone)]
|
||||||
pub async fn run_server(bindaddress: String, masterkey: String, mut toreturn: Sender<Configuration>) {
|
struct AppState {
|
||||||
let mut tr = toreturn.clone();
|
master_key: String,
|
||||||
let app = Router::new()
|
config_sender: Sender<Configuration>,
|
||||||
.route("/{*wildcard}", get(senderror))
|
}
|
||||||
.route("/{*wildcard}", post(senderror))
|
|
||||||
.route("/{*wildcard}", put(senderror))
|
|
||||||
.route("/{*wildcard}", head(senderror))
|
|
||||||
.route("/{*wildcard}", delete(senderror))
|
|
||||||
.route("/jwt", post(jwt_gen))
|
|
||||||
.with_state(masterkey.clone())
|
|
||||||
.route(
|
|
||||||
"/conf",
|
|
||||||
post(|up: String| async move {
|
|
||||||
let serverlist = crate::utils::parceyaml::load_configuration(up.as_str(), "content");
|
|
||||||
|
|
||||||
match serverlist {
|
#[allow(unused_mut)]
|
||||||
Some(serverlist) => {
|
pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>) {
|
||||||
let _ = tr.send(serverlist).await.unwrap();
|
let app_state = AppState {
|
||||||
Response::builder().status(StatusCode::CREATED).body(Body::from("Config, conf file, updated!\n")).unwrap()
|
master_key: config.masterkey.clone(),
|
||||||
}
|
config_sender: to_return.clone(),
|
||||||
None => Response::builder()
|
};
|
||||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
let app = Router::new()
|
||||||
.body(Body::from("Failed to parce config file!\n"))
|
// .route("/{*wildcard}", get(senderror))
|
||||||
.unwrap(),
|
// .route("/{*wildcard}", post(senderror))
|
||||||
}
|
// .route("/{*wildcard}", put(senderror))
|
||||||
})
|
// .route("/{*wildcard}", head(senderror))
|
||||||
.with_state("state"),
|
// .route("/{*wildcard}", delete(senderror))
|
||||||
);
|
.route("/jwt", post(jwt_gen))
|
||||||
let listener = TcpListener::bind(bindaddress.clone()).await.unwrap();
|
.route("/conf", post(conf))
|
||||||
info!("Starting the API server on: {}", bindaddress);
|
.route("/metrics", get(metrics))
|
||||||
|
.with_state(app_state);
|
||||||
|
|
||||||
|
if let Some(value) = &config.tls_address {
|
||||||
|
let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap();
|
||||||
|
let addr: SocketAddr = value.parse().expect("Unable to parse socket address");
|
||||||
|
let tls_app = app.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await {
|
||||||
|
eprintln!("TLS server failed: {}", e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
info!("Starting the TLS API server on: {}", value);
|
||||||
|
}
|
||||||
|
|
||||||
|
let listener = TcpListener::bind(config.address.clone()).await.unwrap();
|
||||||
|
info!("Starting the API server on: {}", config.address);
|
||||||
axum::serve(listener, app).await.unwrap();
|
axum::serve(listener, app).await.unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(dead_code)]
|
async fn conf(State(mut st): State<AppState>, Query(params): Query<HashMap<String, String>>, content: String) -> impl IntoResponse {
|
||||||
async fn senderror() -> impl IntoResponse {
|
if let Some(s) = params.get("key") {
|
||||||
Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("No live upstream found!\n")).unwrap()
|
if s.to_owned() == st.master_key.to_owned() {
|
||||||
|
if let Some(serverlist) = crate::utils::parceyaml::load_configuration(content.as_str(), "content") {
|
||||||
|
st.config_sender.send(serverlist).await.unwrap();
|
||||||
|
return Response::builder().status(StatusCode::OK).body(Body::from("Config, conf file, updated !\n")).unwrap();
|
||||||
|
} else {
|
||||||
|
return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("Failed to parse config!\n")).unwrap();
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn jwt_gen(State(masterkey): State<String>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) {
|
async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) {
|
||||||
if payload.masterkey == masterkey {
|
if payload.master_key == state.master_key {
|
||||||
let now = SystemTime::now() + Duration::from_secs(payload.valid * 60);
|
let now = SystemTime::now() + Duration::from_secs(payload.valid * 60);
|
||||||
let a = now.duration_since(UNIX_EPOCH).unwrap().as_secs();
|
let a = now.duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||||
let claim = crate::utils::jwt::Claims { user: payload.owner, exp: a };
|
let claim = crate::utils::jwt::Claims { user: payload.owner, exp: a };
|
||||||
match encode(&Header::default(), &claim, &EncodingKey::from_secret(payload.masterkey.as_ref())) {
|
match encode(&Header::default(), &claim, &EncodingKey::from_secret(payload.master_key.as_ref())) {
|
||||||
Ok(t) => {
|
Ok(t) => {
|
||||||
let tok = OutToken { token: t };
|
let tok = OutToken { token: t };
|
||||||
info!("Generating token: {:?}", tok);
|
info!("Generating token: {:?}", tok);
|
||||||
@@ -89,3 +109,28 @@ async fn jwt_gen(State(masterkey): State<String>, Json(payload): Json<InputKey>)
|
|||||||
(StatusCode::FORBIDDEN, Json(tok))
|
(StatusCode::FORBIDDEN, Json(tok))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn metrics() -> impl IntoResponse {
|
||||||
|
let metric_families = gather();
|
||||||
|
let encoder = TextEncoder::new();
|
||||||
|
|
||||||
|
let mut buffer = Vec::new();
|
||||||
|
if let Err(e) = encoder.encode(&metric_families, &mut buffer) {
|
||||||
|
// encoding error fallback
|
||||||
|
return Response::builder()
|
||||||
|
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
|
.body(Body::from(format!("Failed to encode metrics: {}", e)))
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.header("Content-Type", encoder.format_type())
|
||||||
|
.body(Body::from(buffer))
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
// #[allow(dead_code)]
|
||||||
|
// async fn senderror() -> impl IntoResponse {
|
||||||
|
// Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("No live upstream found!\n")).unwrap()
|
||||||
|
// }
|
||||||
|
|||||||
Reference in New Issue
Block a user