Mirror of https://github.com/sadoyan/aralez.git
Synced 2026-04-30 14:58:38 +08:00
Compare commits
57 Commits
94b1f77734
9d986f9a28
3afa2f209f
c151fdf58b
438426153f
9bb01fd1b0
abb5fef1d6
3618687ad5
a893b3c301
3ff262c7f4
062f02259f
1a4c9b7d55
6ef7f23823
2b437c65fb
38055ae94e
703de9e909
2c8b01295c
baebe1c00f
6c1d3c5ef8
2d1a827007
a2a5250711
985e923342
0fc79c022f
a43bccdfb8
5b87391fbb
c68a4ad83d
8ba8d32df1
7a839065e6
74821654f3
78c83b802f
012505b77e
21c4cb0901
86dd3d3402
d6b345202b
5209d787e4
02de5f1c21
9519280026
e87c60cf4f
25693a7058
3b0b385ec7
5359c2e8e9
2b62d1e6de
8a290e5084
3541b20c80
bd5fed9be0
b916b152ea
5d4915d6b9
3ea3996e27
dd069b8532
c78245e695
66b1a1c399
bba6dd8514
79485ac69d
61c5625016
57bdc71acd
9e09b829a6
d3602fa578
.cargo/config.toml (new file, 13 lines)
@@ -0,0 +1,13 @@
[target.aarch64-unknown-linux-musl]
rustflags = [
    "-C", "link-arg=-Wl,--defsym=fopen64=fopen",
    "-C", "link-arg=-Wl,--defsym=fseeko64=fseeko",
    "-C", "link-arg=-Wl,--defsym=ftello64=ftello"
]

[target.x86_64-unknown-linux-musl]
rustflags = [
    "-C", "link-arg=-Wl,--defsym=fopen64=fopen",
    "-C", "link-arg=-Wl,--defsym=fseeko64=fseeko",
    "-C", "link-arg=-Wl,--defsym=ftello64=ftello"
]
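
A note on the flags above: musl, unlike glibc, uses a 64-bit off_t unconditionally and does not export the separate large-file symbols fopen64, fseeko64 and ftello64, so C dependencies built against glibc-style LFS names can fail to link on the musl targets. The --defsym arguments are most likely there to alias those names to the standard functions at link time.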
.github/FUNDING.yml (new file, vendored, 15 lines)
@@ -0,0 +1,15 @@
# These are supported funding model platforms

github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
polar: # Replace with a single Polar username
buy_me_a_coffee: sadoyan
thanks_dev: # Replace with a single thanks.dev username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
.gitignore (vendored, 2 lines added)
@@ -5,6 +5,8 @@
 *.dll
 *.exe
 *.sh
+/docs/
+/docs
 /target/
 *.iml
 .idea/
Cargo.lock (generated, 1898 lines changed): file diff suppressed because it is too large.
Cargo.toml (70 lines changed)
@@ -1,6 +1,6 @@
 [package]
 name = "aralez"
-version = "0.9.1"
+version = "0.9.2"
 edition = "2021"

 [profile.release]
@@ -11,45 +11,43 @@ panic = "abort"
 strip = true

 [dependencies]
-tokio = { version = "1.45.1", features = ["full"] }
-#pingora = { version = "0.5.0", features = ["lb", "rustls"] } # openssl, rustls, boringssl
-pingora = { version = "0.5.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl
-serde = { version = "1.0.219", features = ["derive"] }
+tokio = { version = "1.49.0", features = ["full"] }
+pingora = { version = "0.8.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl
+serde = { version = "1.0.228", features = ["derive"] }
 dashmap = "7.0.0-rc2"
-pingora-core = "0.5.0"
-pingora-proxy = "0.5.0"
-pingora-http = "0.5.0"
-pingora-limits = "0.5.0"
-#pingora-pool = "0.5.0"
-async-trait = "0.1.88"
-env_logger = "0.11.8"
-log = "0.4.27"
-futures = "0.3.31"
-notify = "8.0.0"
-axum = { version = "0.8.4" }
-axum-server = { version = "0.7.2", features = ["tls-openssl"] }
-reqwest = { version = "0.12.20", features = ["json", "native-tls-alpn"] }
-#reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] }
-#reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] }
-
+pingora-core = "0.8.0"
+pingora-proxy = "0.8.0"
+pingora-http = "0.8.0"
+pingora-limits = "0.8.0"
+async-trait = "0.1.89"
+env_logger = "0.11.9"
+log = "0.4.29"
+futures = "0.3.32"
+notify = "9.0.0-rc.2"
+axum = { version = "0.8.8" }
+#axum-server = { version = "0.8.0" }
+reqwest = { version = "0.13.2", features = ["json", "stream"] }
 serde_yaml = "0.9.34-deprecated"
-rand = "0.9.0"
+rand = "0.10.0"
 base64 = "0.22.1"
-jsonwebtoken = "9.3.1"
-tonic = "0.13.1"
-sha2 = { version = "0.11.0-rc.0", default-features = false }
-base16ct = { version = "0.2.0", features = ["alloc"] }
+#jsonwebtoken = { version = "10.3.0", features = ["aws_lc_rs"] }
+#jsonwebtoken = { version = "10.3.0", default-features = false, features = ["use_pem"] }
+jsonwebtoken = { version = "10.3.0", default-features = false, features = ["use_pem", "rust_crypto"] }
+tonic = "0.14.5"
+sha2 = { version = "0.11.0-rc.5", default-features = false }
+base16ct = { version = "1.0.0", features = ["alloc"] }
 urlencoding = "2.1.3"
-arc-swap = "1.7.1"
-#rustls = { version = "0.23.27", features = ["ring"] }
-mimalloc = { version = "0.1.47", default-features = false }
+arc-swap = "1.8.2"
+mimalloc = { version = "0.1.48", default-features = false }
 prometheus = "0.14.0"
 lazy_static = "1.5.0"
 #openssl = "0.10.73"
-x509-parser = "0.17.0"
+x509-parser = "0.18.1"
 rustls-pemfile = "2.2.0"
-tower-http = { version = "0.6.6", features = ["fs"] }
-once_cell = "1.20.2"
-#moka = { version = "0.12.10", features = ["sync"] }
-
-
+tower-http = { version = "0.6.8", features = ["fs"] }
+once_cell = "1.21.3"
+privdrop = "0.5.6"
+ctrlc = "3.5.2"
+port_check = "0.3.0"
+serde_json = "1.0.149"
+http = "1.4.0"
+itoa = "1.0.14"
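
Taken together, the manifest changes move the whole pingora family from 0.5.0 to 0.8.0, switch jsonwebtoken to 10.3.0 with the rust_crypto backend, drop the axum-server dependency, and add a few crates that match features elsewhere in this changeset: privdrop (dropping root privileges to the configured runuser/rungroup), ctrlc (Ctrl-C/signal handling) and port_check (checking that listen ports are usable).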
LICENSE (new file, 201 lines)
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
METRICS.md (deleted file, 120 lines)
@@ -1,120 +0,0 @@
# 📈 Aralez Prometheus Metrics Reference

This document outlines Prometheus metrics for the [Aralez](https://github.com/sadoyan/aralez) reverse proxy.
These metrics can be used for monitoring, alerting and performance analysis.

Exposed to `http://config_address/metrics`

By default `http://127.0.0.1:3000/metrics`

# 📊 Example Grafana dashboard during stress test :

![]()

---

## 🛠️ Prometheus Metrics

### 1. `aralez_requests_total`

- **Type**: `Counter`
- **Purpose**: Total amount requests served by Aralez.

**PromQL example:**

```promql
rate(aralez_requests_total[5m])
```

---

### 2. `aralez_errors_total`

- **Type**: `Counter`
- **Purpose**: Count of requests that resulted in an error.

**PromQL example:**

```promql
rate(aralez_errors_total[5m])
```

---

### 3. `aralez_responses_total{status="200"}`

- **Type**: `CounterVec`
- **Purpose**: Count of responses by HTTP status code.

**PromQL example:**

```promql
rate(aralez_responses_total{status=~"5.."}[5m]) > 0
```

> Useful for alerting on 5xx errors.

---

### 4. `aralez_response_latency_seconds`

- **Type**: `Histogram`
- **Purpose**: Tracks the latency of responses in seconds.

**Example bucket output:**

```prometheus
aralez_response_latency_seconds_bucket{le="0.01"} 15
aralez_response_latency_seconds_bucket{le="0.1"} 120
aralez_response_latency_seconds_bucket{le="0.25"} 245
aralez_response_latency_seconds_bucket{le="0.5"} 500
...
aralez_response_latency_seconds_count 1023
aralez_response_latency_seconds_sum 42.6
```

| Metric                  | Meaning                                                       |
|-------------------------|---------------------------------------------------------------|
| `bucket{le="0.1"} 120`  | 120 requests were ≤ 100ms                                     |
| `bucket{le="0.25"} 245` | 245 requests were ≤ 250ms                                     |
| `count`                 | Total number of observations (i.e., total responses measured) |
| `sum`                   | Total time of all responses, in seconds                       |

### 🔍 How to interpret:

- `le` means “less than or equal to”.
- `count` is total amount of observations.
- `sum` is the total time (in seconds) of all responses.

**PromQL examples:**

🔹 **95th percentile latency**

```promql
histogram_quantile(0.95, rate(aralez_response_latency_seconds_bucket[5m]))
```

🔹 **Average latency**

```promql
rate(aralez_response_latency_seconds_sum[5m]) / rate(aralez_response_latency_seconds_count[5m])
```

---

## ✅ Notes

- Metrics are registered after the first served request.

---
✅ Summary of key metrics

| Metric Name                             | Type       | What it Tells You          |
|-----------------------------------------|------------|----------------------------|
| `aralez_requests_total`                 | Counter    | Total requests served      |
| `aralez_errors_total`                   | Counter    | Number of failed requests  |
| `aralez_responses_total{status="200"}`  | CounterVec | Response status breakdown  |
| `aralez_response_latency_seconds`       | Histogram  | How fast responses are     |

📘 *Last updated: May 2025*
README.md (101 lines changed)
@@ -1,19 +1,29 @@
 ![]()

-# Aralez (Արալեզ), Reverse proxy and service mesh built on top of Cloudflare's Pingora
+---
+
+# Aralez (Արալեզ),
+
+### **Reverse proxy built on top of Cloudflare's Pingora**
+
+Aralez is a high-performance Rust reverse proxy with zero-configuration automatic protocol handling, TLS, and upstream management,
+featuring Consul and Kubernetes integration for dynamic pod discovery and health-checked routing, acting as a lightweight ingress-style proxy.
+
 ---
 What Aralez means ?
-**Aralez = Արալեզ** <ins>.Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them.</ins>.
+**Aralez = Արալեզ** <ins>Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them</ins>.

 Built on Rust, on top of **Cloudflare’s Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box.

 [](https://www.buymeacoffee.com/sadoyan)

 ---

 ## 🔧 Key Features

 - **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required.
 - **TLS Termination** — Built-in OpenSSL support.
-- **Automatic load of certificates** — Automatically reads and loads certificates from a folder, without a restart.
+- **Automatic loading of certificates** — Automatically reads and loads certificates from a folder, without a restart.
 - **Upstreams TLS detection** — Aralez will automatically detect if upstreams uses secure connection.
 - **Built in rate limiter** — Limit requests to server, by setting up upper limit for requests per seconds, per virtualhost.
 - **Global rate limiter** — Set rate limit for all virtualhosts.
@@ -69,8 +79,8 @@
 | Key | Example Value | Description |
 |----------------------------------|--------------------------------------|----------------------------------------------------------------------------------------------------|
 | **threads** | 12 | Number of running daemon threads. Optional, defaults to 1 |
-| **user** | aralez | Optional, Username for running aralez after dropping root privileges, requires to launch as root |
-| **group** | aralez | Optional,Group for running aralez after dropping root privileges, requires to launch as root |
+| **runuser** | aralez | Optional, Username for running aralez after dropping root privileges, requires to launch as root |
+| **rungroup** | aralez | Optional,Group for running aralez after dropping root privileges, requires to launch as root |
 | **daemon** | false | Run in background (boolean) |
 | **upstream_keepalive_pool_size** | 500 | Pool size for upstream keepalive connections |
 | **pid_file** | /tmp/aralez.pid | Path to PID file |
@@ -112,12 +122,23 @@ Make the binary executable `chmod 755 ./aralez-VERSION` and run.

 File names:

-| File Name | Description |
-|---------------------------|---------------------------------------------------------------|
-| `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
-| `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
-| `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
-| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
+| File Name | Description |
+|---------------------------|--------------------------------------------------------------------------|
+| `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
+| `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
+| `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
+| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
+| `sadoyan/aralez` | Docker image on Debian 13 slim (https://hub.docker.com/r/sadoyan/aralez) |
+
+**Via docker**
+
+```shell
+docker run -d \
+  -v /local/path/to/config:/etc/aralez:ro \
+  -p 80:80 \
+  -p 443:443 \
+  sadoyan/aralez
+```

 ## 💡 Note

@@ -170,7 +191,10 @@ provider: "file"
 sticky_sessions: false
 to_https: false
 rate_limit: 10
 headers:
 server_headers:
   - "X-Forwarded-Proto:https"
   - "X-Forwarded-Port:443"
 client_headers:
   - "Access-Control-Allow-Origin:*"
   - "Access-Control-Allow-Methods:POST, GET, OPTIONS"
   - "Access-Control-Max-Age:86400"
@@ -182,7 +206,10 @@ myhost.mydomain.com:
 "/":
   rate_limit: 20
   to_https: false
   headers:
   server_headers:
     - "X-Something-Else:Foobar"
     - "X-Another-Header:Hohohohoho"
   client_headers:
     - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
     - "X-Proxy-From:Hopaaaaaaaaaaaar"
 servers:
@@ -190,17 +217,23 @@ myhost.mydomain.com:
   - "127.0.0.2:8000"
 "/foo":
   to_https: true
   headers:
   client_headers:
     - "X-Another-Header:Hohohohoho"
   servers:
     - "127.0.0.4:8443"
     - "127.0.0.5:8443"
 "/.well-known/acme-challenge":
   healthcheck: false
   servers:
     - "127.0.0.1:8001"
 ```

 **This means:**

 - Sticky sessions are disabled globally. This setting applies to all upstreams. If enabled all requests will be 301 redirected to HTTPS.
 - HTTP to HTTPS redirect disabled globally, but can be overridden by `to_https` setting per upstream.
 - All upstreams will receive custom headers : `X-Forwarded-Proto:https` and `X-Forwarded-Port:443`
 - Additionally, myhost.mydomain.com with path `/` will receive custom headers : `X-Another-Header:Hohohohoho` and `X-Something-Else:Foobar`
 - Requests to each hosted domains will be limited to 10 requests per second per virtualhost.
 - Requests limits are calculated per requester ip plus requested virtualhost.
 - If the requester exceeds the limit it will receive `429 Too Many Requests` error.
@@ -209,6 +242,7 @@ myhost.mydomain.com:
 - Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`.
 - Plain HTTP to `myhost.mydomain.com/foo` will get 301 redirect to configured TLS port of Aralez.
 - Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`.
+- Requests to `myhost.mydomain.com/.well-known/acme-challenge` will be proxied to `127.0.0.1:8001`, but healthcheks are disabled.
 - SSL/TLS for upstreams is detected automatically, no need to set any config parameter.
 - Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS.
 - Self-signed certificates are silently accepted.
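
The per-IP, per-virtualhost limiting described in the list above can be pictured with a short sketch. This is an illustration only, not Aralez's implementation (the manifest pulls in the pingora-limits crate for the real limiter); it assumes a fixed one-second window keyed on the (client IP, host) pair:

```rust
use std::collections::HashMap;
use std::time::Instant;

/// Fixed-window counter keyed on (client IP, virtual host).
struct WindowLimiter {
    window_start: Instant,
    counts: HashMap<(String, String), u32>,
}

impl WindowLimiter {
    fn new() -> Self {
        Self { window_start: Instant::now(), counts: HashMap::new() }
    }

    /// Returns false when the caller should answer `429 Too Many Requests`.
    fn allow(&mut self, ip: &str, host: &str, limit: u32) -> bool {
        // Start a fresh window once the current second has elapsed.
        if self.window_start.elapsed().as_secs() >= 1 {
            self.window_start = Instant::now();
            self.counts.clear();
        }
        let n = self.counts.entry((ip.to_string(), host.to_string())).or_insert(0);
        *n += 1;
        *n <= limit
    }
}

fn main() {
    let mut limiter = WindowLimiter::new();
    // With rate_limit: 10, the eleventh request inside one second is rejected.
    let allowed = (0..11)
        .filter(|_| limiter.allow("203.0.113.7", "myhost.mydomain.com", 10))
        .count();
    println!("allowed {} of 11 requests", allowed); // allowed 10 of 11 requests
}
```

A production limiter would typically use a sliding window or a token bucket per key rather than clearing every counter at the window edge, which smooths out bursts that straddle two windows.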
@@ -318,20 +352,33 @@ curl -u username:password -H 'Host: myip.mydomain.com' http://127.0.0.1:6193/
 - Sticky session support.
 - HTTP2 ready.

-📊 Why Choose Aralez? – Feature Comparison
+### 🧩 Summary Table: Feature Comparison

-| Feature | **Aralez** | **Nginx** | **HAProxy** | **Traefik** |
-|----------------------------|----------------------------------------------------------------------|--------------------------|-------------------------|-----------------|
-| **Hot Reload** | ✅ Yes (live, API/file) | ⚠️ Reloads config | ⚠️ Reloads config | ✅ Yes (dynamic) |
-| **JWT Auth** | ✅ Built-in | ❌ External scripts | ❌ External Lua or agent | ⚠️ With plugins |
-| **WebSocket Support** | ✅ Automatic | ⚠️ Manual config | ✅ Yes | ✅ Yes |
-| **gRPC Support** | ✅ Automatic (no config) | ⚠️ Manual + HTTP/2 + TLS | ⚠️ Complex setup | ✅ Native |
-| **TLS Termination** | ✅ Built-in (OpenSSL) | ✅ Yes | ✅ Yes | ✅ Yes |
-| **TLS Upstream Detection** | ✅ Automatic | ❌ | ❌ | ❌ |
-| **HTTP/2 Support** | ✅ Automatic | ⚠️ Requires extra config | ⚠️ Requires build flags | ✅ Native |
-| **Sticky Sessions** | ✅ Cookie-based | ⚠️ In plus version only | ✅ | ✅ |
-| **Prometheus Metrics** | ✅ [Built in](https://github.com/sadoyan/aralez/blob/main/METRICS.md) | ⚠️ With Lua or exporter | ⚠️ With external script | ✅ Native |
-| **Built With** | 🦀 Rust | C | C | Go |
+| Feature / Proxy | **Aralez** | **Nginx** | **HAProxy** | **Traefik** | **Caddy** | **Envoy** |
+|----------------------------------|:-----------------:|:---------------------------:|:-----------------:|:--------------------------------:|:---------------:|:---------------:|
+| **Hot Reload (Zero Downtime)** | ✅ **Automatic** | ⚙️ Manual (graceful reload) | ⚙️ Manual | ✅ Automatic | ✅ Automatic | ✅ Automatic |
+| **Auto Cert Reload (from disk)** | ✅ **Automatic** | ❌ No | ❌ No | ✅ Automatic (Let's Encrypt only) | ✅ Automatic | ⚙️ Manual |
+| **Auth: Basic / API Key / JWT** | ✅ **Built-in** | ⚙️ Basic only | ⚙️ Basic only | ✅ Config-based | ✅ Config-based | ✅ Config-based |
+| **TLS / HTTP2 Termination** | ✅ **Automatic** | ⚙️ Manual config | ⚙️ Manual config | ✅ Automatic | ✅ Automatic | ✅ Automatic |
+| **Built-in A+ TLS Grades** | ✅ **Automatic** | ⚙️ Manual tuning | ⚙️ Manual | ⚙️ Manual | ✅ Automatic | ⚙️ Manual |
+| **gRPC Proxy** | ✅ **Zero-Config** | ⚙️ Manual setup | ⚙️ Manual | ⚙️ Needs config | ⚙️ Needs config | ⚙️ Needs config |
+| **SSL Proxy** | ✅ **Zero-Config** | ⚙️ Manual | ⚙️ Manual | ✅ Automatic | ✅ Automatic | ✅ Automatic |
+| **HTTP/2 Proxy** | ✅ **Zero-Config** | ⚙️ Manual enable | ⚙️ Manual enable | ✅ Automatic | ✅ Automatic | ✅ Automatic |
+| **WebSocket Proxy** | ✅ **Zero-Config** | ⚙️ Manual upgrade | ⚙️ Manual upgrade | ✅ Automatic | ✅ Automatic | ✅ Automatic |
+| **Sticky Sessions** | ✅ **Built-in** | ⚙️ Config-based | ⚙️ Config-based | ✅ Automatic | ⚙️ Limited | ✅ Config-based |
+| **Prometheus Metrics** | ✅ **Built-in** | ⚙️ External exporter | ✅ Built-in | ✅ Built-in | ✅ Built-in | ✅ Built-in |
+| **Consul Integration** | ✅ **Yes** | ❌ No | ⚙️ Via DNS only | ✅ Yes | ❌ No | ✅ Yes |
+| **Kubernetes Integration** | ✅ **Yes** | ⚙️ Needs ingress setup | ⚙️ External | ✅ Yes | ⚙️ Limited | ✅ Yes |
+| **Request Limiter** | ✅ **Yes** | ✅ Config-based | ✅ Config-based | ✅ Config-based | ✅ Config-based | ✅ Config-based |
+| **Serve Static Files** | ✅ **Yes** | ✅ Yes | ⚙️ Basic | ✅ Automatic | ✅ Automatic | ❌ No |
+| **Upstream Health Checks** | ✅ **Automatic** | ⚙️ Manual config | ⚙️ Manual config | ✅ Automatic | ✅ Automatic | ✅ Automatic |
+| **Built With** | 🦀 **Rust** | C | C | Go | Go | C++ |

 ---

+✅ **Automatic / Zero-Config** – Works immediately, no setup required
+⚙️ **Manual / Config-based** – Requires explicit configuration or modules
+❌ **No** – Not supported

 ## 💡 Simple benchmark by [Oha](https://github.com/hatoo/oha)
assets/bench2.png (new binary file, 71 KiB): binary file not shown.
@@ -1,7 +1,7 @@
 # Main configuration file, applied on startup
 threads: 12 # Number of daemon threads default setting
-#user: pastor # Username for running aralez after dropping root privileges, requires program to start as root
-#group: pastor # Group for running aralez after dropping root privileges, requires program to start as root
+#runuser: pastor # Username for running aralez after dropping root privileges, requires program to start as root
+#rungroup: pastor # Group for running aralez after dropping root privileges, requires program to start as root
 daemon: false # Run in background
 upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections
 pid_file: /tmp/aralez.pid # Path to PID file
@@ -14,7 +14,7 @@ config_tls_certificate: /etc/server.crt # Mandatory if config_tls_address is set
 config_tls_key_file: /etc/key.pem # Mandatory if config_tls_address is set
 proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address
 proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address
-proxy_certificates: /etc/yoyo # Mandatory if proxy_address_tls set, should contain a certificate and key files strictly in a format {NAME}.crt, {NAME}.key.
+proxy_certificates: /etc/certs # Mandatory if proxy_address_tls set, should contain a certificate and key files strictly in a format {NAME}.crt, {NAME}.key.
 proxy_tls_grade: a+ # Grade of TLS suite for proxy (a+, a, b, c, unsafe), matching grades of Qualys SSL Labs
 upstreams_conf: /etc/upstreams.yaml # the location of upstreams file
 file_server_folder: /opt/storage # Optional, local folder to serve
@@ -22,4 +22,4 @@ file_server_address: 127.0.0.1:3002 # Optional, Local address for file server. C
 log_level: info # info, warn, error, debug, trace, off
 hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE
 hc_interval: 2 #Interval for health checks in seconds
-master_key: 910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774 # Mater key for working with API server and JWT Secret
+master_key: 910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774 # Mater key for working with API server and JWT Secret
@@ -1,45 +1,82 @@
 # The file under watch and hot reload, changes are applied immediately, no need to restart or reload.
-provider: "file" # consul
+provider: "file" # "file" "consul" "kubernetes"
 sticky_sessions: false
-to_ssl: false
-#rate_limit: 100
-headers:
+to_https: false
+rate_limit: 100
 server_headers:
   - "X-Forwarded-Proto:https"
   - "X-Forwarded-Port:443"
 client_headers:
   - "Access-Control-Allow-Origin:*"
   - "Access-Control-Allow-Methods:POST, GET, OPTIONS"
   - "Access-Control-Max-Age:86400"
   - "X-Custom-Header:Something Special"
-authorization:
-  type: "jwt"
-  creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
+#authorization:
+#  type: "jwt"
+#  creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
 #  type: "basic"
-#  creds: "user:Passw0rd"
+#  creds: "username:Pa$$w0rd"
 #  type: "apikey"
 #  creds: "5ecbf799-1343-4e94-a9b5-e278af5cd313-56b45249-1839-4008-a450-a60dc76d2bae"
-consul: # If the provider is consul. Otherwise, ignored.
+consul:
   servers:
-    - "http://consul1:8500"
-    - "http://consul2:8500"
-    - "http://consul3:8500"
-  services: # proxy: The hostname to access the proxy server, real : The real service name in Consul database.
-    - proxy: "proxy-frontend-dev-frontend-srv"
-      real: "frontend-dev-frontend-srv"
+    - "http://192.168.1.199:8500"
+    - "http://192.168.1.200:8500"
+    - "http://192.168.1.201:8500"
+  services: # hostname: The hostname to access the proxy server, upstream : The real service name in Consul database.
+    - hostname: "webapi-service"
+      upstream: "webapi-service-health"
+      path: "/one"
+      client_headers:
+        - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
+        - "X-Proxy-From:Aralez"
+      rate_limit: 1
+      to_https: false
+    - hostname: "webapi-service"
+      upstream: "webapi-service-health"
+      path: "/"
   token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled
+kubernetes:
+  servers:
+    - "192.168.1.55:443" #For testing only, overrides with KUBERNETES_SERVICE_HOST : KUBERNETES_SERVICE_PORT_HTTPS env variables.
+  services:
+    - hostname: "webapi-service"
+      path: "/"
+      upstream: "webapi-service"
+    - hostname: "webapi-service"
+      upstream: "vt-console-service"
+      path: "/one"
+      client_headers:
+        - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
+        - "X-Proxy-From:Aralez"
+      rate_limit: 100
+      to_https: false
+    - hostname: "webapi-service"
+      upstream: "vt-rambulik-service"
+      path: "/two"
+    - hostname: "websocket-service"
+      upstream: "websocket-service"
+      path: "/"
+  tokenpath: "/path/to/kubetoken.txt" #If not set, will default to /var/run/secrets/kubernetes.io/serviceaccount/token
 upstreams:
   myip.mydomain.com:
     paths:
       rate_limit: 10 # Per path rate limit have higher priority than global rate limit. If not set, the global rate limit will be used
       "/":
         rate_limit: 200
         to_https: false
-        headers:
         client_headers:
           - "X-Proxy-From:Aralez"
-        servers: # List of upstreams HOST:PORT
+        servers:
           - "127.0.0.1:8000"
           - "127.0.0.2:8000"
           - "127.0.0.3:8000"
           - "127.0.0.4:8000"
           - "127.0.0.5:8000"
       "/ping":
-        to_https: true
-        headers:
+        to_https: false
         server_headers:
           - "X-Forwarded-Proto:https"
           - "X-Forwarded-Port:443"
         client_headers:
           - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
           - "X-Proxy-From:Aralez"
         servers:
@@ -51,7 +88,8 @@ upstreams:
   polo.mydomain.com:
     paths:
       "/":
-        headers:
+        to_https: false
         client_headers:
           - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
         servers:
           - "192.168.1.1:8000"
@@ -59,4 +97,19 @@ upstreams:
           - "127.0.0.1:8000"
           - "127.0.0.2:8000"
           - "127.0.0.3:8000"
           - "127.0.0.4:8000"
           - "127.0.0.4:8000"
+  apt.mydomain.com:
+    paths:
+      "/":
+        servers:
+          - "192.168.1.10:443"
+      "/.well-known/acme-challenge":
+        healthcheck: false
+        servers:
+          - "127.0.0.1:8001"
+  localpost:
+    paths:
+      "/":
+        to_https: false
+        servers:
+          - "127.0.0.1:9000"
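
A detail worth noting in the services entries above: hostname and upstream are deliberately distinct, so the Host header that clients send to the proxy does not have to match the service name registered in Consul or Kubernetes, and several paths under one public hostname can map to different backing services. For Kubernetes the servers list is explicitly a testing override; in-cluster the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT_HTTPS environment variables take precedence, and the service-account token is read from /var/run/secrets/kubernetes.io/serviceaccount/token unless tokenpath points elsewhere.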
@@ -1,11 +1,16 @@
 pub mod auth;
 pub mod consul;
 pub mod discovery;
 pub mod dnsclient;
 mod filewatch;
 pub mod fordebug;
 pub mod healthcheck;
 pub mod httpclient;
 pub mod jwt;
 pub mod kuberconsul;
 pub mod metrics;
 pub mod parceyaml;
 pub mod state;
 pub mod structs;
 pub mod tls;
 pub mod tools;
 // pub mod watchksecret;
@@ -3,6 +3,7 @@ use base64::engine::general_purpose::STANDARD;
 use base64::Engine;
 use pingora_proxy::Session;
 use std::collections::HashMap;
+use std::sync::Arc;
 use urlencoding::decode;

 trait AuthValidator {
@@ -40,16 +41,6 @@ impl AuthValidator for JwtAuth<'_> {
         if let Some(tok) = get_query_param(session, "araleztoken") {
             return check_jwt(tok.as_str(), jwtsecret);
         }

-        // if let Some(header) = session.get_header("authorization") {
-        //     let h = header.to_str().ok().unwrap().split(" ").collect::<Vec<_>>();
-        //     match h.len() {
-        //         n => {
-        //             return check_jwt(h[n - 1], jwtsecret);
-        //         }
-        //     }
-        // }
-
         if let Some(auth_header) = session.get_header("authorization") {
             if let Ok(header_str) = auth_header.to_str() {
                 if let Some((scheme, token)) = header_str.split_once(' ') {
@@ -66,22 +57,23 @@ fn validate(auth: &dyn AuthValidator, session: &Session) -> bool {
     auth.validate(session)
 }

-pub fn authenticate(c: &[String], session: &Session) -> bool {
-    match c[0].as_str() {
+// pub fn authenticate(c: &[Arc<str>], session: &Session) -> bool {
+pub fn authenticate(auth_type: &Arc<str>, credentials: &Arc<str>, session: &Session) -> bool {
+    match &*auth_type.clone() {
         "basic" => {
-            let auth = BasicAuth(c[1].as_str().into());
+            let auth = BasicAuth(&*credentials.clone());
             validate(&auth, session)
         }
         "apikey" => {
-            let auth = ApiKeyAuth(c[1].as_str().into());
+            let auth = ApiKeyAuth(&*credentials.clone());
             validate(&auth, session)
         }
         "jwt" => {
-            let auth = JwtAuth(c[1].as_str().into());
+            let auth = JwtAuth(&*credentials.clone());
             validate(&auth, session)
         }
         _ => {
-            println!("Unsupported authentication mechanism : {}", c[0]);
+            println!("Unsupported authentication mechanism : {}", auth_type);
             false
         }
     }
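
The shape of the new authenticate signature is easier to see in isolation. The sketch below is a self-contained toy, not the repo's code: it keeps the same Arc<str> parameters and string dispatch over the scheme, but validates a plain string where the real validators inspect the pingora Session:

```rust
use std::sync::Arc;

trait AuthValidator {
    fn validate(&self, presented: &str) -> bool;
}

// Each validator borrows the configured credential, as in the diff above.
struct BasicAuth<'a>(&'a str);
struct ApiKeyAuth<'a>(&'a str);

impl AuthValidator for BasicAuth<'_> {
    fn validate(&self, presented: &str) -> bool {
        // The real impl base64-decodes the Authorization header first.
        presented == self.0
    }
}

impl AuthValidator for ApiKeyAuth<'_> {
    fn validate(&self, presented: &str) -> bool {
        presented == self.0
    }
}

fn authenticate(auth_type: &Arc<str>, credentials: &Arc<str>, presented: &str) -> bool {
    match &**auth_type {
        "basic" => BasicAuth(&**credentials).validate(presented),
        "apikey" => ApiKeyAuth(&**credentials).validate(presented),
        other => {
            println!("Unsupported authentication mechanism : {}", other);
            false
        }
    }
}

fn main() {
    let auth_type: Arc<str> = Arc::from("basic");
    let creds: Arc<str> = Arc::from("user:Passw0rd");
    assert!(authenticate(&auth_type, &creds, "user:Passw0rd"));
    assert!(!authenticate(&auth_type, &creds, "user:wrong"));
}
```

Passing the scheme and credential as Arc<str> rather than a &[String] slice makes them cheap to share across request tasks and drops the positional c[0]/c[1] indexing of the old signature.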
@@ -1,148 +0,0 @@ (entire file deleted)
use crate::utils::parceyaml::load_configuration;
use crate::utils::structs::{Configuration, InnerMap, ServiceMapping, UpstreamsDashMap};
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps};
use dashmap::DashMap;
use futures::channel::mpsc::Sender;
use futures::SinkExt;
use log::{info, warn};
use pingora::prelude::sleep;
use rand::Rng;
use reqwest::header::{HeaderMap, HeaderValue};
use serde::Deserialize;
use std::collections::HashMap;
use std::sync::atomic::AtomicUsize;
use std::time::Duration;

#[derive(Debug, Deserialize)]
struct Service {
    #[serde(rename = "ServiceTaggedAddresses")]
    tagged_addresses: HashMap<String, TaggedAddress>,
}

#[derive(Debug, Deserialize)]
struct TaggedAddress {
    #[serde(rename = "Address")]
    address: String,
    #[serde(rename = "Port")]
    port: u16,
}

pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
    let config = load_configuration(fp.as_str(), "filepath").await;
    let headers = DashMap::new();
    match config {
        Some(config) => {
            if config.typecfg.to_string() != "consul" {
                info!("Not running Consul discovery, requested type is: {}", config.typecfg);
                return;
            }

            info!("Consul Discovery is enabled : {}", config.typecfg);
            let consul = config.consul.clone();
            let prev_upstreams = UpstreamsDashMap::new();
            match consul {
                Some(consul) => {
                    let servers = consul.servers.unwrap();
                    info!("Consul Servers => {:?}", servers);
                    let end = servers.len();

                    loop {
                        let num = rand::rng().random_range(1..end);
                        headers.clear();
                        for (k, v) in config.headers.clone() {
                            headers.insert(k.to_string(), v);
                        }
                        let consul_data = servers.get(num).unwrap().to_string();
                        let upstreams = consul_request(consul_data, consul.services.clone(), consul.token.clone());
                        match upstreams.await {
                            Some(upstreams) => {
                                if !compare_dashmaps(&upstreams, &prev_upstreams) {
                                    let mut tosend: Configuration = Configuration {
                                        upstreams: Default::default(),
                                        headers: Default::default(),
                                        consul: None,
                                        typecfg: "".to_string(),
                                        extraparams: config.extraparams.clone(),
                                    };

                                    clone_dashmap_into(&upstreams, &prev_upstreams);
                                    clone_dashmap_into(&upstreams, &tosend.upstreams);
                                    tosend.headers = headers.clone();
                                    tosend.extraparams.authentication = config.extraparams.authentication.clone();
                                    tosend.typecfg = config.typecfg.clone();
                                    tosend.consul = config.consul.clone();
                                    toreturn.send(tosend).await.unwrap();
                                }
                            }
                            None => {}
                        }
                        sleep(Duration::from_secs(5)).await;
                    }
                }
                None => {}
            }
        }
        None => {}
    }
}

async fn consul_request(url: String, whitelist: Option<Vec<ServiceMapping>>, token: Option<String>) -> Option<UpstreamsDashMap> {
    let upstreams = UpstreamsDashMap::new();
    let ss = url.clone() + "/v1/catalog/service/";
    match whitelist {
        Some(whitelist) => {
            for k in whitelist.iter() {
                let pref: String = ss.clone() + &k.real;
                let list = get_by_http(pref.clone(), token.clone()).await;
                match list {
                    Some(list) => {
                        upstreams.insert(k.proxy.clone(), list);
                    }
                    None => {
                        warn!("Whitelist not found for {}", k.proxy);
                    }
                }
            }
        }
        None => {}
    }
    Some(upstreams)
}

async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    if let Some(token) = token {
        headers.insert("X-Consul-Token", HeaderValue::from_str(&token).unwrap());
    }
    let to = Duration::from_secs(1);
    let u = client.get(url).timeout(to).send();
    let mut values = Vec::new();
    let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
    match u.await {
        Ok(r) => {
            let jason = r.json::<Vec<Service>>().await;
            match jason {
                Ok(whitelist) => {
                    for service in whitelist {
                        let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
                        let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
                        let to_add = InnerMap {
                            address: addr,
                            port: prt,
                            is_ssl: false,
                            is_http2: false,
                            to_https: false,
                            rate_limit: None,
                        };
                        values.push(to_add);
                    }
                }
                Err(_) => return None,
            }
        }
        Err(_) => return None,
    }
    upstreams.insert("/".to_string(), (values, AtomicUsize::new(0)));
    Some(upstreams)
}
@@ -1,26 +1,34 @@
-use crate::utils::consul;
 use crate::utils::filewatch;
-use crate::utils::structs::Configuration;
+use crate::utils::kuberconsul::{ConsulDiscovery, KubernetesDiscovery, ServiceDiscovery};
+use crate::utils::structs::{Configuration, UpstreamsDashMap};
 use crate::web::webserver;
 use async_trait::async_trait;
 use futures::channel::mpsc::Sender;
+use std::sync::Arc;

-pub struct FromFileProvider {
-    pub path: String,
-}
 pub struct APIUpstreamProvider {
+    pub config_api_enabled: bool,
     pub address: String,
     pub masterkey: String,
-    pub tls_address: Option<String>,
-    pub tls_certificate: Option<String>,
-    pub tls_key_file: Option<String>,
+    // pub tls_address: Option<String>,
+    // pub tls_certificate: Option<String>,
+    // pub tls_key_file: Option<String>,
     pub file_server_address: Option<String>,
     pub file_server_folder: Option<String>,
+    pub current_upstreams: Arc<UpstreamsDashMap>,
+    pub full_upstreams: Arc<UpstreamsDashMap>,
 }

+pub struct FromFileProvider {
+    pub path: String,
+}
+
 pub struct ConsulProvider {
     pub path: String,
+    pub config: Arc<Configuration>,
 }

+pub struct KubernetesProvider {
+    pub config: Arc<Configuration>,
+}

 #[async_trait]
@@ -31,7 +39,7 @@ pub trait Discovery {
 #[async_trait]
 impl Discovery for APIUpstreamProvider {
     async fn start(&self, toreturn: Sender<Configuration>) {
-        webserver::run_server(self, toreturn).await;
+        webserver::run_server(self, toreturn, self.current_upstreams.clone(), self.full_upstreams.clone()).await;
     }
 }

@@ -45,6 +53,13 @@ impl Discovery for FromFileProvider {
 #[async_trait]
 impl Discovery for ConsulProvider {
     async fn start(&self, tx: Sender<Configuration>) {
-        tokio::spawn(consul::start(self.path.clone(), tx.clone()));
+        tokio::spawn(ConsulDiscovery.fetch_upstreams(self.config.clone(), tx));
     }
 }

+#[async_trait]
+impl Discovery for KubernetesProvider {
+    async fn start(&self, tx: Sender<Configuration>) {
+        tokio::spawn(KubernetesDiscovery.fetch_upstreams(self.config.clone(), tx));
+    }
+}
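
The Discovery trait above is the seam all four providers share: each one owns a Sender<Configuration> and pushes a full configuration snapshot whenever its source changes. A minimal, self-contained sketch of that contract (toy types, not the repo's code):

```rust
use async_trait::async_trait;
use futures::channel::mpsc::{channel, Sender};
use futures::{SinkExt, StreamExt};

// Stand-in for the repo's Configuration struct.
#[derive(Debug, Default)]
struct Configuration;

#[async_trait]
trait Discovery {
    async fn start(&self, toreturn: Sender<Configuration>);
}

// A provider that pushes one snapshot and exits; the real providers
// (file watcher, API server, Consul, Kubernetes) loop and resend on change.
struct StaticProvider;

#[async_trait]
impl Discovery for StaticProvider {
    async fn start(&self, mut toreturn: Sender<Configuration>) {
        let _ = toreturn.send(Configuration::default()).await;
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = channel(8);
    StaticProvider.start(tx).await;
    // The proxy side of the channel applies each snapshot as it arrives.
    while let Some(cfg) = rx.next().await {
        println!("new configuration received: {:?}", cfg);
    }
}
```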
src/utils/dnsclient.rs (new file, 159 lines)
@@ -0,0 +1,159 @@
/*
use crate::utils::structs::InnerMap;
use dashmap::DashMap;
use hickory_client::client::{Client, ClientHandle};
use hickory_client::proto::rr::{DNSClass, Name, RecordType};
use hickory_client::proto::runtime::TokioRuntimeProvider;
use hickory_client::proto::udp::UdpClientStream;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::AtomicUsize;
use std::time::Duration;
use tokio::sync::Mutex;

type DnsError = Box<dyn std::error::Error + Send + Sync + 'static>;

pub struct DnsClientPool {
    clients: Vec<Mutex<DnsClient>>,
}

struct DnsClient {
    client: Client,
}

pub async fn start2(mut toreturn: Sender<Configuration>, config: Arc<Configuration>) {
    let k8s = config.kubernetes.clone();
    match k8s {
        Some(k8s) => {
            let dnserver = k8s.servers.unwrap_or(vec!["127.0.0.1:53".to_string()]);
            let headers = DashMap::new();
            let end = dnserver.len() - 1;
            let mut num = 0;
            if end > 0 {
                num = rand::rng().random_range(0..end);
            }
            let srv = dnserver.get(num).unwrap().to_string();
            let pool = DnsClientPool::new(5, srv.clone()).await;
            let u = UpstreamsDashMap::new();
            if let Some(whitelist) = k8s.services {
                loop {
                    let upstreams = UpstreamsDashMap::new();
                    for service in whitelist.iter() {
                        let ret = pool.query_srv(service.real.as_str(), srv.clone()).await;
                        match ret {
                            Ok(r) => {
                                upstreams.insert(service.proxy.clone(), r);
                            }
                            Err(e) => eprintln!("DNS query failed for {:?}: {:?}", service, e),
                        }
                    }
                    if !compare_dashmaps(&u, &upstreams) {
                        headers.clear();
                        for (k, v) in config.headers.clone() {
                            headers.insert(k.to_string(), v);
                        }

                        let mut tosend: Configuration = Configuration {
                            upstreams: Default::default(),
                            headers: Default::default(),
                            consul: None,
                            kubernetes: None,
                            typecfg: "".to_string(),
                            extraparams: config.extraparams.clone(),
                        };

                        clone_dashmap_into(&upstreams, &u);
                        clone_dashmap_into(&upstreams, &tosend.upstreams);
                        tosend.headers = headers.clone();
                        tosend.extraparams.authentication = config.extraparams.authentication.clone();
                        tosend.typecfg = config.typecfg.clone();
                        tosend.consul = config.consul.clone();
                        print_upstreams(&tosend.upstreams);
                        toreturn.send(tosend).await.unwrap();
                    }

                    tokio::time::sleep(Duration::from_secs(1)).await;
                }
            }
        }
        None => {}
    }
}

impl DnsClient {
    pub async fn new(server: String) -> Result<Self, DnsError> {
        let server_details = server;
        let server: SocketAddr = server_details.parse().expect("Unable to parse socket address");
        let conn = UdpClientStream::builder(server, TokioRuntimeProvider::default()).build();
        let (client, bg) = Client::connect(conn).await.unwrap();
        tokio::spawn(bg);
        Ok(Self { client })
    }

    pub async fn query_srv(&mut self, name: &str) -> Result<DashMap<String, (Vec<InnerMap>, AtomicUsize)>, DnsError> {
        let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
        let mut values = Vec::new();
        match tokio::time::timeout(Duration::from_secs(5), self.client.query(Name::from_str(name)?, DNSClass::IN, RecordType::SRV)).await {
            Ok(Ok(response)) => {
                for answer in response.answers() {
                    if let hickory_client::proto::rr::RData::SRV(srv) = answer.data() {
                        let to_add = InnerMap {
                            address: srv.target().to_string(),
                            port: srv.port(),
                            is_ssl: false,
                            is_http2: false,
                            to_https: false,
                            sticky_sessions: false,
                            rate_limit: None,
                        };
                        values.push(to_add);
                    }
                }
                upstreams.insert("/".to_string(), (values, AtomicUsize::new(0)));
                Ok(upstreams)
            }
            Ok(Err(e)) => Err(Box::new(e)),
            Err(_) => Err("DNS query timed out".into()),
        }
    }
}

impl DnsClientPool {
    pub async fn new(pool_size: usize, server: String) -> Self {
        let mut clients = Vec::with_capacity(pool_size);
        for _ in 0..pool_size {
            if let Ok(client) = DnsClient::new(server.clone()).await {
                clients.push(Mutex::new(client));
            }
        }
        Self { clients }
    }

    pub async fn query_srv(&self, name: &str, server: String) -> Result<DashMap<String, (Vec<InnerMap>, AtomicUsize)>, DnsError> {
        // Try to get an available client
        for client_mutex in &self.clients {
            if let Ok(mut client) = client_mutex.try_lock() {
                let vay = client.query_srv(name).await;
                match vay {
                    Ok(_) => return vay,
                    Err(_) => {
                        // If query fails, drop this client and create a new one
                        *client = match DnsClient::new(server).await {
                            Ok(c) => c,
                            Err(e) => return Err(e),
                        };
                        // Retry with the new client
                        return client.query_srv(name).await;
                    }
                }
            }
        }

        // If all clients are busy, wait for the first one with a timeout
        match tokio::time::timeout(Duration::from_secs(2), self.clients[0].lock()).await {
            Ok(mut client) => client.query_srv(name).await,
            Err(_) => Err("All DNS clients are busy and timeout reached".into()),
        }
    }
}
*/
@@ -2,7 +2,7 @@ use crate::utils::parceyaml::load_configuration;
 use crate::utils::structs::Configuration;
 use futures::channel::mpsc::Sender;
 use futures::SinkExt;
-use log::{error, info, warn};
+use log::error;
 use notify::event::ModifyKind;
 use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
 use pingora::prelude::sleep;
@@ -15,19 +15,7 @@ pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
     let file_path = fp.as_str();
     let parent_dir = Path::new(file_path).parent().unwrap();
     let (local_tx, mut local_rx) = tokio::sync::mpsc::channel::<notify::Result<Event>>(1);
-    let snd = load_configuration(file_path, "filepath").await;
-
-    match snd {
-        Some(snd) => {
-            if snd.typecfg != "file" {
-                warn!("Disabling file watcher, requested discovery type is: {}", snd.typecfg);
-                return;
-            }
-            info!("Watching for changes in {:?}", parent_dir);
-            toreturn.send(snd).await.unwrap();
-        }
-        None => {}
-    }
     let _watcher_handle = task::spawn_blocking({
         let parent_dir = parent_dir.to_path_buf(); // Move directory path into the closure
         move || {
@@ -53,7 +41,7 @@ pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
             if start.elapsed() > Duration::from_secs(2) {
                 start = Instant::now();
                 // info!("Config File changed :=> {:?}", e);
-                let snd = load_configuration(file_path, "filepath").await;
+                let snd = load_configuration(file_path, "filepath").await.0;
                 match snd {
                     Some(snd) => {
                         toreturn.send(snd).await.unwrap();
31
src/utils/fordebug.rs
Normal file
@@ -0,0 +1,31 @@
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

pub struct CountingAllocator;

pub static ALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
pub static DEALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
pub static ALLOC_BYTES: AtomicUsize = AtomicUsize::new(0);
#[allow(dead_code)]
unsafe impl GlobalAlloc for CountingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        ALLOC_COUNT.fetch_add(1, Ordering::Relaxed);
        ALLOC_BYTES.fetch_add(layout.size(), Ordering::Relaxed);
        System.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        DEALLOC_COUNT.fetch_add(1, Ordering::Relaxed);
        System.dealloc(ptr, layout)
    }
}

// Uncomment following lines and comment allocator in main.rs
// #[global_allocator]
// pub static A: CountingAllocator = CountingAllocator;
#[allow(dead_code)]
fn for_example() {
    let before = crate::utils::fordebug::ALLOC_COUNT.load(Ordering::Relaxed);
    let after = crate::utils::fordebug::ALLOC_COUNT.load(Ordering::Relaxed);
    println!("Allocations : {}", after - before);
}
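A minimal usage sketch for the counting allocator above (not part of the diff; it assumes the commented-out #[global_allocator] lines are enabled and the allocator in main.rs is disabled, as the file's own note says):

use std::sync::atomic::Ordering;

use crate::utils::fordebug::{ALLOC_BYTES, ALLOC_COUNT};

// Measure how many allocations a workload performs by diffing the counters.
fn count_allocations<T>(label: &str, workload: impl FnOnce() -> T) -> T {
    let allocs_before = ALLOC_COUNT.load(Ordering::Relaxed);
    let bytes_before = ALLOC_BYTES.load(Ordering::Relaxed);
    let out = workload();
    let allocs = ALLOC_COUNT.load(Ordering::Relaxed) - allocs_before;
    let bytes = ALLOC_BYTES.load(Ordering::Relaxed) - bytes_before;
    println!("{label}: {allocs} allocations, {bytes} bytes");
    out
}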
@@ -15,12 +15,18 @@ pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>,
    loop {
        tokio::select! {
            _ = period.tick() => {
                populate_upstreams(&upslist, &fullist, &idlist, params, &client).await;
                // populate_upstreams(&upslist, &fullist, &idlist, params, &client).await;
                let totest = build_upstreams(&fullist, params.0, &client).await;
                if !compare_dashmaps(&totest, &upslist) {
                    clone_dashmap_into(&totest, &upslist);
                    clone_idmap_into(&totest, &idlist);
                }
            }
        }
    }
}

/*
pub async fn populate_upstreams(upslist: &Arc<UpstreamsDashMap>, fullist: &Arc<UpstreamsDashMap>, idlist: &Arc<UpstreamsIdMap>, params: (&str, u64), client: &Client) {
    let totest = build_upstreams(fullist, params.0, client).await;
    if !compare_dashmaps(&totest, upslist) {
@@ -28,6 +34,7 @@ pub async fn populate_upstreams(upslist: &Arc<UpstreamsDashMap>, fullist: &Arc<U
        clone_idmap_into(&totest, idlist);
    }
}
*/

pub async fn initiate_upstreams(fullist: UpstreamsDashMap) -> UpstreamsDashMap {
    let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap();
@@ -46,7 +53,7 @@ async fn build_upstreams(fullist: &UpstreamsDashMap, method: &str, client: &Clie
    let mut innervec = Vec::new();

    for (_, upstream) in path_entry.value().0.iter().enumerate() {
        let tls = detect_tls(upstream.address.as_str(), &upstream.port, &client).await;
        let tls = detect_tls(&upstream.address.to_string(), &upstream.port, &client).await;
        let is_h2 = matches!(tls.1, Some(Version::HTTP_2));

        let link = if tls.0 {
@@ -62,16 +69,22 @@ async fn build_upstreams(fullist: &UpstreamsDashMap, method: &str, client: &Clie
            is_http2: is_h2,
            to_https: upstream.to_https,
            rate_limit: upstream.rate_limit,
            healthcheck: upstream.healthcheck,
            authorization: upstream.authorization.clone(),
        };

        let resp = http_request(&link, method, "", &client).await;
        if resp.0 {
            if resp.1 {
                scheme.is_http2 = is_h2; // could be adjusted further
        if scheme.healthcheck.unwrap_or(true) {
            let resp = http_request(&link, method, "", &client).await;
            if resp.0 {
                if resp.1 {
                    scheme.is_http2 = is_h2; // could be adjusted further
                }
                innervec.push(Arc::from(scheme));
            } else {
                warn!("Dead Upstream : {}", link);
            }
            innervec.push(scheme);
        } else {
            warn!("Dead Upstream : {}", link);
            innervec.push(Arc::from(scheme));
        }
    }
    inner.insert(path.clone(), (innervec, AtomicUsize::new(0)));
82
src/utils/httpclient.rs
Normal file
@@ -0,0 +1,82 @@
use crate::utils::kuberconsul::{match_path, ConsulService, KubeEndpoints};
use crate::utils::structs::{InnerMap, ServiceMapping};
use axum::http::{HeaderMap, HeaderValue};
use dashmap::DashMap;
use reqwest::Client;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::time::Duration;

pub async fn for_consul(url: String, token: Option<String>, conf: &ServiceMapping) -> Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>> {
    let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().ok()?;
    let mut headers = HeaderMap::new();
    if let Some(token) = token {
        headers.insert("X-Consul-Token", HeaderValue::from_str(&token).unwrap());
    }
    let to = Duration::from_secs(1);
    let resp = client.get(url).timeout(to).send().await.ok()?;
    if !resp.status().is_success() {
        eprintln!("Consul API returned status: {}", resp.status());
        return None;
    }
    let mut inner_vec = Vec::new();
    let upstreams: DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)> = DashMap::new();
    let endpoints: Vec<ConsulService> = resp.json().await.ok()?;
    for subsets in endpoints {
        // let addr = subsets.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
        // let prt = subsets.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
        let addr = subsets.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
        let prt = subsets.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
        let to_add = Arc::from(InnerMap {
            address: Arc::from(&*addr),
            port: prt,
            is_ssl: false,
            is_http2: false,
            to_https: conf.to_https.unwrap_or(false),
            rate_limit: conf.rate_limit,
            healthcheck: None,
            authorization: None,
        });
        inner_vec.push(to_add);
    }
    match_path(&conf, &upstreams, inner_vec.clone());
    Some(upstreams)
}

pub async fn for_kuber(url: &str, token: &str, conf: &ServiceMapping) -> Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>> {
    let to = Duration::from_secs(10);
    let client = Client::builder().timeout(Duration::from_secs(10)).danger_accept_invalid_certs(true).build().ok()?;
    let resp = client.get(url).timeout(to).bearer_auth(token).send().await.ok()?;
    if !resp.status().is_success() {
        eprintln!("Kubernetes API returned status: {}", resp.status());
        return None;
    }
    let endpoints: KubeEndpoints = resp.json().await.ok()?;

    let upstreams: DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)> = DashMap::new();

    if let Some(subsets) = endpoints.subsets {
        for subset in subsets {
            if let (Some(addresses), Some(ports)) = (subset.addresses, subset.ports) {
                let mut inner_vec = Vec::new();
                for addr in addresses {
                    for port in &ports {
                        let to_add = Arc::from(InnerMap {
                            address: Arc::from(addr.ip.clone()),
                            port: port.port.clone(),
                            is_ssl: false,
                            is_http2: false,
                            to_https: conf.to_https.unwrap_or(false),
                            rate_limit: conf.rate_limit,
                            healthcheck: None,
                            authorization: None,
                        });
                        inner_vec.push(to_add);
                    }
                }
                match_path(&conf, &upstreams, inner_vec.clone());
            }
        }
    }
    Some(upstreams)
}
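For reference (not part of the diff), the Endpoints JSON shape that for_kuber expects can be exercised in isolation against the structs declared in src/utils/kuberconsul.rs below; the sample payload is illustrative, not a repo fixture:

// Parse a hand-written Endpoints document into KubeEndpoints.
fn parse_sample() -> Result<(), serde_json::Error> {
    let body = r#"{"subsets":[{"addresses":[{"ip":"10.0.0.7"}],"ports":[{"port":8080}]}]}"#;
    let eps: crate::utils::kuberconsul::KubeEndpoints = serde_json::from_str(body)?;
    for s in eps.subsets.unwrap_or_default() {
        for a in s.addresses.unwrap_or_default() {
            for p in s.ports.as_deref().unwrap_or(&[]) {
                println!("{}:{}", a.ip, p.port);
            }
        }
    }
    Ok(())
}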
228
src/utils/kuberconsul.rs
Normal file
@@ -0,0 +1,228 @@
use crate::utils::httpclient;
use crate::utils::parceyaml::build_headers;
use crate::utils::structs::{Configuration, InnerMap, ServiceMapping, UpstreamsDashMap};
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams};
use async_trait::async_trait;
use dashmap::DashMap;
use futures::channel::mpsc::Sender;
use futures::SinkExt;
use pingora::prelude::sleep;
use rand::RngExt;
use serde::Deserialize;
use std::collections::HashMap;
use std::env;
use std::fs;
use std::path::Path;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::time::Duration;
use tokio::fs::File;
use tokio::io::AsyncReadExt;

#[derive(Debug, serde::Deserialize)]
pub struct KubeEndpoints {
    pub subsets: Option<Vec<KubeSubset>>,
}
#[derive(Debug, serde::Deserialize)]
pub struct KubeSubset {
    pub addresses: Option<Vec<KubeAddress>>,
    pub ports: Option<Vec<KubePort>>,
}

#[derive(Debug, serde::Deserialize)]
pub struct KubeAddress {
    pub ip: String,
}

#[derive(Debug, serde::Deserialize)]
pub struct KubePort {
    pub port: u16,
}

#[derive(Debug, Deserialize)]
pub struct ConsulService {
    #[serde(rename = "ServiceTaggedAddresses")]
    pub tagged_addresses: HashMap<String, ConsulTaggedAddress>,
}

#[derive(Debug, Deserialize)]
pub struct ConsulTaggedAddress {
    #[serde(rename = "Address")]
    pub address: String,
    #[serde(rename = "Port")]
    pub port: u16,
}
pub fn list_to_upstreams(lt: Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>>, upstreams: &UpstreamsDashMap, i: &ServiceMapping) {
    if let Some(list) = lt {
        match upstreams.get(&*i.hostname.clone()) {
            Some(upstr) => {
                for (k, v) in list {
                    upstr.value().insert(Arc::from(k.to_owned()), v);
                }
            }
            None => {
                upstreams.insert(Arc::from(i.hostname.clone()), list);
            }
        };
    }
}

pub fn match_path(conf: &ServiceMapping, upstreams: &DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>, values: Vec<Arc<InnerMap>>) {
    match conf.path {
        Some(ref p) => {
            upstreams.insert(Arc::from(p.clone()), (values, AtomicUsize::new(0)));
        }
        None => {
            upstreams.insert(Arc::from("/"), (values, AtomicUsize::new(0)));
        }
    }
}

async fn read_token(path: &str) -> String {
    let mut file = File::open(path).await.unwrap();
    let mut contents = String::new();
    file.read_to_string(&mut contents).await.unwrap();
    contents.trim().to_string()
}
#[async_trait]
pub trait ServiceDiscovery {
    async fn fetch_upstreams(&self, config: Arc<Configuration>, toreturn: Sender<Configuration>);
}

pub struct KubernetesDiscovery;
pub struct ConsulDiscovery;

#[async_trait]
impl ServiceDiscovery for KubernetesDiscovery {
    async fn fetch_upstreams(&self, config: Arc<Configuration>, mut toreturn: Sender<Configuration>) {
        let prev_upstreams = UpstreamsDashMap::new();

        if let Some(kuber) = config.kubernetes.clone() {
            let servers = kuber.servers.unwrap_or(vec![format!(
                "{}:{}",
                env::var("KUBERNETES_SERVICE_HOST").unwrap_or("0.0.0.0".to_string()),
                env::var("KUBERNETES_SERVICE_PORT_HTTPS").unwrap_or("0".to_string())
            )]);

            let end = servers.len().saturating_sub(1);
            let num = if end > 0 { rand::rng().random_range(0..end) } else { 0 };
            let server = servers.get(num).unwrap().to_string();
            let path = kuber.tokenpath.unwrap_or("/var/run/secrets/kubernetes.io/serviceaccount/token".to_string());
            let namespace = get_current_namespace().unwrap_or_else(|| "default".to_string());
            let token = read_token(path.as_str()).await;
            loop {
                let upstreams = UpstreamsDashMap::new();
                if let Some(kuber) = config.kubernetes.clone() {
                    if let Some(svc) = kuber.services {
                        for service in svc {
                            let header_list: DashMap<Arc<str>, Vec<(Arc<str>, Arc<str>)>> = DashMap::new();
                            let mut hl = Vec::new();
                            build_headers(&service.client_headers, config.as_ref(), &mut hl);
                            if !hl.is_empty() {
                                match service.path.clone() {
                                    Some(path) => {
                                        header_list.insert(Arc::from(path.as_str()), hl);
                                    }
                                    None => {
                                        header_list.insert(Arc::from("/"), hl);
                                    }
                                }

                                // header_list.insert(Arc::from(path.as_str()), hl);
                                // header_list.insert(Arc::from(i.path).unwrap_or(Arc::from("/")).as_str(), hl);
                                config.client_headers.insert(Arc::from(service.hostname.clone()), header_list);
                            }
                            let url = format!("https://{}/api/v1/namespaces/{}/endpoints/{}", server, namespace, service.hostname);
                            // let url = format!("https://{}/api/v1/namespaces/{}/endpoints?labelSelector=app", server, namespace);
                            let list = httpclient::for_kuber(&*url, &*token, &service).await;
                            // println!("{:?}", list);
                            list_to_upstreams(list, &upstreams, &service);
                        }
                    }
                    if let Some(lt) = clone_compare(&upstreams, &prev_upstreams, &config).await {
                        toreturn.send(lt).await.unwrap();
                    }
                }
                sleep(Duration::from_secs(5)).await;
            }
        }
    }
}

fn get_current_namespace() -> Option<String> {
    let ns_path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
    if Path::new(ns_path).exists() {
        if let Ok(contents) = fs::read_to_string(ns_path) {
            return Some(contents.trim().to_string());
        }
    }
    std::env::var("POD_NAMESPACE").ok()
}

#[async_trait]
impl ServiceDiscovery for ConsulDiscovery {
    async fn fetch_upstreams(&self, config: Arc<Configuration>, mut toreturn: Sender<Configuration>) {
        let prev_upstreams = UpstreamsDashMap::new();
        loop {
            let upstreams = UpstreamsDashMap::new();

            if let Some(consul) = config.consul.clone() {
                let servers = consul.servers.unwrap_or(vec![format!(
                    "{}:{}",
                    env::var("CONSUL_SERVICE_HOST").unwrap_or("0.0.0.0".to_string()),
                    env::var("CONSUL_SERVICE_PORT").unwrap_or("0".to_string())
                )]);
                let end = servers.len().saturating_sub(1);
                let num = if end > 0 { rand::rng().random_range(0..end) } else { 0 };
                let consul_data = servers.get(num).unwrap().to_string();
                let ss = consul_data + "/v1/catalog/service/";

                if let Some(svc) = consul.services {
                    for i in svc {
                        let header_list = DashMap::new();
                        let mut hl = Vec::new();
                        build_headers(&i.client_headers, config.as_ref(), &mut hl);
                        if !hl.is_empty() {
                            match i.path.clone() {
                                Some(path) => {
                                    header_list.insert(Arc::from(path.as_str()), hl);
                                }
                                None => {
                                    header_list.insert(Arc::from("/"), hl);
                                }
                            }
                            // header_list.insert(i.path.clone().unwrap_or("/".to_string()), hl);
                            config.client_headers.insert(Arc::from(i.hostname.clone()), header_list);
                        }

                        let pref = ss.clone() + &i.upstream;
                        let list = httpclient::for_consul(pref, consul.token.clone(), &i).await;
                        list_to_upstreams(list, &upstreams, &i);
                    }
                }
            }
            if let Some(lt) = clone_compare(&upstreams, &prev_upstreams, &config).await {
                toreturn.send(lt).await.unwrap();
            }
            sleep(Duration::from_secs(5)).await;
        }
    }
}
async fn clone_compare(upstreams: &UpstreamsDashMap, prev_upstreams: &UpstreamsDashMap, config: &Arc<Configuration>) -> Option<Configuration> {
    if !compare_dashmaps(&upstreams, &prev_upstreams) {
        let tosend: Configuration = Configuration {
            upstreams: Default::default(),
            client_headers: config.client_headers.clone(),
            server_headers: config.server_headers.clone(),
            consul: config.consul.clone(),
            kubernetes: config.kubernetes.clone(),
            typecfg: config.typecfg.clone(),
            extraparams: config.extraparams.clone(),
        };
        clone_dashmap_into(&upstreams, &prev_upstreams);
        clone_dashmap_into(&upstreams, &tosend.upstreams);
        print_upstreams(&tosend.upstreams);
        return Some(tosend);
    };
    None
}
@@ -1,10 +1,14 @@
use http::method::Method;
use http::StatusCode;
use pingora_http::Version;
use prometheus::{register_histogram, register_int_counter, register_int_counter_vec, Histogram, IntCounter, IntCounterVec};
use std::sync::Arc;
use std::time::Duration;

pub struct MetricTypes {
    pub method: String,
    pub code: String,
    pub method: Method,
    pub upstream: Arc<str>,
    pub code: Option<StatusCode>,
    pub latency: Duration,
    pub version: Version,
}
@@ -33,6 +37,11 @@ lazy_static::lazy_static! {
        "Number of requests by HTTP method",
        &["method"]
    ).unwrap();
    pub static ref REQUESTS_BY_UPSTREAM: IntCounterVec = register_int_counter_vec!(
        "aralez_requests_by_upstream",
        "Number of requests by UPSTREAM server",
        &["upstream"]
    ).unwrap();
    pub static ref REQUESTS_BY_VERSION: IntCounterVec = register_int_counter_vec!(
        "aralez_requests_by_version_total",
        "Number of requests by HTTP versions",
@@ -57,7 +66,8 @@ pub fn calc_metrics(metric_types: &MetricTypes) {
        _ => "Unknown",
    };
    REQUESTS_BY_VERSION.with_label_values(&[&version_str]).inc();
    RESPONSE_CODES.with_label_values(&[&metric_types.code.to_string()]).inc();
    RESPONSE_CODES.with_label_values(&[metric_types.code.unwrap_or(http::StatusCode::GONE).as_str()]).inc();
    REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc();
    REQUESTS_BY_UPSTREAM.with_label_values(&[metric_types.upstream.as_ref()]).inc();
    RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64());
}
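For orientation, a minimal sketch (not part of the diff) of building the reworked MetricTypes and feeding it to calc_metrics; the concrete values are illustrative:

use std::sync::Arc;
use std::time::Duration;

fn record_sample() {
    let m = crate::utils::metrics::MetricTypes {
        method: http::Method::GET,
        upstream: Arc::from("10.0.0.7:8080"),   // feeds the new REQUESTS_BY_UPSTREAM counter
        code: Some(http::StatusCode::OK),       // None falls back to 410 GONE in calc_metrics
        latency: Duration::from_millis(12),
        version: pingora_http::Version::HTTP_11,
    };
    crate::utils::metrics::calc_metrics(&m);
}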
@@ -1,15 +1,15 @@
use crate::utils::healthcheck;
use crate::utils::state::{is_first_run, mark_not_first_run};
use crate::utils::structs::*;
use crate::utils::tools::{clone_dashmap, clone_dashmap_into, print_upstreams};
use dashmap::DashMap;
use log::{error, info, warn};
use std::collections::HashMap;
use std::sync::atomic::AtomicUsize;
// use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::{env, fs};
// use tokio::sync::oneshot::{Receiver, Sender};

pub async fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
pub async fn load_configuration(d: &str, kind: &str) -> (Option<Configuration>, String) {
    let yaml_data = match kind {
        "filepath" => match fs::read_to_string(d) {
            Ok(data) => {
@@ -19,7 +19,7 @@ pub async fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
            Err(e) => {
                error!("Reading: {}: {:?}", d, e);
                warn!("Running with empty upstreams list, update it via API");
                return None;
                return (None, e.to_string());
            }
        },
        "content" => {
@@ -28,18 +28,20 @@ pub async fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
        }
        _ => {
            error!("Mismatched parameter, only filepath|content is allowed");
            return None;
            return (None, "Mismatched parameter, only filepath|content is allowed".to_string());
        }
    };

    let parsed: Config = match serde_yaml::from_str(&yaml_data) {
        Ok(cfg) => cfg,
        Ok(cfg) => {
            // println!("{:#?}", cfg);
            cfg
        }
        Err(e) => {
            error!("Failed to parse upstreams file: {}", e);
            return None;
            return (None, e.to_string());
        }
    };

    let mut toreturn = Configuration::default();

    populate_headers_and_auth(&mut toreturn, &parsed).await;
@@ -48,52 +50,61 @@ pub async fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
    match parsed.provider.as_str() {
        "file" => {
            populate_file_upstreams(&mut toreturn, &parsed).await;
            Some(toreturn)
            (Some(toreturn), "Ok".to_string())
        }
        "consul" => {
            toreturn.consul = parsed.consul;
            if toreturn.consul.is_some() {
                Some(toreturn)
            } else {
                None
            }
            (toreturn.consul.is_some().then_some(toreturn), "Ok".to_string())
        }
        "kubernetes" => {
            toreturn.kubernetes = parsed.kubernetes;
            (toreturn.kubernetes.is_some().then_some(toreturn), "Ok".to_string())
        }
        "kubernetes" => None,
        _ => {
            warn!("Unknown provider {}", parsed.provider);
            None
            (None, "Unknown provider".to_string())
        }
    }
}

async fn populate_headers_and_auth(config: &mut Configuration, parsed: &Config) {
    if let Some(headers) = &parsed.headers {
        let mut hl = Vec::new();
    let mut ch: Vec<(Arc<str>, Arc<str>)> = Vec::new();
    if let Some(headers) = &parsed.client_headers {
        for header in headers {
            if let Some((key, val)) = header.split_once(':') {
                hl.push((key.trim().to_string(), val.trim().to_string()));
                ch.push((Arc::from(key), Arc::from(val)));
            }
        }

        let global_headers = DashMap::new();
        global_headers.insert("/".to_string(), hl);
        config.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
    }
    let global_headers: DashMap<Arc<str>, Vec<(Arc<str>, Arc<str>)>> = DashMap::new();
    global_headers.insert(Arc::from("/"), ch);
    config.client_headers.insert(Arc::from("GLOBAL_CLIENT_HEADERS"), global_headers);

    config.extraparams.sticky_sessions = parsed.sticky_sessions;
    let mut sh: Vec<(Arc<str>, Arc<str>)> = Vec::new();
    if let Some(headers) = &parsed.server_headers {
        for header in headers {
            if let Some((key, val)) = header.split_once(':') {
                sh.push((Arc::from(key.trim()), Arc::from(val.trim())));
            }
        }
    }
    let server_global_headers: DashMap<Arc<str>, Vec<(Arc<str>, Arc<str>)>> = DashMap::new();
    server_global_headers.insert(Arc::from("/"), sh);
    config.server_headers.insert(Arc::from("GLOBAL_SERVER_HEADERS"), server_global_headers);
    config.extraparams.to_https = parsed.to_https;
    config.extraparams.sticky_sessions = parsed.sticky_sessions;
    config.extraparams.rate_limit = parsed.rate_limit;

    if let Some(rate) = &parsed.rate_limit {
        info!("Applied Global Rate Limit : {} request per second", rate);
    }

    if let Some(auth) = &parsed.authorization {
        let name = auth.get("type").unwrap_or(&"".to_string()).to_string();
        let creds = auth.get("creds").unwrap_or(&"".to_string()).to_string();
        config.extraparams.authentication.insert("authorization".to_string(), vec![name, creds]);
    } else {
        config.extraparams.authentication = DashMap::new();
    if let Some(pa) = &parsed.authorization {
        let y: InnerAuth = InnerAuth {
            auth_type: Arc::from(pa.auth_type.clone()),
            auth_cred: Arc::from(pa.auth_cred.clone()),
        };
        config.extraparams.authentication = Some(y);
    }
}

@@ -102,48 +113,60 @@ async fn populate_file_upstreams(config: &mut Configuration, parsed: &Config) {
    if let Some(upstreams) = &parsed.upstreams {
        for (hostname, host_config) in upstreams {
            let path_map = DashMap::new();
            let header_list = DashMap::new();
            let client_header_list = DashMap::new();
            let server_header_list = DashMap::new();
            for (path, path_config) in &host_config.paths {
                if let Some(rate) = &path_config.rate_limit {
                    info!("Applied Rate Limit for {} : {} request per second", hostname, rate);
                }

                let mut hl: Vec<(Arc<str>, Arc<str>)> = Vec::new();
                let mut sl: Vec<(Arc<str>, Arc<str>)> = Vec::new();
                build_headers(&path_config.client_headers, config, &mut hl);
                build_headers(&path_config.server_headers, config, &mut sl);
                client_header_list.insert(Arc::from(path.as_str()), hl);
                server_header_list.insert(Arc::from(path.as_str()), sl);
                let mut server_list = Vec::new();
                let mut hl = Vec::new();

                if let Some(headers) = &path_config.headers {
                    for header in headers {
                        if let Some((key, val)) = header.split_once(':') {
                            hl.push((key.trim().to_string(), val.trim().to_string()));
                        }
                    }
                }
                header_list.insert(path.clone(), hl);

                for server in &path_config.servers {
                    let mut path_auth: Option<Arc<InnerAuth>> = None;
                    if let Some(pa) = &path_config.authorization {
                        let y: InnerAuth = InnerAuth {
                            auth_type: Arc::from(pa.auth_type.clone()),
                            auth_cred: Arc::from(pa.auth_cred.clone()),
                        };
                        path_auth = Some(Arc::from(y));
                    }

                    if let Some((ip, port_str)) = server.split_once(':') {
                        if let Ok(port) = port_str.parse::<u16>() {
                            server_list.push(InnerMap {
                                address: ip.trim().to_string(),
                            server_list.push(Arc::from(InnerMap {
                                address: Arc::from(ip),
                                port,
                                is_ssl: true,
                                is_http2: false,
                                to_https: path_config.to_https.unwrap_or(false),
                                // rate_limit: rate,
                                rate_limit: path_config.rate_limit,
                            });
                                healthcheck: path_config.healthcheck,
                                authorization: path_auth,
                            }));
                        }
                    }
                }
                path_map.insert(path.clone(), (server_list, AtomicUsize::new(0)));
                path_map.insert(Arc::from(path.clone()), (server_list, AtomicUsize::new(0)));
            }
            config.headers.insert(hostname.clone(), header_list);
            imtdashmap.insert(hostname.clone(), path_map);
            config.client_headers.insert(Arc::from(hostname.clone()), client_header_list);
            config.server_headers.insert(Arc::from(hostname.clone()), server_header_list);
            imtdashmap.insert(Arc::from(hostname.clone()), path_map);
        }
        let y = clone_dashmap(&imtdashmap);
        let r = healthcheck::initiate_upstreams(y).await;
        clone_dashmap_into(&r, &config.upstreams);
        println!("Upstream Config:");

        if is_first_run() {
            clone_dashmap_into(&imtdashmap, &config.upstreams);
            mark_not_first_run();
        } else {
            let y = clone_dashmap(&imtdashmap);
            let r = healthcheck::initiate_upstreams(y).await;
            clone_dashmap_into(&r, &config.upstreams);
        }
        info!("Upstream Config:");
        print_upstreams(&config.upstreams);
    }
}
@@ -218,3 +241,13 @@ fn log_builder(conf: &AppConfig) {
    }
    env_logger::builder().init();
}

pub fn build_headers(path_config: &Option<Vec<String>>, _config: &Configuration, hl: &mut Vec<(Arc<str>, Arc<str>)>) {
    if let Some(headers) = &path_config {
        for header in headers {
            if let Some((key, val)) = header.split_once(':') {
                hl.push((Arc::from(key.trim()), Arc::from(val.trim())));
            }
        }
    }
}
30
src/utils/state.rs
Normal file
@@ -0,0 +1,30 @@
use once_cell::sync::Lazy;
use std::sync::RwLock;

#[derive(Debug)]
pub struct SharedState {
    pub first_run: bool,
}

pub static GLOBAL_STATE: Lazy<RwLock<SharedState>> = Lazy::new(|| RwLock::new(SharedState { first_run: true }));

pub fn mark_not_first_run() {
    let mut state = GLOBAL_STATE.write().unwrap();
    state.first_run = false;
}

pub fn is_first_run() -> bool {
    let state = GLOBAL_STATE.read().unwrap();
    state.first_run
}

/*
impl SharedState {
    pub fn mark_first_run(&mut self) {
        self.first_run = false;
    }
    pub fn is_first_run(&self) -> bool {
        self.first_run
    }
}
*/
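A minimal sketch of how this flag is consumed, mirroring the populate_file_upstreams change in parceyaml.rs above; the function wrapper itself is illustrative, not repo code:

use crate::utils::state::{is_first_run, mark_not_first_run};
use crate::utils::structs::UpstreamsDashMap;

async fn load_with_fast_first_pass(raw: UpstreamsDashMap) -> UpstreamsDashMap {
    if is_first_run() {
        // First load: publish the parsed upstreams immediately, without probing,
        // so startup is not blocked on health checks.
        mark_not_first_run();
        raw
    } else {
        // Subsequent reloads: probe backends before publishing.
        crate::utils::healthcheck::initiate_upstreams(raw).await
    }
}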
@@ -2,26 +2,40 @@ use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;

pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<InnerMap>, AtomicUsize)>>;
pub type UpstreamsDashMap = DashMap<Arc<str>, DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>>;

pub type UpstreamsIdMap = DashMap<String, InnerMap>;
pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>;

#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ServiceMapping {
    pub proxy: String,
    pub real: String,
}
pub type UpstreamsIdMap = DashMap<String, Arc<InnerMap>>;
pub type Headers = DashMap<Arc<str>, DashMap<Arc<str>, Vec<(Arc<str>, Arc<str>)>>>;

#[derive(Clone, Debug, Default)]
pub struct Extraparams {
    pub sticky_sessions: bool,
    pub to_https: Option<bool>,
    pub authentication: DashMap<String, Vec<String>>,
    pub sticky_sessions: bool,
    pub authentication: Option<InnerAuth>,
    pub rate_limit: Option<isize>,
}

#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ServiceMapping {
    pub upstream: String,
    pub hostname: String,
    pub path: Option<String>,
    pub to_https: Option<bool>,
    pub sticky_sessions: Option<bool>,
    pub rate_limit: Option<isize>,
    pub client_headers: Option<Vec<String>>,
    pub server_headers: Option<Vec<String>>,
}

#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Kubernetes {
    pub servers: Option<Vec<String>>,
    pub services: Option<Vec<ServiceMapping>>,
    pub tokenpath: Option<String>,
}

#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Consul {
    pub servers: Option<Vec<String>>,
@@ -31,19 +45,23 @@ pub struct Consul {
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Config {
    pub provider: String,
    pub sticky_sessions: bool,
    pub to_https: Option<bool>,
    pub sticky_sessions: bool,
    #[serde(default)]
    pub upstreams: Option<HashMap<String, HostConfig>>,
    #[serde(default)]
    pub globals: Option<HashMap<String, Vec<String>>>,
    #[serde(default)]
    pub headers: Option<Vec<String>>,
    pub client_headers: Option<Vec<String>>,
    #[serde(default)]
    pub authorization: Option<HashMap<String, String>>,
    pub server_headers: Option<Vec<String>>,
    #[serde(default)]
    pub authorization: Option<Auth>,
    #[serde(default)]
    pub consul: Option<Consul>,
    #[serde(default)]
    pub kubernetes: Option<Kubernetes>,
    #[serde(default)]
    pub rate_limit: Option<isize>,
}

@@ -52,19 +70,31 @@ pub struct HostConfig {
    pub paths: HashMap<String, PathConfig>,
    pub rate_limit: Option<isize>,
}

#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct Auth {
    #[serde(rename = "type")]
    pub auth_type: String,
    #[serde(rename = "creds")]
    pub auth_cred: String,
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct PathConfig {
    pub servers: Vec<String>,
    pub to_https: Option<bool>,
    pub headers: Option<Vec<String>>,
    pub sticky_sessions: Option<bool>,
    pub client_headers: Option<Vec<String>>,
    pub server_headers: Option<Vec<String>>,
    pub rate_limit: Option<isize>,
    pub healthcheck: Option<bool>,
    pub authorization: Option<Auth>,
}
#[derive(Debug, Default)]
pub struct Configuration {
    pub upstreams: UpstreamsDashMap,
    pub headers: Headers,
    pub client_headers: Headers,
    pub server_headers: Headers,
    pub consul: Option<Consul>,
    pub kubernetes: Option<Kubernetes>,
    pub typecfg: String,
    pub extraparams: Extraparams,
}
@@ -89,28 +119,56 @@ pub struct AppConfig {
    pub proxy_tls_grade: Option<String>,
    pub file_server_address: Option<String>,
    pub file_server_folder: Option<String>,
    pub runuser: Option<String>,
    pub rungroup: Option<String>,
}

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct InnerAuth {
    pub auth_type: Arc<str>,
    pub auth_cred: Arc<str>,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InnerMap {
    pub address: String,
    pub address: Arc<str>,
    pub port: u16,
    pub is_ssl: bool,
    pub is_http2: bool,
    pub to_https: bool,
    pub rate_limit: Option<isize>,
    pub healthcheck: Option<bool>,
    pub authorization: Option<Arc<InnerAuth>>,
}

#[allow(dead_code)]
impl InnerMap {
    pub fn new() -> Self {
        Self {
            address: Default::default(),
            address: Arc::from("127.0.0.1"),
            port: Default::default(),
            is_ssl: Default::default(),
            is_http2: Default::default(),
            to_https: Default::default(),
            rate_limit: Default::default(),
            healthcheck: Default::default(),
            authorization: Default::default(),
        }
    }
}

#[derive(Debug, Default, Serialize, Deserialize)]
pub struct InnerMapForJson {
    pub address: String,
    pub port: u16,
    pub is_ssl: bool,
    pub is_http2: bool,
    pub to_https: bool,
    pub rate_limit: Option<isize>,
    pub healthcheck: Option<bool>,
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct UpstreamSnapshotForJson {
    pub backends: Vec<InnerMapForJson>,
    pub requests: usize,
}
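The type changes above swap owned Strings and by-value InnerMaps for Arc-backed shares, so hostnames, paths, and backends are reference-counted instead of deep-copied. A minimal sketch of the cost difference (values illustrative):

use std::sync::Arc;

fn clone_costs() {
    // Old layout: every lookup or clone copied the full string buffer.
    let owned: String = "10.0.0.7".to_string();
    let _copy = owned.clone(); // heap allocation plus memcpy

    // New layout: clones only bump a reference count.
    let shared: Arc<str> = Arc::from("10.0.0.7");
    let _handle = Arc::clone(&shared); // atomic increment, no allocation
}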
@@ -193,9 +193,7 @@ pub struct CipherSuite {
}
const CIPHERS: CipherSuite = CipherSuite {
    high: "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305",
    // aa: "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256",
    medium: "ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:AES128-GCM-SHA256",
    // cc: "AES128-SHA:DES-CBC3-SHA",
    legacy: "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH",
};

@@ -230,26 +228,30 @@ pub fn set_tsl_grade(tls_settings: &mut TlsSettings, grade: &str) {
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1_2));
            // let _ = tls_settings.set_max_proto_version(Some(SslVersion::TLS1_3));
            let _ = tls_settings.set_cipher_list(CIPHERS.high);
            let _ = tls_settings.set_ciphersuites(CIPHERS.high);
            // let _ = tls_settings.set_ciphersuites(CIPHERS.high);
            let _ = tls_settings.set_cipher_list(CIPHERS.high);
            info!("TLS grade: {:?}, => HIGH", tls_settings.options());
        }
        Some(TlsGrade::MEDIUM) => {
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
            let _ = tls_settings.set_cipher_list(CIPHERS.medium);
            let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
            // let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
            let _ = tls_settings.set_cipher_list(CIPHERS.medium);
            info!("TLS grade: {:?}, => MEDIUM", tls_settings.options());
        }
        Some(TlsGrade::LEGACY) => {
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::SSL3));
            let _ = tls_settings.set_cipher_list(CIPHERS.legacy);
            let _ = tls_settings.set_ciphersuites(CIPHERS.legacy);
            // let _ = tls_settings.set_ciphersuites(CIPHERS.legacy);
            let _ = tls_settings.set_cipher_list(CIPHERS.legacy);
            warn!("TLS grade: {:?}, => UNSAFE", tls_settings.options());
        }
        None => {
            // Defaults to MEDIUM
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
            let _ = tls_settings.set_cipher_list(CIPHERS.medium);
            let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
            // let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
            let _ = tls_settings.set_cipher_list(CIPHERS.medium);
            warn!("TLS grade is not detected defaulting top MEDIUM");
        }
    }
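For background on the change above (a sketch against rust-openssl, not repo code): OpenSSL keeps two separate knobs, set_cipher_list for TLS 1.2-and-below suites and set_ciphersuites for TLS 1.3 suite names, which is presumably why the set_ciphersuites calls were commented out for the medium and legacy strings, since those contain no TLS 1.3 names. The cipher strings below are illustrative:

use openssl::ssl::{SslAcceptor, SslMethod};

fn split_cipher_config() -> Result<(), openssl::error::ErrorStack> {
    let mut b = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls())?;
    // TLS 1.2-and-below suites go through set_cipher_list ...
    b.set_cipher_list("ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384")?;
    // ... while TLS 1.3 suites use the separate ciphersuites API.
    b.set_ciphersuites("TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256")?;
    Ok(())
}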
@@ -1,17 +1,24 @@
use crate::utils::structs::{InnerMap, UpstreamsDashMap, UpstreamsIdMap};
use crate::utils::structs::{InnerMap, InnerMapForJson, UpstreamSnapshotForJson, UpstreamsDashMap, UpstreamsIdMap};
use crate::utils::tls;
use crate::utils::tls::CertificateConfig;
use dashmap::DashMap;
use log::{error, info};
use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use port_check::is_port_reachable;
use privdrop::PrivDrop;
use serde_json::{json, Value};
use sha2::{Digest, Sha256};
use std::any::type_name;
use std::collections::{HashMap, HashSet};
use std::fmt::Write;
use std::fs;
use std::sync::atomic::AtomicUsize;
use std::net::SocketAddr;
use std::os::unix::fs::MetadataExt;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Sender};
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::{fs, process, thread, time};

#[allow(dead_code)]
pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
@@ -24,8 +31,13 @@ pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
        println!("  Path: {}", path);
        for f in path_entry.value().0.clone() {
            println!(
                " IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}",
                f.address, f.port, f.is_ssl, f.is_http2, f.to_https
                " IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}, Rate Limit: {}",
                f.address,
                f.port,
                f.is_ssl,
                f.is_http2,
                f.to_https,
                f.rate_limit.unwrap_or(0)
            );
        }
    }
@@ -112,17 +124,29 @@ pub fn compare_dashmaps(map1: &UpstreamsDashMap, map2: &UpstreamsDashMap) -> boo
                return false; // Path exists in map1 but not in map2
            };
            let (vec2, _counter2) = entry2.value();
            let set1: HashSet<_> = vec1.iter().collect();
            let set2: HashSet<_> = vec2.iter().collect();
            if set1 != set2 {

            if vec1.len() != vec2.len() {
                return false;
            }
            for item in vec1.iter() {
                let count1 = vec1.iter().filter(|&x| x == item).count();
                let count2 = vec2.iter().filter(|&x| x == item).count();
                if count1 != count2 {
                    return false;
                }
            }

            // let set1: HashSet<_> = vec1.iter().collect();
            // let set2: HashSet<_> = vec2.iter().collect();
            // if set1 != set2 {
            //     return false;
            // }
        }
    }
    true
}
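A short aside on the comparison change above: the new InnerMap no longer derives Hash (see structs.rs), which appears to rule out the old HashSet comparison, so backend lists are now compared as multisets by pairwise counting. This is quadratic, but backend lists per path are small. A standalone sketch of the same idea:

// Multiset equality by counting occurrences, mirroring compare_dashmaps above.
fn same_multiset<T: PartialEq>(a: &[T], b: &[T]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    a.iter().all(|item| {
        let in_a = a.iter().filter(|x| *x == item).count();
        let in_b = b.iter().filter(|x| *x == item).count();
        in_a == in_b
    })
}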
pub fn merge_headers(target: &DashMap<String, Vec<(String, String)>>, source: &DashMap<String, Vec<(String, String)>>) {
pub fn merge_headers(target: &DashMap<Arc<str>, Vec<(Arc<str>, Arc<str>)>>, source: &DashMap<Arc<str>, Vec<(Arc<str>, Arc<str>)>>) {
    for entry in source.iter() {
        let global_key = entry.key().clone();
        let global_values = entry.value().clone();
@@ -149,15 +173,19 @@ pub fn clone_idmap_into(original: &UpstreamsDashMap, cloned: &UpstreamsIdMap) {
                let hex_hash = base16ct::lower::encode_string(&hash);
                let hh = hex_hash[0..50].to_string();
                let to_add = InnerMap {
                    address: hh.clone(),
                    address: Arc::from("127.0.0.1"),
                    port: 0,
                    is_ssl: false,
                    is_http2: false,
                    to_https: false,
                    rate_limit: None,
                    healthcheck: None,
                    authorization: None,
                };
                cloned.insert(id, to_add);
                cloned.insert(hh, x.to_owned());

                cloned.insert(id, Arc::from(to_add));
                cloned.insert(hh, Arc::from(x.to_owned()));
                // println!("CLONNED :===========> {:?}", cloned);
            }
            new_inner_map.insert(path.clone(), new_vec);
        }
@@ -220,3 +248,123 @@ pub fn watch_folder(path: String, sender: Sender<Vec<CertificateConfig>>) -> not
        }
    }
}

pub fn drop_priv(user: String, group: String, http_addr: String, tls_addr: Option<String>) {
    thread::sleep(time::Duration::from_millis(10));
    loop {
        thread::sleep(time::Duration::from_millis(10));
        if is_port_reachable(http_addr.clone()) {
            break;
        }
    }
    if let Some(tls_addr) = tls_addr {
        loop {
            thread::sleep(time::Duration::from_millis(10));
            if is_port_reachable(tls_addr.clone()) {
                break;
            }
        }
    }
    info!("Dropping ROOT privileges to: {}:{}", user, group);
    if let Err(e) = PrivDrop::default().user(user).group(group).apply() {
        error!("Failed to drop privileges: {}", e);
        process::exit(1)
    }
}

pub fn check_priv(addr: &str) {
    let port = SocketAddr::from_str(addr).map(|sa| sa.port()).unwrap();
    match port < 1024 {
        true => {
            let meta = std::fs::metadata("/proc/self").map(|m| m.uid()).unwrap();
            if meta != 0 {
                error!("Running on privileged port requires to start as ROOT");
                process::exit(1)
            }
        }
        false => {}
    }
}

#[allow(dead_code)]
pub fn upstreams_to_json(upstreams: &UpstreamsDashMap) -> serde_json::Result<String> {
    let mut outer = HashMap::new();

    for outer_entry in upstreams.iter() {
        let mut inner_map = HashMap::new();

        for inner_entry in outer_entry.value().iter() {
            let (backends, counter) = inner_entry.value();

            inner_map.insert(
                inner_entry.key().to_string(),
                UpstreamSnapshotForJson {
                    backends: backends
                        .iter()
                        .map(|a| InnerMapForJson {
                            address: a.address.to_string(),
                            port: a.port,
                            is_ssl: a.is_ssl,
                            is_http2: a.is_http2,
                            to_https: a.to_https,
                            rate_limit: a.rate_limit,
                            healthcheck: a.healthcheck,
                        })
                        .collect(),
                    requests: counter.load(Ordering::Relaxed),
                },
            );
        }

        outer.insert(outer_entry.key().to_string(), inner_map);
    }

    // serde_json::to_string_pretty(&outer)
    serde_json::to_string(&outer)
}

pub fn upstreams_liveness_json(configured: &UpstreamsDashMap, current: &UpstreamsDashMap) -> Value {
    let mut result = serde_json::Map::new();

    for host_entry in configured.iter() {
        let hostname = host_entry.key().to_string();
        let configured_paths = host_entry.value();

        let mut paths_json = serde_json::Map::new();

        for path_entry in configured_paths.iter() {
            let path = path_entry.key().clone();
            let (configured_backends, _) = path_entry.value();
            let backends_json: Vec<Value> = configured_backends
                .iter()
                .map(|backend| {
                    let alive = if let Some(host_map) = current.get(&*hostname) {
                        if let Some(path_entry) = host_map.get(&*path) {
                            let list = &path_entry.value().0; // Vec<Arc<InnerMap>>
                            list.iter().any(|b| b.address == backend.address && b.port == backend.port)
                        } else {
                            false
                        }
                    } else {
                        false
                    };
                    json!({
                        "address": &*backend.address,
                        "port": backend.port,
                        "alive": alive
                    })
                })
                .collect();

            paths_json.insert(
                path.to_string(),
                json!({
                    "backends": backends_json
                }),
            );
        }

        result.insert(hostname, Value::Object(paths_json));
    }
    Value::Object(result)
}
@@ -1,4 +1,5 @@
use crate::utils::discovery::{APIUpstreamProvider, ConsulProvider, Discovery, FromFileProvider};
use crate::utils::discovery::{APIUpstreamProvider, ConsulProvider, Discovery, FromFileProvider, KubernetesProvider};
use crate::utils::parceyaml::load_configuration;
use crate::utils::structs::Configuration;
use crate::utils::tools::*;
use crate::utils::*;
@@ -6,8 +7,8 @@ use crate::web::proxyhttp::LB;
use async_trait::async_trait;
use dashmap::DashMap;
use futures::channel::mpsc;
use futures::StreamExt;
use log::info;
use futures::{SinkExt, StreamExt};
use log::{error, info};
use pingora_core::server::ShutdownWatch;
use pingora_core::services::background::BackgroundService;
use std::sync::Arc;
@@ -15,35 +16,53 @@ use std::sync::Arc;
#[async_trait]
impl BackgroundService for LB {
    async fn start(&self, mut shutdown: ShutdownWatch) {
        info!("Starting background service");
        let (tx, mut rx) = mpsc::channel::<Configuration>(0);
        info!("Starting background service"); // tx: Sender<Configuration>
        let (mut tx, mut rx) = mpsc::channel::<Configuration>(1);
        let tx_api = tx.clone();
        let config = load_configuration(self.config.upstreams_conf.clone().as_str(), "filepath")
            .await
            .0
            .expect("Failed to load configuration");

        let tx_file = tx.clone();
        let tx_consul = tx.clone();

        let file_load = FromFileProvider {
            path: self.config.upstreams_conf.clone(),
        };
        let consul_load = ConsulProvider {
            path: self.config.upstreams_conf.clone(),
        };

        let _ = tokio::spawn(async move { file_load.start(tx_file).await });
        let _ = tokio::spawn(async move { consul_load.start(tx_consul).await });
        // let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone()));
        // let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone())).await;
        match config.typecfg.as_str() {
            "file" => {
                info!("Running File discovery, requested type is: {}", config.typecfg);
                tx.send(config).await.unwrap();
                let file_load = FromFileProvider {
                    path: self.config.upstreams_conf.clone(),
                };
                let _ = tokio::spawn(async move { file_load.start(tx).await });
            }
            "kubernetes" => {
                info!("Running Kubernetes discovery, requested type is: {}", config.typecfg);
                let cf = Arc::from(config);
                let kuber_load = KubernetesProvider { config: cf.clone() };
                let _ = tokio::spawn(async move { kuber_load.start(tx).await });
            }
            "consul" => {
                info!("Running Consul discovery, requested type is: {}", config.typecfg);
                let cf = Arc::from(config);
                let consul_load = ConsulProvider { config: cf.clone() };
                let _ = tokio::spawn(async move { consul_load.start(tx).await });
            }
            _ => {
                error!("Unknown discovery type: {}", config.typecfg);
            }
        }

        let api_load = APIUpstreamProvider {
            address: self.config.config_address.clone(),
            masterkey: self.config.master_key.clone(),
            config_api_enabled: self.config.config_api_enabled.clone(),
            tls_address: self.config.config_tls_address.clone(),
            tls_certificate: self.config.config_tls_certificate.clone(),
            tls_key_file: self.config.config_tls_key_file.clone(),
            // tls_address: self.config.config_tls_address.clone(),
            // tls_certificate: self.config.config_tls_certificate.clone(),
            // tls_key_file: self.config.config_tls_key_file.clone(),
            file_server_address: self.config.file_server_address.clone(),
            file_server_folder: self.config.file_server_folder.clone(),
            current_upstreams: self.ump_upst.clone(),
            full_upstreams: self.ump_full.clone(),
        };
        let tx_api = tx.clone();
        // let tx_api = tx.clone();
        let _ = tokio::spawn(async move { api_load.start(tx_api).await });

        let uu = self.ump_upst.clone();
@@ -64,27 +83,43 @@ impl BackgroundService for LB {
                    clone_dashmap_into(&ss.upstreams, &self.ump_upst);
                    let current = self.extraparams.load_full();
                    let mut new = (*current).clone();
                    new.sticky_sessions = ss.extraparams.sticky_sessions;
                    new.to_https = ss.extraparams.to_https;
                    new.sticky_sessions = ss.extraparams.sticky_sessions;
                    new.authentication = ss.extraparams.authentication.clone();
                    new.rate_limit = ss.extraparams.rate_limit;
                    self.extraparams.store(Arc::new(new));
                    self.headers.clear();
                    self.client_headers.clear();
                    self.server_headers.clear();

                    for entry in ss.upstreams.iter() {
                        let global_key = entry.key().clone();
                        let global_values = DashMap::new();
                        let mut target_entry = ss.headers.entry(global_key).or_insert_with(DashMap::new);
                        target_entry.extend(global_values);
                        self.headers.insert(target_entry.key().to_owned(), target_entry.value().to_owned());
                        let client_global_values = DashMap::new();
                        let server_global_values = DashMap::new();

                        let mut client_target_entry = ss.client_headers.entry(global_key.clone()).or_insert_with(DashMap::new);
                        client_target_entry.extend(client_global_values);
                        let mut server_target_entry = ss.server_headers.entry(global_key).or_insert_with(DashMap::new);
                        server_target_entry.extend(server_global_values);
                        self.server_headers.insert(server_target_entry.key().to_owned(), server_target_entry.value().to_owned());
                    }

                    for path in ss.headers.iter() {
                    for path in ss.client_headers.iter() {
                        let path_key = path.key().clone();
                        let path_headers = path.value().clone();
                        self.headers.insert(path_key.clone(), path_headers);
                        if let Some(global_headers) = ss.headers.get("GLOBAL_HEADERS") {
                            if let Some(existing_headers) = self.headers.get_mut(&path_key) {
                        self.client_headers.insert(path_key.clone(), path_headers);
                        if let Some(global_headers) = ss.client_headers.get("GLOBAL_CLIENT_HEADERS") {
                            if let Some(existing_headers) = self.client_headers.get_mut(&path_key) {
                                merge_headers(&existing_headers, &global_headers);
                            }
                        }
                    }

                    for path in ss.server_headers.iter() {
                        let path_key = path.key().clone();
                        let path_headers = path.value().clone();
                        self.server_headers.insert(path_key.clone(), path_headers);
                        if let Some(global_headers) = ss.server_headers.get("GLOBAL_SERVER_HEADERS") {
                            if let Some(existing_headers) = self.server_headers.get_mut(&path_key) {
                                merge_headers(&existing_headers, &global_headers);
                            }
                        }
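A note on the channel capacity change above (a sketch, not repo code): futures' bounded mpsc reserves buffer slots plus one slot per live sender, so with buffer = 1 the inline tx.send(config) can complete even before the receive loop is first polled. Minimal illustration:

use futures::channel::mpsc;
use futures::{SinkExt, StreamExt};

async fn demo() {
    let (mut tx, mut rx) = mpsc::channel::<u32>(1);
    tx.send(7).await.unwrap();          // buffered; no receiver polled yet
    assert_eq!(rx.next().await, Some(7));
}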
@@ -2,77 +2,120 @@ use crate::utils::structs::InnerMap;
|
||||
use crate::web::proxyhttp::LB;
|
||||
use async_trait::async_trait;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct GetHostsReturHeaders {
|
||||
pub client_headers: Option<Vec<(Arc<str>, Arc<str>)>>,
|
||||
pub server_headers: Option<Vec<(Arc<str>, Arc<str>)>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait GetHost {
|
||||
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap>;
|
||||
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>>;
|
||||
 fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<Arc<InnerMap>>;

 fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders>;
 // fn get_upstreams(&self) -> Arc<UpstreamsDashMap>;
 }

 #[async_trait]
 impl GetHost for LB {
-    fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap> {
-        // fn get_upstreams(&self) -> Arc<UpstreamsDashMap> {
-        //     self.ump_full.clone()
-        // }
+    fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<Arc<InnerMap>> {
         if let Some(b) = backend_id {
             if let Some(bb) = self.ump_byid.get(b) {
-                // println!("BIB :===> {:?}", Some(bb.value()));
                 return Some(bb.value().clone());
             }
         }

         let host_entry = self.ump_upst.get(peer)?;
-        let mut current_path = path.to_string();
-        let mut best_match: Option<InnerMap> = None;
+        let mut end = path.len();
         loop {
-            if let Some(entry) = host_entry.get(&current_path) {
+            let slice = &path[..end];
+            if let Some(entry) = host_entry.get(slice) {
                 let (servers, index) = entry.value();
                 if !servers.is_empty() {
                     let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len();
-                    best_match = Some(servers[idx].clone());
-                    break;
+                    return Some(servers[idx].clone());
                 }
             }
-            if let Some(pos) = current_path.rfind('/') {
-                current_path.truncate(pos);
+            if let Some(pos) = slice.rfind('/') {
+                end = pos;
             } else {
                 break;
             }
         }
-        if best_match.is_none() {
-            if let Some(entry) = host_entry.get("/") {
-                let (servers, index) = entry.value();
-                if !servers.is_empty() {
-                    let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len();
-                    best_match = Some(servers[idx].clone());
-                }
+        if let Some(entry) = host_entry.get("/") {
+            let (servers, index) = entry.value();
+            if !servers.is_empty() {
+                let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len();
+                return Some(servers[idx].clone());
             }
         }
-        // println!("Best Match :===> {:?}", best_match);
-        best_match
+        None
     }

-    fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>> {
-        let host_entry = self.headers.get(peer)?;
-        let mut current_path = path.to_string();
-        let mut best_match: Option<Vec<(String, String)>> = None;
-
-        loop {
-            if let Some(entry) = host_entry.get(&current_path) {
-                if !entry.value().is_empty() {
-                    best_match = Some(entry.value().clone());
-                    break;
-                }
-            }
-            if let Some(pos) = current_path.rfind('/') {
-                current_path.truncate(pos);
-            } else {
-                break;
-            }
-        }
-        if best_match.is_none() {
-            if let Some(entry) = host_entry.get("/") {
-                if !entry.value().is_empty() {
-                    best_match = Some(entry.value().clone());
-                }
-            }
-        }
-        best_match
+    fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders> {
+        let client_entry = self.client_headers.get(peer);
+        let server_entry = self.server_headers.get(peer);
+        if client_entry.is_none() && server_entry.is_none() {
+            return None;
+        }
+        let mut current_path = path;
+        let mut clnt_match = None;
+        if let Some(client_entry) = client_entry {
+            loop {
+                if let Some(entry) = client_entry.get(current_path) {
+                    if !entry.value().is_empty() {
+                        clnt_match = Some(entry.value().clone());
+                        break;
+                    }
+                }
+                if current_path == "/" {
+                    break;
+                }
+                if let Some(pos) = current_path.rfind('/') {
+                    current_path = if pos == 0 { "/" } else { &current_path[..pos] };
+                } else {
+                    break;
+                }
+            }
+        }
+        current_path = path;
+        let mut serv_match = None;
+        if let Some(server_entry) = server_entry {
+            loop {
+                if let Some(entry) = server_entry.get(current_path) {
+                    if !entry.value().is_empty() {
+                        serv_match = Some(entry.value().clone());
+                        break;
+                    }
+                }
+                if current_path == "/" {
+                    if let Some(entry) = server_entry.get("/") {
+                        if !entry.value().is_empty() {
+                            serv_match = Some(entry.value().clone());
+                            break;
+                        }
+                    }
+                    break;
+                }
+                if let Some(pos) = current_path.rfind('/') {
+                    current_path = if pos == 0 { "/" } else { &current_path[..pos] };
+                } else {
+                    break;
+                }
+            }
+        }
+
+        let result = GetHostsReturHeaders {
+            client_headers: clnt_match,
+            server_headers: serv_match,
+        };
+
+        if result.client_headers.is_some() || result.server_headers.is_some() {
+            Some(result)
+        } else {
+            None
+        }
     }
 }
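Note on the get_host rewrite above: the matcher now walks the request path from most specific to least specific by shrinking a borrowed slice (&path[..end]) instead of repeatedly truncating an owned String, returns on the first prefix hit, and only then falls back to the "/" catch-all. A minimal, self-contained sketch of the same idea, with a plain HashMap standing in for the per-host DashMap (names here are illustrative, not part of the codebase):

    use std::collections::HashMap;

    fn longest_prefix<'a>(routes: &'a HashMap<&str, &str>, path: &str) -> Option<&'a str> {
        let mut end = path.len();
        loop {
            let slice = &path[..end]; // borrow, never reallocate
            if let Some(upstream) = routes.get(slice) {
                return Some(*upstream); // most specific prefix wins
            }
            match slice.rfind('/') {
                Some(pos) => end = pos, // drop the last path segment
                None => break,
            }
        }
        routes.get("/").copied() // catch-all fallback
    }

    fn main() {
        let mut routes = HashMap::new();
        routes.insert("/", "default-pool");
        routes.insert("/api", "api-pool");
        assert_eq!(longest_prefix(&routes, "/api/v1/users"), Some("api-pool"));
        assert_eq!(longest_prefix(&routes, "/static/css"), Some("default-pool"));
    }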
@@ -1,43 +1,56 @@
 use crate::utils::auth::authenticate;
 use crate::utils::metrics::*;
 use crate::utils::structs::{AppConfig, Extraparams, Headers, InnerMap, UpstreamsDashMap, UpstreamsIdMap};
-use crate::web::gethosts::GetHost;
+use crate::web::gethosts::{GetHost, GetHostsReturHeaders};
 use arc_swap::ArcSwap;
 use async_trait::async_trait;
 use axum::body::Bytes;
-use log::{debug, warn};
+use dashmap::DashMap;
+// use x509_parser::asn1_rs::ToDer;
+use itoa::Buffer;
+use log::{debug, error, warn};
 use once_cell::sync::Lazy;
 use pingora::http::{RequestHeader, ResponseHeader, StatusCode};
 use pingora::prelude::*;
 use pingora::ErrorSource::Upstream;
 use pingora_core::listeners::ALPN;
 use pingora_core::prelude::HttpPeer;
+// use pingora_core::protocols::TcpKeepalive;
 use pingora_limits::rate::Rate;
 use pingora_proxy::{ProxyHttp, Session};
+use sha2::{Digest, Sha256};
+use std::cell::RefCell;
+// use std::collections::BTreeMap;
+use std::fmt::Write;
 use std::sync::Arc;
 use std::time::Duration;
 use tokio::time::Instant;

 static RATE_LIMITER: Lazy<Rate> = Lazy::new(|| Rate::new(Duration::from_secs(1)));
+static REVERSE_STORE: Lazy<DashMap<String, String>> = Lazy::new(|| DashMap::new());
+thread_local! {static IP_BUFFER: RefCell<String> = RefCell::new(String::with_capacity(50));}

 #[derive(Clone)]
 pub struct LB {
     pub ump_upst: Arc<UpstreamsDashMap>,
     pub ump_full: Arc<UpstreamsDashMap>,
     pub ump_byid: Arc<UpstreamsIdMap>,
-    pub headers: Arc<Headers>,
+    pub client_headers: Arc<Headers>,
+    pub server_headers: Arc<Headers>,
     pub config: Arc<AppConfig>,
     pub extraparams: Arc<ArcSwap<Extraparams>>,
 }

 pub struct Context {
-    backend_id: String,
+    backend_id: Option<String>,
     to_https: bool,
-    redirect_to: String,
+    sticky_sessions: bool,
+    redirect_to: Option<String>,
     start_time: Instant,
-    hostname: Option<String>,
-    upstream_peer: Option<InnerMap>,
+    hostname: Option<Arc<str>>,
+    upstream_peer: Option<Arc<InnerMap>>,
+    extraparams: arc_swap::Guard<Arc<Extraparams>>,
+    client_headers: Option<Arc<Vec<(Arc<str>, Arc<str>)>>>,
 }

 #[async_trait]
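The new extraparams: arc_swap::Guard<Arc<Extraparams>> field in Context means each request loads the shared config exactly once in new_ctx and then reads one consistent snapshot for its whole lifetime, even if a reload swaps the pointer mid-request. A small sketch of that pattern (illustrative types, not the proxy's own):

    use arc_swap::ArcSwap;
    use std::sync::Arc;

    struct Params { sticky_sessions: bool }

    fn main() {
        let shared = Arc::new(ArcSwap::from_pointee(Params { sticky_sessions: false }));

        // new_ctx(): one lock-free load per request.
        let snapshot = shared.load();

        // A config reload in another thread swaps the pointer...
        shared.store(Arc::new(Params { sticky_sessions: true }));

        // ...but this in-flight request keeps its consistent view.
        assert!(!snapshot.sticky_sessions);
    }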
@@ -45,62 +58,72 @@ impl ProxyHttp for LB {
     type CTX = Context;
     fn new_ctx(&self) -> Self::CTX {
         Context {
-            backend_id: String::new(),
+            backend_id: None,
             to_https: false,
-            redirect_to: String::new(),
+            sticky_sessions: false,
+            redirect_to: None,
             start_time: Instant::now(),
             hostname: None,
             upstream_peer: None,
+            extraparams: self.extraparams.load(),
+            client_headers: None,
         }
     }
     async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
-        let ep = _ctx.extraparams.clone();
-
-        if let Some(auth) = ep.authentication.get("authorization") {
-            let authenticated = authenticate(&auth.value(), &session);
+        // let ep = _ctx.extraparams.as_ref();
+        if let Some(auth) = &_ctx.extraparams.authentication {
+            let authenticated = authenticate(&auth.auth_type, &auth.auth_cred, &session);
             if !authenticated {
                 let _ = session.respond_error(401).await;
                 warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path());
                 return Ok(true);
             }
-        };
-
-        let hostname = return_header_host(&session);
-        _ctx.hostname = hostname.clone();
-
+        }
+        let hostname = return_header_host_from_upstream(session, &self.ump_upst);
+        _ctx.hostname = hostname;
         let mut backend_id = None;

-        if ep.sticky_sessions {
+        if _ctx.extraparams.sticky_sessions {
             if let Some(cookies) = session.req_header().headers.get("cookie") {
                 if let Ok(cookie_str) = cookies.to_str() {
-                    for cookie in cookie_str.split(';') {
-                        let trimmed = cookie.trim();
-                        if let Some(value) = trimmed.strip_prefix("backend_id=") {
-                            backend_id = Some(value);
-                            break;
-                        }
+                    if let Some(pos) = cookie_str.find("backend_id=") {
+                        let value = &cookie_str[pos + "backend_id=".len()..];
+                        let end = value.find(';').unwrap_or(value.len());
+                        backend_id = Some(&value[..end]);
                     }
                 }
             }
         }

-        match hostname {
+        match _ctx.hostname.as_ref() {
             None => return Ok(false),
             Some(host) => {
-                let optioninnermap = self.get_host(host.as_str(), host.as_str(), backend_id);
+                let optioninnermap = self.get_host(host, session.req_header().uri.path(), backend_id);

                 match optioninnermap {
                     None => return Ok(false),
                     Some(ref innermap) => {
-                        if let Some(rate) = innermap.rate_limit.or(ep.rate_limit) {
-                            // let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip().to_string()).unwrap_or_else(|| host.to_string());
+                        // Inner auth works only if global is disabled.
+                        if let Some(auth) = &innermap.authorization {
+                            if _ctx.extraparams.authentication.is_none() {
+                                let authenticated = authenticate(&auth.auth_type, &auth.auth_cred, &session);
+                                if !authenticated {
+                                    let _ = session.respond_error(401).await;
+                                    warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path());
+                                    return Ok(true);
+                                }
+                            }
+                        }
+
+                        if let Some(rate) = innermap.rate_limit.or(_ctx.extraparams.rate_limit) {
+                            let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip());
                             let curr_window_requests = RATE_LIMITER.observe(&rate_key, 1);
                             if curr_window_requests > rate {
-                                let mut header = ResponseHeader::build(429, None).unwrap();
-                                header.insert_header("X-Rate-Limit-Limit", rate.to_string()).unwrap();
-                                header.insert_header("X-Rate-Limit-Remaining", "0").unwrap();
-                                header.insert_header("X-Rate-Limit-Reset", "1").unwrap();
+                                let mut buf = Buffer::new();
+                                let rate_str = buf.format(rate);
+                                let mut header = ResponseHeader::build(429, None)?;
+                                header.insert_header("X-Rate-Limit-Limit", rate_str)?;
+                                header.insert_header("X-Rate-Limit-Remaining", "0")?;
+                                header.insert_header("X-Rate-Limit-Reset", "1")?;
                                 session.set_keepalive(None);
                                 session.write_response_header(Box::new(header), true).await?;
                                 debug!("Rate limited: {:?}, {}", rate_key, rate);
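For context on the rate-limit branch above: pingora_limits::rate::Rate counts events per key over the configured interval, and observe(&key, 1) both records the request and returns the count seen in the current window. A stripped-down sketch of the same check, keyed by client IP (standalone illustration, not the proxy code itself):

    use once_cell::sync::Lazy;
    use pingora_limits::rate::Rate;
    use std::net::IpAddr;
    use std::time::Duration;

    static RATE_LIMITER: Lazy<Rate> = Lazy::new(|| Rate::new(Duration::from_secs(1)));

    fn allow(client_ip: IpAddr, limit: isize) -> bool {
        // observe() adds one event and returns the count in the current window
        RATE_LIMITER.observe(&client_ip, 1) <= limit
    }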
@@ -115,54 +138,72 @@ impl ProxyHttp for LB {
         Ok(false)
     }
     async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
-        // let host_name = return_header_host(&session);
         match ctx.hostname.as_ref() {
-            Some(hostname) => {
-                match ctx.upstream_peer.as_ref() {
-                    // Some((address, port, ssl, is_h2, to_https)) => {
-                    Some(innermap) => {
-                        let mut peer = Box::new(HttpPeer::new((innermap.address.clone(), innermap.port.clone()), innermap.is_ssl, String::new()));
-                        // if session.is_http2() {
-                        if innermap.is_http2 {
-                            peer.options.alpn = ALPN::H2;
-                        }
-                        if innermap.is_ssl {
-                            peer.sni = hostname.clone();
-                            peer.options.verify_cert = false;
-                            peer.options.verify_hostname = false;
-                        }
-                        if ctx.to_https || innermap.to_https {
-                            if let Some(stream) = session.stream() {
-                                if stream.get_ssl().is_none() {
-                                    if let Some(addr) = session.server_addr() {
-                                        if let Some((host, _)) = addr.to_string().split_once(':') {
-                                            let uri = session.req_header().uri.path_and_query().map_or("/", |pq| pq.as_str());
-                                            let port = self.config.proxy_port_tls.unwrap_or(403);
-                                            ctx.to_https = true;
-                                            ctx.redirect_to = format!("https://{}:{}{}", host, port, uri);
-                                        }
-                                    }
+            Some(hostname) => match ctx.upstream_peer.as_ref() {
+                Some(innermap) => {
+                    let mut peer = Box::new(HttpPeer::new((&*innermap.address, innermap.port), innermap.is_ssl, hostname.to_string()));
+
+                    if innermap.is_http2 {
+                        peer.options.alpn = ALPN::H2;
+                    }
+                    if innermap.is_ssl {
+                        peer.options.verify_cert = false;
+                        peer.options.verify_hostname = false;
+                    }
+
+                    // Experimental options
+                    // The following TCP optimizations were tested but caused performance degradation under heavy load:
+                    // peer.options.tcp_keepalive = Some(TcpKeepalive {
+                    //     idle: Duration::from_secs(60),
+                    //     interval: Duration::from_secs(10),
+                    //     count: 5,
+                    //     user_timeout: Duration::from_secs(30),
+                    // });
+                    //
+                    // peer.options.idle_timeout = Some(Duration::from_secs(300));
+                    // peer.options.tcp_recv_buf = Some(128 * 1024);
+                    // End of experimental options
+
+                    if ctx.extraparams.to_https.unwrap_or(false) || innermap.to_https {
+                        if let Some(stream) = session.stream() {
+                            if stream.get_ssl().is_none() {
+                                if let Some(host) = ctx.hostname.as_ref() {
+                                    let uri = session.req_header().uri.path_and_query().map_or("/", |pq| pq.as_str());
+                                    let port = self.config.proxy_port_tls.unwrap_or(443);
+                                    ctx.to_https = true;
+                                    let mut s = String::with_capacity(64);
+                                    write!(&mut s, "https://{}:{}{}", host, port, uri).unwrap_or_default();
+                                    ctx.redirect_to = Some(s);
                                 }
                             }
                         }
+                    }

-                        ctx.backend_id = format!("{}:{}:{}", innermap.address.clone(), innermap.port.clone(), innermap.is_ssl);
-                        Ok(peer)
-                    }
-                    None => {
-                        session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error");
-                        Err(Box::new(Error {
-                            etype: HTTPStatus(502),
-                            esource: Upstream,
-                            retry: RetryType::Decided(false),
-                            cause: None,
-                            context: Option::from(ImmutStr::Static("Upstream not found")),
-                        }))
+                    if ctx.extraparams.sticky_sessions {
+                        let mut s = String::with_capacity(64);
+                        write!(&mut s, "{}:{}:{}", innermap.address, innermap.port, innermap.is_ssl).unwrap();
+                        ctx.backend_id = Some(s);
+                        ctx.sticky_sessions = true;
                     }
+                    Ok(peer)
                 }
-            }
+                None => {
+                    if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await {
+                        error!("Failed to send error response: {:?}", e);
+                    }
+                    Err(Box::new(Error {
+                        etype: HTTPStatus(502),
+                        esource: Upstream,
+                        retry: RetryType::Decided(false),
+                        cause: None,
+                        context: Option::from(ImmutStr::Static("Upstream not found")),
+                    }))
+                }
+            },
             None => {
-                session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error");
+                if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await {
+                    error!("Failed to send error response: {:?}", e);
+                }
                 Err(Box::new(Error {
                     etype: HTTPStatus(502),
                     esource: Upstream,
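A side note on the peer construction above: the rewritten version passes the request's Host as SNI at construction time instead of patching peer.sni afterwards, negotiates ALPN for HTTP/2 upstreams, and skips certificate verification for self-signed backends. The same wiring, isolated (assumes the pingora-core types already imported in this file; values are illustrative):

    use pingora_core::listeners::ALPN;
    use pingora_core::prelude::HttpPeer;

    fn build_peer(address: &str, port: u16, is_ssl: bool, is_http2: bool, hostname: &str) -> Box<HttpPeer> {
        let mut peer = Box::new(HttpPeer::new((address, port), is_ssl, hostname.to_string()));
        if is_http2 {
            peer.options.alpn = ALPN::H2; // offer HTTP/2 to the upstream
        }
        if is_ssl {
            peer.options.verify_cert = false;     // accept self-signed backends
            peer.options.verify_hostname = false; // skip hostname check
        }
        peer
    }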
@@ -174,73 +215,71 @@ impl ProxyHttp for LB {
         }
     }

-    async fn upstream_request_filter(&self, session: &mut Session, _upstream_request: &mut RequestHeader, _ctx: &mut Self::CTX) -> Result<()> {
-        match session.client_addr() {
-            Some(ip) => {
-                let inet = ip.as_inet();
-                match inet {
-                    Some(addr) => {
-                        _upstream_request
-                            .insert_header("X-Forwarded-For", addr.to_string().split(':').collect::<Vec<&str>>()[0])
-                            .unwrap();
-                    }
-                    None => warn!("Malformed Client IP: {:?}", inet),
-                }
-            }
-            None => {
-                warn!("Cannot detect client IP");
+    async fn upstream_request_filter(&self, session: &mut Session, upstream_request: &mut RequestHeader, ctx: &mut Self::CTX) -> Result<()> {
+        if let Some(hostname) = ctx.hostname.as_deref() {
+            upstream_request.insert_header("Host", hostname)?;
+        }
+
+        if let Some(client_ip) = session.client_addr() {
+            IP_BUFFER.with(|buffer| {
+                let mut buf = buffer.borrow_mut();
+                buf.clear();
+                write!(buf, "{}", client_ip).unwrap_or(());
+                upstream_request.append_header("x-forward-for", buf.as_str()).unwrap_or(false);
+            });
+        }
+        let hostname = ctx.hostname.as_deref().unwrap_or("localhost");
+        let path = session.req_header().uri.path();
+        let GetHostsReturHeaders { server_headers, client_headers } = match self.get_header(hostname, path) {
+            Some(h) => h,
+            None => return Ok(()),
+        };
+
+        if let Some(sh) = server_headers {
+            for (k, v) in sh {
+                upstream_request.insert_header(k.to_string(), v.as_ref())?;
             }
         }
+        if let Some(ch) = client_headers {
+            ctx.client_headers = Some(Arc::new(ch));
+        }
         Ok(())
     }

     // async fn request_body_filter(&self, _session: &mut Session, _body: &mut Option<Bytes>, _end_of_stream: bool, _ctx: &mut Self::CTX) -> Result<()>
     // where
     //     Self::CTX: Send + Sync,
     // {
     //     Ok(())
     // }
     async fn response_filter(&self, session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> {
-        // _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap();
-        if ctx.extraparams.sticky_sessions {
-            let backend_id = ctx.backend_id.clone();
-            if let Some(bid) = self.ump_byid.get(&backend_id) {
-                let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.address));
+        if ctx.sticky_sessions {
+            if let Some(bid) = ctx.backend_id.clone() {
+                if REVERSE_STORE.get(&*bid).is_none() {
+                    let mut hasher = Sha256::new();
+                    hasher.update(bid.clone().into_bytes());
+                    let hash = hasher.finalize();
+                    let hex_hash = base16ct::lower::encode_string(&hash);
+                    let hh = hex_hash[0..50].to_string();
+                    REVERSE_STORE.insert(bid.clone(), hh.clone());
+                    REVERSE_STORE.insert(hh.clone(), bid.clone());
+                }
+                if let Some(tt) = REVERSE_STORE.get(&*bid) {
+                    let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", tt.value()));
+                }
             }
         }

         if ctx.to_https {
             let mut redirect_response = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
-            redirect_response.insert_header("Location", ctx.redirect_to.clone())?;
+            redirect_response.insert_header("Location", ctx.redirect_to.clone().unwrap_or(String::from("/")))?;
             redirect_response.insert_header("Content-Length", "0")?;
             session.write_response_header(Box::new(redirect_response), false).await?;
         }
-        match ctx.hostname.as_ref() {
-            Some(host) => {
-                let path = session.req_header().uri.path();
-                let host_header = host;
-                let split_header = host_header.split_once(':');
-                match split_header {
-                    Some(sh) => {
-                        let yoyo = self.get_header(sh.0, path);
-                        for k in yoyo.iter() {
-                            for t in k.iter() {
-                                _upstream_response.insert_header(t.0.clone(), t.1.clone()).unwrap();
-                            }
-                        }
-                    }
-                    None => {
-                        let yoyo = self.get_header(host_header, path);
-                        for k in yoyo.iter() {
-                            for t in k.iter() {
-                                _upstream_response.insert_header(t.0.clone(), t.1.clone()).unwrap();
-                            }
-                        }
-                    }
-                }
-            }
-            None => {}
+
+        // ALLOCATIONS !
+        if let Some(client_headers) = &ctx.client_headers {
+            for (k, v) in client_headers.iter() {
+                _upstream_response.append_header(k.to_string(), v.as_ref())?;
+            }
         }
-        session.set_keepalive(Some(300));
+        // END ALLOCATIONS !
+
+        // session.set_keepalive(Some(300));
+        // println!("session.get_keepalive: {:?}", session.get_keepalive());
         Ok(())
     }
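About the sticky-session change above: instead of leaking "host:port:ssl" into the cookie, the response filter now stores only a truncated SHA-256 hex digest and keeps a bidirectional map so the next request's cookie can be resolved back to a backend. A standalone sketch of that scheme, using the same crates the file already imports (the helper name is illustrative):

    use dashmap::DashMap;
    use once_cell::sync::Lazy;
    use sha2::{Digest, Sha256};

    static REVERSE_STORE: Lazy<DashMap<String, String>> = Lazy::new(DashMap::new);

    fn cookie_value(backend_id: &str) -> String {
        if let Some(hit) = REVERSE_STORE.get(backend_id) {
            return hit.value().clone();
        }
        let hash = Sha256::digest(backend_id.as_bytes());
        let hex = base16ct::lower::encode_string(&hash);
        let token = hex[..50].to_string(); // truncated digest as the opaque cookie value
        REVERSE_STORE.insert(backend_id.to_string(), token.clone());
        REVERSE_STORE.insert(token.clone(), backend_id.to_string()); // reverse lookup
        token
    }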
@@ -248,28 +287,22 @@ impl ProxyHttp for LB {
         let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16());
         debug!("{}, response code: {response_code}", self.request_summary(session, ctx));
         let m = &MetricTypes {
-            method: session.req_header().method.to_string(),
-            code: session.response_written().map(|resp| resp.status.as_str().to_owned()).unwrap_or("0".to_string()),
+            method: session.req_header().method.clone(),
+            code: session.response_written().map(|resp| resp.status),
             latency: ctx.start_time.elapsed(),
             version: session.req_header().version,
+            upstream: ctx.hostname.clone().unwrap_or(Arc::from("localhost")),
         };
         calc_metrics(m);
     }
 }

-fn return_header_host(session: &Session) -> Option<String> {
-    if session.is_http2() {
-        match session.req_header().uri.host() {
-            Some(host) => Option::from(host.to_string()),
-            None => None,
-        }
+fn return_header_host_from_upstream(session: &Session, ump_upst: &UpstreamsDashMap) -> Option<Arc<str>> {
+    let host_str = if session.is_http2() {
+        session.req_header().uri.host()?
     } else {
-        match session.req_header().headers.get("host") {
-            Some(host) => {
-                let header_host = host.to_str().unwrap().splitn(2, ':').collect::<Vec<&str>>();
-                Option::from(header_host[0].to_string())
-            }
-            None => None,
-        }
-    }
+        let h = session.req_header().headers.get("host")?.to_str().ok()?;
+        h.split_once(':').map_or(h, |(host, _)| host)
+    };
+    ump_upst.get(host_str).map(|entry| entry.key().clone())
 }
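One more note on return_header_host_from_upstream: stripping an optional :port with split_once and handing back the upstream map's own Arc<str> key means later per-request clones are just refcount bumps, never a fresh String. A minimal sketch of that interning trick (illustrative map and value types):

    use dashmap::DashMap;
    use std::sync::Arc;

    fn normalized_host(map: &DashMap<Arc<str>, u32>, raw_host: &str) -> Option<Arc<str>> {
        let host = raw_host.split_once(':').map_or(raw_host, |(h, _)| h);
        map.get(host).map(|entry| entry.key().clone()) // clone only bumps a refcount
    }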
@@ -5,6 +5,7 @@ use crate::utils::tls::CertificateConfig;
 use crate::utils::tools::*;
 use crate::web::proxyhttp::LB;
 use arc_swap::ArcSwap;
+use ctrlc;
 use dashmap::DashMap;
 use log::info;
 use pingora::tls::ssl::{SslAlert, SslRef};
@@ -14,7 +15,6 @@ use pingora_core::server::Server;
 use std::sync::mpsc::{channel, Receiver, Sender};
 use std::sync::Arc;
 use std::thread;

 pub fn run() {
-    // default_provider().install_default().expect("Failed to install rustls crypto provider");
     let parameters = Some(Opt::parse_args()).unwrap();
@@ -27,12 +27,13 @@ pub fn run() {
     let uf_config = Arc::new(DashMap::new());
     let ff_config = Arc::new(DashMap::new());
     let im_config = Arc::new(DashMap::new());
-    let hh_config = Arc::new(DashMap::new());
+    let ch_config = Arc::new(DashMap::new());
+    let sh_config = Arc::new(DashMap::new());

     let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams {
-        sticky_sessions: false,
-        authentication: DashMap::new(),
+        to_https: None,
+        sticky_sessions: false,
+        authentication: None,
         rate_limit: None,
     }));

@@ -43,37 +44,24 @@ pub fn run() {
         ump_full: ff_config,
         ump_byid: im_config,
         config: cfg.clone(),
-        headers: hh_config,
+        client_headers: ch_config,
+        server_headers: sh_config,
         extraparams: ec_config,
     };
-    /*
-    let log_level = cfg.log_level.clone();
-    unsafe {
-        match log_level.as_str() {
-            "info" => env::set_var("RUST_LOG", "info"),
-            "error" => env::set_var("RUST_LOG", "error"),
-            "warn" => env::set_var("RUST_LOG", "warn"),
-            "debug" => env::set_var("RUST_LOG", "debug"),
-            "trace" => env::set_var("RUST_LOG", "trace"),
-            "off" => env::set_var("RUST_LOG", "off"),
-            _ => {
-                println!("Error reading log level, defaulting to: INFO");
-                env::set_var("RUST_LOG", "info")
-            }
-        }
-    }
-    env_logger::builder().init();
-    */

     let grade = cfg.proxy_tls_grade.clone().unwrap_or("medium".to_string());
     info!("TLS grade set to: [ {} ]", grade);

     let bg_srvc = background_service("bgsrvc", lb.clone());
     let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb.clone());
     let bind_address_http = cfg.proxy_address_http.clone();

     let bind_address_tls = cfg.proxy_address_tls.clone();

     check_priv(bind_address_http.as_str());

     match bind_address_tls {
         Some(bind_address_tls) => {
             check_priv(bind_address_tls.as_str());
             let (tx, rx): (Sender<Vec<CertificateConfig>>, Receiver<Vec<CertificateConfig>>) = channel();
             let certs_path = cfg.proxy_certificates.clone().unwrap();
             thread::spawn(move || {
@@ -104,7 +92,6 @@ pub fn run() {
                 match new_certs {
                     Some(new_certs) => {
                         certs_for_watcher.store(Arc::new(new_certs));
                         info!("Reload TLS certificates from {}", cfg.proxy_certificates.clone().unwrap())
                     }
                     None => {}
                 };
@@ -117,5 +104,15 @@ pub fn run() {
     proxy.add_tcp(bind_address_http.as_str());
     server.add_service(proxy);
     server.add_service(bg_srvc);
-    server.run_forever();
+    thread::spawn(move || server.run_forever());
+
+    if let (Some(user), Some(group)) = (cfg.rungroup.clone(), cfg.runuser.clone()) {
+        drop_priv(user, group, cfg.proxy_address_http.clone(), cfg.proxy_address_tls.clone());
+    }
+
+    let (tx, rx) = channel();
+    ctrlc::set_handler(move || tx.send(()).expect("Could not send signal on channel.")).expect("Error setting Ctrl-C handler");
+    rx.recv().expect("Could not receive from channel.");
+    info!("Signal received ! Exiting...");
 }
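The tail of run() above changes the process lifecycle: run_forever() moves into a worker thread so the main thread can drop privileges after the listeners are bound, then block on a channel until Ctrl-C arrives. A reduced sketch of that flow (the privilege-drop call is commented out and purely illustrative):

    use std::sync::mpsc::channel;
    use std::thread;

    fn main() {
        thread::spawn(|| {
            // server.run_forever() would block here in the real code
            loop { thread::park(); }
        });

        // drop_privileges(user, group); // safe now: privileged ports are already bound

        let (tx, rx) = channel();
        ctrlc::set_handler(move || tx.send(()).expect("send shutdown signal"))
            .expect("install Ctrl-C handler");
        rx.recv().expect("wait for shutdown signal");
        println!("Signal received ! Exiting...");
    }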
@@ -1,12 +1,13 @@
 use crate::utils::discovery::APIUpstreamProvider;
-use crate::utils::structs::Configuration;
+use crate::utils::structs::{Config, Configuration, UpstreamsDashMap};
+use crate::utils::tools::{upstreams_liveness_json, upstreams_to_json};
 use axum::body::Body;
 use axum::extract::{Query, State};
 use axum::http::{Response, StatusCode};
 use axum::response::IntoResponse;
 use axum::routing::{get, post};
 use axum::{Json, Router};
-use axum_server::tls_openssl::OpenSSLConfig;
+// use axum_server::tls_openssl::OpenSSLConfig;
 use futures::channel::mpsc::Sender;
 use futures::SinkExt;
 use jsonwebtoken::{encode, EncodingKey, Header};
@@ -14,7 +15,8 @@ use log::{error, info, warn};
 use prometheus::{gather, Encoder, TextEncoder};
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
-use std::net::SocketAddr;
+// use std::net::SocketAddr;
 use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use tokio::net::TcpListener;
+use tower_http::services::ServeDir;
@@ -36,16 +38,19 @@ struct AppState {
     master_key: String,
     config_sender: Sender<Configuration>,
     config_api_enabled: bool,
+    current_upstreams: Arc<UpstreamsDashMap>,
+    full_upstreams: Arc<UpstreamsDashMap>,
 }

 #[allow(unused_mut)]
-pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>) {
+pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>, upstreams_curr: Arc<UpstreamsDashMap>, upstreams_full: Arc<UpstreamsDashMap>) {
     let app_state = AppState {
         master_key: config.masterkey.clone(),
         config_sender: to_return.clone(),
         config_api_enabled: config.config_api_enabled.clone(),
+        current_upstreams: upstreams_curr,
+        full_upstreams: upstreams_full,
     };

     let app = Router::new()
         // .route("/{*wildcard}", get(senderror))
         // .route("/{*wildcard}", post(senderror))
@@ -56,19 +61,20 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
         .route("/jwt", post(jwt_gen))
         .route("/conf", post(conf))
         .route("/metrics", get(metrics))
+        .route("/status", get(status))
         .with_state(app_state);

-    if let Some(value) = &config.tls_address {
-        let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap();
-        let addr: SocketAddr = value.parse().expect("Unable to parse socket address");
-        let tls_app = app.clone();
-        tokio::spawn(async move {
-            if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await {
-                eprintln!("TLS server failed: {}", e);
-            }
-        });
-        info!("Starting the TLS API server on: {}", value);
-    }
+    // if let Some(value) = &config.tls_address {
+    //     let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap();
+    //     let addr: SocketAddr = value.parse().expect("Unable to parse socket address");
+    //     let tls_app = app.clone();
+    //     tokio::spawn(async move {
+    //         if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await {
+    //             eprintln!("TLS server failed: {}", e);
+    //         }
+    //     });
+    //     info!("Starting the TLS API server on: {}", value);
+    // }

     if let (Some(address), Some(folder)) = (&config.file_server_address, &config.file_server_folder) {
         let static_files = ServeDir::new(folder);
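For orientation in the run_server changes above: every handler now reaches the upstream tables through one cloneable AppState passed via with_state. A compact, self-contained sketch of that axum pattern (trimmed state fields and illustrative values, not the project's real state):

    use axum::{extract::State, routing::get, Router};

    #[derive(Clone)]
    struct AppState {
        master_key: String,
    }

    async fn status(State(st): State<AppState>) -> String {
        format!("api enabled, key length {}", st.master_key.len())
    }

    #[tokio::main]
    async fn main() {
        let app = Router::new().route("/status", get(status)).with_state(AppState {
            master_key: "secret".into(),
        });
        let listener = tokio::net::TcpListener::bind("127.0.0.1:3000").await.unwrap();
        axum::serve(listener, app).await.unwrap();
    }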
@@ -82,27 +88,41 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
     axum::serve(listener, app).await.unwrap();
 }

-async fn conf(State(mut st): State<AppState>, Query(params): Query<HashMap<String, String>>, content: String) -> impl IntoResponse {
+async fn conf(State(st): State<AppState>, Query(params): Query<HashMap<String, String>>, content: String) -> impl IntoResponse {
     if !st.config_api_enabled {
-        return Response::builder()
-            .status(StatusCode::FORBIDDEN)
-            .body(Body::from("Config remote API is disabled !\n"))
-            .unwrap();
+        return Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Config API is disabled !\n")).unwrap();
     }

-    if let Some(s) = params.get("key") {
-        if s.to_owned() == st.master_key {
-            if let Some(serverlist) = crate::utils::parceyaml::load_configuration(content.as_str(), "content").await {
-                st.config_sender.send(serverlist).await.unwrap();
-                return Response::builder().status(StatusCode::OK).body(Body::from("Config, conf file, updated !\n")).unwrap();
-            } else {
-                return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("Failed to parse config!\n")).unwrap();
-            };
+    let strcontent = content.as_str();
+    let parsed = serde_yaml::from_str::<Config>(strcontent);
+    match parsed {
+        Ok(_) => {
+            if let Some(s) = params.get("key") {
+                if s.to_owned() == st.master_key {
+                    let _ = tokio::spawn(async move { apply_config(content.as_str(), st).await });
+                    return Response::builder().status(StatusCode::OK).body(Body::from("Accepted! Applying in background\n")).unwrap();
+                }
+            }
+            return Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap();
+        }
+        Err(err) => {
+            error!("Failed to parse upstreams file: {}", err);
+            return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from(format!("Failed: {}\n", err))).unwrap();
         }
     }
-    Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap()
 }

+async fn apply_config(content: &str, mut st: AppState) {
+    let sl = crate::utils::parceyaml::load_configuration(content, "content").await;
+    if let Some(serverlist) = sl.0 {
+        let _ = st.config_sender.send(serverlist).await;
+    }
+}
+
 async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) {
     if payload.master_key == state.master_key {
         let now = SystemTime::now() + Duration::from_secs(payload.valid * 60);
@@ -132,7 +152,6 @@ async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -
 async fn metrics() -> impl IntoResponse {
     let metric_families = gather();
     let encoder = TextEncoder::new();
-
     let mut buffer = Vec::new();
     if let Err(e) = encoder.encode(&metric_families, &mut buffer) {
         // encoding error fallback
@@ -141,7 +160,6 @@ async fn metrics() -> impl IntoResponse {
             .body(Body::from(format!("Failed to encode metrics: {}", e)))
             .unwrap();
     }
-
     Response::builder()
         .status(StatusCode::OK)
         .header("Content-Type", encoder.format_type())
@@ -149,7 +167,35 @@ async fn metrics() -> impl IntoResponse {
         .unwrap()
 }

 // #[allow(dead_code)]
 // async fn senderror() -> impl IntoResponse {
 //     Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("No live upstream found!\n")).unwrap()
 // }
+async fn status(State(st): State<AppState>, Query(params): Query<HashMap<String, String>>) -> impl IntoResponse {
+    if let Some(_) = params.get("live") {
+        let r = upstreams_liveness_json(&st.full_upstreams, &st.current_upstreams);
+        return Response::builder()
+            .status(StatusCode::OK)
+            .header("Content-Type", "application/json")
+            .body(Body::from(format!("{}", r)))
+            .unwrap();
+    }
+    if let Some(_) = params.get("all") {
+        let resp = upstreams_to_json(&st.current_upstreams);
+        match resp {
+            Ok(j) => {
+                return Response::builder()
+                    .status(StatusCode::OK)
+                    .header("Content-Type", "application/json")
+                    .body(Body::from(j))
+                    .unwrap()
+            }
+            Err(e) => {
+                return Response::builder()
+                    .status(StatusCode::INTERNAL_SERVER_ERROR)
+                    .body(Body::from(format!("Failed to get status: {}", e)))
+                    .unwrap();
+            }
+        }
+    }
+    Response::builder()
+        .status(StatusCode::INTERNAL_SERVER_ERROR)
+        .body(Body::from(format!("Parameter mismatch")))
+        .unwrap()
+}
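The reworked /conf endpoint above validates the posted YAML synchronously, answers "Accepted! Applying in background", and hands the actual apply to a spawned task that pushes the parsed configuration over the existing futures channel. A standalone sketch of that accept-then-apply pattern (the Config shape here is illustrative, not the project's real schema):

    use futures::channel::mpsc;
    use futures::{SinkExt, StreamExt};

    #[derive(Debug, serde::Deserialize)]
    struct Config {
        upstreams: Vec<String>,
    }

    #[tokio::main]
    async fn main() {
        let (mut tx, mut rx) = mpsc::channel::<Config>(8);

        let body = "upstreams:\n  - 127.0.0.1:8000\n";
        if serde_yaml::from_str::<Config>(body).is_ok() {
            // cheap validation passed: reply "Accepted", apply in the background
            let owned = body.to_string();
            tokio::spawn(async move {
                if let Ok(cfg) = serde_yaml::from_str::<Config>(&owned) {
                    let _ = tx.send(cfg).await;
                }
            });
        }

        if let Some(cfg) = rx.next().await {
            println!("applying {} upstreams", cfg.upstreams.len());
        }
    }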