93 Commits

Author SHA1 Message Date
Ara Sadoyan
bee307793c restructurisation grades 2026-04-27 15:28:54 +02:00
Ara Sadoyan
6e83775127 restructurisation 2026-04-27 15:22:31 +02:00
Ara Sadoyan
baded40e6e Cache for JWT tokens, to minimize crypto. BREAKING: Claims key "valid" renamed to "exp" 2026-04-17 17:53:31 +02:00
Ara Sadoyan
c0a419f6f7 completed implementation of #17 2026-04-15 18:23:57 +02:00
Ara Sadoyan
8aff2fa875 Standardizing implementation of #17 2026-04-14 16:11:24 +02:00
Ara Sadoyan
9b4ee26a2b Working on #17 2026-04-13 20:06:57 +02:00
Ara Sadoyan
f135106a44 Changes in authentication 2026-04-08 19:05:19 +02:00
Ara Sadoyan
389c12119a code cleanup and improvements. 2026-04-08 17:00:06 +02:00
Ara Sadoyan
93a8661281 Cargo cleanup, dependency merge 2026-04-08 15:14:46 +02:00
Ara Sadoyan
0505ce2849 split upstreams.yaml file 2026-03-30 19:04:32 +02:00
Ara Sadoyan
72ed870538 split upstreams.yaml file 2026-03-27 19:24:30 +01:00
Ara Sadoyan
68140d0cf0 type changes, optimization 2026-03-26 17:40:22 +01:00
Ara Sadoyan
7b9b206c13 optimization & cleanup 2026-03-26 16:58:53 +01:00
Ara Sadoyan
4706b281bc cleanup 2026-03-26 14:17:59 +01:00
Ara Sadoyan
1f8efc6af7 FUNDING.yml 2026-03-25 15:16:47 +01:00
Ara Sadoyan
9f595b2709 example config file update 2026-03-25 11:15:55 +01:00
Ara Sadoyan
ed44516015 added redirect_to directive for upstreams 2026-03-24 16:08:14 +01:00
Ara Sadoyan
17da7862e3 upstreams ID hashing update 2026-03-18 20:06:50 +01:00
Ara Sadoyan
24d00da855 performance improvement, sticky session minor bug fix 2026-03-17 19:21:05 +01:00
Ara Sadoyan
c9422759aa Minor performance improvement 2026-03-17 13:54:42 +01:00
Ara Sadoyan
94b1f77734 Type changes, auth override policy 2026-03-04 12:35:45 +01:00
Ara Sadoyan
9d986f9a28 Path level authentication 2026-03-03 19:35:16 +01:00
Ara Sadoyan
3afa2f209f pingora 0.8.0 upgrade 2026-03-03 13:54:53 +01:00
Ara Sadoyan
c151fdf58b moving to boringssl 2026-02-19 18:11:54 +01:00
Ara Sadoyan
438426153f removed unwrap 2026-02-18 12:00:33 +01:00
Ara Sadoyan
9bb01fd1b0 minor improvements 2026-02-17 18:22:46 +01:00
Ara Sadoyan
abb5fef1d6 minor improvements 2026-02-17 17:03:52 +01:00
Ara Sadoyan
3618687ad5 Memory allocation improvements for proxyhttp, fix issue with sticky session . 2026-02-10 19:07:43 +01:00
Ara Sadoyan
a893b3c301 Memory allocation improvements for metrics collector . 2026-02-05 13:57:39 +01:00
Ara Sadoyan
3ff262c7f4 Merge pull request #13 from yerke/patch-1
Fix grammar and formatting in README.md
2026-02-04 14:41:50 +01:00
Yerkebulan Tulibergenov
062f02259f Fix grammar and formatting in README.md 2026-01-30 23:59:10 -08:00
Ara Sadoyan
1a4c9b7d55 Performance optimization in headers 2026-01-28 16:07:45 +01:00
Ara Sadoyan
6ef7f23823 Performance optimization v2 2026-01-28 13:20:31 +01:00
Ara Sadoyan
2b437c65fb Performance improvement. String removal from hot paths. 2026-01-27 16:19:51 +01:00
Ara Sadoyan
38055ae94e added new metric aralez_requests_by_upstream 2026-01-25 18:08:15 +01:00
Ara Sadoyan
703de9e909 updates on API server https://sadoyan.github.io/aralez-docs/assets/api/ 2026-01-22 16:50:51 +01:00
Ara Sadoyan
2c8b01295c Minor subfunction removal 2026-01-21 20:01:16 +01:00
Ara Sadoyan
baebe1c00f Async apply of config via API 2026-01-20 19:16:27 +01:00
Ara Sadoyan
6c1d3c5ef8 Error handling on API server 2026-01-09 18:44:36 +01:00
Ara Sadoyan
2d1a827007 Removed unneeded loop 2025-12-14 12:09:11 +01:00
Ara Sadoyan
a2a5250711 Performance improvements on data types . 2025-12-11 15:21:34 +01:00
Ara Sadoyan
985e923342 to https redirect bug fix 2025-12-11 13:37:40 +01:00
Ara Sadoyan
0fc79c022f perf: optimize header handling and concurrent access patterns 2025-12-10 19:09:04 +01:00
Ara Sadoyan
a43bccdfb8 minor, performance improvements 2025-11-28 13:13:15 +01:00
Ara Sadoyan
5b87391fbb some more type changes, performance improvements 2025-11-27 18:47:04 +01:00
Ara Sadoyan
c68a4ad83d Type changes, performance improvements 2025-11-27 18:03:34 +01:00
Ara Sadoyan
8ba8d32df1 Performance improvements, type changes 2025-11-26 12:12:41 +01:00
Ara Sadoyan
7a839065e6 update on kubernetes web client 2025-11-24 17:57:44 +01:00
Ara Sadoyan
74821654f3 Added support to send custom headers to upstream servers. 2025-11-22 23:18:06 +01:00
Ara Sadoyan
78c83b802f Merge Consul & Kubernetes discovery 2025-10-26 15:26:09 +01:00
Ara Sadoyan
012505b77e Cleaning up the code 2025-10-24 15:27:15 +02:00
Ara Sadoyan
21c4cb0901 Update README.md 2025-10-18 11:49:51 +02:00
Ara Sadoyan
86dd3d3402 README update 2025-10-18 11:48:48 +02:00
Ara Sadoyan
d6b345202b README update 2025-10-17 17:03:45 +02:00
Ara Sadoyan
5209d787e4 README update 2025-10-17 16:44:57 +02:00
Ara Sadoyan
02de5f1c21 Merge remote-tracking branch 'origin/main' 2025-10-16 19:05:15 +02:00
Ara Sadoyan
9519280026 Path filter, and rate limiter for Consul 2025-10-16 19:04:46 +02:00
Ara Sadoyan
e87c60cf4f unifying kubernetes and file provider configs 2025-10-15 19:13:33 +02:00
Ara Sadoyan
25693a7058 Path filtering and rate limit for kubernetes 2025-10-15 13:42:05 +02:00
Ara Sadoyan
3b0b385ec7 Create FUNDING.yml 2025-10-03 11:02:21 +02:00
Ara Sadoyan
5359c2e8e9 Create LICENSE 2025-10-02 11:14:40 +02:00
Ara Sadoyan
2b62d1e6de configs update 2025-10-02 10:56:55 +02:00
Ara Sadoyan
8a290e5084 Kubernetes path based routing 2025-10-01 20:18:36 +02:00
Ara Sadoyan
3541b20c80 intermediate minor optimization 2025-10-01 13:47:30 +02:00
Ara Sadoyan
bd5fed9be0 Fix drop privileges, check root 2025-09-28 12:23:53 +02:00
Ara Sadoyan
b916b152ea Changed config file parser at startup, to keep initially dead nodes in list. 2025-09-25 18:32:46 +02:00
Ara Sadoyan
5d4915d6b9 Fixed drop root privileges on ports below 1024 2025-09-19 12:46:17 +02:00
Ara Sadoyan
3ea3996e27 upgrade to pingora 0.6 2025-09-18 14:15:50 +02:00
Ara Sadoyan
dd069b8532 minor fix 2025-09-17 16:51:57 +02:00
Ara Sadoyan
c78245e695 disable HC for upstream. 2025-09-16 12:54:23 +02:00
Ara Sadoyan
66b1a1c399 upstreams pathconfig fix 2025-09-15 15:22:21 +02:00
Ara Sadoyan
bba6dd8514 minor cleanup 2025-09-09 14:51:37 +02:00
Ara Sadoyan
79485ac69d minor cleanup 2025-09-04 18:16:09 +02:00
Ara Sadoyan
61c5625016 A coffee :-) 2025-09-02 14:57:47 +02:00
Ara Sadoyan
57bdc71acd A coffee :-) 2025-09-02 14:56:36 +02:00
Ara Sadoyan
9e09b829a6 README update 2025-09-01 17:02:57 +02:00
Ara Sadoyan
d3602fa578 Added Kubernetes API support, for ingress controller. 2025-09-01 16:32:30 +02:00
Ara Sadoyan
e304482667 Optimized healthchecks and config file loading 2025-08-20 14:03:09 +02:00
Ara Sadoyan
f8118f9596 TLS grades change 2025-08-05 19:08:58 +02:00
Ara Sadoyan
f654312466 SSL cipher management 2025-07-29 21:25:27 +02:00
Ara Sadoyan
b44f7069a0 Configurable TLS ciphers 2025-07-27 11:15:49 +02:00
Ara Sadoyan
a44979ec82 Configurable TLS ciphers 2025-07-27 11:13:39 +02:00
Ara Sadoyan
ece4fa20af README 2025-07-24 13:50:15 +02:00
Ara Sadoyan
2ad3a059ab Per path rate limiter 2025-07-24 13:34:15 +02:00
Ara Sadoyan
6f012cee69 Code cleanup 2025-07-22 17:40:58 +02:00
Ara Sadoyan
51c88c8f7c Some structural changes and improvements 2025-07-12 16:17:45 +02:00
Ara Sadoyan
f91bc41103 benchmark image 2025-07-10 17:46:05 +02:00
Ara Sadoyan
21e1276ff5 Readme update 2025-07-09 15:22:38 +02:00
Ara Sadoyan
8463cdabbc Added configurable rate limiter 2025-07-09 15:01:20 +02:00
Ara Sadoyan
d0e4b52ce6 Enable/Disable config API from config 2025-07-04 15:06:05 +02:00
Ara Sadoyan
b552d24497 README 2025-07-02 19:00:05 +02:00
Ara Sadoyan
2e33d692bb Added optional minimal file server 2025-07-02 18:29:14 +02:00
Ara Sadoyan
e586967830 Code cleanup, nothing special 2025-06-30 18:24:25 +02:00
37 changed files with 4702 additions and 2092 deletions

13
.cargo/config.toml Normal file
View File

@@ -0,0 +1,13 @@
[target.aarch64-unknown-linux-musl]
rustflags = [
"-C", "link-arg=-Wl,--defsym=fopen64=fopen",
"-C", "link-arg=-Wl,--defsym=fseeko64=fseeko",
"-C", "link-arg=-Wl,--defsym=ftello64=ftello"
]
[target.x86_64-unknown-linux-musl]
rustflags = [
"-C", "link-arg=-Wl,--defsym=fopen64=fopen",
"-C", "link-arg=-Wl,--defsym=fseeko64=fseeko",
"-C", "link-arg=-Wl,--defsym=ftello64=ftello"
]

15
.github/FUNDING.yml vendored Normal file
View File

@@ -0,0 +1,15 @@
# These are supported funding model platforms
github: sadoyan
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
polar: # Replace with a single Polar username
buy_me_a_coffee: sadoyan
thanks_dev: # Replace with a single thanks.dev username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

3
.gitignore vendored
View File

@@ -5,9 +5,12 @@
*.dll *.dll
*.exe *.exe
*.sh *.sh
/docs/
/docs
/target/ /target/
*.iml *.iml
.idea/ .idea/
.etc/
*.ipr *.ipr
*.iws *.iws
/out/ /out/

2952
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "aralez" name = "aralez"
version = "0.9.1" version = "0.9.2"
edition = "2021" edition = "2021"
[profile.release] [profile.release]
@@ -11,39 +11,38 @@ panic = "abort"
strip = true strip = true
[dependencies] [dependencies]
tokio = { version = "1.45.1", features = ["full"] } tokio = { version = "1.52.1", features = ["full"] }
#pingora = { version = "0.5.0", features = ["lb", "rustls"] } # openssl, rustls, boringssl pingora = { version = "0.8.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl
pingora = { version = "0.5.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl serde = { version = "1.0.228", features = ["derive"] }
serde = { version = "1.0.219", features = ["derive"] }
dashmap = "7.0.0-rc2" dashmap = "7.0.0-rc2"
pingora-core = "0.5.0" pingora-core = "0.8.0"
pingora-proxy = "0.5.0" pingora-proxy = "0.8.0"
pingora-http = "0.5.0" pingora-http = "0.8.0"
async-trait = "0.1.88" pingora-limits = "0.8.0"
env_logger = "0.11.8" async-trait = "0.1.89"
log = "0.4.27" env_logger = "0.11.10"
futures = "0.3.31" log = "0.4.29"
notify = "8.0.0" futures = "0.3.32"
axum = { version = "0.8.4" } notify = "9.0.0-rc.3"
axum-server = { version = "0.7.2", features = ["tls-openssl"] } axum = { version = "0.8.9" }
reqwest = { version = "0.12.20", features = ["json", "native-tls-alpn"] } reqwest = { version = "0.13.2", features = ["json", "stream", "blocking"] }
#reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] } serde_yml = "0.0.12"
#reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] } rand = "0.10.1"
serde_yaml = "0.9.34-deprecated"
rand = "0.9.0"
base64 = "0.22.1" base64 = "0.22.1"
jsonwebtoken = "9.3.1" jsonwebtoken = { version = "10.3.0", default-features = false, features = ["use_pem", "rust_crypto"] }
tonic = "0.13.1" tonic = "0.14.5"
sha2 = { version = "0.11.0-rc.0", default-features = false } sha2 = { version = "0.11.0-rc.5", default-features = false }
base16ct = { version = "0.2.0", features = ["alloc"] } base16ct = { version = "1.0.0", features = ["alloc"] }
urlencoding = "2.1.3" urlencoding = "2.1.3"
arc-swap = "1.7.1" arc-swap = "1.9.1"
#rustls = { version = "0.23.27", features = ["ring"] } mimalloc = { version = "0.1.50", default-features = false }
mimalloc = { version = "0.1.47", default-features = false }
prometheus = "0.14.0" prometheus = "0.14.0"
lazy_static = "1.5.0" x509-parser = "0.18.1"
#openssl = "0.10.73"
x509-parser = "0.17.0"
rustls-pemfile = "2.2.0" rustls-pemfile = "2.2.0"
tower-http = { version = "0.6.8", features = ["fs"] }
privdrop = "0.5.6"
ctrlc = "3.5.2"
serde_json = "1.0.149"
subtle = "2.6.1"
moka = { version = "0.12.1", features = ["sync"] }
ahash = "0.8.12"

201
LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,120 +0,0 @@
# 📈 Aralez Prometheus Metrics Reference
This document outlines Prometheus metrics for the [Aralez](https://github.com/sadoyan/aralez) reverse proxy.
These metrics can be used for monitoring, alerting and performance analysis.
Exposed to `http://config_address/metrics`
By default `http://127.0.0.1:3000/metrics`
# 📊 Example Grafana dashboard during stress test :
![Aralez](https://netangels.net/utils/dash.png)
---
## 🛠️ Prometheus Metrics
### 1. `aralez_requests_total`
- **Type**: `Counter`
- **Purpose**: Total number of requests served by Aralez.
**PromQL example:**
```promql
rate(aralez_requests_total[5m])
```
---
### 2. `aralez_errors_total`
- **Type**: `Counter`
- **Purpose**: Count of requests that resulted in an error.
**PromQL example:**
```promql
rate(aralez_errors_total[5m])
```
---
### 3. `aralez_responses_total{status="200"}`
- **Type**: `CounterVec`
- **Purpose**: Count of responses by HTTP status code.
**PromQL example:**
```promql
rate(aralez_responses_total{status=~"5.."}[5m]) > 0
```
> Useful for alerting on 5xx errors.
---
### 4. `aralez_response_latency_seconds`
- **Type**: `Histogram`
- **Purpose**: Tracks the latency of responses in seconds.
**Example bucket output:**
```prometheus
aralez_response_latency_seconds_bucket{le="0.01"} 15
aralez_response_latency_seconds_bucket{le="0.1"} 120
aralez_response_latency_seconds_bucket{le="0.25"} 245
aralez_response_latency_seconds_bucket{le="0.5"} 500
...
aralez_response_latency_seconds_count 1023
aralez_response_latency_seconds_sum 42.6
```
| Metric | Meaning |
|-------------------------|---------------------------------------------------------------|
| `bucket{le="0.1"} 120` | 120 requests were ≤ 100ms |
| `bucket{le="0.25"} 245` | 245 requests were ≤ 250ms |
| `count` | Total number of observations (i.e., total responses measured) |
| `sum` | Total time of all responses, in seconds |
### 🔍 How to interpret:
- `le` means “less than or equal to”.
- `count` is the total number of observations.
- `sum` is the total time (in seconds) of all responses.
**PromQL examples:**
🔹 **95th percentile latency**
```promql
histogram_quantile(0.95, rate(aralez_response_latency_seconds_bucket[5m]))
```
🔹 **Average latency**
```promql
rate(aralez_response_latency_seconds_sum[5m]) / rate(aralez_response_latency_seconds_count[5m])
```
---
## ✅ Notes
- Metrics are registered after the first served request.
---
✅ Summary of key metrics
| Metric Name | Type | What it Tells You |
|---------------------------------------|------------|---------------------------|
| `aralez_requests_total` | Counter | Total requests served |
| `aralez_errors_total` | Counter | Number of failed requests |
| `aralez_responses_total{status="200"}` | CounterVec | Response status breakdown |
| `aralez_response_latency_seconds` | Histogram | How fast responses are |
📘 *Last updated: May 2025*

148
README.md
View File

@@ -1,19 +1,33 @@
![Aralez](https://netangels.net/utils/aralez-white.jpg) ![Aralez](https://netangels.net/utils/aralez-white.jpg)
# Aralez (Արալեզ), Reverse proxy and service mesh built on top of Cloudflare's Pingora ---
# Aralez (Արալեզ),
### **Reverse proxy built on top of Cloudflare's Pingora**
Aralez is a high-performance Rust reverse proxy with zero-configuration automatic protocol handling, TLS, and upstream management,
featuring Consul and Kubernetes integration for dynamic pod discovery and health-checked routing, acting as a lightweight ingress-style proxy.
---
What Aralez means ? What Aralez means ?
**Aralez = Արալեզ** <ins>.Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them.</ins>. **Aralez = Արալեզ** <ins>Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them</ins>.
Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box. Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box.
[![Buy Me A Coffee](https://img.shields.io/badge/☕-Buy%20me%20a%20coffee-orange)](https://www.buymeacoffee.com/sadoyan)
--- ---
## 🔧 Key Features ## 🔧 Key Features
- **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required. - **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required.
- **TLS Termination** — Built-in OpenSSL support. - **TLS Termination** — Built-in OpenSSL support.
- **Automatic loading of certificates** — Automatically reads and loads certificates from a folder, without a restart.
- **Upstreams TLS detection** — Aralez will automatically detect if upstreams uses secure connection. - **Upstreams TLS detection** — Aralez will automatically detect if upstreams uses secure connection.
- **Built in rate limiter** — Limit requests to server, by setting up upper limit for requests per seconds, per virtualhost.
- **Global rate limiter** — Set rate limit for all virtualhosts.
- **Per path rate limiter** — Set rate limit for specific paths. Path limits will override global limits.
- **Authentication** — Supports Basic Auth, API tokens, and JWT verification. - **Authentication** — Supports Basic Auth, API tokens, and JWT verification.
- **Basic Auth** - **Basic Auth**
- **API Key** via `x-api-key` header - **API Key** via `x-api-key` header
@@ -24,6 +38,7 @@ Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers
- Failover with health checks - Failover with health checks
- Sticky sessions via cookies - Sticky sessions via cookies
- **Unified Port** — Serve HTTP and WebSocket traffic over the same connection. - **Unified Port** — Serve HTTP and WebSocket traffic over the same connection.
- **Built in file server** — Built-in minimalistic file server for serving static files; should be added as an upstream for public access. - **Memory Safe** — Created purely on Rust.
- **Memory Safe** — Created purely on Rust. - **Memory Safe** — Created purely on Rust.
- **High Performance** — Built with [Pingora](https://github.com/cloudflare/pingora) and tokio for async I/O. - **High Performance** — Built with [Pingora](https://github.com/cloudflare/pingora) and tokio for async I/O.
@@ -62,10 +77,10 @@ Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers
### 🔧 `main.yaml` ### 🔧 `main.yaml`
| Key | Example Value | Description | | Key | Example Value | Description |
|----------------------------------|--------------------------------------|--------------------------------------------------------------------------------------------------| |----------------------------------|--------------------------------------|----------------------------------------------------------------------------------------------------|
| **threads** | 12 | Number of running daemon threads. Optional, defaults to 1 | | **threads** | 12 | Number of running daemon threads. Optional, defaults to 1 |
| **user** | aralez | Optional, Username for running aralez after dropping root privileges, requires to launch as root | | **runuser** | aralez | Optional, Username for running aralez after dropping root privileges, requires to launch as root |
| **group** | aralez | Optional,Group for running aralez after dropping root privileges, requires to launch as root | | **rungroup** | aralez | Optional,Group for running aralez after dropping root privileges, requires to launch as root |
| **daemon** | false | Run in background (boolean) | | **daemon** | false | Run in background (boolean) |
| **upstream_keepalive_pool_size** | 500 | Pool size for upstream keepalive connections | | **upstream_keepalive_pool_size** | 500 | Pool size for upstream keepalive connections |
| **pid_file** | /tmp/aralez.pid | Path to PID file | | **pid_file** | /tmp/aralez.pid | Path to PID file |
@@ -74,6 +89,7 @@ Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers
| **config_address** | 0.0.0.0:3000 | HTTP API address for pushing upstreams.yaml from remote location | | **config_address** | 0.0.0.0:3000 | HTTP API address for pushing upstreams.yaml from remote location |
| **config_tls_address** | 0.0.0.0:3001 | HTTPS API address for pushing upstreams.yaml from remote location | | **config_tls_address** | 0.0.0.0:3001 | HTTPS API address for pushing upstreams.yaml from remote location |
| **config_tls_certificate** | etc/server.crt | Certificate file path for API. Mandatory if proxy_address_tls is set, else optional | | **config_tls_certificate** | etc/server.crt | Certificate file path for API. Mandatory if proxy_address_tls is set, else optional |
| **proxy_tls_grade** | (high, medium, unsafe) | Grade of TLS ciphers, for easy configuration. High matches Qualys SSL Labs A+ (defaults to medium) |
| **config_tls_key_file** | etc/key.pem | Private Key file path. Mandatory if proxy_address_tls is set, else optional | | **config_tls_key_file** | etc/key.pem | Private Key file path. Mandatory if proxy_address_tls is set, else optional |
| **proxy_address_http** | 0.0.0.0:6193 | Aralez HTTP bind address | | **proxy_address_http** | 0.0.0.0:6193 | Aralez HTTP bind address |
| **proxy_address_tls** | 0.0.0.0:6194 | Aralez HTTPS bind address (Optional) | | **proxy_address_tls** | 0.0.0.0:6194 | Aralez HTTPS bind address (Optional) |
@@ -83,6 +99,9 @@ Built on Rust, on top of **Cloudflares Pingora engine**, **Aralez** delivers
| **hc_method** | HEAD | Healthcheck method (HEAD, GET, POST are supported) UPPERCASE | | **hc_method** | HEAD | Healthcheck method (HEAD, GET, POST are supported) UPPERCASE |
| **hc_interval** | 2 | Interval for health checks in seconds | | **hc_interval** | 2 | Interval for health checks in seconds |
| **master_key** | 5aeff7f9-7b94-447c-af60-e8c488544a3e | Master key for working with API server and JWT Secret generation | | **master_key** | 5aeff7f9-7b94-447c-af60-e8c488544a3e | Master key for working with API server and JWT Secret generation |
| **file_server_folder** | /some/local/folder | Optional, local folder to serve |
| **file_server_address** | 127.0.0.1:3002 | Optional, Local address for file server. Can set as upstream for public access |
| **config_api_enabled** | true | Boolean to enable/disable remote config push capability |
### 🌐 `upstreams.yaml` ### 🌐 `upstreams.yaml`
@@ -104,11 +123,42 @@ Make the binary executable `chmod 755 ./aralez-VERSION` and run.
File names: File names:
| File Name | Description | | File Name | Description |
|---------------------------|---------------------------------------------------------------| |---------------------------------|--------------------------------------------------------------------------|
| `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency | | `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
| `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies | | `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
| `aralez-x86_64-compat-musl.gz` | Static Linux x86_64 binary, compatible with old pre Haswell CPUs |
| `aralez-x86_64-compat-glibc.gz` | Dynamic Linux x86_64 binary, compatible with old pre Haswell CPUs |
| `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency | | `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies | | `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
| `sadoyan/aralez` | Docker image on Debian 13 slim (https://hub.docker.com/r/sadoyan/aralez) |
**Via docker**
```shell
docker run -d \
-v /local/path/to/config:/etc/aralez:ro \
-p 80:80 \
-p 443:443 \
sadoyan/aralez
```
## 💡 Note
In general **glibc** builds are working faster, but have few, basic, system dependencies for example :
```
linux-vdso.so.1 (0x00007ffeea33b000)
libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007f09e7377000)
libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f09e6320000)
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f09e613f000)
/lib64/ld-linux-x86-64.so.2 (0x00007f09e73b1000)
```
These are common to all Linux systems, so the binary should work on almost any Linux system.
**musl** builds are 100% portable, statically compiled binaries and have zero system dependencies.
In general musl builds have slightly lower performance.
The most intensive tests show 107k-110k requests per second for **Glibc** binaries against 97k-100k for **Musl** ones.
## 🔌 Running the Proxy ## 🔌 Running the Proxy
@@ -142,7 +192,11 @@ A sample `upstreams.yaml` entry:
provider: "file" provider: "file"
sticky_sessions: false sticky_sessions: false
to_https: false to_https: false
headers: rate_limit: 10
server_headers:
- "X-Forwarded-Proto:https"
- "X-Forwarded-Port:443"
client_headers:
- "Access-Control-Allow-Origin:*" - "Access-Control-Allow-Origin:*"
- "Access-Control-Allow-Methods:POST, GET, OPTIONS" - "Access-Control-Allow-Methods:POST, GET, OPTIONS"
- "Access-Control-Max-Age:86400" - "Access-Control-Max-Age:86400"
@@ -152,8 +206,12 @@ authorization:
myhost.mydomain.com: myhost.mydomain.com:
paths: paths:
"/": "/":
rate_limit: 20
to_https: false to_https: false
headers: server_headers:
- "X-Something-Else:Foobar"
- "X-Another-Header:Hohohohoho"
client_headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa" - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
- "X-Proxy-From:Hopaaaaaaaaaaaar" - "X-Proxy-From:Hopaaaaaaaaaaaar"
servers: servers:
@@ -161,24 +219,36 @@ myhost.mydomain.com:
- "127.0.0.2:8000" - "127.0.0.2:8000"
"/foo": "/foo":
to_https: true to_https: true
headers: client_headers:
- "X-Another-Header:Hohohohoho" - "X-Another-Header:Hohohohoho"
servers: servers:
- "127.0.0.4:8443" - "127.0.0.4:8443"
- "127.0.0.5:8443" - "127.0.0.5:8443"
"/.well-known/acme-challenge":
healthcheck: false
servers:
- "127.0.0.1:8001"
``` ```
**This means:** **This means:**
- Sticky sessions are disabled globally. This setting applies to all upstreams. If enabled all requests will be 301 redirected to HTTPS. - Sticky sessions are disabled globally. This setting applies to all upstreams. If enabled all requests will be 301 redirected to HTTPS.
- HTTP to HTTPS redirect disabled globally, but can be overridden by `to_https` setting per upstream. - HTTP to HTTPS redirect disabled globally, but can be overridden by `to_https` setting per upstream.
- All upstreams will receive custom headers : `X-Forwarded-Proto:https` and `X-Forwarded-Port:443`
- Additionally, myhost.mydomain.com with path `/` will receive custom headers : `X-Another-Header:Hohohohoho` and `X-Something-Else:Foobar`
- Requests to each hosted domain will be limited to 10 requests per second per virtualhost.
- Request limits are calculated per requester IP plus the requested virtualhost.
- If the requester exceeds the limit it will receive a `429 Too Many Requests` error.
- Optional. The rate limiter will be disabled if the parameter is entirely removed from the config.
- Requests to `myhost.mydomain.com/` will be limited to 20 requests per second.
- Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`. - Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`.
- Plain HTTP to `myhost.mydomain.com/foo` will get 301 redirect to configured TLS port of Aralez. - Plain HTTP to `myhost.mydomain.com/foo` will get 301 redirect to configured TLS port of Aralez.
- Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`. - Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`.
- Requests to `myhost.mydomain.com/.well-known/acme-challenge` will be proxied to `127.0.0.1:8001`, but health checks are disabled.
- SSL/TLS for upstreams is detected automatically, no need to set any config parameter. - SSL/TLS for upstreams is detected automatically, no need to set any config parameter.
- Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS. - Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS.
- Self signed certificates are silently accepted. - Self-signed certificates are silently accepted.
- Global headers (CORS for this case) will be injected to all upstreams - Global headers (CORS for this case) will be injected to all upstreams.
- Additional headers will be injected into the request for `myhost.mydomain.com`. - Additional headers will be injected into the request for `myhost.mydomain.com`.
- You can choose any path, deep nested paths are supported, the best match chosen. - You can choose any path, deep nested paths are supported, the best match chosen.
- All requests to servers will require JWT token authentication (You can comment out the authorization to disable it), - All requests to servers will require JWT token authentication (You can comment out the authorization to disable it),
@@ -284,20 +354,33 @@ curl -u username:password -H 'Host: myip.mydomain.com' http://127.0.0.1:6193/
- Sticky session support. - Sticky session support.
- HTTP2 ready. - HTTP2 ready.
📊 Why Choose Aralez? Feature Comparison ### 🧩 Summary Table: Feature Comparison
| Feature | **Aralez** | **Nginx** | **HAProxy** | **Traefik** | | Feature / Proxy | **Aralez** | **Nginx** | **HAProxy** | **Traefik** | **Caddy** | **Envoy** |
|----------------------------|----------------------------------------------------------------------|--------------------------|-------------------------|-----------------| |----------------------------------|:-----------------:|:---------------------------:|:-----------------:|:--------------------------------:|:---------------:|:---------------:|
| **Hot Reload** | ✅ Yes (live, API/file) | ⚠️ Reloads config | ⚠️ Reloads config | ✅ Yes (dynamic) | | **Hot Reload (Zero Downtime)** | **Automatic** | ⚙️ Manual (graceful reload) | ⚙️ Manual | ✅ Automatic | ✅ Automatic | Automatic |
| **JWT Auth** | ✅ Built-in | ❌ External scripts | ❌ External Lua or agent | ⚠️ With plugins | | **Auto Cert Reload (from disk)** | ✅ **Automatic** | ❌ No | ❌ No | ✅ Automatic (Let's Encrypt only) | ✅ Automatic | ⚙️ Manual |
| **WebSocket Support** | ✅ Automatic | ⚠️ Manual config | ✅ Yes | ✅ Yes | | **Auth: Basic / API Key / JWT** | **Built-in** | ⚙️ Basic only | ⚙️ Basic only | ✅ Config-based | ✅ Config-based | ✅ Config-based |
| **gRPC Support** | ✅ Automatic (no config) | ⚠️ Manual + HTTP/2 + TLS | ⚠️ Complex setup | ✅ Native | | **TLS / HTTP2 Termination** | ✅ **Automatic** | ⚙️ Manual config | ⚙️ Manual config | ✅ Automatic | ✅ Automatic | Automatic |
| **TLS Termination** | ✅ Built-in (OpenSSL) | ✅ Yes | ✅ Yes | ✅ Yes | | **Built-in A+ TLS Grades** | **Automatic** | ⚙️ Manual tuning | ⚙️ Manual | ⚙️ Manual | ✅ Automatic | ⚙️ Manual |
| **TLS Upstream Detection** | ✅ Automatic | | ❌ | ❌ | | **gRPC Proxy** | ✅ **Zero-Config** | ⚙️ Manual setup | ⚙️ Manual | ⚙️ Needs config | ⚙️ Needs config | ⚙️ Needs config |
| **HTTP/2 Support** | ✅ Automatic | ⚠️ Requires extra config | ⚠️ Requires build flags | ✅ Native | | **SSL Proxy** | ✅ **Zero-Config** | ⚙️ Manual | ⚙️ Manual | ✅ Automatic | ✅ Automatic | ✅ Automatic |
| **Sticky Sessions** | ✅ Cookie-based | In plus version only | | | | **HTTP/2 Proxy** | ✅ **Zero-Config** | ⚙️ Manual enable | Manual enable | ✅ Automatic | ✅ Automatic | ✅ Automatic |
| **Prometheus Metrics** | ✅ [Built in](https://github.com/sadoyan/aralez/blob/main/METRICS.md) | ⚠️ With Lua or exporter | ⚠️ With external script | ✅ Native | | **WebSocket Proxy** | ✅ **Zero-Config** | ⚙️ Manual upgrade | ⚙️ Manual upgrade | ✅ Automatic | ✅ Automatic | ✅ Automatic |
| **Built With** | 🦀 Rust | C | C | Go | | **Sticky Sessions** | **Built-in** | ⚙️ Config-based | ⚙️ Config-based | ✅ Automatic | ⚙️ Limited | ✅ Config-based |
| **Prometheus Metrics** | ✅ **Built-in** | ⚙️ External exporter | ✅ Built-in | ✅ Built-in | ✅ Built-in | ✅ Built-in |
| **Consul Integration** | ✅ **Yes** | ❌ No | ⚙️ Via DNS only | ✅ Yes | ❌ No | ✅ Yes |
| **Kubernetes Integration** | ✅ **Yes** | ⚙️ Needs ingress setup | ⚙️ External | ✅ Yes | ⚙️ Limited | ✅ Yes |
| **Request Limiter** | ✅ **Yes** | ✅ Config-based | ✅ Config-based | ✅ Config-based | ✅ Config-based | ✅ Config-based |
| **Serve Static Files** | ✅ **Yes** | ✅ Yes | ⚙️ Basic | ✅ Automatic | ✅ Automatic | ❌ No |
| **Upstream Health Checks** | ✅ **Automatic** | ⚙️ Manual config | ⚙️ Manual config | ✅ Automatic | ✅ Automatic | ✅ Automatic |
| **Built With** | 🦀 **Rust** | C | C | Go | Go | C++ |
---
**Automatic / Zero-Config** — Works immediately, no setup required
⚙️ **Manual / Config-based** — Requires explicit configuration or modules
**No** — Not supported
## 💡 Simple benchmark by [Oha](https://github.com/hatoo/oha) ## 💡 Simple benchmark by [Oha](https://github.com/hatoo/oha)
@@ -443,3 +526,20 @@ Error distribution:
``` ```
![Aralez](https://netangels.net/utils/musl10.png) ![Aralez](https://netangels.net/utils/musl10.png)
## 🚀 Aralez, Nginx, Traefik performance benchmark
This benchmark is done on 4 servers. With CPU Intel(R) Xeon(R) E-2174G CPU @ 3.80GHz, 64 GB RAM.
1. Server runs Aralez, Traefik, and Nginx on different ports. Tuned as much as I could.
2. 3x upstream servers running Nginx, replying with dummy JSON hardcoded in the config file for max performance.
All servers are connected to the same switch with 1GB ports in a datacenter, not a home lab. The results:
![Aralez](https://raw.githubusercontent.com/sadoyan/aralez/refs/heads/main/assets/bench.png)
The results show requests per second performed by the load balancer. You can see 3 batches with 800 concurrent users.
1. Requests via http1.1 to a plain text endpoint.
2. Requests via http2 to an SSL endpoint.
3. Mixed workload with plain http1.1 and http2 SSL.

BIN
assets/bench.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 160 KiB

BIN
assets/bench2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

View File

@@ -1,20 +1,24 @@
# Main configuration file, applied on startup # Main configuration file, applied on startup
threads: 12 # Nubber of daemon threads default setting threads: 12 # Number of daemon threads default setting
#user: pastor # Username for running aralez after dropping root privileges, requires program to start as root #runuser: pastor # Username for running aralez after dropping root privileges, requires program to start as root
#group: pastor # Group for running aralez after dropping root privileges, requires program to start as root #rungroup: pastor # Group for running aralez after dropping root privileges, requires program to start as root
daemon: false # Run in background daemon: false # Run in background
upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections
pid_file: /tmp/aralez.pid # Path to PID file pid_file: /tmp/aralez.pid # Path to PID file
error_log: /tmp/aralez_err.log # Path to error log error_log: /tmp/aralez_err.log # Path to error log
upgrade_sock: /tmp/aralez.sock # Path to socket file upgrade_sock: /tmp/aralez.sock # Path to socket file
config_api_enabled: true # Boolean to enable/disable remote config push capability.
config_address: 0.0.0.0:3000 # HTTP API address for pushing upstreams.yaml from remote location config_address: 0.0.0.0:3000 # HTTP API address for pushing upstreams.yaml from remote location
config_tls_address: 0.0.0.0:3001 # HTTP TLS API address for pushing upstreams.yaml from remote location config_tls_address: 0.0.0.0:3001 # HTTP TLS API address for pushing upstreams.yaml from remote location
config_tls_certificate: etc/server.crt # Mandatory if config_tls_address is set config_tls_certificate: /etc/server.crt # Mandatory if config_tls_address is set
config_tls_key_file: etc/key.pem # Mandatory if config_tls_address is set config_tls_key_file: /etc/key.pem # Mandatory if config_tls_address is set
proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address
proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address
proxy_certificates: etc/yoyo # Mandatory if proxy_address_tls set, should contain certificate and key files strictly in a format {NAME}.crt, {NAME}.key. proxy_certificates: /etc/certs # Mandatory if proxy_address_tls set, should contain a certificate and key files strictly in a format {NAME}.crt, {NAME}.key.
upstreams_conf: etc/upstreams.yaml # the location of upstreams file proxy_tls_grade: a+ # Grade of TLS suite for proxy (a+, a, b, c, unsafe), matching grades of Qualys SSL Labs
upstreams_conf: /etc/upstreams.yaml # the location of upstreams file
file_server_folder: /opt/storage # Optional, local folder to serve
file_server_address: 127.0.0.1:3002 # Optional, Local address for file server. Can set as upstream for public access.
log_level: info # info, warn, error, debug, trace, off log_level: info # info, warn, error, debug, trace, off
hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE
hc_interval: 2 #Interval for health checks in seconds hc_interval: 2 #Interval for health checks in seconds

View File

@@ -1,45 +1,87 @@
# The file under watch and hot reload, changes are applied immediately, no need to restart or reload. # The file under watch and hot reload, changes are applied immediately, no need to restart or reload.
provider: "file" # consul provider: "file" # "file" "consul" "kubernetes"
sticky_sessions: false sticky_sessions: false
to_ssl: false to_https: false
headers: rate_limit: 100
server_headers:
- "X-Forwarded-Proto:https"
- "X-Forwarded-Port:443"
client_headers:
- "Access-Control-Allow-Origin:*" - "Access-Control-Allow-Origin:*"
- "Access-Control-Allow-Methods:POST, GET, OPTIONS" - "Access-Control-Allow-Methods:POST, GET, OPTIONS"
- "Access-Control-Max-Age:86400" - "Access-Control-Max-Age:86400"
- "X-Custom-Header:Something Special" #authorization:
authorization: # type: "jwt"
type: "jwt" # creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
# type: "basic" # type: "basic"
# creds: "user:Passw0rd" # creds: "username:Pa$$w0rd"
# type: "apikey" # type: "apikey"
# creds: "5ecbf799-1343-4e94-a9b5-e278af5cd313-56b45249-1839-4008-a450-a60dc76d2bae" # creds: "5ecbf799-1343-4e94-a9b5-e278af5cd313-56b45249-1839-4008-a450-a60dc76d2bae"
consul: # If the provider is consul. Otherwise, ignored. consul:
servers: servers:
- "http://master1:8500" - "http://192.168.1.199:8500"
- "http://192.168.22.1:8500" - "http://192.168.1.200:8500"
- "http://master1.foo.local:8500" - "http://192.168.1.201:8500"
services: # proxy: The hostname to access the proxy server, real : The real service name in Consul database. services: # hostname: The hostname to access the proxy server, upstream : The real service name in Consul database.
- proxy: "proxy-frontend-dev-frontend-srv" - hostname: "webapi-service"
real: "frontend-dev-frontend-srv" upstream: "webapi-service-health"
path: "/one"
client_headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
- "X-Proxy-From:Aralez"
rate_limit: 1
to_https: false
- hostname: "webapi-service"
upstream: "webapi-service-health"
path: "/"
token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled
kubernetes:
servers:
- "192.168.1.55:443" #For testing only, overridden by the KUBERNETES_SERVICE_HOST / KUBERNETES_SERVICE_PORT_HTTPS env variables.
services:
- hostname: "webapi-service"
path: "/"
upstream: "webapi-service"
- hostname: "webapi-service"
upstream: "console-service"
path: "/one"
client_headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
- "X-Proxy-From:Aralez"
rate_limit: 100
to_https: false
- hostname: "webapi-service"
upstream: "rambul-service"
path: "/two"
- hostname: "websocket-service"
upstream: "websocket-service"
path: "/"
tokenpath: "/path/to/kubetoken.txt" #If not set, will default to /var/run/secrets/kubernetes.io/serviceaccount/token
upstreams: upstreams:
myip.mydomain.com: myip.mydomain.com:
paths: paths:
"/": "/":
rate_limit: 200
to_https: false to_https: false
headers: client_headers:
- "X-Proxy-From:Gazan" - "X-Proxy-From:Aralez"
servers: # List of upstreams HOST:PORT servers:
- "127.0.0.1:8000" - "127.0.0.1:8000"
- "127.0.0.2:8000" - "127.0.0.2:8000"
- "127.0.0.3:8000" - "127.0.0.3:8000"
- "127.0.0.4:8000" - "127.0.0.4:8000"
- "127.0.0.5:8000"
"/ping": "/ping":
to_https: true authorization: # Will be ignored if global authentication is enabled.
headers: type: "basic"
creds: "admin:admin"
to_https: false
server_headers:
- "X-Forwarded-Proto:https"
- "X-Forwarded-Port:443"
client_headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa" - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
- "X-Proxy-From:Gazan" - "X-Proxy-From:Aralez"
servers: servers:
- "127.0.0.1:8000" - "127.0.0.1:8000"
- "127.0.0.2:8000" - "127.0.0.2:8000"
@@ -49,7 +91,8 @@ upstreams:
polo.mydomain.com: polo.mydomain.com:
paths: paths:
"/": "/":
headers: to_https: false
client_headers:
- "X-Some-Thing:Yaaaaaaaaaaaaaaa" - "X-Some-Thing:Yaaaaaaaaaaaaaaa"
servers: servers:
- "192.168.1.1:8000" - "192.168.1.1:8000"
@@ -58,3 +101,19 @@ upstreams:
- "127.0.0.2:8000" - "127.0.0.2:8000"
- "127.0.0.3:8000" - "127.0.0.3:8000"
- "127.0.0.4:8000" - "127.0.0.4:8000"
apt.mydomain.com:
paths:
"/":
servers:
- "192.168.1.10:443"
"/.well-known/acme-challenge":
healthcheck: false
servers:
- "127.0.0.1:8001"
rdr.mydomain.com:
paths:
"/":
redirect_to: "https://som.other.domain:6194"
healthcheck: false
servers:
- "127.0.0.1:8080"

View File

@@ -1,8 +1,10 @@
mod tls;
mod utils; mod utils;
mod web; mod web;
#[global_allocator] #[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
// pub static A: CountingAllocator = CountingAllocator;
fn main() { fn main() {
web::start::run(); web::start::run();

2
src/tls.rs Normal file
View File

@@ -0,0 +1,2 @@
pub mod grades;
pub mod load;

75
src/tls/grades.rs Normal file
View File

@@ -0,0 +1,75 @@
use log::{info, warn};
use pingora::tls::ssl::{select_next_proto, AlpnError, SslRef, SslVersion};
use pingora_core::listeners::tls::TlsSettings;
/// Colon-separated OpenSSL-style cipher selector strings, one per TLS grade.
/// Each field is passed verbatim to `set_cipher_list` in `set_tsl_grade`.
#[derive(Debug)]
pub struct CipherSuite {
    /// TLS 1.3 suites plus modern ECDHE AEAD suites (aims for a Qualys A+ grade).
    pub high: &'static str,
    /// Older ECDHE/AES suites for broader client compatibility.
    pub medium: &'static str,
    /// Permissive selector string — excludes only anonymous DH, LOW, EXPORT and MD5 suites.
    pub legacy: &'static str,
}
// The single source of truth for cipher selection; consumed by `set_tsl_grade`.
// NOTE(review): strings are in OpenSSL cipher-list syntax; `@STRENGTH` in the
// legacy selector sorts the remaining suites by key length.
const CIPHERS: CipherSuite = CipherSuite {
    high: "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305",
    medium: "ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:AES128-GCM-SHA256",
    legacy: "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH",
};
/// TLS hardening level, selected by the `proxy_tls_grade` configuration key.
/// Parsed from the strings "high", "medium" and "unsafe" (see `from_str`).
#[derive(Debug)]
pub enum TlsGrade {
    HIGH,
    MEDIUM,
    LEGACY,
}
impl TlsGrade {
    /// Parses a case-insensitive grade name from the config file.
    ///
    /// Accepts "high", "medium" and "unsafe" (mapped to `LEGACY`);
    /// returns `None` for any other value.
    pub fn from_str(s: &str) -> Option<Self> {
        let normalized = s.to_ascii_lowercase();
        if normalized == "high" {
            Some(TlsGrade::HIGH)
        } else if normalized == "medium" {
            Some(TlsGrade::MEDIUM)
        } else if normalized == "unsafe" {
            Some(TlsGrade::LEGACY)
        } else {
            None
        }
    }
}
/// ALPN selection callback: prefers HTTP/2, falling back to HTTP/1.1.
///
/// The offered list is in ALPN wire format (length-prefixed protocol names).
/// Returns `AlpnError::NOACK` when the client offers neither protocol.
pub fn prefer_h2<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
    const PREFERRED: &[u8] = b"\x02h2\x08http/1.1";
    select_next_proto(PREFERRED, alpn_in).ok_or(AlpnError::NOACK)
}
/// Applies the cipher list and minimum protocol version for `grade`
/// ("high", "medium" or "unsafe") to `tls_settings`.
///
/// An unrecognized or missing grade falls back to the MEDIUM profile with a
/// warning. Errors from the underlying SSL setters are intentionally ignored
/// (`let _ =`), matching the original best-effort behavior.
/// NOTE(review): the function name keeps its historical misspelling
/// ("tsl") because external callers reference it.
pub fn set_tsl_grade(tls_settings: &mut TlsSettings, grade: &str) {
    match TlsGrade::from_str(grade) {
        Some(TlsGrade::HIGH) => {
            // TLS 1.2 floor plus modern AEAD suites (targets an A+ rating).
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1_2));
            let _ = tls_settings.set_cipher_list(CIPHERS.high);
            info!("TLS grade: {:?}, => HIGH", tls_settings.options());
        }
        Some(TlsGrade::MEDIUM) => {
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
            let _ = tls_settings.set_cipher_list(CIPHERS.medium);
            info!("TLS grade: {:?}, => MEDIUM", tls_settings.options());
        }
        Some(TlsGrade::LEGACY) => {
            // SSLv3 floor is deliberately insecure; only reachable via the
            // explicit "unsafe" grade, hence the warning instead of info.
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::SSL3));
            let _ = tls_settings.set_cipher_list(CIPHERS.legacy);
            warn!("TLS grade: {:?}, => UNSAFE", tls_settings.options());
        }
        None => {
            // Unknown grade string in the config: default to MEDIUM.
            let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
            let _ = tls_settings.set_cipher_list(CIPHERS.medium);
            warn!("TLS grade is not detected defaulting to MEDIUM");
        }
    }
}

View File

@@ -1,16 +1,15 @@
use crate::tls::grades;
use dashmap::DashMap; use dashmap::DashMap;
use log::error; use log::error;
use pingora::tls::ssl::{select_next_proto, AlpnError, NameType, SniError, SslAlert, SslContext, SslFiletype, SslMethod, SslRef}; use pingora::tls::ssl::{NameType, SniError, SslAlert, SslContext, SslFiletype, SslMethod, SslRef};
use rustls_pemfile::{read_one, Item}; use rustls_pemfile::{read_one, Item};
use serde::Deserialize; use serde::Deserialize;
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::File; use std::fs::File;
use std::io::BufReader; use std::io::BufReader;
// use tokio::time::Instant;
use x509_parser::extensions::GeneralName; use x509_parser::extensions::GeneralName;
use x509_parser::nom::Err as NomErr; use x509_parser::nom::Err as NomErr;
use x509_parser::prelude::*; use x509_parser::prelude::*;
#[derive(Clone, Deserialize, Debug)] #[derive(Clone, Deserialize, Debug)]
pub struct CertificateConfig { pub struct CertificateConfig {
pub cert_path: String, pub cert_path: String,
@@ -37,12 +36,12 @@ pub struct Certificates {
} }
impl Certificates { impl Certificates {
pub fn new(configs: &Vec<CertificateConfig>) -> Option<Self> { pub fn new(configs: &Vec<CertificateConfig>, _grade: &str) -> Option<Self> {
let default_cert = configs.first().expect("At least one TLS certificate required"); let default_cert = configs.first().expect("At least one TLS certificate required");
let mut cert_infos = Vec::new(); let mut cert_infos = Vec::new();
let name_map: DashMap<String, SslContext> = DashMap::new(); let name_map: DashMap<String, SslContext> = DashMap::new();
for config in configs { for config in configs {
let cert_info = load_cert_info(&config.cert_path, &config.key_path); let cert_info = load_cert_info(&config.cert_path, &config.key_path, _grade);
match cert_info { match cert_info {
Some(cert) => { Some(cert) => {
for name in &cert.common_names { for name in &cert.common_names {
@@ -106,7 +105,7 @@ impl Certificates {
} }
} }
fn load_cert_info(cert_path: &str, key_path: &str) -> Option<CertificateInfo> { fn load_cert_info(cert_path: &str, key_path: &str, _grade: &str) -> Option<CertificateInfo> {
let mut common_names = HashSet::new(); let mut common_names = HashSet::new();
let mut alt_names = HashSet::new(); let mut alt_names = HashSet::new();
@@ -180,14 +179,7 @@ fn create_ssl_context(cert_path: &str, key_path: &str) -> Result<SslContext, Box
let mut ctx = SslContext::builder(SslMethod::tls())?; let mut ctx = SslContext::builder(SslMethod::tls())?;
ctx.set_certificate_chain_file(cert_path)?; ctx.set_certificate_chain_file(cert_path)?;
ctx.set_private_key_file(key_path, SslFiletype::PEM)?; ctx.set_private_key_file(key_path, SslFiletype::PEM)?;
ctx.set_alpn_select_callback(prefer_h2); ctx.set_alpn_select_callback(grades::prefer_h2);
let built = ctx.build(); let built = ctx.build();
Ok(built) Ok(built)
} }
pub fn prefer_h2<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
match select_next_proto("\x02h2\x08http/1.1".as_bytes(), alpn_in) {
Some(p) => Ok(p),
_ => Err(AlpnError::NOACK),
}
}

View File

@@ -1,11 +1,15 @@
pub mod auth; pub mod auth;
pub mod consul;
pub mod discovery; pub mod discovery;
pub mod dnsclient;
mod filewatch; mod filewatch;
pub mod fordebug;
pub mod healthcheck; pub mod healthcheck;
pub mod httpclient;
pub mod jwt; pub mod jwt;
pub mod kuberconsul;
pub mod metrics; pub mod metrics;
pub mod parceyaml; pub mod parceyaml;
pub mod state;
pub mod structs; pub mod structs;
pub mod tls;
pub mod tools; pub mod tools;
// pub mod watchksecret;

View File

@@ -1,55 +1,191 @@
use crate::utils::jwt::check_jwt; use crate::utils::jwt::check_jwt;
// use reqwest::Client;
use axum::http::StatusCode;
use base64::engine::general_purpose::STANDARD; use base64::engine::general_purpose::STANDARD;
use base64::Engine; use base64::Engine;
use pingora_proxy::Session; use pingora_proxy::Session;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, LazyLock};
use subtle::ConstantTimeEq;
use urlencoding::decode; use urlencoding::decode;
// use pingora::http::{RequestHeader, ResponseHeader, StatusCode};
use pingora::http::RequestHeader;
// --------------------------------- //
use pingora_core::connectors::http::Connector;
use pingora_core::upstreams::peer::HttpPeer;
use pingora_http::ResponseHeader;
// --------------------------------- //
#[async_trait::async_trait]
trait AuthValidator { trait AuthValidator {
fn validate(&self, session: &Session) -> bool; async fn validate(&self, session: &mut Session) -> bool;
} }
struct BasicAuth<'a>(&'a str); struct BasicAuth<'a>(&'a str);
struct ApiKeyAuth<'a>(&'a str); struct ApiKeyAuth<'a>(&'a str);
struct JwtAuth<'a>(&'a str); struct JwtAuth<'a>(&'a str);
// Forward-auth validator: the inner &str is the external auth service URL
// (http:// or https://) that each incoming request is checked against.
struct ForwardAuth<'a>(&'a str);
// Shared HTTP connector for auth sub-requests, created lazily on first use
// so connections to the auth service can be pooled and reused.
pub static AUTH_CONNECTOR: LazyLock<Connector> = LazyLock::new(|| Connector::new(None));
#[async_trait::async_trait]
impl AuthValidator for ForwardAuth<'_> {
    /// Validates the request by issuing a sub-request to the configured
    /// forward-auth URL (`self.0`). Returns `true` (authorized) on a 2xx
    /// response; on 301/302 it writes a redirect response to the client and
    /// also returns `true`; every error path returns `false`.
    async fn validate(&self, session: &mut Session) -> bool {
        // The auth probe uses HEAD only when the client used HEAD;
        // all other methods are probed with GET.
        let method = match session.req_header().method.as_str() {
            "HEAD" => "HEAD",
            _ => "GET",
        };
        let auth_url = self.0;
        // Split scheme off the auth URL; anything other than http/https is rejected.
        let (plain, tls) = if let Some(p) = auth_url.strip_prefix("http://") {
            (p, false)
        } else if let Some(p) = auth_url.strip_prefix("https://") {
            (p, true)
        } else {
            return false;
        };
        // Split "host[:port]" from the request path (default path "/").
        let (addr, uri) = if let Some(pos) = plain.find('/') {
            (&plain[..pos], &plain[pos..])
        } else {
            (plain, "/")
        };
        let hp = match split_host_port(addr, tls) {
            Some(hp) => hp,
            None => return false,
        };
        // SNI is set to the bare host; connections come from the shared pool.
        let peer = HttpPeer::new((hp.0, hp.1), tls, hp.0.to_string());
        let (mut http_session, _) = match AUTH_CONNECTOR.get_http_session(&peer).await {
            Ok(s) => s,
            Err(e) => {
                log::warn!("ForwardAuth: connect failed: {}", e);
                return false;
            }
        };
        let mut auth_req = match RequestHeader::build(method, uri.as_bytes(), None) {
            Ok(r) => r,
            Err(e) => {
                log::warn!("ForwardAuth: failed to build request: {}", e);
                return false;
            }
        };
        // auth_req.headers = session.req_header().headers.clone();
        // Only a curated subset of client headers is forwarded to the auth
        // service: Authorization and Cookie, plus X-Forwarded-* metadata.
        auth_req.insert_header("Host", addr).ok();
        auth_req.insert_header("X-Forwarded-Uri", uri).ok();
        auth_req.insert_header("X-Forwarded-Method", session.req_header().method.as_str()).ok();
        if let Some(auth) = session.req_header().headers.get("authorization") {
            auth_req.insert_header("Authorization", auth.clone()).ok();
        }
        if let Some(cookie) = session.req_header().headers.get("cookie") {
            auth_req.insert_header("Cookie", cookie.clone()).ok();
        }
        // NOTE(review): X-Forwarded-Proto reflects the AUTH URL's scheme, not
        // the client's original scheme — confirm this is intended.
        if tls {
            auth_req.insert_header("X-Forwarded-Proto", "https").ok();
        } else {
            auth_req.insert_header("X-Forwarded-Proto", "http").ok();
        }
        if let Err(e) = http_session.write_request_header(Box::new(auth_req)).await {
            log::warn!("ForwardAuth: write failed: {}", e);
            return false;
        }
        // A readable response with no parsable header is treated as a 500.
        let status = match http_session.read_response_header().await {
            Ok(_) => http_session.response_header().map(|r| r.status.as_u16()).unwrap_or(500),
            Err(e) => {
                log::warn!("ForwardAuth: read failed: {}", e);
                return false;
            }
        };
        // Collect auth-service response headers worth propagating:
        // "x-*", "remote-*" and "locat*" (the latter matching "location").
        let auth_headers_to_forward: Vec<(String, String)> = if let Some(resp_header) = http_session.response_header() {
            resp_header
                .headers
                .iter()
                .filter_map(|(name, value)| {
                    let name_str = name.as_str();
                    if name_str.starts_with("x-") || name_str.starts_with("remote-") || name_str.starts_with("locat") {
                        value.to_str().ok().map(|v| (name_str.to_string(), v.to_string()))
                    } else {
                        None
                    }
                })
                .collect()
        } else {
            Vec::new()
        };
        // Return the connection to the pool before deciding the outcome.
        AUTH_CONNECTOR.release_http_session(http_session, &peer, None).await;
        if (200..300).contains(&status) {
            // Authorized: inject the collected headers into the upstream request.
            for (name, value) in auth_headers_to_forward {
                session.req_header_mut().insert_header(name, value).ok();
            }
            true
        } else if status == 302 || status == 301 {
            // Redirect from the auth service: reply to the client directly.
            // NOTE(review): both 301 and 302 are answered with 301
            // (MOVED_PERMANENTLY), and `true` is returned even though the
            // response is already written — confirm the caller short-circuits
            // proxying after this.
            let resp = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None);
            match resp {
                Ok(mut r) => {
                    for (name, value) in auth_headers_to_forward {
                        r.insert_header(name, value).ok();
                    }
                    let _ = r.insert_header("Content-Length", "0");
                    let _ = session.write_response_header(Box::new(r), true).await;
                    true
                }
                Err(_) => return false,
            }
        } else {
            // Any other status (401, 403, 5xx, ...) denies the request.
            false
        }
    }
}
#[async_trait::async_trait]
impl AuthValidator for BasicAuth<'_> { impl AuthValidator for BasicAuth<'_> {
fn validate(&self, session: &Session) -> bool { async fn validate(&self, session: &mut Session) -> bool {
if let Some(header) = session.get_header("authorization") { if let Some(header) = session.get_header("authorization") {
if let Some((_, val)) = header.to_str().ok().unwrap().split_once(' ') { if let Some(h) = header.to_str().ok() {
let decoded = STANDARD.decode(val).ok().unwrap(); if let Some((_, val)) = h.split_once(' ') {
let decoded_str = String::from_utf8(decoded).ok().unwrap(); if let Some(decoded) = STANDARD.decode(val).ok() {
return decoded_str == self.0; if decoded.as_slice().ct_eq(self.0.as_bytes()).into() {
return true;
}
}
}
} }
} }
false false
} }
} }
#[async_trait::async_trait]
impl AuthValidator for ApiKeyAuth<'_> { impl AuthValidator for ApiKeyAuth<'_> {
fn validate(&self, session: &Session) -> bool { async fn validate(&self, session: &mut Session) -> bool {
if let Some(header) = session.get_header("x-api-key") { if let Some(header) = session.get_header("x-api-key") {
return header.to_str().ok().unwrap() == self.0; if let Some(h) = header.to_str().ok() {
return h.as_bytes().ct_eq(self.0.as_bytes()).into();
}
} }
false false
} }
} }
#[async_trait::async_trait]
impl AuthValidator for JwtAuth<'_> { impl AuthValidator for JwtAuth<'_> {
fn validate(&self, session: &Session) -> bool { async fn validate(&self, session: &mut Session) -> bool {
let jwtsecret = self.0; let jwtsecret = self.0;
if let Some(tok) = get_query_param(session, "araleztoken") { if let Some(tok) = get_query_param(session, "araleztoken") {
return check_jwt(tok.as_str(), jwtsecret); return check_jwt(tok.as_str(), jwtsecret);
} }
// if let Some(header) = session.get_header("authorization") {
// let h = header.to_str().ok().unwrap().split(" ").collect::<Vec<_>>();
// match h.len() {
// n => {
// return check_jwt(h[n - 1], jwtsecret);
// }
// }
// }
if let Some(auth_header) = session.get_header("authorization") { if let Some(auth_header) = session.get_header("authorization") {
if let Ok(header_str) = auth_header.to_str() { if let Ok(header_str) = auth_header.to_str() {
if let Some((scheme, token)) = header_str.split_once(' ') { if let Some((scheme, token)) = header_str.split_once(' ') {
@@ -62,32 +198,21 @@ impl AuthValidator for JwtAuth<'_> {
false false
} }
} }
fn validate(auth: &dyn AuthValidator, session: &Session) -> bool {
auth.validate(session)
}
pub fn authenticate(c: &[String], session: &Session) -> bool { pub async fn authenticate(auth_type: &Arc<str>, credentials: &Arc<str>, session: &mut Session) -> bool {
match c[0].as_str() { match &**auth_type {
"basic" => { "basic" => BasicAuth(credentials).validate(session).await,
let auth = BasicAuth(c[1].as_str().into()); "apikey" => ApiKeyAuth(credentials).validate(session).await,
validate(&auth, session) "jwt" => JwtAuth(credentials).validate(session).await,
} "forward" => ForwardAuth(credentials).validate(session).await,
"apikey" => {
let auth = ApiKeyAuth(c[1].as_str().into());
validate(&auth, session)
}
"jwt" => {
let auth = JwtAuth(c[1].as_str().into());
validate(&auth, session)
}
_ => { _ => {
println!("Unsupported authentication mechanism : {}", c[0]); log::warn!("Unsupported authentication mechanism : {}", auth_type);
false false
} }
} }
} }
pub fn get_query_param(session: &Session, key: &str) -> Option<String> { pub fn get_query_param(session: &mut Session, key: &str) -> Option<String> {
let query = session.req_header().uri.query()?; let query = session.req_header().uri.query()?;
let params: HashMap<_, _> = query let params: HashMap<_, _> = query
@@ -99,6 +224,24 @@ pub fn get_query_param(session: &Session, key: &str) -> Option<String> {
Some((k, v)) Some((k, v))
}) })
.collect(); .collect();
params.get(key).and_then(|v| decode(v).ok()).map(|s| s.to_string())
params.get(key).map(|v| decode(v).ok()).flatten().map(|s| s.to_string()) }
fn split_host_port(addr: &str, tls: bool) -> Option<(&str, u16, bool, &str)> {
match addr.split_once(':') {
Some((h, p)) => match p.parse::<u16>() {
Ok(port) => return Some((h, port, tls, h)),
Err(_) => {
log::warn!("ForwardAuth: invalid port in {}", addr);
return None;
}
},
None => {
if tls {
return Some((addr, 443u16, tls, addr));
} else {
return Some((addr, 80u16, tls, addr));
}
}
};
} }

View File

@@ -1,141 +0,0 @@
use crate::utils::parceyaml::load_configuration;
use crate::utils::structs::{Configuration, ServiceMapping, UpstreamsDashMap};
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps};
use dashmap::DashMap;
use futures::channel::mpsc::Sender;
use futures::SinkExt;
use log::{info, warn};
use pingora::prelude::sleep;
use rand::Rng;
use reqwest::header::{HeaderMap, HeaderValue};
use serde::Deserialize;
use std::collections::HashMap;
use std::sync::atomic::AtomicUsize;
use std::time::Duration;
/// Subset of a Consul catalog service entry. Only the tagged-address
/// map is deserialized; every other JSON field is ignored.
#[derive(Debug, Deserialize)]
struct Service {
    // Keyed by network tag — `get_by_http` reads the "lan_ipv4" entry.
    #[serde(rename = "ServiceTaggedAddresses")]
    tagged_addresses: HashMap<String, TaggedAddress>,
}
/// One tagged address from Consul: a host plus its service port.
#[derive(Debug, Deserialize)]
struct TaggedAddress {
    #[serde(rename = "Address")]
    address: String,
    #[serde(rename = "Port")]
    port: u16,
}
pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
let config = load_configuration(fp.as_str(), "filepath");
let headers = DashMap::new();
match config {
Some(config) => {
if config.typecfg.to_string() != "consul" {
info!("Not running Consul discovery, requested type is: {}", config.typecfg);
return;
}
info!("Consul Discovery is enabled : {}", config.typecfg);
let consul = config.consul.clone();
let prev_upstreams = UpstreamsDashMap::new();
match consul {
Some(consul) => {
let servers = consul.servers.unwrap();
info!("Consul Servers => {:?}", servers);
let end = servers.len();
loop {
let num = rand::rng().random_range(1..end);
headers.clear();
for (k, v) in config.headers.clone() {
headers.insert(k.to_string(), v);
}
let consul_data = servers.get(num).unwrap().to_string();
let upstreams = consul_request(consul_data, consul.services.clone(), consul.token.clone());
match upstreams.await {
Some(upstreams) => {
if !compare_dashmaps(&upstreams, &prev_upstreams) {
let mut tosend: Configuration = Configuration {
upstreams: Default::default(),
headers: Default::default(),
consul: None,
typecfg: "".to_string(),
extraparams: config.extraparams.clone(),
};
clone_dashmap_into(&upstreams, &prev_upstreams);
clone_dashmap_into(&upstreams, &tosend.upstreams);
tosend.headers = headers.clone();
tosend.extraparams.authentication = config.extraparams.authentication.clone();
tosend.typecfg = config.typecfg.clone();
tosend.consul = config.consul.clone();
toreturn.send(tosend).await.unwrap();
}
}
None => {}
}
sleep(Duration::from_secs(5)).await;
}
}
None => {}
}
}
None => {}
}
}
/// Queries the Consul catalog for every whitelisted service and builds
/// an upstreams map keyed by the proxy-side service name.
///
/// A `None` whitelist yields an empty (but `Some`) map; individual
/// lookup failures are logged and skipped.
async fn consul_request(url: String, whitelist: Option<Vec<ServiceMapping>>, token: Option<String>) -> Option<UpstreamsDashMap> {
    let upstreams = UpstreamsDashMap::new();
    let base = format!("{}/v1/catalog/service/", url);
    if let Some(whitelist) = whitelist {
        for mapping in &whitelist {
            let endpoint = format!("{}{}", base, mapping.real);
            match get_by_http(endpoint, token.clone()).await {
                Some(list) => {
                    upstreams.insert(mapping.proxy.clone(), list);
                }
                None => warn!("Whitelist not found for {}", mapping.proxy),
            }
        }
    }
    Some(upstreams)
}
async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<(String, u16, bool, bool, bool)>, AtomicUsize)>> {
let client = reqwest::Client::new();
let mut headers = HeaderMap::new();
if let Some(token) = token {
headers.insert("X-Consul-Token", HeaderValue::from_str(&token).unwrap());
}
let to = Duration::from_secs(1);
let u = client.get(url).timeout(to).send();
let mut values = Vec::new();
let upstreams: DashMap<String, (Vec<(String, u16, bool, bool, bool)>, AtomicUsize)> = DashMap::new();
match u.await {
Ok(r) => {
let jason = r.json::<Vec<Service>>().await;
match jason {
Ok(whitelist) => {
for service in whitelist {
let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
let to_add = (addr, prt, false, false, false);
values.push(to_add);
}
}
Err(_) => return None,
}
}
Err(_) => return None,
}
upstreams.insert("/".to_string(), (values, AtomicUsize::new(0)));
Some(upstreams)
}

View File

@@ -1,23 +1,34 @@
use crate::utils::consul;
use crate::utils::filewatch; use crate::utils::filewatch;
use crate::utils::structs::Configuration; use crate::utils::kuberconsul::{ConsulDiscovery, KubernetesDiscovery, ServiceDiscovery};
use crate::utils::structs::{Configuration, UpstreamsDashMap};
use crate::web::webserver; use crate::web::webserver;
use async_trait::async_trait; use async_trait::async_trait;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use std::sync::Arc;
pub struct APIUpstreamProvider {
pub config_api_enabled: bool,
pub address: String,
pub masterkey: String,
// pub tls_address: Option<String>,
// pub tls_certificate: Option<String>,
// pub tls_key_file: Option<String>,
pub file_server_address: Option<String>,
pub file_server_folder: Option<String>,
pub current_upstreams: Arc<UpstreamsDashMap>,
pub full_upstreams: Arc<UpstreamsDashMap>,
}
pub struct FromFileProvider { pub struct FromFileProvider {
pub path: String, pub path: String,
} }
pub struct APIUpstreamProvider {
pub address: String,
pub masterkey: String,
pub tls_address: Option<String>,
pub tls_certificate: Option<String>,
pub tls_key_file: Option<String>,
}
pub struct ConsulProvider { pub struct ConsulProvider {
pub path: String, pub config: Arc<Configuration>,
}
pub struct KubernetesProvider {
pub config: Arc<Configuration>,
} }
#[async_trait] #[async_trait]
@@ -28,7 +39,7 @@ pub trait Discovery {
#[async_trait] #[async_trait]
impl Discovery for APIUpstreamProvider { impl Discovery for APIUpstreamProvider {
async fn start(&self, toreturn: Sender<Configuration>) { async fn start(&self, toreturn: Sender<Configuration>) {
webserver::run_server(self, toreturn).await; webserver::run_server(self, toreturn, self.current_upstreams.clone(), self.full_upstreams.clone()).await;
} }
} }
@@ -42,6 +53,13 @@ impl Discovery for FromFileProvider {
#[async_trait] #[async_trait]
impl Discovery for ConsulProvider { impl Discovery for ConsulProvider {
async fn start(&self, tx: Sender<Configuration>) { async fn start(&self, tx: Sender<Configuration>) {
tokio::spawn(consul::start(self.path.clone(), tx.clone())); tokio::spawn(ConsulDiscovery.fetch_upstreams(self.config.clone(), tx));
}
}
#[async_trait]
impl Discovery for KubernetesProvider {
async fn start(&self, tx: Sender<Configuration>) {
tokio::spawn(KubernetesDiscovery.fetch_upstreams(self.config.clone(), tx));
} }
} }

159
src/utils/dnsclient.rs Normal file
View File

@@ -0,0 +1,159 @@
/*
use crate::utils::structs::InnerMap;
use dashmap::DashMap;
use hickory_client::client::{Client, ClientHandle};
use hickory_client::proto::rr::{DNSClass, Name, RecordType};
use hickory_client::proto::runtime::TokioRuntimeProvider;
use hickory_client::proto::udp::UdpClientStream;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::AtomicUsize;
use std::time::Duration;
use tokio::sync::Mutex;
type DnsError = Box<dyn std::error::Error + Send + Sync + 'static>;
pub struct DnsClientPool {
clients: Vec<Mutex<DnsClient>>,
}
struct DnsClient {
client: Client,
}
pub async fn start2(mut toreturn: Sender<Configuration>, config: Arc<Configuration>) {
let k8s = config.kubernetes.clone();
match k8s {
Some(k8s) => {
let dnserver = k8s.servers.unwrap_or(vec!["127.0.0.1:53".to_string()]);
let headers = DashMap::new();
let end = dnserver.len() - 1;
let mut num = 0;
if end > 0 {
num = rand::rng().random_range(0..end);
}
let srv = dnserver.get(num).unwrap().to_string();
let pool = DnsClientPool::new(5, srv.clone()).await;
let u = UpstreamsDashMap::new();
if let Some(whitelist) = k8s.services {
loop {
let upstreams = UpstreamsDashMap::new();
for service in whitelist.iter() {
let ret = pool.query_srv(service.real.as_str(), srv.clone()).await;
match ret {
Ok(r) => {
upstreams.insert(service.proxy.clone(), r);
}
Err(e) => eprintln!("DNS query failed for {:?}: {:?}", service, e),
}
}
if !compare_dashmaps(&u, &upstreams) {
headers.clear();
for (k, v) in config.headers.clone() {
headers.insert(k.to_string(), v);
}
let mut tosend: Configuration = Configuration {
upstreams: Default::default(),
headers: Default::default(),
consul: None,
kubernetes: None,
typecfg: "".to_string(),
extraparams: config.extraparams.clone(),
};
clone_dashmap_into(&upstreams, &u);
clone_dashmap_into(&upstreams, &tosend.upstreams);
tosend.headers = headers.clone();
tosend.extraparams.authentication = config.extraparams.authentication.clone();
tosend.typecfg = config.typecfg.clone();
tosend.consul = config.consul.clone();
print_upstreams(&tosend.upstreams);
toreturn.send(tosend).await.unwrap();
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
None => {}
}
}
impl DnsClient {
pub async fn new(server: String) -> Result<Self, DnsError> {
let server_details = server;
let server: SocketAddr = server_details.parse().expect("Unable to parse socket address");
let conn = UdpClientStream::builder(server, TokioRuntimeProvider::default()).build();
let (client, bg) = Client::connect(conn).await.unwrap();
tokio::spawn(bg);
Ok(Self { client })
}
pub async fn query_srv(&mut self, name: &str) -> Result<DashMap<String, (Vec<InnerMap>, AtomicUsize)>, DnsError> {
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
let mut values = Vec::new();
match tokio::time::timeout(Duration::from_secs(5), self.client.query(Name::from_str(name)?, DNSClass::IN, RecordType::SRV)).await {
Ok(Ok(response)) => {
for answer in response.answers() {
if let hickory_client::proto::rr::RData::SRV(srv) = answer.data() {
let to_add = InnerMap {
address: srv.target().to_string(),
port: srv.port(),
is_ssl: false,
is_http2: false,
to_https: false,
sticky_sessions: false,
rate_limit: None,
};
values.push(to_add);
}
}
upstreams.insert("/".to_string(), (values, AtomicUsize::new(0)));
Ok(upstreams)
}
Ok(Err(e)) => Err(Box::new(e)),
Err(_) => Err("DNS query timed out".into()),
}
}
}
impl DnsClientPool {
pub async fn new(pool_size: usize, server: String) -> Self {
let mut clients = Vec::with_capacity(pool_size);
for _ in 0..pool_size {
if let Ok(client) = DnsClient::new(server.clone()).await {
clients.push(Mutex::new(client));
}
}
Self { clients }
}
pub async fn query_srv(&self, name: &str, server: String) -> Result<DashMap<String, (Vec<InnerMap>, AtomicUsize)>, DnsError> {
// Try to get an available client
for client_mutex in &self.clients {
if let Ok(mut client) = client_mutex.try_lock() {
let vay = client.query_srv(name).await;
match vay {
Ok(_) => return vay,
Err(_) => {
// If query fails, drop this client and create a new one
*client = match DnsClient::new(server).await {
Ok(c) => c,
Err(e) => return Err(e),
};
// Retry with the new client
return client.query_srv(name).await;
}
}
}
}
// If all clients are busy, wait for the first one with a timeout
match tokio::time::timeout(Duration::from_secs(2), self.clients[0].lock()).await {
Ok(mut client) => client.query_srv(name).await,
Err(_) => Err("All DNS clients are busy and timeout reached".into()),
}
}
}
*/

View File

@@ -2,7 +2,7 @@ use crate::utils::parceyaml::load_configuration;
use crate::utils::structs::Configuration; use crate::utils::structs::Configuration;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use futures::SinkExt; use futures::SinkExt;
use log::{error, info, warn}; use log::error;
use notify::event::ModifyKind; use notify::event::ModifyKind;
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher}; use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use pingora::prelude::sleep; use pingora::prelude::sleep;
@@ -15,19 +15,7 @@ pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
let file_path = fp.as_str(); let file_path = fp.as_str();
let parent_dir = Path::new(file_path).parent().unwrap(); let parent_dir = Path::new(file_path).parent().unwrap();
let (local_tx, mut local_rx) = tokio::sync::mpsc::channel::<notify::Result<Event>>(1); let (local_tx, mut local_rx) = tokio::sync::mpsc::channel::<notify::Result<Event>>(1);
let snd = load_configuration(file_path, "filepath");
match snd {
Some(snd) => {
if snd.typecfg != "file" {
warn!("Disabling file watcher, requested discovery type is: {}", snd.typecfg);
return;
}
info!("Watching for changes in {:?}", parent_dir);
toreturn.send(snd).await.unwrap();
}
None => {}
}
let _watcher_handle = task::spawn_blocking({ let _watcher_handle = task::spawn_blocking({
let parent_dir = parent_dir.to_path_buf(); // Move directory path into the closure let parent_dir = parent_dir.to_path_buf(); // Move directory path into the closure
move || { move || {
@@ -53,7 +41,7 @@ pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
if start.elapsed() > Duration::from_secs(2) { if start.elapsed() > Duration::from_secs(2) {
start = Instant::now(); start = Instant::now();
// info!("Config File changed :=> {:?}", e); // info!("Config File changed :=> {:?}", e);
let snd = load_configuration(file_path, "filepath"); let snd = load_configuration(file_path, "filepath").await.0;
match snd { match snd {
Some(snd) => { Some(snd) => {
toreturn.send(snd).await.unwrap(); toreturn.send(snd).await.unwrap();

31
src/utils/fordebug.rs Normal file
View File

@@ -0,0 +1,31 @@
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};
/// Counting facade over the system allocator — enable it as the
/// `#[global_allocator]` to profile allocation churn.
pub struct CountingAllocator;
// Total number of `alloc` calls since process start.
pub static ALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
// Total number of `dealloc` calls since process start.
pub static DEALLOC_COUNT: AtomicUsize = AtomicUsize::new(0);
// Cumulative bytes requested via `alloc` (freed bytes are never subtracted).
pub static ALLOC_BYTES: AtomicUsize = AtomicUsize::new(0);
#[allow(dead_code)]
// Bumps the global counters, then delegates to `System`. Relaxed
// ordering suffices: the counters are statistics only and play no
// synchronization role.
unsafe impl GlobalAlloc for CountingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        ALLOC_COUNT.fetch_add(1, Ordering::Relaxed);
        ALLOC_BYTES.fetch_add(layout.size(), Ordering::Relaxed);
        // SAFETY: `layout` is forwarded unchanged; the caller upholds
        // the `GlobalAlloc` contract (non-zero-sized layout).
        System.alloc(layout)
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // NOTE(review): freed bytes are not subtracted from ALLOC_BYTES,
        // so it tracks cumulative, not live, allocation volume.
        DEALLOC_COUNT.fetch_add(1, Ordering::Relaxed);
        // SAFETY: `ptr`/`layout` come from a matching `alloc`, as the
        // `GlobalAlloc` contract requires of the caller.
        System.dealloc(ptr, layout)
    }
}
// Uncomment following lines and comment allocator in main.rs
// #[global_allocator]
// pub static A: CountingAllocator = CountingAllocator;
#[allow(dead_code)]
fn for_example() {
let before = crate::utils::fordebug::ALLOC_COUNT.load(Ordering::Relaxed);
let after = crate::utils::fordebug::ALLOC_COUNT.load(Ordering::Relaxed);
println!("Allocations : {}", after - before);
}

View File

@@ -1,7 +1,7 @@
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap}; use crate::utils::structs::{InnerMap, UpstreamsDashMap, UpstreamsIdMap};
use crate::utils::tools::*; use crate::utils::tools::*;
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info, warn}; use log::{error, warn};
use reqwest::{Client, Version}; use reqwest::{Client, Version};
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::Arc; use std::sync::Arc;
@@ -11,143 +11,139 @@ use tonic::transport::Endpoint;
pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>, idlist: Arc<UpstreamsIdMap>, params: (&str, u64)) { pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>, idlist: Arc<UpstreamsIdMap>, params: (&str, u64)) {
let mut period = interval(Duration::from_secs(params.1)); let mut period = interval(Duration::from_secs(params.1));
let mut first_run = 0; let client = Client::builder().timeout(Duration::from_secs(params.1)).danger_accept_invalid_certs(true).build().unwrap();
loop { loop {
tokio::select! { tokio::select! {
_ = period.tick() => { _ = period.tick() => {
let totest : UpstreamsDashMap = DashMap::new(); // populate_upstreams(&upslist, &fullist, &idlist, params, &client).await;
let fclone : UpstreamsDashMap = clone_dashmap(&fullist); let totest = build_upstreams(&fullist, params.0, &client).await;
for val in fclone.iter() {
let host = val.key();
let inner = DashMap::new();
let mut _scheme: (String, u16, bool, bool, bool) = ("".to_string(), 0, false, false, false);
for path_entry in val.value().iter() {
// let inner = DashMap::new();
let path = path_entry.key();
let mut innervec= Vec::new();
for k in path_entry.value().0 .iter().enumerate() {
let (ip, port, _ssl, _version, _redir) = k.1;
let mut _link = String::new();
let tls = detect_tls(ip, port).await;
let mut is_h2 = false;
// if tls.1 == Some(Version::HTTP_11) {
// println!(" V1: ==> {:?}", tls.1)
// }else if tls.1 == Some(Version::HTTP_2) {
// is_h2 = true;
// println!(" V2: ==> {:?}", tls.1)
// }
if tls.1 == Some(Version::HTTP_2) {
is_h2 = true;
// println!(" V2: ==> {} ==> {:?}", tls.0, tls.1)
}
match tls.0 {
true => _link = format!("https://{}:{}{}", ip, port, path),
false => _link = format!("http://{}:{}{}", ip, port, path),
}
// if _pref == "https://" {
// _scheme = (ip.to_string(), *port, true);
// }else {
// _scheme = (ip.to_string(), *port, false);
// }
_scheme = (ip.to_string(), *port, tls.0, is_h2, *_redir);
// let link = format!("{}{}:{}{}", _pref, ip, port, path);
let resp = http_request(_link.as_str(), params.0, "").await;
match resp.0 {
true => {
if resp.1 {
_scheme = (ip.to_string(), *port, tls.0, true, *_redir);
}
innervec.push(_scheme.clone());
}
false => {
warn!("Dead Upstream : {}", _link);
}
}
}
inner.insert(path.clone().to_owned(), (innervec, AtomicUsize::new(0)));
}
totest.insert(host.clone(), inner);
}
if first_run == 1 {
info!("Performing initial hatchecks and upstreams ssl detection");
clone_idmap_into(&totest, &idlist);
info!("Aralez is up and ready to serve requests, the upstreams list is:");
print_upstreams(&totest)
}
first_run+=1;
if !compare_dashmaps(&totest, &upslist) { if !compare_dashmaps(&totest, &upslist) {
clone_dashmap_into(&totest, &upslist); clone_dashmap_into(&totest, &upslist);
clone_idmap_into(&totest, &idlist); clone_idmap_into(&totest, &idlist);
} }
} }
} }
} }
} }
#[allow(dead_code)] /*
async fn http_request(url: &str, method: &str, payload: &str) -> (bool, bool) { pub async fn populate_upstreams(upslist: &Arc<UpstreamsDashMap>, fullist: &Arc<UpstreamsDashMap>, idlist: &Arc<UpstreamsIdMap>, params: (&str, u64), client: &Client) {
let client = Client::builder().danger_accept_invalid_certs(true).build().unwrap(); let totest = build_upstreams(fullist, params.0, client).await;
let timeout = Duration::from_secs(1); if !compare_dashmaps(&totest, upslist) {
clone_dashmap_into(&totest, upslist);
clone_idmap_into(&totest, idlist);
}
}
*/
pub async fn initiate_upstreams(fullist: UpstreamsDashMap) -> UpstreamsDashMap {
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap();
build_upstreams(&fullist, "HEAD", &client).await
}
async fn build_upstreams(fullist: &UpstreamsDashMap, method: &str, client: &Client) -> UpstreamsDashMap {
let totest: UpstreamsDashMap = DashMap::new();
let fclone = clone_dashmap(fullist);
for val in fclone.iter() {
let host = val.key();
let inner = DashMap::new();
for path_entry in val.value().iter() {
let path = path_entry.key();
let mut innervec = Vec::new();
for (_, upstream) in path_entry.value().0.iter().enumerate() {
let tls = detect_tls(&upstream.address.to_string(), &upstream.port, &client).await;
let is_h2 = matches!(tls.1, Some(Version::HTTP_2));
let link = if tls.0 {
format!("https://{}:{}{}", upstream.address, upstream.port, path)
} else {
format!("http://{}:{}{}", upstream.address, upstream.port, path)
};
let mut scheme = InnerMap {
address: upstream.address.clone(),
port: upstream.port,
is_ssl: tls.0,
is_http2: is_h2,
to_https: upstream.to_https,
rate_limit: upstream.rate_limit,
healthcheck: upstream.healthcheck,
redirect_to: upstream.redirect_to.clone(),
authorization: upstream.authorization.clone(),
};
if scheme.healthcheck.unwrap_or(true) {
let resp = http_request(&link, method, "", &client).await;
if resp.0 {
if resp.1 {
scheme.is_http2 = is_h2; // could be adjusted further
}
innervec.push(Arc::from(scheme));
} else {
warn!("Dead Upstream : {}", link);
}
} else {
innervec.push(Arc::from(scheme));
}
}
inner.insert(path.clone(), (innervec, AtomicUsize::new(0)));
}
totest.insert(host.clone(), inner);
}
totest
}
async fn http_request(url: &str, method: &str, payload: &str, client: &Client) -> (bool, bool) {
if !["POST", "GET", "HEAD"].contains(&method) { if !["POST", "GET", "HEAD"].contains(&method) {
error!("Method {} not supported. Only GET|POST|HEAD are supported ", method); error!("Method {} not supported. Only GET|POST|HEAD are supported ", method);
return (false, false); return (false, false);
} }
async fn send_request(client: &Client, method: &str, url: &str, payload: &str, timeout: Duration) -> Option<reqwest::Response> { async fn send_request(client: &Client, method: &str, url: &str, payload: &str) -> Option<reqwest::Response> {
match method { match method {
"POST" => client.post(url).body(payload.to_owned()).timeout(timeout).send().await.ok(), "POST" => client.post(url).body(payload.to_owned()).send().await.ok(),
"GET" => client.get(url).timeout(timeout).send().await.ok(), "GET" => client.get(url).send().await.ok(),
"HEAD" => client.head(url).timeout(timeout).send().await.ok(), "HEAD" => client.head(url).send().await.ok(),
_ => None, _ => None,
} }
} }
match send_request(&client, method, url, payload, timeout).await { match send_request(&client, method, url, payload).await {
Some(response) => { Some(response) => {
let status = response.status().as_u16(); let status = response.status().as_u16();
((99..499).contains(&status), false) ((99..499).contains(&status), false)
} }
None => { None => (ping_grpc(&url).await, true),
// let fallback_url = url.replace("https", "http");
// ping_grpc(&fallback_url).await
(ping_grpc(&url).await, true)
}
} }
} }
pub async fn ping_grpc(addr: &str) -> bool { pub async fn ping_grpc(addr: &str) -> bool {
let endpoint_result = Endpoint::from_shared(addr.to_owned()); let endpoint = match Endpoint::from_shared(addr.to_owned()) {
Ok(e) => e.timeout(Duration::from_secs(2)),
if let Ok(endpoint) = endpoint_result { Err(_) => return false,
let endpoint = endpoint.timeout(Duration::from_secs(2)); };
tokio::time::timeout(Duration::from_secs(3), endpoint.connect()).await.ok().and_then(Result::ok).is_some()
match tokio::time::timeout(Duration::from_secs(3), endpoint.connect()).await {
Ok(Ok(_channel)) => {
// println!("{:?} ==> {:?} ==> {}", endpoint, _channel, addr);
true
}
_ => false,
}
} else {
false
}
} }
async fn detect_tls(ip: &str, port: &u16) -> (bool, Option<Version>) { async fn detect_tls(ip: &str, port: &u16, client: &Client) -> (bool, Option<Version>) {
let url = format!("https://{}:{}", ip, port); let https_url = format!("https://{}:{}", ip, port);
// let url = format!("{}:{}", ip, port); match client.get(&https_url).send().await {
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap(); Ok(response) => {
match client.get(&url).send().await { // println!("{} => {:?} (HTTPS)", https_url, response.version());
Ok(response) => (true, Some(response.version())), return (true, Some(response.version()));
Err(e) => { }
if e.is_builder() || e.is_connect() || e.to_string().contains("tls") { _ => {}
(false, None) }
let http_url = format!("http://{}:{}", ip, port);
match client.get(&http_url).send().await {
Ok(response) => {
// println!("{} => {:?} (HTTP)", http_url, response.version());
(false, Some(response.version()))
}
Err(_) => {
if ping_grpc(&http_url).await {
(false, Some(Version::HTTP_2))
} else { } else {
(false, None) (false, None)
} }

86
src/utils/httpclient.rs Normal file
View File

@@ -0,0 +1,86 @@
use crate::utils::kuberconsul::{match_path, ConsulService, KubeEndpoints};
use crate::utils::structs::{GlobalServiceMapping, InnerMap};
use axum::http::{HeaderMap, HeaderValue};
use dashmap::DashMap;
use reqwest::Client;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::time::Duration;
/// Queries the Consul catalog endpoint at `url` and converts each
/// returned service into an `InnerMap` upstream, grouped into paths via
/// `match_path` according to `conf`.
///
/// Returns `None` on client-build, HTTP, or deserialization failure.
pub async fn for_consul(url: String, token: Option<String>, conf: &GlobalServiceMapping) -> Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>> {
    let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().ok()?;
    let mut headers = HeaderMap::new();
    if let Some(token) = token {
        match HeaderValue::from_str(&token) {
            Ok(value) => {
                headers.insert("X-Consul-Token", value);
            }
            // Avoid the panic the old `.unwrap()` caused on a
            // non-ASCII token value.
            Err(_) => eprintln!("Invalid X-Consul-Token value, sending request without it"),
        }
    }
    // BUGFIX: the header map was built but never attached to the
    // request, so the Consul ACL token was silently dropped.
    let resp = client.get(url).headers(headers).timeout(Duration::from_secs(1)).send().await.ok()?;
    if !resp.status().is_success() {
        eprintln!("Consul API returned status: {}", resp.status());
        return None;
    }
    let endpoints: Vec<ConsulService> = resp.json().await.ok()?;
    let upstreams: DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)> = DashMap::new();
    let mut inner_vec = Vec::new();
    for service in endpoints {
        // Skip services without a "lan_ipv4" tagged address instead of
        // panicking on `unwrap`.
        let Some(tagged) = service.tagged_addresses.get("lan_ipv4") else {
            continue;
        };
        inner_vec.push(Arc::from(InnerMap {
            address: Arc::from(&*tagged.address),
            port: tagged.port,
            is_ssl: false,
            is_http2: false,
            to_https: conf.to_https.unwrap_or(false),
            rate_limit: conf.rate_limit,
            redirect_to: None,
            healthcheck: None,
            authorization: None,
        }));
    }
    match_path(&conf, &upstreams, inner_vec);
    Some(upstreams)
}
/// Queries a Kubernetes `Endpoints` object at `url` (bearer-token auth)
/// and converts every address/port combination into `InnerMap`
/// upstreams, grouped into paths via `match_path` per subset.
///
/// Returns `None` on client-build, HTTP, or deserialization failure.
pub async fn for_kuber(url: &str, token: &str, conf: &GlobalServiceMapping) -> Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>> {
    let client = Client::builder().timeout(Duration::from_secs(10)).danger_accept_invalid_certs(true).build().ok()?;
    let resp = client.get(url).timeout(Duration::from_secs(10)).bearer_auth(token).send().await.ok()?;
    if !resp.status().is_success() {
        eprintln!("Kubernetes API returned status: {}", resp.status());
        return None;
    }
    let endpoints: KubeEndpoints = resp.json().await.ok()?;
    let upstreams: DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)> = DashMap::new();
    // An absent `subsets` field behaves like an empty list.
    for subset in endpoints.subsets.unwrap_or_default() {
        let (Some(addresses), Some(ports)) = (subset.addresses, subset.ports) else {
            continue;
        };
        // One vec per subset: `match_path` is applied per subset, as before.
        let mut inner_vec = Vec::new();
        for addr in addresses {
            for port in &ports {
                inner_vec.push(Arc::from(InnerMap {
                    address: Arc::from(addr.ip.clone()),
                    port: port.port,
                    is_ssl: false,
                    is_http2: false,
                    to_https: conf.to_https.unwrap_or(false),
                    rate_limit: conf.rate_limit,
                    healthcheck: None,
                    redirect_to: None,
                    authorization: None,
                }));
            }
        }
        match_path(&conf, &upstreams, inner_vec);
    }
    Some(upstreams)
}

View File

@@ -1,16 +1,87 @@
use ahash::AHasher;
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _};
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation}; use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use moka::sync::Cache;
use moka::Expiry;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::hash::{Hash, Hasher};
use std::sync::LazyLock;
use std::time::{Duration, Instant, SystemTime};
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub(crate) struct Claims { pub struct Claims {
pub(crate) user: String, pub master_key: String,
pub(crate) exp: u64, pub owner: String,
pub exp: u64,
pub random: Option<String>,
}
/// Minimal claim view used by `is_expired` for a cheap pre-check:
/// only the `exp` field is decoded and the signature is NOT verified.
#[derive(Debug, Deserialize)]
struct Expired {
    exp: Option<u64>,
}
// Shared HS256 validation settings, built once.
static JWT_VALIDATION: LazyLock<Validation> = LazyLock::new(|| Validation::new(Algorithm::HS256));
// Cache of already-verified tokens: key = ahash of (token, secret),
// value = the token's `exp` (unix seconds). Entries expire at `exp`
// via the `JwtExpiry` policy.
static JWT_CACHE: LazyLock<Cache<u64, u64>> = LazyLock::new(|| Cache::builder().max_capacity(100_000).expire_after(JwtExpiry).build());
/// Per-entry TTL policy for `JWT_CACHE`: each cached token lives until
/// its own `exp` timestamp; already-expired tokens get a zero TTL.
struct JwtExpiry;
impl Expiry<u64, u64> for JwtExpiry {
    fn expire_after_create(&self, _key: &u64, value: &u64, _current_time: Instant) -> Option<Duration> {
        // `value` is the token's `exp` in unix seconds; the TTL is the
        // remaining lifetime, clamped to zero when already past.
        let now = SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs();
        Some(Duration::from_secs(value.saturating_sub(now)))
    }
}
pub fn check_jwt(token: &str, secret: &str) -> bool {
let key = hash_token(token, secret);
let now = SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs();
if let Some(exp) = JWT_CACHE.get(&key) {
if exp < now {
return false;
}
return true;
}
match is_expired(token, now) {
Ok(true) => return false,
Ok(false) => {}
Err(_) => return false,
}
match decode::<Claims>(token, &DecodingKey::from_secret(secret.as_ref()), &JWT_VALIDATION) {
Ok(data) => {
let now = SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs();
if data.claims.exp > now {
JWT_CACHE.insert(key, data.claims.exp);
true
} else {
false
}
} }
pub fn check_jwt(input: &str, secret: &str) -> bool {
let validation = Validation::new(Algorithm::HS256);
let token_data = decode::<Claims>(&input, &DecodingKey::from_secret(secret.as_ref()), &validation);
match token_data {
Ok(_) => true,
Err(_) => false, Err(_) => false,
} }
} }
/// Cheap expiry pre-check: base64-decodes only the JWT payload (no signature
/// verification) and compares its `exp` claim against `now`.
/// Returns Err on malformed tokens; a missing `exp` counts as expired.
fn is_expired(token: &str, now: u64) -> Result<bool, Box<dyn std::error::Error>> {
    // A well-formed JWT is exactly header.payload.signature.
    let mut segments = token.split('.');
    let payload = match (segments.next(), segments.next(), segments.next(), segments.next()) {
        (Some(_), Some(p), Some(_), None) => p,
        _ => return Err("Invalid JWT format".into()),
    };
    let decoded = URL_SAFE_NO_PAD.decode(payload)?;
    let claims: Expired = serde_json::from_slice(&decoded)?;
    Ok(claims.exp.map_or(true, |exp| exp < now))
}
/// Cache key for JWT_CACHE: a fast ahash over (token, secret).
/// Including the secret means the same token under a different secret
/// cannot produce a cache hit.
fn hash_token(token: &str, secret: &str) -> u64 {
    let mut state = AHasher::default();
    // Tuple Hash feeds the fields in order — same byte stream as two separate writes.
    (token, secret).hash(&mut state);
    state.finish()
}

228
src/utils/kuberconsul.rs Normal file
View File

@@ -0,0 +1,228 @@
use crate::utils::httpclient;
use crate::utils::parceyaml::build_headers;
use crate::utils::structs::{Configuration, GlobalServiceMapping, InnerMap, UpstreamsDashMap};
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams};
use async_trait::async_trait;
use dashmap::DashMap;
use futures::channel::mpsc::Sender;
use futures::SinkExt;
use pingora::prelude::sleep;
use rand::RngExt;
use serde::Deserialize;
use std::collections::HashMap;
use std::env;
use std::fs;
use std::path::Path;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::time::Duration;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
/// Deserialized view of a Kubernetes Endpoints API object (only the fields we read).
#[derive(Debug, serde::Deserialize)]
pub struct KubeEndpoints {
    /// Endpoint subsets; `None` when the service has no ready endpoints.
    pub subsets: Option<Vec<KubeSubset>>,
}
/// One subset: a group of addresses that all expose the same ports.
#[derive(Debug, serde::Deserialize)]
pub struct KubeSubset {
    pub addresses: Option<Vec<KubeAddress>>,
    pub ports: Option<Vec<KubePort>>,
}
/// A single endpoint IP address.
#[derive(Debug, serde::Deserialize)]
pub struct KubeAddress {
    pub ip: String,
}
/// A single port exposed by an endpoint subset.
#[derive(Debug, serde::Deserialize)]
pub struct KubePort {
    pub port: u16,
}
/// Deserialized entry from Consul's `/v1/catalog/service/<name>` response;
/// only the tagged-addresses map is used.
#[derive(Debug, Deserialize)]
pub struct ConsulService {
    #[serde(rename = "ServiceTaggedAddresses")]
    pub tagged_addresses: HashMap<String, ConsulTaggedAddress>,
}
/// One tagged address: the backend's address and port.
#[derive(Debug, Deserialize)]
pub struct ConsulTaggedAddress {
    #[serde(rename = "Address")]
    pub address: String,
    #[serde(rename = "Port")]
    pub port: u16,
}
/// Merges a freshly-discovered per-path server map into the shared upstream table
/// under the hostname configured in `i`. A `None` list is a no-op.
///
/// Fixes two redundant allocations from the original: the hostname was cloned
/// just to look it up, and each already-owned `Arc<str>` key was re-cloned via
/// `Arc::from(k.to_owned())` before insertion.
pub fn list_to_upstreams(lt: Option<DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>>, upstreams: &UpstreamsDashMap, i: &GlobalServiceMapping) {
    let Some(list) = lt else { return };
    // `Arc<str>: Borrow<str>`, so the lookup needs no clone.
    match upstreams.get(i.hostname.as_str()) {
        // Host already present: merge/overwrite its per-path entries.
        Some(existing) => {
            for (path, servers) in list {
                existing.value().insert(path, servers);
            }
        }
        // First sighting of this host: adopt the whole map.
        None => {
            upstreams.insert(Arc::from(i.hostname.clone()), list);
        }
    }
}
pub fn match_path(conf: &GlobalServiceMapping, upstreams: &DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>, values: Vec<Arc<InnerMap>>) {
match conf.path {
Some(ref p) => {
upstreams.insert(Arc::from(p.clone()), (values, AtomicUsize::new(0)));
}
None => {
upstreams.insert(Arc::from("/"), (values, AtomicUsize::new(0)));
}
}
}
async fn read_token(path: &str) -> String {
let mut file = File::open(path).await.unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).await.unwrap();
contents.trim().to_string()
}
/// Abstraction over upstream discovery backends (Kubernetes / Consul).
#[async_trait]
pub trait ServiceDiscovery {
    /// Polls the backend and sends a refreshed Configuration through `toreturn`
    /// whenever the discovered upstream set changes.
    async fn fetch_upstreams(&self, config: Arc<Configuration>, toreturn: Sender<Configuration>);
}
/// Marker type: discovery via the Kubernetes Endpoints API.
pub struct KubernetesDiscovery;
/// Marker type: discovery via the Consul catalog API.
pub struct ConsulDiscovery;
#[async_trait]
impl ServiceDiscovery for KubernetesDiscovery {
    /// Polls the Kubernetes Endpoints API every 5 seconds and pushes a new
    /// Configuration through `toreturn` whenever the upstream set changes.
    ///
    /// BUGFIX: server selection previously used `random_range(0..len-1)`,
    /// which could never pick the last configured API server.
    async fn fetch_upstreams(&self, config: Arc<Configuration>, mut toreturn: Sender<Configuration>) {
        let prev_upstreams = UpstreamsDashMap::new();
        if let Some(kuber) = config.kubernetes.clone() {
            // Configured servers, or the in-cluster env vars as fallback.
            let servers = kuber.servers.unwrap_or(vec![format!(
                "{}:{}",
                env::var("KUBERNETES_SERVICE_HOST").unwrap_or("0.0.0.0".to_string()),
                env::var("KUBERNETES_SERVICE_PORT_HTTPS").unwrap_or("0".to_string())
            )]);
            // Pick one server uniformly over the FULL index range (see BUGFIX above).
            let num = if servers.len() > 1 { rand::rng().random_range(0..servers.len()) } else { 0 };
            let server = servers.get(num).unwrap().to_string();
            let path = kuber.tokenpath.unwrap_or("/var/run/secrets/kubernetes.io/serviceaccount/token".to_string());
            let namespace = get_current_namespace().unwrap_or_else(|| "default".to_string());
            let token = read_token(path.as_str()).await;
            loop {
                let upstreams = UpstreamsDashMap::new();
                if let Some(kuber) = config.kubernetes.clone() {
                    if let Some(svc) = kuber.services {
                        for service in svc {
                            // Install per-path client headers for this hostname, if any.
                            let header_list: DashMap<Arc<str>, Vec<(String, Arc<str>)>> = DashMap::new();
                            let mut hl = Vec::new();
                            build_headers(&service.client_headers, config.as_ref(), &mut hl);
                            if !hl.is_empty() {
                                match service.path.clone() {
                                    Some(path) => {
                                        header_list.insert(Arc::from(path.as_str()), hl);
                                    }
                                    None => {
                                        header_list.insert(Arc::from("/"), hl);
                                    }
                                }
                                config.client_headers.insert(Arc::from(service.hostname.clone()), header_list);
                            }
                            let url = format!("https://{}/api/v1/namespaces/{}/endpoints/{}", server, namespace, service.hostname);
                            let list = httpclient::for_kuber(&*url, &*token, &service).await;
                            list_to_upstreams(list, &upstreams, &service);
                        }
                    }
                    // Only push downstream when the discovered set actually changed.
                    if let Some(lt) = clone_compare(&upstreams, &prev_upstreams, &config).await {
                        toreturn.send(lt).await.unwrap();
                    }
                }
                sleep(Duration::from_secs(5)).await;
            }
        }
    }
}
/// Resolves the Kubernetes namespace this process runs in: the in-cluster
/// service-account namespace file when present and readable, otherwise the
/// POD_NAMESPACE environment variable, otherwise `None`.
fn get_current_namespace() -> Option<String> {
    let ns_path = "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
    Path::new(ns_path)
        .exists()
        .then(|| fs::read_to_string(ns_path).ok())
        .flatten()
        .map(|contents| contents.trim().to_string())
        .or_else(|| std::env::var("POD_NAMESPACE").ok())
}
#[async_trait]
impl ServiceDiscovery for ConsulDiscovery {
    /// Polls the Consul catalog every 5 seconds and pushes a new Configuration
    /// through `toreturn` whenever the upstream set changes.
    ///
    /// BUGFIX: server selection previously used `random_range(0..len-1)`,
    /// which could never pick the last configured Consul server.
    async fn fetch_upstreams(&self, config: Arc<Configuration>, mut toreturn: Sender<Configuration>) {
        let prev_upstreams = UpstreamsDashMap::new();
        loop {
            let upstreams = UpstreamsDashMap::new();
            if let Some(consul) = config.consul.clone() {
                // Configured servers, or the CONSUL_* env vars as fallback.
                let servers = consul.servers.unwrap_or(vec![format!(
                    "{}:{}",
                    env::var("CONSUL_SERVICE_HOST").unwrap_or("0.0.0.0".to_string()),
                    env::var("CONSUL_SERVICE_PORT").unwrap_or("0".to_string())
                )]);
                // Pick one server uniformly over the FULL index range (see BUGFIX above).
                let num = if servers.len() > 1 { rand::rng().random_range(0..servers.len()) } else { 0 };
                let consul_data = servers.get(num).unwrap().to_string();
                let ss = consul_data + "/v1/catalog/service/";
                if let Some(svc) = consul.services {
                    for i in svc {
                        // Install per-path client headers for this hostname, if any.
                        let header_list = DashMap::new();
                        let mut hl = Vec::new();
                        build_headers(&i.client_headers, config.as_ref(), &mut hl);
                        if !hl.is_empty() {
                            match i.path.clone() {
                                Some(path) => {
                                    header_list.insert(Arc::from(path.as_str()), hl);
                                }
                                None => {
                                    header_list.insert(Arc::from("/"), hl);
                                }
                            }
                            config.client_headers.insert(Arc::from(i.hostname.clone()), header_list);
                        }
                        let pref = ss.clone() + &i.upstream;
                        let list = httpclient::for_consul(pref, consul.token.clone(), &i).await;
                        list_to_upstreams(list, &upstreams, &i);
                    }
                }
            }
            // Only push downstream when the discovered set actually changed.
            if let Some(lt) = clone_compare(&upstreams, &prev_upstreams, &config).await {
                toreturn.send(lt).await.unwrap();
            }
            sleep(Duration::from_secs(5)).await;
        }
    }
}
/// Compares the freshly-discovered `upstreams` with the previous snapshot.
/// When they differ, updates `prev_upstreams` in place and returns a new
/// Configuration carrying a copy of the discovered upstreams; otherwise `None`.
async fn clone_compare(upstreams: &UpstreamsDashMap, prev_upstreams: &UpstreamsDashMap, config: &Arc<Configuration>) -> Option<Configuration> {
    // Unchanged since the last poll: nothing to send.
    if compare_dashmaps(upstreams, prev_upstreams) {
        return None;
    }
    // Carry over everything except the upstream table, which is filled below.
    let tosend = Configuration {
        upstreams: Default::default(),
        client_headers: config.client_headers.clone(),
        server_headers: config.server_headers.clone(),
        consul: config.consul.clone(),
        kubernetes: config.kubernetes.clone(),
        typecfg: config.typecfg.clone(),
        extraparams: config.extraparams.clone(),
    };
    clone_dashmap_into(upstreams, prev_upstreams);
    clone_dashmap_into(upstreams, &tosend.upstreams);
    print_upstreams(&tosend.upstreams);
    Some(tosend)
}

View File

@@ -1,48 +1,51 @@
use pingora_http::Method;
use pingora_http::StatusCode;
use pingora_http::Version; use pingora_http::Version;
use prometheus::{register_histogram, register_int_counter, register_int_counter_vec, Histogram, IntCounter, IntCounterVec}; use prometheus::{register_histogram, register_int_counter, register_int_counter_vec, Histogram, IntCounter, IntCounterVec};
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
pub struct MetricTypes { pub struct MetricTypes {
pub method: String, pub method: Method,
pub code: String, pub upstream: Arc<str>,
pub code: Option<StatusCode>,
pub latency: Duration, pub latency: Duration,
pub version: Version, pub version: Version,
} }
lazy_static::lazy_static! {
pub static ref REQUEST_COUNT: IntCounter = register_int_counter!( use std::sync::LazyLock;
"aralez_requests_total",
"Total number of requests handled by Aralez" pub static REQUEST_COUNT: LazyLock<IntCounter> = LazyLock::new(|| register_int_counter!("aralez_requests_total", "Total number of requests handled by Aralez").unwrap());
).unwrap();
pub static ref RESPONSE_CODES: IntCounterVec = register_int_counter_vec!( pub static RESPONSE_CODES: LazyLock<IntCounterVec> =
"aralez_responses_total", LazyLock::new(|| register_int_counter_vec!("aralez_responses_total", "Responses grouped by status code", &["status"]).unwrap());
"Responses grouped by status code",
&["status"] pub static REQUEST_LATENCY: LazyLock<Histogram> = LazyLock::new(|| {
).unwrap(); register_histogram!(
pub static ref REQUEST_LATENCY: Histogram = register_histogram!(
"aralez_request_latency_seconds", "aralez_request_latency_seconds",
"Request latency in seconds", "Request latency in seconds",
vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0] vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0]
).unwrap(); )
pub static ref RESPONSE_LATENCY: Histogram = register_histogram!( .unwrap()
});
pub static RESPONSE_LATENCY: LazyLock<Histogram> = LazyLock::new(|| {
register_histogram!(
"aralez_response_latency_seconds", "aralez_response_latency_seconds",
"Response latency in seconds", "Response latency in seconds",
vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0] vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0]
).unwrap(); )
pub static ref REQUESTS_BY_METHOD: IntCounterVec = register_int_counter_vec!( .unwrap()
"aralez_requests_by_method_total", });
"Number of requests by HTTP method",
&["method"] pub static REQUESTS_BY_METHOD: LazyLock<IntCounterVec> =
).unwrap(); LazyLock::new(|| register_int_counter_vec!("aralez_requests_by_method_total", "Number of requests by HTTP method", &["method"]).unwrap());
pub static ref REQUESTS_BY_VERSION: IntCounterVec = register_int_counter_vec!(
"aralez_requests_by_version_total", pub static REQUESTS_BY_UPSTREAM: LazyLock<IntCounterVec> =
"Number of requests by HTTP versions", LazyLock::new(|| register_int_counter_vec!("aralez_requests_by_upstream", "Number of requests by UPSTREAM server", &["upstream"]).unwrap());
&["version"]
).unwrap(); pub static REQUESTS_BY_VERSION: LazyLock<IntCounterVec> =
pub static ref ERROR_COUNT: IntCounter = register_int_counter!( LazyLock::new(|| register_int_counter_vec!("aralez_requests_by_version_total", "Number of requests by HTTP versions", &["version"]).unwrap());
"aralez_errors_total",
"Total number of errors"
).unwrap();
}
pub fn calc_metrics(metric_types: &MetricTypes) { pub fn calc_metrics(metric_types: &MetricTypes) {
REQUEST_COUNT.inc(); REQUEST_COUNT.inc();
@@ -57,31 +60,8 @@ pub fn calc_metrics(metric_types: &MetricTypes) {
_ => "Unknown", _ => "Unknown",
}; };
REQUESTS_BY_VERSION.with_label_values(&[&version_str]).inc(); REQUESTS_BY_VERSION.with_label_values(&[&version_str]).inc();
RESPONSE_CODES.with_label_values(&[&metric_types.code.to_string()]).inc(); RESPONSE_CODES.with_label_values(&[metric_types.code.unwrap_or(StatusCode::GONE).as_str()]).inc();
REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc(); REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc();
REQUESTS_BY_UPSTREAM.with_label_values(&[metric_types.upstream.as_ref()]).inc();
RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64()); RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64());
} }
/*
pub fn calc_metrics(method: String, code: u16, latency: Duration) {
REQUEST_COUNT.inc();
let timer = REQUEST_LATENCY.start_timer();
timer.observe_duration();
RESPONSE_CODES.with_label_values(&[&code.to_string()]).inc();
REQUESTS_BY_METHOD.with_label_values(&[&method]).inc();
RESPONSE_LATENCY.observe(latency.as_secs_f64());
}
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(5));
loop {
interval.tick().await;
// read Pingora stats
let stats = pingora.get_stats();
// update Prometheus metrics accordingly
REQUEST_COUNT.set(stats.requests_total);
// ... etc
}
});
*/

View File

@@ -1,139 +1,217 @@
use crate::utils::healthcheck;
use crate::utils::state::{is_first_run, mark_not_first_run};
use crate::utils::structs::*; use crate::utils::structs::*;
use crate::utils::tools::{clone_dashmap, clone_dashmap_into, print_upstreams};
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info, warn}; use log::{error, info, warn};
use serde_yaml::Error;
use std::collections::HashMap; use std::collections::HashMap;
use std::fs; use std::path::Path;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::{env, fs};
pub fn load_configuration(d: &str, kind: &str) -> Option<Configuration> { pub async fn load_configuration(d: &str, kind: &str) -> (Option<Configuration>, String) {
let mut toreturn: Configuration = Configuration { let mut conf_files = Vec::new();
upstreams: Default::default(), let yaml_data = match kind {
headers: Default::default(), "filepath" => match fs::read_to_string(d) {
consul: None,
typecfg: "".to_string(),
extraparams: Extraparams {
sticky_sessions: false,
to_https: None,
authentication: DashMap::new(),
},
};
toreturn.upstreams = UpstreamsDashMap::new();
toreturn.headers = Headers::new();
let mut yaml_data = d.to_string();
match kind {
"filepath" => {
let _ = match fs::read_to_string(d) {
Ok(data) => { Ok(data) => {
info!("Reading upstreams from {}", d); let mut confdir = Path::new(d).parent().unwrap().to_path_buf();
yaml_data = data confdir.push("conf.d");
if let Ok(entries) = fs::read_dir(&confdir) {
let mut paths: Vec<_> = entries
.flatten()
.map(|e| e.path())
.filter(|p| p.extension().and_then(|e| e.to_str()) == Some("yaml"))
.collect();
paths.sort();
for path in paths {
let content = fs::read_to_string(&path);
match content {
Ok(content) => {
conf_files.push(content);
} }
Err(e) => { Err(e) => {
error!("Reading: {}: {:?}", d, e.to_string()); error!("Reading: {}: {:?}", path.display(), e)
warn!("Running with empty upstreams list, update it via API");
return None;
} }
}; };
} }
}
info!("Reading upstreams from {}", d);
data
}
Err(e) => {
error!("Reading: {}: {:?}", d, e);
warn!("Running with empty upstreams list, update it via API");
return (None, e.to_string());
}
},
"content" => { "content" => {
info!("Reading upstreams from API post body"); info!("Reading upstreams from API post body");
d.to_string()
}
_ => {
error!("Mismatched parameter, only filepath|content is allowed");
return (None, "Mismatched parameter, only filepath|content is allowed".to_string());
}
};
let mut parsed: Config = match serde_yml::from_str(&yaml_data) {
Ok(cfg) => cfg,
Err(e) => {
error!("Failed to parse upstreams file: {}", e);
return (None, e.to_string());
}
};
if let Some(ref mut upstreams) = parsed.upstreams {
for uconf in conf_files {
let p: HashMap<String, HostConfig> = match serde_yml::from_str(&uconf) {
Ok(ucfg) => ucfg,
Err(e) => {
error!("Failed to parse upstreams file: {}", e);
return (None, e.to_string());
}
};
upstreams.extend(p);
} }
_ => error!("Mismatched parameter, only filepath|content is allowed "),
} }
let p: Result<Config, Error> = serde_yaml::from_str(&yaml_data); let mut toreturn = Configuration::default();
match p { populate_headers_and_auth(&mut toreturn, &parsed).await;
Ok(parsed) => { toreturn.typecfg = parsed.provider.clone();
let global_headers = DashMap::new();
let mut hl = Vec::new();
if let Some(headers) = &parsed.headers {
for header in headers.iter() {
if let Some((key, val)) = header.split_once(':') {
hl.push((key.to_string(), val.to_string()));
}
}
global_headers.insert("/".to_string(), hl);
toreturn.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
toreturn.extraparams.sticky_sessions = parsed.sticky_sessions;
toreturn.extraparams.to_https = parsed.to_https;
}
if let Some(auth) = &parsed.authorization {
let name = auth.get("type").unwrap().to_string();
let creds = auth.get("creds").unwrap().to_string();
let val: Vec<String> = vec![name, creds];
toreturn.extraparams.authentication.insert("authorization".to_string(), val);
} else {
toreturn.extraparams.authentication = DashMap::new();
}
match parsed.provider.as_str() { match parsed.provider.as_str() {
"file" => { "file" => {
toreturn.typecfg = "file".to_string(); populate_file_upstreams(&mut toreturn, &parsed).await;
if let Some(upstream) = parsed.upstreams { (Some(toreturn), "Ok".to_string())
for (hostname, host_config) in upstream {
let path_map = DashMap::new();
let header_list = DashMap::new();
for (path, path_config) in host_config.paths {
let mut server_list = Vec::new();
let mut hl = Vec::new();
if let Some(headers) = &path_config.headers {
for header in headers.iter().by_ref() {
if let Some((key, val)) = header.split_once(':') {
hl.push((key.to_string(), val.to_string()));
}
}
}
header_list.insert(path.clone(), hl);
for server in path_config.servers {
if let Some((ip, port_str)) = server.split_once(':') {
if let Ok(port) = port_str.parse::<u16>() {
// let to_https = matches!(path_config.to_https, Some(true));
let to_https = path_config.to_https.unwrap_or(false);
server_list.push((ip.to_string(), port, true, false, to_https));
}
}
}
path_map.insert(path, (server_list, AtomicUsize::new(0)));
}
toreturn.headers.insert(hostname.clone(), header_list);
toreturn.upstreams.insert(hostname, path_map);
}
}
Some(toreturn)
} }
"consul" => { "consul" => {
toreturn.typecfg = "consul".to_string(); toreturn.consul = parsed.consul;
let consul = parsed.consul; (toreturn.consul.is_some().then_some(toreturn), "Ok".to_string())
match consul {
Some(consul) => {
toreturn.consul = Some(consul);
Some(toreturn)
} }
None => None, "kubernetes" => {
toreturn.kubernetes = parsed.kubernetes;
(toreturn.kubernetes.is_some().then_some(toreturn), "Ok".to_string())
} }
}
"kubernetes" => None,
_ => { _ => {
warn!("Unknown provider {}", parsed.provider); warn!("Unknown provider {}", parsed.provider);
None (None, "Unknown provider".to_string())
}
}
}
Err(e) => {
error!("Failed to parse upstreams file: {}", e);
None
} }
} }
} }
/// Installs global client/server headers and the global authorization / rate-limit
/// settings from the parsed top-level config into `config`.
///
/// Fix: client-header values are now trimmed like server-header values — the
/// original trimmed only the server side, so "Key: Value" client entries kept
/// the leading space, inconsistent with both the server path and build_headers().
async fn populate_headers_and_auth(config: &mut Configuration, parsed: &Config) {
    // Global client headers ("Key: Value" strings) under the sentinel hostname.
    let mut ch: Vec<(String, Arc<str>)> = Vec::new();
    if let Some(headers) = &parsed.client_headers {
        for header in headers {
            if let Some((key, val)) = header.split_once(':') {
                ch.push((key.to_string(), Arc::from(val.trim())));
            }
        }
    }
    let global_headers: DashMap<Arc<str>, Vec<(String, Arc<str>)>> = DashMap::new();
    global_headers.insert(Arc::from("/"), ch);
    config.client_headers.insert(Arc::from("GLOBAL_CLIENT_HEADERS"), global_headers);
    // Global server headers, same shape.
    let mut sh: Vec<(String, Arc<str>)> = Vec::new();
    if let Some(headers) = &parsed.server_headers {
        for header in headers {
            if let Some((key, val)) = header.split_once(':') {
                sh.push((key.to_string(), Arc::from(val.trim())));
            }
        }
    }
    let server_global_headers: DashMap<Arc<str>, Vec<(String, Arc<str>)>> = DashMap::new();
    server_global_headers.insert(Arc::from("/"), sh);
    config.server_headers.insert(Arc::from("GLOBAL_SERVER_HEADERS"), server_global_headers);
    // Global toggles and limits.
    config.extraparams.to_https = parsed.to_https;
    config.extraparams.sticky_sessions = parsed.sticky_sessions;
    config.extraparams.rate_limit = parsed.rate_limit;
    if let Some(rate) = &parsed.rate_limit {
        info!("Applied Global Rate Limit : {} request per second", rate);
    }
    // Global authorization, if configured.
    if let Some(pa) = &parsed.authorization {
        let y: InnerAuth = InnerAuth {
            auth_type: Arc::from(pa.auth_type.clone()),
            auth_cred: Arc::from(pa.auth_cred.clone()),
        };
        config.extraparams.authentication = Some(Arc::from(y));
    }
}
/// Builds the upstream table from a file-provider config: one per-path server
/// list per hostname, plus per-path client/server headers. On the first run the
/// table is adopted directly; afterwards it goes through a health check first.
///
/// Fix: `path_auth` and `redirect_link` were rebuilt inside the per-server loop
/// although they depend only on the path config — they are now built once per
/// path and cheaply Arc-cloned per server.
async fn populate_file_upstreams(config: &mut Configuration, parsed: &Config) {
    let imtdashmap = UpstreamsDashMap::new();
    if let Some(upstreams) = &parsed.upstreams {
        for (hostname, host_config) in upstreams {
            let path_map = DashMap::new();
            let client_header_list = DashMap::new();
            let server_header_list = DashMap::new();
            for (path, path_config) in &host_config.paths {
                if let Some(rate) = &path_config.rate_limit {
                    info!("Applied Rate Limit for {} : {} request per second", hostname, rate);
                }
                // Per-path headers.
                let mut hl: Vec<(String, Arc<str>)> = Vec::new();
                let mut sl: Vec<(String, Arc<str>)> = Vec::new();
                build_headers(&path_config.client_headers, config, &mut hl);
                build_headers(&path_config.server_headers, config, &mut sl);
                client_header_list.insert(Arc::from(path.as_str()), hl);
                server_header_list.insert(Arc::from(path.as_str()), sl);
                // Loop-invariant per path: build once, Arc-clone per server.
                let path_auth: Option<Arc<InnerAuth>> = path_config.authorization.as_ref().map(|pa| {
                    Arc::from(InnerAuth {
                        auth_type: Arc::from(pa.auth_type.clone()),
                        auth_cred: Arc::from(pa.auth_cred.clone()),
                    })
                });
                let redirect_link: Option<Arc<str>> = path_config.redirect_to.as_ref().map(|www| Arc::from(www.as_str()));
                // Parse "ip:port" server entries; malformed entries are skipped.
                let mut server_list = Vec::new();
                for server in &path_config.servers {
                    if let Some((ip, port_str)) = server.split_once(':') {
                        if let Ok(port) = port_str.parse::<u16>() {
                            server_list.push(Arc::from(InnerMap {
                                address: Arc::from(ip),
                                port,
                                is_ssl: false,
                                is_http2: false,
                                to_https: path_config.to_https.unwrap_or(false),
                                rate_limit: path_config.rate_limit,
                                healthcheck: path_config.healthcheck,
                                redirect_to: redirect_link.clone(),
                                authorization: path_auth.clone(),
                            }));
                        }
                    }
                }
                path_map.insert(Arc::from(path.clone()), (server_list, AtomicUsize::new(0)));
            }
            config.client_headers.insert(Arc::from(hostname.clone()), client_header_list);
            config.server_headers.insert(Arc::from(hostname.clone()), server_header_list);
            imtdashmap.insert(Arc::from(hostname.clone()), path_map);
        }
        if is_first_run() {
            // First load: adopt as-is; health checks start later.
            clone_dashmap_into(&imtdashmap, &config.upstreams);
            mark_not_first_run();
        } else {
            // Reload: only adopt upstreams that pass the health check.
            let y = clone_dashmap(&imtdashmap);
            let r = healthcheck::initiate_upstreams(y).await;
            clone_dashmap_into(&r, &config.upstreams);
        }
        info!("Upstream Config:");
        print_upstreams(&config.upstreams);
    }
}
pub fn parce_main_config(path: &str) -> AppConfig { pub fn parce_main_config(path: &str) -> AppConfig {
info!("Parsing configuration");
let data = fs::read_to_string(path).unwrap(); let data = fs::read_to_string(path).unwrap();
let reply = DashMap::new(); let reply = DashMap::new();
let cfg: HashMap<String, String> = serde_yaml::from_str(&*data).expect("Failed to parse main config file"); let cfg: HashMap<String, String> = serde_yml::from_str(&*data).expect("Failed to parse main config file");
let mut cfo: AppConfig = serde_yaml::from_str(&*data).expect("Failed to parse main config file"); let mut cfo: AppConfig = serde_yml::from_str(&*data).expect("Failed to parse main config file");
log_builder(&cfo);
cfo.hc_method = cfo.hc_method.to_uppercase(); cfo.hc_method = cfo.hc_method.to_uppercase();
for (k, v) in cfg { for (k, v) in cfg {
reply.insert(k.to_string(), v.to_string()); reply.insert(k.to_string(), v.to_string());
@@ -143,22 +221,80 @@ pub fn parce_main_config(path: &str) -> AppConfig {
cfo.local_server = Option::from((ip.to_string(), port)); cfo.local_server = Option::from((ip.to_string(), port));
} }
} }
// if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() {
// if let Some((_, port_str)) = tlsport_cfg.split_once(':') {
// if let Ok(port) = port_str.parse::<u16>() {
// cfo.proxy_port_tls = Some(port);
// }
// }
// };
if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() { if let Some(tlsport_cfg) = cfo.proxy_address_tls.clone() {
if let Some((_, port_str)) = tlsport_cfg.split_once(':') { if let Some((_, port_str)) = tlsport_cfg.split_once(':') {
if let Ok(port) = port_str.parse::<u16>() { cfo.proxy_port_tls = Some(port_str.to_string());
cfo.proxy_port_tls = Some(port);
}
} }
}; };
// match cfo.config_tls_address.clone() {
// Some(tls_cert) => { if let Some((_, port_str)) = cfo.proxy_address_http.split_once(':') {
// if let Some((ip, port_str)) = tls_cert.split_once(':') { cfo.proxy_port = Some(port_str.to_string());
// if let Ok(port) = port_str.parse::<u16>() { }
// cfo.local_tls_server = Option::from((ip.to_string(), port));
// } cfo.proxy_tls_grade = parce_tls_grades(cfo.proxy_tls_grade.clone());
// }
// }
// None => {}
// };
cfo cfo
} }
/// Normalizes the configured TLS grade to one of "high" | "medium" | "unsafe"
/// (case-insensitive). Missing or unrecognized values fall back to "medium"
/// with a warning. Always returns Some.
fn parce_tls_grades(what: Option<String>) -> Option<String> {
    let grade = match what {
        None => {
            warn!("TLS grade not set, defaulting to: medium");
            "medium"
        }
        Some(g) => match g.to_ascii_lowercase().as_str() {
            "high" => "high",
            "medium" => "medium",
            "unsafe" => "unsafe",
            _ => {
                warn!("Error parsing TLS grade, defaulting to: `medium`");
                "medium"
            }
        },
    };
    Some(grade.to_string())
}
/// Initializes env_logger from the configured log level by setting RUST_LOG.
/// Unrecognized levels fall back to "info" after printing a notice.
fn log_builder(conf: &AppConfig) {
    const LEVELS: [&str; 6] = ["info", "error", "warn", "debug", "trace", "off"];
    let requested = conf.log_level.clone();
    let level = if LEVELS.contains(&requested.as_str()) {
        requested.as_str()
    } else {
        println!("Error reading log level, defaulting to: INFO");
        "info"
    };
    // SAFETY: set_var is unsafe in edition 2024; called once at startup before
    // other threads are spawned, matching the original code's usage.
    unsafe {
        env::set_var("RUST_LOG", level);
    }
    env_logger::builder().init();
}
/// Appends parsed "Key: Value" header strings to `hl`, trimming both sides.
/// Entries without a ':' separator are silently skipped.
pub fn build_headers(path_config: &Option<Vec<String>>, _config: &Configuration, hl: &mut Vec<(String, Arc<str>)>) {
    let Some(headers) = path_config else { return };
    hl.extend(
        headers
            .iter()
            .filter_map(|h| h.split_once(':'))
            .map(|(key, val)| (key.trim().to_string(), Arc::<str>::from(val.trim()))),
    );
}

29
src/utils/state.rs Normal file
View File

@@ -0,0 +1,29 @@
use std::sync::{LazyLock, RwLock};
/// Process-wide flag recording whether the initial configuration load has
/// happened yet; starts as `true` and is flipped exactly once.
#[derive(Debug)]
pub struct SharedState {
    pub first_run: bool,
}

/// Lazily-initialized global state; begins in the "first run" phase.
pub static GLOBAL_STATE: LazyLock<RwLock<SharedState>> = LazyLock::new(|| RwLock::new(SharedState { first_run: true }));

/// Clears the first-run flag. Panics only if the lock is poisoned.
pub fn mark_not_first_run() {
    GLOBAL_STATE.write().unwrap().first_run = false;
}

/// Returns true until mark_not_first_run() has been called.
pub fn is_first_run() -> bool {
    GLOBAL_STATE.read().unwrap().first_run
}
/*
impl SharedState {
pub fn mark_first_run(&mut self) {
self.first_run = false;
}
pub fn is_first_run(&self) -> bool {
self.first_run
}
}
*/

View File

@@ -2,64 +2,107 @@ use dashmap::DashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
pub type InnerMap = (String, u16, bool, bool, bool); pub type UpstreamsDashMap = DashMap<Arc<str>, DashMap<Arc<str>, (Vec<Arc<InnerMap>>, AtomicUsize)>>;
pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<InnerMap>, AtomicUsize)>>;
pub type UpstreamsIdMap = DashMap<String, InnerMap>;
pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>;
#[derive(Debug, Clone, Serialize, Deserialize)] pub type UpstreamsIdMap = DashMap<String, Arc<InnerMap>>;
pub struct ServiceMapping { pub type Headers = DashMap<Arc<str>, DashMap<Arc<str>, Vec<(String, Arc<str>)>>>;
pub proxy: String, // pub type UpstreamsSerDde = Option<HashMap<String, HostConfig>>;
pub real: String, // pub type UpstreamsSerDe = HashMap<String, HostConfig>;
}
#[derive(Clone, Debug)] #[derive(Clone, Debug, Default)]
pub struct Extraparams { pub struct Extraparams {
pub sticky_sessions: bool,
pub to_https: Option<bool>, pub to_https: Option<bool>,
pub authentication: DashMap<String, Vec<String>>, pub sticky_sessions: bool,
pub authentication: Option<Arc<InnerAuth>>,
pub rate_limit: Option<isize>,
} }
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct GlobalServiceMapping {
    // Service name queried in the discovery backend (appended to the Consul catalog URL).
    pub upstream: String,
    // Hostname key under which discovered servers are stored in the upstream table;
    // also used as the Endpoints object name for Kubernetes discovery.
    pub hostname: String,
    // Route path for the discovered servers; defaults to "/" when absent.
    pub path: Option<String>,
    pub to_https: Option<bool>,
    pub sticky_sessions: Option<bool>,
    pub rate_limit: Option<isize>,
    // "Key: Value" header strings, parsed via build_headers().
    pub client_headers: Option<Vec<String>>,
    pub server_headers: Option<Vec<String>>,
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Kubernetes {
    // API server "host:port" entries; falls back to the in-cluster
    // KUBERNETES_SERVICE_HOST / KUBERNETES_SERVICE_PORT_HTTPS env vars when absent.
    pub servers: Option<Vec<String>>,
    // Services to discover through the Endpoints API.
    pub services: Option<Vec<GlobalServiceMapping>>,
    // Service-account token file; defaults to the standard in-cluster path when absent.
    pub tokenpath: Option<String>,
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Consul { pub struct Consul {
pub servers: Option<Vec<String>>, pub servers: Option<Vec<String>>,
pub services: Option<Vec<ServiceMapping>>, pub services: Option<Vec<GlobalServiceMapping>>,
pub token: Option<String>, pub token: Option<String>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct Config { pub struct Config {
pub provider: String, pub provider: String,
pub sticky_sessions: bool,
pub to_https: Option<bool>, pub to_https: Option<bool>,
pub sticky_sessions: bool,
#[serde(default)]
pub upstreams: Option<HashMap<String, HostConfig>>, pub upstreams: Option<HashMap<String, HostConfig>>,
#[serde(default)]
pub globals: Option<HashMap<String, Vec<String>>>, pub globals: Option<HashMap<String, Vec<String>>>,
pub headers: Option<Vec<String>>, #[serde(default)]
pub authorization: Option<HashMap<String, String>>, pub client_headers: Option<Vec<String>>,
#[serde(default)]
pub server_headers: Option<Vec<String>>,
#[serde(default)]
pub authorization: Option<Auth>,
#[serde(default)]
pub consul: Option<Consul>, pub consul: Option<Consul>,
#[serde(default)]
pub kubernetes: Option<Kubernetes>,
#[serde(default)]
pub rate_limit: Option<isize>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct HostConfig { pub struct HostConfig {
pub paths: HashMap<String, PathConfig>, pub paths: HashMap<String, PathConfig>,
pub rate_limit: Option<isize>,
} }
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
#[derive(Debug, Serialize, Deserialize)] pub struct Auth {
#[serde(rename = "type")]
pub auth_type: String,
#[serde(rename = "data")]
pub auth_cred: String,
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct PathConfig { pub struct PathConfig {
pub servers: Vec<String>, pub servers: Vec<String>,
pub to_https: Option<bool>, pub to_https: Option<bool>,
pub headers: Option<Vec<String>>, pub sticky_sessions: Option<bool>,
pub client_headers: Option<Vec<String>>,
pub server_headers: Option<Vec<String>>,
pub rate_limit: Option<isize>,
pub healthcheck: Option<bool>,
pub redirect_to: Option<String>,
pub authorization: Option<Auth>,
} }
#[derive(Debug)] #[derive(Debug, Default)]
pub struct Configuration { pub struct Configuration {
pub upstreams: UpstreamsDashMap, pub upstreams: UpstreamsDashMap,
pub headers: Headers, pub client_headers: Headers,
pub server_headers: Headers,
pub consul: Option<Consul>, pub consul: Option<Consul>,
pub kubernetes: Option<Kubernetes>,
pub typecfg: String, pub typecfg: String,
pub extraparams: Extraparams, pub extraparams: Extraparams,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
pub struct AppConfig { pub struct AppConfig {
pub hc_interval: u16, pub hc_interval: u16,
pub hc_method: String, pub hc_method: String,
@@ -68,19 +111,70 @@ pub struct AppConfig {
pub master_key: String, pub master_key: String,
pub config_address: String, pub config_address: String,
pub proxy_address_http: String, pub proxy_address_http: String,
pub config_api_enabled: bool,
pub config_tls_address: Option<String>, pub config_tls_address: Option<String>,
pub config_tls_certificate: Option<String>, pub config_tls_certificate: Option<String>,
pub config_tls_key_file: Option<String>, pub config_tls_key_file: Option<String>,
pub proxy_address_tls: Option<String>, pub proxy_address_tls: Option<String>,
pub proxy_port_tls: Option<u16>, pub proxy_port_tls: Option<String>,
// pub tls_certificate: Option<String>, pub proxy_port: Option<String>,
// pub tls_key_file: Option<String>,
pub local_server: Option<(String, u16)>, pub local_server: Option<(String, u16)>,
pub proxy_certificates: Option<String>, pub proxy_certificates: Option<String>,
pub proxy_tls_grade: Option<String>,
pub file_server_address: Option<String>,
pub file_server_folder: Option<String>,
pub runuser: Option<String>,
pub rungroup: Option<String>,
} }
// #[derive(Debug)] #[derive(Debug, Default, Clone, PartialEq, Eq)]
// pub struct CertificateMove { pub struct InnerAuth {
// pub cert_tx: Sender<CertificateConfig>, pub auth_type: Arc<str>,
// pub cert_rx: Receiver<CertificateConfig>, pub auth_cred: Arc<str>,
// } }
/// One resolved upstream backend together with the per-backend policy
/// (TLS, HTTP/2, HTTPS redirect, rate limit, health check, redirect, auth)
/// that the proxy applies when routing to it.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InnerMap {
    /// Backend address (IP or hostname); `Arc<str>` keeps clones O(1).
    pub address: Arc<str>,
    /// Backend TCP port.
    pub port: u16,
    /// Connect to this backend over TLS.
    pub is_ssl: bool,
    /// Negotiate HTTP/2 (ALPN H2) with this backend.
    pub is_http2: bool,
    /// Redirect plain-HTTP client requests to HTTPS before proxying.
    pub to_https: bool,
    /// Per-backend request limit per rate window; `None` = no backend-level limit.
    pub rate_limit: Option<isize>,
    /// Whether health checking applies to this backend; `None` = not configured.
    pub healthcheck: Option<bool>,
    /// If set, answer with a 301 to `redirect_to + path` instead of proxying.
    pub redirect_to: Option<Arc<str>>,
    /// Optional per-backend authentication requirement.
    pub authorization: Option<Arc<InnerAuth>>,
}
#[allow(dead_code)]
impl InnerMap {
    /// Builds a placeholder backend: loopback address, port 0, every flag
    /// off and every optional policy unset.
    pub fn new() -> Self {
        Self {
            address: Arc::from("127.0.0.1"),
            port: 0,
            is_ssl: false,
            is_http2: false,
            to_https: false,
            rate_limit: None,
            healthcheck: None,
            redirect_to: None,
            authorization: None,
        }
    }
}
/// Serde-friendly mirror of a backend entry (owned `String` address,
/// connection flags and limits only) used when exporting the upstream
/// table as JSON. Field order is the JSON key order — do not reorder.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct InnerMapForJson {
    /// Backend address rendered as an owned string.
    pub address: String,
    /// Backend TCP port.
    pub port: u16,
    /// Backend is contacted over TLS.
    pub is_ssl: bool,
    /// Backend speaks HTTP/2.
    pub is_http2: bool,
    /// Plain-HTTP requests are redirected to HTTPS.
    pub to_https: bool,
    /// Per-backend rate limit, if configured.
    pub rate_limit: Option<isize>,
    /// Health-check participation, if configured.
    pub healthcheck: Option<bool>,
}
/// JSON snapshot for one routing path: its backend list plus the current
/// value of the path's round-robin request counter.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct UpstreamSnapshotForJson {
    /// Backends currently registered for this path.
    pub backends: Vec<InnerMapForJson>,
    /// Reading of the path's atomic request counter at snapshot time.
    pub requests: usize,
}

View File

@@ -1,17 +1,24 @@
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap}; use crate::tls::load;
use crate::utils::tls; use crate::tls::load::CertificateConfig;
use crate::utils::tls::CertificateConfig; use crate::utils::structs::{InnerMap, InnerMapForJson, UpstreamSnapshotForJson, UpstreamsDashMap, UpstreamsIdMap};
use dashmap::DashMap; use dashmap::DashMap;
use log::{error, info}; use log::{error, info};
use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher}; use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use privdrop::PrivDrop;
use serde_json::{json, Value};
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use std::any::type_name; use std::any::type_name;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::fmt::Write; use std::fmt::Write;
use std::fs; use std::net::SocketAddr;
use std::sync::atomic::AtomicUsize; use std::net::TcpListener;
use std::os::unix::fs::MetadataExt;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Sender}; use std::sync::mpsc::{channel, Sender};
use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use std::{fs, process, thread, time};
#[allow(dead_code)] #[allow(dead_code)]
pub fn print_upstreams(upstreams: &UpstreamsDashMap) { pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
@@ -22,9 +29,16 @@ pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
for path_entry in host_entry.value().iter() { for path_entry in host_entry.value().iter() {
let path = path_entry.key(); let path = path_entry.key();
println!(" Path: {}", path); println!(" Path: {}", path);
for f in path_entry.value().0.clone() {
for (ip, port, ssl, vers, to_https) in path_entry.value().0.clone() { println!(
println!(" ===> IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}", ip, port, ssl, vers, to_https); " IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}, Rate Limit: {}",
f.address,
f.port,
f.is_ssl,
f.is_http2,
f.to_https,
f.rate_limit.unwrap_or(0)
);
} }
} }
} }
@@ -110,17 +124,29 @@ pub fn compare_dashmaps(map1: &UpstreamsDashMap, map2: &UpstreamsDashMap) -> boo
return false; // Path exists in map1 but not in map2 return false; // Path exists in map1 but not in map2
}; };
let (vec2, _counter2) = entry2.value(); let (vec2, _counter2) = entry2.value();
let set1: HashSet<_> = vec1.iter().collect();
let set2: HashSet<_> = vec2.iter().collect(); if vec1.len() != vec2.len() {
if set1 != set2 {
return false; return false;
} }
for item in vec1.iter() {
let count1 = vec1.iter().filter(|&x| x == item).count();
let count2 = vec2.iter().filter(|&x| x == item).count();
if count1 != count2 {
return false;
}
}
// let set1: HashSet<_> = vec1.iter().collect();
// let set2: HashSet<_> = vec2.iter().collect();
// if set1 != set2 {
// return false;
// }
} }
} }
true true
} }
pub fn merge_headers(target: &DashMap<String, Vec<(String, String)>>, source: &DashMap<String, Vec<(String, String)>>) { pub fn merge_headers(target: &DashMap<Arc<str>, Vec<(String, Arc<str>)>>, source: &DashMap<Arc<str>, Vec<(String, Arc<str>)>>) {
for entry in source.iter() { for entry in source.iter() {
let global_key = entry.key().clone(); let global_key = entry.key().clone();
let global_values = entry.value().clone(); let global_values = entry.value().clone();
@@ -140,23 +166,50 @@ pub fn clone_idmap_into(original: &UpstreamsDashMap, cloned: &UpstreamsIdMap) {
let new_vec = vec.clone(); let new_vec = vec.clone();
for x in vec.iter() { for x in vec.iter() {
let mut id = String::new(); let mut id = String::new();
write!(&mut id, "{}:{}:{}", x.0, x.1, x.2).unwrap(); write!(
&mut id,
"{}:{}:{}:{}:{}:{}:{}:{:?}",
outer_entry.key(),
x.address,
x.port,
x.is_http2,
x.to_https,
x.rate_limit.unwrap_or_default(),
x.healthcheck.unwrap_or_default(),
x.authorization
)
.unwrap_or(());
let mut hasher = Sha256::new(); let mut hasher = Sha256::new();
// address: "127.0.0.3", port: 8000, is_ssl: false, is_http2: false, to_https: false, rate_limit: Some(200), healthcheck: None, authorization: None } }
hasher.update(id.clone().into_bytes()); hasher.update(id.clone().into_bytes());
let hash = hasher.finalize(); let hash = hasher.finalize();
let hex_hash = base16ct::lower::encode_string(&hash); let hex_hash = base16ct::lower::encode_string(&hash);
let hh = hex_hash[0..50].to_string(); let hh = hex_hash[0..50].to_string();
cloned.insert(id, (hh.clone(), 0000, false, false, false)); let to_add = InnerMap {
cloned.insert(hh, x.to_owned()); address: Arc::from("127.0.0.1"),
port: 0,
is_ssl: false,
is_http2: false,
to_https: false,
rate_limit: None,
healthcheck: None,
redirect_to: None,
authorization: None,
};
cloned.insert(id, Arc::from(to_add));
cloned.insert(hh, Arc::from(x.to_owned()));
// println!("CLONNED :===========> {:?}", cloned);
} }
new_inner_map.insert(path.clone(), new_vec); new_inner_map.insert(path.clone(), new_vec);
} }
} }
info!("Upstreams are fully populated. Ready to server requests");
} }
pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> { pub fn listdir(dir: String) -> Vec<load::CertificateConfig> {
let mut f = HashMap::new(); let mut f = HashMap::new();
let mut certificate_configs: Vec<tls::CertificateConfig> = vec![]; let mut certificate_configs: Vec<load::CertificateConfig> = vec![];
let paths = fs::read_dir(dir).unwrap(); let paths = fs::read_dir(dir).unwrap();
for path in paths { for path in paths {
let path_str = path.unwrap().path().to_str().unwrap().to_owned(); let path_str = path.unwrap().path().to_str().unwrap().to_owned();
@@ -174,13 +227,13 @@ pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
certificate_configs.push(y); certificate_configs.push(y);
} }
} }
for (_, v) in f.iter() { // for (_, v) in f.iter() {
let y = CertificateConfig { // let y = CertificateConfig {
cert_path: v[0].clone(), // cert_path: v[0].clone(),
key_path: v[1].clone(), // key_path: v[1].clone(),
}; // };
certificate_configs.push(y); // certificate_configs.push(y);
} // }
certificate_configs certificate_configs
} }
@@ -210,3 +263,143 @@ pub fn watch_folder(path: String, sender: Sender<Vec<CertificateConfig>>) -> not
} }
} }
} }
/// Waits until the proxy's listeners actually own `http_addr` (and
/// `tls_addr`, when present) — detected by bind attempts failing — and
/// only then drops root privileges to `user`:`group`. Exits the process
/// if the privilege drop fails.
pub fn drop_priv(user: String, group: String, http_addr: String, tls_addr: Option<String>) {
    // Poll until the address is taken; `port_is_available` returns true
    // once binding fails, i.e. a listener holds the port.
    let wait_until_taken = |addr: String| loop {
        thread::sleep(time::Duration::from_millis(10));
        if port_is_available(addr.clone()) {
            break;
        }
    };
    thread::sleep(time::Duration::from_millis(10));
    wait_until_taken(http_addr);
    if let Some(tls) = tls_addr {
        wait_until_taken(tls);
    }
    info!("Dropping ROOT privileges to: {}:{}", user, group);
    match PrivDrop::default().user(user).group(group).apply() {
        Ok(()) => {}
        Err(e) => {
            error!("Failed to drop privileges: {}", e);
            process::exit(1)
        }
    }
}
/// Returns `true` when `addr` can NOT be bound — i.e. some process
/// (normally this proxy's own listener) already owns the address.
/// NOTE: despite the name, `true` means the port is *in use*;
/// `drop_priv` relies on this to detect that the listeners have started.
fn port_is_available(addr: String) -> bool {
    TcpListener::bind(addr).is_err()
}
pub fn check_priv(addr: &str) {
let port = SocketAddr::from_str(addr).map(|sa| sa.port()).unwrap();
match port < 1024 {
true => {
let meta = std::fs::metadata("/proc/self").map(|m| m.uid()).unwrap();
if meta != 0 {
error!("Running on privileged port requires to start as ROOT");
process::exit(1)
}
}
false => {}
}
}
/// Serializes the upstream table to a compact JSON string shaped as
/// host -> path -> { backends: [...], requests: n }.
#[allow(dead_code)]
pub fn upstreams_to_json(upstreams: &UpstreamsDashMap) -> serde_json::Result<String> {
    let mut hosts = HashMap::new();
    for host in upstreams.iter() {
        let mut paths = HashMap::new();
        for path in host.value().iter() {
            let (backends, counter) = path.value();
            // Copy each backend into its serde-friendly mirror type.
            let mut snapshot = Vec::with_capacity(backends.len());
            for b in backends.iter() {
                snapshot.push(InnerMapForJson {
                    address: b.address.to_string(),
                    port: b.port,
                    is_ssl: b.is_ssl,
                    is_http2: b.is_http2,
                    to_https: b.to_https,
                    rate_limit: b.rate_limit,
                    healthcheck: b.healthcheck,
                });
            }
            paths.insert(
                path.key().to_string(),
                UpstreamSnapshotForJson {
                    backends: snapshot,
                    requests: counter.load(Ordering::Relaxed),
                },
            );
        }
        hosts.insert(host.key().to_string(), paths);
    }
    serde_json::to_string(&hosts)
}
pub fn upstreams_liveness_json(configured: &UpstreamsDashMap, current: &UpstreamsDashMap) -> Value {
let mut result = serde_json::Map::new();
for host_entry in configured.iter() {
let hostname = host_entry.key().to_string();
let configured_paths = host_entry.value();
let mut paths_json = serde_json::Map::new();
for path_entry in configured_paths.iter() {
let path = path_entry.key().clone();
let (configured_backends, _) = path_entry.value();
let backends_json: Vec<Value> = configured_backends
.iter()
.map(|backend| {
let alive = if let Some(host_map) = current.get(&*hostname) {
if let Some(path_entry) = host_map.get(&*path) {
let list = &path_entry.value().0; // Vec<Arc<InnerMap>>
list.iter().any(|b| b.address == backend.address && b.port == backend.port)
} else {
false
}
} else {
false
};
json!({
"address": &*backend.address,
"port": backend.port,
"alive": alive
})
})
.collect();
paths_json.insert(
path.to_string(),
json!({
"backends": backends_json
}),
);
}
result.insert(hostname, Value::Object(paths_json));
}
Value::Object(result)
}
/// Builds `"{prefix}{host}:{port}{uri}"` when `val` carries a host,
/// `None` otherwise.
///
/// Fixes: the previous fixed `with_capacity(32)` underestimated typical
/// host+port+uri lengths and forced a mid-build reallocation — the exact
/// capacity is known up front. Single-char append uses `push(':')`
/// (clippy `single_char_add_str`).
#[allow(dead_code)]
pub fn prepend(prefix: &str, val: &Option<Arc<str>>, uri: &str, port: &str) -> Option<String> {
    val.as_ref().map(|host| {
        let mut buf = String::with_capacity(prefix.len() + host.len() + 1 + port.len() + uri.len());
        buf.push_str(prefix);
        buf.push_str(host);
        buf.push(':');
        buf.push_str(port);
        buf.push_str(uri);
        buf
    })
}

View File

@@ -1,4 +1,5 @@
use crate::utils::discovery::{APIUpstreamProvider, ConsulProvider, Discovery, FromFileProvider}; use crate::utils::discovery::{APIUpstreamProvider, ConsulProvider, Discovery, FromFileProvider, KubernetesProvider};
use crate::utils::parceyaml::load_configuration;
use crate::utils::structs::Configuration; use crate::utils::structs::Configuration;
use crate::utils::tools::*; use crate::utils::tools::*;
use crate::utils::*; use crate::utils::*;
@@ -6,8 +7,8 @@ use crate::web::proxyhttp::LB;
use async_trait::async_trait; use async_trait::async_trait;
use dashmap::DashMap; use dashmap::DashMap;
use futures::channel::mpsc; use futures::channel::mpsc;
use futures::StreamExt; use futures::{SinkExt, StreamExt};
use log::info; use log::{error, info};
use pingora_core::server::ShutdownWatch; use pingora_core::server::ShutdownWatch;
use pingora_core::services::background::BackgroundService; use pingora_core::services::background::BackgroundService;
use std::sync::Arc; use std::sync::Arc;
@@ -15,32 +16,53 @@ use std::sync::Arc;
#[async_trait] #[async_trait]
impl BackgroundService for LB { impl BackgroundService for LB {
async fn start(&self, mut shutdown: ShutdownWatch) { async fn start(&self, mut shutdown: ShutdownWatch) {
info!("Starting background service"); info!("Starting background service"); // tx: Sender<Configuration>
let (tx, mut rx) = mpsc::channel::<Configuration>(0); let (mut tx, mut rx) = mpsc::channel::<Configuration>(1);
let tx_api = tx.clone();
let tx_file = tx.clone(); let config = load_configuration(self.config.upstreams_conf.clone().as_str(), "filepath")
let tx_consul = tx.clone(); .await
.0
.expect("Failed to load configuration");
match config.typecfg.as_str() {
"file" => {
info!("Running File discovery, requested type is: {}", config.typecfg);
tx.send(config).await.unwrap();
let file_load = FromFileProvider { let file_load = FromFileProvider {
path: self.config.upstreams_conf.clone(), path: self.config.upstreams_conf.clone(),
}; };
let consul_load = ConsulProvider { let _ = tokio::spawn(async move { file_load.start(tx).await });
path: self.config.upstreams_conf.clone(), }
}; "kubernetes" => {
info!("Running Kubernetes discovery, requested type is: {}", config.typecfg);
let _ = tokio::spawn(async move { file_load.start(tx_file).await }); let cf = Arc::from(config);
let _ = tokio::spawn(async move { consul_load.start(tx_consul).await }); let kuber_load = KubernetesProvider { config: cf.clone() };
// let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone())); let _ = tokio::spawn(async move { kuber_load.start(tx).await });
// let _ = tokio::spawn(tls::watch_certs(self.config.proxy_certificates.clone().unwrap(), self.cert_tx.clone())).await; }
"consul" => {
info!("Running Consul discovery, requested type is: {}", config.typecfg);
let cf = Arc::from(config);
let consul_load = ConsulProvider { config: cf.clone() };
let _ = tokio::spawn(async move { consul_load.start(tx).await });
}
_ => {
error!("Unknown discovery type: {}", config.typecfg);
}
}
let api_load = APIUpstreamProvider { let api_load = APIUpstreamProvider {
address: self.config.config_address.clone(), address: self.config.config_address.clone(),
masterkey: self.config.master_key.clone(), masterkey: self.config.master_key.clone(),
tls_address: self.config.config_tls_address.clone(), config_api_enabled: self.config.config_api_enabled.clone(),
tls_certificate: self.config.config_tls_certificate.clone(), // tls_address: self.config.config_tls_address.clone(),
tls_key_file: self.config.config_tls_key_file.clone(), // tls_certificate: self.config.config_tls_certificate.clone(),
// tls_key_file: self.config.config_tls_key_file.clone(),
file_server_address: self.config.file_server_address.clone(),
file_server_folder: self.config.file_server_folder.clone(),
current_upstreams: self.ump_upst.clone(),
full_upstreams: self.ump_full.clone(),
}; };
let tx_api = tx.clone(); // let tx_api = tx.clone();
let _ = tokio::spawn(async move { api_load.start(tx_api).await }); let _ = tokio::spawn(async move { api_load.start(tx_api).await });
let uu = self.ump_upst.clone(); let uu = self.ump_upst.clone();
@@ -61,26 +83,43 @@ impl BackgroundService for LB {
clone_dashmap_into(&ss.upstreams, &self.ump_upst); clone_dashmap_into(&ss.upstreams, &self.ump_upst);
let current = self.extraparams.load_full(); let current = self.extraparams.load_full();
let mut new = (*current).clone(); let mut new = (*current).clone();
new.sticky_sessions = ss.extraparams.sticky_sessions;
new.to_https = ss.extraparams.to_https; new.to_https = ss.extraparams.to_https;
new.sticky_sessions = ss.extraparams.sticky_sessions;
new.authentication = ss.extraparams.authentication.clone(); new.authentication = ss.extraparams.authentication.clone();
new.rate_limit = ss.extraparams.rate_limit;
self.extraparams.store(Arc::new(new)); self.extraparams.store(Arc::new(new));
self.headers.clear(); self.client_headers.clear();
self.server_headers.clear();
for entry in ss.upstreams.iter() { for entry in ss.upstreams.iter() {
let global_key = entry.key().clone(); let global_key = entry.key().clone();
let global_values = DashMap::new(); let client_global_values = DashMap::new();
let mut target_entry = ss.headers.entry(global_key).or_insert_with(DashMap::new); let server_global_values = DashMap::new();
target_entry.extend(global_values);
self.headers.insert(target_entry.key().to_owned(), target_entry.value().to_owned()); let mut client_target_entry = ss.client_headers.entry(global_key.clone()).or_insert_with(DashMap::new);
client_target_entry.extend(client_global_values);
let mut server_target_entry = ss.server_headers.entry(global_key).or_insert_with(DashMap::new);
server_target_entry.extend(server_global_values);
self.server_headers.insert(server_target_entry.key().to_owned(), server_target_entry.value().to_owned());
} }
for path in ss.headers.iter() { for path in ss.client_headers.iter() {
let path_key = path.key().clone(); let path_key = path.key().clone();
let path_headers = path.value().clone(); let path_headers = path.value().clone();
self.headers.insert(path_key.clone(), path_headers); self.client_headers.insert(path_key.clone(), path_headers);
if let Some(global_headers) = ss.headers.get("GLOBAL_HEADERS") { if let Some(global_headers) = ss.client_headers.get("GLOBAL_CLIENT_HEADERS") {
if let Some(existing_headers) = self.headers.get_mut(&path_key) { if let Some(existing_headers) = self.client_headers.get_mut(&path_key) {
merge_headers(&existing_headers, &global_headers);
}
}
}
for path in ss.server_headers.iter() {
let path_key = path.key().clone();
let path_headers = path.value().clone();
self.server_headers.insert(path_key.clone(), path_headers);
if let Some(global_headers) = ss.server_headers.get("GLOBAL_SERVER_HEADERS") {
if let Some(existing_headers) = self.server_headers.get_mut(&path_key) {
merge_headers(&existing_headers, &global_headers); merge_headers(&existing_headers, &global_headers);
} }
} }

View File

@@ -2,77 +2,120 @@ use crate::utils::structs::InnerMap;
use crate::web::proxyhttp::LB; use crate::web::proxyhttp::LB;
use async_trait::async_trait; use async_trait::async_trait;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct GetHostsReturHeaders {
pub client_headers: Option<Vec<(String, Arc<str>)>>,
pub server_headers: Option<Vec<(String, Arc<str>)>>,
}
#[async_trait] #[async_trait]
pub trait GetHost { pub trait GetHost {
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap>; fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<Arc<InnerMap>>;
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>>;
fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders>;
// fn get_upstreams(&self) -> Arc<UpstreamsDashMap>;
} }
#[async_trait] #[async_trait]
impl GetHost for LB { impl GetHost for LB {
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<InnerMap> { // fn get_upstreams(&self) -> Arc<UpstreamsDashMap> {
// self.ump_full.clone()
// }
fn get_host(&self, peer: &str, path: &str, backend_id: Option<&str>) -> Option<Arc<InnerMap>> {
if let Some(b) = backend_id { if let Some(b) = backend_id {
if let Some(bb) = self.ump_byid.get(b) { if let Some(bb) = self.ump_byid.get(b) {
// println!("BIB :===> {:?}", Some(bb.value()));
return Some(bb.value().clone()); return Some(bb.value().clone());
} }
} }
let host_entry = self.ump_upst.get(peer)?; let host_entry = self.ump_upst.get(peer)?;
let mut current_path = path.to_string(); let mut end = path.len();
let mut best_match: Option<InnerMap> = None;
loop { loop {
if let Some(entry) = host_entry.get(&current_path) { let slice = &path[..end];
if let Some(entry) = host_entry.get(slice) {
let (servers, index) = entry.value(); let (servers, index) = entry.value();
if !servers.is_empty() { if !servers.is_empty() {
let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len(); let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len();
best_match = Some(servers[idx].clone()); return Some(servers[idx].clone());
break;
} }
} }
if let Some(pos) = current_path.rfind('/') { if let Some(pos) = slice.rfind('/') {
current_path.truncate(pos); end = pos;
} else { } else {
break; break;
} }
} }
if best_match.is_none() {
if let Some(entry) = host_entry.get("/") { if let Some(entry) = host_entry.get("/") {
let (servers, index) = entry.value(); let (servers, index) = entry.value();
if !servers.is_empty() { if !servers.is_empty() {
let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len(); let idx = index.fetch_add(1, Ordering::Relaxed) % servers.len();
best_match = Some(servers[idx].clone()); return Some(servers[idx].clone());
} }
} }
None
} }
// println!("BMT :===> {:?}", best_match);
best_match
}
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>> {
let host_entry = self.headers.get(peer)?;
let mut current_path = path.to_string();
let mut best_match: Option<Vec<(String, String)>> = None;
fn get_header(&self, peer: &str, path: &str) -> Option<GetHostsReturHeaders> {
let client_entry = self.client_headers.get(peer);
let server_entry = self.server_headers.get(peer);
if client_entry.is_none() && server_entry.is_none() {
return None;
}
let mut current_path = path;
let mut clnt_match = None;
if let Some(client_entry) = client_entry {
loop { loop {
if let Some(entry) = host_entry.get(&current_path) { if let Some(entry) = client_entry.get(current_path) {
if !entry.value().is_empty() { if !entry.value().is_empty() {
best_match = Some(entry.value().clone()); clnt_match = Some(entry.value().clone());
break; break;
} }
} }
if current_path == "/" {
break;
}
if let Some(pos) = current_path.rfind('/') { if let Some(pos) = current_path.rfind('/') {
current_path.truncate(pos); current_path = if pos == 0 { "/" } else { &current_path[..pos] };
} else { } else {
break; break;
} }
} }
if best_match.is_none() { }
if let Some(entry) = host_entry.get("/") { current_path = path;
let mut serv_match = None;
if let Some(server_entry) = server_entry {
loop {
if let Some(entry) = server_entry.get(current_path) {
if !entry.value().is_empty() { if !entry.value().is_empty() {
best_match = Some(entry.value().clone()); serv_match = Some(entry.value().clone());
break;
}
}
if current_path == "/" {
if let Some(entry) = server_entry.get("/") {
if !entry.value().is_empty() {
serv_match = Some(entry.value().clone());
break;
}
}
break;
}
if let Some(pos) = current_path.rfind('/') {
current_path = if pos == 0 { "/" } else { &current_path[..pos] };
} else {
break;
} }
} }
} }
best_match let result = GetHostsReturHeaders {
client_headers: clnt_match,
server_headers: serv_match,
};
if result.client_headers.is_some() || result.server_headers.is_some() {
Some(result)
} else {
None
}
} }
} }

View File

@@ -1,117 +1,209 @@
use crate::utils::auth::authenticate; use crate::utils::auth::authenticate;
use crate::utils::metrics::*; use crate::utils::metrics::*;
use crate::utils::structs::{AppConfig, Extraparams, Headers, UpstreamsDashMap, UpstreamsIdMap}; use crate::utils::structs::{AppConfig, Extraparams, Headers, InnerMap, UpstreamsDashMap, UpstreamsIdMap};
use crate::web::gethosts::GetHost; use crate::web::gethosts::{GetHost, GetHostsReturHeaders};
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use async_trait::async_trait; use async_trait::async_trait;
use axum::body::Bytes; use axum::body::Bytes;
use log::{debug, warn}; use dashmap::DashMap;
use log::{debug, error, warn};
use pingora::http::{RequestHeader, ResponseHeader, StatusCode}; use pingora::http::{RequestHeader, ResponseHeader, StatusCode};
use pingora::prelude::*; use pingora::prelude::*;
use pingora::ErrorSource::Upstream; use pingora::ErrorSource::Upstream;
use pingora_core::listeners::ALPN; use pingora_core::listeners::ALPN;
use pingora_core::prelude::HttpPeer; use pingora_core::prelude::HttpPeer;
// use pingora_core::protocols::TcpKeepalive;
use pingora_limits::rate::Rate;
use pingora_proxy::{ProxyHttp, Session}; use pingora_proxy::{ProxyHttp, Session};
use std::sync::Arc; // use prometheus::{register_int_counter, IntCounter};
use sha2::{Digest, Sha256};
use std::cell::RefCell;
use std::fmt::Write;
use std::sync::{Arc, LazyLock};
use std::time::Duration;
use tokio::time::Instant; use tokio::time::Instant;
// static RATE_LIMITER: Lazy<Rate> = Lazy::new(|| Rate::new(Duration::from_secs(1)));
// static REVERSE_STORE: Lazy<DashMap<String, String>> = Lazy::new(|| DashMap::new());
static REVERSE_STORE: LazyLock<DashMap<String, String>> = LazyLock::new(|| DashMap::new());
thread_local! {static IP_BUFFER: RefCell<String> = RefCell::new(String::with_capacity(50));}
pub static RATE_LIMITER: LazyLock<Rate> = LazyLock::new(|| Rate::new(Duration::from_secs(1)));
#[derive(Clone)] #[derive(Clone)]
pub struct LB { pub struct LB {
pub ump_upst: Arc<UpstreamsDashMap>, pub ump_upst: Arc<UpstreamsDashMap>,
pub ump_full: Arc<UpstreamsDashMap>, pub ump_full: Arc<UpstreamsDashMap>,
pub ump_byid: Arc<UpstreamsIdMap>, pub ump_byid: Arc<UpstreamsIdMap>,
pub headers: Arc<Headers>, pub client_headers: Arc<Headers>,
pub server_headers: Arc<Headers>,
pub config: Arc<AppConfig>, pub config: Arc<AppConfig>,
pub extraparams: Arc<ArcSwap<Extraparams>>, pub extraparams: Arc<ArcSwap<Extraparams>>,
} }
pub struct Context { pub struct Context {
backend_id: String, backend_id: Option<String>,
to_https: bool, sticky_sessions: bool,
redirect_to: String, // redirect_to: Option<String>,
start_time: Instant, start_time: Instant,
hostname: Option<Arc<str>>,
upstream_peer: Option<Arc<InnerMap>>,
extraparams: arc_swap::Guard<Arc<Extraparams>>,
client_headers: Option<Vec<(String, Arc<str>)>>,
} }
#[async_trait] #[async_trait]
impl ProxyHttp for LB { impl ProxyHttp for LB {
// type CTX = ();
// fn new_ctx(&self) -> Self::CTX {}
type CTX = Context; type CTX = Context;
fn new_ctx(&self) -> Self::CTX { fn new_ctx(&self) -> Self::CTX {
Context { Context {
backend_id: String::new(), backend_id: None,
to_https: false, sticky_sessions: false,
redirect_to: String::new(), // redirect_to: None,
start_time: Instant::now(), start_time: Instant::now(),
hostname: None,
upstream_peer: None,
extraparams: self.extraparams.load(),
client_headers: None,
} }
} }
async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> { async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
if let Some(auth) = self.extraparams.load().authentication.get("authorization") { let hostname = return_header_host_from_upstream(session, &self.ump_upst);
let authenticated = authenticate(&auth.value(), &session); _ctx.hostname = hostname;
if !authenticated { let mut backend_id = None;
if _ctx.extraparams.sticky_sessions {
if let Some(cookies) = session.req_header().headers.get("cookie") {
if let Ok(cookie_str) = cookies.to_str() {
if let Some(pos) = cookie_str.find("backend_id=") {
let value = &cookie_str[pos + "backend_id=".len()..];
let end = value.find(';').unwrap_or(value.len());
backend_id = Some(&value[..end]);
}
}
}
}
match _ctx.hostname.as_ref() {
None => return Ok(false),
Some(host) => {
let optioninnermap = self.get_host(host, session.req_header().uri.path(), backend_id);
match optioninnermap {
None => return Ok(false),
Some(ref innermap) => {
if let Some(auth) = _ctx.extraparams.authentication.as_ref().or(innermap.authorization.as_ref()) {
if !authenticate(&auth.auth_type, &auth.auth_cred, session).await {
let _ = session.respond_error(401).await; let _ = session.respond_error(401).await;
warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path().to_string()); warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path());
return Ok(true); return Ok(true);
} }
}; }
if let Some(rate) = innermap.rate_limit.or(_ctx.extraparams.rate_limit) {
let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip());
let curr_window_requests = RATE_LIMITER.observe(&rate_key, 1);
if curr_window_requests > rate {
let header = ResponseHeader::build(429, None)?;
session.set_keepalive(None);
session.write_response_header(Box::new(header), true).await?;
debug!("Rate limited: {:?}, {}", rate_key, rate);
return Ok(true);
}
}
if let Some(redirect_to) = &innermap.redirect_to {
let uri = session.req_header().uri.path();
let capacity = redirect_to.len() + uri.len();
let mut s = String::with_capacity(capacity);
s.push_str(redirect_to);
s.push_str(uri);
let mut resp = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
resp.insert_header("Location", s)?;
resp.insert_header("Content-Length", "0")?;
session.write_response_header(Box::new(resp), true).await?;
return Ok(true);
}
if _ctx.extraparams.to_https.unwrap_or(false) || innermap.to_https {
if let Some(stream) = session.stream() {
if stream.get_ssl().is_none() {
if let Some(host) = _ctx.hostname.as_ref() {
let port = self.config.proxy_port_tls.as_deref().unwrap_or("443");
let uri = session.req_header().uri.path();
let capacity = host.len() + uri.len() + 8;
let mut s = String::with_capacity(capacity);
s.push_str("https://");
s.push_str(host);
if port != "443" {
s.push_str(":");
s.push_str(&port);
}
s.push_str(uri);
let mut resp = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?;
resp.insert_header("Location", s)?;
resp.insert_header("Content-Length", "0")?;
session.write_response_header(Box::new(resp), true).await?;
return Ok(true);
}
}
}
}
}
}
_ctx.upstream_peer = optioninnermap;
}
}
Ok(false) Ok(false)
} }
async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> { async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
let host_name = return_header_host(&session); match ctx.hostname.as_ref() {
match host_name { Some(hostname) => match ctx.upstream_peer.as_ref() {
Some(hostname) => { Some(innermap) => {
// session.req_header_mut().headers.insert("X-Host-Name", host.to_string().parse().unwrap()); let mut peer = Box::new(HttpPeer::new((&*innermap.address, innermap.port), innermap.is_ssl, hostname.to_string()));
let mut backend_id = None;
if self.extraparams.load().sticky_sessions { if innermap.is_http2 {
if let Some(cookies) = session.req_header().headers.get("cookie") {
if let Ok(cookie_str) = cookies.to_str() {
for cookie in cookie_str.split(';') {
let trimmed = cookie.trim();
if let Some(value) = trimmed.strip_prefix("backend_id=") {
backend_id = Some(value);
break;
}
}
}
}
}
let ddr = self.get_host(hostname, hostname, backend_id);
match ddr {
Some((address, port, ssl, is_h2, to_https)) => {
let mut peer = Box::new(HttpPeer::new((address.clone(), port.clone()), ssl, String::new()));
// if session.is_http2() {
if is_h2 {
peer.options.alpn = ALPN::H2; peer.options.alpn = ALPN::H2;
} }
if ssl { if innermap.is_ssl {
peer.sni = hostname.to_string();
peer.options.verify_cert = false; peer.options.verify_cert = false;
peer.options.verify_hostname = false; peer.options.verify_hostname = false;
} }
// println!("{}, {}, alpn {}, h2 {:?}, to_https {}", hostname, address.as_str(), peer.options.alpn, is_h2, _to_https); /*
if self.extraparams.load().to_https.unwrap_or(false) || to_https { Experimental optionsv
if let Some(stream) = session.stream() { The following TCP optimizations were tested but caused performance degrade under heavy load:
if stream.get_ssl().is_none() { peer.options.tcp_keepalive = Some(TcpKeepalive {
if let Some(addr) = session.server_addr() { idle: Duration::from_secs(60),
if let Some((host, _)) = addr.to_string().split_once(':') { interval: Duration::from_secs(10),
let uri = session.req_header().uri.path_and_query().map_or("/", |pq| pq.as_str()); count: 5,
let port = self.config.proxy_port_tls.unwrap_or(403); user_timeout: Duration::from_secs(30),
ctx.to_https = true; });
ctx.redirect_to = format!("https://{}:{}{}", host, port, uri);
}
}
}
}
}
ctx.backend_id = format!("{}:{}:{}", address.clone(), port.clone(), ssl); peer.options.idle_timeout = Some(Duration::from_secs(300));
peer.options.tcp_recv_buf = Some(128 * 1024);
End of experimental options
*/
if ctx.extraparams.sticky_sessions {
let mut s = String::with_capacity(64);
write!(
&mut s,
"{}:{}:{}:{}:{}:{}:{}:{:?}",
hostname,
innermap.address,
innermap.port,
innermap.is_http2,
innermap.to_https,
innermap.rate_limit.unwrap_or_default(),
innermap.healthcheck.unwrap_or_default(),
innermap.authorization
)
.unwrap_or(());
ctx.backend_id = Some(s);
ctx.sticky_sessions = true;
}
Ok(peer) Ok(peer)
} }
None => { None => {
session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error"); if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await {
error!("Failed to send error response: {:?}", e);
}
Err(Box::new(Error { Err(Box::new(Error {
etype: HTTPStatus(502), etype: HTTPStatus(502),
esource: Upstream, esource: Upstream,
@@ -120,10 +212,11 @@ impl ProxyHttp for LB {
context: Option::from(ImmutStr::Static("Upstream not found")), context: Option::from(ImmutStr::Static("Upstream not found")),
})) }))
} }
} },
}
None => { None => {
session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await.expect("Failed to send error"); if let Err(e) = session.respond_error_with_body(502, Bytes::from("502 Bad Gateway\n")).await {
error!("Failed to send error response: {:?}", e);
}
Err(Box::new(Error { Err(Box::new(Error {
etype: HTTPStatus(502), etype: HTTPStatus(502),
esource: Upstream, esource: Upstream,
@@ -135,66 +228,69 @@ impl ProxyHttp for LB {
} }
} }
async fn upstream_request_filter(&self, session: &mut Session, _upstream_request: &mut RequestHeader, _ctx: &mut Self::CTX) -> Result<()> { async fn upstream_request_filter(&self, session: &mut Session, upstream_request: &mut RequestHeader, ctx: &mut Self::CTX) -> Result<()> {
match session.client_addr() { // if let Some(hostname) = ctx.hostname.as_deref() {
Some(ip) => { // upstream_request.insert_header("Host", hostname)?;
let inet = ip.as_inet(); // }
match inet {
Some(addr) => { if let Some(client_ip) = session.client_addr() {
_upstream_request IP_BUFFER.with(|buffer| {
.insert_header("X-Forwarded-For", addr.to_string().split(':').collect::<Vec<&str>>()[0]) let mut buf = buffer.borrow_mut();
.unwrap(); buf.clear();
write!(buf, "{}", client_ip).unwrap_or(());
upstream_request.append_header("X-Forwarded-For", buf.as_str()).unwrap_or(false);
});
} }
None => warn!("Malformed Client IP: {:?}", inet),
let hostname = ctx.hostname.as_deref().unwrap_or("localhost");
let path = session.req_header().uri.path();
let GetHostsReturHeaders { server_headers, client_headers } = match self.get_header(hostname, path) {
Some(h) => h,
None => return Ok(()),
};
if let Some(sh) = server_headers {
for (k, v) in sh {
upstream_request.insert_header(k, v.as_ref())?;
} }
} }
None => { if let Some(ch) = client_headers {
warn!("Cannot detect client IP"); ctx.client_headers = Some(ch);
}
} }
Ok(()) Ok(())
} }
async fn response_filter(&self, _session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> {
if ctx.sticky_sessions {
if let Some(bid) = &ctx.backend_id {
let tt = if let Some(existing) = REVERSE_STORE.get(bid) {
existing.value().clone()
} else {
let mut hasher = Sha256::new();
hasher.update(bid.as_bytes());
let hash = hasher.finalize();
let hex_hash = base16ct::lower::encode_string(&hash);
let hh = hex_hash[0..50].to_string();
REVERSE_STORE.insert(bid.clone(), hh.clone());
REVERSE_STORE.insert(hh.clone(), bid.clone());
hh
};
// let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", tt));
let mut buf = String::with_capacity(80);
buf.push_str("backend_id=");
buf.push_str(&tt);
buf.push_str("; Path=/; Max-Age=600; HttpOnly; SameSite=Lax");
let _ = _upstream_response.insert_header("set-cookie", buf.as_str());
}
}
async fn response_filter(&self, session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> { if let Some(client_headers) = &ctx.client_headers {
// _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap(); for (k, v) in client_headers.iter() {
if self.extraparams.load().sticky_sessions { _upstream_response.append_header(k.clone(), v.as_ref())?;
let backend_id = ctx.backend_id.clone();
if let Some(bid) = self.ump_byid.get(&backend_id) {
let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.0));
} }
} }
if ctx.to_https {
let mut redirect_response = ResponseHeader::build(StatusCode::MOVED_PERMANENTLY, None)?; // session.set_keepalive(Some(300));
redirect_response.insert_header("Location", ctx.redirect_to.clone())?; // println!("session.get_keepalive: {:?}", session.get_keepalive());
redirect_response.insert_header("Content-Length", "0")?;
session.write_response_header(Box::new(redirect_response), false).await?;
}
match return_header_host(&session) {
Some(host) => {
let path = session.req_header().uri.path();
let host_header = host;
let split_header = host_header.split_once(':');
match split_header {
Some(sh) => {
let yoyo = self.get_header(sh.0, path);
for k in yoyo.iter() {
for t in k.iter() {
_upstream_response.insert_header(t.0.clone(), t.1.clone()).unwrap();
}
}
}
None => {
let yoyo = self.get_header(host_header, path);
for k in yoyo.iter() {
for t in k.iter() {
_upstream_response.insert_header(t.0.clone(), t.1.clone()).unwrap();
}
}
}
}
}
None => {}
}
Ok(()) Ok(())
} }
@@ -202,35 +298,23 @@ impl ProxyHttp for LB {
let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16()); let response_code = session.response_written().map_or(0, |resp| resp.status.as_u16());
debug!("{}, response code: {response_code}", self.request_summary(session, ctx)); debug!("{}, response code: {response_code}", self.request_summary(session, ctx));
let m = &MetricTypes { let m = &MetricTypes {
method: session.req_header().method.to_string(), method: session.req_header().method.clone(),
code: session.response_written().map(|resp| resp.status.as_str().to_owned()).unwrap_or("0".to_string()), code: session.response_written().map(|resp| resp.status),
latency: ctx.start_time.elapsed(), latency: ctx.start_time.elapsed(),
version: session.req_header().version, version: session.req_header().version,
// upstream: ctx.hostname.clone().unwrap_or(Arc::from("localhost")),
upstream: ctx.hostname.take().unwrap_or_else(|| Arc::from("localhost")),
}; };
calc_metrics(m); calc_metrics(m);
} }
} }
fn return_header_host(session: &Session) -> Option<&str> { fn return_header_host_from_upstream(session: &Session, ump_upst: &UpstreamsDashMap) -> Option<Arc<str>> {
if session.is_http2() { let host_str = if session.is_http2() {
match session.req_header().uri.host() { session.req_header().uri.host()?
Some(host) => Option::from(host),
None => None,
}
} else { } else {
match session.req_header().headers.get("host") { let h = session.req_header().headers.get("host")?.to_str().ok()?;
Some(host) => { h.split_once(':').map_or(h, |(host, _)| host)
let header_host = host.to_str().unwrap().splitn(2, ':').collect::<Vec<&str>>(); };
Option::from(header_host[0]) ump_upst.get(host_str).map(|entry| entry.key().clone())
} }
None => None,
}
}
}
// fn return_no_host(inp: &Option<(String, u16)>) -> Box<HttpPeer> {
// match inp {
// Some(t) => Box::new(HttpPeer::new(t, false, String::new())),
// None => Box::new(HttpPeer::new(("0.0.0.0", 0), false, String::new())),
// }
// }

View File

@@ -1,10 +1,12 @@
// use rustls::crypto::ring::default_provider; // use rustls::crypto::ring::default_provider;
use crate::tls::grades;
use crate::tls::load;
use crate::tls::load::CertificateConfig;
use crate::utils::structs::Extraparams; use crate::utils::structs::Extraparams;
use crate::utils::tls;
use crate::utils::tls::CertificateConfig;
use crate::utils::tools::*; use crate::utils::tools::*;
use crate::web::proxyhttp::LB; use crate::web::proxyhttp::LB;
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use ctrlc;
use dashmap::DashMap; use dashmap::DashMap;
use log::info; use log::info;
use pingora::tls::ssl::{SslAlert, SslRef}; use pingora::tls::ssl::{SslAlert, SslRef};
@@ -13,8 +15,7 @@ use pingora_core::prelude::{background_service, Opt};
use pingora_core::server::Server; use pingora_core::server::Server;
use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc; use std::sync::Arc;
use std::{env, thread}; use std::thread;
pub fn run() { pub fn run() {
// default_provider().install_default().expect("Failed to install rustls crypto provider"); // default_provider().install_default().expect("Failed to install rustls crypto provider");
let parameters = Some(Opt::parse_args()).unwrap(); let parameters = Some(Opt::parse_args()).unwrap();
@@ -27,89 +28,71 @@ pub fn run() {
let uf_config = Arc::new(DashMap::new()); let uf_config = Arc::new(DashMap::new());
let ff_config = Arc::new(DashMap::new()); let ff_config = Arc::new(DashMap::new());
let im_config = Arc::new(DashMap::new()); let im_config = Arc::new(DashMap::new());
let hh_config = Arc::new(DashMap::new()); let ch_config = Arc::new(DashMap::new());
let sh_config = Arc::new(DashMap::new());
let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams { let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams {
sticky_sessions: false,
to_https: None, to_https: None,
authentication: DashMap::new(), sticky_sessions: false,
authentication: None,
rate_limit: None,
})); }));
let cfg = Arc::new(maincfg); let cfg = Arc::new(maincfg);
let lb = LB { let lb = LB {
ump_upst: uf_config.clone(), ump_upst: uf_config,
ump_full: ff_config.clone(), ump_full: ff_config,
ump_byid: im_config.clone(), ump_byid: im_config,
config: cfg.clone(), config: cfg.clone(),
headers: hh_config.clone(), client_headers: ch_config,
extraparams: ec_config.clone(), server_headers: sh_config,
extraparams: ec_config,
}; };
// let bg = LB {
// ump_upst: uf_config.clone(),
// ump_full: ff_config.clone(),
// ump_byid: im_config.clone(),
// config: cfg.clone(),
// headers: hh_config.clone(),
// extraparams: ec_config.clone(),
// config_rx: Arc::from(Mutex::new(rx)),
// };
// env_logger::Env::new(); let grade = cfg.proxy_tls_grade.clone().unwrap_or("medium".to_string());
// env_logger::init(); info!("TLS grade set to: [ {} ]", grade);
let log_level = cfg.log_level.clone();
unsafe {
match log_level.as_str() {
"info" => env::set_var("RUST_LOG", "info"),
"error" => env::set_var("RUST_LOG", "error"),
"warn" => env::set_var("RUST_LOG", "warn"),
"debug" => env::set_var("RUST_LOG", "debug"),
"trace" => env::set_var("RUST_LOG", "trace"),
"off" => env::set_var("RUST_LOG", "off"),
_ => {
println!("Error reading log level, defaulting to: INFO");
env::set_var("RUST_LOG", "info")
}
}
}
env_logger::builder().init();
let bg_srvc = background_service("bgsrvc", lb.clone()); let bg_srvc = background_service("bgsrvc", lb.clone());
let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb.clone()); let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb.clone());
let bind_address_http = cfg.proxy_address_http.clone(); let bind_address_http = cfg.proxy_address_http.clone();
let bind_address_tls = cfg.proxy_address_tls.clone(); let bind_address_tls = cfg.proxy_address_tls.clone();
check_priv(bind_address_http.as_str());
match bind_address_tls { match bind_address_tls {
Some(bind_address_tls) => { Some(bind_address_tls) => {
check_priv(bind_address_tls.as_str());
let (tx, rx): (Sender<Vec<CertificateConfig>>, Receiver<Vec<CertificateConfig>>) = channel(); let (tx, rx): (Sender<Vec<CertificateConfig>>, Receiver<Vec<CertificateConfig>>) = channel();
let certs_path = cfg.proxy_certificates.clone().unwrap(); let certs_path = cfg.proxy_certificates.clone().unwrap();
thread::spawn(move || { thread::spawn(move || {
watch_folder(certs_path, tx).unwrap(); watch_folder(certs_path, tx).unwrap();
}); });
let certificate_configs = rx.recv().unwrap(); let certificate_configs = rx.recv().unwrap();
let first_set = tls::Certificates::new(&certificate_configs).unwrap_or_else(|| panic!("Unable to load initial certificate info")); let first_set = load::Certificates::new(&certificate_configs, grade.as_str()).unwrap_or_else(|| panic!("Unable to load initial certificate info"));
let certificates = Arc::new(ArcSwap::from_pointee(first_set)); let certificates = Arc::new(ArcSwap::from_pointee(first_set));
let certs_for_callback = certificates.clone(); let certs_for_callback = certificates.clone();
let certs_for_watcher = certificates.clone(); let certs_for_watcher = certificates.clone();
let new_certs = tls::Certificates::new(&certificate_configs); let new_certs = load::Certificates::new(&certificate_configs, grade.as_str());
certs_for_watcher.store(Arc::new(new_certs.unwrap())); certs_for_watcher.store(Arc::new(new_certs.unwrap()));
let mut tls_settings = let mut tls_settings =
TlsSettings::intermediate(&certs_for_callback.load().default_cert_path, &certs_for_callback.load().default_key_path).expect("unable to load or parse cert/key"); TlsSettings::intermediate(&certs_for_callback.load().default_cert_path, &certs_for_callback.load().default_key_path).expect("unable to load or parse cert/key");
grades::set_tsl_grade(&mut tls_settings, grade.as_str());
tls_settings.set_servername_callback(move |ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert| certs_for_callback.load().server_name_callback(ssl_ref, ssl_alert)); tls_settings.set_servername_callback(move |ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert| certs_for_callback.load().server_name_callback(ssl_ref, ssl_alert));
tls_settings.set_alpn_select_callback(tls::prefer_h2); tls_settings.set_alpn_select_callback(grades::prefer_h2);
proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings); proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings);
let certs_for_watcher = certificates.clone(); let certs_for_watcher = certificates.clone();
thread::spawn(move || { thread::spawn(move || {
while let Ok(new_configs) = rx.recv() { while let Ok(new_configs) = rx.recv() {
let new_certs = tls::Certificates::new(&new_configs); let new_certs = load::Certificates::new(&new_configs, grade.as_str());
match new_certs { match new_certs {
Some(new_certs) => { Some(new_certs) => {
certs_for_watcher.store(Arc::new(new_certs)); certs_for_watcher.store(Arc::new(new_certs));
info!("Reload TLS certificates from {}", cfg.proxy_certificates.clone().unwrap())
} }
None => {} None => {}
}; };
@@ -122,5 +105,15 @@ pub fn run() {
proxy.add_tcp(bind_address_http.as_str()); proxy.add_tcp(bind_address_http.as_str());
server.add_service(proxy); server.add_service(proxy);
server.add_service(bg_srvc); server.add_service(bg_srvc);
server.run_forever();
thread::spawn(move || server.run_forever());
if let (Some(user), Some(group)) = (cfg.rungroup.clone(), cfg.runuser.clone()) {
drop_priv(user, group, cfg.proxy_address_http.clone(), cfg.proxy_address_tls.clone());
}
let (tx, rx) = channel();
ctrlc::set_handler(move || tx.send(()).expect("Could not send signal on channel.")).expect("Error setting Ctrl-C handler");
rx.recv().expect("Could not receive from channel.");
info!("Signal received ! Exiting...");
} }

View File

@@ -1,29 +1,27 @@
use crate::utils::discovery::APIUpstreamProvider; use crate::utils::discovery::APIUpstreamProvider;
use crate::utils::structs::Configuration; // use std::net::SocketAddr;
use crate::utils::jwt::Claims;
use crate::utils::structs::{Config, Configuration, UpstreamsDashMap};
use crate::utils::tools::{upstreams_liveness_json, upstreams_to_json};
use axum::body::Body; use axum::body::Body;
use axum::extract::{Query, State}; use axum::extract::{Query, State};
use axum::http::{Response, StatusCode}; use axum::http::{header::HeaderMap, Response, StatusCode};
use axum::response::IntoResponse; use axum::response::IntoResponse;
use axum::routing::{get, post}; use axum::routing::{get, post};
use axum::{Json, Router}; use axum::{Json, Router};
use axum_server::tls_openssl::OpenSSLConfig; // use axum_server::tls_openssl::OpenSSLConfig;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use futures::SinkExt; use futures::SinkExt;
use jsonwebtoken::{encode, EncodingKey, Header}; use jsonwebtoken::{encode, EncodingKey, Header};
use log::{error, info, warn}; use log::{error, info, warn};
use prometheus::{gather, Encoder, TextEncoder}; use prometheus::{gather, Encoder, TextEncoder};
use serde::{Deserialize, Serialize}; use serde::Serialize;
use std::collections::HashMap; use std::collections::HashMap;
use std::net::SocketAddr; use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, SystemTime, UNIX_EPOCH};
use subtle::ConstantTimeEq;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use tower_http::services::ServeDir;
#[derive(Deserialize)]
struct InputKey {
master_key: String,
owner: String,
valid: u64,
}
#[derive(Serialize, Debug)] #[derive(Serialize, Debug)]
struct OutToken { struct OutToken {
@@ -34,13 +32,19 @@ struct OutToken {
struct AppState { struct AppState {
master_key: String, master_key: String,
config_sender: Sender<Configuration>, config_sender: Sender<Configuration>,
config_api_enabled: bool,
current_upstreams: Arc<UpstreamsDashMap>,
full_upstreams: Arc<UpstreamsDashMap>,
} }
#[allow(unused_mut)] #[allow(unused_mut)]
pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>) { pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Configuration>, upstreams_curr: Arc<UpstreamsDashMap>, upstreams_full: Arc<UpstreamsDashMap>) {
let app_state = AppState { let app_state = AppState {
master_key: config.masterkey.clone(), master_key: config.masterkey.clone(),
config_sender: to_return.clone(), config_sender: to_return.clone(),
config_api_enabled: config.config_api_enabled.clone(),
current_upstreams: upstreams_curr,
full_upstreams: upstreams_full,
}; };
let app = Router::new() let app = Router::new()
// .route("/{*wildcard}", get(senderror)) // .route("/{*wildcard}", get(senderror))
@@ -48,21 +52,30 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
// .route("/{*wildcard}", put(senderror)) // .route("/{*wildcard}", put(senderror))
// .route("/{*wildcard}", head(senderror)) // .route("/{*wildcard}", head(senderror))
// .route("/{*wildcard}", delete(senderror)) // .route("/{*wildcard}", delete(senderror))
// .nest_service("/static", static_files)
.route("/jwt", post(jwt_gen)) .route("/jwt", post(jwt_gen))
.route("/conf", post(conf)) .route("/conf", post(conf))
.route("/metrics", get(metrics)) .route("/metrics", get(metrics))
.route("/status", get(status))
.with_state(app_state); .with_state(app_state);
if let Some(value) = &config.tls_address { // if let Some(value) = &config.tls_address {
let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap(); // let cf = OpenSSLConfig::from_pem_file(config.tls_certificate.clone().unwrap(), config.tls_key_file.clone().unwrap()).unwrap();
let addr: SocketAddr = value.parse().expect("Unable to parse socket address"); // let addr: SocketAddr = value.parse().expect("Unable to parse socket address");
let tls_app = app.clone(); // let tls_app = app.clone();
tokio::spawn(async move { // tokio::spawn(async move {
if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await { // if let Err(e) = axum_server::bind_openssl(addr, cf).serve(tls_app.into_make_service()).await {
eprintln!("TLS server failed: {}", e); // eprintln!("TLS server failed: {}", e);
} // }
}); // });
info!("Starting the TLS API server on: {}", value); // info!("Starting the TLS API server on: {}", value);
// }
if let (Some(address), Some(folder)) = (&config.file_server_address, &config.file_server_folder) {
let static_files = ServeDir::new(folder);
let static_serve: Router = Router::new().fallback_service(static_files);
let static_listen = TcpListener::bind(address).await.unwrap();
let _ = tokio::spawn(async move { axum::serve(static_listen, static_serve).await.unwrap() });
} }
let listener = TcpListener::bind(config.address.clone()).await.unwrap(); let listener = TcpListener::bind(config.address.clone()).await.unwrap();
@@ -70,29 +83,51 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
axum::serve(listener, app).await.unwrap(); axum::serve(listener, app).await.unwrap();
} }
async fn conf(State(mut st): State<AppState>, Query(params): Query<HashMap<String, String>>, content: String) -> impl IntoResponse { async fn conf(State(st): State<AppState>, Query(params): Query<HashMap<String, String>>, headers: HeaderMap, content: String) -> impl IntoResponse {
if let Some(s) = params.get("key") { if !st.config_api_enabled {
if s.to_owned() == st.master_key.to_owned() { return Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Config API is disabled !\n")).unwrap();
if let Some(serverlist) = crate::utils::parceyaml::load_configuration(content.as_str(), "content") { }
st.config_sender.send(serverlist).await.unwrap(); if let Some(s) = headers.get("x-api-key").and_then(|v| v.to_str().ok()).or(params.get("key").map(|s| s.as_str())) {
return Response::builder().status(StatusCode::OK).body(Body::from("Config, conf file, updated !\n")).unwrap(); if s.as_bytes().ct_eq(st.master_key.as_bytes()).into() {
} else { let strcontent = content.as_str();
return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("Failed to parse config!\n")).unwrap(); let parsed = serde_yml::from_str::<Config>(strcontent);
}; match parsed {
Ok(_) => {
let _ = tokio::spawn(async move { apply_config(content.as_str(), st).await });
return Response::builder().status(StatusCode::OK).body(Body::from("Accepted! Applying in background\n")).unwrap();
}
Err(err) => {
error!("Failed to parse upstreams file: {}", err);
return Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from(format!("Failed: {}\n", err))).unwrap();
}
}
} }
} }
Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap() Response::builder().status(StatusCode::FORBIDDEN).body(Body::from("Access Denied !\n")).unwrap()
} }
async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -> (StatusCode, Json<OutToken>) { async fn apply_config(content: &str, mut st: AppState) {
let sl = crate::utils::parceyaml::load_configuration(content, "content").await;
if let Some(serverlist) = sl.0 {
let _ = st.config_sender.send(serverlist).await;
}
}
async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<Claims>) -> (StatusCode, Json<OutToken>) {
if payload.master_key == state.master_key { if payload.master_key == state.master_key {
let now = SystemTime::now() + Duration::from_secs(payload.valid * 60); let now = SystemTime::now() + Duration::from_secs(payload.exp * 60);
let a = now.duration_since(UNIX_EPOCH).unwrap().as_secs(); let expire = now.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
let claim = crate::utils::jwt::Claims { user: payload.owner, exp: a };
let claim = Claims {
master_key: String::new(),
owner: payload.owner,
exp: expire,
random: payload.random,
};
match encode(&Header::default(), &claim, &EncodingKey::from_secret(payload.master_key.as_ref())) { match encode(&Header::default(), &claim, &EncodingKey::from_secret(payload.master_key.as_ref())) {
Ok(t) => { Ok(t) => {
let tok = OutToken { token: t }; let tok = OutToken { token: t };
info!("Generating token: {:?}", tok); info!("Generating token: {:?}", tok.token);
(StatusCode::CREATED, Json(tok)) (StatusCode::CREATED, Json(tok))
} }
Err(e) => { Err(e) => {
@@ -113,7 +148,6 @@ async fn jwt_gen(State(state): State<AppState>, Json(payload): Json<InputKey>) -
async fn metrics() -> impl IntoResponse { async fn metrics() -> impl IntoResponse {
let metric_families = gather(); let metric_families = gather();
let encoder = TextEncoder::new(); let encoder = TextEncoder::new();
let mut buffer = Vec::new(); let mut buffer = Vec::new();
if let Err(e) = encoder.encode(&metric_families, &mut buffer) { if let Err(e) = encoder.encode(&metric_families, &mut buffer) {
// encoding error fallback // encoding error fallback
@@ -122,7 +156,6 @@ async fn metrics() -> impl IntoResponse {
.body(Body::from(format!("Failed to encode metrics: {}", e))) .body(Body::from(format!("Failed to encode metrics: {}", e)))
.unwrap(); .unwrap();
} }
Response::builder() Response::builder()
.status(StatusCode::OK) .status(StatusCode::OK)
.header("Content-Type", encoder.format_type()) .header("Content-Type", encoder.format_type())
@@ -130,7 +163,35 @@ async fn metrics() -> impl IntoResponse {
.unwrap() .unwrap()
} }
// #[allow(dead_code)] async fn status(State(st): State<AppState>, Query(params): Query<HashMap<String, String>>) -> impl IntoResponse {
// async fn senderror() -> impl IntoResponse { if let Some(_) = params.get("live") {
// Response::builder().status(StatusCode::BAD_GATEWAY).body(Body::from("No live upstream found!\n")).unwrap() let r = upstreams_liveness_json(&st.full_upstreams, &st.current_upstreams);
// } return Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(Body::from(format!("{}", r)))
.unwrap();
}
if let Some(_) = params.get("all") {
let resp = upstreams_to_json(&st.current_upstreams);
match resp {
Ok(j) => {
return Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(Body::from(j))
.unwrap()
}
Err(e) => {
return Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::from(format!("Failed to get status: {}", e)))
.unwrap();
}
}
}
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::from(format!("Parameter mismatch")))
.unwrap()
}