mirror of
https://github.com/sadoyan/aralez.git
synced 2026-04-30 14:58:38 +08:00
Compare commits
25 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c78245e695 | ||
|
|
66b1a1c399 | ||
|
|
bba6dd8514 | ||
|
|
79485ac69d | ||
|
|
61c5625016 | ||
|
|
57bdc71acd | ||
|
|
9e09b829a6 | ||
|
|
d3602fa578 | ||
|
|
e304482667 | ||
|
|
f8118f9596 | ||
|
|
f654312466 | ||
|
|
b44f7069a0 | ||
|
|
a44979ec82 | ||
|
|
ece4fa20af | ||
|
|
2ad3a059ab | ||
|
|
6f012cee69 | ||
|
|
51c88c8f7c | ||
|
|
f91bc41103 | ||
|
|
21e1276ff5 | ||
|
|
8463cdabbc | ||
|
|
d0e4b52ce6 | ||
|
|
b552d24497 | ||
|
|
2e33d692bb | ||
|
|
e586967830 | ||
|
|
8d4e434d6a |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -5,9 +5,12 @@
|
||||
*.dll
|
||||
*.exe
|
||||
*.sh
|
||||
/docs/
|
||||
/docs
|
||||
/target/
|
||||
*.iml
|
||||
.idea/
|
||||
.etc/
|
||||
*.ipr
|
||||
*.iws
|
||||
/out/
|
||||
|
||||
210
Cargo.lock
generated
210
Cargo.lock
generated
@@ -128,10 +128,11 @@ dependencies = [
|
||||
"log",
|
||||
"mimalloc",
|
||||
"notify",
|
||||
"openssl",
|
||||
"once_cell",
|
||||
"pingora",
|
||||
"pingora-core",
|
||||
"pingora-http",
|
||||
"pingora-limits",
|
||||
"pingora-proxy",
|
||||
"prometheus 0.14.0",
|
||||
"rand 0.9.1",
|
||||
@@ -142,6 +143,7 @@ dependencies = [
|
||||
"sha2",
|
||||
"tokio",
|
||||
"tonic",
|
||||
"tower-http",
|
||||
"urlencoding",
|
||||
"x509-parser",
|
||||
]
|
||||
@@ -601,9 +603,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crypto-common"
|
||||
version = "0.2.0-rc.2"
|
||||
version = "0.2.0-rc.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "170d71b5b14dec99db7739f6fc7d6ec2db80b78c3acb77db48392ccc3d8a9ea0"
|
||||
checksum = "8a23fa214dea9efd4dacee5a5614646b30216ae0f05d4bb51bafb50e9da1c5be"
|
||||
dependencies = [
|
||||
"hybrid-array",
|
||||
]
|
||||
@@ -684,12 +686,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "digest"
|
||||
version = "0.11.0-pre.10"
|
||||
version = "0.11.0-rc.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c478574b20020306f98d61c8ca3322d762e1ff08117422ac6106438605ea516"
|
||||
checksum = "460dd7f37e4950526b54a5a6b1f41b6c8e763c58eb9a8fc8fc05ba5c2f44ca7b"
|
||||
dependencies = [
|
||||
"block-buffer 0.11.0-rc.4",
|
||||
"crypto-common 0.2.0-rc.2",
|
||||
"crypto-common 0.2.0-rc.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1079,6 +1081,12 @@ dependencies = [
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http-range-header"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c"
|
||||
|
||||
[[package]]
|
||||
name = "httparse"
|
||||
version = "1.9.5"
|
||||
@@ -1169,21 +1177,28 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "hyper-util"
|
||||
version = "0.1.10"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4"
|
||||
checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"bytes",
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"http",
|
||||
"http-body",
|
||||
"hyper",
|
||||
"ipnet",
|
||||
"libc",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"socket2",
|
||||
"system-configuration",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
"windows-registry",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1371,6 +1386,16 @@ version = "2.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
|
||||
|
||||
[[package]]
|
||||
name = "iri-string"
|
||||
version = "0.7.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "is_terminal_polyfill"
|
||||
version = "1.70.1"
|
||||
@@ -1469,15 +1494,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.169"
|
||||
version = "0.2.174"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
|
||||
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
|
||||
|
||||
[[package]]
|
||||
name = "libmimalloc-sys"
|
||||
version = "0.1.42"
|
||||
version = "0.1.43"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec9d6fac27761dabcd4ee73571cdb06b7022dc99089acbe5435691edffaac0f4"
|
||||
checksum = "bf88cd67e9de251c1781dbe2f641a1a3ad66eaae831b8a2c38fbdc5ddae16d4d"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
@@ -1588,9 +1613,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mimalloc"
|
||||
version = "0.1.46"
|
||||
version = "0.1.47"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "995942f432bbb4822a7e9c3faa87a695185b0d09273ba85f097b54f4e458f2af"
|
||||
checksum = "b1791cbe101e95af5764f06f20f6760521f7158f69dbf9d6baf941ee1bf6bc40"
|
||||
dependencies = [
|
||||
"libmimalloc-sys",
|
||||
]
|
||||
@@ -1601,6 +1626,16 @@ version = "0.3.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
|
||||
|
||||
[[package]]
|
||||
name = "mime_guess"
|
||||
version = "2.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
|
||||
dependencies = [
|
||||
"mime",
|
||||
"unicase",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "minimal-lexical"
|
||||
version = "0.2.1"
|
||||
@@ -2052,6 +2087,15 @@ dependencies = [
|
||||
"crc32fast",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pingora-limits"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a719a8cb5558ca06bd6076c97b8905d500ea556da89e132ba53d4272844f95b9"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pingora-load-balancing"
|
||||
version = "0.5.0"
|
||||
@@ -2403,15 +2447,14 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.12.15"
|
||||
version = "0.12.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb"
|
||||
checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"bytes",
|
||||
"encoding_rs",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"h2",
|
||||
"http",
|
||||
"http-body",
|
||||
@@ -2420,29 +2463,26 @@ dependencies = [
|
||||
"hyper-rustls",
|
||||
"hyper-tls",
|
||||
"hyper-util",
|
||||
"ipnet",
|
||||
"js-sys",
|
||||
"log",
|
||||
"mime",
|
||||
"native-tls",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"rustls-pemfile",
|
||||
"rustls-pki-types",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"sync_wrapper",
|
||||
"system-configuration",
|
||||
"tokio",
|
||||
"tokio-native-tls",
|
||||
"tower",
|
||||
"tower-http",
|
||||
"tower-service",
|
||||
"url",
|
||||
"wasm-bindgen",
|
||||
"wasm-bindgen-futures",
|
||||
"web-sys",
|
||||
"windows-registry",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2709,13 +2749,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.11.0-pre.5"
|
||||
version = "0.11.0-rc.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "19b4241d1a56954dce82cecda5c8e9c794eef6f53abe5e5216bac0a0ea71ffa7"
|
||||
checksum = "aa1d2e6b3cc4e43a8258a9a3b17aa5dfd2cc5186c7024bba8a64aa65b2c71a59"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"digest 0.11.0-pre.10",
|
||||
"digest 0.11.0-rc.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2762,9 +2802,9 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.8"
|
||||
version = "0.5.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8"
|
||||
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
@@ -3004,9 +3044,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.45.0"
|
||||
version = "1.45.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165"
|
||||
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"bytes",
|
||||
@@ -3101,9 +3141,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.13.0"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85839f0b32fd242bb3209262371d07feda6d780d16ee9d2bc88581b89da1549b"
|
||||
checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum",
|
||||
@@ -3147,6 +3187,34 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower-http"
|
||||
version = "0.6.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
|
||||
dependencies = [
|
||||
"bitflags 2.8.0",
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"http",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
"http-range-header",
|
||||
"httpdate",
|
||||
"iri-string",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tower",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower-layer"
|
||||
version = "0.3.3"
|
||||
@@ -3447,29 +3515,29 @@ checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
|
||||
|
||||
[[package]]
|
||||
name = "windows-registry"
|
||||
version = "0.4.0"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3"
|
||||
checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
"windows-result",
|
||||
"windows-strings",
|
||||
"windows-targets 0.53.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.3.2"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252"
|
||||
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-strings"
|
||||
version = "0.3.1"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319"
|
||||
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
]
|
||||
@@ -3525,29 +3593,13 @@ dependencies = [
|
||||
"windows_aarch64_gnullvm 0.52.6",
|
||||
"windows_aarch64_msvc 0.52.6",
|
||||
"windows_i686_gnu 0.52.6",
|
||||
"windows_i686_gnullvm 0.52.6",
|
||||
"windows_i686_gnullvm",
|
||||
"windows_i686_msvc 0.52.6",
|
||||
"windows_x86_64_gnu 0.52.6",
|
||||
"windows_x86_64_gnullvm 0.52.6",
|
||||
"windows_x86_64_msvc 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm 0.53.0",
|
||||
"windows_aarch64_msvc 0.53.0",
|
||||
"windows_i686_gnu 0.53.0",
|
||||
"windows_i686_gnullvm 0.53.0",
|
||||
"windows_i686_msvc 0.53.0",
|
||||
"windows_x86_64_gnu 0.53.0",
|
||||
"windows_x86_64_gnullvm 0.53.0",
|
||||
"windows_x86_64_msvc 0.53.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.48.5"
|
||||
@@ -3560,12 +3612,6 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.48.5"
|
||||
@@ -3578,12 +3624,6 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.48.5"
|
||||
@@ -3596,24 +3636,12 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.48.5"
|
||||
@@ -3626,12 +3654,6 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.48.5"
|
||||
@@ -3644,12 +3666,6 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.48.5"
|
||||
@@ -3662,12 +3678,6 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.48.5"
|
||||
@@ -3680,12 +3690,6 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rt"
|
||||
version = "0.33.0"
|
||||
|
||||
19
Cargo.toml
19
Cargo.toml
@@ -11,7 +11,7 @@ panic = "abort"
|
||||
strip = true
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1.45.0", features = ["full"] }
|
||||
tokio = { version = "1.45.1", features = ["full"] }
|
||||
#pingora = { version = "0.5.0", features = ["lb", "rustls"] } # openssl, rustls, boringssl
|
||||
pingora = { version = "0.5.0", features = ["lb", "openssl"] } # openssl, rustls, boringssl
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
@@ -19,6 +19,8 @@ dashmap = "7.0.0-rc2"
|
||||
pingora-core = "0.5.0"
|
||||
pingora-proxy = "0.5.0"
|
||||
pingora-http = "0.5.0"
|
||||
pingora-limits = "0.5.0"
|
||||
#pingora-pool = "0.5.0"
|
||||
async-trait = "0.1.88"
|
||||
env_logger = "0.11.8"
|
||||
log = "0.4.27"
|
||||
@@ -26,7 +28,7 @@ futures = "0.3.31"
|
||||
notify = "8.0.0"
|
||||
axum = { version = "0.8.4" }
|
||||
axum-server = { version = "0.7.2", features = ["tls-openssl"] }
|
||||
reqwest = { version = "0.12.15", features = ["json", "native-tls-alpn"] }
|
||||
reqwest = { version = "0.12.20", features = ["json", "native-tls-alpn"] }
|
||||
#reqwest = { version = "0.12.15", features = ["json", "rustls-tls"] }
|
||||
#reqwest = { version = "0.12.15", default-features = false, features = ["rustls-tls", "json"] }
|
||||
|
||||
@@ -34,18 +36,21 @@ serde_yaml = "0.9.34-deprecated"
|
||||
rand = "0.9.0"
|
||||
base64 = "0.22.1"
|
||||
jsonwebtoken = "9.3.1"
|
||||
tonic = "0.13.0"
|
||||
sha2 = { version = "0.11.0-pre.5", default-features = false }
|
||||
tonic = "0.13.1"
|
||||
sha2 = { version = "0.11.0-rc.0", default-features = false }
|
||||
base16ct = { version = "0.2.0", features = ["alloc"] }
|
||||
urlencoding = "2.1.3"
|
||||
arc-swap = "1.7.1"
|
||||
#rustls = { version = "0.23.27", features = ["ring"] }
|
||||
mimalloc = { version = "0.1.46", default-features = false }
|
||||
mimalloc = { version = "0.1.47", default-features = false }
|
||||
prometheus = "0.14.0"
|
||||
lazy_static = "1.5.0"
|
||||
openssl = "0.10.72"
|
||||
#openssl = "0.10.73"
|
||||
x509-parser = "0.17.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
#hickory-client = { version = "0.25.2" }
|
||||
tower-http = { version = "0.6.6", features = ["fs"] }
|
||||
once_cell = "1.20.2"
|
||||
#moka = { version = "0.12.10", features = ["sync"] }
|
||||
|
||||
#openssl = "0.10.72"
|
||||
|
||||
|
||||
120
METRICS.md
120
METRICS.md
@@ -1,120 +0,0 @@
|
||||
# 📈 Aralez Prometheus Metrics Reference
|
||||
|
||||
This document outlines Prometheus metrics for the [Aralez](https://github.com/sadoyan/aralez) reverse proxy.
|
||||
These metrics can be used for monitoring, alerting and performance analysis.
|
||||
|
||||
Exposed to `http://config_address/metrics`
|
||||
|
||||
By default `http://127.0.0.1:3000/metrics`
|
||||
|
||||
# 📊 Example Grafana dashboard during stress test :
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Prometheus Metrics
|
||||
|
||||
### 1. `aralez_requests_total`
|
||||
|
||||
- **Type**: `Counter`
|
||||
- **Purpose**: Total amount requests served by Aralez.
|
||||
|
||||
**PromQL example:**
|
||||
|
||||
```promql
|
||||
rate(aralez_requests_total[5m])
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2. `aralez_errors_total`
|
||||
|
||||
- **Type**: `Counter`
|
||||
- **Purpose**: Count of requests that resulted in an error.
|
||||
|
||||
**PromQL example:**
|
||||
|
||||
```promql
|
||||
rate(aralez_errors_total[5m])
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 3. `aralez_responses_total{status="200"}`
|
||||
|
||||
- **Type**: `CounterVec`
|
||||
- **Purpose**: Count of responses by HTTP status code.
|
||||
|
||||
**PromQL example:**
|
||||
|
||||
```promql
|
||||
rate(aralez_responses_total{status=~"5.."}[5m]) > 0
|
||||
```
|
||||
|
||||
> Useful for alerting on 5xx errors.
|
||||
|
||||
---
|
||||
|
||||
### 4. `aralez_response_latency_seconds`
|
||||
|
||||
- **Type**: `Histogram`
|
||||
- **Purpose**: Tracks the latency of responses in seconds.
|
||||
|
||||
**Example bucket output:**
|
||||
|
||||
```prometheus
|
||||
aralez_response_latency_seconds_bucket{le="0.01"} 15
|
||||
aralez_response_latency_seconds_bucket{le="0.1"} 120
|
||||
aralez_response_latency_seconds_bucket{le="0.25"} 245
|
||||
aralez_response_latency_seconds_bucket{le="0.5"} 500
|
||||
...
|
||||
aralez_response_latency_seconds_count 1023
|
||||
aralez_response_latency_seconds_sum 42.6
|
||||
```
|
||||
|
||||
| Metric | Meaning |
|
||||
|-------------------------|---------------------------------------------------------------|
|
||||
| `bucket{le="0.1"} 120` | 120 requests were ≤ 100ms |
|
||||
| `bucket{le="0.25"} 245` | 245 requests were ≤ 250ms |
|
||||
| `count` | Total number of observations (i.e., total responses measured) |
|
||||
| `sum` | Total time of all responses, in seconds |
|
||||
|
||||
### 🔍 How to interpret:
|
||||
|
||||
- `le` means “less than or equal to”.
|
||||
- `count` is total amount of observations.
|
||||
- `sum` is the total time (in seconds) of all responses.
|
||||
|
||||
**PromQL examples:**
|
||||
|
||||
🔹 **95th percentile latency**
|
||||
|
||||
```promql
|
||||
histogram_quantile(0.95, rate(aralez_response_latency_seconds_bucket[5m]))
|
||||
|
||||
```
|
||||
|
||||
🔹 **Average latency**
|
||||
|
||||
```promql
|
||||
rate(aralez_response_latency_seconds_sum[5m]) / rate(aralez_response_latency_seconds_count[5m])
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ✅ Notes
|
||||
|
||||
- Metrics are registered after the first served request.
|
||||
|
||||
---
|
||||
✅ Summary of key metrics
|
||||
|
||||
| Metric Name | Type | What it Tells You |
|
||||
|---------------------------------------|------------|---------------------------|
|
||||
| `aralez_requests_total` | Counter | Total requests served |
|
||||
| `aralez_errors_total` | Counter | Number of failed requests |
|
||||
| `aralez_responses_total{status="200"}` | CounterVec | Response status breakdown |
|
||||
| `aralez_response_latency_seconds` | Histogram | How fast responses are |
|
||||
|
||||
📘 *Last updated: May 2025*
|
||||
141
README.md
141
README.md
@@ -1,19 +1,29 @@
|
||||

|
||||
|
||||
# Aralez (Արալեզ), Reverse proxy and service mesh built on top of Cloudflare's Pingora
|
||||
---
|
||||
|
||||
**What Aralez means?**
|
||||
<ins>Aralez = Արալեզ. Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them.</ins>.
|
||||
# Aralez (Արալեզ),
|
||||
|
||||
### **Reverse proxy and service mesh built on top of Cloudflare's Pingora**
|
||||
|
||||
What Aralez means ?
|
||||
**Aralez = Արալեզ** <ins>.Named after the legendary Armenian guardian spirit, winged dog-like creature, that descend upon fallen heroes to lick their wounds and resurrect them.</ins>.
|
||||
|
||||
Built on Rust, on top of **Cloudflare’s Pingora engine**, **Aralez** delivers world-class performance, security and scalability — right out of the box.
|
||||
|
||||
[](https://www.buymeacoffee.com/sadoyan)
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Key Features
|
||||
|
||||
- **Dynamic Config Reloads** — Upstreams can be updated live via API, no restart required.
|
||||
- **TLS Termination** — Built-in OpenSSL support.
|
||||
- **Automatic load of certificates** — Automatically reads and loads certificates from a folder, without a restart.
|
||||
- **Upstreams TLS detection** — Aralez will automatically detect if upstreams uses secure connection.
|
||||
- **Built in rate limiter** — Limit requests to server, by setting up upper limit for requests per seconds, per virtualhost.
|
||||
- **Global rate limiter** — Set rate limit for all virtualhosts.
|
||||
- **Per path rate limiter** — Set rate limit for specific paths. Path limits will override global limits.
|
||||
- **Authentication** — Supports Basic Auth, API tokens, and JWT verification.
|
||||
- **Basic Auth**
|
||||
- **API Key** via `x-api-key` header
|
||||
@@ -24,6 +34,7 @@ Built on Rust, on top of **Cloudflare’s Pingora engine**, **Aralez** delivers
|
||||
- Failover with health checks
|
||||
- Sticky sessions via cookies
|
||||
- **Unified Port** — Serve HTTP and WebSocket traffic over the same connection.
|
||||
- **Built in file server** — Build in minimalistic file server for serving static files, should be added as upstreams for public access.
|
||||
- **Memory Safe** — Created purely on Rust.
|
||||
- **High Performance** — Built with [Pingora](https://github.com/cloudflare/pingora) and tokio for async I/O.
|
||||
|
||||
@@ -61,28 +72,32 @@ Built on Rust, on top of **Cloudflare’s Pingora engine**, **Aralez** delivers
|
||||
|
||||
### 🔧 `main.yaml`
|
||||
|
||||
| Key | Example Value | Description |
|
||||
|----------------------------------|--------------------------------------|--------------------------------------------------------------------------------------------------|
|
||||
| **threads** | 12 | Number of running daemon threads. Optional, defaults to 1 |
|
||||
| **user** | aralez | Optional, Username for running aralez after dropping root privileges, requires to launch as root |
|
||||
| **group** | aralez | Optional,Group for running aralez after dropping root privileges, requires to launch as root |
|
||||
| **daemon** | false | Run in background (boolean) |
|
||||
| **upstream_keepalive_pool_size** | 500 | Pool size for upstream keepalive connections |
|
||||
| **pid_file** | /tmp/aralez.pid | Path to PID file |
|
||||
| **error_log** | /tmp/aralez_err.log | Path to error log file |
|
||||
| **upgrade_sock** | /tmp/aralez.sock | Path to live upgrade socket file |
|
||||
| **config_address** | 0.0.0.0:3000 | HTTP API address for pushing upstreams.yaml from remote location |
|
||||
| **config_tls_address** | 0.0.0.0:3001 | HTTPS API address for pushing upstreams.yaml from remote location |
|
||||
| **config_tls_certificate** | etc/server.crt | Certificate file path for API. Mandatory if proxy_address_tls is set, else optional |
|
||||
| **config_tls_key_file** | etc/key.pem | Private Key file path. Mandatory if proxy_address_tls is set, else optional |
|
||||
| **proxy_address_http** | 0.0.0.0:6193 | Aralez HTTP bind address |
|
||||
| **proxy_address_tls** | 0.0.0.0:6194 | Aralez HTTPS bind address (Optional) |
|
||||
| **proxy_certificates** | etc/certs/ | The directory containing certificate and key files. In a format {NAME}.crt, {NAME}.key. |
|
||||
| **upstreams_conf** | etc/upstreams.yaml | The location of upstreams file |
|
||||
| **log_level** | info | Log level , possible values : info, warn, error, debug, trace, off |
|
||||
| **hc_method** | HEAD | Healthcheck method (HEAD, GET, POST are supported) UPPERCASE |
|
||||
| **hc_interval** | 2 | Interval for health checks in seconds |
|
||||
| **master_key** | 5aeff7f9-7b94-447c-af60-e8c488544a3e | Master key for working with API server and JWT Secret generation |
|
||||
| Key | Example Value | Description |
|
||||
|----------------------------------|--------------------------------------|----------------------------------------------------------------------------------------------------|
|
||||
| **threads** | 12 | Number of running daemon threads. Optional, defaults to 1 |
|
||||
| **user** | aralez | Optional, Username for running aralez after dropping root privileges, requires to launch as root |
|
||||
| **group** | aralez | Optional,Group for running aralez after dropping root privileges, requires to launch as root |
|
||||
| **daemon** | false | Run in background (boolean) |
|
||||
| **upstream_keepalive_pool_size** | 500 | Pool size for upstream keepalive connections |
|
||||
| **pid_file** | /tmp/aralez.pid | Path to PID file |
|
||||
| **error_log** | /tmp/aralez_err.log | Path to error log file |
|
||||
| **upgrade_sock** | /tmp/aralez.sock | Path to live upgrade socket file |
|
||||
| **config_address** | 0.0.0.0:3000 | HTTP API address for pushing upstreams.yaml from remote location |
|
||||
| **config_tls_address** | 0.0.0.0:3001 | HTTPS API address for pushing upstreams.yaml from remote location |
|
||||
| **config_tls_certificate** | etc/server.crt | Certificate file path for API. Mandatory if proxy_address_tls is set, else optional |
|
||||
| **proxy_tls_grade** | (high, medium, unsafe) | Grade of TLS ciphers, for easy configuration. High matches Qualys SSL Labs A+ (defaults to medium) |
|
||||
| **config_tls_key_file** | etc/key.pem | Private Key file path. Mandatory if proxy_address_tls is set, else optional |
|
||||
| **proxy_address_http** | 0.0.0.0:6193 | Aralez HTTP bind address |
|
||||
| **proxy_address_tls** | 0.0.0.0:6194 | Aralez HTTPS bind address (Optional) |
|
||||
| **proxy_certificates** | etc/certs/ | The directory containing certificate and key files. In a format {NAME}.crt, {NAME}.key. |
|
||||
| **upstreams_conf** | etc/upstreams.yaml | The location of upstreams file |
|
||||
| **log_level** | info | Log level , possible values : info, warn, error, debug, trace, off |
|
||||
| **hc_method** | HEAD | Healthcheck method (HEAD, GET, POST are supported) UPPERCASE |
|
||||
| **hc_interval** | 2 | Interval for health checks in seconds |
|
||||
| **master_key** | 5aeff7f9-7b94-447c-af60-e8c488544a3e | Master key for working with API server and JWT Secret generation |
|
||||
| **file_server_folder** | /some/local/folder | Optional, local folder to serve |
|
||||
| **file_server_address** | 127.0.0.1:3002 | Optional, Local address for file server. Can set as upstream for public access |
|
||||
| **config_api_enabled** | true | Boolean to enable/disable remote config push capability |
|
||||
|
||||
### 🌐 `upstreams.yaml`
|
||||
|
||||
@@ -103,12 +118,41 @@ Make the binary executable `chmod 755 ./aralez-VERSION` and run.
|
||||
|
||||
File names:
|
||||
|
||||
| File Name | Description |
|
||||
|---------------------------|---------------------------------------------------------------|
|
||||
| `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
|
||||
| `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
|
||||
| `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
|
||||
| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
|
||||
| File Name | Description |
|
||||
|---------------------------|--------------------------------------------------------------------------|
|
||||
| `aralez-x86_64-musl.gz` | Static Linux x86_64 binary, without any system dependency |
|
||||
| `aralez-x86_64-glibc.gz` | Dynamic Linux x86_64 binary, with minimal system dependencies |
|
||||
| `aralez-aarch64-musl.gz` | Static Linux ARM64 binary, without any system dependency |
|
||||
| `aralez-aarch64-glibc.gz` | Dynamic Linux ARM64 binary, with minimal system dependencies |
|
||||
| `sadoyan/aralez` | Docker image on Debian 13 slim (https://hub.docker.com/r/sadoyan/aralez) |
|
||||
|
||||
**Via docker**
|
||||
|
||||
```shell
|
||||
docker run -d \
|
||||
-v /local/path/to/config:/etc/aralez:ro \
|
||||
-p 80:80 \
|
||||
-p 443:443 \
|
||||
sadoyan/aralez
|
||||
```
|
||||
|
||||
## 💡 Note
|
||||
|
||||
In general, **glibc** builds run faster, but have a few basic system dependencies, for example:
|
||||
|
||||
```
|
||||
linux-vdso.so.1 (0x00007ffeea33b000)
|
||||
libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007f09e7377000)
|
||||
libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f09e6320000)
|
||||
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f09e613f000)
|
||||
/lib64/ld-linux-x86-64.so.2 (0x00007f09e73b1000)
|
||||
```
|
||||
|
||||
These are common to any Linux systems, so the binary should work on almost any Linux system.
|
||||
|
||||
**musl** builds are 100% portable, statically compiled binaries and have zero system dependencies.
|
||||
In general musl builds have a little less performance.
|
||||
The most intensive tests show 107k-110k requests per second on **glibc** binaries against 97k-100k on **musl** ones.
|
||||
|
||||
## 🔌 Running the Proxy
|
||||
|
||||
@@ -142,6 +186,7 @@ A sample `upstreams.yaml` entry:
|
||||
provider: "file"
|
||||
sticky_sessions: false
|
||||
to_https: false
|
||||
rate_limit: 10
|
||||
headers:
|
||||
- "Access-Control-Allow-Origin:*"
|
||||
- "Access-Control-Allow-Methods:POST, GET, OPTIONS"
|
||||
@@ -152,6 +197,7 @@ authorization:
|
||||
myhost.mydomain.com:
|
||||
paths:
|
||||
"/":
|
||||
rate_limit: 20
|
||||
to_https: false
|
||||
headers:
|
||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||
@@ -166,19 +212,29 @@ myhost.mydomain.com:
|
||||
servers:
|
||||
- "127.0.0.4:8443"
|
||||
- "127.0.0.5:8443"
|
||||
"/.well-known/acme-challenge":
|
||||
healthcheck: false
|
||||
servers:
|
||||
- "127.0.0.1:8001"
|
||||
```
|
||||
|
||||
**This means:**
|
||||
|
||||
- Sticky sessions are disabled globally; this setting applies to all upstreams. (Note: the 301 redirect to HTTPS is controlled by `to_https`, described in the next point.)
|
||||
- HTTP to HTTPS redirect disabled globally, but can be overridden by `to_https` setting per upstream.
|
||||
- Requests to each hosted domain will be limited to 10 requests per second per virtualhost.
|
||||
- Request limits are calculated per requester IP plus requested virtualhost.
|
||||
- If the requester exceeds the limit it will receive `429 Too Many Requests` error.
|
||||
- Optional. Rate limiter will be disabled if the parameter is entirely removed from config.
|
||||
- Requests to `myhost.mydomain.com/` will be limited to 20 requests per second.
|
||||
- Requests to `myhost.mydomain.com/` will be proxied to `127.0.0.1` and `127.0.0.2`.
|
||||
- Plain HTTP to `myhost.mydomain.com/foo` will get 301 redirect to configured TLS port of Aralez.
|
||||
- Requests to `myhost.mydomain.com/foo` will be proxied to `127.0.0.4` and `127.0.0.5`.
|
||||
- Requests to `myhost.mydomain.com/.well-known/acme-challenge` will be proxied to `127.0.0.1:8001`, but healthchecks are disabled.
|
||||
- SSL/TLS for upstreams is detected automatically, no need to set any config parameter.
|
||||
- Assuming the `127.0.0.5:8443` is SSL protected. The inner traffic will use TLS.
|
||||
- Self signed certificates are silently accepted.
|
||||
- Global headers (CORS for this case) will be injected to all upstreams
|
||||
- Self-signed certificates are silently accepted.
|
||||
- Global headers (CORS for this case) will be injected to all upstreams.
|
||||
- Additional headers will be injected into the request for `myhost.mydomain.com`.
|
||||
- You can choose any path, deep nested paths are supported, the best match chosen.
|
||||
- All requests to servers will require JWT token authentication (You can comment out the authorization to disable it),
|
||||
@@ -442,4 +498,21 @@ Error distribution:
|
||||
[228] aborted due to deadline
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
## 🚀 Aralez, Nginx, Traefik performance benchmark
|
||||
|
||||
This benchmark was done on 4 servers, each with an Intel(R) Xeon(R) E-2174G CPU @ 3.80GHz and 64 GB RAM.
|
||||
|
||||
1. One server runs Aralez, Traefik, and Nginx on different ports, each tuned as much as possible.
|
||||
2. 3x Upstreams servers, running Nginx. Replying with dummy json hardcoded in config file for max performance.
|
||||
|
||||
All servers are connected to the same switch via 1 Gb ports in a datacenter, not a home lab. The results:
|
||||

|
||||
|
||||
The results show requests per second performed by the load balancer. You can see 3 batches with 800 concurrent users.
|
||||
|
||||
1. Requests via http1.1 to plain text endpoint.
|
||||
2. Requests via http2 to an SSL endpoint.
|
||||
3. Mixed workload with plain http1.1 and http2 SSL.
|
||||
|
||||
|
||||
BIN
assets/bench.png
Normal file
BIN
assets/bench.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 160 KiB |
@@ -1,5 +1,5 @@
|
||||
# Main configuration file , applied on startup
|
||||
threads: 12 # Nubber of daemon threads default setting
|
||||
# Main configuration file, applied on startup
|
||||
threads: 12 # Number of daemon threads default setting
|
||||
#user: pastor # Username for running aralez after dropping root privileges, requires program to start as root
|
||||
#group: pastor # Group for running aralez after dropping root privileges, requires program to start as root
|
||||
daemon: false # Run in background
|
||||
@@ -7,15 +7,19 @@ upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections
|
||||
pid_file: /tmp/aralez.pid # Path to PID file
|
||||
error_log: /tmp/aralez_err.log # Path to error log
|
||||
upgrade_sock: /tmp/aralez.sock # Path to socket file
|
||||
config_api_enabled: true # Boolean to enable/disable remote config push capability.
|
||||
config_address: 0.0.0.0:3000 # HTTP API address for pushing upstreams.yaml from remote location
|
||||
config_tls_address: 0.0.0.0:3001 # HTTP TLS API address for pushing upstreams.yaml from remote location
|
||||
config_tls_certificate: etc/server.crt # Mandatory if config_tls_address is set
|
||||
config_tls_key_file: etc/key.pem # Mandatory if config_tls_address is set
|
||||
config_tls_certificate: /etc/server.crt # Mandatory if config_tls_address is set
|
||||
config_tls_key_file: /etc/key.pem # Mandatory if config_tls_address is set
|
||||
proxy_address_http: 0.0.0.0:6193 # Proxy HTTP bind address
|
||||
proxy_address_tls: 0.0.0.0:6194 # Optional, Proxy TLS bind address
|
||||
proxy_certificates: etc/yoyo # Mandatory if proxy_address_tls set, should contain certificate and key files strictly in a format {NAME}.crt, {NAME}.key.
|
||||
upstreams_conf: etc/upstreams.yaml # the location of upstreams file
|
||||
proxy_certificates: /etc/certs # Mandatory if proxy_address_tls set, should contain a certificate and key files strictly in a format {NAME}.crt, {NAME}.key.
|
||||
proxy_tls_grade: a+ # Grade of TLS suite for proxy (a+, a, b, c, unsafe), matching grades of Qualys SSL Labs
|
||||
upstreams_conf: /etc/upstreams.yaml # the location of upstreams file
|
||||
file_server_folder: /opt/storage # Optional, local folder to serve
|
||||
file_server_address: 127.0.0.1:3002 # Optional, Local address for file server. Can set as upstream for public access.
|
||||
log_level: info # info, warn, error, debug, trace, off
|
||||
hc_method: HEAD # Healthcheck method (HEAD, GET, POST are supported) UPPERCASE
|
||||
hc_interval: 2 #Interval for health checks in seconds
|
||||
master_key: 910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774 # Master key for working with API server and JWT Secret
|
||||
master_key: 910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774 # Master key for working with API server and JWT Secret
|
||||
@@ -1,7 +1,8 @@
|
||||
# The file under watch and hot reload, changes are applied immediately, no need to restart or reload.
|
||||
provider: "file" # consul
|
||||
provider: "file" # consul, kubernetes
|
||||
sticky_sessions: false
|
||||
to_ssl: false
|
||||
#rate_limit: 100
|
||||
headers:
|
||||
- "Access-Control-Allow-Origin:*"
|
||||
- "Access-Control-Allow-Methods:POST, GET, OPTIONS"
|
||||
@@ -16,20 +17,32 @@ authorization:
|
||||
# creds: "5ecbf799-1343-4e94-a9b5-e278af5cd313-56b45249-1839-4008-a450-a60dc76d2bae"
|
||||
consul: # If the provider is consul. Otherwise, ignored.
|
||||
servers:
|
||||
- "http://master1:8500"
|
||||
- "http://192.168.22.1:8500"
|
||||
- "http://master1.foo.local:8500"
|
||||
- "http://consul1:8500"
|
||||
- "http://consul2:8500"
|
||||
- "http://consul3:8500"
|
||||
services: # proxy: The hostname to access the proxy server, real : The real service name in Consul database.
|
||||
- proxy: "proxy-frontend-dev-frontend-srv"
|
||||
real: "frontend-dev-frontend-srv"
|
||||
token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled
|
||||
kubernetes:
|
||||
servers:
|
||||
- "172.16.0.11:5443" # KUBERNETES_SERVICE_HOST : KUBERNETES_SERVICE_PORT_HTTPS
|
||||
services:
|
||||
- proxy: "vt-api-service-v2"
|
||||
real: "vt-api-service-v2"
|
||||
- proxy: "vt-search-service"
|
||||
real: "vt-search-service"
|
||||
- proxy: "vt-websocket-service"
|
||||
real: "vt-websocket-service"
|
||||
tokenpath: "/tmp/token.txt" # /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
upstreams:
|
||||
myip.mydomain.com:
|
||||
paths:
|
||||
rate_limit: 10 # Per path rate limit have higher priority than global rate limit. If not set, the global rate limit will be used
|
||||
"/":
|
||||
to_https: false
|
||||
headers:
|
||||
- "X-Proxy-From:Gazan"
|
||||
- "X-Proxy-From:Aralez"
|
||||
servers: # List of upstreams HOST:PORT
|
||||
- "127.0.0.1:8000"
|
||||
- "127.0.0.2:8000"
|
||||
@@ -39,7 +52,7 @@ upstreams:
|
||||
to_https: true
|
||||
headers:
|
||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||
- "X-Proxy-From:Gazan"
|
||||
- "X-Proxy-From:Aralez"
|
||||
servers:
|
||||
- "127.0.0.1:8000"
|
||||
- "127.0.0.2:8000"
|
||||
@@ -52,9 +65,11 @@ upstreams:
|
||||
headers:
|
||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||
servers:
|
||||
- "192.168.1.1:8000"
|
||||
- "192.168.1.10:8000"
|
||||
- "127.0.0.1:8000"
|
||||
- "127.0.0.2:8000"
|
||||
- "127.0.0.3:8000"
|
||||
- "127.0.0.4:8000"
|
||||
- "127.0.0.4:8000"
|
||||
"/.well-known/acme-challenge":
|
||||
healthcheck: false
|
||||
servers:
|
||||
- "127.0.0.1:8001"
|
||||
@@ -1,9 +1,11 @@
|
||||
pub mod auth;
|
||||
pub mod consul;
|
||||
pub mod discovery;
|
||||
pub mod dnsclient;
|
||||
mod filewatch;
|
||||
pub mod healthcheck;
|
||||
pub mod jwt;
|
||||
pub mod kuber;
|
||||
pub mod metrics;
|
||||
pub mod parceyaml;
|
||||
pub mod structs;
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use crate::utils::parceyaml::load_configuration;
|
||||
use crate::utils::structs::{Configuration, ServiceMapping, UpstreamsDashMap};
|
||||
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps};
|
||||
use crate::utils::structs::{Configuration, InnerMap, ServiceMapping, UpstreamsDashMap};
|
||||
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams};
|
||||
use dashmap::DashMap;
|
||||
use futures::channel::mpsc::Sender;
|
||||
use futures::SinkExt;
|
||||
@@ -11,6 +10,7 @@ use reqwest::header::{HeaderMap, HeaderValue};
|
||||
use serde::Deserialize;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -27,59 +27,53 @@ struct TaggedAddress {
|
||||
port: u16,
|
||||
}
|
||||
|
||||
pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
|
||||
let config = load_configuration(fp.as_str(), "filepath");
|
||||
pub async fn start(mut toreturn: Sender<Configuration>, config: Arc<Configuration>) {
|
||||
let headers = DashMap::new();
|
||||
match config {
|
||||
Some(config) => {
|
||||
if config.typecfg.to_string() != "consul" {
|
||||
info!("Not running Consul discovery, requested type is: {}", config.typecfg);
|
||||
return;
|
||||
}
|
||||
info!("Consul Discovery is enabled : {}", config.typecfg);
|
||||
let consul = config.consul.clone();
|
||||
let prev_upstreams = UpstreamsDashMap::new();
|
||||
match consul {
|
||||
Some(consul) => {
|
||||
let servers = consul.servers.unwrap();
|
||||
info!("Consul Servers => {:?}", servers);
|
||||
let end = servers.len() - 1;
|
||||
|
||||
info!("Consul Discovery is enabled : {}", config.typecfg);
|
||||
let consul = config.consul.clone();
|
||||
let prev_upstreams = UpstreamsDashMap::new();
|
||||
match consul {
|
||||
Some(consul) => {
|
||||
let servers = consul.servers.unwrap();
|
||||
info!("Consul Servers => {:?}", servers);
|
||||
let end = servers.len();
|
||||
|
||||
loop {
|
||||
let num = rand::rng().random_range(1..end);
|
||||
headers.clear();
|
||||
for (k, v) in config.headers.clone() {
|
||||
headers.insert(k.to_string(), v);
|
||||
}
|
||||
let consul_data = servers.get(num).unwrap().to_string();
|
||||
let upstreams = consul_request(consul_data, consul.services.clone(), consul.token.clone());
|
||||
match upstreams.await {
|
||||
Some(upstreams) => {
|
||||
if !compare_dashmaps(&upstreams, &prev_upstreams) {
|
||||
let mut tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: Default::default(),
|
||||
consul: None,
|
||||
typecfg: "".to_string(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
|
||||
clone_dashmap_into(&upstreams, &prev_upstreams);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
tosend.headers = headers.clone();
|
||||
tosend.extraparams.authentication = config.extraparams.authentication.clone();
|
||||
tosend.typecfg = config.typecfg.clone();
|
||||
tosend.consul = config.consul.clone();
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
loop {
|
||||
let mut num = 0;
|
||||
if end > 0 {
|
||||
num = rand::rng().random_range(0..end);
|
||||
}
|
||||
None => {}
|
||||
headers.clear();
|
||||
for (k, v) in config.headers.clone() {
|
||||
headers.insert(k.to_string(), v);
|
||||
}
|
||||
let consul_data = servers.get(num).unwrap().to_string();
|
||||
let upstreams = consul_request(consul_data, consul.services.clone(), consul.token.clone());
|
||||
match upstreams.await {
|
||||
Some(upstreams) => {
|
||||
if !compare_dashmaps(&upstreams, &prev_upstreams) {
|
||||
let mut tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: Default::default(),
|
||||
consul: None,
|
||||
kubernetes: None,
|
||||
typecfg: "".to_string(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
|
||||
clone_dashmap_into(&upstreams, &prev_upstreams);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
tosend.headers = headers.clone();
|
||||
tosend.extraparams.authentication = config.extraparams.authentication.clone();
|
||||
tosend.typecfg = config.typecfg.clone();
|
||||
tosend.consul = config.consul.clone();
|
||||
print_upstreams(&tosend.upstreams);
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
@@ -109,7 +103,7 @@ async fn consul_request(url: String, whitelist: Option<Vec<ServiceMapping>>, tok
|
||||
Some(upstreams)
|
||||
}
|
||||
|
||||
async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<(String, u16, bool, bool, bool)>, AtomicUsize)>> {
|
||||
async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
|
||||
let client = reqwest::Client::new();
|
||||
let mut headers = HeaderMap::new();
|
||||
if let Some(token) = token {
|
||||
@@ -118,7 +112,7 @@ async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<Strin
|
||||
let to = Duration::from_secs(1);
|
||||
let u = client.get(url).timeout(to).send();
|
||||
let mut values = Vec::new();
|
||||
let upstreams: DashMap<String, (Vec<(String, u16, bool, bool, bool)>, AtomicUsize)> = DashMap::new();
|
||||
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
|
||||
match u.await {
|
||||
Ok(r) => {
|
||||
let jason = r.json::<Vec<Service>>().await;
|
||||
@@ -127,7 +121,15 @@ async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<Strin
|
||||
for service in whitelist {
|
||||
let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
|
||||
let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
|
||||
let to_add = (addr, prt, false, false, false);
|
||||
let to_add = InnerMap {
|
||||
address: addr,
|
||||
port: prt,
|
||||
is_ssl: false,
|
||||
is_http2: false,
|
||||
to_https: false,
|
||||
rate_limit: None,
|
||||
healthcheck: None,
|
||||
};
|
||||
values.push(to_add);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,28 +1,20 @@
|
||||
use crate::utils::consul;
|
||||
use crate::utils::filewatch;
|
||||
use crate::utils::structs::Configuration;
|
||||
use crate::utils::{consul, kuber};
|
||||
use crate::web::webserver;
|
||||
use async_trait::async_trait;
|
||||
use futures::channel::mpsc::Sender;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct FromFileProvider {
|
||||
pub path: String,
|
||||
}
|
||||
pub struct APIUpstreamProvider {
|
||||
pub config_api_enabled: bool,
|
||||
pub address: String,
|
||||
pub masterkey: String,
|
||||
pub tls_address: Option<String>,
|
||||
pub tls_certificate: Option<String>,
|
||||
pub tls_key_file: Option<String>,
|
||||
}
|
||||
|
||||
pub struct ConsulProvider {
|
||||
pub path: String,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Discovery {
|
||||
async fn start(&self, tx: Sender<Configuration>);
|
||||
pub file_server_address: Option<String>,
|
||||
pub file_server_folder: Option<String>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -32,6 +24,23 @@ impl Discovery for APIUpstreamProvider {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FromFileProvider {
|
||||
pub path: String,
|
||||
}
|
||||
|
||||
pub struct ConsulProvider {
|
||||
pub config: Arc<Configuration>,
|
||||
}
|
||||
|
||||
pub struct KubernetesProvider {
|
||||
pub config: Arc<Configuration>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Discovery {
|
||||
async fn start(&self, tx: Sender<Configuration>);
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Discovery for FromFileProvider {
|
||||
async fn start(&self, tx: Sender<Configuration>) {
|
||||
@@ -42,6 +51,13 @@ impl Discovery for FromFileProvider {
|
||||
#[async_trait]
|
||||
impl Discovery for ConsulProvider {
|
||||
async fn start(&self, tx: Sender<Configuration>) {
|
||||
tokio::spawn(consul::start(self.path.clone(), tx.clone()));
|
||||
tokio::spawn(consul::start(tx.clone(), self.config.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Discovery for KubernetesProvider {
|
||||
async fn start(&self, tx: Sender<Configuration>) {
|
||||
tokio::spawn(kuber::start(tx.clone(), self.config.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
158
src/utils/dnsclient.rs
Normal file
158
src/utils/dnsclient.rs
Normal file
@@ -0,0 +1,158 @@
|
||||
/*
|
||||
use crate::utils::structs::InnerMap;
|
||||
use dashmap::DashMap;
|
||||
use hickory_client::client::{Client, ClientHandle};
|
||||
use hickory_client::proto::rr::{DNSClass, Name, RecordType};
|
||||
use hickory_client::proto::runtime::TokioRuntimeProvider;
|
||||
use hickory_client::proto::udp::UdpClientStream;
|
||||
use std::net::SocketAddr;
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
type DnsError = Box<dyn std::error::Error + Send + Sync + 'static>;
|
||||
|
||||
pub struct DnsClientPool {
|
||||
clients: Vec<Mutex<DnsClient>>,
|
||||
}
|
||||
|
||||
struct DnsClient {
|
||||
client: Client,
|
||||
}
|
||||
|
||||
pub async fn start2(mut toreturn: Sender<Configuration>, config: Arc<Configuration>) {
|
||||
let k8s = config.kubernetes.clone();
|
||||
match k8s {
|
||||
Some(k8s) => {
|
||||
let dnserver = k8s.servers.unwrap_or(vec!["127.0.0.1:53".to_string()]);
|
||||
let headers = DashMap::new();
|
||||
let end = dnserver.len() - 1;
|
||||
let mut num = 0;
|
||||
if end > 0 {
|
||||
num = rand::rng().random_range(0..end);
|
||||
}
|
||||
let srv = dnserver.get(num).unwrap().to_string();
|
||||
let pool = DnsClientPool::new(5, srv.clone()).await;
|
||||
let u = UpstreamsDashMap::new();
|
||||
if let Some(whitelist) = k8s.services {
|
||||
loop {
|
||||
let upstreams = UpstreamsDashMap::new();
|
||||
for service in whitelist.iter() {
|
||||
let ret = pool.query_srv(service.real.as_str(), srv.clone()).await;
|
||||
match ret {
|
||||
Ok(r) => {
|
||||
upstreams.insert(service.proxy.clone(), r);
|
||||
}
|
||||
Err(e) => eprintln!("DNS query failed for {:?}: {:?}", service, e),
|
||||
}
|
||||
}
|
||||
if !compare_dashmaps(&u, &upstreams) {
|
||||
headers.clear();
|
||||
for (k, v) in config.headers.clone() {
|
||||
headers.insert(k.to_string(), v);
|
||||
}
|
||||
|
||||
let mut tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: Default::default(),
|
||||
consul: None,
|
||||
kubernetes: None,
|
||||
typecfg: "".to_string(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
|
||||
clone_dashmap_into(&upstreams, &u);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
tosend.headers = headers.clone();
|
||||
tosend.extraparams.authentication = config.extraparams.authentication.clone();
|
||||
tosend.typecfg = config.typecfg.clone();
|
||||
tosend.consul = config.consul.clone();
|
||||
print_upstreams(&tosend.upstreams);
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
}
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
|
||||
impl DnsClient {
|
||||
pub async fn new(server: String) -> Result<Self, DnsError> {
|
||||
let server_details = server;
|
||||
let server: SocketAddr = server_details.parse().expect("Unable to parse socket address");
|
||||
let conn = UdpClientStream::builder(server, TokioRuntimeProvider::default()).build();
|
||||
let (client, bg) = Client::connect(conn).await.unwrap();
|
||||
tokio::spawn(bg);
|
||||
Ok(Self { client })
|
||||
}
|
||||
|
||||
pub async fn query_srv(&mut self, name: &str) -> Result<DashMap<String, (Vec<InnerMap>, AtomicUsize)>, DnsError> {
|
||||
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
|
||||
let mut values = Vec::new();
|
||||
match tokio::time::timeout(Duration::from_secs(5), self.client.query(Name::from_str(name)?, DNSClass::IN, RecordType::SRV)).await {
|
||||
Ok(Ok(response)) => {
|
||||
for answer in response.answers() {
|
||||
if let hickory_client::proto::rr::RData::SRV(srv) = answer.data() {
|
||||
let to_add = InnerMap {
|
||||
address: srv.target().to_string(),
|
||||
port: srv.port(),
|
||||
is_ssl: false,
|
||||
is_http2: false,
|
||||
to_https: false,
|
||||
rate_limit: None,
|
||||
};
|
||||
values.push(to_add);
|
||||
}
|
||||
}
|
||||
upstreams.insert("/".to_string(), (values, AtomicUsize::new(0)));
|
||||
Ok(upstreams)
|
||||
}
|
||||
Ok(Err(e)) => Err(Box::new(e)),
|
||||
Err(_) => Err("DNS query timed out".into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DnsClientPool {
|
||||
pub async fn new(pool_size: usize, server: String) -> Self {
|
||||
let mut clients = Vec::with_capacity(pool_size);
|
||||
for _ in 0..pool_size {
|
||||
if let Ok(client) = DnsClient::new(server.clone()).await {
|
||||
clients.push(Mutex::new(client));
|
||||
}
|
||||
}
|
||||
Self { clients }
|
||||
}
|
||||
|
||||
pub async fn query_srv(&self, name: &str, server: String) -> Result<DashMap<String, (Vec<InnerMap>, AtomicUsize)>, DnsError> {
|
||||
// Try to get an available client
|
||||
for client_mutex in &self.clients {
|
||||
if let Ok(mut client) = client_mutex.try_lock() {
|
||||
let vay = client.query_srv(name).await;
|
||||
match vay {
|
||||
Ok(_) => return vay,
|
||||
Err(_) => {
|
||||
// If query fails, drop this client and create a new one
|
||||
*client = match DnsClient::new(server).await {
|
||||
Ok(c) => c,
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
// Retry with the new client
|
||||
return client.query_srv(name).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If all clients are busy, wait for the first one with a timeout
|
||||
match tokio::time::timeout(Duration::from_secs(2), self.clients[0].lock()).await {
|
||||
Ok(mut client) => client.query_srv(name).await,
|
||||
Err(_) => Err("All DNS clients are busy and timeout reached".into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
@@ -2,7 +2,7 @@ use crate::utils::parceyaml::load_configuration;
|
||||
use crate::utils::structs::Configuration;
|
||||
use futures::channel::mpsc::Sender;
|
||||
use futures::SinkExt;
|
||||
use log::{error, info, warn};
|
||||
use log::error;
|
||||
use notify::event::ModifyKind;
|
||||
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
|
||||
use pingora::prelude::sleep;
|
||||
@@ -15,19 +15,7 @@ pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
|
||||
let file_path = fp.as_str();
|
||||
let parent_dir = Path::new(file_path).parent().unwrap();
|
||||
let (local_tx, mut local_rx) = tokio::sync::mpsc::channel::<notify::Result<Event>>(1);
|
||||
let snd = load_configuration(file_path, "filepath");
|
||||
|
||||
match snd {
|
||||
Some(snd) => {
|
||||
if snd.typecfg != "file" {
|
||||
warn!("Disabling file watcher, requested discovery type is: {}", snd.typecfg);
|
||||
return;
|
||||
}
|
||||
info!("Watching for changes in {:?}", parent_dir);
|
||||
toreturn.send(snd).await.unwrap();
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
let _watcher_handle = task::spawn_blocking({
|
||||
let parent_dir = parent_dir.to_path_buf(); // Move directory path into the closure
|
||||
move || {
|
||||
@@ -53,7 +41,7 @@ pub async fn start(fp: String, mut toreturn: Sender<Configuration>) {
|
||||
if start.elapsed() > Duration::from_secs(2) {
|
||||
start = Instant::now();
|
||||
// info!("Config File changed :=> {:?}", e);
|
||||
let snd = load_configuration(file_path, "filepath");
|
||||
let snd = load_configuration(file_path, "filepath").await;
|
||||
match snd {
|
||||
Some(snd) => {
|
||||
toreturn.send(snd).await.unwrap();
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap};
|
||||
use crate::utils::structs::{InnerMap, UpstreamsDashMap, UpstreamsIdMap};
|
||||
use crate::utils::tools::*;
|
||||
use dashmap::DashMap;
|
||||
use log::{error, info, warn};
|
||||
use log::{error, warn};
|
||||
use reqwest::{Client, Version};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::Arc;
|
||||
@@ -11,113 +11,111 @@ use tonic::transport::Endpoint;
|
||||
|
||||
pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>, idlist: Arc<UpstreamsIdMap>, params: (&str, u64)) {
|
||||
let mut period = interval(Duration::from_secs(params.1));
|
||||
let mut first_run = 0;
|
||||
let client = Client::builder().timeout(Duration::from_secs(params.1)).danger_accept_invalid_certs(true).build().unwrap();
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = period.tick() => {
|
||||
let totest : UpstreamsDashMap = DashMap::new();
|
||||
let fclone : UpstreamsDashMap = clone_dashmap(&fullist);
|
||||
for val in fclone.iter() {
|
||||
let host = val.key();
|
||||
let inner = DashMap::new();
|
||||
let mut _scheme: (String, u16, bool, bool, bool) = ("".to_string(), 0, false, false, false);
|
||||
for path_entry in val.value().iter() {
|
||||
// let inner = DashMap::new();
|
||||
let path = path_entry.key();
|
||||
let mut innervec= Vec::new();
|
||||
for k in path_entry.value().0 .iter().enumerate() {
|
||||
let (ip, port, _ssl, _version, _redir) = k.1;
|
||||
let mut _link = String::new();
|
||||
let tls = detect_tls(ip, port).await;
|
||||
let mut is_h2 = false;
|
||||
|
||||
// if tls.1 == Some(Version::HTTP_11) {
|
||||
// println!(" V1: ==> {:?}", tls.1)
|
||||
// }else if tls.1 == Some(Version::HTTP_2) {
|
||||
// is_h2 = true;
|
||||
// println!(" V2: ==> {:?}", tls.1)
|
||||
// }
|
||||
|
||||
if tls.1 == Some(Version::HTTP_2) {
|
||||
is_h2 = true;
|
||||
// println!(" V2: ==> {} ==> {:?}", tls.0, tls.1)
|
||||
}
|
||||
|
||||
match tls.0 {
|
||||
true => _link = format!("https://{}:{}{}", ip, port, path),
|
||||
false => _link = format!("http://{}:{}{}", ip, port, path),
|
||||
}
|
||||
// if _pref == "https://" {
|
||||
// _scheme = (ip.to_string(), *port, true);
|
||||
// }else {
|
||||
// _scheme = (ip.to_string(), *port, false);
|
||||
// }
|
||||
_scheme = (ip.to_string(), *port, tls.0, is_h2, *_redir);
|
||||
// let link = format!("{}{}:{}{}", _pref, ip, port, path);
|
||||
let resp = http_request(_link.as_str(), params.0, "").await;
|
||||
match resp.0 {
|
||||
true => {
|
||||
if resp.1 {
|
||||
_scheme = (ip.to_string(), *port, tls.0, true, *_redir);
|
||||
}
|
||||
innervec.push(_scheme.clone());
|
||||
}
|
||||
false => {
|
||||
warn!("Dead Upstream : {}", _link);
|
||||
}
|
||||
}
|
||||
}
|
||||
inner.insert(path.clone().to_owned(), (innervec, AtomicUsize::new(0)));
|
||||
}
|
||||
totest.insert(host.clone(), inner);
|
||||
}
|
||||
|
||||
if first_run == 1 {
|
||||
info!("Performing initial hatchecks and upstreams ssl detection");
|
||||
clone_idmap_into(&totest, &idlist);
|
||||
info!("Aralez is up and ready to serve requests, the upstreams list is:");
|
||||
print_upstreams(&totest)
|
||||
}
|
||||
|
||||
first_run+=1;
|
||||
|
||||
if ! compare_dashmaps(&totest, &upslist){
|
||||
clone_dashmap_into(&totest, &upslist);
|
||||
clone_idmap_into(&totest, &idlist);
|
||||
}
|
||||
|
||||
populate_upstreams(&upslist, &fullist, &idlist, params, &client).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
async fn http_request(url: &str, method: &str, payload: &str) -> (bool, bool) {
|
||||
let client = Client::builder().danger_accept_invalid_certs(true).build().unwrap();
|
||||
let timeout = Duration::from_secs(1);
|
||||
pub async fn populate_upstreams(upslist: &Arc<UpstreamsDashMap>, fullist: &Arc<UpstreamsDashMap>, idlist: &Arc<UpstreamsIdMap>, params: (&str, u64), client: &Client) {
|
||||
let totest = build_upstreams(fullist, params.0, client).await;
|
||||
if !compare_dashmaps(&totest, upslist) {
|
||||
clone_dashmap_into(&totest, upslist);
|
||||
clone_idmap_into(&totest, idlist);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn initiate_upstreams(fullist: UpstreamsDashMap) -> UpstreamsDashMap {
|
||||
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap();
|
||||
build_upstreams(&fullist, "HEAD", &client).await
|
||||
}
|
||||
|
||||
async fn build_upstreams(fullist: &UpstreamsDashMap, method: &str, client: &Client) -> UpstreamsDashMap {
|
||||
let totest: UpstreamsDashMap = DashMap::new();
|
||||
let fclone = clone_dashmap(fullist);
|
||||
for val in fclone.iter() {
|
||||
let host = val.key();
|
||||
let inner = DashMap::new();
|
||||
|
||||
for path_entry in val.value().iter() {
|
||||
let path = path_entry.key();
|
||||
let mut innervec = Vec::new();
|
||||
|
||||
for (_, upstream) in path_entry.value().0.iter().enumerate() {
|
||||
let tls = detect_tls(upstream.address.as_str(), &upstream.port, &client).await;
|
||||
let is_h2 = matches!(tls.1, Some(Version::HTTP_2));
|
||||
|
||||
let link = if tls.0 {
|
||||
format!("https://{}:{}{}", upstream.address, upstream.port, path)
|
||||
} else {
|
||||
format!("http://{}:{}{}", upstream.address, upstream.port, path)
|
||||
};
|
||||
|
||||
let mut scheme = InnerMap {
|
||||
address: upstream.address.clone(),
|
||||
port: upstream.port,
|
||||
is_ssl: tls.0,
|
||||
is_http2: is_h2,
|
||||
to_https: upstream.to_https,
|
||||
rate_limit: upstream.rate_limit,
|
||||
healthcheck: upstream.healthcheck,
|
||||
};
|
||||
|
||||
if scheme.healthcheck.unwrap_or(true) {
|
||||
let resp = http_request(&link, method, "", &client).await;
|
||||
if resp.0 {
|
||||
if resp.1 {
|
||||
scheme.is_http2 = is_h2; // could be adjusted further
|
||||
}
|
||||
innervec.push(scheme);
|
||||
} else {
|
||||
warn!("Dead Upstream : {}", link);
|
||||
}
|
||||
} else {
|
||||
innervec.push(scheme);
|
||||
}
|
||||
|
||||
// let resp = http_request(&link, method, "", &client).await;
|
||||
// if resp.0 {
|
||||
// if resp.1 {
|
||||
// scheme.is_http2 = is_h2; // could be adjusted further
|
||||
// }
|
||||
// innervec.push(scheme);
|
||||
// } else {
|
||||
// warn!("Dead Upstream : {}", link);
|
||||
// }
|
||||
}
|
||||
inner.insert(path.clone(), (innervec, AtomicUsize::new(0)));
|
||||
}
|
||||
totest.insert(host.clone(), inner);
|
||||
}
|
||||
totest
|
||||
}
|
||||
|
||||
async fn http_request(url: &str, method: &str, payload: &str, client: &Client) -> (bool, bool) {
|
||||
if !["POST", "GET", "HEAD"].contains(&method) {
|
||||
error!("Method {} not supported. Only GET|POST|HEAD are supported ", method);
|
||||
return (false, false);
|
||||
}
|
||||
async fn send_request(client: &Client, method: &str, url: &str, payload: &str, timeout: Duration) -> Option<reqwest::Response> {
|
||||
async fn send_request(client: &Client, method: &str, url: &str, payload: &str) -> Option<reqwest::Response> {
|
||||
match method {
|
||||
"POST" => client.post(url).body(payload.to_owned()).timeout(timeout).send().await.ok(),
|
||||
"GET" => client.get(url).timeout(timeout).send().await.ok(),
|
||||
"HEAD" => client.head(url).timeout(timeout).send().await.ok(),
|
||||
"POST" => client.post(url).body(payload.to_owned()).send().await.ok(),
|
||||
"GET" => client.get(url).send().await.ok(),
|
||||
"HEAD" => client.head(url).send().await.ok(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
match send_request(&client, method, url, payload, timeout).await {
|
||||
match send_request(&client, method, url, payload).await {
|
||||
Some(response) => {
|
||||
let status = response.status().as_u16();
|
||||
((99..499).contains(&status), false)
|
||||
}
|
||||
None => {
|
||||
// let fallback_url = url.replace("https", "http");
|
||||
// ping_grpc(&fallback_url).await
|
||||
(ping_grpc(&url).await, true)
|
||||
}
|
||||
None => (ping_grpc(&url).await, true),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -128,10 +126,7 @@ pub async fn ping_grpc(addr: &str) -> bool {
|
||||
let endpoint = endpoint.timeout(Duration::from_secs(2));
|
||||
|
||||
match tokio::time::timeout(Duration::from_secs(3), endpoint.connect()).await {
|
||||
Ok(Ok(_channel)) => {
|
||||
// println!("{:?} ==> {:?} ==> {}", endpoint, _channel, addr);
|
||||
true
|
||||
}
|
||||
Ok(Ok(_channel)) => true,
|
||||
_ => false,
|
||||
}
|
||||
} else {
|
||||
@@ -139,15 +134,24 @@ pub async fn ping_grpc(addr: &str) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
async fn detect_tls(ip: &str, port: &u16) -> (bool, Option<Version>) {
|
||||
let url = format!("https://{}:{}", ip, port);
|
||||
// let url = format!("{}:{}", ip, port);
|
||||
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().unwrap();
|
||||
match client.get(&url).send().await {
|
||||
Ok(response) => (true, Some(response.version())),
|
||||
Err(e) => {
|
||||
if e.is_builder() || e.is_connect() || e.to_string().contains("tls") {
|
||||
(false, None)
|
||||
async fn detect_tls(ip: &str, port: &u16, client: &Client) -> (bool, Option<Version>) {
|
||||
let https_url = format!("https://{}:{}", ip, port);
|
||||
match client.get(&https_url).send().await {
|
||||
Ok(response) => {
|
||||
// println!("{} => {:?} (HTTPS)", https_url, response.version());
|
||||
return (true, Some(response.version()));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
let http_url = format!("http://{}:{}", ip, port);
|
||||
match client.get(&http_url).send().await {
|
||||
Ok(response) => {
|
||||
// println!("{} => {:?} (HTTP)", http_url, response.version());
|
||||
(false, Some(response.version()))
|
||||
}
|
||||
Err(_) => {
|
||||
if ping_grpc(&http_url).await {
|
||||
(false, Some(Version::HTTP_2))
|
||||
} else {
|
||||
(false, None)
|
||||
}
|
||||
|
||||
134
src/utils/kuber.rs
Normal file
134
src/utils/kuber.rs
Normal file
@@ -0,0 +1,134 @@
|
||||
// use crate::utils::dnsclient::DnsClientPool;
|
||||
use crate::utils::structs::{Configuration, InnerMap, UpstreamsDashMap};
|
||||
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams};
|
||||
use dashmap::DashMap;
|
||||
use futures::channel::mpsc::Sender;
|
||||
use futures::SinkExt;
|
||||
use pingora::prelude::sleep;
|
||||
use rand::Rng;
|
||||
use reqwest::Client;
|
||||
use std::env;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
// static KUBERNETES_SERVICE_HOST: &str = "IP_ADDRESS";
|
||||
// static TOKEN: &str = "TOKEN";
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct Endpoints {
|
||||
subsets: Option<Vec<Subset>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct Subset {
|
||||
addresses: Option<Vec<Address>>,
|
||||
ports: Option<Vec<Port>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct Address {
|
||||
ip: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct Port {
|
||||
// name: String,
|
||||
port: u16,
|
||||
}
|
||||
|
||||
pub async fn start(mut toreturn: Sender<Configuration>, config: Arc<Configuration>) {
|
||||
let upstreams = UpstreamsDashMap::new();
|
||||
let prev_upstreams = UpstreamsDashMap::new();
|
||||
loop {
|
||||
if let Some(kuber) = config.kubernetes.clone() {
|
||||
let path = kuber.tokenpath.unwrap_or("/var/run/secrets/kubernetes.io/serviceaccount/token".to_string());
|
||||
let token = read_token(path.as_str()).await;
|
||||
let servers = kuber.servers.unwrap_or(vec![format!(
|
||||
"{}:{}",
|
||||
env::var("KUBERNETES_SERVICE_HOST").unwrap_or("0.0.0.0".to_string()),
|
||||
env::var("KUBERNETES_SERVICE_PORT_HTTPS").unwrap_or("0".to_string())
|
||||
)]);
|
||||
let end = servers.len() - 1;
|
||||
let mut num = 0;
|
||||
if end > 0 {
|
||||
num = rand::rng().random_range(0..end);
|
||||
}
|
||||
let server = servers.get(num).unwrap().to_string();
|
||||
|
||||
if let Some(svc) = kuber.services {
|
||||
for i in svc {
|
||||
let url = format!("https://{}/api/v1/namespaces/staging/endpoints/{}", server, i.real);
|
||||
let list = get_by_http(&*url, &*token).await;
|
||||
if let Some(list) = list {
|
||||
upstreams.insert(i.proxy.clone(), list);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !compare_dashmaps(&upstreams, &prev_upstreams) {
|
||||
let tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: config.headers.clone(),
|
||||
consul: config.consul.clone(),
|
||||
kubernetes: config.kubernetes.clone(),
|
||||
typecfg: config.typecfg.clone(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
|
||||
clone_dashmap_into(&upstreams, &prev_upstreams);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
print_upstreams(&tosend.upstreams);
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_by_http(url: &str, token: &str) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
|
||||
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().ok()?;
|
||||
|
||||
let resp = client.get(url).bearer_auth(token).send().await.ok()?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
eprintln!("Kubernetes API returned status: {}", resp.status());
|
||||
return None;
|
||||
}
|
||||
|
||||
let endpoints: Endpoints = resp.json().await.ok()?;
|
||||
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
|
||||
|
||||
if let Some(subsets) = endpoints.subsets {
|
||||
for subset in subsets {
|
||||
if let (Some(addresses), Some(ports)) = (subset.addresses, subset.ports) {
|
||||
for addr in addresses {
|
||||
let mut inner_vec = Vec::new();
|
||||
for port in &ports {
|
||||
let to_add = InnerMap {
|
||||
address: addr.ip.clone(),
|
||||
port: port.port.clone(),
|
||||
is_ssl: false,
|
||||
is_http2: false,
|
||||
to_https: false,
|
||||
rate_limit: None,
|
||||
healthcheck: None,
|
||||
};
|
||||
inner_vec.push(to_add);
|
||||
}
|
||||
upstreams.insert("/".to_string(), (inner_vec, AtomicUsize::new(0)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(upstreams)
|
||||
}
|
||||
|
||||
async fn read_token(path: &str) -> String {
|
||||
let mut file = File::open(path).await.unwrap();
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents).await.unwrap();
|
||||
contents.trim().to_string()
|
||||
}
|
||||
@@ -61,27 +61,3 @@ pub fn calc_metrics(metric_types: &MetricTypes) {
|
||||
REQUESTS_BY_METHOD.with_label_values(&[&metric_types.method]).inc();
|
||||
RESPONSE_LATENCY.observe(metric_types.latency.as_secs_f64());
|
||||
}
|
||||
/*
|
||||
pub fn calc_metrics(method: String, code: u16, latency: Duration) {
|
||||
REQUEST_COUNT.inc();
|
||||
let timer = REQUEST_LATENCY.start_timer();
|
||||
timer.observe_duration();
|
||||
RESPONSE_CODES.with_label_values(&[&code.to_string()]).inc();
|
||||
REQUESTS_BY_METHOD.with_label_values(&[&method]).inc();
|
||||
RESPONSE_LATENCY.observe(latency.as_secs_f64());
|
||||
}
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(std::time::Duration::from_secs(5));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
|
||||
// read Pingora stats
|
||||
let stats = pingora.get_stats();
|
||||
|
||||
// update Prometheus metrics accordingly
|
||||
REQUEST_COUNT.set(stats.requests_total);
|
||||
// ... etc
|
||||
}
|
||||
});
|
||||
*/
|
||||
|
||||
@@ -1,139 +1,157 @@
|
||||
use crate::utils::healthcheck;
|
||||
use crate::utils::structs::*;
|
||||
use crate::utils::tools::{clone_dashmap, clone_dashmap_into, print_upstreams};
|
||||
use dashmap::DashMap;
|
||||
use log::{error, info, warn};
|
||||
use serde_yaml::Error;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
// use std::sync::mpsc::{channel, Receiver, Sender};
|
||||
use std::{env, fs};
|
||||
// use tokio::sync::oneshot::{Receiver, Sender};
|
||||
|
||||
pub fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
|
||||
let mut toreturn: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: Default::default(),
|
||||
consul: None,
|
||||
typecfg: "".to_string(),
|
||||
extraparams: Extraparams {
|
||||
sticky_sessions: false,
|
||||
to_https: None,
|
||||
authentication: DashMap::new(),
|
||||
pub async fn load_configuration(d: &str, kind: &str) -> Option<Configuration> {
|
||||
let yaml_data = match kind {
|
||||
"filepath" => match fs::read_to_string(d) {
|
||||
Ok(data) => {
|
||||
info!("Reading upstreams from {}", d);
|
||||
data
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Reading: {}: {:?}", d, e);
|
||||
warn!("Running with empty upstreams list, update it via API");
|
||||
return None;
|
||||
}
|
||||
},
|
||||
};
|
||||
toreturn.upstreams = UpstreamsDashMap::new();
|
||||
toreturn.headers = Headers::new();
|
||||
|
||||
let mut yaml_data = d.to_string();
|
||||
match kind {
|
||||
"filepath" => {
|
||||
let _ = match fs::read_to_string(d) {
|
||||
Ok(data) => {
|
||||
info!("Reading upstreams from {}", d);
|
||||
yaml_data = data
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Reading: {}: {:?}", d, e.to_string());
|
||||
warn!("Running with empty upstreams list, update it via API");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
}
|
||||
"content" => {
|
||||
info!("Reading upstreams from API post body");
|
||||
d.to_string()
|
||||
}
|
||||
_ => error!("Mismatched parameter, only filepath|content is allowed "),
|
||||
}
|
||||
|
||||
let p: Result<Config, Error> = serde_yaml::from_str(&yaml_data);
|
||||
match p {
|
||||
Ok(parsed) => {
|
||||
let global_headers = DashMap::new();
|
||||
let mut hl = Vec::new();
|
||||
if let Some(headers) = &parsed.headers {
|
||||
for header in headers.iter() {
|
||||
if let Some((key, val)) = header.split_once(':') {
|
||||
hl.push((key.to_string(), val.to_string()));
|
||||
}
|
||||
}
|
||||
global_headers.insert("/".to_string(), hl);
|
||||
toreturn.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
|
||||
|
||||
toreturn.extraparams.sticky_sessions = parsed.sticky_sessions;
|
||||
toreturn.extraparams.to_https = parsed.to_https;
|
||||
}
|
||||
if let Some(auth) = &parsed.authorization {
|
||||
let name = auth.get("type").unwrap().to_string();
|
||||
let creds = auth.get("creds").unwrap().to_string();
|
||||
let val: Vec<String> = vec![name, creds];
|
||||
toreturn.extraparams.authentication.insert("authorization".to_string(), val);
|
||||
} else {
|
||||
toreturn.extraparams.authentication = DashMap::new();
|
||||
}
|
||||
|
||||
match parsed.provider.as_str() {
|
||||
"file" => {
|
||||
toreturn.typecfg = "file".to_string();
|
||||
if let Some(upstream) = parsed.upstreams {
|
||||
for (hostname, host_config) in upstream {
|
||||
let path_map = DashMap::new();
|
||||
let header_list = DashMap::new();
|
||||
for (path, path_config) in host_config.paths {
|
||||
let mut server_list = Vec::new();
|
||||
let mut hl = Vec::new();
|
||||
if let Some(headers) = &path_config.headers {
|
||||
for header in headers.iter().by_ref() {
|
||||
if let Some((key, val)) = header.split_once(':') {
|
||||
hl.push((key.to_string(), val.to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
header_list.insert(path.clone(), hl);
|
||||
for server in path_config.servers {
|
||||
if let Some((ip, port_str)) = server.split_once(':') {
|
||||
if let Ok(port) = port_str.parse::<u16>() {
|
||||
// let to_https = matches!(path_config.to_https, Some(true));
|
||||
let to_https = path_config.to_https.unwrap_or(false);
|
||||
server_list.push((ip.to_string(), port, true, false, to_https));
|
||||
}
|
||||
}
|
||||
}
|
||||
path_map.insert(path, (server_list, AtomicUsize::new(0)));
|
||||
}
|
||||
toreturn.headers.insert(hostname.clone(), header_list);
|
||||
toreturn.upstreams.insert(hostname, path_map);
|
||||
}
|
||||
}
|
||||
Some(toreturn)
|
||||
}
|
||||
"consul" => {
|
||||
toreturn.typecfg = "consul".to_string();
|
||||
let consul = parsed.consul;
|
||||
match consul {
|
||||
Some(consul) => {
|
||||
toreturn.consul = Some(consul);
|
||||
Some(toreturn)
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
"kubernetes" => None,
|
||||
_ => {
|
||||
warn!("Unknown provider {}", parsed.provider);
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
error!("Mismatched parameter, only filepath|content is allowed");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
let parsed: Config = match serde_yaml::from_str(&yaml_data) {
|
||||
Ok(cfg) => cfg,
|
||||
Err(e) => {
|
||||
error!("Failed to parse upstreams file: {}", e);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
let mut toreturn = Configuration::default();
|
||||
|
||||
populate_headers_and_auth(&mut toreturn, &parsed).await;
|
||||
toreturn.typecfg = parsed.provider.clone();
|
||||
|
||||
match parsed.provider.as_str() {
|
||||
"file" => {
|
||||
populate_file_upstreams(&mut toreturn, &parsed).await;
|
||||
Some(toreturn)
|
||||
}
|
||||
"consul" => {
|
||||
toreturn.consul = parsed.consul;
|
||||
toreturn.consul.is_some().then_some(toreturn)
|
||||
}
|
||||
"kubernetes" => {
|
||||
toreturn.kubernetes = parsed.kubernetes;
|
||||
toreturn.kubernetes.is_some().then_some(toreturn)
|
||||
}
|
||||
_ => {
|
||||
warn!("Unknown provider {}", parsed.provider);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn populate_headers_and_auth(config: &mut Configuration, parsed: &Config) {
|
||||
if let Some(headers) = &parsed.headers {
|
||||
let mut hl = Vec::new();
|
||||
for header in headers {
|
||||
if let Some((key, val)) = header.split_once(':') {
|
||||
hl.push((key.trim().to_string(), val.trim().to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
let global_headers = DashMap::new();
|
||||
global_headers.insert("/".to_string(), hl);
|
||||
config.headers.insert("GLOBAL_HEADERS".to_string(), global_headers);
|
||||
}
|
||||
|
||||
config.extraparams.sticky_sessions = parsed.sticky_sessions;
|
||||
config.extraparams.to_https = parsed.to_https;
|
||||
config.extraparams.rate_limit = parsed.rate_limit;
|
||||
|
||||
if let Some(rate) = &parsed.rate_limit {
|
||||
info!("Applied Global Rate Limit : {} request per second", rate);
|
||||
}
|
||||
|
||||
if let Some(auth) = &parsed.authorization {
|
||||
let name = auth.get("type").unwrap_or(&"".to_string()).to_string();
|
||||
let creds = auth.get("creds").unwrap_or(&"".to_string()).to_string();
|
||||
config.extraparams.authentication.insert("authorization".to_string(), vec![name, creds]);
|
||||
} else {
|
||||
config.extraparams.authentication = DashMap::new();
|
||||
}
|
||||
}
|
||||
|
||||
async fn populate_file_upstreams(config: &mut Configuration, parsed: &Config) {
|
||||
let imtdashmap = UpstreamsDashMap::new();
|
||||
if let Some(upstreams) = &parsed.upstreams {
|
||||
for (hostname, host_config) in upstreams {
|
||||
let path_map = DashMap::new();
|
||||
let header_list = DashMap::new();
|
||||
for (path, path_config) in &host_config.paths {
|
||||
if let Some(rate) = &path_config.rate_limit {
|
||||
info!("Applied Rate Limit for {} : {} request per second", hostname, rate);
|
||||
}
|
||||
|
||||
let mut server_list = Vec::new();
|
||||
let mut hl = Vec::new();
|
||||
|
||||
if let Some(headers) = &path_config.headers {
|
||||
for header in headers {
|
||||
if let Some((key, val)) = header.split_once(':') {
|
||||
hl.push((key.trim().to_string(), val.trim().to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
header_list.insert(path.clone(), hl);
|
||||
|
||||
for server in &path_config.servers {
|
||||
if let Some((ip, port_str)) = server.split_once(':') {
|
||||
if let Ok(port) = port_str.parse::<u16>() {
|
||||
server_list.push(InnerMap {
|
||||
address: ip.trim().to_string(),
|
||||
port,
|
||||
is_ssl: true,
|
||||
is_http2: false,
|
||||
to_https: path_config.to_https.unwrap_or(false),
|
||||
rate_limit: path_config.rate_limit,
|
||||
healthcheck: path_config.healthcheck,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
path_map.insert(path.clone(), (server_list, AtomicUsize::new(0)));
|
||||
}
|
||||
config.headers.insert(hostname.clone(), header_list);
|
||||
imtdashmap.insert(hostname.clone(), path_map);
|
||||
}
|
||||
let y = clone_dashmap(&imtdashmap);
|
||||
let r = healthcheck::initiate_upstreams(y).await;
|
||||
clone_dashmap_into(&r, &config.upstreams);
|
||||
println!("Upstream Config:");
|
||||
print_upstreams(&config.upstreams);
|
||||
}
|
||||
}
|
||||
pub fn parce_main_config(path: &str) -> AppConfig {
|
||||
info!("Parsing configuration");
|
||||
let data = fs::read_to_string(path).unwrap();
|
||||
let reply = DashMap::new();
|
||||
let cfg: HashMap<String, String> = serde_yaml::from_str(&*data).expect("Failed to parse main config file");
|
||||
let mut cfo: AppConfig = serde_yaml::from_str(&*data).expect("Failed to parse main config file");
|
||||
log_builder(&cfo);
|
||||
cfo.hc_method = cfo.hc_method.to_uppercase();
|
||||
for (k, v) in cfg {
|
||||
reply.insert(k.to_string(), v.to_string());
|
||||
@@ -150,15 +168,52 @@ pub fn parce_main_config(path: &str) -> AppConfig {
|
||||
}
|
||||
}
|
||||
};
|
||||
// match cfo.config_tls_address.clone() {
|
||||
// Some(tls_cert) => {
|
||||
// if let Some((ip, port_str)) = tls_cert.split_once(':') {
|
||||
// if let Ok(port) = port_str.parse::<u16>() {
|
||||
// cfo.local_tls_server = Option::from((ip.to_string(), port));
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// None => {}
|
||||
// };
|
||||
cfo.proxy_tls_grade = parce_tls_grades(cfo.proxy_tls_grade.clone());
|
||||
cfo
|
||||
}
|
||||
|
||||
fn parce_tls_grades(what: Option<String>) -> Option<String> {
|
||||
match what {
|
||||
Some(g) => match g.to_ascii_lowercase().as_str() {
|
||||
"high" => {
|
||||
// info!("TLS grade set to: [ HIGH ]");
|
||||
Some("high".to_string())
|
||||
}
|
||||
"medium" => {
|
||||
// info!("TLS grade set to: [ MEDIUM ]");
|
||||
Some("medium".to_string())
|
||||
}
|
||||
"unsafe" => {
|
||||
// info!("TLS grade set to: [ UNSAFE ]");
|
||||
Some("unsafe".to_string())
|
||||
}
|
||||
_ => {
|
||||
warn!("Error parsing TLS grade, defaulting to: `medium`");
|
||||
Some("medium".to_string())
|
||||
}
|
||||
},
|
||||
None => {
|
||||
warn!("TLS grade not set, defaulting to: medium");
|
||||
Some("b".to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn log_builder(conf: &AppConfig) {
|
||||
let log_level = conf.log_level.clone();
|
||||
unsafe {
|
||||
match log_level.as_str() {
|
||||
"info" => env::set_var("RUST_LOG", "info"),
|
||||
"error" => env::set_var("RUST_LOG", "error"),
|
||||
"warn" => env::set_var("RUST_LOG", "warn"),
|
||||
"debug" => env::set_var("RUST_LOG", "debug"),
|
||||
"trace" => env::set_var("RUST_LOG", "trace"),
|
||||
"off" => env::set_var("RUST_LOG", "off"),
|
||||
_ => {
|
||||
println!("Error reading log level, defaulting to: INFO");
|
||||
env::set_var("RUST_LOG", "info")
|
||||
}
|
||||
}
|
||||
}
|
||||
env_logger::builder().init();
|
||||
}
|
||||
|
||||
@@ -3,63 +3,83 @@ use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
|
||||
pub type InnerMap = (String, u16, bool, bool, bool);
|
||||
pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<InnerMap>, AtomicUsize)>>;
|
||||
|
||||
pub type UpstreamsIdMap = DashMap<String, InnerMap>;
|
||||
pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
pub struct ServiceMapping {
|
||||
pub proxy: String,
|
||||
pub real: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Extraparams {
|
||||
pub sticky_sessions: bool,
|
||||
pub to_https: Option<bool>,
|
||||
pub authentication: DashMap<String, Vec<String>>,
|
||||
pub rate_limit: Option<isize>,
|
||||
}
|
||||
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
|
||||
pub struct Kubernetes {
|
||||
pub servers: Option<Vec<String>>,
|
||||
pub services: Option<Vec<ServiceMapping>>,
|
||||
pub tokenpath: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
|
||||
pub struct Consul {
|
||||
pub servers: Option<Vec<String>>,
|
||||
pub services: Option<Vec<ServiceMapping>>,
|
||||
pub token: Option<String>,
|
||||
}
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct Config {
|
||||
pub provider: String,
|
||||
pub sticky_sessions: bool,
|
||||
pub to_https: Option<bool>,
|
||||
#[serde(default)]
|
||||
pub upstreams: Option<HashMap<String, HostConfig>>,
|
||||
#[serde(default)]
|
||||
pub globals: Option<HashMap<String, Vec<String>>>,
|
||||
#[serde(default)]
|
||||
pub headers: Option<Vec<String>>,
|
||||
#[serde(default)]
|
||||
pub authorization: Option<HashMap<String, String>>,
|
||||
#[serde(default)]
|
||||
pub consul: Option<Consul>,
|
||||
#[serde(default)]
|
||||
pub kubernetes: Option<Kubernetes>,
|
||||
#[serde(default)]
|
||||
pub rate_limit: Option<isize>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct HostConfig {
|
||||
pub paths: HashMap<String, PathConfig>,
|
||||
pub rate_limit: Option<isize>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct PathConfig {
|
||||
pub servers: Vec<String>,
|
||||
pub to_https: Option<bool>,
|
||||
pub headers: Option<Vec<String>>,
|
||||
pub rate_limit: Option<isize>,
|
||||
pub healthcheck: Option<bool>,
|
||||
}
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Default)]
|
||||
pub struct Configuration {
|
||||
pub upstreams: UpstreamsDashMap,
|
||||
pub headers: Headers,
|
||||
pub consul: Option<Consul>,
|
||||
pub kubernetes: Option<Kubernetes>,
|
||||
pub typecfg: String,
|
||||
pub extraparams: Extraparams,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct AppConfig {
|
||||
pub hc_interval: u16,
|
||||
pub hc_method: String,
|
||||
@@ -68,13 +88,41 @@ pub struct AppConfig {
|
||||
pub master_key: String,
|
||||
pub config_address: String,
|
||||
pub proxy_address_http: String,
|
||||
pub config_api_enabled: bool,
|
||||
pub config_tls_address: Option<String>,
|
||||
pub config_tls_certificate: Option<String>,
|
||||
pub config_tls_key_file: Option<String>,
|
||||
pub proxy_address_tls: Option<String>,
|
||||
pub proxy_port_tls: Option<u16>,
|
||||
// pub tls_certificate: Option<String>,
|
||||
// pub tls_key_file: Option<String>,
|
||||
pub local_server: Option<(String, u16)>,
|
||||
pub proxy_certificates: Option<String>,
|
||||
pub proxy_tls_grade: Option<String>,
|
||||
pub file_server_address: Option<String>,
|
||||
pub file_server_folder: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct InnerMap {
|
||||
pub address: String,
|
||||
pub port: u16,
|
||||
pub is_ssl: bool,
|
||||
pub is_http2: bool,
|
||||
pub to_https: bool,
|
||||
pub rate_limit: Option<isize>,
|
||||
pub healthcheck: Option<bool>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl InnerMap {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
address: Default::default(),
|
||||
port: Default::default(),
|
||||
is_ssl: Default::default(),
|
||||
is_http2: Default::default(),
|
||||
to_https: Default::default(),
|
||||
rate_limit: Default::default(),
|
||||
healthcheck: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
120
src/utils/tls.rs
120
src/utils/tls.rs
@@ -1,4 +1,7 @@
|
||||
use openssl::ssl::{select_next_proto, AlpnError, NameType, SniError, SslAlert, SslContext, SslFiletype, SslMethod, SslRef};
|
||||
use dashmap::DashMap;
|
||||
use log::{error, info, warn};
|
||||
use pingora::tls::ssl::{select_next_proto, AlpnError, NameType, SniError, SslAlert, SslContext, SslFiletype, SslMethod, SslRef, SslVersion};
|
||||
use pingora_core::listeners::tls::TlsSettings;
|
||||
use rustls_pemfile::{read_one, Item};
|
||||
use serde::Deserialize;
|
||||
use std::collections::HashSet;
|
||||
@@ -28,43 +31,56 @@ struct CertificateInfo {
|
||||
#[derive(Debug)]
|
||||
pub struct Certificates {
|
||||
configs: Vec<CertificateInfo>,
|
||||
name_map: DashMap<String, SslContext>,
|
||||
pub default_cert_path: String,
|
||||
pub default_key_path: String,
|
||||
}
|
||||
|
||||
impl Certificates {
|
||||
pub fn new(configs: &Vec<CertificateConfig>) -> Self {
|
||||
let default_cert = configs.first().expect("atleast one TLS certificate required");
|
||||
pub fn new(configs: &Vec<CertificateConfig>, _grade: &str) -> Option<Self> {
|
||||
let default_cert = configs.first().expect("At least one TLS certificate required");
|
||||
let mut cert_infos = Vec::new();
|
||||
let name_map: DashMap<String, SslContext> = DashMap::new();
|
||||
for config in configs {
|
||||
cert_infos.push(
|
||||
load_cert_info(&config.cert_path, &config.key_path)
|
||||
.unwrap_or_else(|| panic!("unable to load certificate info | public: {}, private: {}", &config.cert_path, &config.key_path)),
|
||||
);
|
||||
let cert_info = load_cert_info(&config.cert_path, &config.key_path, _grade);
|
||||
match cert_info {
|
||||
Some(cert) => {
|
||||
for name in &cert.common_names {
|
||||
name_map.insert(name.clone(), cert.ssl_context.clone());
|
||||
}
|
||||
for name in &cert.alt_names {
|
||||
name_map.insert(name.clone(), cert.ssl_context.clone());
|
||||
}
|
||||
|
||||
cert_infos.push(cert)
|
||||
}
|
||||
None => {
|
||||
error!("Unable to load certificate info | public: {}, private: {}", &config.cert_path, &config.key_path);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
Self {
|
||||
Some(Self {
|
||||
name_map: name_map,
|
||||
configs: cert_infos,
|
||||
default_cert_path: default_cert.cert_path.clone(),
|
||||
default_key_path: default_cert.key_path.clone(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn find_ssl_context(&self, server_name: &str) -> Option<&SslContext> {
|
||||
fn find_ssl_context(&self, server_name: &str) -> Option<SslContext> {
|
||||
if let Some(ctx) = self.name_map.get(server_name) {
|
||||
return Some(ctx.clone());
|
||||
}
|
||||
for config in &self.configs {
|
||||
// Exact name match
|
||||
if config.common_names.contains(&server_name.to_string()) || config.alt_names.contains(&server_name.to_string()) {
|
||||
return Some(&config.ssl_context);
|
||||
}
|
||||
|
||||
// Wildcard match
|
||||
for name in &config.common_names {
|
||||
if name.starts_with("*.") && server_name.ends_with(&name[1..]) {
|
||||
return Some(&config.ssl_context);
|
||||
return Some(config.ssl_context.clone());
|
||||
}
|
||||
}
|
||||
for name in &config.alt_names {
|
||||
if name.starts_with("*.") && server_name.ends_with(&name[1..]) {
|
||||
return Some(&config.ssl_context);
|
||||
return Some(config.ssl_context.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -74,21 +90,23 @@ impl Certificates {
|
||||
pub fn server_name_callback(&self, ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert) -> Result<(), SniError> {
|
||||
let server_name = ssl_ref.servername(NameType::HOST_NAME);
|
||||
log::debug!("TLS connect: server_name = {:?}, ssl_ref = {:?}, ssl_alert = {:?}", server_name, ssl_ref, ssl_alert);
|
||||
// let start_time = Instant::now();
|
||||
if let Some(name) = server_name {
|
||||
match self.find_ssl_context(name) {
|
||||
Some(ctx) => {
|
||||
ssl_ref.set_ssl_context(ctx).map_err(|_| SniError::ALERT_FATAL)?;
|
||||
ssl_ref.set_ssl_context(&*ctx).map_err(|_| SniError::ALERT_FATAL)?;
|
||||
}
|
||||
None => {
|
||||
log::debug!("No matching server name found");
|
||||
}
|
||||
}
|
||||
}
|
||||
// println!("Context ==> {:?} <==", start_time.elapsed());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn load_cert_info(cert_path: &str, key_path: &str) -> Option<CertificateInfo> {
|
||||
fn load_cert_info(cert_path: &str, key_path: &str, _grade: &str) -> Option<CertificateInfo> {
|
||||
let mut common_names = HashSet::new();
|
||||
let mut alt_names = HashSet::new();
|
||||
|
||||
@@ -160,7 +178,6 @@ fn load_cert_info(cert_path: &str, key_path: &str) -> Option<CertificateInfo> {
|
||||
|
||||
fn create_ssl_context(cert_path: &str, key_path: &str) -> Result<SslContext, Box<dyn std::error::Error>> {
|
||||
let mut ctx = SslContext::builder(SslMethod::tls())?;
|
||||
|
||||
ctx.set_certificate_chain_file(cert_path)?;
|
||||
ctx.set_private_key_file(key_path, SslFiletype::PEM)?;
|
||||
ctx.set_alpn_select_callback(prefer_h2);
|
||||
@@ -168,9 +185,70 @@ fn create_ssl_context(cert_path: &str, key_path: &str) -> Result<SslContext, Box
|
||||
Ok(built)
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct CipherSuite {
|
||||
pub high: &'static str,
|
||||
pub medium: &'static str,
|
||||
pub legacy: &'static str,
|
||||
}
|
||||
const CIPHERS: CipherSuite = CipherSuite {
|
||||
high: "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305",
|
||||
medium: "ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:AES128-GCM-SHA256",
|
||||
legacy: "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH",
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum TlsGrade {
|
||||
HIGH,
|
||||
MEDIUM,
|
||||
LEGACY,
|
||||
}
|
||||
|
||||
impl TlsGrade {
|
||||
pub fn from_str(s: &str) -> Option<Self> {
|
||||
match s.to_ascii_lowercase().as_str() {
|
||||
"high" => Some(TlsGrade::HIGH),
|
||||
"medium" => Some(TlsGrade::MEDIUM),
|
||||
"unsafe" => Some(TlsGrade::LEGACY),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn prefer_h2<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
|
||||
match select_next_proto("\x02h2\x08http/1.1".as_bytes(), alpn_in) {
|
||||
Some(p) => Ok(p),
|
||||
_ => Err(AlpnError::NOACK),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_tsl_grade(tls_settings: &mut TlsSettings, grade: &str) {
|
||||
let config_grade = TlsGrade::from_str(grade);
|
||||
match config_grade {
|
||||
Some(TlsGrade::HIGH) => {
|
||||
let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1_2));
|
||||
// let _ = tls_settings.set_max_proto_version(Some(SslVersion::TLS1_3));
|
||||
let _ = tls_settings.set_cipher_list(CIPHERS.high);
|
||||
let _ = tls_settings.set_ciphersuites(CIPHERS.high);
|
||||
info!("TLS grade: {:?}, => HIGH", tls_settings.options());
|
||||
}
|
||||
Some(TlsGrade::MEDIUM) => {
|
||||
let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
|
||||
let _ = tls_settings.set_cipher_list(CIPHERS.medium);
|
||||
let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
|
||||
info!("TLS grade: {:?}, => MEDIUM", tls_settings.options());
|
||||
}
|
||||
Some(TlsGrade::LEGACY) => {
|
||||
let _ = tls_settings.set_min_proto_version(Some(SslVersion::SSL3));
|
||||
let _ = tls_settings.set_cipher_list(CIPHERS.legacy);
|
||||
let _ = tls_settings.set_ciphersuites(CIPHERS.legacy);
|
||||
warn!("TLS grade: {:?}, => UNSAFE", tls_settings.options());
|
||||
}
|
||||
None => {
|
||||
// Defaults to MEDIUM
|
||||
let _ = tls_settings.set_min_proto_version(Some(SslVersion::TLS1));
|
||||
let _ = tls_settings.set_cipher_list(CIPHERS.medium);
|
||||
let _ = tls_settings.set_ciphersuites(CIPHERS.medium);
|
||||
warn!("TLS grade is not detected defaulting top MEDIUM");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
use crate::utils::structs::{UpstreamsDashMap, UpstreamsIdMap};
|
||||
use crate::utils::structs::{InnerMap, UpstreamsDashMap, UpstreamsIdMap};
|
||||
use crate::utils::tls;
|
||||
use crate::utils::tls::CertificateConfig;
|
||||
use dashmap::DashMap;
|
||||
use log::{error, info};
|
||||
use notify::{event::ModifyKind, Config, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::any::type_name;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fmt::Write;
|
||||
use std::fs;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::mpsc::{channel, Sender};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
|
||||
@@ -16,10 +21,12 @@ pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
|
||||
|
||||
for path_entry in host_entry.value().iter() {
|
||||
let path = path_entry.key();
|
||||
println!(" Path: {}", path);
|
||||
|
||||
for (ip, port, ssl, vers, to_https) in path_entry.value().0.clone() {
|
||||
println!(" ===> IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}", ip, port, ssl, vers, to_https);
|
||||
println!(" Path: {}", path);
|
||||
for f in path_entry.value().0.clone() {
|
||||
println!(
|
||||
" IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}",
|
||||
f.address, f.port, f.is_ssl, f.is_http2, f.to_https
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -135,13 +142,22 @@ pub fn clone_idmap_into(original: &UpstreamsDashMap, cloned: &UpstreamsIdMap) {
|
||||
let new_vec = vec.clone();
|
||||
for x in vec.iter() {
|
||||
let mut id = String::new();
|
||||
write!(&mut id, "{}:{}:{}", x.0, x.1, x.2).unwrap();
|
||||
write!(&mut id, "{}:{}:{}", x.address, x.port, x.is_ssl).unwrap();
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(id.clone().into_bytes());
|
||||
let hash = hasher.finalize();
|
||||
let hex_hash = base16ct::lower::encode_string(&hash);
|
||||
let hh = hex_hash[0..50].to_string();
|
||||
cloned.insert(id, (hh.clone(), 0000, false, false, false));
|
||||
let to_add = InnerMap {
|
||||
address: hh.clone(),
|
||||
port: 0,
|
||||
is_ssl: false,
|
||||
is_http2: false,
|
||||
to_https: false,
|
||||
rate_limit: None,
|
||||
healthcheck: None,
|
||||
};
|
||||
cloned.insert(id, to_add);
|
||||
cloned.insert(hh, x.to_owned());
|
||||
}
|
||||
new_inner_map.insert(path.clone(), new_vec);
|
||||
@@ -162,7 +178,7 @@ pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
|
||||
inner.push(name.clone() + ".crt");
|
||||
inner.push(name.clone() + ".key");
|
||||
f.insert(domain[domain.len() - 1].to_owned(), inner);
|
||||
let y = tls::CertificateConfig {
|
||||
let y = CertificateConfig {
|
||||
cert_path: name.clone() + ".crt",
|
||||
key_path: name.clone() + ".key",
|
||||
};
|
||||
@@ -170,7 +186,7 @@ pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
|
||||
}
|
||||
}
|
||||
for (_, v) in f.iter() {
|
||||
let y = tls::CertificateConfig {
|
||||
let y = CertificateConfig {
|
||||
cert_path: v[0].clone(),
|
||||
key_path: v[1].clone(),
|
||||
};
|
||||
@@ -178,3 +194,30 @@ pub fn listdir(dir: String) -> Vec<tls::CertificateConfig> {
|
||||
}
|
||||
certificate_configs
|
||||
}
|
||||
|
||||
pub fn watch_folder(path: String, sender: Sender<Vec<CertificateConfig>>) -> notify::Result<()> {
|
||||
let (tx, rx) = channel();
|
||||
let mut watcher = RecommendedWatcher::new(tx, Config::default())?;
|
||||
watcher.watch(path.as_ref(), RecursiveMode::Recursive)?;
|
||||
info!("Watching for certificates in : {}", path);
|
||||
let certificate_configs = listdir(path.clone());
|
||||
sender.send(certificate_configs)?;
|
||||
let mut start = Instant::now();
|
||||
loop {
|
||||
match rx.recv_timeout(Duration::from_secs(1)) {
|
||||
Ok(Ok(event)) => match &event.kind {
|
||||
EventKind::Modify(ModifyKind::Data(_)) | EventKind::Create(_) | EventKind::Remove(_) => {
|
||||
if start.elapsed() > Duration::from_secs(1) {
|
||||
start = Instant::now();
|
||||
let certificate_configs = listdir(path.clone());
|
||||
sender.send(certificate_configs)?;
|
||||
info!("Certificate changed: {:?}, {:?}", event.kind, event.paths);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
Ok(Err(e)) => error!("Watch error: {:?}", e),
|
||||
Err(_) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use crate::utils::discovery::{APIUpstreamProvider, ConsulProvider, Discovery, FromFileProvider};
|
||||
use crate::utils::discovery::{APIUpstreamProvider, ConsulProvider, Discovery, FromFileProvider, KubernetesProvider};
|
||||
use crate::utils::parceyaml::load_configuration;
|
||||
use crate::utils::structs::Configuration;
|
||||
use crate::utils::tools::*;
|
||||
use crate::utils::*;
|
||||
@@ -6,8 +7,8 @@ use crate::web::proxyhttp::LB;
|
||||
use async_trait::async_trait;
|
||||
use dashmap::DashMap;
|
||||
use futures::channel::mpsc;
|
||||
use futures::StreamExt;
|
||||
use log::info;
|
||||
use futures::{SinkExt, StreamExt};
|
||||
use log::{error, info};
|
||||
use pingora_core::server::ShutdownWatch;
|
||||
use pingora_core::services::background::BackgroundService;
|
||||
use std::sync::Arc;
|
||||
@@ -15,30 +16,50 @@ use std::sync::Arc;
|
||||
#[async_trait]
|
||||
impl BackgroundService for LB {
|
||||
async fn start(&self, mut shutdown: ShutdownWatch) {
|
||||
info!("Starting background service");
|
||||
let (tx, mut rx) = mpsc::channel::<Configuration>(0);
|
||||
info!("Starting background service"); // tx: Sender<Configuration>
|
||||
let (mut tx, mut rx) = mpsc::channel::<Configuration>(1);
|
||||
let tx_api = tx.clone();
|
||||
let config = load_configuration(self.config.upstreams_conf.clone().as_str(), "filepath")
|
||||
.await
|
||||
.expect("Failed to load configuration");
|
||||
|
||||
let tx_file = tx.clone();
|
||||
let tx_consul = tx.clone();
|
||||
|
||||
let file_load = FromFileProvider {
|
||||
path: self.config.upstreams_conf.clone(),
|
||||
};
|
||||
let consul_load = ConsulProvider {
|
||||
path: self.config.upstreams_conf.clone(),
|
||||
};
|
||||
|
||||
let _ = tokio::spawn(async move { file_load.start(tx_file).await });
|
||||
let _ = tokio::spawn(async move { consul_load.start(tx_consul).await });
|
||||
match config.typecfg.as_str() {
|
||||
"file" => {
|
||||
info!("Running File discovery, requested type is: {}", config.typecfg);
|
||||
tx.send(config).await.unwrap();
|
||||
let file_load = FromFileProvider {
|
||||
path: self.config.upstreams_conf.clone(),
|
||||
};
|
||||
let _ = tokio::spawn(async move { file_load.start(tx).await });
|
||||
}
|
||||
"kubernetes" => {
|
||||
info!("Running Kubernetes discovery, requested type is: {}", config.typecfg);
|
||||
let cf = Arc::from(config);
|
||||
let kuber_load = KubernetesProvider { config: cf.clone() };
|
||||
let _ = tokio::spawn(async move { kuber_load.start(tx).await });
|
||||
}
|
||||
"consul" => {
|
||||
info!("Running Consul discovery, requested type is: {}", config.typecfg);
|
||||
let cf = Arc::from(config);
|
||||
let consul_load = ConsulProvider { config: cf.clone() };
|
||||
let _ = tokio::spawn(async move { consul_load.start(tx).await });
|
||||
}
|
||||
_ => {
|
||||
error!("Unknown discovery type: {}", config.typecfg);
|
||||
}
|
||||
}
|
||||
|
||||
let api_load = APIUpstreamProvider {
|
||||
address: self.config.config_address.clone(),
|
||||
masterkey: self.config.master_key.clone(),
|
||||
config_api_enabled: self.config.config_api_enabled.clone(),
|
||||
tls_address: self.config.config_tls_address.clone(),
|
||||
tls_certificate: self.config.config_tls_certificate.clone(),
|
||||
tls_key_file: self.config.config_tls_key_file.clone(),
|
||||
file_server_address: self.config.file_server_address.clone(),
|
||||
file_server_folder: self.config.file_server_folder.clone(),
|
||||
};
|
||||
let tx_api = tx.clone();
|
||||
// let tx_api = tx.clone();
|
||||
let _ = tokio::spawn(async move { api_load.start(tx_api).await });
|
||||
|
||||
let uu = self.ump_upst.clone();
|
||||
@@ -62,6 +83,7 @@ impl BackgroundService for LB {
|
||||
new.sticky_sessions = ss.extraparams.sticky_sessions;
|
||||
new.to_https = ss.extraparams.to_https;
|
||||
new.authentication = ss.extraparams.authentication.clone();
|
||||
new.rate_limit = ss.extraparams.rate_limit;
|
||||
self.extraparams.store(Arc::new(new));
|
||||
self.headers.clear();
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ impl GetHost for LB {
|
||||
}
|
||||
}
|
||||
}
|
||||
// println!("BMT :===> {:?}", best_match);
|
||||
// println!("Best Match :===> {:?}", best_match);
|
||||
best_match
|
||||
}
|
||||
fn get_header(&self, peer: &str, path: &str) -> Option<Vec<(String, String)>> {
|
||||
|
||||
@@ -1,20 +1,26 @@
|
||||
use crate::utils::auth::authenticate;
|
||||
use crate::utils::metrics::*;
|
||||
use crate::utils::structs::{AppConfig, Extraparams, Headers, UpstreamsDashMap, UpstreamsIdMap};
|
||||
use crate::utils::structs::{AppConfig, Extraparams, Headers, InnerMap, UpstreamsDashMap, UpstreamsIdMap};
|
||||
use crate::web::gethosts::GetHost;
|
||||
use arc_swap::ArcSwap;
|
||||
use async_trait::async_trait;
|
||||
use axum::body::Bytes;
|
||||
use log::{debug, warn};
|
||||
use once_cell::sync::Lazy;
|
||||
use pingora::http::{RequestHeader, ResponseHeader, StatusCode};
|
||||
use pingora::prelude::*;
|
||||
use pingora::ErrorSource::Upstream;
|
||||
use pingora_core::listeners::ALPN;
|
||||
use pingora_core::prelude::HttpPeer;
|
||||
use pingora_limits::rate::Rate;
|
||||
use pingora_proxy::{ProxyHttp, Session};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::time::Instant;
|
||||
|
||||
static RATE_LIMITER: Lazy<Rate> = Lazy::new(|| Rate::new(Duration::from_secs(1)));
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct LB {
|
||||
pub ump_upst: Arc<UpstreamsDashMap>,
|
||||
pub ump_full: Arc<UpstreamsDashMap>,
|
||||
@@ -29,12 +35,13 @@ pub struct Context {
|
||||
to_https: bool,
|
||||
redirect_to: String,
|
||||
start_time: Instant,
|
||||
hostname: Option<String>,
|
||||
upstream_peer: Option<InnerMap>,
|
||||
extraparams: arc_swap::Guard<Arc<Extraparams>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ProxyHttp for LB {
|
||||
// type CTX = ();
|
||||
// fn new_ctx(&self) -> Self::CTX {}
|
||||
type CTX = Context;
|
||||
fn new_ctx(&self) -> Self::CTX {
|
||||
Context {
|
||||
@@ -42,56 +49,90 @@ impl ProxyHttp for LB {
|
||||
to_https: false,
|
||||
redirect_to: String::new(),
|
||||
start_time: Instant::now(),
|
||||
hostname: None,
|
||||
upstream_peer: None,
|
||||
extraparams: self.extraparams.load(),
|
||||
}
|
||||
}
|
||||
async fn request_filter(&self, session: &mut Session, _ctx: &mut Self::CTX) -> Result<bool> {
|
||||
if let Some(auth) = self.extraparams.load().authentication.get("authorization") {
|
||||
let ep = _ctx.extraparams.clone();
|
||||
|
||||
if let Some(auth) = ep.authentication.get("authorization") {
|
||||
let authenticated = authenticate(&auth.value(), &session);
|
||||
if !authenticated {
|
||||
let _ = session.respond_error(401).await;
|
||||
warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path().to_string());
|
||||
warn!("Forbidden: {:?}, {}", session.client_addr(), session.req_header().uri.path());
|
||||
return Ok(true);
|
||||
}
|
||||
};
|
||||
Ok(false)
|
||||
}
|
||||
async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
|
||||
let host_name = return_header_host(&session);
|
||||
match host_name {
|
||||
Some(hostname) => {
|
||||
// session.req_header_mut().headers.insert("X-Host-Name", host.to_string().parse().unwrap());
|
||||
let mut backend_id = None;
|
||||
|
||||
if self.extraparams.load().sticky_sessions {
|
||||
if let Some(cookies) = session.req_header().headers.get("cookie") {
|
||||
if let Ok(cookie_str) = cookies.to_str() {
|
||||
for cookie in cookie_str.split(';') {
|
||||
let trimmed = cookie.trim();
|
||||
if let Some(value) = trimmed.strip_prefix("backend_id=") {
|
||||
backend_id = Some(value);
|
||||
break;
|
||||
}
|
||||
let hostname = return_header_host(&session);
|
||||
_ctx.hostname = hostname;
|
||||
|
||||
let mut backend_id = None;
|
||||
|
||||
if ep.sticky_sessions {
|
||||
if let Some(cookies) = session.req_header().headers.get("cookie") {
|
||||
if let Ok(cookie_str) = cookies.to_str() {
|
||||
for cookie in cookie_str.split(';') {
|
||||
let trimmed = cookie.trim();
|
||||
if let Some(value) = trimmed.strip_prefix("backend_id=") {
|
||||
backend_id = Some(value);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match _ctx.hostname.as_ref() {
|
||||
None => return Ok(false),
|
||||
Some(host) => {
|
||||
// let optioninnermap = self.get_host(host.as_str(), host.as_str(), backend_id);
|
||||
let optioninnermap = self.get_host(host.as_str(), session.req_header().uri.path(), backend_id);
|
||||
match optioninnermap {
|
||||
None => return Ok(false),
|
||||
Some(ref innermap) => {
|
||||
if let Some(rate) = innermap.rate_limit.or(ep.rate_limit) {
|
||||
// let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip().to_string()).unwrap_or_else(|| host.to_string());
|
||||
let rate_key = session.client_addr().and_then(|addr| addr.as_inet()).map(|inet| inet.ip());
|
||||
let curr_window_requests = RATE_LIMITER.observe(&rate_key, 1);
|
||||
if curr_window_requests > rate {
|
||||
let mut header = ResponseHeader::build(429, None).unwrap();
|
||||
header.insert_header("X-Rate-Limit-Limit", rate.to_string()).unwrap();
|
||||
header.insert_header("X-Rate-Limit-Remaining", "0").unwrap();
|
||||
header.insert_header("X-Rate-Limit-Reset", "1").unwrap();
|
||||
session.set_keepalive(None);
|
||||
session.write_response_header(Box::new(header), true).await?;
|
||||
debug!("Rate limited: {:?}, {}", rate_key, rate);
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let ddr = self.get_host(hostname, hostname, backend_id);
|
||||
|
||||
match ddr {
|
||||
Some((address, port, ssl, is_h2, to_https)) => {
|
||||
let mut peer = Box::new(HttpPeer::new((address.clone(), port.clone()), ssl, String::new()));
|
||||
_ctx.upstream_peer = optioninnermap;
|
||||
}
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
async fn upstream_peer(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result<Box<HttpPeer>> {
|
||||
// let host_name = return_header_host(&session);
|
||||
match ctx.hostname.as_ref() {
|
||||
Some(hostname) => {
|
||||
match ctx.upstream_peer.as_ref() {
|
||||
// Some((address, port, ssl, is_h2, to_https)) => {
|
||||
Some(innermap) => {
|
||||
let mut peer = Box::new(HttpPeer::new((innermap.address.clone(), innermap.port.clone()), innermap.is_ssl, String::new()));
|
||||
// if session.is_http2() {
|
||||
if is_h2 {
|
||||
if innermap.is_http2 {
|
||||
peer.options.alpn = ALPN::H2;
|
||||
}
|
||||
if ssl {
|
||||
peer.sni = hostname.to_string();
|
||||
if innermap.is_ssl {
|
||||
peer.sni = hostname.clone();
|
||||
peer.options.verify_cert = false;
|
||||
peer.options.verify_hostname = false;
|
||||
}
|
||||
// println!("{}, {}, alpn {}, h2 {:?}, to_https {}", hostname, address.as_str(), peer.options.alpn, is_h2, _to_https);
|
||||
if self.extraparams.load().to_https.unwrap_or(false) || to_https {
|
||||
if ctx.to_https || innermap.to_https {
|
||||
if let Some(stream) = session.stream() {
|
||||
if stream.get_ssl().is_none() {
|
||||
if let Some(addr) = session.server_addr() {
|
||||
@@ -106,7 +147,7 @@ impl ProxyHttp for LB {
|
||||
}
|
||||
}
|
||||
|
||||
ctx.backend_id = format!("{}:{}:{}", address.clone(), port.clone(), ssl);
|
||||
ctx.backend_id = format!("{}:{}:{}", innermap.address.clone(), innermap.port.clone(), innermap.is_ssl);
|
||||
Ok(peer)
|
||||
}
|
||||
None => {
|
||||
@@ -134,32 +175,28 @@ impl ProxyHttp for LB {
|
||||
}
|
||||
}
|
||||
|
||||
async fn upstream_request_filter(&self, session: &mut Session, _upstream_request: &mut RequestHeader, _ctx: &mut Self::CTX) -> Result<()> {
|
||||
match session.client_addr() {
|
||||
Some(ip) => {
|
||||
let inet = ip.as_inet();
|
||||
match inet {
|
||||
Some(addr) => {
|
||||
_upstream_request
|
||||
.insert_header("X-Forwarded-For", addr.to_string().split(':').collect::<Vec<&str>>()[0])
|
||||
.unwrap();
|
||||
}
|
||||
None => warn!("Malformed Client IP: {:?}", inet),
|
||||
}
|
||||
}
|
||||
None => {
|
||||
warn!("Cannot detect client IP");
|
||||
}
|
||||
async fn upstream_request_filter(&self, _session: &mut Session, upstream_request: &mut RequestHeader, ctx: &mut Self::CTX) -> Result<()> {
|
||||
if let Some(hostname) = ctx.hostname.as_ref() {
|
||||
upstream_request.insert_header("Host", hostname)?;
|
||||
}
|
||||
if let Some(peer) = ctx.upstream_peer.as_ref() {
|
||||
upstream_request.insert_header("X-Forwarded-For", peer.address.as_str())?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// async fn request_body_filter(&self, _session: &mut Session, _body: &mut Option<Bytes>, _end_of_stream: bool, _ctx: &mut Self::CTX) -> Result<()>
|
||||
// where
|
||||
// Self::CTX: Send + Sync,
|
||||
// {
|
||||
// Ok(())
|
||||
// }
|
||||
async fn response_filter(&self, session: &mut Session, _upstream_response: &mut ResponseHeader, ctx: &mut Self::CTX) -> Result<()> {
|
||||
// _upstream_response.insert_header("X-Proxied-From", "Fooooooooooooooo").unwrap();
|
||||
if self.extraparams.load().sticky_sessions {
|
||||
if ctx.extraparams.sticky_sessions {
|
||||
let backend_id = ctx.backend_id.clone();
|
||||
if let Some(bid) = self.ump_byid.get(&backend_id) {
|
||||
let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.0));
|
||||
let _ = _upstream_response.insert_header("set-cookie", format!("backend_id={}; Path=/; Max-Age=600; HttpOnly; SameSite=Lax", bid.address));
|
||||
}
|
||||
}
|
||||
if ctx.to_https {
|
||||
@@ -168,7 +205,7 @@ impl ProxyHttp for LB {
|
||||
redirect_response.insert_header("Content-Length", "0")?;
|
||||
session.write_response_header(Box::new(redirect_response), false).await?;
|
||||
}
|
||||
match return_header_host(&session) {
|
||||
match ctx.hostname.as_ref() {
|
||||
Some(host) => {
|
||||
let path = session.req_header().uri.path();
|
||||
let host_header = host;
|
||||
@@ -194,6 +231,7 @@ impl ProxyHttp for LB {
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
session.set_keepalive(Some(300));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -210,26 +248,19 @@ impl ProxyHttp for LB {
|
||||
}
|
||||
}
|
||||
|
||||
fn return_header_host(session: &Session) -> Option<&str> {
|
||||
fn return_header_host(session: &Session) -> Option<String> {
|
||||
if session.is_http2() {
|
||||
match session.req_header().uri.host() {
|
||||
Some(host) => Option::from(host),
|
||||
Some(host) => Option::from(host.to_string()),
|
||||
None => None,
|
||||
}
|
||||
} else {
|
||||
match session.req_header().headers.get("host") {
|
||||
Some(host) => {
|
||||
let header_host = host.to_str().unwrap().splitn(2, ':').collect::<Vec<&str>>();
|
||||
Option::from(header_host[0])
|
||||
Option::from(header_host[0].to_string())
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fn return_no_host(inp: &Option<(String, u16)>) -> Box<HttpPeer> {
|
||||
// match inp {
|
||||
// Some(t) => Box::new(HttpPeer::new(t, false, String::new())),
|
||||
// None => Box::new(HttpPeer::new(("0.0.0.0", 0), false, String::new())),
|
||||
// }
|
||||
// }
|
||||
|
||||
118
src/web/start.rs
118
src/web/start.rs
@@ -1,17 +1,19 @@
|
||||
// use rustls::crypto::ring::default_provider;
|
||||
use crate::utils::structs::Extraparams;
|
||||
use crate::utils::tls;
|
||||
use crate::utils::tools::listdir;
|
||||
use crate::utils::tls::CertificateConfig;
|
||||
use crate::utils::tools::*;
|
||||
use crate::web::proxyhttp::LB;
|
||||
use arc_swap::ArcSwap;
|
||||
use dashmap::DashMap;
|
||||
use log::info;
|
||||
use openssl::ssl::{SslAlert, SslRef};
|
||||
use pingora::tls::ssl::{SslAlert, SslRef};
|
||||
use pingora_core::listeners::tls::TlsSettings;
|
||||
use pingora_core::prelude::{background_service, Opt};
|
||||
use pingora_core::server::Server;
|
||||
use std::env;
|
||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
|
||||
pub fn run() {
|
||||
// default_provider().install_default().expect("Failed to install rustls crypto provider");
|
||||
@@ -26,78 +28,88 @@ pub fn run() {
|
||||
let ff_config = Arc::new(DashMap::new());
|
||||
let im_config = Arc::new(DashMap::new());
|
||||
let hh_config = Arc::new(DashMap::new());
|
||||
|
||||
let ec_config = Arc::new(ArcSwap::from_pointee(Extraparams {
|
||||
sticky_sessions: false,
|
||||
to_https: None,
|
||||
authentication: DashMap::new(),
|
||||
rate_limit: None,
|
||||
}));
|
||||
|
||||
let cfg = Arc::new(maincfg);
|
||||
|
||||
let lb = LB {
|
||||
ump_upst: uf_config.clone(),
|
||||
ump_full: ff_config.clone(),
|
||||
ump_byid: im_config.clone(),
|
||||
ump_upst: uf_config,
|
||||
ump_full: ff_config,
|
||||
ump_byid: im_config,
|
||||
config: cfg.clone(),
|
||||
headers: hh_config.clone(),
|
||||
extraparams: ec_config.clone(),
|
||||
headers: hh_config,
|
||||
extraparams: ec_config,
|
||||
};
|
||||
let bg = LB {
|
||||
ump_upst: uf_config.clone(),
|
||||
ump_full: ff_config.clone(),
|
||||
ump_byid: im_config.clone(),
|
||||
config: cfg.clone(),
|
||||
headers: hh_config.clone(),
|
||||
extraparams: ec_config.clone(),
|
||||
};
|
||||
|
||||
// env_logger::Env::new();
|
||||
// env_logger::init();
|
||||
|
||||
let log_level = cfg.log_level.clone();
|
||||
unsafe {
|
||||
match log_level.as_str() {
|
||||
"info" => env::set_var("RUST_LOG", "info"),
|
||||
"error" => env::set_var("RUST_LOG", "error"),
|
||||
"warn" => env::set_var("RUST_LOG", "warn"),
|
||||
"debug" => env::set_var("RUST_LOG", "debug"),
|
||||
"trace" => env::set_var("RUST_LOG", "trace"),
|
||||
"off" => env::set_var("RUST_LOG", "off"),
|
||||
_ => {
|
||||
println!("Error reading log level, defaulting to: INFO");
|
||||
env::set_var("RUST_LOG", "info")
|
||||
/*
|
||||
let log_level = cfg.log_level.clone();
|
||||
unsafe {
|
||||
match log_level.as_str() {
|
||||
"info" => env::set_var("RUST_LOG", "info"),
|
||||
"error" => env::set_var("RUST_LOG", "error"),
|
||||
"warn" => env::set_var("RUST_LOG", "warn"),
|
||||
"debug" => env::set_var("RUST_LOG", "debug"),
|
||||
"trace" => env::set_var("RUST_LOG", "trace"),
|
||||
"off" => env::set_var("RUST_LOG", "off"),
|
||||
_ => {
|
||||
println!("Error reading log level, defaulting to: INFO");
|
||||
env::set_var("RUST_LOG", "info")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
env_logger::builder()
|
||||
// .format_timestamp(None)
|
||||
// .format_module_path(false)
|
||||
// .format_source_path(false)
|
||||
// .format_target(false)
|
||||
.init();
|
||||
env_logger::builder().init();
|
||||
*/
|
||||
let grade = cfg.proxy_tls_grade.clone().unwrap_or("medium".to_string());
|
||||
info!("TLS grade set to: [ {} ]", grade);
|
||||
|
||||
let bg_srvc = background_service("bgsrvc", bg);
|
||||
let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb);
|
||||
let bg_srvc = background_service("bgsrvc", lb.clone());
|
||||
let mut proxy = pingora_proxy::http_proxy_service(&server.configuration, lb.clone());
|
||||
let bind_address_http = cfg.proxy_address_http.clone();
|
||||
|
||||
let bind_address_tls = cfg.proxy_address_tls.clone();
|
||||
// let foo = crate::utils::tls::build_ssl_context_builder();
|
||||
match bind_address_tls {
|
||||
Some(bind_address_tls) => {
|
||||
info!("Running TLS listener on :{}", bind_address_tls);
|
||||
// let cert_path = cfg.tls_certificate.clone().unwrap();
|
||||
// let key_path = cfg.tls_key_file.clone().unwrap();
|
||||
// let mut tls_settings = tls::TlsSettings::intermediate(&cert_path, &key_path).unwrap();
|
||||
// tls_settings.enable_h2();
|
||||
// proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings);
|
||||
let (tx, rx): (Sender<Vec<CertificateConfig>>, Receiver<Vec<CertificateConfig>>) = channel();
|
||||
let certs_path = cfg.proxy_certificates.clone().unwrap();
|
||||
thread::spawn(move || {
|
||||
watch_folder(certs_path, tx).unwrap();
|
||||
});
|
||||
let certificate_configs = rx.recv().unwrap();
|
||||
let first_set = tls::Certificates::new(&certificate_configs, grade.as_str()).unwrap_or_else(|| panic!("Unable to load initial certificate info"));
|
||||
let certificates = Arc::new(ArcSwap::from_pointee(first_set));
|
||||
let certs_for_callback = certificates.clone();
|
||||
|
||||
let certificate_configs = listdir(cfg.proxy_certificates.clone().unwrap());
|
||||
let certificates = tls::Certificates::new(&certificate_configs);
|
||||
let mut tls_settings = TlsSettings::intermediate(&certificates.default_cert_path, &certificates.default_key_path).expect("unable to load or parse cert/key");
|
||||
tls_settings.enable_h2();
|
||||
tls_settings.set_servername_callback(move |ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert| certificates.server_name_callback(ssl_ref, ssl_alert));
|
||||
let certs_for_watcher = certificates.clone();
|
||||
let new_certs = tls::Certificates::new(&certificate_configs, grade.as_str());
|
||||
certs_for_watcher.store(Arc::new(new_certs.unwrap()));
|
||||
|
||||
let mut tls_settings =
|
||||
TlsSettings::intermediate(&certs_for_callback.load().default_cert_path, &certs_for_callback.load().default_key_path).expect("unable to load or parse cert/key");
|
||||
|
||||
tls::set_tsl_grade(&mut tls_settings, grade.as_str());
|
||||
tls_settings.set_servername_callback(move |ssl_ref: &mut SslRef, ssl_alert: &mut SslAlert| certs_for_callback.load().server_name_callback(ssl_ref, ssl_alert));
|
||||
tls_settings.set_alpn_select_callback(tls::prefer_h2);
|
||||
|
||||
proxy.add_tls_with_settings(&bind_address_tls, None, tls_settings);
|
||||
|
||||
let certs_for_watcher = certificates.clone();
|
||||
thread::spawn(move || {
|
||||
while let Ok(new_configs) = rx.recv() {
|
||||
let new_certs = tls::Certificates::new(&new_configs, grade.as_str());
|
||||
match new_certs {
|
||||
Some(new_certs) => {
|
||||
certs_for_watcher.store(Arc::new(new_certs));
|
||||
info!("Reload TLS certificates from {}", cfg.proxy_certificates.clone().unwrap())
|
||||
}
|
||||
None => {}
|
||||
};
|
||||
}
|
||||
});
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
use tokio::net::TcpListener;
|
||||
use tower_http::services::ServeDir;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct InputKey {
|
||||
@@ -34,6 +35,7 @@ struct OutToken {
|
||||
struct AppState {
|
||||
master_key: String,
|
||||
config_sender: Sender<Configuration>,
|
||||
config_api_enabled: bool,
|
||||
}
|
||||
|
||||
#[allow(unused_mut)]
|
||||
@@ -41,13 +43,16 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
|
||||
let app_state = AppState {
|
||||
master_key: config.masterkey.clone(),
|
||||
config_sender: to_return.clone(),
|
||||
config_api_enabled: config.config_api_enabled.clone(),
|
||||
};
|
||||
|
||||
let app = Router::new()
|
||||
// .route("/{*wildcard}", get(senderror))
|
||||
// .route("/{*wildcard}", post(senderror))
|
||||
// .route("/{*wildcard}", put(senderror))
|
||||
// .route("/{*wildcard}", head(senderror))
|
||||
// .route("/{*wildcard}", delete(senderror))
|
||||
// .nest_service("/static", static_files)
|
||||
.route("/jwt", post(jwt_gen))
|
||||
.route("/conf", post(conf))
|
||||
.route("/metrics", get(metrics))
|
||||
@@ -65,15 +70,29 @@ pub async fn run_server(config: &APIUpstreamProvider, mut to_return: Sender<Conf
|
||||
info!("Starting the TLS API server on: {}", value);
|
||||
}
|
||||
|
||||
if let (Some(address), Some(folder)) = (&config.file_server_address, &config.file_server_folder) {
|
||||
let static_files = ServeDir::new(folder);
|
||||
let static_serve: Router = Router::new().fallback_service(static_files);
|
||||
let static_listen = TcpListener::bind(address).await.unwrap();
|
||||
let _ = tokio::spawn(async move { axum::serve(static_listen, static_serve).await.unwrap() });
|
||||
}
|
||||
|
||||
let listener = TcpListener::bind(config.address.clone()).await.unwrap();
|
||||
info!("Starting the API server on: {}", config.address);
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
}
|
||||
|
||||
async fn conf(State(mut st): State<AppState>, Query(params): Query<HashMap<String, String>>, content: String) -> impl IntoResponse {
|
||||
if !st.config_api_enabled {
|
||||
return Response::builder()
|
||||
.status(StatusCode::FORBIDDEN)
|
||||
.body(Body::from("Config remote API is disabled !\n"))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
if let Some(s) = params.get("key") {
|
||||
if s.to_owned() == st.master_key.to_owned() {
|
||||
if let Some(serverlist) = crate::utils::parceyaml::load_configuration(content.as_str(), "content") {
|
||||
if s.to_owned() == st.master_key {
|
||||
if let Some(serverlist) = crate::utils::parceyaml::load_configuration(content.as_str(), "content").await {
|
||||
st.config_sender.send(serverlist).await.unwrap();
|
||||
return Response::builder().status(StatusCode::OK).body(Body::from("Config, conf file, updated !\n")).unwrap();
|
||||
} else {
|
||||
|
||||
Reference in New Issue
Block a user