mirror of
https://github.com/sadoyan/aralez.git
synced 2026-04-30 23:08:40 +08:00
Compare commits
9 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
02de5f1c21 | ||
|
|
9519280026 | ||
|
|
e87c60cf4f | ||
|
|
25693a7058 | ||
|
|
3b0b385ec7 | ||
|
|
5359c2e8e9 | ||
|
|
2b62d1e6de | ||
|
|
8a290e5084 | ||
|
|
3541b20c80 |
15
.github/FUNDING.yml
vendored
Normal file
15
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# These are supported funding model platforms
|
||||
|
||||
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
||||
patreon: # Replace with a single Patreon username
|
||||
open_collective: # Replace with a single Open Collective username
|
||||
ko_fi: # Replace with a single Ko-fi username
|
||||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
||||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
||||
liberapay: # Replace with a single Liberapay username
|
||||
issuehunt: # Replace with a single IssueHunt username
|
||||
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
|
||||
polar: # Replace with a single Polar username
|
||||
buy_me_a_coffee: sadoyan
|
||||
thanks_dev: # Replace with a single thanks.dev username
|
||||
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
|
||||
201
LICENSE
Normal file
201
LICENSE
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
@@ -1,7 +1,7 @@
|
||||
# Main configuration file, applied on startup
|
||||
threads: 12 # Number of daemon threads default setting
|
||||
#user: pastor # Username for running aralez after dropping root privileges, requires program to start as root
|
||||
#group: pastor # Group for running aralez after dropping root privileges, requires program to start as root
|
||||
#runuser: pastor # Username for running aralez after dropping root privileges, requires program to start as root
|
||||
#rungroup: pastor # Group for running aralez after dropping root privileges, requires program to start as root
|
||||
daemon: false # Run in background
|
||||
upstream_keepalive_pool_size: 500 # Pool size for upstream keepalive connections
|
||||
pid_file: /tmp/aralez.pid # Path to PID file
|
||||
|
||||
@@ -1,55 +1,76 @@
|
||||
# The file under watch and hot reload, changes are applied immediately, no need to restart or reload.
|
||||
provider: "file" # consul, kubernetes
|
||||
provider: "file" # "file" "consul" "kubernetes"
|
||||
sticky_sessions: false
|
||||
to_ssl: false
|
||||
#rate_limit: 100
|
||||
to_https: false
|
||||
rate_limit: 100
|
||||
headers:
|
||||
- "Access-Control-Allow-Origin:*"
|
||||
- "Access-Control-Allow-Methods:POST, GET, OPTIONS"
|
||||
- "Access-Control-Max-Age:86400"
|
||||
- "X-Custom-Header:Something Special"
|
||||
authorization:
|
||||
type: "jwt"
|
||||
creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
|
||||
- "Strict-Transport-Security:max-age=31536000; includeSubDomains; preload"
|
||||
#authorization:
|
||||
# type: "jwt"
|
||||
# creds: "910517d9-f9a1-48de-8826-dbadacbd84af-cb6f830e-ab16-47ec-9d8f-0090de732774"
|
||||
# type: "basic"
|
||||
# creds: "user:Passw0rd"
|
||||
# creds: "username:Pa$$w0rd"
|
||||
# type: "apikey"
|
||||
# creds: "5ecbf799-1343-4e94-a9b5-e278af5cd313-56b45249-1839-4008-a450-a60dc76d2bae"
|
||||
consul: # If the provider is consul. Otherwise, ignored.
|
||||
consul:
|
||||
servers:
|
||||
- "http://consul1:8500"
|
||||
- "http://consul2:8500"
|
||||
- "http://consul3:8500"
|
||||
services: # proxy: The hostname to access the proxy server, real : The real service name in Consul database.
|
||||
- proxy: "proxy-frontend-dev-frontend-srv"
|
||||
real: "frontend-dev-frontend-srv"
|
||||
- "http://192.168.1.199:8500"
|
||||
- "http://192.168.1.200:8500"
|
||||
- "http://192.168.1.201:8500"
|
||||
services: # hostname: The hostname to access the proxy server, upstream : The real service name in Consul database.
|
||||
- hostname: "vt-webapi-service"
|
||||
upstream: "vt-webapi-service-health"
|
||||
path: "/one"
|
||||
headers:
|
||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||
- "X-Proxy-From:Aralez"
|
||||
rate_limit: 1
|
||||
to_https: false
|
||||
- hostname: "vt-webapi-service"
|
||||
upstream: "vt-webapi-service-health"
|
||||
path: "/"
|
||||
token: "8e2db809-845b-45e1-8b47-2c8356a09da0-a4370955-18c2-4d6e-a8f8-ffcc0b47be81" # Consul server access token, If Consul auth is enabled
|
||||
kubernetes:
|
||||
servers:
|
||||
- "172.16.0.11:5443" # KUBERNETES_SERVICE_HOST : KUBERNETES_SERVICE_PORT_HTTPS
|
||||
- "192.168.1.55:443" #For testing only, overrides with KUBERNETES_SERVICE_HOST : KUBERNETES_SERVICE_PORT_HTTPS env variables.
|
||||
services:
|
||||
- proxy: "vt-api-service-v2"
|
||||
real: "vt-api-service-v2"
|
||||
- proxy: "vt-search-service"
|
||||
real: "vt-search-service"
|
||||
- proxy: "vt-websocket-service"
|
||||
real: "vt-websocket-service"
|
||||
tokenpath: "/tmp/token.txt" # /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
- hostname: "vt-webapi-service"
|
||||
path: "/"
|
||||
upstream: "vt-webapi-service"
|
||||
- hostname: "vt-webapi-service"
|
||||
upstream: "vt-console-service"
|
||||
path: "/one"
|
||||
headers:
|
||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||
- "X-Proxy-From:Aralez"
|
||||
rate_limit: 100
|
||||
to_https: false
|
||||
- hostname: "vt-webapi-service"
|
||||
upstream: "vt-rambulik-service"
|
||||
path: "/two"
|
||||
- hostname: "vt-websocket-service"
|
||||
upstream: "vt-websocket-service"
|
||||
path: "/"
|
||||
tokenpath: "/path/to/kubetoken.txt" #If not set, will default to /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
upstreams:
|
||||
myip.mydomain.com:
|
||||
paths:
|
||||
rate_limit: 10 # Per path rate limit have higher priority than global rate limit. If not set, the global rate limit will be used
|
||||
"/":
|
||||
rate_limit: 200
|
||||
to_https: false
|
||||
headers:
|
||||
- "X-Proxy-From:Aralez"
|
||||
servers: # List of upstreams HOST:PORT
|
||||
servers:
|
||||
- "127.0.0.1:8000"
|
||||
- "127.0.0.2:8000"
|
||||
- "127.0.0.3:8000"
|
||||
- "127.0.0.4:8000"
|
||||
- "127.0.0.5:8000"
|
||||
"/ping":
|
||||
to_https: true
|
||||
to_https: false
|
||||
headers:
|
||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||
- "X-Proxy-From:Aralez"
|
||||
@@ -62,14 +83,28 @@ upstreams:
|
||||
polo.mydomain.com:
|
||||
paths:
|
||||
"/":
|
||||
to_https: false
|
||||
headers:
|
||||
- "X-Some-Thing:Yaaaaaaaaaaaaaaa"
|
||||
servers:
|
||||
- "192.168.1.1:8000"
|
||||
- "192.168.1.10:8000"
|
||||
- "127.0.0.1:8000"
|
||||
- "127.0.0.2:8000"
|
||||
- "127.0.0.3:8000"
|
||||
- "127.0.0.4:8000"
|
||||
"/.well-known/acme-challenge":
|
||||
healthcheck: false
|
||||
servers:
|
||||
- "127.0.0.1:8001"
|
||||
apt.mydomain.com:
|
||||
paths:
|
||||
"/":
|
||||
servers:
|
||||
- "192.168.1.10:443"
|
||||
"/.well-known/acme-challenge":
|
||||
healthcheck: false
|
||||
servers:
|
||||
- "127.0.0.1:8001"
|
||||
localpost:
|
||||
paths:
|
||||
"/":
|
||||
to_https: false
|
||||
servers:
|
||||
- "127.0.0.1:9000"
|
||||
@@ -6,6 +6,7 @@ mod filewatch;
|
||||
pub mod healthcheck;
|
||||
pub mod jwt;
|
||||
pub mod kuber;
|
||||
pub mod kuberconsul;
|
||||
pub mod metrics;
|
||||
pub mod parceyaml;
|
||||
pub mod state;
|
||||
|
||||
@@ -1,14 +1,17 @@
|
||||
use crate::utils::kuberconsul::{list_to_upstreams, match_path};
|
||||
use crate::utils::parceyaml::build_headers;
|
||||
use crate::utils::structs::{Configuration, InnerMap, ServiceMapping, UpstreamsDashMap};
|
||||
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams};
|
||||
use dashmap::DashMap;
|
||||
use futures::channel::mpsc::Sender;
|
||||
use futures::SinkExt;
|
||||
use log::{info, warn};
|
||||
use pingora::prelude::sleep;
|
||||
use rand::Rng;
|
||||
use reqwest::header::{HeaderMap, HeaderValue};
|
||||
use reqwest::Client;
|
||||
use serde::Deserialize;
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
@@ -28,116 +31,89 @@ struct TaggedAddress {
|
||||
}
|
||||
|
||||
pub async fn start(mut toreturn: Sender<Configuration>, config: Arc<Configuration>) {
|
||||
let headers = DashMap::new();
|
||||
info!("Consul Discovery is enabled : {}", config.typecfg);
|
||||
let consul = config.consul.clone();
|
||||
let prev_upstreams = UpstreamsDashMap::new();
|
||||
match consul {
|
||||
Some(consul) => {
|
||||
let servers = consul.servers.unwrap();
|
||||
info!("Consul Servers => {:?}", servers);
|
||||
loop {
|
||||
if let Some(consul) = config.consul.clone() {
|
||||
let servers = consul.servers.unwrap_or(vec![format!(
|
||||
"{}:{}",
|
||||
env::var("CONSUL_SERVICE_HOST").unwrap_or("0.0.0.0".to_string()),
|
||||
env::var("CONSUL_SERVICE_PORT").unwrap_or("0".to_string())
|
||||
)]);
|
||||
let end = servers.len() - 1;
|
||||
|
||||
loop {
|
||||
let mut num = 0;
|
||||
if end > 0 {
|
||||
num = rand::rng().random_range(0..end);
|
||||
}
|
||||
headers.clear();
|
||||
for (k, v) in config.headers.clone() {
|
||||
headers.insert(k.to_string(), v);
|
||||
}
|
||||
let consul_data = servers.get(num).unwrap().to_string();
|
||||
let upstreams = consul_request(consul_data, consul.services.clone(), consul.token.clone());
|
||||
match upstreams.await {
|
||||
Some(upstreams) => {
|
||||
if !compare_dashmaps(&upstreams, &prev_upstreams) {
|
||||
let mut tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: Default::default(),
|
||||
consul: None,
|
||||
kubernetes: None,
|
||||
typecfg: "".to_string(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
|
||||
clone_dashmap_into(&upstreams, &prev_upstreams);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
tosend.headers = headers.clone();
|
||||
tosend.extraparams.authentication = config.extraparams.authentication.clone();
|
||||
tosend.typecfg = config.typecfg.clone();
|
||||
tosend.consul = config.consul.clone();
|
||||
print_upstreams(&tosend.upstreams);
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
}
|
||||
let upstreams = UpstreamsDashMap::new();
|
||||
let mut num = 0;
|
||||
if end > 0 {
|
||||
num = rand::rng().random_range(0..end);
|
||||
}
|
||||
let consul_data = servers.get(num).unwrap().to_string();
|
||||
let ss = consul_data + "/v1/catalog/service/";
|
||||
if let Some(ref svc) = consul.services {
|
||||
for i in svc {
|
||||
let header_list = DashMap::new();
|
||||
let mut hl = Vec::new();
|
||||
build_headers(&i.headers, config.as_ref(), &mut hl);
|
||||
if hl.len() > 0 {
|
||||
header_list.insert(i.path.clone().unwrap_or("/".to_string()), hl);
|
||||
config.headers.insert(i.hostname.clone(), header_list);
|
||||
}
|
||||
None => {}
|
||||
let pref: String = ss.clone() + &i.upstream;
|
||||
let list = get_by_http(pref, consul.token.clone(), &i).await;
|
||||
list_to_upstreams(list, &upstreams, &i);
|
||||
}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
if !compare_dashmaps(&upstreams, &prev_upstreams) {
|
||||
let mut tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: Default::default(),
|
||||
consul: None,
|
||||
kubernetes: None,
|
||||
typecfg: "".to_string(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
|
||||
clone_dashmap_into(&upstreams, &prev_upstreams);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
tosend.headers = config.headers.clone();
|
||||
tosend.extraparams.authentication = config.extraparams.authentication.clone();
|
||||
tosend.typecfg = config.typecfg.clone();
|
||||
tosend.consul = config.consul.clone();
|
||||
print_upstreams(&tosend.upstreams);
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
|
||||
async fn consul_request(url: String, whitelist: Option<Vec<ServiceMapping>>, token: Option<String>) -> Option<UpstreamsDashMap> {
|
||||
let upstreams = UpstreamsDashMap::new();
|
||||
let ss = url.clone() + "/v1/catalog/service/";
|
||||
match whitelist {
|
||||
Some(whitelist) => {
|
||||
for k in whitelist.iter() {
|
||||
let pref: String = ss.clone() + &k.real;
|
||||
let list = get_by_http(pref.clone(), token.clone()).await;
|
||||
match list {
|
||||
Some(list) => {
|
||||
upstreams.insert(k.proxy.clone(), list);
|
||||
}
|
||||
None => {
|
||||
warn!("Whitelist not found for {}", k.proxy);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
Some(upstreams)
|
||||
}
|
||||
|
||||
async fn get_by_http(url: String, token: Option<String>) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
|
||||
let client = reqwest::Client::new();
|
||||
async fn get_by_http(url: String, token: Option<String>, conf: &ServiceMapping) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
|
||||
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().ok()?;
|
||||
let mut headers = HeaderMap::new();
|
||||
if let Some(token) = token {
|
||||
headers.insert("X-Consul-Token", HeaderValue::from_str(&token).unwrap());
|
||||
}
|
||||
let to = Duration::from_secs(1);
|
||||
let u = client.get(url).timeout(to).send();
|
||||
let mut values = Vec::new();
|
||||
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
|
||||
match u.await {
|
||||
Ok(r) => {
|
||||
let jason = r.json::<Vec<Service>>().await;
|
||||
match jason {
|
||||
Ok(whitelist) => {
|
||||
for service in whitelist {
|
||||
let addr = service.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
|
||||
let prt = service.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
|
||||
let to_add = InnerMap {
|
||||
address: addr,
|
||||
port: prt,
|
||||
is_ssl: false,
|
||||
is_http2: false,
|
||||
to_https: false,
|
||||
rate_limit: None,
|
||||
healthcheck: None,
|
||||
};
|
||||
values.push(to_add);
|
||||
}
|
||||
}
|
||||
Err(_) => return None,
|
||||
}
|
||||
}
|
||||
Err(_) => return None,
|
||||
let resp = client.get(url).timeout(to).send().await.ok()?;
|
||||
if !resp.status().is_success() {
|
||||
eprintln!("Consul API returned status: {}", resp.status());
|
||||
return None;
|
||||
}
|
||||
upstreams.insert("/".to_string(), (values, AtomicUsize::new(0)));
|
||||
let mut inner_vec = Vec::new();
|
||||
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
|
||||
let endpoints: Vec<Service> = resp.json().await.ok()?;
|
||||
for subsets in endpoints {
|
||||
let addr = subsets.tagged_addresses.get("lan_ipv4").unwrap().address.clone();
|
||||
let prt = subsets.tagged_addresses.get("lan_ipv4").unwrap().port.clone();
|
||||
let to_add = InnerMap {
|
||||
address: addr,
|
||||
port: prt,
|
||||
is_ssl: false,
|
||||
is_http2: false,
|
||||
to_https: conf.to_https.unwrap_or(false),
|
||||
rate_limit: conf.rate_limit,
|
||||
healthcheck: None,
|
||||
};
|
||||
inner_vec.push(to_add);
|
||||
}
|
||||
match_path(&conf, &upstreams, inner_vec.clone());
|
||||
Some(upstreams)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
// use crate::utils::dnsclient::DnsClientPool;
|
||||
use crate::utils::structs::{Configuration, InnerMap, UpstreamsDashMap};
|
||||
use crate::utils::kuberconsul::{list_to_upstreams, match_path};
|
||||
use crate::utils::parceyaml::build_headers;
|
||||
use crate::utils::structs::{Configuration, InnerMap, ServiceMapping, UpstreamsDashMap};
|
||||
use crate::utils::tools::{clone_dashmap_into, compare_dashmaps, print_upstreams};
|
||||
use dashmap::DashMap;
|
||||
use futures::channel::mpsc::Sender;
|
||||
@@ -14,9 +15,6 @@ use std::time::Duration;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
// static KUBERNETES_SERVICE_HOST: &str = "IP_ADDRESS";
|
||||
// static TOKEN: &str = "TOKEN";
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct Endpoints {
|
||||
subsets: Option<Vec<Subset>>,
|
||||
@@ -35,15 +33,14 @@ struct Address {
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct Port {
|
||||
// name: String,
|
||||
port: u16,
|
||||
}
|
||||
|
||||
pub async fn start(mut toreturn: Sender<Configuration>, config: Arc<Configuration>) {
|
||||
let upstreams = UpstreamsDashMap::new();
|
||||
let prev_upstreams = UpstreamsDashMap::new();
|
||||
loop {
|
||||
if let Some(kuber) = config.kubernetes.clone() {
|
||||
let upstreams = UpstreamsDashMap::new();
|
||||
let path = kuber.tokenpath.unwrap_or("/var/run/secrets/kubernetes.io/serviceaccount/token".to_string());
|
||||
let token = read_token(path.as_str()).await;
|
||||
let servers = kuber.servers.unwrap_or(vec![format!(
|
||||
@@ -56,70 +53,72 @@ pub async fn start(mut toreturn: Sender<Configuration>, config: Arc<Configuratio
|
||||
if end > 0 {
|
||||
num = rand::rng().random_range(0..end);
|
||||
}
|
||||
let server = servers.get(num).unwrap().to_string();
|
||||
|
||||
let server = servers.get(num).unwrap().to_string();
|
||||
if let Some(svc) = kuber.services {
|
||||
for i in svc {
|
||||
let url = format!("https://{}/api/v1/namespaces/staging/endpoints/{}", server, i.real);
|
||||
let list = get_by_http(&*url, &*token).await;
|
||||
if let Some(list) = list {
|
||||
upstreams.insert(i.proxy.clone(), list);
|
||||
let header_list = DashMap::new();
|
||||
|
||||
let mut hl = Vec::new();
|
||||
build_headers(&i.headers, config.as_ref(), &mut hl);
|
||||
if hl.len() > 0 {
|
||||
header_list.insert(i.path.clone().unwrap_or("/".to_string()), hl);
|
||||
config.headers.insert(i.hostname.clone(), header_list);
|
||||
}
|
||||
let url = format!("https://{}/api/v1/namespaces/staging/endpoints/{}", server, i.hostname);
|
||||
let list = get_by_http(&*url, &*token, &i).await;
|
||||
list_to_upstreams(list, &upstreams, &i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !compare_dashmaps(&upstreams, &prev_upstreams) {
|
||||
let tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: config.headers.clone(),
|
||||
consul: config.consul.clone(),
|
||||
kubernetes: config.kubernetes.clone(),
|
||||
typecfg: config.typecfg.clone(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
|
||||
clone_dashmap_into(&upstreams, &prev_upstreams);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
print_upstreams(&tosend.upstreams);
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
if !compare_dashmaps(&upstreams, &prev_upstreams) {
|
||||
let tosend: Configuration = Configuration {
|
||||
upstreams: Default::default(),
|
||||
headers: config.headers.clone(),
|
||||
consul: config.consul.clone(),
|
||||
kubernetes: config.kubernetes.clone(),
|
||||
typecfg: config.typecfg.clone(),
|
||||
extraparams: config.extraparams.clone(),
|
||||
};
|
||||
clone_dashmap_into(&upstreams, &prev_upstreams);
|
||||
clone_dashmap_into(&upstreams, &tosend.upstreams);
|
||||
print_upstreams(&tosend.upstreams);
|
||||
toreturn.send(tosend).await.unwrap();
|
||||
}
|
||||
}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_by_http(url: &str, token: &str) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
|
||||
pub async fn get_by_http(url: &str, token: &str, conf: &ServiceMapping) -> Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>> {
|
||||
let client = Client::builder().timeout(Duration::from_secs(2)).danger_accept_invalid_certs(true).build().ok()?;
|
||||
|
||||
let resp = client.get(url).bearer_auth(token).send().await.ok()?;
|
||||
|
||||
let to = Duration::from_secs(1);
|
||||
let resp = client.get(url).timeout(to).bearer_auth(token).send().await.ok()?;
|
||||
if !resp.status().is_success() {
|
||||
eprintln!("Kubernetes API returned status: {}", resp.status());
|
||||
return None;
|
||||
}
|
||||
|
||||
let endpoints: Endpoints = resp.json().await.ok()?;
|
||||
let upstreams: DashMap<String, (Vec<InnerMap>, AtomicUsize)> = DashMap::new();
|
||||
|
||||
if let Some(subsets) = endpoints.subsets {
|
||||
for subset in subsets {
|
||||
if let (Some(addresses), Some(ports)) = (subset.addresses, subset.ports) {
|
||||
let mut inner_vec = Vec::new();
|
||||
for addr in addresses {
|
||||
let mut inner_vec = Vec::new();
|
||||
for port in &ports {
|
||||
let to_add = InnerMap {
|
||||
address: addr.ip.clone(),
|
||||
port: port.port.clone(),
|
||||
is_ssl: false,
|
||||
is_http2: false,
|
||||
to_https: false,
|
||||
rate_limit: None,
|
||||
to_https: conf.to_https.unwrap_or(false),
|
||||
rate_limit: conf.rate_limit,
|
||||
healthcheck: None,
|
||||
};
|
||||
inner_vec.push(to_add);
|
||||
}
|
||||
upstreams.insert("/".to_string(), (inner_vec, AtomicUsize::new(0)));
|
||||
}
|
||||
match_path(&conf, &upstreams, inner_vec.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
29
src/utils/kuberconsul.rs
Normal file
29
src/utils/kuberconsul.rs
Normal file
@@ -0,0 +1,29 @@
|
||||
use crate::utils::structs::{InnerMap, ServiceMapping, UpstreamsDashMap};
|
||||
use dashmap::DashMap;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
|
||||
/// Merges a freshly-discovered endpoint map (`lt`) into the shared upstream table.
///
/// A `None` input means discovery failed; nothing is touched so the previous
/// upstream set for this host stays in effect. Otherwise the new
/// `path -> (servers, counter)` entries are either merged into the host's
/// existing inner map or installed wholesale as a brand-new host entry keyed
/// by `i.hostname`.
pub fn list_to_upstreams(lt: Option<DashMap<String, (Vec<InnerMap>, AtomicUsize)>>, upstreams: &UpstreamsDashMap, i: &ServiceMapping) {
    if let Some(list) = lt {
        // `get` only needs a borrowed key — the original cloned the hostname
        // just to borrow the clone (clippy::redundant_clone).
        match upstreams.get(&i.hostname) {
            Some(host_entry) => {
                // Host already known: fold the new path entries into its map,
                // replacing any stale entry for the same path.
                for (path, servers) in list {
                    host_entry.value().insert(path, servers);
                }
            }
            None => {
                // First sighting of this hostname: install the whole map.
                upstreams.insert(i.hostname.clone(), list);
            }
        };
    }
}
|
||||
|
||||
pub fn match_path(conf: &ServiceMapping, upstreams: &DashMap<String, (Vec<InnerMap>, AtomicUsize)>, values: Vec<InnerMap>) {
|
||||
match conf.path {
|
||||
Some(ref p) => {
|
||||
upstreams.insert(p.to_string(), (values, AtomicUsize::new(0)));
|
||||
}
|
||||
None => {
|
||||
upstreams.insert("/".to_string(), (values, AtomicUsize::new(0)));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -108,18 +108,11 @@ async fn populate_file_upstreams(config: &mut Configuration, parsed: &Config) {
|
||||
info!("Applied Rate Limit for {} : {} request per second", hostname, rate);
|
||||
}
|
||||
|
||||
let mut server_list = Vec::new();
|
||||
let mut hl = Vec::new();
|
||||
|
||||
if let Some(headers) = &path_config.headers {
|
||||
for header in headers {
|
||||
if let Some((key, val)) = header.split_once(':') {
|
||||
hl.push((key.trim().to_string(), val.trim().to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut hl: Vec<(String, String)> = Vec::new();
|
||||
build_headers(&path_config.headers, config, &mut hl);
|
||||
header_list.insert(path.clone(), hl);
|
||||
|
||||
let mut server_list = Vec::new();
|
||||
for server in &path_config.servers {
|
||||
if let Some((ip, port_str)) = server.split_once(':') {
|
||||
if let Ok(port) = port_str.parse::<u16>() {
|
||||
@@ -224,3 +217,20 @@ fn log_builder(conf: &AppConfig) {
|
||||
}
|
||||
env_logger::builder().init();
|
||||
}
|
||||
|
||||
pub fn build_headers(path_config: &Option<Vec<String>>, config: &Configuration, hl: &mut Vec<(String, String)>) {
|
||||
if let Some(headers) = &path_config {
|
||||
for header in headers {
|
||||
if let Some((key, val)) = header.split_once(':') {
|
||||
hl.push((key.trim().to_string(), val.trim().to_string()));
|
||||
}
|
||||
}
|
||||
if let Some(push) = config.headers.get("GLOBAL_HEADERS") {
|
||||
for k in push.iter() {
|
||||
for x in k.value() {
|
||||
hl.push(x.to_owned());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,10 +10,16 @@ pub type Headers = DashMap<String, DashMap<String, Vec<(String, String)>>>;
|
||||
|
||||
/// One service-discovery mapping entry (serde-deserialized from configuration).
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ServiceMapping {
    // NOTE(review): meaning not visible in this file — presumably the name the
    // proxy exposes for this service; confirm against the config parser.
    pub proxy: String,
    // NOTE(review): presumably the real/discovered service name — confirm.
    pub real: String,
    // NOTE(review): presumably selects the discovery backend (consul /
    // kubernetes) this mapping belongs to — confirm.
    pub upstream: String,
    // Top-level key under which the discovered endpoints are stored in the
    // upstream table (see `list_to_upstreams`).
    pub hostname: String,
    // Route path for the discovered servers; `None` mounts them at "/"
    // (see `match_path`).
    pub path: Option<String>,
    // Copied onto each discovered endpoint's `to_https` flag; `None` is
    // treated as `false` (`unwrap_or(false)` in `get_by_http`).
    pub to_https: Option<bool>,
    // Copied onto each discovered endpoint's rate limit; `None` means no
    // limit is applied.
    pub rate_limit: Option<isize>,
    // NOTE(review): presumably extra header lines in the same "Key: Value"
    // format `build_headers` parses — usage not visible here, confirm.
    pub headers: Option<Vec<String>>,
}
|
||||
|
||||
// pub type Services = DashMap<String, Vec<(String, Option<String>)>>;
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Extraparams {
|
||||
pub sticky_sessions: bool,
|
||||
|
||||
@@ -29,8 +29,13 @@ pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
|
||||
println!(" Path: {}", path);
|
||||
for f in path_entry.value().0.clone() {
|
||||
println!(
|
||||
" IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}",
|
||||
f.address, f.port, f.is_ssl, f.is_http2, f.to_https
|
||||
" IP: {}, Port: {}, SSL: {}, H2: {}, To HTTPS: {}, Rate Limit: {}",
|
||||
f.address,
|
||||
f.port,
|
||||
f.is_ssl,
|
||||
f.is_http2,
|
||||
f.to_https,
|
||||
f.rate_limit.unwrap_or(0)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -250,8 +255,8 @@ pub fn drop_priv(user: String, group: String, http_addr: String, tls_addr: Optio
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_priv(addr: String) {
|
||||
let port = SocketAddr::from_str(&addr).map(|sa| sa.port()).unwrap();
|
||||
pub fn check_priv(addr: &str) {
|
||||
let port = SocketAddr::from_str(addr).map(|sa| sa.port()).unwrap();
|
||||
match port < 1024 {
|
||||
true => {
|
||||
let meta = std::fs::metadata("/proc/self").map(|m| m.uid()).unwrap();
|
||||
|
||||
@@ -52,7 +52,6 @@ impl GetHost for LB {
|
||||
let host_entry = self.headers.get(peer)?;
|
||||
let mut current_path = path.to_string();
|
||||
let mut best_match: Option<Vec<(String, String)>> = None;
|
||||
|
||||
loop {
|
||||
if let Some(entry) = host_entry.get(¤t_path) {
|
||||
if !entry.value().is_empty() {
|
||||
|
||||
@@ -215,6 +215,7 @@ impl ProxyHttp for LB {
|
||||
let path = session.req_header().uri.path();
|
||||
let host_header = host;
|
||||
let split_header = host_header.split_once(':');
|
||||
|
||||
match split_header {
|
||||
Some(sh) => {
|
||||
let yoyo = self.get_header(sh.0, path);
|
||||
|
||||
@@ -55,11 +55,11 @@ pub fn run() {
|
||||
let bind_address_http = cfg.proxy_address_http.clone();
|
||||
let bind_address_tls = cfg.proxy_address_tls.clone();
|
||||
|
||||
check_priv(bind_address_http.clone());
|
||||
check_priv(bind_address_http.as_str());
|
||||
|
||||
match bind_address_tls {
|
||||
Some(bind_address_tls) => {
|
||||
check_priv(bind_address_tls.clone());
|
||||
check_priv(bind_address_tls.as_str());
|
||||
let (tx, rx): (Sender<Vec<CertificateConfig>>, Receiver<Vec<CertificateConfig>>) = channel();
|
||||
let certs_path = cfg.proxy_certificates.clone().unwrap();
|
||||
thread::spawn(move || {
|
||||
|
||||
Reference in New Issue
Block a user