mirror of
https://github.com/sadoyan/aralez.git
synced 2026-04-30 06:48:37 +08:00
New implementations, big commit:
1. Nested upstreams with params 2. SSL upstream support 3. Upstreams move to yaml format 4. Command line start arguments
This commit is contained in:
@@ -1,73 +0,0 @@
|
||||
use crate::utils::tools::*;
|
||||
use std::collections::HashSet;
|
||||
|
||||
// #[allow(dead_code)]
|
||||
/// Returns `true` when both upstream maps contain exactly the same hostnames
/// mapped to equal server vectors. The per-host `AtomicUsize` round-robin
/// counters are deliberately ignored in the comparison.
pub fn dm(map1: &UpstreamMap, map2: &UpstreamMap) -> bool {
    // Differing key counts can never match.
    if map1.len() != map2.len() {
        return false;
    }
    // Every hostname in map1 must exist in map2 with an equal server vector.
    map1.iter().all(|entry| {
        let (vec1, _) = entry.value();
        match map2.get(entry.key()) {
            Some(other) => {
                let (vec2, _) = other.value();
                vec1 == vec2
            }
            None => false,
        }
    })
}
|
||||
#[allow(dead_code)]
|
||||
pub fn dam(map1: &UpstresmDashMap, map2: &UpstresmDashMap) -> bool {
|
||||
// Step 1: Check if both maps have the same keys
|
||||
let keys1: HashSet<_> = map1.iter().map(|entry| entry.key().clone()).collect();
|
||||
let keys2: HashSet<_> = map2.iter().map(|entry| entry.key().clone()).collect();
|
||||
if keys1 != keys2 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Step 2: Check if the inner maps have the same keys
|
||||
for entry1 in map1.iter() {
|
||||
let hostname = entry1.key();
|
||||
let inner_map1 = entry1.value();
|
||||
|
||||
let Some(inner_map2) = map2.get(hostname) else {
|
||||
return false; // Key exists in map1 but not in map2
|
||||
};
|
||||
|
||||
let inner_keys1: HashSet<_> = inner_map1.iter().map(|e| e.key().clone()).collect();
|
||||
let inner_keys2: HashSet<_> = inner_map2.iter().map(|e| e.key().clone()).collect();
|
||||
if inner_keys1 != inner_keys2 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Step 3: Compare values (ignore order)
|
||||
for path_entry in inner_map1.iter() {
|
||||
let path = path_entry.key();
|
||||
let (vec1, _counter1) = path_entry.value();
|
||||
|
||||
let Some(entry2) = inner_map2.get(path) else {
|
||||
return false; // Path exists in map1 but not in map2
|
||||
};
|
||||
let (vec2, _counter2) = entry2.value(); // ✅ Correctly extract values
|
||||
|
||||
// Compare AtomicUsize values
|
||||
// if counter1.load(Ordering::Relaxed) != counter2.load(Ordering::Relaxed) {
|
||||
// return false;
|
||||
// }
|
||||
|
||||
// Convert Vec to HashSet to compare unordered values
|
||||
let set1: HashSet<_> = vec1.iter().collect();
|
||||
let set2: HashSet<_> = vec2.iter().collect();
|
||||
if set1 != set2 {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
@@ -1,16 +1,16 @@
|
||||
use dashmap::DashMap;
|
||||
use futures::channel::mpsc::Sender;
|
||||
use futures::SinkExt;
|
||||
use std::fs;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::utils::parceyaml::load_yaml_to_dashmap;
|
||||
use crate::utils::tools::*;
|
||||
use crate::web::webserver;
|
||||
use async_trait::async_trait;
|
||||
use dashmap::DashMap;
|
||||
use futures::channel::mpsc::Sender;
|
||||
use futures::SinkExt;
|
||||
use notify::event::ModifyKind;
|
||||
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::task;
|
||||
|
||||
pub struct FromFileProvider {
|
||||
@@ -20,34 +20,32 @@ pub struct APIUpstreamProvider;
|
||||
|
||||
#[async_trait]
|
||||
pub trait Discovery {
|
||||
async fn run(&self, tx: Sender<UpstreamMap>);
|
||||
async fn start(&self, tx: Sender<UpstreamsDashMap>);
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Discovery for APIUpstreamProvider {
|
||||
async fn run(&self, toreturn: Sender<UpstreamMap>) {
|
||||
async fn start(&self, toreturn: Sender<UpstreamsDashMap>) {
|
||||
webserver::run_server(toreturn).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Discovery for FromFileProvider {
|
||||
async fn run(&self, tx: Sender<UpstreamMap>) {
|
||||
async fn start(&self, tx: Sender<UpstreamsDashMap>) {
|
||||
tokio::spawn(watch_file(self.path.clone(), tx.clone()));
|
||||
}
|
||||
}
|
||||
pub async fn watch_file(fp: String, mut toreturn: Sender<UpstreamMap>) {
|
||||
pub async fn watch_file(fp: String, mut toreturn: Sender<UpstreamsDashMap>) {
|
||||
let file_path = fp.as_str();
|
||||
let parent_dir = Path::new(file_path).parent().unwrap(); // Watch directory, not file
|
||||
let parent_dir = Path::new(file_path).parent().unwrap();
|
||||
let (local_tx, mut local_rx) = tokio::sync::mpsc::channel::<notify::Result<Event>>(1);
|
||||
|
||||
println!("Watching for changes in {:?}", parent_dir);
|
||||
let paths = fs::read_dir(parent_dir).unwrap();
|
||||
for path in paths {
|
||||
println!(" {}", path.unwrap().path().display())
|
||||
}
|
||||
|
||||
let snd = build_upstreams(file_path, "filepath");
|
||||
let snd = load_yaml_to_dashmap(file_path, "filepath");
|
||||
let _ = toreturn.send(snd).await.unwrap();
|
||||
|
||||
let _watcher_handle = task::spawn_blocking({
|
||||
@@ -71,18 +69,11 @@ pub async fn watch_file(fp: String, mut toreturn: Sender<UpstreamMap>) {
|
||||
match event {
|
||||
Ok(e) => match e.kind {
|
||||
EventKind::Modify(ModifyKind::Data(_)) | EventKind::Create(..) | EventKind::Remove(..) => {
|
||||
if e.paths[0].to_str().unwrap().ends_with("conf") {
|
||||
// if start.elapsed() > Duration::from_secs(10) {
|
||||
if e.paths[0].to_str().unwrap().ends_with("yaml") {
|
||||
if start.elapsed() > Duration::from_secs(2) {
|
||||
start = Instant::now();
|
||||
println!("Config File changed :=> {:?}", e);
|
||||
|
||||
let upstreams = build_upstreams2("etc/upstreams-long.conf", "filepath");
|
||||
print_upstreams(&upstreams);
|
||||
|
||||
println!("\n\n");
|
||||
|
||||
let snd = build_upstreams(file_path, "filepath");
|
||||
let snd = load_yaml_to_dashmap(file_path, "filepath");
|
||||
let _ = toreturn.send(snd).await.unwrap();
|
||||
}
|
||||
}
|
||||
@@ -93,59 +84,9 @@ pub async fn watch_file(fp: String, mut toreturn: Sender<UpstreamMap>) {
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn build_upstreams(d: &str, kind: &str) -> UpstreamMap {
|
||||
let upstreams = DashMap::new();
|
||||
let mut contents = d.to_string();
|
||||
match kind {
|
||||
"filepath" => {
|
||||
println!("Reading upstreams from {}", d);
|
||||
let _ = match fs::read_to_string(d) {
|
||||
Ok(data) => contents = data,
|
||||
Err(e) => {
|
||||
eprintln!("Error reading file: {:?}", e);
|
||||
return upstreams;
|
||||
}
|
||||
};
|
||||
}
|
||||
"content" => {
|
||||
println!("Reading upstreams from API post body");
|
||||
}
|
||||
_ => println!("*******************> nothing <*******************"),
|
||||
}
|
||||
|
||||
for line in contents.lines().filter(|line| !line.trim().is_empty()) {
|
||||
let mut parts = line.split_whitespace();
|
||||
|
||||
let Some(hostname) = parts.next() else {
|
||||
continue;
|
||||
};
|
||||
let Some(address) = parts.next() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let mut addr_parts = address.split(':');
|
||||
let Some(ip) = addr_parts.next() else {
|
||||
continue;
|
||||
};
|
||||
let Some(port_str) = addr_parts.next() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let Ok(port) = port_str.parse::<u16>() else {
|
||||
continue;
|
||||
};
|
||||
upstreams
|
||||
.entry(hostname.to_string()) // Step 1: Find or create entry
|
||||
.or_insert_with(|| (Vec::new(), AtomicUsize::new(0))) // Step 2: Insert if missing
|
||||
.0 // Step 3: Access the Vec<(String, u16)>
|
||||
.push((ip.to_string(), port)); // Step 4: Append new data
|
||||
}
|
||||
|
||||
upstreams
|
||||
}
|
||||
|
||||
pub fn build_upstreams2(d: &str, kind: &str) -> UpstresmDashMap {
|
||||
let upstreams: UpstresmDashMap = DashMap::new();
|
||||
#[allow(dead_code)]
|
||||
pub fn build_upstreams(d: &str, kind: &str) -> UpstreamsDashMap {
|
||||
let upstreams: UpstreamsDashMap = DashMap::new();
|
||||
let mut contents = d.to_string();
|
||||
match kind {
|
||||
"filepath" => {
|
||||
@@ -203,7 +144,5 @@ pub fn build_upstreams2(d: &str, kind: &str) -> UpstresmDashMap {
|
||||
.0
|
||||
.push((ip.to_string(), port, ssl, proto.to_string()));
|
||||
}
|
||||
// println!("\n\nResult ===> {} <===\n\n", dam(&hopar, &upstreams));
|
||||
// println!("{:?}", hopar);
|
||||
upstreams
|
||||
}
|
||||
|
||||
@@ -5,60 +5,52 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::time::interval;
|
||||
|
||||
pub async fn hc(upslist: Arc<UpstreamMap>, fullist: Arc<UpstreamMap>) {
|
||||
pub async fn hc2(upslist: Arc<UpstreamsDashMap>, fullist: Arc<UpstreamsDashMap>) {
|
||||
let mut period = interval(Duration::from_secs(2));
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = period.tick() => {
|
||||
// let before = Instant::now();
|
||||
let totest: UpstreamMap = DashMap::new();
|
||||
let fclone: UpstreamMap = DashMap::new();
|
||||
// println!("\nElapsed dash: {:.2?}", before.elapsed());
|
||||
// let before = Instant::now();
|
||||
{
|
||||
for v in fullist.iter() {
|
||||
fclone.insert(v.key().clone(), (v.value().0.clone(), AtomicUsize::new(0)));
|
||||
}
|
||||
} // lock releases when scope ends
|
||||
// println!("Elapsed full: {:.2?}", before.elapsed());
|
||||
let totest : UpstreamsDashMap = DashMap::new();
|
||||
let fclone : UpstreamsDashMap = clone_dashmap(&fullist);
|
||||
for val in fclone.iter() {
|
||||
let mut newvec = vec![];
|
||||
for hostport in val.value().0.clone(){
|
||||
let hostpart = hostport.0.split('/').last().unwrap(); // For later use
|
||||
let url = format!("http://{}:{}", hostpart, hostport.1);
|
||||
let resp = http_request(url.as_str(), "GET", "").await;
|
||||
match resp{
|
||||
true => {
|
||||
newvec.push((hostpart.to_string(), hostport.1));
|
||||
},
|
||||
false => {
|
||||
println!("Dead upstream. Host: {}, Upstream: {}:{} ",val.key(), hostpart.to_string(), hostport.1 );
|
||||
let host = val.key();
|
||||
let inner = DashMap::new();
|
||||
for path_entry in val.value().iter() {
|
||||
// let inner = DashMap::new();
|
||||
let path = path_entry.key();
|
||||
let mut innervec= Vec::new();
|
||||
for k in path_entry.value().0.iter().enumerate() {
|
||||
let (ip, port, ssl, _proto) = k.1;
|
||||
let mut _pref = "";
|
||||
match ssl {
|
||||
true => _pref = "https://",
|
||||
false => _pref = "http://",
|
||||
}
|
||||
let link = format!("{}{}:{}{}", _pref, ip, port, path);
|
||||
let resp = http_request(link.as_str(), "HEAD", "").await;
|
||||
match resp {
|
||||
true => {
|
||||
innervec.push(k.1.clone());
|
||||
}
|
||||
false => {
|
||||
println!("Dead Upstream {}, Link: {}",k.0, link);
|
||||
}
|
||||
}
|
||||
}
|
||||
inner.insert(path.clone().to_owned(), (innervec, AtomicUsize::new(0)));
|
||||
}
|
||||
totest.insert(val.key().clone(), (newvec, AtomicUsize::new(0)));
|
||||
totest.insert(host.clone(), inner);
|
||||
}
|
||||
// let before = Instant::now();
|
||||
{
|
||||
if !crate::utils::compare::dm(&upslist, &totest) {
|
||||
println!("Dashmaps not matched, synchronizing");
|
||||
upslist.clear();
|
||||
for (k, v) in totest { // loop takes the ownership
|
||||
println!("Host: {}", k);
|
||||
for vv in &v.0 {
|
||||
println!(" :===> {:?}", vv);
|
||||
}
|
||||
upslist.insert(k, v);
|
||||
}
|
||||
}
|
||||
if ! compare_dashmaps(&totest, &upslist){
|
||||
print_upstreams(&totest);
|
||||
clone_dashmap_into(&totest, &upslist);
|
||||
}
|
||||
// println!("Elapsed upsl: {:.2?}", before.elapsed());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
async fn http_request(url: &str, method: &str, payload: &str) -> bool {
|
||||
let client = reqwest::Client::new();
|
||||
let to = Duration::from_secs(1);
|
||||
@@ -83,6 +75,13 @@ async fn http_request(url: &str, method: &str, payload: &str) -> bool {
|
||||
}
|
||||
}
|
||||
}
|
||||
"HEAD" => {
|
||||
let response = client.head(url).timeout(to).send().await;
|
||||
match response {
|
||||
Ok(r) => 100 <= r.status().as_u16() && r.status().as_u16() < 500,
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
61
src/utils/parceyaml.rs
Normal file
61
src/utils/parceyaml.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
use crate::utils::tools::*;
|
||||
use dashmap::DashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
|
||||
// Serde models mirroring the upstream configuration YAML:
//   upstreams:
//     <hostname>:
//       paths:
//         <path>:
//           protocol: ...
//           ssl: ...
//           servers: ["ip:port", ...]

/// Top-level YAML document: hostname -> per-host configuration.
#[derive(Debug, Serialize, Deserialize)]
struct Config {
    upstreams: HashMap<String, HostConfig>,
}

/// Per-hostname section: URL path -> backend configuration for that path.
#[derive(Debug, Serialize, Deserialize)]
struct HostConfig {
    paths: HashMap<String, PathConfig>,
}

/// Backend settings for a single path.
#[derive(Debug, Serialize, Deserialize)]
struct PathConfig {
    protocol: String,     // carried verbatim into the upstream tuple; exact values set by config authors — TODO confirm expected set
    ssl: bool,            // true => upstream is contacted over TLS (see the ssl flag in the server tuples)
    servers: Vec<String>, // "ip:port" entries; entries that fail to parse are skipped at load time
}
|
||||
|
||||
pub fn load_yaml_to_dashmap(d: &str, kind: &str) -> UpstreamsDashMap {
|
||||
let dashmap = UpstreamsDashMap::new();
|
||||
let mut yaml_data = d.to_string();
|
||||
match kind {
|
||||
"filepath" => {
|
||||
println!("Reading upstreams from {}", d);
|
||||
let _ = match fs::read_to_string(d) {
|
||||
Ok(data) => yaml_data = data,
|
||||
Err(e) => {
|
||||
eprintln!("Error reading file: {:?}", e);
|
||||
return dashmap;
|
||||
}
|
||||
};
|
||||
}
|
||||
"content" => {
|
||||
println!("Reading upstreams from API post body");
|
||||
}
|
||||
_ => println!("*******************> nothing <*******************"),
|
||||
}
|
||||
let parsed: Config = serde_yaml::from_str(&yaml_data).expect("Failed to parse YAML");
|
||||
for (hostname, host_config) in parsed.upstreams {
|
||||
let path_map = DashMap::new();
|
||||
for (path, path_config) in host_config.paths {
|
||||
let mut server_list = Vec::new();
|
||||
for server in path_config.servers {
|
||||
if let Some((ip, port_str)) = server.split_once(':') {
|
||||
if let Ok(port) = port_str.parse::<u16>() {
|
||||
server_list.push((ip.to_string(), port, path_config.ssl, path_config.protocol.clone()));
|
||||
}
|
||||
}
|
||||
}
|
||||
path_map.insert(path, (server_list, AtomicUsize::new(0)));
|
||||
}
|
||||
dashmap.insert(hostname, path_map);
|
||||
}
|
||||
dashmap
|
||||
}
|
||||
@@ -1,9 +1,10 @@
|
||||
use dashmap::DashMap;
|
||||
use std::any::type_name;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn print_upstreams(upstreams: &UpstresmDashMap) {
|
||||
pub fn print_upstreams(upstreams: &UpstreamsDashMap) {
|
||||
for host_entry in upstreams.iter() {
|
||||
let hostname = host_entry.key();
|
||||
println!("Hostname: {}", hostname);
|
||||
@@ -19,8 +20,8 @@ pub fn print_upstreams(upstreams: &UpstresmDashMap) {
|
||||
}
|
||||
}
|
||||
|
||||
pub type UpstresmDashMap = DashMap<String, DashMap<String, (Vec<(String, u16, bool, String)>, AtomicUsize)>>;
|
||||
pub type UpstreamMap = DashMap<String, (Vec<(String, u16)>, AtomicUsize)>;
|
||||
pub type UpstreamsDashMap = DashMap<String, DashMap<String, (Vec<(String, u16, bool, String)>, AtomicUsize)>>;
|
||||
// pub type UpstreamMap = DashMap<String, (Vec<(String, u16)>, AtomicUsize)>;
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn typeoff<T>(_: T) {
|
||||
@@ -28,6 +29,7 @@ pub fn typeoff<T>(_: T) {
|
||||
println!("{:?}", to);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn string_to_bool(val: Option<&str>) -> Option<bool> {
|
||||
match val {
|
||||
Some(v) => match v {
|
||||
@@ -38,3 +40,78 @@ pub fn string_to_bool(val: Option<&str>) -> Option<bool> {
|
||||
None => Some(false),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn clone_dashmap(original: &UpstreamsDashMap) -> UpstreamsDashMap {
|
||||
let new_map: UpstreamsDashMap = DashMap::new();
|
||||
|
||||
for outer_entry in original.iter() {
|
||||
let hostname = outer_entry.key();
|
||||
let inner_map = outer_entry.value();
|
||||
|
||||
let new_inner_map = DashMap::new();
|
||||
|
||||
for inner_entry in inner_map.iter() {
|
||||
let path = inner_entry.key();
|
||||
let (vec, _) = inner_entry.value();
|
||||
let new_vec = vec.clone();
|
||||
let new_counter = AtomicUsize::new(0);
|
||||
new_inner_map.insert(path.clone(), (new_vec, new_counter));
|
||||
}
|
||||
new_map.insert(hostname.clone(), new_inner_map);
|
||||
}
|
||||
new_map
|
||||
}
|
||||
|
||||
pub fn clone_dashmap_into(original: &UpstreamsDashMap, cloned: &UpstreamsDashMap) {
|
||||
cloned.clear();
|
||||
for outer_entry in original.iter() {
|
||||
let hostname = outer_entry.key();
|
||||
let inner_map = outer_entry.value();
|
||||
|
||||
let new_inner_map = DashMap::new();
|
||||
|
||||
for inner_entry in inner_map.iter() {
|
||||
let path = inner_entry.key();
|
||||
let (vec, _) = inner_entry.value();
|
||||
let new_vec = vec.clone();
|
||||
let new_counter = AtomicUsize::new(0);
|
||||
new_inner_map.insert(path.clone(), (new_vec, new_counter));
|
||||
}
|
||||
cloned.insert(hostname.clone(), new_inner_map);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compare_dashmaps(map1: &UpstreamsDashMap, map2: &UpstreamsDashMap) -> bool {
|
||||
let keys1: HashSet<_> = map1.iter().map(|entry| entry.key().clone()).collect();
|
||||
let keys2: HashSet<_> = map2.iter().map(|entry| entry.key().clone()).collect();
|
||||
if keys1 != keys2 {
|
||||
return false;
|
||||
}
|
||||
for entry1 in map1.iter() {
|
||||
let hostname = entry1.key();
|
||||
let inner_map1 = entry1.value();
|
||||
let Some(inner_map2) = map2.get(hostname) else {
|
||||
return false;
|
||||
};
|
||||
let inner_keys1: HashSet<_> = inner_map1.iter().map(|e| e.key().clone()).collect();
|
||||
let inner_keys2: HashSet<_> = inner_map2.iter().map(|e| e.key().clone()).collect();
|
||||
if inner_keys1 != inner_keys2 {
|
||||
return false;
|
||||
}
|
||||
for path_entry in inner_map1.iter() {
|
||||
let path = path_entry.key();
|
||||
let (vec1, _counter1) = path_entry.value();
|
||||
let Some(entry2) = inner_map2.get(path) else {
|
||||
return false; // Path exists in map1 but not in map2
|
||||
};
|
||||
let (vec2, _counter2) = entry2.value();
|
||||
let set1: HashSet<_> = vec1.iter().collect();
|
||||
let set2: HashSet<_> = vec2.iter().collect();
|
||||
if set1 != set2 {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user