forked from NG-SD-Plugins/PowerTools

Make limits cache update only occur exactly once per period

parent 182c30b4ee
commit d7a108004b

7 changed files with 67 additions and 52 deletions

backend/Cargo.lock (generated, 2 lines changed)
backend/Cargo.lock
@@ -1170,7 +1170,7 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
 [[package]]
 name = "powertools"
-version = "2.0.0-alpha4"
+version = "2.0.0-alpha5"
 dependencies = [
  "async-trait",
  "chrono",
@@ -1,6 +1,6 @@
 [package]
 name = "powertools"
-version = "2.0.0-alpha4"
+version = "2.0.0-alpha5"
 edition = "2021"
 authors = ["NGnius (Graham) <ngniusness@gmail.com>"]
 description = "Backend (superuser) functionality for PowerTools"
@@ -12,6 +12,8 @@ pub const LIMITS_FILE: &str = "limits_cache.ron";
 pub const LIMITS_REFRESH_PERIOD: std::time::Duration = std::time::Duration::from_secs(60 * 60 * 24); // 1 day
 #[cfg(feature = "online")]
 pub const LIMITS_STARTUP_WAIT: std::time::Duration = std::time::Duration::from_secs(60); // 1 minute
+#[cfg(feature = "online")]
+pub const LIMITS_CHECK_PERIOD: std::time::Duration = std::time::Duration::from_secs(5 * 60); // 5 minutes
 pub const LIMITS_OVERRIDE_FILE: &str = "limits_override.ron";
 
 #[cfg(feature = "online")]
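The new LIMITS_CHECK_PERIOD only controls how often the worker thread wakes up; LIMITS_REFRESH_PERIOD still decides when a refresh is actually due. Rough arithmetic on those values (a standalone sketch, not code from this commit):

// Sketch: with the values above, the worker wakes often but refreshes at most once per day.
fn main() {
    let check_secs = 5 * 60;         // LIMITS_CHECK_PERIOD
    let refresh_secs = 60 * 60 * 24; // LIMITS_REFRESH_PERIOD
    println!("wake-ups per refresh period: {}", refresh_secs / check_secs); // 288
}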
@@ -8,28 +8,7 @@ use crate::persist::{DriverJson, SettingsJson};
 use crate::settings::{Driver, General, ProviderBuilder, TBattery, TCpus, TGeneral, TGpu};
 
 fn get_limits() -> limits_core::json_v2::Base {
-    let limits_path = super::utility::limits_path();
-    match File::open(&limits_path) {
-        Ok(f) => match ron::de::from_reader(f) {
-            Ok(lim) => lim,
-            Err(e) => {
-                log::warn!(
-                    "Failed to parse limits file `{}`, cannot use for auto_detect: {}",
-                    limits_path.display(),
-                    e
-                );
-                limits_core::json_v2::Base::default()
-            }
-        },
-        Err(e) => {
-            log::warn!(
-                "Failed to open limits file `{}`: {}",
-                limits_path.display(),
-                e
-            );
-            super::limits_worker::get_limits_cached()
-        }
-    }
+    super::limits_worker::get_limits_cached()
 }
 
 fn get_limits_overrides() -> Option<Limits> {
@@ -2,6 +2,11 @@ use std::thread::{self, JoinHandle};
 use limits_core::json_v2::Base;
 
+#[inline]
+fn expired_updated_time() -> chrono::DateTime<chrono::offset::Utc> {
+    chrono::offset::Utc::now() - (crate::consts::LIMITS_REFRESH_PERIOD * 2)
+}
+
 #[cfg(feature = "online")]
 pub fn spawn() -> JoinHandle<()> {
     thread::spawn(move || {
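expired_updated_time() exists so a cache entry built from defaults (or from a file that failed to parse) starts out already stale: a timestamp two refresh periods in the past is always older than now minus one refresh period, so the first needs_update() check passes and the worker fetches fresh limits right away. A standalone restatement of that invariant (needs_update is written here as a free function, whereas the commit implements it as a method on CachedData, and chrono::Duration::from_std is used instead of subtracting a std Duration directly):

// Sketch: a timestamp seeded 2 * REFRESH_PERIOD in the past is always considered stale.
use chrono::{offset::Utc, DateTime};

const REFRESH_PERIOD: std::time::Duration = std::time::Duration::from_secs(60 * 60 * 24);

fn needs_update(updated: DateTime<Utc>, max_age: std::time::Duration) -> bool {
    updated < (Utc::now() - chrono::Duration::from_std(max_age).unwrap())
}

fn main() {
    let seeded = Utc::now() - chrono::Duration::from_std(REFRESH_PERIOD * 2).unwrap();
    assert!(needs_update(seeded, REFRESH_PERIOD)); // the first loop iteration will refresh
}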
@@ -12,41 +17,53 @@ pub fn spawn() -> JoinHandle<()> {
         loop {
             if (limits_path.exists() && limits_path.is_file()) || !limits_path.exists() {
                 // try to load limits from file, fallback to built-in default
-                let base = if limits_path.exists() {
+                let mut base = if limits_path.exists() {
                     match std::fs::File::open(&limits_path) {
                         Ok(f) => match ron::de::from_reader(f) {
                             Ok(b) => b,
                             Err(e) => {
                                 log::error!("Cannot parse {}: {}", limits_path.display(), e);
-                                Base::default()
+                                crate::utility::CachedData {
+                                    data: Base::default(),
+                                    updated: expired_updated_time(),
+                                }
                             }
                         },
                         Err(e) => {
                             log::error!("Cannot open {}: {}", limits_path.display(), e);
-                            Base::default()
+                            crate::utility::CachedData {
+                                data: Base::default(),
+                                updated: expired_updated_time(),
+                            }
                         }
                     }
                 } else {
-                    let base = Base::default();
-                    save_base(&base, &limits_path);
+                    let mut base = crate::utility::CachedData {
+                        data: Base::default(),
+                        updated: expired_updated_time(),
+                    };
+                    save_base(&mut base, &limits_path);
                     base
                 };
-                crate::api::web::set_base_url(base.store);
-                if let Some(refresh) = &base.refresh {
-                    // try to retrieve newer version
-                    match ureq::get(refresh).call() {
-                        Ok(response) => {
-                            let json_res: std::io::Result<Base> = response.into_json();
-                            match json_res {
-                                Ok(new_base) => {
-                                    save_base(&new_base, &limits_path);
-                                }
-                                Err(e) => {
-                                    log::error!("Cannot parse response from `{}`: {}", refresh, e)
-                                }
-                            }
-                        }
-                        Err(e) => log::warn!("Cannot download limits from `{}`: {}", refresh, e),
-                    }
+                crate::api::web::set_base_url(base.data.store.clone());
+                if let Some(refresh) = &base.data.refresh {
+                    if base.needs_update(crate::consts::LIMITS_REFRESH_PERIOD) {
+                        // try to retrieve newer version
+                        match ureq::get(refresh).call() {
+                            Ok(response) => {
+                                let json_res: std::io::Result<Base> = response.into_json();
+                                match json_res {
+                                    Ok(new_base) => {
+                                        base.data = new_base;
+                                        save_base(&mut base, &limits_path);
+                                    }
+                                    Err(e) => {
+                                        log::error!("Cannot parse response from `{}`: {}", refresh, e)
+                                    }
+                                }
+                            }
+                            Err(e) => log::warn!("Cannot download limits from `{}`: {}", refresh, e),
+                        }
+                    }
                 } else {
                     log::info!("limits_worker refresh is empty, terminating...");
@@ -55,7 +72,7 @@ pub fn spawn() -> JoinHandle<()> {
             } else if !limits_path.is_file() {
                 log::error!("Path for storing limits is not a file!");
             }
-            thread::sleep(crate::consts::LIMITS_REFRESH_PERIOD);
+            thread::sleep(crate::consts::LIMITS_CHECK_PERIOD);
         }
         log::warn!("limits_worker completed!");
     })
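Net effect of the two limits_worker hunks above: the thread now wakes every LIMITS_CHECK_PERIOD but only touches the network when the cached data is older than LIMITS_REFRESH_PERIOD, and because the timestamp only advances when a download is saved, a failed download is retried on the next wake-up instead of a day later. A condensed sketch of that control flow (file I/O, error handling, and the real module paths are stripped out; fetch_remote is a hypothetical stand-in for the ureq call):

// Condensed sketch of the reworked worker loop; not the actual PowerTools code.
use std::time::Duration;
use chrono::{offset::Utc, DateTime};

const CHECK_PERIOD: Duration = Duration::from_secs(5 * 60);
const REFRESH_PERIOD: Duration = Duration::from_secs(60 * 60 * 24);

struct CachedData<T> {
    data: T,
    updated: DateTime<Utc>,
}

impl<T> CachedData<T> {
    fn needs_update(&self, max_age: Duration) -> bool {
        self.updated < (Utc::now() - chrono::Duration::from_std(max_age).unwrap())
    }
}

// Hypothetical stand-in for the ureq download in the real worker.
fn fetch_remote() -> Option<String> {
    None
}

fn main() {
    // Seed with an already-expired timestamp so the first iteration tries to refresh.
    let mut base = CachedData {
        data: String::from("built-in defaults"),
        updated: Utc::now() - chrono::Duration::from_std(REFRESH_PERIOD * 2).unwrap(),
    };
    loop {
        if base.needs_update(REFRESH_PERIOD) {
            if let Some(new_data) = fetch_remote() {
                base.data = new_data;
                base.updated = Utc::now(); // the commit performs this stamp inside save_base()
            }
            // On failure `updated` stays old, so the next wake-up retries the download.
        }
        std::thread::sleep(CHECK_PERIOD);
    }
}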
@@ -68,33 +85,35 @@ pub fn spawn() -> JoinHandle<()> {
     })
 }
 
 pub fn get_limits_cached() -> Base {
     let limits_path = super::utility::limits_path();
-    if limits_path.is_file() {
+    let cached: crate::utility::CachedData<Base> = if limits_path.is_file() {
         match std::fs::File::open(&limits_path) {
             Ok(f) => match ron::de::from_reader(f) {
                 Ok(b) => b,
                 Err(e) => {
                     log::error!("Cannot parse {}: {}", limits_path.display(), e);
-                    Base::default()
+                    return Base::default();
                 }
             },
             Err(e) => {
                 log::error!("Cannot open {}: {}", limits_path.display(), e);
-                Base::default()
+                return Base::default();
             }
         }
     } else {
-        Base::default()
-    }
+        return Base::default();
+    };
+    cached.data
 }
 
 #[cfg(feature = "online")]
-fn save_base(new_base: &Base, path: impl AsRef<std::path::Path>) {
+fn save_base(new_base: &mut crate::utility::CachedData<Base>, path: impl AsRef<std::path::Path>) {
     let limits_path = path.as_ref();
+    new_base.updated = chrono::offset::Utc::now();
     match std::fs::File::create(&limits_path) {
         Ok(f) => {
-            match ron::ser::to_writer_pretty(f, &new_base, crate::utility::ron_pretty_config()) {
+            match ron::ser::to_writer_pretty(f, new_base, crate::utility::ron_pretty_config()) {
                 Ok(_) => log::info!("Successfully saved new limits to {}", limits_path.display()),
                 Err(e) => log::error!(
                     "Failed to save limits json to file `{}`: {}",
@@ -3,6 +3,9 @@
 use std::io::{Read, Write};
 use std::os::unix::fs::PermissionsExt;
 
+use serde::{Deserialize, Serialize};
+use chrono::{offset::Utc, DateTime};
+
 /*pub fn unwrap_lock<'a, T: Sized>(
     result: LockResult<MutexGuard<'a, T>>,
     lock_name: &str,
@@ -16,6 +19,18 @@ use std::os::unix::fs::PermissionsExt;
     }
 }*/
 
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct CachedData<T> {
+    pub data: T,
+    pub updated: DateTime<Utc>,
+}
+
+impl <T> CachedData<T> {
+    pub fn needs_update(&self, max_age: std::time::Duration) -> bool {
+        self.updated < (Utc::now() - max_age)
+    }
+}
+
 pub fn ron_pretty_config() -> ron::ser::PrettyConfig {
     ron::ser::PrettyConfig::default()
         .struct_names(true)
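The serde and chrono imports added to utility.rs are there so CachedData<T> can be written to and read back from the RON cache file with its timestamp intact, which is what lets staleness survive a restart. A standalone round-trip sketch (assumes the ron and serde crates plus chrono's serde feature, as the commit itself does; the struct copies the shape added above, everything else is illustrative):

use chrono::{offset::Utc, DateTime};
use serde::{Deserialize, Serialize};

// Same shape as the struct added above; copied here so the sketch stands alone.
#[derive(Serialize, Deserialize, Clone, Debug)]
struct CachedData<T> {
    data: T,
    updated: DateTime<Utc>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let original = CachedData { data: vec![1u32, 2, 3], updated: Utc::now() };
    // Serialize to RON and back; `updated` rides along with the payload,
    // which is what lets the worker judge staleness across restarts.
    let text = ron::ser::to_string_pretty(&original, ron::ser::PrettyConfig::default())?;
    let restored: CachedData<Vec<u32>> = ron::de::from_str(&text)?;
    assert_eq!(original.data, restored.data);
    println!("cached at {}, payload {:?}", restored.updated, restored.data);
    Ok(())
}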
@@ -1,6 +1,6 @@
 {
   "name": "PowerTools",
-  "version": "2.0.0-alpha4",
+  "version": "2.0.0-alpha5",
   "description": "Power tweaks for power users",
   "scripts": {
     "build": "shx rm -rf dist && rollup -c",