refactor (backend-rs): rework static cache

Co-authored-by: sup39 <dev@sup39.dev>
naskya 2024-08-01 15:57:21 +09:00
parent d5919b21fb
commit 8584bb439b
No known key found for this signature in database
GPG key ID: 712D413B3A9FED5C
9 changed files with 90 additions and 48 deletions

packages/backend-rs/src/cache/bare.rs (vendored, new file)

@@ -0,0 +1,53 @@
//! In-memory cache handler

use chrono::{DateTime, Duration, Utc};
use std::sync::Mutex;

pub struct Cache<T: Clone> {
    cache: Mutex<TimedData<T>>,
    ttl: Option<Duration>,
}

struct TimedData<T: Clone> {
    value: Option<T>,
    last_updated: DateTime<Utc>,
}

impl<T: Clone> Cache<T> {
    pub const fn new(ttl: Option<Duration>) -> Self {
        Self {
            cache: Mutex::new(TimedData {
                value: None,
                last_updated: DateTime::UNIX_EPOCH,
            }),
            ttl,
        }
    }

    pub fn set(&self, value: T) {
        if self.ttl.is_none() {
            let _ = self
                .cache
                .lock()
                .map(|mut cache| (*cache).value = Some(value));
        } else {
            let _ = self.cache.lock().map(|mut cache| {
                *cache = TimedData {
                    value: Some(value),
                    last_updated: Utc::now(),
                }
            });
        }
    }

    pub fn get(&self) -> Option<T> {
        let data = self.cache.lock().ok()?;
        if let Some(ttl) = self.ttl {
            if data.last_updated + ttl < Utc::now() {
                return None;
            }
        }
        data.value.to_owned()
    }
}
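
For reference, a minimal usage sketch of the new Cache type (the values and variable names are illustrative only, not part of this commit): a cache constructed with Some(ttl) returns the stored value only while it is younger than the TTL, while a cache constructed with None keeps the last value until it is overwritten.

use crate::cache::Cache;
use chrono::Duration;

fn demo() {
    // Illustrative only: entries expire 60 seconds after the last `set`
    let short_lived: Cache<String> = Cache::new(Some(Duration::seconds(60)));
    short_lived.set("cached value".to_owned());
    assert_eq!(short_lived.get(), Some("cached value".to_owned()));

    // No TTL: `get` keeps returning the stored value until it is replaced
    let permanent: Cache<u32> = Cache::new(None);
    permanent.set(42);
    assert_eq!(permanent.get(), Some(42));
}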

packages/backend-rs/src/cache/mod.rs (vendored, new file)

@@ -0,0 +1,3 @@
pub mod bare;
pub use bare::Cache;


@@ -1,15 +1,11 @@
//! Server information
use crate::{database::db_conn, model::entity::meta};
use crate::{cache::Cache, database::db_conn, model::entity::meta};
use sea_orm::{prelude::*, ActiveValue};
use std::sync::Mutex;
type Meta = meta::Model;
static CACHE: Mutex<Option<Meta>> = Mutex::new(None);
fn set_cache(meta: &Meta) {
    let _ = CACHE.lock().map(|mut cache| *cache = Some(meta.clone()));
}
static INSTANCE_META_CACHE: Cache<Meta> = Cache::new(None);
#[macros::export(js_name = "fetchMeta")]
pub async fn local_server_info() -> Result<Meta, DbErr> {
@@ -25,7 +21,7 @@ pub async fn update() -> Result<(), DbErr> {
async fn local_server_info_impl(use_cache: bool) -> Result<Meta, DbErr> {
    // try using cache
    if use_cache {
        if let Some(cache) = CACHE.lock().ok().and_then(|cache| cache.clone()) {
        if let Some(cache) = INSTANCE_META_CACHE.get() {
            return Ok(cache);
        }
    }
@@ -34,7 +30,7 @@ async fn local_server_info_impl(use_cache: bool) -> Result<Meta, DbErr> {
    let db = db_conn().await?;
    let meta = meta::Entity::find().one(db).await?;
    if let Some(meta) = meta {
        set_cache(&meta);
        INSTANCE_META_CACHE.set(meta.clone());
        return Ok(meta);
    }
@@ -45,7 +41,7 @@ async fn local_server_info_impl(use_cache: bool) -> Result<Meta, DbErr> {
    })
    .exec_with_returning(db)
    .await?;
    set_cache(&meta);
    INSTANCE_META_CACHE.set(meta.clone());
    Ok(meta)
}


@@ -1,6 +1,7 @@
//! NodeInfo generator
use crate::{
    cache::Cache,
    config::{local_server_info, CONFIG},
    database::db_conn,
    federation::nodeinfo::schema::*,
@@ -9,15 +10,9 @@ use crate::{
};
use sea_orm::prelude::*;
use serde_json::json;
use std::{collections::HashMap, sync::Mutex};
use std::collections::HashMap;
static CACHE: Mutex<Option<Nodeinfo21>> = Mutex::new(None);
fn set_cache(nodeinfo: &Nodeinfo21) {
    let _ = CACHE
        .lock()
        .map(|mut cache| *cache = Some(nodeinfo.to_owned()));
}
static NODEINFO_CACHE: Cache<Nodeinfo21> = Cache::new(None);
/// Fetches the number of total/active local users and local posts.
///
@@ -129,7 +124,7 @@ async fn generate_nodeinfo_2_1() -> Result<Nodeinfo21, DbErr> {
async fn nodeinfo_2_1_impl(use_cache: bool) -> Result<Nodeinfo21, DbErr> {
    if use_cache {
        if let Some(nodeinfo) = CACHE.lock().ok().and_then(|cache| cache.to_owned()) {
        if let Some(nodeinfo) = NODEINFO_CACHE.get() {
            return Ok(nodeinfo);
        }
    }
@@ -137,7 +132,7 @@ async fn nodeinfo_2_1_impl(use_cache: bool) -> Result<Nodeinfo21, DbErr> {
    let nodeinfo = generate_nodeinfo_2_1().await?;
    tracing::info!("updating cache");
    set_cache(&nodeinfo);
    NODEINFO_CACHE.set(nodeinfo.clone());
    Ok(nodeinfo)
}


@@ -1,5 +1,6 @@
#![doc = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/README.md"))]
pub mod cache;
pub mod config;
pub mod database;
pub mod federation;


@@ -1,26 +0,0 @@
//! In-memory antennas cache handler
use crate::{database::db_conn, model::entity::antenna};
use sea_orm::prelude::*;
use std::sync::{Arc, Mutex};
static CACHE: Mutex<Option<Arc<[antenna::Model]>>> = Mutex::new(None);
fn set(antennas: Arc<[antenna::Model]>) {
    let _ = CACHE.lock().map(|mut cache| *cache = Some(antennas));
}
pub(super) async fn update() -> Result<Arc<[antenna::Model]>, DbErr> {
    tracing::debug!("updating cache");
    let antennas: Arc<[antenna::Model]> =
        antenna::Entity::find().all(db_conn().await?).await?.into();
    set(antennas.clone());
    Ok(antennas)
}
pub(super) async fn get() -> Result<Arc<[antenna::Model]>, DbErr> {
    if let Some(cache) = CACHE.lock().ok().and_then(|cache| cache.clone()) {
        return Ok(cache);
    }
    update().await
}


@@ -1,4 +1,24 @@
mod cache;
mod check_hit;
pub mod process_new_note;
pub mod update;
use crate::{cache::Cache, database::db_conn, model::entity::antenna};
use sea_orm::prelude::*;
use std::sync::Arc;
static ANTENNAS_CACHE: Cache<Arc<[antenna::Model]>> = Cache::new(None);
async fn update() -> Result<Arc<[antenna::Model]>, DbErr> {
    tracing::debug!("updating cache");
    let antennas: Arc<[antenna::Model]> =
        antenna::Entity::find().all(db_conn().await?).await?.into();
    ANTENNAS_CACHE.set(antennas.clone());
    Ok(antennas)
}
async fn get_antennas() -> Result<Arc<[antenna::Model]>, DbErr> {
    if let Some(cache) = ANTENNAS_CACHE.get() {
        return Ok(cache);
    }
    update().await
}
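
For context, a small hypothetical caller (the function name is illustrative, not from this commit): because the cached value is an Arc<[antenna::Model]>, get_antennas hands out a cheap reference-counted clone of the shared slice on a cache hit instead of copying every row, and falls back to update() on a miss.

async fn count_antennas() -> Result<usize, DbErr> {
    // Arc clone on a cache hit; a fresh database fetch otherwise
    let antennas = get_antennas().await?;
    Ok(antennas.len())
}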


@@ -46,7 +46,7 @@ pub async fn update_antennas_on_new_note(
    let note_all_texts = elaborate!(note, false).await?;
    // TODO: do this in parallel
    for antenna in antenna::cache::get().await?.iter() {
    for antenna in antenna::get_antennas().await?.iter() {
        if note_muted_users.contains(&antenna.user_id) {
            continue;
        }


@@ -2,6 +2,6 @@
#[macros::ts_export]
pub async fn update_antenna_cache() -> Result<(), sea_orm::DbErr> {
    super::cache::update().await?;
    super::update().await?;
    Ok(())
}