From de183e4023156384521a191c75dc02daa5cf5857 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Fri, 10 Oct 2025 20:18:55 +0200 Subject: [PATCH 01/17] feat(mediadeletion) initial work on media deletion support (moved to dashadev) --- src/core/config/mod.rs | 29 +++ src/database/map/insert.rs | 30 +++ src/database/maps.rs | 4 + src/service/media/mod.rs | 66 +++++- src/service/media/retention.rs | 275 +++++++++++++++++++++++++ src/service/rooms/timeline/append.rs | 34 ++- src/service/rooms/timeline/backfill.rs | 38 +++- tuwunel-example.toml | 15 ++ 8 files changed, 484 insertions(+), 7 deletions(-) create mode 100644 src/service/media/retention.rs diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 8aa9c2f4f..dbd826098 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1530,6 +1530,10 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub forbidden_remote_server_names: RegexSet, + /// Media retention configuration (flattened; formerly media.retention.*) + #[serde(default, alias = "media", alias = "media.retention")] + pub media: MediaRetentionConfig, + /// List of forbidden server names via regex patterns that we will block all /// outgoing federated room directory requests for. Useful for preventing /// our users from wandering into bad servers or spaces. @@ -2132,6 +2136,26 @@ pub struct WellKnownConfig { pub support_mxid: Option, } +#[derive(Clone, Debug, Deserialize, Default)] +#[config_example_generator( + filename = "tuwunel-example.toml", + section = "global.media" +)] +pub struct MediaRetentionConfig { + /// What to do with local media when an event referencing it is redacted. + /// keep | delete_if_unreferenced | force_delete_local + /// default: "keep" + #[serde(default = "default_media_retention_on_redaction")] + pub on_redaction: String, + + /// Grace period in seconds before deleting queued media. 
+ /// default: 0 + #[serde(default)] + pub grace_period_secs: u64, +} + +fn default_media_retention_on_redaction() -> String { "keep".to_owned() } + #[derive(Clone, Copy, Debug, Deserialize, Default)] #[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] #[config_example_generator( @@ -2573,6 +2597,11 @@ impl Config { } pub fn check(&self) -> Result<(), Error> { check(self) } + + // Media retention helpers + pub fn media_retention_on_redaction(&self) -> &str { self.media.on_redaction.as_str() } + + pub fn media_retention_grace_period_secs(&self) -> u64 { self.media.grace_period_secs } } fn true_fn() -> bool { true } diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 694543e9f..e5bbfef15 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -59,3 +59,33 @@ where self.engine.flush().expect("database flush error"); } } + +/// Atomically write a batch of raw put and delete operations. +#[implement(super::Map)] +#[tracing::instrument(skip(self, puts, dels), fields(%self), level = "trace")] +pub fn write_batch_raw(&self, puts: Ip, dels: Ik) +where + Ip: IntoIterator, Vec)>, + Ik: IntoIterator>, +{ + let mut batch = WriteBatchWithTransaction::::default(); + let cf = self.cf(); + for (k, v) in puts { + batch.put_cf(&cf, &k, &v); + } + for k in dels { + batch.delete_cf(&cf, &k); + } + + let write_options = &self.write_options; + use crate::util::or_else as db_or_else; + self.engine + .db + .write_opt(batch, write_options) + .or_else(db_or_else) + .expect("database write batch error"); + + if !self.engine.corked() { + self.engine.flush().expect("database flush error"); + } +} diff --git a/src/database/maps.rs b/src/database/maps.rs index 0177cf23c..0575e58b4 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -108,6 +108,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "mediaid_file", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "media_retention", + ..descriptor::RANDOM_SMALL + }, Descriptor { 
name: "mediaid_user", ..descriptor::RANDOM_SMALL diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index a7bbd8643..5ec6bd8f7 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,11 +1,12 @@ pub mod blurhash; mod data; +mod retention; pub(super) mod migrations; mod preview; mod remote; mod tests; mod thumbnail; -use std::{path::PathBuf, sync::Arc, time::SystemTime}; +use std::{path::PathBuf, sync::Arc, time::{Duration, SystemTime}}; use async_trait::async_trait; use base64::{Engine as _, engine::general_purpose}; @@ -15,12 +16,11 @@ use tokio::{ io::{AsyncReadExt, AsyncWriteExt, BufReader}, }; use tuwunel_core::{ - Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace, - utils::{self, MutexMap}, - warn, + Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, warn }; use self::data::{Data, Metadata}; +use self::retention::Retention; pub use self::thumbnail::Dim; #[derive(Debug)] @@ -34,6 +34,7 @@ pub struct Service { url_preview_mutex: MutexMap, pub(super) db: Data, services: Arc, + retention: Retention, } /// generated MXC ID (`media-id`) length @@ -52,12 +53,30 @@ impl crate::Service for Service { url_preview_mutex: MutexMap::new(), db: Data::new(args.db), services: args.services.clone(), + retention: Retention::new(args.db), })) } async fn worker(self: Arc) -> Result { self.create_media_dir().await?; + // startup summary for retention configuration + warn!(policy = self.services.server.config.media_retention_on_redaction(), grace = self.services.server.config.media_retention_grace_period_secs(), "retention: startup configuration"); + + // deletion worker loop (scaffold): runs periodically respecting grace period + let grace = Duration::from_secs(self.services.server.config.media_retention_grace_period_secs()); + let retention = self.retention.clone(); + let this = self.clone(); + warn!("creating media deletion worker"); + tokio::spawn(async move { + loop { + 
if let Err(e) = retention.worker_process_queue(&this, grace).await { + debug_warn!("media retention worker error: {e}"); + } + tokio::time::sleep(Duration::from_secs(10)).await; + } + }); + Ok(()) } @@ -65,6 +84,45 @@ impl crate::Service for Service { } impl Service { + // below helpers can be called by message processing pipelines when events are created/edited/redacted. + pub fn retention_insert_mxcs_on_event(&self, event_id: &str, room_id: &str, mxcs: &[(String, bool, String)]) { + self.retention.insert_mxcs_on_event(event_id, room_id, mxcs); + } + + pub async fn retention_decrement_on_redaction(&self, event_id: &str) { + use self::retention::RetentionPolicy; + let policy = RetentionPolicy::from_str(self.services.server.config.media_retention_on_redaction()); + // try normal path + if let Ok(deleted) = self.retention.decrement_refcount_on_redaction(event_id, policy).await { + if !deleted.is_empty() { return; } + } + // fallback: attempt reconstruction if no mer: entries existed + // fetch original PDU JSON; if found, scan for mxc:// URIs + if let Ok(parsed_eid) = ruma::EventId::parse(event_id) { if let Ok(json_obj) = self.services.timeline.get_pdu_json(&parsed_eid).await { + let event_type = json_obj.get("type").cloned(); + let unsigned_val = json_obj.get("unsigned").cloned(); + let unsigned_keys = unsigned_val.as_ref().and_then(|u| u.as_object()).map(|obj| obj.keys().cloned().collect::>()); + let content_debug = json_obj.get("content").cloned(); + warn!(event_id, ?event_type, keys=?json_obj.keys().collect::>(), ?unsigned_keys, ?unsigned_val, ?content_debug, "retention: fallback inspecting raw event"); + let mut mxcs = std::collections::HashSet::new(); + fn scan(val: &ruma::CanonicalJsonValue, out: &mut std::collections::HashSet) { + match val { + ruma::CanonicalJsonValue::String(s) if s.starts_with("mxc://") => { out.insert(s.clone()); }, + ruma::CanonicalJsonValue::Object(map) => { for v in map.values() { scan(v, out); } }, + 
ruma::CanonicalJsonValue::Array(arr) => { for v in arr { scan(v, out); } }, + _ => {}, + } + } + scan(&ruma::CanonicalJsonValue::Object(json_obj.clone()), &mut mxcs); + let mxcs_len = mxcs.len(); + for mxc in mxcs { + if policy == RetentionPolicy::ForceDeleteLocal { + self.retention.queue_media_for_deletion(&mxc); + } + } + warn!(event_id, count=mxcs_len, "retention: fallback redaction scan queued media"); }} + } + /// Uploads a file. pub async fn create( &self, diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs new file mode 100644 index 000000000..674755484 --- /dev/null +++ b/src/service/media/retention.rs @@ -0,0 +1,275 @@ +use std::{path::PathBuf, sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}}; + +use futures::StreamExt; +use serde::{Deserialize, Serialize}; +use tuwunel_core::{Result, err, trace, warn}; +use tuwunel_database::{Cbor, Deserialized, Map, keyval::serialize_val}; + +use super::Service; + +/// keyspace prefixes inside the `media_retention` CF +const K_MREF: &str = "mref:"; // mref: +const K_MER: &str = "mer:"; // mer:: +const K_QUEUE: &str = "qdel:"; // qdel: => DeletionCandidate + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct MediaRef { + pub refcount: i64, + pub local: bool, + pub first_seen_ts: u64, + pub last_seen_ts: u64, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct MediaEventRef { + pub mxc: String, + pub room_id: String, + pub kind: String, // "content.url", "thumbnail_url" +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct DeletionCandidate { + pub mxc: String, + pub enqueued_ts: u64, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(super) enum RetentionPolicy { + Keep, + DeleteIfUnreferenced, + ForceDeleteLocal, +} + +impl RetentionPolicy { + pub(super) fn from_str(s: &str) -> Self { + match s { + "delete_if_unreferenced" => Self::DeleteIfUnreferenced, + "force_delete_local" => Self::ForceDeleteLocal, + _ => Self::Keep, + } + } +} 
+ +#[derive(Clone)] +pub(super) struct Retention { + cf: Arc, +} + +impl Retention { + pub(super) fn new(db: &Arc) -> Self { + Self { cf: db["media_retention"].clone() } + } + + #[inline] + fn key_mref(mxc: &str) -> String { format!("{K_MREF}{mxc}") } + + #[inline] + fn key_mer(event_id: &str, kind: &str) -> String { format!("{K_MER}{event_id}:{kind}") } + + #[inline] + fn key_queue(mxc: &str) -> String { format!("{K_QUEUE}{mxc}") } + + #[allow(dead_code)] + pub(super) async fn get_media_ref(&self, mxc: &str) -> Result> { + match self.cf.get(&Self::key_mref(mxc)).await { + Ok(handle) => Ok(Some(handle.deserialized::>()?.0)), + Err(_) => Ok(None), + } + } + + #[allow(dead_code)] + pub(super) fn put_media_ref(&self, mxc: &str, mr: &MediaRef) { self.cf.raw_put(Self::key_mref(mxc), Cbor(mr)); } + + #[allow(dead_code)] + pub(super) async fn get_media_event_ref(&self, event_id: &str, kind: &str) -> Result> { + match self.cf.get(&Self::key_mer(event_id, kind)).await { + Ok(handle) => Ok(Some(handle.deserialized::>()?.0)), + Err(_) => Ok(None), + } + } + + #[allow(dead_code)] + pub(super) fn put_media_event_ref(&self, event_id: &str, mer: &MediaEventRef) { + let key = Self::key_mer(event_id, &mer.kind); + self.cf.raw_put(key, Cbor(mer)); + } + + /// insert/update references for a newly created or edited event. 
+ /// + /// assumptions: + /// - `mxcs` is a slice of (mxc_uri, local, kind) + pub(super) fn insert_mxcs_on_event(&self, event_id: &str, room_id: &str, mxcs: &[(String, bool, String)]) { + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); + if mxcs.is_empty() { + warn!(%event_id, "retention: insert called with zero MXCs"); + return; + } + warn!(%event_id, count = mxcs.len(), %room_id, "retention: inserting media refs for event"); + + let mut puts: Vec<(Vec, Vec)> = Vec::with_capacity(mxcs.len() * 2); + for (mxc, local, kind) in mxcs.iter() { + // update MediaEventRef + let mer = MediaEventRef { mxc: mxc.clone(), room_id: room_id.to_owned(), kind: kind.clone() }; + let key_mer = Self::key_mer(event_id, kind).into_bytes(); + let val_mer = serialize_val(Cbor(&mer)).expect("serialize mer").to_vec(); + puts.push((key_mer, val_mer)); + + // upsert MediaRef + let key_mref = Self::key_mref(mxc); + let current = self.cf.get_blocking(&key_mref); + let (mr, new) = match current.and_then(|h| h.deserialized::>()) { + Ok(Cbor(mut v)) => { + v.refcount = v.refcount.saturating_add(1); + v.last_seen_ts = now; + (v, false) + }, + _ => (MediaRef { refcount: 1, local: *local, first_seen_ts: now, last_seen_ts: now }, true), + }; + if new { + warn!(%event_id, %mxc, %kind, local = local, refcount = mr.refcount, "retention: new media ref"); + } else { + warn!(%event_id, %mxc, %kind, local = local, refcount = mr.refcount, "retention: increment media ref"); + } + let val_mref = serialize_val(Cbor(&mr)).expect("serialize mref").to_vec(); + puts.push((key_mref.into_bytes(), val_mref)); + } + self.cf.write_batch_raw(puts, std::iter::empty()); + } + + /// decrement refcounts for all MediaEventRef mapped by this event id. 
+ /// if policy is set to delete unreferenced/local, enqueue for deletion + pub(super) async fn decrement_refcount_on_redaction( + &self, + event_id: &str, + policy: RetentionPolicy, + ) -> Result> { + warn!(%event_id, ?policy, "retention: redaction decrement start"); + let prefix = format!("{K_MER}{event_id}:"); + let prefixb = prefix.as_bytes().to_vec(); + let mut to_delete: Vec = Vec::new(); + let mut puts: Vec<(Vec, Vec)> = Vec::new(); + let mut dels: Vec> = Vec::new(); + let mut processed = 0usize; + + let mut stream = self.cf.stream_raw_prefix::<&str, Cbor, _>(&prefixb); + while let Some(item) = stream.next().await.transpose()? { + let (key, Cbor(mer)) = item; + processed = processed.saturating_add(1); + // load MediaRef + let key_mref = Self::key_mref(&mer.mxc); + let current = self.cf.get(&key_mref).await.ok(); + if let Some(handle) = current { + let Cbor(mut mr): Cbor = handle.deserialized::>()?; + mr.refcount = mr.refcount.saturating_sub(1); + mr.last_seen_ts = now_secs(); + let should_queue = match policy { + RetentionPolicy::Keep => false, + RetentionPolicy::DeleteIfUnreferenced => mr.refcount == 0, + RetentionPolicy::ForceDeleteLocal => mr.local, + }; + warn!(%event_id, mxc = %mer.mxc, kind = %mer.kind, new_refcount = mr.refcount, should_queue, local = mr.local, "retention: redaction updated ref"); + let val_mref = serialize_val(Cbor(&mr))?.to_vec(); + puts.push((key_mref.into_bytes(), val_mref)); + if should_queue { + let kq = Self::key_queue(&mer.mxc).into_bytes(); + let vq = serialize_val(Cbor(&DeletionCandidate { mxc: mer.mxc.clone(), enqueued_ts: now_secs() }))?.to_vec(); + puts.push((kq, vq)); + warn!(%event_id, mxc = %mer.mxc, "retention: queued media for deletion"); + to_delete.push(mer.mxc.clone()); + } + } + + // remove the mer entry regardless + dels.push(key.as_bytes().to_vec()); + } + self.cf.write_batch_raw(puts, dels); + if processed == 0 { + warn!(%event_id, "retention: no media event refs found on redaction; did insert run during 
creation?"); + } + warn!(%event_id, queued = to_delete.len(), processed, "retention: redaction decrement complete"); + Ok(to_delete) + } + + /// qeue a media item for deletion (idempotent best-effort). + pub(super) fn queue_media_for_deletion(&self, mxc: &str) { + let key = Self::key_queue(mxc); + // overwrite / insert candidate with fresh timestamp + let cand = DeletionCandidate { mxc: mxc.to_owned(), enqueued_ts: now_secs() }; + warn!(mxc, "retention: fallback queue media for deletion"); + self.cf.raw_put(key, Cbor(&cand)); + } + + /// worker: processes queued deletion candidates after grace period. + pub(super) async fn worker_process_queue(&self, service: &Service, grace: Duration) -> Result<()> { + let prefix = K_QUEUE.as_bytes(); + warn!(?grace, "retention: worker iteration start"); + let mut stream = self.cf.stream_raw_prefix::<&str, Cbor, _>(&prefix); + let mut processed = 0usize; + let mut deleted = 0usize; + while let Some(item) = stream.next().await.transpose()? { + let (key, Cbor(cand)) = item; + let now = now_secs(); + if now < cand.enqueued_ts.saturating_add(grace.as_secs()) { + warn!(mxc = %cand.mxc, wait = cand.enqueued_ts + grace.as_secs() - now, "retention: grace period not met yet"); + continue; + } + + // attempt deletion of local media files + let deleted_bytes = self.delete_local_media(service, &cand.mxc).await.unwrap_or(0); + if deleted_bytes > 0 { + warn!(mxc = %cand.mxc, bytes = deleted_bytes, "retention: media deleted"); + } else { + warn!(mxc = %cand.mxc, "retention: queued media had no bytes deleted (already gone?)"); + } + + // remove metadata entries (best-effort) + let dels = vec![key.as_bytes().to_vec(), Self::key_mref(&cand.mxc).into_bytes()]; + self.cf.write_batch_raw(std::iter::empty(), dels); + processed = processed.saturating_add(1); + deleted = deleted.saturating_add(1); + } + if processed == 0 { + warn!("retention: worker iteration found no deletion candidates"); + } else { + warn!(processed, deleted, "retention: worker 
iteration complete"); + } + + Ok(()) + } + + async fn delete_local_media(&self, service: &Service, mxc: &str) -> Result { + // delete original + thumbnails (any dimensions) + use ruma::Mxc; + let mxc_parsed: Mxc<'_> = mxc.try_into().map_err(|_| err!(Request(BadJson("invalid mxc"))))?; + + // delete originals + let keys = service.db.search_mxc_metadata_prefix(&mxc_parsed).await.unwrap_or_default(); + let mut total = 0u64; + for key in keys { + let path = service.get_media_file(&key); + total = total.saturating_add(remove_file_tolerant(path)); + let legacy = service.get_media_file_b64(&key); + total = total.saturating_add(remove_file_tolerant(legacy)); + } + warn!("retention: total bytes deleted {total}"); + Ok(total) + } +} + +fn now_secs() -> u64 { SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs() } + +fn remove_file_tolerant(path: PathBuf) -> u64 { + match std::fs::metadata(&path) { + Ok(meta) => { + let len = meta.len(); + if let Err(e) = std::fs::remove_file(&path) { + trace!(?path, "ignore remove error: {e}"); + 0 + } else { + trace!(?path, "removed"); + len + } + }, + Err(_) => 0, + } +} diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index e62f142a5..8118b0ef6 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -4,6 +4,7 @@ use std::{ }; use futures::StreamExt; +use tracing::warn; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedUserId, RoomId, RoomVersionId, UserId, events::{ @@ -285,7 +286,7 @@ where self.increment_notification_counts(pdu.room_id(), notifies, highlights); match *pdu.kind() { - | TimelineEventType::RoomRedaction => { + | TimelineEventType::RoomRedaction => { use RoomVersionId::*; let room_version_id = self @@ -305,6 +306,8 @@ where { self.redact_pdu(redact_id, pdu, shortroomid) .await?; + // media retention decrement + self.services.media.retention_decrement_on_redaction(redact_id.as_str()).await; } } }, @@ -319,6 
+322,7 @@ where { self.redact_pdu(redact_id, pdu, shortroomid) .await?; + self.services.media.retention_decrement_on_redaction(redact_id.as_str()).await; } } }, @@ -367,7 +371,7 @@ where .await?; } }, - | TimelineEventType::RoomMessage => { + | TimelineEventType::RoomMessage => { let content: ExtractBody = pdu.get_content()?; if let Some(body) = content.body { self.services @@ -384,6 +388,32 @@ where .message_hook(&pdu.event_id, &pdu.room_id, &pdu.sender, &body) .await; } + // media retention insertion (structured extraction + fallback JSON scan) + if let Ok(msg_full) = pdu.get_content::() { + warn!(event_id=%pdu.event_id(), msg=?msg_full, "retention: debug message content"); + use ruma::events::room::MediaSource; + let mut mxcs: Vec<(String,bool,String)> = Vec::new(); + let push_media = |mxcs: &mut Vec<(String,bool,String)>, src: &MediaSource, label: &str, this: &super::Service| { + let (maybe_mxc, enc) = match src { MediaSource::Plain(m) => (Some(m.to_string()), false), MediaSource::Encrypted(f) => (Some(f.url.to_string()), true) }; + if let Some(uri) = maybe_mxc { if uri.starts_with("mxc://") { + let local = >::try_from(uri.as_str()).map(|p| this.services.globals.server_is_ours(p.server_name)).unwrap_or(false); + mxcs.push((uri.clone(), local, label.to_owned())); + if enc { warn!(event_id=%pdu.event_id(), label=%label, mxc=%uri, local, "retention: extracted encrypted media"); } else { warn!(event_id=%pdu.event_id(), label=%label, mxc=%uri, local, "retention: extracted plain media"); } + }} + }; + match &msg_full.msgtype { + ruma::events::room::message::MessageType::Image(c) => { push_media(&mut mxcs, &c.source, "image.source", self); if let Some(info)=c.info.as_ref(){ if let Some(th)=info.thumbnail_source.as_ref(){ push_media(&mut mxcs, th, "image.thumbnail_source", self); } } }, + ruma::events::room::message::MessageType::File(c) => { push_media(&mut mxcs, &c.source, "file.source", self); if let Some(info)=c.info.as_ref(){ if let 
Some(th)=info.thumbnail_source.as_ref(){ push_media(&mut mxcs, th, "file.thumbnail_source", self); } } }, + ruma::events::room::message::MessageType::Video(c) => { push_media(&mut mxcs, &c.source, "video.source", self); if let Some(info)=c.info.as_ref(){ if let Some(th)=info.thumbnail_source.as_ref(){ push_media(&mut mxcs, th, "video.thumbnail_source", self); } } }, + ruma::events::room::message::MessageType::Audio(c) => { push_media(&mut mxcs, &c.source, "audio.source", self); }, + _ => {}, + } + // (fallback JSON scan removed for now, structured extraction should capture supported media types) + if mxcs.is_empty() { warn!(event_id=%pdu.event_id(), "retention: no media sources extracted"); } + else { warn!(event_id=%pdu.event_id(), count=mxcs.len(), "retention: inserting media refs"); self.services.media.retention_insert_mxcs_on_event(pdu.event_id().as_str(), pdu.room_id().as_str(), &mxcs); } + } else { + warn!(event_id=%pdu.event_id(), "retention: failed to decode RoomMessageEventContent for extraction"); + } }, | _ => {}, } diff --git a/src/service/rooms/timeline/backfill.rs b/src/service/rooms/timeline/backfill.rs index 3db322ccc..229d9a1e3 100644 --- a/src/service/rooms/timeline/backfill.rs +++ b/src/service/rooms/timeline/backfill.rs @@ -8,7 +8,12 @@ use ruma::{ CanonicalJsonObject, EventId, RoomId, ServerName, api::federation, events::{ - StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent, + StateEventType, TimelineEventType, + room::{ + power_levels::RoomPowerLevelsEventContent, + message::{RoomMessageEventContent, MessageType}, + redaction::RoomRedactionEventContent, + }, }, uint, }; @@ -213,6 +218,37 @@ pub async fn backfill_pdu( .search .index_pdu(shortroomid, &pdu_id, &body); } + // Retention insertion for backfilled message + if let Ok(full) = pdu.get_content::() { + use ruma::events::room::MediaSource; + let mut mxcs: Vec<(String, bool, String)> = Vec::new(); + let push_plain = |mxcs: &mut Vec<(String, bool, String)>, 
src: &MediaSource, label: &str| { + if let MediaSource::Plain(mxc) = src { let s = mxc.to_string(); if s.starts_with("mxc://") { mxcs.push((s, true, label.to_owned())); } } + }; + match &full.msgtype { + MessageType::Image(c) => { + push_plain(&mut mxcs, &c.source, "image.source"); + if let Some(info) = c.info.as_ref() { if let Some(th) = info.thumbnail_source.as_ref() { push_plain(&mut mxcs, th, "image.thumbnail_source"); } } + }, + MessageType::File(c) => { + push_plain(&mut mxcs, &c.source, "file.source"); + if let Some(info) = c.info.as_ref() { if let Some(th) = info.thumbnail_source.as_ref() { push_plain(&mut mxcs, th, "file.thumbnail_source"); } } + }, + MessageType::Video(c) => { + push_plain(&mut mxcs, &c.source, "video.source"); + if let Some(info) = c.info.as_ref() { if let Some(th) = info.thumbnail_source.as_ref() { push_plain(&mut mxcs, th, "video.thumbnail_source"); } } + }, + MessageType::Audio(c) => { + push_plain(&mut mxcs, &c.source, "audio.source"); + }, + _ => {}, + } + if !mxcs.is_empty() { self.services.media.retention_insert_mxcs_on_event(pdu.event_id().as_str(), pdu.room_id().as_str(), &mxcs); } + } + } + + if pdu.kind == TimelineEventType::RoomRedaction { + if let Ok(red) = pdu.get_content::() { if let Some(rid) = red.redacts { self.services.media.retention_decrement_on_redaction(rid.as_str()).await; } } } drop(mutex_lock); diff --git a/tuwunel-example.toml b/tuwunel-example.toml index 51d829d74..50567827e 100644 --- a/tuwunel-example.toml +++ b/tuwunel-example.toml @@ -1321,6 +1321,10 @@ # #forbidden_remote_server_names = [] +# Media retention configuration (flattened; formerly media.retention.*) +# +#media = false + # List of forbidden server names via regex patterns that we will block all # outgoing federated room directory requests for. Useful for preventing # our users from wandering into bad servers or spaces. 
@@ -1821,6 +1825,17 @@ # #support_mxid = +#[global.media] + +# What to do with local media when an event referencing it is redacted. +# keep | delete_if_unreferenced | force_delete_local +# +#on_redaction = "keep" + +# Grace period in seconds before deleting queued media. +# +#grace_period_secs = 0 + #[global.blurhashing] # blurhashing x component, 4 is recommended by https://blurha.sh/ From 687f9b811d9f18e43cd3dcd9bf13879914eaccdc Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Sat, 11 Oct 2025 11:25:03 +0200 Subject: [PATCH 02/17] code changes to enable sending messages to user rooms, basic implementation for sending messages to users if they delete a message containing media --- rust-toolchain.toml => rust-toolchainn.toml | 0 src/admin/debug/commands.rs | 15 +- src/api/client/state.rs | 5 +- src/api/router/auth.rs | 20 +- src/core/config/mod.rs | 5 +- src/service/admin/create.rs | 5 +- src/service/admin/grant.rs | 7 +- src/service/command/run.rs | 4 +- src/service/media/data.rs | 20 +- src/service/media/mod.rs | 401 ++++++++++++-- src/service/media/retention.rs | 580 ++++++++++++-------- src/service/rooms/timeline/append.rs | 91 ++- src/service/rooms/timeline/backfill.rs | 61 +- src/service/rooms/timeline/build.rs | 93 +++- src/service/userroom/mod.rs | 38 +- src/user/user.rs | 35 +- 16 files changed, 1015 insertions(+), 365 deletions(-) rename rust-toolchain.toml => rust-toolchainn.toml (100%) diff --git a/rust-toolchain.toml b/rust-toolchainn.toml similarity index 100% rename from rust-toolchain.toml rename to rust-toolchainn.toml diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 166d94988..8d8d57d36 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -231,8 +231,9 @@ pub(super) async fn get_remote_pdu( }) .await { - | Err(e) => - Err!("Remote server did not have PDU or failed sending request to remote server: {e}"), + | Err(e) => { + Err!("Remote server did not have PDU or failed sending request to 
remote server: {e}") + }, | Ok(response) => { let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { @@ -374,8 +375,9 @@ pub(super) async fn change_log_level( .reload .reload(&old_filter_layer, Some(handles)) { - | Err(e) => - return Err!("Failed to modify and reload the global tracing log level: {e}"), + | Err(e) => { + return Err!("Failed to modify and reload the global tracing log level: {e}"); + }, | Ok(()) => { let value = &self.services.server.config.log; return Ok(format!( @@ -401,8 +403,9 @@ pub(super) async fn change_log_level( | Ok(()) => { return Ok("Successfully changed log level".to_owned()); }, - | Err(e) => - return Err!("Failed to modify and reload the global tracing log level: {e}"), + | Err(e) => { + return Err!("Failed to modify and reload the global tracing log level: {e}"); + }, } } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index b8d11d014..5e5a13953 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -349,7 +349,7 @@ async fn allowed_to_send_state_event( }, } }, - | StateEventType::RoomMember => + | StateEventType::RoomMember => { match json.deserialize_as_unchecked::() { | Ok(membership_content) => { let Ok(_state_key) = UserId::parse(state_key) else { @@ -394,7 +394,8 @@ async fn allowed_to_send_state_event( membership state: {e}" ))); }, - }, + } + }, | _ => (), } diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index b88f78b7c..09aad6f5d 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -122,11 +122,13 @@ pub(super) async fn auth( Err(BadRequest(UnknownToken { soft_logout: true }, "Expired access token.")) }, - | (AppserviceToken, User(_)) => - Err!(Request(Unauthorized("Appservice tokens must be used on this endpoint."))), + | (AppserviceToken, User(_)) => { + Err!(Request(Unauthorized("Appservice tokens must be used on this endpoint."))) + }, - | (ServerSignatures, Appservice(_) | User(_)) => - Err!(Request(Unauthorized("Server signatures 
must be used on this endpoint."))), + | (ServerSignatures, Appservice(_) | User(_)) => { + Err!(Request(Unauthorized("Server signatures must be used on this endpoint."))) + }, | (ServerSignatures, Token::None) => Ok(auth_server(services, request, json_body).await?), @@ -182,8 +184,9 @@ fn check_auth_still_required(services: &Services, metadata: &Metadata, token: &T .require_auth_for_profile_requests => match token { | Token::Appservice(_) | Token::User(_) => Ok(()), - | Token::None | Token::Expired(_) | Token::Invalid => - Err!(Request(MissingToken("Missing or invalid access token."))), + | Token::None | Token::Expired(_) | Token::Invalid => { + Err!(Request(MissingToken("Missing or invalid access token."))) + }, }, | &get_public_rooms::v3::Request::METADATA if !services @@ -192,8 +195,9 @@ fn check_auth_still_required(services: &Services, metadata: &Metadata, token: &T .allow_public_room_directory_without_auth => match token { | Token::Appservice(_) | Token::User(_) => Ok(()), - | Token::None | Token::Expired(_) | Token::Invalid => - Err!(Request(MissingToken("Missing or invalid access token."))), + | Token::None | Token::Expired(_) | Token::Invalid => { + Err!(Request(MissingToken("Missing or invalid access token."))) + }, }, | _ => Ok(()), } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index dbd826098..f066a70c5 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2137,10 +2137,7 @@ pub struct WellKnownConfig { } #[derive(Clone, Debug, Deserialize, Default)] -#[config_example_generator( - filename = "tuwunel-example.toml", - section = "global.media" -)] +#[config_example_generator(filename = "tuwunel-example.toml", section = "global.media")] pub struct MediaRetentionConfig { /// What to do with local media when an event referencing it is redacted. 
/// keep | delete_if_unreferenced | force_delete_local diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 375da716f..739dcaa47 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -30,7 +30,10 @@ pub async fn create_admin_room(&self) -> Result { let server_user = self.services.globals.server_user.as_ref(); let name = format!("{} Admin Room", self.services.config.server_name); - let topic = format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://github.com/matrix-construct/tuwunel/", self.services.config.server_name); + let topic = format!( + "Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://github.com/matrix-construct/tuwunel/", + self.services.config.server_name + ); self.services .create diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index c4d966887..01713b19f 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -143,7 +143,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result { if self.services.server.config.admin_room_notices { let welcome_message = String::from( - "## Thank you for trying out tuwunel!\n\nTuwunel is a continuation of conduwuit which was technically a hard fork of Conduit.\n\nHelpful links:\n> GitHub Repo: https://github.com/matrix-construct/tuwunel\n> Documentation: https://github.com/matrix-construct/tuwunel\n> Report issues: https://github.com/matri-construct/tuwunel/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`" + "## Thank you for trying out tuwunel!\n\nTuwunel is a continuation of conduwuit which was technically a hard fork of Conduit.\n\nHelpful links:\n> GitHub Repo: https://github.com/matrix-construct/tuwunel\n> Documentation: 
https://github.com/matrix-construct/tuwunel\n> Report issues: https://github.com/matri-construct/tuwunel/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`", ); // Send welcome message @@ -209,8 +209,9 @@ pub async fn revoke_admin(&self, user_id: &UserId) -> Result { | Err(e) => return Err!(error!(?e, "Failure occurred while attempting revoke.")), - | Ok(event) if !matches!(event.membership, Invite | Knock | Join) => - return Err!("Cannot revoke {user_id} in membership state {:?}.", event.membership), + | Ok(event) if !matches!(event.membership, Invite | Knock | Join) => { + return Err!("Cannot revoke {user_id} in membership state {:?}.", event.membership); + }, | Ok(event) => { assert!( diff --git a/src/service/command/run.rs b/src/service/command/run.rs index d7a9c3b72..6f72883d2 100644 --- a/src/service/command/run.rs +++ b/src/service/command/run.rs @@ -47,12 +47,12 @@ impl Service { let error = Error::from_panic(panic); error!("Panic while processing command: {error:?}"); Err(format!( - "Panic occurred while processing command:\n\ + "Panic occurred while processing command:\n\ ```\n\ {error:#?}\n\ ```\n\ Please submit a [bug report](https://github.com/matrix-construct/tuwunel/issues/new).🄺" - )) + )) }); let (output, err) = match result { diff --git a/src/service/media/data.rs b/src/service/media/data.rs index db1346d3b..7924638e0 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::Duration}; use futures::StreamExt; -use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; +use ruma::{Mxc, OwnedMxcUri, OwnedUserId, UserId, http_headers::ContentDisposition}; use tuwunel_core::{ Err, Result, debug, debug_info, err, utils::{ReadyExt, str_from_bytes, stream::TryIgnore, string_from_bytes}, @@ -153,6 +153,24 @@ impl Data { .await } + pub(super) async fn get_media_owner(&self, mxc: &str) -> 
Option { + let prefix = (mxc, Interfix); + let mut stream = self + .mediaid_user + .stream_prefix_raw(&prefix) + .ignore_err(); + + while let Some((_, raw_user)) = stream.next().await { + if let Ok(user) = string_from_bytes(raw_user) { + if let Ok(user_id) = OwnedUserId::try_from(user) { + return Some(user_id); + } + } + } + + None + } + /// Gets all the media keys in our database (this includes all the metadata /// associated with it such as width, height, content-type, etc) pub(crate) async fn get_all_media_keys(&self) -> Vec> { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 5ec6bd8f7..23d5ff23a 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,27 +1,40 @@ pub mod blurhash; mod data; -mod retention; pub(super) mod migrations; mod preview; mod remote; +mod retention; mod tests; mod thumbnail; -use std::{path::PathBuf, sync::Arc, time::{Duration, SystemTime}}; +use std::{ + collections::HashSet, + path::PathBuf, + sync::Arc, + time::{Duration, SystemTime}, +}; use async_trait::async_trait; use base64::{Engine as _, engine::general_purpose}; -use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; +use ruma::{ + Mxc, OwnedMxcUri, OwnedUserId, UserId, events::GlobalAccountDataEventType, + http_headers::ContentDisposition, +}; +use serde_json::Value; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt, BufReader}, }; use tuwunel_core::{ - Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, warn + Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace, + utils::{self, MutexMap}, + warn, }; -use self::data::{Data, Metadata}; -use self::retention::Retention; pub use self::thumbnail::Dim; +use self::{ + data::{Data, Metadata}, + retention::Retention, +}; #[derive(Debug)] pub struct FileMeta { @@ -34,7 +47,35 @@ pub struct Service { url_preview_mutex: MutexMap, pub(super) db: Data, services: Arc, - retention: Retention, + retention: Retention, 
}

/// Account-data event type under which a local user stores their media
/// retention preference; parsed by `parse_user_retention_preference`.
const MEDIA_RETENTION_ACCOUNT_DATA_KIND: &str = "im.tuwunel.media.retention";

/// A user's stored choice for redaction-driven deletion of media they
/// uploaded. Callers default to `Ask` when absent or unparseable.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UserRetentionPreference {
	/// Queue the media but wait for an explicit confirmation command.
	Ask,
	/// Delete automatically, no confirmation.
	Delete,
	/// Never delete this user's media on redaction.
	Keep,
}

/// Resolved action for one deletion candidate, combining the server
/// policy with the owner's preference.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CandidateAction {
	DeleteImmediately,
	AwaitConfirmation,
	Skip,
}

/// One piece of media discovered while processing a redaction.
#[derive(Debug, Clone)]
struct RetentionCandidate {
	/// Full `mxc://` URI of the media.
	mxc: String,
	/// Room of the redacted event, when known.
	room_id: Option<String>,
}

/// Outcome of `evaluate_retention_candidate` for one candidate.
#[derive(Debug, Clone)]
struct CandidateDecision {
	action: CandidateAction,
	/// Uploader (or event-sender fallback), when determinable.
	owner: Option<OwnedUserId>,
}

// NOTE(review): this span of the patch also carried partial hunks of the
// service `run()` implementation (startup-summary logging and spawning of
// the deletion worker with the configured grace period); their enclosing
// function lies outside this view and they are not reproduced here.
impl Service {
+ pub fn retention_insert_mxcs_on_event( + &self, + event_id: &str, + room_id: &str, + mxcs: &[(String, bool, String)], + ) { + self.retention + .insert_mxcs_on_event(event_id, room_id, mxcs); } pub async fn retention_decrement_on_redaction(&self, event_id: &str) { use self::retention::RetentionPolicy; - let policy = RetentionPolicy::from_str(self.services.server.config.media_retention_on_redaction()); - // try normal path - if let Ok(deleted) = self.retention.decrement_refcount_on_redaction(event_id, policy).await { - if !deleted.is_empty() { return; } + + let policy = RetentionPolicy::from_str( + self.services + .server + .config + .media_retention_on_redaction(), + ); + let mut candidates: Vec = Vec::new(); + let mut event_value: Option = None; + + if let Ok(primary) = self + .retention + .decrement_refcount_on_redaction(event_id, policy) + .await + { + if !primary.is_empty() { + candidates.extend( + primary + .into_iter() + .map(|(mxc, room_id)| RetentionCandidate { mxc, room_id: Some(room_id) }), + ); + } } - // fallback: attempt reconstruction if no mer: entries existed - // fetch original PDU JSON; if found, scan for mxc:// URIs - if let Ok(parsed_eid) = ruma::EventId::parse(event_id) { if let Ok(json_obj) = self.services.timeline.get_pdu_json(&parsed_eid).await { - let event_type = json_obj.get("type").cloned(); - let unsigned_val = json_obj.get("unsigned").cloned(); - let unsigned_keys = unsigned_val.as_ref().and_then(|u| u.as_object()).map(|obj| obj.keys().cloned().collect::>()); - let content_debug = json_obj.get("content").cloned(); - warn!(event_id, ?event_type, keys=?json_obj.keys().collect::>(), ?unsigned_keys, ?unsigned_val, ?content_debug, "retention: fallback inspecting raw event"); - let mut mxcs = std::collections::HashSet::new(); - fn scan(val: &ruma::CanonicalJsonValue, out: &mut std::collections::HashSet) { - match val { - ruma::CanonicalJsonValue::String(s) if s.starts_with("mxc://") => { out.insert(s.clone()); }, - 
ruma::CanonicalJsonValue::Object(map) => { for v in map.values() { scan(v, out); } }, - ruma::CanonicalJsonValue::Array(arr) => { for v in arr { scan(v, out); } }, - _ => {}, - } + + if let Ok(parsed_eid) = ruma::EventId::parse(event_id) { + match self + .services + .timeline + .get_pdu_json(&parsed_eid) + .await + { + | Ok(canonical) => match serde_json::to_value(&canonical) { + | Ok(val) => { + if candidates.is_empty() { + let mut discovered = HashSet::new(); + collect_mxcs(&val, &mut discovered); + if !discovered.is_empty() { + let room_id = val + .get("room_id") + .and_then(|v| v.as_str()) + .map(str::to_owned); + candidates.extend(discovered.into_iter().map(|mxc| { + RetentionCandidate { mxc, room_id: room_id.clone() } + })); + } + } + event_value = Some(val); + }, + | Err(e) => { + warn!(%event_id, "retention: failed to convert canonical event to json value: {e}") + }, + }, + | Err(e) => { + debug_warn!(%event_id, "retention: unable to load original event for redaction: {e}") + }, } - scan(&ruma::CanonicalJsonValue::Object(json_obj.clone()), &mut mxcs); - let mxcs_len = mxcs.len(); - for mxc in mxcs { - if policy == RetentionPolicy::ForceDeleteLocal { - self.retention.queue_media_for_deletion(&mxc); + } + + if candidates.is_empty() { + debug!(%event_id, "retention: no media discovered for redaction"); + return; + } + + for candidate in candidates { + let decision = self + .evaluate_retention_candidate(policy, event_value.as_ref(), &candidate) + .await; + + match (decision.action, decision.owner) { + | (CandidateAction::DeleteImmediately, owner) => { + self.retention.queue_media_for_deletion( + &candidate.mxc, + owner.as_deref(), + false, + ); + }, + | (CandidateAction::AwaitConfirmation, Some(owner)) => { + self.retention.queue_media_for_deletion( + &candidate.mxc, + Some(owner.as_ref()), + true, + ); + + if self + .services + .globals + .user_is_local(owner.as_ref()) + { + let body = self.build_retention_notice(&candidate, event_value.as_ref()); + if let 
Err(e) = self + .services + .userroom + .send_text(owner.as_ref(), &body) + .await + { + warn!( + %event_id, + mxc = %candidate.mxc, + user = owner.as_str(), + "retention: failed to notify user about pending deletion: {e}", + ); + } else { + debug_info!( + %event_id, + mxc = %candidate.mxc, + user = owner.as_str(), + "retention: sent user confirmation request" + ); + } + } + }, + | (CandidateAction::AwaitConfirmation, None) => { + warn!(%event_id, mxc = %candidate.mxc, "retention: confirmation requested but owner is unknown"); + }, + | (CandidateAction::Skip, _) => { + debug!(%event_id, mxc = %candidate.mxc, "retention: skipping deletion for candidate"); + }, + } + } + } + + async fn evaluate_retention_candidate( + &self, + policy: retention::RetentionPolicy, + event_value: Option<&Value>, + candidate: &RetentionCandidate, + ) -> CandidateDecision { + use self::retention::RetentionPolicy; + + if matches!(policy, RetentionPolicy::Keep) { + return CandidateDecision { + action: CandidateAction::Skip, + owner: None, + }; + } + + let mut owner = self.db.get_media_owner(&candidate.mxc).await; + if owner.is_none() { + if let Some(val) = event_value { + if let Some(sender) = val.get("sender").and_then(|s| s.as_str()) { + if let Ok(parsed) = OwnedUserId::try_from(sender.to_owned()) { + owner = Some(parsed); + } } } - warn!(event_id, count=mxcs_len, "retention: fallback redaction scan queued media"); }} + } + + if let Some(owner_id) = owner.as_ref() { + if !self + .services + .globals + .user_is_local(owner_id.as_ref()) + { + let action = match policy { + | RetentionPolicy::Keep => CandidateAction::Skip, + | RetentionPolicy::DeleteIfUnreferenced + | RetentionPolicy::ForceDeleteLocal => CandidateAction::DeleteImmediately, + }; + return CandidateDecision { action, owner }; + } + + match self + .user_retention_preference(owner_id.as_ref()) + .await + { + | UserRetentionPreference::Delete => CandidateDecision { + action: CandidateAction::DeleteImmediately, + owner, + }, + | 
UserRetentionPreference::Keep => + CandidateDecision { action: CandidateAction::Skip, owner }, + | UserRetentionPreference::Ask => CandidateDecision { + action: CandidateAction::AwaitConfirmation, + owner, + }, + } + } else { + let action = match policy { + | RetentionPolicy::Keep => CandidateAction::Skip, + | RetentionPolicy::DeleteIfUnreferenced | RetentionPolicy::ForceDeleteLocal => + CandidateAction::DeleteImmediately, + }; + CandidateDecision { action, owner: None } + } + } + + fn build_retention_notice( + &self, + candidate: &RetentionCandidate, + event_value: Option<&Value>, + ) -> String { + let room_segment = candidate + .room_id + .as_deref() + .map(|room| format!(" in room {room}")) + .unwrap_or_default(); + + let timestamp = event_value + .and_then(|val| val.get("origin_server_ts")) + .and_then(canonical_json_to_u64) + .map(|ts| format!(" at {ts}")) + .unwrap_or_default(); + + format!( + "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending deletion. \ + Run `!user retention confirm {mxc}` here to delete it now, or update your media retention preference to keep it.", + mxc = candidate.mxc + ) + } + + pub async fn retention_confirm_deletion(&self, user: &UserId, mxc: &str) -> Result { + self.retention.confirm_candidate(self, mxc, user).await + } + + async fn user_retention_preference(&self, user: &UserId) -> UserRetentionPreference { + if !self.services.globals.user_is_local(user) { + return UserRetentionPreference::Delete; + } + + let kind = GlobalAccountDataEventType::from(MEDIA_RETENTION_ACCOUNT_DATA_KIND); + match self + .services + .account_data + .get_global::(user, kind) + .await + { + | Ok(value) => + parse_user_retention_preference(&value).unwrap_or(UserRetentionPreference::Ask), + | Err(e) => { + debug!(user = user.as_str(), "retention: failed to load user preference: {e}"); + UserRetentionPreference::Ask + }, + } } /// Uploads a file. 
@@ -477,6 +745,63 @@ impl Service { } } +fn parse_user_retention_preference(value: &Value) -> Option { + if let Some(mode) = value.get("mode").and_then(|v| v.as_str()) { + return match mode { + | "delete" | "auto" => Some(UserRetentionPreference::Delete), + | "keep" => Some(UserRetentionPreference::Keep), + | "ask" => Some(UserRetentionPreference::Ask), + | _ => None, + }; + } + + if let Some(confirm) = value + .get("confirm_before_delete") + .and_then(|v| v.as_bool()) + { + return Some(if confirm { + UserRetentionPreference::Ask + } else { + UserRetentionPreference::Delete + }); + } + + if let Some(keep) = value.get("retain").and_then(|v| v.as_bool()) { + return Some(if keep { + UserRetentionPreference::Keep + } else { + UserRetentionPreference::Delete + }); + } + + None +} + +fn collect_mxcs(value: &Value, out: &mut HashSet) { + match value { + | Value::String(s) if s.starts_with("mxc://") => { + out.insert(s.to_owned()); + }, + | Value::Array(arr) => + for item in arr { + collect_mxcs(item, out); + }, + | Value::Object(map) => + for item in map.values() { + collect_mxcs(item, out); + }, + | _ => {}, + } +} + +fn canonical_json_to_u64(value: &Value) -> Option { + match value { + | Value::Number(num) => num.as_u64(), + | Value::String(s) => s.parse::().ok(), + | _ => None, + } +} + #[inline] #[must_use] pub fn encode_key(key: &[u8]) -> String { general_purpose::URL_SAFE_NO_PAD.encode(key) } diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 674755484..a1a797922 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -1,6 +1,11 @@ -use std::{path::PathBuf, sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}}; +use std::{ + path::PathBuf, + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; use futures::StreamExt; +use ruma::UserId; use serde::{Deserialize, Serialize}; use tuwunel_core::{Result, err, trace, warn}; use tuwunel_database::{Cbor, Deserialized, Map, keyval::serialize_val}; @@ -14,262 
+19,373 @@ const K_QUEUE: &str = "qdel:"; // qdel: => DeletionCandidate #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct MediaRef { - pub refcount: i64, - pub local: bool, - pub first_seen_ts: u64, - pub last_seen_ts: u64, + pub refcount: i64, + pub local: bool, + pub first_seen_ts: u64, + pub last_seen_ts: u64, } #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct MediaEventRef { - pub mxc: String, - pub room_id: String, - pub kind: String, // "content.url", "thumbnail_url" + pub mxc: String, + pub room_id: String, + pub kind: String, // "content.url", "thumbnail_url" } #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct DeletionCandidate { - pub mxc: String, - pub enqueued_ts: u64, + pub mxc: String, + pub enqueued_ts: u64, + #[serde(default)] + pub user_id: Option, + #[serde(default)] + pub awaiting_confirmation: bool, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(super) enum RetentionPolicy { - Keep, - DeleteIfUnreferenced, - ForceDeleteLocal, + Keep, + DeleteIfUnreferenced, + ForceDeleteLocal, } impl RetentionPolicy { - pub(super) fn from_str(s: &str) -> Self { - match s { - "delete_if_unreferenced" => Self::DeleteIfUnreferenced, - "force_delete_local" => Self::ForceDeleteLocal, - _ => Self::Keep, - } - } + pub(super) fn from_str(s: &str) -> Self { + match s { + | "delete_if_unreferenced" => Self::DeleteIfUnreferenced, + | "force_delete_local" => Self::ForceDeleteLocal, + | _ => Self::Keep, + } + } } #[derive(Clone)] pub(super) struct Retention { - cf: Arc, + cf: Arc, } impl Retention { - pub(super) fn new(db: &Arc) -> Self { - Self { cf: db["media_retention"].clone() } - } - - #[inline] - fn key_mref(mxc: &str) -> String { format!("{K_MREF}{mxc}") } - - #[inline] - fn key_mer(event_id: &str, kind: &str) -> String { format!("{K_MER}{event_id}:{kind}") } - - #[inline] - fn key_queue(mxc: &str) -> String { format!("{K_QUEUE}{mxc}") } - - #[allow(dead_code)] - pub(super) async fn get_media_ref(&self, mxc: &str) -> 
Result> { - match self.cf.get(&Self::key_mref(mxc)).await { - Ok(handle) => Ok(Some(handle.deserialized::>()?.0)), - Err(_) => Ok(None), - } - } - - #[allow(dead_code)] - pub(super) fn put_media_ref(&self, mxc: &str, mr: &MediaRef) { self.cf.raw_put(Self::key_mref(mxc), Cbor(mr)); } - - #[allow(dead_code)] - pub(super) async fn get_media_event_ref(&self, event_id: &str, kind: &str) -> Result> { - match self.cf.get(&Self::key_mer(event_id, kind)).await { - Ok(handle) => Ok(Some(handle.deserialized::>()?.0)), - Err(_) => Ok(None), - } - } - - #[allow(dead_code)] - pub(super) fn put_media_event_ref(&self, event_id: &str, mer: &MediaEventRef) { - let key = Self::key_mer(event_id, &mer.kind); - self.cf.raw_put(key, Cbor(mer)); - } - - /// insert/update references for a newly created or edited event. - /// - /// assumptions: - /// - `mxcs` is a slice of (mxc_uri, local, kind) - pub(super) fn insert_mxcs_on_event(&self, event_id: &str, room_id: &str, mxcs: &[(String, bool, String)]) { - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - if mxcs.is_empty() { - warn!(%event_id, "retention: insert called with zero MXCs"); - return; - } - warn!(%event_id, count = mxcs.len(), %room_id, "retention: inserting media refs for event"); - - let mut puts: Vec<(Vec, Vec)> = Vec::with_capacity(mxcs.len() * 2); - for (mxc, local, kind) in mxcs.iter() { - // update MediaEventRef - let mer = MediaEventRef { mxc: mxc.clone(), room_id: room_id.to_owned(), kind: kind.clone() }; - let key_mer = Self::key_mer(event_id, kind).into_bytes(); - let val_mer = serialize_val(Cbor(&mer)).expect("serialize mer").to_vec(); - puts.push((key_mer, val_mer)); - - // upsert MediaRef - let key_mref = Self::key_mref(mxc); - let current = self.cf.get_blocking(&key_mref); - let (mr, new) = match current.and_then(|h| h.deserialized::>()) { - Ok(Cbor(mut v)) => { - v.refcount = v.refcount.saturating_add(1); - v.last_seen_ts = now; - (v, false) - }, - _ => (MediaRef { refcount: 
1, local: *local, first_seen_ts: now, last_seen_ts: now }, true), - }; - if new { - warn!(%event_id, %mxc, %kind, local = local, refcount = mr.refcount, "retention: new media ref"); - } else { - warn!(%event_id, %mxc, %kind, local = local, refcount = mr.refcount, "retention: increment media ref"); - } - let val_mref = serialize_val(Cbor(&mr)).expect("serialize mref").to_vec(); - puts.push((key_mref.into_bytes(), val_mref)); - } - self.cf.write_batch_raw(puts, std::iter::empty()); - } - - /// decrement refcounts for all MediaEventRef mapped by this event id. - /// if policy is set to delete unreferenced/local, enqueue for deletion - pub(super) async fn decrement_refcount_on_redaction( - &self, - event_id: &str, - policy: RetentionPolicy, - ) -> Result> { - warn!(%event_id, ?policy, "retention: redaction decrement start"); - let prefix = format!("{K_MER}{event_id}:"); - let prefixb = prefix.as_bytes().to_vec(); - let mut to_delete: Vec = Vec::new(); - let mut puts: Vec<(Vec, Vec)> = Vec::new(); - let mut dels: Vec> = Vec::new(); - let mut processed = 0usize; - - let mut stream = self.cf.stream_raw_prefix::<&str, Cbor, _>(&prefixb); - while let Some(item) = stream.next().await.transpose()? 
{ - let (key, Cbor(mer)) = item; - processed = processed.saturating_add(1); - // load MediaRef - let key_mref = Self::key_mref(&mer.mxc); - let current = self.cf.get(&key_mref).await.ok(); - if let Some(handle) = current { - let Cbor(mut mr): Cbor = handle.deserialized::>()?; - mr.refcount = mr.refcount.saturating_sub(1); - mr.last_seen_ts = now_secs(); - let should_queue = match policy { - RetentionPolicy::Keep => false, - RetentionPolicy::DeleteIfUnreferenced => mr.refcount == 0, - RetentionPolicy::ForceDeleteLocal => mr.local, - }; - warn!(%event_id, mxc = %mer.mxc, kind = %mer.kind, new_refcount = mr.refcount, should_queue, local = mr.local, "retention: redaction updated ref"); - let val_mref = serialize_val(Cbor(&mr))?.to_vec(); - puts.push((key_mref.into_bytes(), val_mref)); - if should_queue { - let kq = Self::key_queue(&mer.mxc).into_bytes(); - let vq = serialize_val(Cbor(&DeletionCandidate { mxc: mer.mxc.clone(), enqueued_ts: now_secs() }))?.to_vec(); - puts.push((kq, vq)); - warn!(%event_id, mxc = %mer.mxc, "retention: queued media for deletion"); - to_delete.push(mer.mxc.clone()); - } - } - - // remove the mer entry regardless - dels.push(key.as_bytes().to_vec()); - } - self.cf.write_batch_raw(puts, dels); - if processed == 0 { - warn!(%event_id, "retention: no media event refs found on redaction; did insert run during creation?"); - } - warn!(%event_id, queued = to_delete.len(), processed, "retention: redaction decrement complete"); - Ok(to_delete) - } - - /// qeue a media item for deletion (idempotent best-effort). - pub(super) fn queue_media_for_deletion(&self, mxc: &str) { - let key = Self::key_queue(mxc); - // overwrite / insert candidate with fresh timestamp - let cand = DeletionCandidate { mxc: mxc.to_owned(), enqueued_ts: now_secs() }; - warn!(mxc, "retention: fallback queue media for deletion"); - self.cf.raw_put(key, Cbor(&cand)); - } - - /// worker: processes queued deletion candidates after grace period. 
- pub(super) async fn worker_process_queue(&self, service: &Service, grace: Duration) -> Result<()> { - let prefix = K_QUEUE.as_bytes(); - warn!(?grace, "retention: worker iteration start"); - let mut stream = self.cf.stream_raw_prefix::<&str, Cbor, _>(&prefix); - let mut processed = 0usize; - let mut deleted = 0usize; - while let Some(item) = stream.next().await.transpose()? { - let (key, Cbor(cand)) = item; - let now = now_secs(); - if now < cand.enqueued_ts.saturating_add(grace.as_secs()) { - warn!(mxc = %cand.mxc, wait = cand.enqueued_ts + grace.as_secs() - now, "retention: grace period not met yet"); - continue; - } - - // attempt deletion of local media files - let deleted_bytes = self.delete_local_media(service, &cand.mxc).await.unwrap_or(0); - if deleted_bytes > 0 { - warn!(mxc = %cand.mxc, bytes = deleted_bytes, "retention: media deleted"); - } else { - warn!(mxc = %cand.mxc, "retention: queued media had no bytes deleted (already gone?)"); - } - - // remove metadata entries (best-effort) - let dels = vec![key.as_bytes().to_vec(), Self::key_mref(&cand.mxc).into_bytes()]; - self.cf.write_batch_raw(std::iter::empty(), dels); - processed = processed.saturating_add(1); - deleted = deleted.saturating_add(1); - } - if processed == 0 { - warn!("retention: worker iteration found no deletion candidates"); - } else { - warn!(processed, deleted, "retention: worker iteration complete"); - } - - Ok(()) - } - - async fn delete_local_media(&self, service: &Service, mxc: &str) -> Result { - // delete original + thumbnails (any dimensions) - use ruma::Mxc; - let mxc_parsed: Mxc<'_> = mxc.try_into().map_err(|_| err!(Request(BadJson("invalid mxc"))))?; - - // delete originals - let keys = service.db.search_mxc_metadata_prefix(&mxc_parsed).await.unwrap_or_default(); - let mut total = 0u64; - for key in keys { - let path = service.get_media_file(&key); - total = total.saturating_add(remove_file_tolerant(path)); - let legacy = service.get_media_file_b64(&key); - total = 
total.saturating_add(remove_file_tolerant(legacy)); - } + pub(super) fn new(db: &Arc) -> Self { + Self { cf: db["media_retention"].clone() } + } + + #[inline] + fn key_mref(mxc: &str) -> String { format!("{K_MREF}{mxc}") } + + #[inline] + fn key_mer(event_id: &str, kind: &str) -> String { format!("{K_MER}{event_id}:{kind}") } + + #[inline] + fn key_queue(mxc: &str) -> String { format!("{K_QUEUE}{mxc}") } + + #[allow(dead_code)] + pub(super) async fn get_media_ref(&self, mxc: &str) -> Result> { + match self.cf.get(&Self::key_mref(mxc)).await { + | Ok(handle) => Ok(Some(handle.deserialized::>()?.0)), + | Err(_) => Ok(None), + } + } + + #[allow(dead_code)] + pub(super) fn put_media_ref(&self, mxc: &str, mr: &MediaRef) { + self.cf.raw_put(Self::key_mref(mxc), Cbor(mr)); + } + + #[allow(dead_code)] + pub(super) async fn get_media_event_ref( + &self, + event_id: &str, + kind: &str, + ) -> Result> { + match self.cf.get(&Self::key_mer(event_id, kind)).await { + | Ok(handle) => Ok(Some(handle.deserialized::>()?.0)), + | Err(_) => Ok(None), + } + } + + #[allow(dead_code)] + pub(super) fn put_media_event_ref(&self, event_id: &str, mer: &MediaEventRef) { + let key = Self::key_mer(event_id, &mer.kind); + self.cf.raw_put(key, Cbor(mer)); + } + + /// insert/update references for a newly created or edited event. 
+ /// + /// assumptions: + /// - `mxcs` is a slice of (mxc_uri, local, kind) + pub(super) fn insert_mxcs_on_event( + &self, + event_id: &str, + room_id: &str, + mxcs: &[(String, bool, String)], + ) { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + if mxcs.is_empty() { + warn!(%event_id, "retention: insert called with zero MXCs"); + return; + } + warn!(%event_id, count = mxcs.len(), %room_id, "retention: inserting media refs for event"); + + let mut puts: Vec<(Vec, Vec)> = Vec::with_capacity(mxcs.len() * 2); + for (mxc, local, kind) in mxcs.iter() { + // update MediaEventRef + let mer = MediaEventRef { + mxc: mxc.clone(), + room_id: room_id.to_owned(), + kind: kind.clone(), + }; + let key_mer = Self::key_mer(event_id, kind).into_bytes(); + let val_mer = serialize_val(Cbor(&mer)) + .expect("serialize mer") + .to_vec(); + puts.push((key_mer, val_mer)); + + // upsert MediaRef + let key_mref = Self::key_mref(mxc); + let current = self.cf.get_blocking(&key_mref); + let (mr, new) = match current.and_then(|h| h.deserialized::>()) { + | Ok(Cbor(mut v)) => { + v.refcount = v.refcount.saturating_add(1); + v.last_seen_ts = now; + (v, false) + }, + | _ => ( + MediaRef { + refcount: 1, + local: *local, + first_seen_ts: now, + last_seen_ts: now, + }, + true, + ), + }; + if new { + warn!(%event_id, %mxc, %kind, local = local, refcount = mr.refcount, "retention: new media ref"); + } else { + warn!(%event_id, %mxc, %kind, local = local, refcount = mr.refcount, "retention: increment media ref"); + } + let val_mref = serialize_val(Cbor(&mr)) + .expect("serialize mref") + .to_vec(); + puts.push((key_mref.into_bytes(), val_mref)); + } + self.cf.write_batch_raw(puts, std::iter::empty()); + } + + /// decrement refcounts for all MediaEventRef mapped by this event id. 
+ /// if policy is set to delete unreferenced/local, enqueue for deletion + pub(super) async fn decrement_refcount_on_redaction( + &self, + event_id: &str, + policy: RetentionPolicy, + ) -> Result> { + warn!(%event_id, ?policy, "retention: redaction decrement start"); + let prefix = format!("{K_MER}{event_id}:"); + let prefixb = prefix.as_bytes().to_vec(); + let mut to_delete: Vec<(String, String)> = Vec::new(); + let mut puts: Vec<(Vec, Vec)> = Vec::new(); + let mut dels: Vec> = Vec::new(); + let mut processed = 0usize; + + let mut stream = self + .cf + .stream_raw_prefix::<&str, Cbor, _>(&prefixb); + while let Some(item) = stream.next().await.transpose()? { + let (key, Cbor(mer)) = item; + processed = processed.saturating_add(1); + // load MediaRef + let key_mref = Self::key_mref(&mer.mxc); + let current = self.cf.get(&key_mref).await.ok(); + if let Some(handle) = current { + let Cbor(mut mr): Cbor = handle.deserialized::>()?; + mr.refcount = mr.refcount.saturating_sub(1); + mr.last_seen_ts = now_secs(); + let should_queue = match policy { + | RetentionPolicy::Keep => false, + | RetentionPolicy::DeleteIfUnreferenced => mr.refcount == 0, + | RetentionPolicy::ForceDeleteLocal => mr.local, + }; + warn!(%event_id, mxc = %mer.mxc, kind = %mer.kind, new_refcount = mr.refcount, should_queue, local = mr.local, "retention: redaction updated ref"); + let val_mref = serialize_val(Cbor(&mr))?.to_vec(); + puts.push((key_mref.into_bytes(), val_mref)); + if should_queue { + warn!(%event_id, mxc = %mer.mxc, room = %mer.room_id, "retention: media candidate ready for deletion"); + to_delete.push((mer.mxc.clone(), mer.room_id.clone())); + } + } + + // remove the mer entry regardless + dels.push(key.as_bytes().to_vec()); + } + self.cf.write_batch_raw(puts, dels); + if processed == 0 { + warn!(%event_id, "retention: no media event refs found on redaction; did insert run during creation?"); + } + warn!(%event_id, queued = to_delete.len(), processed, "retention: redaction decrement 
complete");
+ Ok(to_delete)
+ }
+
+ /// queue a media item for deletion (idempotent best-effort).
+ pub(super) fn queue_media_for_deletion(
+ &self,
+ mxc: &str,
+ owner: Option<&UserId>,
+ awaiting_confirmation: bool,
+ ) {
+ let key = Self::key_queue(mxc);
+ // overwrite / insert candidate with fresh timestamp
+ let cand = DeletionCandidate {
+ mxc: mxc.to_owned(),
+ enqueued_ts: now_secs(),
+ user_id: owner.map(|u| u.to_string()),
+ awaiting_confirmation,
+ };
+ warn!(
+ mxc,
+ awaiting_confirmation,
+ owner = owner.map(UserId::as_str),
+ "retention: queue media for deletion"
+ );
+ self.cf.raw_put(key, Cbor(&cand));
+ }
+
+ pub(super) async fn confirm_candidate(
+ &self,
+ service: &Service,
+ mxc: &str,
+ requester: &UserId,
+ ) -> Result {
+ let key = Self::key_queue(mxc);
+ let handle = self
+ .cf
+ .get(&key)
+ .await
+ .map_err(|_| err!(Request(NotFound("no pending deletion for this media"))))?;
+ let Cbor(mut candidate) = handle.deserialized::>()?;
+
+ let Some(owner) = candidate.user_id.as_deref() else {
+ return Err(err!(Request(Forbidden("media candidate owner unknown"))));
+ };
+ if owner != requester.as_str() {
+ return Err(err!(Request(Forbidden("media candidate owned by another user"))));
+ }
+ if !candidate.awaiting_confirmation {
+ return Err(err!(Request(InvalidParam(
+ "media deletion already processed",
+ ))));
+ }
+
+ candidate.awaiting_confirmation = false;
+ candidate.enqueued_ts = now_secs();
+
+ let deleted_bytes = self.delete_local_media(service, mxc).await?;
+ let mut dels = Vec::with_capacity(2);
+ dels.push(key.into_bytes());
+ dels.push(Self::key_mref(mxc).into_bytes());
+ self.cf.write_batch_raw(std::iter::empty(), dels);
+ warn!(mxc, bytes = deleted_bytes, user = requester.as_str(), "retention: media deletion confirmed by user");
+ Ok(deleted_bytes)
+ }
+
+ /// worker: processes queued deletion candidates after grace period. 
+ pub(super) async fn worker_process_queue( + &self, + service: &Service, + grace: Duration, + ) -> Result<()> { + let prefix = K_QUEUE.as_bytes(); + warn!(?grace, "retention: worker iteration start"); + let mut stream = self + .cf + .stream_raw_prefix::<&str, Cbor, _>(&prefix); + let mut processed = 0usize; + let mut deleted = 0usize; + while let Some(item) = stream.next().await.transpose()? { + let (key, Cbor(cand)) = item; + let now = now_secs(); + if cand.awaiting_confirmation { + warn!(mxc = %cand.mxc, "retention: awaiting user confirmation, skipping candidate"); + continue; + } + + if now < cand.enqueued_ts.saturating_add(grace.as_secs()) { + warn!(mxc = %cand.mxc, wait = cand.enqueued_ts + grace.as_secs() - now, "retention: grace period not met yet"); + continue; + } + + // attempt deletion of local media files + let deleted_bytes = self + .delete_local_media(service, &cand.mxc) + .await + .unwrap_or(0); + if deleted_bytes > 0 { + warn!(mxc = %cand.mxc, bytes = deleted_bytes, "retention: media deleted"); + } else { + warn!(mxc = %cand.mxc, "retention: queued media had no bytes deleted (already gone?)"); + } + + // remove metadata entries (best-effort) + let dels = vec![key.as_bytes().to_vec(), Self::key_mref(&cand.mxc).into_bytes()]; + self.cf.write_batch_raw(std::iter::empty(), dels); + processed = processed.saturating_add(1); + deleted = deleted.saturating_add(1); + } + if processed == 0 { + warn!("retention: worker iteration found no deletion candidates"); + } else { + warn!(processed, deleted, "retention: worker iteration complete"); + } + + Ok(()) + } + + async fn delete_local_media(&self, service: &Service, mxc: &str) -> Result { + // delete original + thumbnails (any dimensions) + use ruma::Mxc; + let mxc_parsed: Mxc<'_> = mxc + .try_into() + .map_err(|_| err!(Request(BadJson("invalid mxc"))))?; + + // delete originals + let keys = service + .db + .search_mxc_metadata_prefix(&mxc_parsed) + .await + .unwrap_or_default(); + let mut total = 0u64; + for 
key in keys { + let path = service.get_media_file(&key); + total = total.saturating_add(remove_file_tolerant(path)); + let legacy = service.get_media_file_b64(&key); + total = total.saturating_add(remove_file_tolerant(legacy)); + } warn!("retention: total bytes deleted {total}"); - Ok(total) - } + Ok(total) + } } -fn now_secs() -> u64 { SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs() } +fn now_secs() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs() +} fn remove_file_tolerant(path: PathBuf) -> u64 { - match std::fs::metadata(&path) { - Ok(meta) => { - let len = meta.len(); - if let Err(e) = std::fs::remove_file(&path) { - trace!(?path, "ignore remove error: {e}"); - 0 - } else { - trace!(?path, "removed"); - len - } - }, - Err(_) => 0, - } + match std::fs::metadata(&path) { + | Ok(meta) => { + let len = meta.len(); + if let Err(e) = std::fs::remove_file(&path) { + trace!(?path, "ignore remove error: {e}"); + 0 + } else { + trace!(?path, "removed"); + len + } + }, + | Err(_) => 0, + } } diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 8118b0ef6..5713ec656 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -4,9 +4,10 @@ use std::{ }; use futures::StreamExt; +use std::pin::Pin; use tracing::warn; use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedUserId, RoomId, RoomVersionId, UserId, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedUserId, RoomId, RoomVersionId, UserId, events::{ GlobalAccountDataEventType, TimelineEventType, push_rules::PushRulesEvent, @@ -80,26 +81,26 @@ where Ok(Some(pdu_id)) } -/// Creates a new persisted data unit and adds it to a room. -/// -/// By this point the incoming event should be fully authenticated, no auth -/// happens in `append_pdu`. 
-/// -/// Returns pdu id -#[implement(super::Service)] -#[tracing::instrument(name = "append", level = "debug", skip_all, ret(Debug))] -pub async fn append_pdu<'a, Leafs>( - &'a self, - pdu: &'a PduEvent, - mut pdu_json: CanonicalJsonObject, - leafs: Leafs, - state_lock: &'a RoomMutexGuard, -) -> Result -where - Leafs: Iterator + Send + 'a, -{ +impl super::Service { + /// Creates a new persisted data unit and adds it to a room. + /// + /// By this point the incoming event should be fully authenticated, no auth + /// happens in `append_pdu`. + /// + /// Returns pdu id + async fn append_pdu_inner<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, + mut pdu_json: CanonicalJsonObject, + leafs: Leafs, + state_lock: &'a RoomMutexGuard, + ) -> Result<(RawPduId, Vec)> + where + Leafs: Iterator + Send + 'a, + { // Coalesce database writes for the remainder of this scope. let _cork = self.db.db.cork_and_flush(); + let mut retention_targets: Vec = Vec::new(); let shortroomid = self .services @@ -306,8 +307,7 @@ where { self.redact_pdu(redact_id, pdu, shortroomid) .await?; - // media retention decrement - self.services.media.retention_decrement_on_redaction(redact_id.as_str()).await; + retention_targets.push(redact_id.to_owned()); } } }, @@ -322,7 +322,7 @@ where { self.redact_pdu(redact_id, pdu, shortroomid) .await?; - self.services.media.retention_decrement_on_redaction(redact_id.as_str()).await; + retention_targets.push(redact_id.to_owned()); } } }, @@ -511,6 +511,53 @@ where } } + Ok((pdu_id, retention_targets)) + } +} + +#[implement(super::Service)] +#[tracing::instrument(name = "append", level = "debug", skip_all, ret(Debug))] +pub async fn append_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, + pdu_json: CanonicalJsonObject, + leafs: Leafs, + state_lock: &'a RoomMutexGuard, +) -> Result +where + Leafs: Iterator + Send + 'a, +{ + let (pdu_id, retention_targets) = self + .append_pdu_inner::(pdu, pdu_json, leafs, state_lock) + .await?; + + for event_id in retention_targets { + 
let fut = self + .services + .media + .retention_decrement_on_redaction(event_id.as_str()); + Pin::from(Box::new(fut)).await; + } + + Ok(pdu_id) +} + +#[implement(super::Service)] +#[tracing::instrument(name = "append", level = "debug", skip_all, ret(Debug))] +pub async fn append_pdu_without_retention<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, + pdu_json: CanonicalJsonObject, + leafs: Leafs, + state_lock: &'a RoomMutexGuard, +) -> Result +where + Leafs: Iterator + Send + 'a, +{ + let (pdu_id, _) = self + .append_pdu_inner::(pdu, pdu_json, leafs, state_lock) + .await?; + Ok(pdu_id) } diff --git a/src/service/rooms/timeline/backfill.rs b/src/service/rooms/timeline/backfill.rs index 229d9a1e3..54a5b5fbd 100644 --- a/src/service/rooms/timeline/backfill.rs +++ b/src/service/rooms/timeline/backfill.rs @@ -10,8 +10,8 @@ use ruma::{ events::{ StateEventType, TimelineEventType, room::{ + message::{MessageType, RoomMessageEventContent}, power_levels::RoomPowerLevelsEventContent, - message::{RoomMessageEventContent, MessageType}, redaction::RoomRedactionEventContent, }, }, @@ -222,33 +222,66 @@ pub async fn backfill_pdu( if let Ok(full) = pdu.get_content::() { use ruma::events::room::MediaSource; let mut mxcs: Vec<(String, bool, String)> = Vec::new(); - let push_plain = |mxcs: &mut Vec<(String, bool, String)>, src: &MediaSource, label: &str| { - if let MediaSource::Plain(mxc) = src { let s = mxc.to_string(); if s.starts_with("mxc://") { mxcs.push((s, true, label.to_owned())); } } - }; + let push_plain = + |mxcs: &mut Vec<(String, bool, String)>, src: &MediaSource, label: &str| { + if let MediaSource::Plain(mxc) = src { + let s = mxc.to_string(); + if s.starts_with("mxc://") { + mxcs.push((s, true, label.to_owned())); + } + } + }; match &full.msgtype { - MessageType::Image(c) => { + | MessageType::Image(c) => { push_plain(&mut mxcs, &c.source, "image.source"); - if let Some(info) = c.info.as_ref() { if let Some(th) = info.thumbnail_source.as_ref() { push_plain(&mut mxcs, th, 
"image.thumbnail_source"); } } + if let Some(info) = c.info.as_ref() { + if let Some(th) = info.thumbnail_source.as_ref() { + push_plain(&mut mxcs, th, "image.thumbnail_source"); + } + } }, - MessageType::File(c) => { + | MessageType::File(c) => { push_plain(&mut mxcs, &c.source, "file.source"); - if let Some(info) = c.info.as_ref() { if let Some(th) = info.thumbnail_source.as_ref() { push_plain(&mut mxcs, th, "file.thumbnail_source"); } } + if let Some(info) = c.info.as_ref() { + if let Some(th) = info.thumbnail_source.as_ref() { + push_plain(&mut mxcs, th, "file.thumbnail_source"); + } + } }, - MessageType::Video(c) => { + | MessageType::Video(c) => { push_plain(&mut mxcs, &c.source, "video.source"); - if let Some(info) = c.info.as_ref() { if let Some(th) = info.thumbnail_source.as_ref() { push_plain(&mut mxcs, th, "video.thumbnail_source"); } } + if let Some(info) = c.info.as_ref() { + if let Some(th) = info.thumbnail_source.as_ref() { + push_plain(&mut mxcs, th, "video.thumbnail_source"); + } + } }, - MessageType::Audio(c) => { + | MessageType::Audio(c) => { push_plain(&mut mxcs, &c.source, "audio.source"); }, - _ => {}, + | _ => {}, + } + if !mxcs.is_empty() { + self.services + .media + .retention_insert_mxcs_on_event( + pdu.event_id().as_str(), + pdu.room_id().as_str(), + &mxcs, + ); } - if !mxcs.is_empty() { self.services.media.retention_insert_mxcs_on_event(pdu.event_id().as_str(), pdu.room_id().as_str(), &mxcs); } } } if pdu.kind == TimelineEventType::RoomRedaction { - if let Ok(red) = pdu.get_content::() { if let Some(rid) = red.redacts { self.services.media.retention_decrement_on_redaction(rid.as_str()).await; } } + if let Ok(red) = pdu.get_content::() { + if let Some(rid) = red.redacts { + self.services + .media + .retention_decrement_on_redaction(rid.as_str()) + .await; + } + } } drop(mutex_lock); diff --git a/src/service/rooms/timeline/build.rs b/src/service/rooms/timeline/build.rs index 62226afc7..218482737 100644 --- 
a/src/service/rooms/timeline/build.rs
+++ b/src/service/rooms/timeline/build.rs
@@ -1,6 +1,6 @@
 use std::{collections::HashSet, iter::once};
 
-use futures::{FutureExt, StreamExt};
+use futures::StreamExt;
 use ruma::{
 OwnedEventId, OwnedServerName, RoomId, RoomVersionId, UserId,
 events::{
@@ -19,18 +19,17 @@ use tuwunel_core::{
 
 use super::RoomMutexGuard;
 
-/// Creates a new persisted data unit and adds it to a room. This function
-/// takes a roomid_mutex_state, meaning that only this function is able to
-/// mutate the room state.
-#[implement(super::Service)]
-#[tracing::instrument(skip(self, state_lock), level = "debug", ret)]
-pub async fn build_and_append_pdu(
- &self,
- pdu_builder: PduBuilder,
- sender: &UserId,
- room_id: &RoomId,
- state_lock: &RoomMutexGuard,
-) -> Result {
+impl super::Service {
+ /// Creates a new persisted data unit and adds it to a room. This function
+ /// takes a roomid_mutex_state, meaning that only this function is able to
+ /// mutate the room state.
+ async fn build_and_append_pdu_inner(
+ &self,
+ pdu_builder: PduBuilder,
+ sender: &UserId,
+ room_id: &RoomId,
+ state_lock: &RoomMutexGuard,
+ ) -> Result {
 let (pdu, pdu_json) = self
 .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)
 .await?;
@@ -50,8 +49,8 @@ pub async fn build_and_append_pdu(
 .is_admin_room(pdu.room_id())
 .await
 {
- self.check_pdu_for_admin_room(&pdu, sender)
- .boxed()
+ self
+ .check_pdu_for_admin_room(&pdu, sender)
 .await?;
 }
 
@@ -123,16 +122,29 @@ pub async fn build_and_append_pdu(
 // fail. 
let statehashid = self.services.state.append_to_state(&pdu).await?; - let pdu_id = self - .append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - once(pdu.event_id()), - state_lock, - ) - .await?; + let pdu_id = if DO_MEDIA_RETENTION { + self + .append_pdu( + &pdu, + pdu_json, + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + once(pdu.event_id()), + state_lock, + ) + .await? + } else { + self + .append_pdu_without_retention( + &pdu, + pdu_json, + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + once(pdu.event_id()), + state_lock, + ) + .await? + }; // We set the room state after inserting the pdu, so that we never have a moment // in time where events in the current room state do not exist @@ -169,7 +181,36 @@ pub async fn build_and_append_pdu( .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) .await?; - Ok(pdu.event_id().to_owned()) + Ok(pdu.event_id().to_owned()) + } +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self, state_lock), level = "debug", ret)] +pub async fn build_and_append_pdu( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + state_lock: &RoomMutexGuard, +) -> Result { + self + .build_and_append_pdu_inner::(pdu_builder, sender, room_id, state_lock) + .await +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self, state_lock), level = "debug", ret)] +pub async fn build_and_append_pdu_without_retention( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + state_lock: &RoomMutexGuard, +) -> Result { + self + .build_and_append_pdu_inner::(pdu_builder, sender, room_id, state_lock) + .await } #[implement(super::Service)] diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index ba26e0e6b..b269469c7 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -5,10 +5,11 @@ use 
ruma::{ events::room::{ guest_access::GuestAccess, member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, }, room::JoinRule, }; -use tuwunel_core::{Result, debug_info, pdu::PduBuilder}; +use tuwunel_core::{Result, debug_info, debug_warn, pdu::PduBuilder}; use crate::command::{CommandResult, CommandSystem}; @@ -77,7 +78,7 @@ impl Service { debug_info!("Inviting user {user_id} to user room {room_id}"); self.services .timeline - .build_and_append_pdu( + .build_and_append_pdu_without_retention( PduBuilder::state( String::from(user_id), &RoomMemberEventContent::new(MembershipState::Invite), @@ -91,7 +92,7 @@ impl Service { debug_info!("Force joining user {user_id} to user room {room_id}"); self.services .timeline - .build_and_append_pdu( + .build_and_append_pdu_without_retention( PduBuilder::state( String::from(user_id), &RoomMemberEventContent::new(MembershipState::Join), @@ -105,7 +106,36 @@ impl Service { Ok(()) } - pub async fn send_text(&self, user_id: &UserId, body: &str) -> Result { Ok(()) } + pub async fn send_text(&self, user_id: &UserId, body: &str) -> Result { + if !self.services.globals.user_is_local(user_id) { + debug_info!(%user_id, "Skipping user room send for remote user"); + return Ok(()); + } + + let room_id = match self.get_user_room(user_id).await { + | Ok(room_id) => room_id, + | Err(e) => { + debug_warn!(%user_id, error = %e, "User room missing; unable to deliver message"); + return Ok(()); + }, + }; + + let state_lock = self.services.state.mutex.lock(&room_id).await; + let content = RoomMessageEventContent::text_markdown(body); + + self.services + .timeline + .build_and_append_pdu_without_retention( + PduBuilder::timeline(&content), + &self.services.globals.server_user, + &room_id, + &state_lock, + ) + //.boxed() + .await?; + + Ok(()) + } pub async fn message_hook( &self, diff --git a/src/user/user.rs b/src/user/user.rs index 84433ff26..c262c980f 100644 --- a/src/user/user.rs +++ b/src/user/user.rs @@ -2,14 +2,16 @@ 
use clap::Parser;
 use tuwunel_core::Result;
 use tuwunel_macros::{command, command_dispatch};
-use crate::user::debug::Cmd;
+use crate::user::{debug::Cmd as DebugCmd, retention::Cmd as RetentionCmd};
 
 #[derive(Debug, Parser)]
 #[command(name = "tuwunel", version = tuwunel_core::version())]
 #[command_dispatch]
 pub(super) enum UserCommand {
 #[command(subcommand)]
- Debug(Cmd),
+ Debug(DebugCmd),
+ #[command(subcommand)]
+ Retention(RetentionCmd),
 }
 
 mod debug {
@@ -29,3 +31,32 @@ mod debug {
 Ok(format!("Running echo command from {sender}"))
 }
 }
+
+mod retention {
+ use clap::Subcommand;
+ use tuwunel_core::Result;
+ use tuwunel_macros::{command, command_dispatch};
+
+ #[command_dispatch]
+ #[derive(Debug, Subcommand)]
+ pub(crate) enum Cmd {
+ Confirm { mxc: String },
+ }
+
+ #[command]
+ pub(super) async fn confirm(&self, mxc: String) -> Result {
+ let bytes = self
+ .services
+ .media
+ .retention_confirm_deletion(self.sender, &mxc)
+ .await?;
+
+ let summary = if bytes > 0 {
+ format!("Removed {bytes} bytes of local media.")
+ } else {
+ "No local media files were found to delete.".to_owned()
+ };
+
+ Ok(format!("Confirmed deletion for {mxc}. {summary}"))
+ }
+}

From 5cdae72cda5661bec1baf44fcd0d73a4c06ce8ed Mon Sep 17 00:00:00 2001
From: tototomate123 
Date: Sun, 12 Oct 2025 16:43:33 +0200
Subject: [PATCH 03/17] test message and changes

---
 rust-toolchain.toml | 29 +++++++
 src/service/media/mod.rs | 102 +++++++++++++++++++++----
 src/service/media/retention.rs | 18 +++--
 src/service/rooms/timeline/append.rs | 22 ++++--
 src/service/rooms/timeline/backfill.rs | 1 +
 5 files changed, 147 insertions(+), 25 deletions(-)
 create mode 100644 rust-toolchain.toml

diff --git a/rust-toolchain.toml b/rust-toolchain.toml
new file mode 100644
index 000000000..a1afc2351
--- /dev/null
+++ b/rust-toolchain.toml
@@ -0,0 +1,29 @@
+# This is the authoritative configuration of this project's Rust toolchain. 
+# +# Other files that need upkeep when this changes: +# +# * `Cargo.toml` +# * `flake.nix` +# +# Search in those files for `rust-toolchain.toml` to find the relevant places. +# If you're having trouble making the relevant changes, bug a maintainer. + +[toolchain] +channel = "1.88.0" +profile = "minimal" +components = [ + # For rust-analyzer + "rust-src", + "rust-analyzer", + # For CI and editors + "rustfmt", + "clippy", +] +targets = [ + #"x86_64-apple-darwin", + "x86_64-unknown-linux-gnu", + "x86_64-unknown-linux-musl", + "aarch64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", + #"aarch64-apple-darwin", +] diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 23d5ff23a..c00bb9101 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -70,6 +70,7 @@ enum CandidateAction { struct RetentionCandidate { mxc: String, room_id: Option, + sender: Option, // user ID who uploaded the media } #[derive(Debug, Clone)] @@ -148,10 +149,11 @@ impl Service { &self, event_id: &str, room_id: &str, + sender: &str, mxcs: &[(String, bool, String)], ) { self.retention - .insert_mxcs_on_event(event_id, room_id, mxcs); + .insert_mxcs_on_event(event_id, room_id, sender, mxcs); } pub async fn retention_decrement_on_redaction(&self, event_id: &str) { @@ -172,11 +174,32 @@ impl Service { .await { if !primary.is_empty() { - candidates.extend( - primary - .into_iter() - .map(|(mxc, room_id)| RetentionCandidate { mxc, room_id: Some(room_id) }), - ); + // Check if this is an encrypted event marker + let is_encrypted_marker = primary.iter().any(|(mxc, _, _)| mxc.starts_with("encrypted-event:")); + + if is_encrypted_marker { + // For encrypted events, we can't see the content, so we don't know what media was in it. + // Just get the sender from the marker and we'll handle it below + for (marker, room_id, sender) in primary { + if marker.starts_with("encrypted-event:") { + // This is a marker, not a real MXC. We'll ask the user about recent uploads. 
+ if let Some(sender_str) = sender { + candidates.push(RetentionCandidate { + mxc: marker, // Keep the marker so we can identify it later + room_id: Some(room_id), + sender: Some(sender_str) + }); + } + } + } + } else { + // Normal unencrypted media references + candidates.extend( + primary + .into_iter() + .map(|(mxc, room_id, sender)| RetentionCandidate { mxc, room_id: Some(room_id), sender }), + ); + } } } @@ -198,7 +221,7 @@ impl Service { .and_then(|v| v.as_str()) .map(str::to_owned); candidates.extend(discovered.into_iter().map(|mxc| { - RetentionCandidate { mxc, room_id: room_id.clone() } + RetentionCandidate { mxc, room_id: room_id.clone(), sender: None } })); } } @@ -220,6 +243,26 @@ impl Service { } for candidate in candidates { + // Skip encrypted markers - they're not real MXCs, just notifications + if candidate.mxc.starts_with("encrypted-event:") { + // For encrypted events, we notify the user but don't queue anything for automatic deletion + // The user will have to manually confirm with the actual MXC URI of their media + if let Some(sender_str) = &candidate.sender { + if let Ok(owner) = OwnedUserId::try_from(sender_str.clone()) { + if self.services.globals.user_is_local(&owner) { + let body = self.build_retention_notice(&candidate, event_value.as_ref()); + if let Err(e) = self.services.userroom.send_text(&owner, &body).await { + warn!(%event_id, user = %owner, "retention: failed to notify user about encrypted event redaction: {e}"); + } else { + debug_info!(%event_id, user = %owner, "retention: notified user about encrypted event redaction"); + } + } + } + } + continue; + } + + // Evaluate candidate using policy and user preferences let decision = self .evaluate_retention_candidate(policy, event_value.as_ref(), &candidate) .await; @@ -239,11 +282,8 @@ impl Service { true, ); - if self - .services - .globals - .user_is_local(owner.as_ref()) - { + // Send notification to the uploader's user room (not the room where it was posted!) 
+ if self.services.globals.user_is_local(owner.as_ref()) { let body = self.build_retention_notice(&candidate, event_value.as_ref()); if let Err(e) = self .services @@ -262,7 +302,7 @@ impl Service { %event_id, mxc = %candidate.mxc, user = owner.as_str(), - "retention: sent user confirmation request" + "retention: sent user confirmation request to their user room" ); } } @@ -292,7 +332,18 @@ impl Service { }; } - let mut owner = self.db.get_media_owner(&candidate.mxc).await; + // Prefer sender from candidate (who uploaded the media) over database lookup + let mut owner: Option = candidate + .sender + .as_deref() + .and_then(|s| OwnedUserId::try_from(s.to_owned()).ok()); + + // Fallback to database lookup if sender not in candidate + if owner.is_none() { + owner = self.db.get_media_owner(&candidate.mxc).await; + } + + // Last resort: try to get from event value if owner.is_none() { if let Some(val) = event_value { if let Some(sender) = val.get("sender").and_then(|s| s.as_str()) { @@ -347,6 +398,29 @@ impl Service { candidate: &RetentionCandidate, event_value: Option<&Value>, ) -> String { + // Check if this is an encrypted event marker + if candidate.mxc.starts_with("encrypted-event:") { + let room_segment = candidate + .room_id + .as_deref() + .map(|room| format!(" in room {room}")) + .unwrap_or_default(); + + let timestamp = event_value + .and_then(|val| val.get("origin_server_ts")) + .and_then(canonical_json_to_u64) + .map(|ts| format!(" at {ts}")) + .unwrap_or_default(); + + return format!( + "You redacted an encrypted message{room_segment}{timestamp}. 
\ + If that message contained media you'd like to delete, reply with the MXC URI using: \ + `!user retention confirm mxc://...` \ + or update your media retention preference to automatically handle it.", + ); + } + + // Regular unencrypted media let room_segment = candidate .room_id .as_deref() diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index a1a797922..4a17aece8 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -30,6 +30,8 @@ pub(crate) struct MediaEventRef { pub mxc: String, pub room_id: String, pub kind: String, // "content.url", "thumbnail_url" + #[serde(default)] + pub sender: Option, // user ID who uploaded/sent this media } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -113,10 +115,12 @@ impl Retention { /// /// assumptions: /// - `mxcs` is a slice of (mxc_uri, local, kind) + /// - `sender` is the user ID who sent/uploaded this media pub(super) fn insert_mxcs_on_event( &self, event_id: &str, room_id: &str, + sender: &str, mxcs: &[(String, bool, String)], ) { let now = SystemTime::now() @@ -127,7 +131,7 @@ impl Retention { warn!(%event_id, "retention: insert called with zero MXCs"); return; } - warn!(%event_id, count = mxcs.len(), %room_id, "retention: inserting media refs for event"); + warn!(%event_id, count = mxcs.len(), %room_id, sender=%sender, "retention: inserting media refs for event"); let mut puts: Vec<(Vec, Vec)> = Vec::with_capacity(mxcs.len() * 2); for (mxc, local, kind) in mxcs.iter() { @@ -136,6 +140,7 @@ impl Retention { mxc: mxc.clone(), room_id: room_id.to_owned(), kind: kind.clone(), + sender: Some(sender.to_owned()), }; let key_mer = Self::key_mer(event_id, kind).into_bytes(); let val_mer = serialize_val(Cbor(&mer)) @@ -177,15 +182,16 @@ impl Retention { /// decrement refcounts for all MediaEventRef mapped by this event id. 
/// if policy is set to delete unreferenced/local, enqueue for deletion + /// Returns Vec<(mxc, room_id, sender)> pub(super) async fn decrement_refcount_on_redaction( &self, event_id: &str, policy: RetentionPolicy, - ) -> Result> { + ) -> Result)>> { warn!(%event_id, ?policy, "retention: redaction decrement start"); let prefix = format!("{K_MER}{event_id}:"); let prefixb = prefix.as_bytes().to_vec(); - let mut to_delete: Vec<(String, String)> = Vec::new(); + let mut to_delete: Vec<(String, String, Option)> = Vec::new(); let mut puts: Vec<(Vec, Vec)> = Vec::new(); let mut dels: Vec> = Vec::new(); let mut processed = 0usize; @@ -208,12 +214,12 @@ impl Retention { | RetentionPolicy::DeleteIfUnreferenced => mr.refcount == 0, | RetentionPolicy::ForceDeleteLocal => mr.local, }; - warn!(%event_id, mxc = %mer.mxc, kind = %mer.kind, new_refcount = mr.refcount, should_queue, local = mr.local, "retention: redaction updated ref"); + warn!(%event_id, mxc = %mer.mxc, kind = %mer.kind, new_refcount = mr.refcount, should_queue, local = mr.local, sender = ?mer.sender, "retention: redaction updated ref"); let val_mref = serialize_val(Cbor(&mr))?.to_vec(); puts.push((key_mref.into_bytes(), val_mref)); if should_queue { - warn!(%event_id, mxc = %mer.mxc, room = %mer.room_id, "retention: media candidate ready for deletion"); - to_delete.push((mer.mxc.clone(), mer.room_id.clone())); + warn!(%event_id, mxc = %mer.mxc, room = %mer.room_id, sender = ?mer.sender, "retention: media candidate ready for deletion"); + to_delete.push((mer.mxc.clone(), mer.room_id.clone(), mer.sender.clone())); } } diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 5713ec656..2fcde9c90 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -388,7 +388,7 @@ impl super::Service { .message_hook(&pdu.event_id, &pdu.room_id, &pdu.sender, &body) .await; } - // media retention insertion (structured extraction + fallback JSON scan) + // 
media retention insertion (structured extraction for unencrypted messages) if let Ok(msg_full) = pdu.get_content::() { warn!(event_id=%pdu.event_id(), msg=?msg_full, "retention: debug message content"); use ruma::events::room::MediaSource; @@ -408,13 +408,24 @@ impl super::Service { ruma::events::room::message::MessageType::Audio(c) => { push_media(&mut mxcs, &c.source, "audio.source", self); }, _ => {}, } - // (fallback JSON scan removed for now, structured extraction should capture supported media types) if mxcs.is_empty() { warn!(event_id=%pdu.event_id(), "retention: no media sources extracted"); } - else { warn!(event_id=%pdu.event_id(), count=mxcs.len(), "retention: inserting media refs"); self.services.media.retention_insert_mxcs_on_event(pdu.event_id().as_str(), pdu.room_id().as_str(), &mxcs); } - } else { - warn!(event_id=%pdu.event_id(), "retention: failed to decode RoomMessageEventContent for extraction"); + else { warn!(event_id=%pdu.event_id(), count=mxcs.len(), "retention: inserting media refs"); self.services.media.retention_insert_mxcs_on_event(pdu.event_id().as_str(), pdu.room_id().as_str(), pdu.sender().as_str(), &mxcs); } } }, + | TimelineEventType::RoomEncrypted => { + // For encrypted rooms: We can't read the content (it's E2EE), so we can't extract MXC URIs. + // Instead, we track the encrypted event metadata so when it's redacted, we can query + // the database for media uploaded by this user around this time. + // Track using a special marker that will be recognized during redaction handling. 
+ warn!(event_id=%pdu.event_id(), sender=%pdu.sender(), room=%pdu.room_id(), "retention: tracking encrypted event"); + let marker = format!("encrypted-event:{}", pdu.event_id()); + self.services.media.retention_insert_mxcs_on_event( + pdu.event_id().as_str(), + pdu.room_id().as_str(), + pdu.sender().as_str(), + &vec![(marker, false, "encrypted.marker".to_owned())] + ); + }, | _ => {}, } @@ -606,6 +617,7 @@ fn increment_notification_counts( } } + //TODO: this is an ABA fn increment(db: &Arc, key: &[u8]) { let old = db.get_blocking(key); diff --git a/src/service/rooms/timeline/backfill.rs b/src/service/rooms/timeline/backfill.rs index 54a5b5fbd..e84b36261 100644 --- a/src/service/rooms/timeline/backfill.rs +++ b/src/service/rooms/timeline/backfill.rs @@ -267,6 +267,7 @@ pub async fn backfill_pdu( .retention_insert_mxcs_on_event( pdu.event_id().as_str(), pdu.room_id().as_str(), + pdu.sender().as_str(), &mxcs, ); } From 713c39f38b1af249bc49fe501e2de0724d41d8be Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Sun, 12 Oct 2025 17:09:36 +0200 Subject: [PATCH 04/17] basic mediadeletion system --- src/api/client/media.rs | 4 ++ src/service/media/mod.rs | 85 +++++----------------- src/service/media/retention.rs | 103 +++++++++++++++++++++++++++ src/service/rooms/timeline/append.rs | 49 +++++++++---- 4 files changed, 161 insertions(+), 80 deletions(-) diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 9b1cb9562..37fcc88ff 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -66,6 +66,10 @@ pub(crate) async fn create_content_route( .create(mxc, Some(user), Some(&content_disposition), content_type, &body.file) .await?; + services + .media + .retention_track_pending_upload(user.as_str(), &mxc.to_string()); + let blurhash = body.generate_blurhash.then(|| { services .media diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index c00bb9101..4dc310e6d 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -156,6 
+156,18 @@ impl Service { .insert_mxcs_on_event(event_id, room_id, sender, mxcs); } + pub fn retention_track_pending_upload(&self, user_id: &str, mxc: &str) { + self.retention.track_pending_upload(user_id, mxc); + } + + pub async fn retention_consume_pending_uploads( + &self, + user_id: &str, + event_ts: u64, + ) -> Vec<(String, bool, String)> { + self.retention.consume_pending_uploads(user_id, event_ts).await + } + pub async fn retention_decrement_on_redaction(&self, event_id: &str) { use self::retention::RetentionPolicy; @@ -174,32 +186,11 @@ impl Service { .await { if !primary.is_empty() { - // Check if this is an encrypted event marker - let is_encrypted_marker = primary.iter().any(|(mxc, _, _)| mxc.starts_with("encrypted-event:")); - - if is_encrypted_marker { - // For encrypted events, we can't see the content, so we don't know what media was in it. - // Just get the sender from the marker and we'll handle it below - for (marker, room_id, sender) in primary { - if marker.starts_with("encrypted-event:") { - // This is a marker, not a real MXC. We'll ask the user about recent uploads. 
- if let Some(sender_str) = sender { - candidates.push(RetentionCandidate { - mxc: marker, // Keep the marker so we can identify it later - room_id: Some(room_id), - sender: Some(sender_str) - }); - } - } - } - } else { - // Normal unencrypted media references - candidates.extend( - primary - .into_iter() - .map(|(mxc, room_id, sender)| RetentionCandidate { mxc, room_id: Some(room_id), sender }), - ); - } + candidates.extend( + primary + .into_iter() + .map(|(mxc, room_id, sender)| RetentionCandidate { mxc, room_id: Some(room_id), sender }), + ); } } @@ -243,25 +234,6 @@ impl Service { } for candidate in candidates { - // Skip encrypted markers - they're not real MXCs, just notifications - if candidate.mxc.starts_with("encrypted-event:") { - // For encrypted events, we notify the user but don't queue anything for automatic deletion - // The user will have to manually confirm with the actual MXC URI of their media - if let Some(sender_str) = &candidate.sender { - if let Ok(owner) = OwnedUserId::try_from(sender_str.clone()) { - if self.services.globals.user_is_local(&owner) { - let body = self.build_retention_notice(&candidate, event_value.as_ref()); - if let Err(e) = self.services.userroom.send_text(&owner, &body).await { - warn!(%event_id, user = %owner, "retention: failed to notify user about encrypted event redaction: {e}"); - } else { - debug_info!(%event_id, user = %owner, "retention: notified user about encrypted event redaction"); - } - } - } - } - continue; - } - // Evaluate candidate using policy and user preferences let decision = self .evaluate_retention_candidate(policy, event_value.as_ref(), &candidate) @@ -398,29 +370,6 @@ impl Service { candidate: &RetentionCandidate, event_value: Option<&Value>, ) -> String { - // Check if this is an encrypted event marker - if candidate.mxc.starts_with("encrypted-event:") { - let room_segment = candidate - .room_id - .as_deref() - .map(|room| format!(" in room {room}")) - .unwrap_or_default(); - - let timestamp = 
event_value - .and_then(|val| val.get("origin_server_ts")) - .and_then(canonical_json_to_u64) - .map(|ts| format!(" at {ts}")) - .unwrap_or_default(); - - return format!( - "You redacted an encrypted message{room_segment}{timestamp}. \ - If that message contained media you'd like to delete, reply with the MXC URI using: \ - `!user retention confirm mxc://...` \ - or update your media retention preference to automatically handle it.", - ); - } - - // Regular unencrypted media let room_segment = candidate .room_id .as_deref() diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 4a17aece8..684b342d4 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -16,6 +16,14 @@ use super::Service; const K_MREF: &str = "mref:"; // mref: const K_MER: &str = "mer:"; // mer:: const K_QUEUE: &str = "qdel:"; // qdel: => DeletionCandidate +const K_PENDING: &str = "pending:"; // pending:: => PendingUpload + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct PendingUpload { + pub mxc: String, + pub user_id: String, + pub upload_ts: u64, // milliseconds since epoch +} #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct MediaRef { @@ -80,6 +88,14 @@ impl Retention { #[inline] fn key_queue(mxc: &str) -> String { format!("{K_QUEUE}{mxc}") } + #[inline] + fn key_pending(user_id: &str, timestamp_ms: u64) -> String { + format!("{K_PENDING}{user_id}:{timestamp_ms}") + } + + #[inline] + fn pending_prefix(user_id: &str) -> String { format!("{K_PENDING}{user_id}:") } + #[allow(dead_code)] pub(super) async fn get_media_ref(&self, mxc: &str) -> Result> { match self.cf.get(&Self::key_mref(mxc)).await { @@ -234,6 +250,93 @@ impl Retention { Ok(to_delete) } + pub(super) fn track_pending_upload(&self, user_id: &str, mxc: &str) { + let upload_ts = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_millis() as u64; + + let pending = PendingUpload { + mxc: mxc.to_owned(), + user_id: 
user_id.to_owned(), + upload_ts, + }; + + let key = Self::key_pending(user_id, upload_ts); + warn!(user_id, mxc, upload_ts, "retention: tracking pending upload for encrypted event association"); + self.cf.raw_put(key, Cbor(&pending)); + + self.cleanup_old_pending_uploads(user_id, upload_ts); + } + + pub(super) async fn consume_pending_uploads( + &self, + user_id: &str, + event_ts: u64, + ) -> Vec<(String, bool, String)> { + let window_ms = 60_000u64; // 60 seconds + let cutoff_ts = event_ts.saturating_sub(window_ms); + + let prefix = Self::pending_prefix(user_id); + let mut found_mxcs: Vec<(String, bool, String)> = Vec::new(); + let mut to_delete: Vec> = Vec::new(); + + let mut stream = self + .cf + .stream_raw_prefix::<&str, Cbor, _>(prefix.as_bytes()); + + while let Some(item) = stream.next().await.transpose().ok().flatten() { + let (key, Cbor(pending)) = item; + + if pending.upload_ts >= cutoff_ts && pending.upload_ts <= event_ts { + found_mxcs.push((pending.mxc.clone(), true, "encrypted.media".to_owned())); + to_delete.push(key.as_bytes().to_vec()); + warn!( + user_id, + mxc = %pending.mxc, + upload_ts = pending.upload_ts, + event_ts, + "retention: consuming pending upload for encrypted event" + ); + } else if pending.upload_ts < cutoff_ts { + to_delete.push(key.as_bytes().to_vec()); + } + } + + if !to_delete.is_empty() { + self.cf.write_batch_raw(std::iter::empty(), to_delete); + } + + found_mxcs + } + + fn cleanup_old_pending_uploads(&self, user_id: &str, current_ts: u64) { + let cf = self.cf.clone(); + let user_id = user_id.to_owned(); + let cutoff = current_ts.saturating_sub(60_000); + + // Spawn cleanup task to avoid blocking + tokio::spawn(async move { + let prefix = Self::pending_prefix(&user_id); + let mut to_delete: Vec> = Vec::new(); + + let mut stream = cf.stream_raw_prefix::<&str, Cbor, _>(prefix.as_bytes()); + + while let Some(item) = stream.next().await.transpose().ok().flatten() { + let (key, Cbor(pending)) = item; + if pending.upload_ts < cutoff 
{ + to_delete.push(key.as_bytes().to_vec()); + } + } + + if !to_delete.is_empty() { + let count = to_delete.len(); + cf.write_batch_raw(std::iter::empty(), to_delete); + trace!(user_id, count, "retention: cleaned up old pending uploads"); + } + }); + } + /// qeue a media item for deletion (idempotent best-effort). pub(super) fn queue_media_for_deletion( &self, diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 2fcde9c90..e3d121851 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -413,18 +413,43 @@ impl super::Service { } }, | TimelineEventType::RoomEncrypted => { - // For encrypted rooms: We can't read the content (it's E2EE), so we can't extract MXC URIs. - // Instead, we track the encrypted event metadata so when it's redacted, we can query - // the database for media uploaded by this user around this time. - // Track using a special marker that will be recognized during redaction handling. - warn!(event_id=%pdu.event_id(), sender=%pdu.sender(), room=%pdu.room_id(), "retention: tracking encrypted event"); - let marker = format!("encrypted-event:{}", pdu.event_id()); - self.services.media.retention_insert_mxcs_on_event( - pdu.event_id().as_str(), - pdu.room_id().as_str(), - pdu.sender().as_str(), - &vec![(marker, false, "encrypted.marker".to_owned())] - ); + // For encrypted rooms: We can't read the content (it's E2EE), so we can't extract MXC URIs directly. + // However, we CAN associate recent media uploads with this encrypted event + // Strategy: When user uploads media, we track it as "pending". When they send an encrypted event + // within 60 seconds, we consume those pending uploads and associate them with this event. 
+ // todo: find a more realistic time window, 60s may be a bit long + + // Get the event timestamp (milliseconds since epoch) + let event_ts: u64 = pdu.origin_server_ts().get().into(); + + // Consume any pending uploads from this user within the last 60 seconds + let pending_mxcs = self.services + .media + .retention_consume_pending_uploads(pdu.sender().as_str(), event_ts) + .await; + + if !pending_mxcs.is_empty() { + warn!( + event_id=%pdu.event_id(), + sender=%pdu.sender(), + room=%pdu.room_id(), + count=pending_mxcs.len(), + "retention: associated pending uploads with encrypted event" + ); + self.services.media.retention_insert_mxcs_on_event( + pdu.event_id().as_str(), + pdu.room_id().as_str(), + pdu.sender().as_str(), + &pending_mxcs + ); + } else { + warn!( + event_id=%pdu.event_id(), + sender=%pdu.sender(), + room=%pdu.room_id(), + "retention: no pending uploads found for encrypted event" + ); + } }, | _ => {}, } From 4ee969cd1540f27be8390d5357eb707fe33567f1 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Sun, 12 Oct 2025 17:18:59 +0200 Subject: [PATCH 05/17] added comments --- src/api/client/media.rs | 1 + src/service/media/mod.rs | 2 ++ src/service/media/retention.rs | 14 +++++++++++++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 37fcc88ff..16681d51c 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -66,6 +66,7 @@ pub(crate) async fn create_content_route( .create(mxc, Some(user), Some(&content_disposition), content_type, &body.file) .await?; + // Track this upload as potentially being used in an encrypted message soon services .media .retention_track_pending_upload(user.as_str(), &mxc.to_string()); diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 4dc310e6d..19856ee7f 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -156,10 +156,12 @@ impl Service { .insert_mxcs_on_event(event_id, room_id, sender, mxcs); } + /// Track 
a media upload for potential association with an upcoming encrypted event. pub fn retention_track_pending_upload(&self, user_id: &str, mxc: &str) { self.retention.track_pending_upload(user_id, mxc); } + /// Consume pending uploads for a user and return them as MXC refs for an encrypted event. pub async fn retention_consume_pending_uploads( &self, user_id: &str, diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 684b342d4..66c40e1b8 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -250,6 +250,8 @@ impl Retention { Ok(to_delete) } + /// Track a media upload that might be used in an upcoming encrypted message. + /// These pending uploads will be matched to encrypted events within a time window. pub(super) fn track_pending_upload(&self, user_id: &str, mxc: &str) { let upload_ts = SystemTime::now() .duration_since(UNIX_EPOCH) @@ -266,13 +268,17 @@ impl Retention { warn!(user_id, mxc, upload_ts, "retention: tracking pending upload for encrypted event association"); self.cf.raw_put(key, Cbor(&pending)); + // Clean up old pending uploads (older than 60 seconds) asynchronously self.cleanup_old_pending_uploads(user_id, upload_ts); } + /// Find and consume pending uploads for a user within the last N seconds. + /// Returns Vec<(mxc, local, kind)> suitable for insert_mxcs_on_event. + /// Time window: 60 seconds (uploads must have happened within last minute). 
pub(super) async fn consume_pending_uploads( &self, user_id: &str, - event_ts: u64, + event_ts: u64, // event timestamp in milliseconds ) -> Vec<(String, bool, String)> { let window_ms = 60_000u64; // 60 seconds let cutoff_ts = event_ts.saturating_sub(window_ms); @@ -288,7 +294,10 @@ impl Retention { while let Some(item) = stream.next().await.transpose().ok().flatten() { let (key, Cbor(pending)) = item; + // Only match uploads within the time window if pending.upload_ts >= cutoff_ts && pending.upload_ts <= event_ts { + // Assume local=true since user uploaded to our server + // Mark as encrypted media found_mxcs.push((pending.mxc.clone(), true, "encrypted.media".to_owned())); to_delete.push(key.as_bytes().to_vec()); warn!( @@ -299,10 +308,12 @@ impl Retention { "retention: consuming pending upload for encrypted event" ); } else if pending.upload_ts < cutoff_ts { + // Too old, clean it up to_delete.push(key.as_bytes().to_vec()); } } + // Remove consumed/old pending uploads if !to_delete.is_empty() { self.cf.write_batch_raw(std::iter::empty(), to_delete); } @@ -310,6 +321,7 @@ impl Retention { found_mxcs } + /// Clean up pending uploads older than 60 seconds for a specific user. 
fn cleanup_old_pending_uploads(&self, user_id: &str, current_ts: u64) { let cf = self.cf.clone(); let user_id = user_id.to_owned(); From d399853e1ba79cd1a661b942d936483327354f3f Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Sun, 12 Oct 2025 18:10:01 +0200 Subject: [PATCH 06/17] improved user experience by adding simple reactions to delete or keep media, added comments --- src/service/media/mod.rs | 143 +++++++++++++++++++++------ src/service/media/retention.rs | 70 ++++++++++++- src/service/rooms/timeline/append.rs | 12 +++ src/service/userroom/mod.rs | 142 +++++++++++++++++++++++++- 4 files changed, 332 insertions(+), 35 deletions(-) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 19856ee7f..e1661941d 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -16,7 +16,7 @@ use std::{ use async_trait::async_trait; use base64::{Engine as _, engine::general_purpose}; use ruma::{ - Mxc, OwnedMxcUri, OwnedUserId, UserId, events::GlobalAccountDataEventType, + EventId, Mxc, OwnedMxcUri, OwnedUserId, UserId, events::GlobalAccountDataEventType, http_headers::ContentDisposition, }; use serde_json::Value; @@ -196,7 +196,7 @@ impl Service { } } - if let Ok(parsed_eid) = ruma::EventId::parse(event_id) { + if let Ok(parsed_eid) = EventId::parse(event_id) { match self .services .timeline @@ -247,39 +247,69 @@ impl Service { &candidate.mxc, owner.as_deref(), false, + None, // No notification for immediate deletion + None, // No reactions + None, // No reactions ); }, | (CandidateAction::AwaitConfirmation, Some(owner)) => { + // Send notification to the uploader's user room (not the room where it was posted!) 
+ let (notification_event_id, confirm_reaction_id, cancel_reaction_id) = + if self.services.globals.user_is_local(owner.as_ref()) { + let body = self.build_retention_notice(&candidate, event_value.as_ref()); + match self + .services + .userroom + .send_text_with_event_id(owner.as_ref(), &body) + .await + { + | Ok(event_id) => { + // Add reaction options: āœ… to confirm deletion, āŒ to cancel + let confirm_id = match self.services.userroom.add_reaction(owner.as_ref(), &event_id, "āœ…").await { + | Ok(id) => Some(id.to_string()), + | Err(e) => { + warn!(%event_id, "retention: failed to add āœ… reaction: {e}"); + None + } + }; + let cancel_id = match self.services.userroom.add_reaction(owner.as_ref(), &event_id, "āŒ").await { + | Ok(id) => Some(id.to_string()), + | Err(e) => { + warn!(%event_id, "retention: failed to add āŒ reaction: {e}"); + None + } + }; + + debug_info!( + %event_id, + mxc = %candidate.mxc, + user = owner.as_str(), + "retention: sent user confirmation request with reactions to their user room" + ); + (Some(event_id.to_string()), confirm_id, cancel_id) + }, + | Err(e) => { + warn!( + mxc = %candidate.mxc, + user = owner.as_str(), + "retention: failed to notify user about pending deletion: {e}", + ); + (None, None, None) + }, + } + } else { + (None, None, None) + }; + + // Queue for deletion with the notification and reaction event IDs self.retention.queue_media_for_deletion( &candidate.mxc, Some(owner.as_ref()), true, + notification_event_id, + confirm_reaction_id, + cancel_reaction_id, ); - - // Send notification to the uploader's user room (not the room where it was posted!) 
- if self.services.globals.user_is_local(owner.as_ref()) { - let body = self.build_retention_notice(&candidate, event_value.as_ref()); - if let Err(e) = self - .services - .userroom - .send_text(owner.as_ref(), &body) - .await - { - warn!( - %event_id, - mxc = %candidate.mxc, - user = owner.as_str(), - "retention: failed to notify user about pending deletion: {e}", - ); - } else { - debug_info!( - %event_id, - mxc = %candidate.mxc, - user = owner.as_str(), - "retention: sent user confirmation request to their user room" - ); - } - } }, | (CandidateAction::AwaitConfirmation, None) => { warn!(%event_id, mxc = %candidate.mxc, "retention: confirmation requested but owner is unknown"); @@ -385,14 +415,65 @@ impl Service { .unwrap_or_default(); format!( - "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending deletion. \ - Run `!user retention confirm {mxc}` here to delete it now, or update your media retention preference to keep it.", + "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending deletion.\n\n\ + React with āœ… to confirm deletion or āŒ to keep it.\n\ + (You can also run `!user retention confirm {mxc}` to delete it manually.)", mxc = candidate.mxc ) } - pub async fn retention_confirm_deletion(&self, user: &UserId, mxc: &str) -> Result { - self.retention.confirm_candidate(self, mxc, user).await + let (deleted_bytes, cancel_reaction_id) = self.retention.confirm_candidate(self, mxc, user).await?; + + // Redact the unused āŒ reaction to clean up the UI (spawned as background task) + if let Some(reaction_id_str) = cancel_reaction_id { + if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { + self.services.userroom.redact_reaction(user, &reaction_id); + } + } + + Ok(deleted_bytes) + } + + /// Confirm deletion (āœ… reaction) on the notification message + pub async fn retention_confirm_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result { + // Find the deletion candidate by notification event ID 
+ if let Some(mxc) = self.retention.find_mxc_by_notification_event(notification_event_id.as_str()).await { + debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user confirmed deletion via āœ… reaction"); + let (deleted_bytes, cancel_reaction_id) = self.retention.confirm_candidate(self, &mxc, user).await?; + + // Redact the unused āŒ reaction to clean up the UI (spawned as background task) + if let Some(reaction_id_str) = cancel_reaction_id { + if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { + self.services.userroom.redact_reaction(user, &reaction_id); + } + } + + Ok(deleted_bytes) + } else { + warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); + Ok(0) + } + } + + /// Cancel deletion (āŒ reaction) on the notification message + pub async fn retention_cancel_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result { + // Find and remove the deletion candidate + if let Some(mxc) = self.retention.find_mxc_by_notification_event(notification_event_id.as_str()).await { + debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user cancelled deletion via āŒ reaction"); + let confirm_reaction_id = self.retention.cancel_candidate(&mxc, user).await?; + + // Redact the unused āœ… reaction to clean up the UI (spawned as background task) + if let Some(reaction_id_str) = confirm_reaction_id { + if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { + self.services.userroom.redact_reaction(user, &reaction_id); + } + } + + Ok(()) + } else { + warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); + Ok(()) + } } async fn user_retention_preference(&self, user: &UserId) -> UserRetentionPreference { diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 66c40e1b8..6ae07b00f 100644 --- a/src/service/media/retention.rs +++ 
b/src/service/media/retention.rs @@ -50,6 +50,15 @@ pub(crate) struct DeletionCandidate { pub user_id: Option, #[serde(default)] pub awaiting_confirmation: bool, + /// Event ID of the notification message sent to the user (for reaction handling) + #[serde(default)] + pub notification_event_id: Option, + /// Event ID of the āœ… reaction (for cleanup) + #[serde(default)] + pub confirm_reaction_id: Option, + /// Event ID of the āŒ reaction (for cleanup) + #[serde(default)] + pub cancel_reaction_id: Option, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -355,6 +364,9 @@ impl Retention { mxc: &str, owner: Option<&UserId>, awaiting_confirmation: bool, + notification_event_id: Option, + confirm_reaction_id: Option, + cancel_reaction_id: Option, ) { let key = Self::key_queue(mxc); // overwrite / insert candidate with fresh timestamp @@ -363,6 +375,9 @@ impl Retention { enqueued_ts: now_secs(), user_id: owner.map(|u| u.to_string()), awaiting_confirmation, + notification_event_id, + confirm_reaction_id, + cancel_reaction_id, }; warn!( mxc, @@ -378,7 +393,7 @@ impl Retention { service: &Service, mxc: &str, requester: &UserId, - ) -> Result { + ) -> Result<(u64, Option)> { let key = Self::key_queue(mxc); let handle = self .cf @@ -392,13 +407,16 @@ impl Retention { }; if owner != requester.as_str() { return Err(err!(Request(Forbidden("media candidate owned by another user")))); - } + }; if !candidate.awaiting_confirmation { return Err(err!(Request(InvalidParam( "media deletion already processed", )))); } + // Save the cancel reaction ID to redact it + let cancel_reaction_to_redact = candidate.cancel_reaction_id.clone(); + candidate.awaiting_confirmation = false; candidate.enqueued_ts = now_secs(); @@ -408,7 +426,53 @@ impl Retention { dels.push(Self::key_mref(mxc).into_bytes()); self.cf.write_batch_raw(std::iter::empty(), dels); warn!(mxc, bytes = deleted_bytes, user = requester.as_str(), "retention: media deletion confirmed by user"); - Ok(deleted_bytes) + 
Ok((deleted_bytes, cancel_reaction_to_redact)) + } + + /// Find MXC by notification event ID (for reaction-based confirmation) + pub(super) async fn find_mxc_by_notification_event(&self, notification_event_id: &str) -> Option { + let prefix = K_QUEUE.as_bytes(); + let mut stream = self + .cf + .stream_raw_prefix::<&str, Cbor, _>(&prefix); + + while let Some(item) = stream.next().await.transpose().ok().flatten() { + let (_key, Cbor(cand)) = item; + if let Some(ref stored_event_id) = cand.notification_event_id { + if stored_event_id == notification_event_id { + return Some(cand.mxc.clone()); + } + } + } + + None + } + + /// Cancel a deletion candidate (remove from queue) + /// Returns the confirm reaction ID to redact it + pub(super) async fn cancel_candidate(&self, mxc: &str, requester: &UserId) -> Result> { + let key = Self::key_queue(mxc); + match self.cf.get(&key).await { + | Ok(handle) => { + let Cbor(candidate) = handle.deserialized::>()?; + + let Some(owner) = candidate.user_id.as_deref() else { + return Err(err!(Request(Forbidden("media candidate owner unknown")))); + }; + if owner != requester.as_str() { + return Err(err!(Request(Forbidden("media candidate owned by another user")))); + } + + // Save the confirm reaction ID to redact it + let confirm_reaction_to_redact = candidate.confirm_reaction_id.clone(); + + // Remove from queue + self.cf.remove(key.as_str()); + warn!(mxc, user = requester.as_str(), "retention: media deletion cancelled by user"); + Ok(confirm_reaction_to_redact) + }, + | Err(_) => Err(err!(Request(NotFound("no pending deletion for this media")))), + } } /// worker: processes queued deletion candidates after grace period. 
diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index e3d121851..7ffa60c3a 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -482,6 +482,18 @@ impl super::Service { .add_to_thread(&thread.event_id, pdu) .await?; }, + | Relation::Annotation(annotation) => { + self.services + .userroom + .reaction_hook( + pdu.event_id(), + pdu.room_id(), + pdu.sender(), + &annotation.event_id, + &annotation.key, + ) + .await; + }, | _ => {}, // TODO: Aggregate other types } } diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index b269469c7..3c591fc8d 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, OnceLock}; use ruma::{ - EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId, + EventId, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId, events::room::{ guest_access::GuestAccess, member::{MembershipState, RoomMemberEventContent}, @@ -137,6 +137,72 @@ impl Service { Ok(()) } + /// Send a text message to the user's admin room and return the event ID. + /// This allows adding reactions or further processing. 
+ pub async fn send_text_with_event_id(&self, user_id: &UserId, body: &str) -> Result { + if !self.services.globals.user_is_local(user_id) { + debug_info!(%user_id, "Skipping user room send for remote user"); + return Err(tuwunel_core::err!(Request(Forbidden("User is not local")))); + } + + let room_id = match self.get_user_room(user_id).await { + | Ok(room_id) => room_id, + | Err(e) => { + debug_warn!(%user_id, error = %e, "User room missing; unable to deliver message"); + return Err(e); + }, + }; + + let state_lock = self.services.state.mutex.lock(&room_id).await; + let content = RoomMessageEventContent::text_markdown(body); + + let event_id = self.services + .timeline + .build_and_append_pdu_without_retention( + PduBuilder::timeline(&content), + &self.services.globals.server_user, + &room_id, + &state_lock, + ) + .await?; + + Ok(event_id) + } + + /// Add a reaction to an event in the user's admin room + /// Returns the event ID of the reaction event + pub async fn add_reaction(&self, user_id: &UserId, event_id: &EventId, emoji: &str) -> Result { + if !self.services.globals.user_is_local(user_id) { + return Err(tuwunel_core::err!(Request(Forbidden("User is not local")))); + } + + let room_id = match self.get_user_room(user_id).await { + | Ok(room_id) => room_id, + | Err(e) => { + debug_warn!(%user_id, error = %e, "User room missing; unable to add reaction"); + return Err(e); + }, + }; + + let state_lock = self.services.state.mutex.lock(&room_id).await; + + // Create reaction content + use ruma::events::{reaction::ReactionEventContent, relation::Annotation}; + let content = ReactionEventContent::new(Annotation::new(event_id.to_owned(), emoji.to_owned())); + + let reaction_event_id = self.services + .timeline + .build_and_append_pdu_without_retention( + PduBuilder::timeline(&content), + &self.services.globals.server_user, + &room_id, + &state_lock, + ) + .await?; + + Ok(reaction_event_id) + } + pub async fn message_hook( &self, event_id: &EventId, @@ -192,7 +258,81 
@@ impl Service { .expect("user command system already initialized"); } + /// Remove a specific reaction event by redacting it + /// This is used to clean up the UI after a user makes their choice + /// Spawns as a background task to avoid recursion issues + pub fn redact_reaction(&self, user_id: &UserId, reaction_event_id: &EventId) { + use ruma::events::room::redaction::RoomRedactionEventContent; + + let user_id = user_id.to_owned(); + let reaction_event_id = reaction_event_id.to_owned(); + let services = self.services.clone(); + + // Spawn as background task to avoid async recursion + tokio::spawn(async move { + let Ok(room_id) = services.userroom.get_user_room(&user_id).await else { + return; + }; + + let server_user = &services.globals.server_user; + let state_lock = services.state.mutex.lock(&room_id).await; + + // Redact the reaction event to remove it from the UI + let _ = services + .timeline + .build_and_append_pdu_without_retention( + PduBuilder { + redacts: Some(reaction_event_id.clone()), + ..PduBuilder::timeline(&RoomRedactionEventContent { + redacts: Some(reaction_event_id.clone()), + reason: Some("Cleanup unused reaction".to_owned()), + }) + }, + server_user, + &room_id, + &state_lock, + ) + .await; + }); + } + + /// Handle reactions in user admin rooms (for media retention confirmation) + pub async fn reaction_hook( + &self, + _event_id: &EventId, + room_id: &RoomId, + sender: &UserId, + relates_to_event: &EventId, + emoji: &str, + ) { + if !self.services.globals.user_is_local(sender) { + return; + } + + if !self + .get_user_room(sender) + .await + .is_ok_and(|user_room| room_id == user_room) + { + return; + } + + // Check if this is a media retention confirmation reaction + if emoji == "āœ…" { + // User confirmed deletion - the media service will redact the unused āŒ reaction + if let Err(e) = self.services.media.retention_confirm_by_reaction(sender, relates_to_event).await { + debug_warn!(user = %sender, reaction_to = %relates_to_event, 
"retention: failed to process āœ… reaction: {e}"); + } + } else if emoji == "āŒ" { + // User cancelled deletion - the media service will redact the unused āœ… reaction + if let Err(e) = self.services.media.retention_cancel_by_reaction(sender, relates_to_event).await { + debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āŒ reaction: {e}"); + } + } + } + fn get_user_command_system(&self) -> &Arc { + self.user_command_system .get() .expect("user command system empty") From 3c3eabf92415743908f3677a771424b2546fe6dc Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Sun, 12 Oct 2025 21:39:38 +0200 Subject: [PATCH 07/17] fixed 'Unable to decrypt message' errors --- src/service/rooms/timeline/append.rs | 43 ++++++++++++---------------- 1 file changed, 19 insertions(+), 24 deletions(-) diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 7ffa60c3a..006364121 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -7,7 +7,7 @@ use futures::StreamExt; use std::pin::Pin; use tracing::warn; use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedUserId, RoomId, RoomVersionId, UserId, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedUserId, RoomId, RoomVersionId, UserId, events::{ GlobalAccountDataEventType, TimelineEventType, push_rules::PushRulesEvent, @@ -94,13 +94,12 @@ impl super::Service { mut pdu_json: CanonicalJsonObject, leafs: Leafs, state_lock: &'a RoomMutexGuard, - ) -> Result<(RawPduId, Vec)> + ) -> Result where Leafs: Iterator + Send + 'a, { // Coalesce database writes for the remainder of this scope. let _cork = self.db.db.cork_and_flush(); - let mut retention_targets: Vec = Vec::new(); let shortroomid = self .services @@ -305,9 +304,14 @@ impl super::Service { .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) .await? 
{ + let fut = self + .services + .media + .retention_decrement_on_redaction(redact_id.as_str()); + Pin::from(Box::new(fut)).await; + self.redact_pdu(redact_id, pdu, shortroomid) .await?; - retention_targets.push(redact_id.to_owned()); } } }, @@ -320,9 +324,14 @@ impl super::Service { .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) .await? { + let fut = self + .services + .media + .retention_decrement_on_redaction(redact_id.as_str()); + Pin::from(Box::new(fut)).await; + self.redact_pdu(redact_id, pdu, shortroomid) .await?; - retention_targets.push(redact_id.to_owned()); } } }, @@ -559,7 +568,7 @@ impl super::Service { } } - Ok((pdu_id, retention_targets)) + Ok(pdu_id) } } @@ -575,19 +584,8 @@ pub async fn append_pdu<'a, Leafs>( where Leafs: Iterator + Send + 'a, { - let (pdu_id, retention_targets) = self - .append_pdu_inner::(pdu, pdu_json, leafs, state_lock) - .await?; - - for event_id in retention_targets { - let fut = self - .services - .media - .retention_decrement_on_redaction(event_id.as_str()); - Pin::from(Box::new(fut)).await; - } - - Ok(pdu_id) + self.append_pdu_inner::(pdu, pdu_json, leafs, state_lock) + .await } #[implement(super::Service)] @@ -602,11 +600,8 @@ pub async fn append_pdu_without_retention<'a, Leafs>( where Leafs: Iterator + Send + 'a, { - let (pdu_id, _) = self - .append_pdu_inner::(pdu, pdu_json, leafs, state_lock) - .await?; - - Ok(pdu_id) + self.append_pdu_inner::(pdu, pdu_json, leafs, state_lock) + .await } #[implement(super::Service)] From 8b38fae93a3e2b287c0b65fe99b37d4f23cebba3 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Thu, 16 Oct 2025 20:25:11 +0200 Subject: [PATCH 08/17] remove config file --- rust-toolchainn.toml | 29 ----------------------------- 1 file changed, 29 deletions(-) delete mode 100644 rust-toolchainn.toml diff --git a/rust-toolchainn.toml b/rust-toolchainn.toml deleted file mode 100644 index a1afc2351..000000000 --- a/rust-toolchainn.toml +++ /dev/null @@ -1,29 +0,0 @@ -# This is the 
authoritiative configuration of this project's Rust toolchain. -# -# Other files that need upkeep when this changes: -# -# * `Cargo.toml` -# * `flake.nix` -# -# Search in those files for `rust-toolchain.toml` to find the relevant places. -# If you're having trouble making the relevant changes, bug a maintainer. - -[toolchain] -channel = "1.88.0" -profile = "minimal" -components = [ - # For rust-analyzer - "rust-src", - "rust-analyzer", - # For CI and editors - "rustfmt", - "clippy", -] -targets = [ - #"x86_64-apple-darwin", - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-musl", - "aarch64-unknown-linux-musl", - "aarch64-unknown-linux-gnu", - #"aarch64-apple-darwin", -] From 2655ada54f1b2d5e33807fbc37ae345cdccb7513 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Fri, 17 Oct 2025 22:01:08 +0200 Subject: [PATCH 09/17] feat(media): Add auto-delete preferences for encrypted/unencrypted rooms --- src/service/media/mod.rs | 205 +++++++++++++++++++++++++-------- src/service/media/retention.rs | 115 +++++++++++++++++- src/service/userroom/mod.rs | 57 +++++++-- src/user/user.rs | 135 +++++++++++++++++++++- 4 files changed, 457 insertions(+), 55 deletions(-) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index e1661941d..232625c91 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -47,7 +47,7 @@ pub struct Service { url_preview_mutex: MutexMap, pub(super) db: Data, services: Arc, - retention: Retention, + pub retention: Retention, } const MEDIA_RETENTION_ACCOUNT_DATA_KIND: &str = "im.tuwunel.media.retention"; @@ -71,6 +71,7 @@ struct RetentionCandidate { mxc: String, room_id: Option, sender: Option, // user ID who uploaded the media + from_encrypted_room: bool, // Was this from m.room.encrypted event? 
} #[derive(Debug, Clone)] @@ -181,6 +182,18 @@ impl Service { ); let mut candidates: Vec = Vec::new(); let mut event_value: Option = None; + let mut is_encrypted_event = false; + + if let Ok(parsed_eid) = EventId::parse(event_id) { + if let Ok(canonical) = self.services.timeline.get_pdu_json(&parsed_eid).await { + if let Ok(val) = serde_json::to_value(&canonical) { + if let Some(event_type) = val.get("type").and_then(|v| v.as_str()) { + is_encrypted_event = event_type == "m.room.encrypted"; + } + event_value = Some(val); + } + } + } if let Ok(primary) = self .retention @@ -191,42 +204,34 @@ impl Service { candidates.extend( primary .into_iter() - .map(|(mxc, room_id, sender)| RetentionCandidate { mxc, room_id: Some(room_id), sender }), + .map(|(mxc, room_id, sender)| RetentionCandidate { + mxc, + room_id: Some(room_id), + sender, + from_encrypted_room: is_encrypted_event, + }), ); } } - if let Ok(parsed_eid) = EventId::parse(event_id) { - match self - .services - .timeline - .get_pdu_json(&parsed_eid) - .await - { - | Ok(canonical) => match serde_json::to_value(&canonical) { - | Ok(val) => { - if candidates.is_empty() { - let mut discovered = HashSet::new(); - collect_mxcs(&val, &mut discovered); - if !discovered.is_empty() { - let room_id = val - .get("room_id") - .and_then(|v| v.as_str()) - .map(str::to_owned); - candidates.extend(discovered.into_iter().map(|mxc| { - RetentionCandidate { mxc, room_id: room_id.clone(), sender: None } - })); - } + if candidates.is_empty() { + if let Some(ref val) = event_value { + let mut discovered = HashSet::new(); + collect_mxcs(&val, &mut discovered); + if !discovered.is_empty() { + let room_id = val + .get("room_id") + .and_then(|v| v.as_str()) + .map(str::to_owned); + candidates.extend(discovered.into_iter().map(|mxc| { + RetentionCandidate { + mxc, + room_id: room_id.clone(), + sender: None, + from_encrypted_room: is_encrypted_event, } - event_value = Some(val); - }, - | Err(e) => { - warn!(%event_id, "retention: failed to 
convert canonical event to json value: {e}") - }, - }, - | Err(e) => { - debug_warn!(%event_id, "retention: unable to load original event for redaction: {e}") - }, + })); + } } } @@ -236,7 +241,50 @@ impl Service { } for candidate in candidates { - // Evaluate candidate using policy and user preferences + // Check user preferences for auto-deletion + let user_prefs = if let Some(ref sender) = candidate.sender { + self.retention.get_user_prefs(sender).await + } else { + Default::default() + }; + + let should_auto_delete = if candidate.from_encrypted_room { + user_prefs.auto_delete_encrypted + } else { + user_prefs.auto_delete_unencrypted + }; + + debug!( + mxc = %candidate.mxc, + sender = ?candidate.sender, + from_encrypted = candidate.from_encrypted_room, + should_auto_delete, + prefs_encrypted = user_prefs.auto_delete_encrypted, + prefs_unencrypted = user_prefs.auto_delete_unencrypted, + "retention: checking auto-delete preferences" + ); + + if should_auto_delete { + warn!( + mxc = %candidate.mxc, + sender = ?candidate.sender, + from_encrypted = candidate.from_encrypted_room, + "retention: auto-deleting per user preferences" + ); + self.retention.queue_media_for_deletion( + &candidate.mxc, + candidate.sender.as_ref().and_then(|s| UserId::parse(s).ok()).as_deref(), + false, // No confirmation needed + None, // No notification + None, // No reactions + None, // No reactions + None, // No auto-delete reaction + candidate.from_encrypted_room, + ); + continue; + } + + // Eval candidate using policy and user preferences let decision = self .evaluate_retention_candidate(policy, event_value.as_ref(), &candidate) .await; @@ -247,14 +295,16 @@ impl Service { &candidate.mxc, owner.as_deref(), false, - None, // No notification for immediate deletion - None, // No reactions - None, // No reactions + None, + None, + None, + None, + candidate.from_encrypted_room, ); }, | (CandidateAction::AwaitConfirmation, Some(owner)) => { // Send notification to the uploader's user room (not 
the room where it was posted!) - let (notification_event_id, confirm_reaction_id, cancel_reaction_id) = + let (notification_event_id, confirm_reaction_id, cancel_reaction_id, auto_reaction_id) = if self.services.globals.user_is_local(owner.as_ref()) { let body = self.build_retention_notice(&candidate, event_value.as_ref()); match self @@ -264,7 +314,10 @@ impl Service { .await { | Ok(event_id) => { - // Add reaction options: āœ… to confirm deletion, āŒ to cancel + // add reaction options: + // āœ… to confirm deletion + // āŒ to cancel + // āš™ļø to always auto-delete for room type let confirm_id = match self.services.userroom.add_reaction(owner.as_ref(), &event_id, "āœ…").await { | Ok(id) => Some(id.to_string()), | Err(e) => { @@ -279,6 +332,13 @@ impl Service { None } }; + let auto_id = match self.services.userroom.add_reaction(owner.as_ref(), &event_id, "āš™ļø").await { + | Ok(id) => Some(id.to_string()), + | Err(e) => { + warn!(%event_id, "retention: failed to add āš™ļø reaction: {e}"); + None + } + }; debug_info!( %event_id, @@ -286,7 +346,7 @@ impl Service { user = owner.as_str(), "retention: sent user confirmation request with reactions to their user room" ); - (Some(event_id.to_string()), confirm_id, cancel_id) + (Some(event_id.to_string()), confirm_id, cancel_id, auto_id) }, | Err(e) => { warn!( @@ -294,11 +354,11 @@ impl Service { user = owner.as_str(), "retention: failed to notify user about pending deletion: {e}", ); - (None, None, None) + (None, None, None, None) }, } } else { - (None, None, None) + (None, None, None, None) }; // Queue for deletion with the notification and reaction event IDs @@ -309,6 +369,8 @@ impl Service { notification_event_id, confirm_reaction_id, cancel_reaction_id, + auto_reaction_id, + candidate.from_encrypted_room, ); }, | (CandidateAction::AwaitConfirmation, None) => { @@ -414,9 +476,22 @@ impl Service { .map(|ts| format!(" at {ts}")) .unwrap_or_default(); + let encryption_warning = if candidate.from_encrypted_room { + 
"\n\nāš ļø WARNING: This media was detected from an encrypted room based on upload timing. \ + Detection may have false positives since the server cannot read encrypted messages. \ + Use auto-delete at your own risk." + } else { + "" + }; + + let room_type = if candidate.from_encrypted_room { "encrypted rooms" } else { "unencrypted rooms" }; + format!( - "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending deletion.\n\n\ - React with āœ… to confirm deletion or āŒ to keep it.\n\ + "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending deletion.{encryption_warning}\n\n\ + React with:\n\ + āœ… to confirm deletion\n\ + āŒ to keep it\n\ + āš™ļø to always auto-delete media in {room_type}\n\n\ (You can also run `!user retention confirm {mxc}` to delete it manually.)", mxc = candidate.mxc ) @@ -456,8 +531,8 @@ impl Service { } /// Cancel deletion (āŒ reaction) on the notification message - pub async fn retention_cancel_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result { - // Find and remove the deletion candidate + pub async fn retention_cancel_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result<()> { + // Find the deletion candidate by notification event ID if let Some(mxc) = self.retention.find_mxc_by_notification_event(notification_event_id.as_str()).await { debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user cancelled deletion via āŒ reaction"); let confirm_reaction_id = self.retention.cancel_candidate(&mxc, user).await?; @@ -476,6 +551,46 @@ impl Service { } } + /// Auto-delete (āš™ļø reaction) - enable auto-delete for this room type and delete immediately + pub async fn retention_auto_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result { + // Find the deletion candidate by notification event ID + if let Some(mxc) = self.retention.find_mxc_by_notification_event(notification_event_id.as_str()).await { + 
debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user enabled auto-delete via āš™ļø reaction"); + let (deleted_bytes, confirm_reaction_id, cancel_reaction_id, from_encrypted_room) = + self.retention.auto_delete_candidate(self, &mxc, user).await?; + + let room_type = if from_encrypted_room { "encrypted" } else { "unencrypted" }; + let command = if from_encrypted_room { "prefs-encrypted-off" } else { "prefs-unencrypted-off" }; + + // Send confirmation message in background to avoid async recursion + self.services.userroom.send_text_background( + user, + &format!( + "āœ… Auto-delete enabled for {} rooms.\n\n\ + To disable: `!user retention {}`", + room_type, command + ) + ); + + // Redact both unused reactions to clean up the UI (spawned as background tasks) + if let Some(reaction_id_str) = confirm_reaction_id { + if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { + self.services.userroom.redact_reaction(user, &reaction_id); + } + } + if let Some(reaction_id_str) = cancel_reaction_id { + if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { + self.services.userroom.redact_reaction(user, &reaction_id); + } + } + + Ok(deleted_bytes) + } else { + warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); + Ok(0) + } + } + async fn user_retention_preference(&self, user: &UserId) -> UserRetentionPreference { if !self.services.globals.user_is_local(user) { return UserRetentionPreference::Delete; diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 6ae07b00f..f95dbe8fe 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -12,11 +12,34 @@ use tuwunel_database::{Cbor, Deserialized, Map, keyval::serialize_val}; use super::Service; +//todo: split into multiple files + /// keyspace prefixes inside the `media_retention` CF const K_MREF: &str = "mref:"; // mref: const K_MER: &str = "mer:"; // mer:: const K_QUEUE: &str = 
"qdel:"; // qdel: => DeletionCandidate const K_PENDING: &str = "pending:"; // pending:: => PendingUpload +const K_PREFS: &str = "prefs:"; // prefs: => UserRetentionPrefs + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct UserRetentionPrefs { + /// Auto-delete media in unencrypted rooms without asking + #[serde(default)] + pub auto_delete_unencrypted: bool, + /// Auto-delete media in encrypted rooms without asking + /// Warning: Detection is based on pending uploads, may have false positives + #[serde(default)] + pub auto_delete_encrypted: bool, +} + +impl Default for UserRetentionPrefs { + fn default() -> Self { + Self { + auto_delete_unencrypted: false, + auto_delete_encrypted: false, + } + } +} #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct PendingUpload { @@ -59,6 +82,13 @@ pub(crate) struct DeletionCandidate { /// Event ID of the āŒ reaction (for cleanup) #[serde(default)] pub cancel_reaction_id: Option, + /// Event ID of the āš™ļø reaction (always auto-delete for this room type) + #[serde(default)] + pub auto_reaction_id: Option, + /// Was this media detected as being from an encrypted room? 
+ /// (based on pending upload matching, may have false positives) + #[serde(default)] + pub from_encrypted_room: bool, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -79,7 +109,7 @@ impl RetentionPolicy { } #[derive(Clone)] -pub(super) struct Retention { +pub struct Retention { cf: Arc, } @@ -105,6 +135,31 @@ impl Retention { #[inline] fn pending_prefix(user_id: &str) -> String { format!("{K_PENDING}{user_id}:") } + #[inline] + fn key_prefs(user_id: &str) -> String { format!("{K_PREFS}{user_id}") } + + /// Get user's retention preferences + pub async fn get_user_prefs(&self, user_id: &str) -> UserRetentionPrefs { + let key = Self::key_prefs(user_id); + match self.cf.get(&key).await { + | Ok(handle) => match handle.deserialized::>() { + | Ok(Cbor(prefs)) => prefs, + | Err(e) => { + warn!(%user_id, "retention: failed to deserialize user prefs: {e}"); + UserRetentionPrefs::default() + }, + }, + | Err(_) => UserRetentionPrefs::default(), + } + } + + /// Save user's retention preferences + pub async fn set_user_prefs(&self, user_id: &str, prefs: &UserRetentionPrefs) -> Result<()> { + let key = Self::key_prefs(user_id); + self.cf.raw_put(&key, Cbor(prefs)); + Ok(()) + } + #[allow(dead_code)] pub(super) async fn get_media_ref(&self, mxc: &str) -> Result> { match self.cf.get(&Self::key_mref(mxc)).await { @@ -367,6 +422,8 @@ impl Retention { notification_event_id: Option, confirm_reaction_id: Option, cancel_reaction_id: Option, + auto_reaction_id: Option, + from_encrypted_room: bool, ) { let key = Self::key_queue(mxc); // overwrite / insert candidate with fresh timestamp @@ -378,11 +435,14 @@ impl Retention { notification_event_id, confirm_reaction_id, cancel_reaction_id, + auto_reaction_id, + from_encrypted_room, }; warn!( mxc, awaiting_confirmation, owner = owner.map(UserId::as_str), + from_encrypted = from_encrypted_room, "retention: queue media for deletion" ); self.cf.raw_put(key, Cbor(&cand)); @@ -475,6 +535,59 @@ impl Retention { } } + /// Enable auto-delete for 
the room type (encrypted/unencrypted) and confirm deletion + /// Returns: (deleted_bytes, confirm_reaction_id, cancel_reaction_id) to redact unused reactions + pub(super) async fn auto_delete_candidate( + &self, + service: &Service, + mxc: &str, + requester: &UserId, + ) -> Result<(u64, Option, Option, bool)> { + let key = Self::key_queue(mxc); + match self.cf.get(&key).await { + | Ok(handle) => { + let Cbor(candidate) = handle.deserialized::>()?; + + let Some(owner) = candidate.user_id.as_deref() else { + return Err(err!(Request(Forbidden("media candidate owner unknown")))); + }; + if owner != requester.as_str() { + return Err(err!(Request(Forbidden("media candidate owned by another user")))); + } + + let from_encrypted_room = candidate.from_encrypted_room; + + let mut prefs = self.get_user_prefs(requester.as_str()).await; + if from_encrypted_room { + prefs.auto_delete_encrypted = true; + warn!(user = %requester, "retention: enabled auto-delete for encrypted rooms"); + } else { + prefs.auto_delete_unencrypted = true; + warn!(user = %requester, "retention: enabled auto-delete for unencrypted rooms"); + } + self.set_user_prefs(requester.as_str(), &prefs).await?; + + let confirm_reaction_to_redact = candidate.confirm_reaction_id.clone(); + let cancel_reaction_to_redact = candidate.cancel_reaction_id.clone(); + + let deleted_bytes = self.delete_local_media(service, mxc).await?; + let mut dels = Vec::with_capacity(2); + dels.push(key.into_bytes()); + dels.push(Self::key_mref(mxc).into_bytes()); + self.cf.write_batch_raw(std::iter::empty(), dels); + warn!( + mxc, + bytes = deleted_bytes, + user = requester.as_str(), + from_encrypted = from_encrypted_room, + "retention: media auto-deleted and preference saved" + ); + Ok((deleted_bytes, confirm_reaction_to_redact, cancel_reaction_to_redact, from_encrypted_room)) + }, + | Err(_) => Err(err!(Request(NotFound("no pending deletion for this media")))), + } + } + /// worker: processes queued deletion candidates after grace 
period. pub(super) async fn worker_process_queue( &self, diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index 3c591fc8d..d65254e10 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -137,6 +137,37 @@ impl Service { Ok(()) } + /// Send a text message to the user's admin room in the background (non-blocking). + /// This is useful to avoid async recursion. + pub fn send_text_background(&self, user_id: &UserId, body: &str) { + let user_id = user_id.to_owned(); + let body = body.to_owned(); + let services = self.services.clone(); + + tokio::spawn(async move { + if !services.globals.user_is_local(&user_id) { + return; + } + + let Ok(room_id) = services.userroom.get_user_room(&user_id).await else { + return; + }; + + let state_lock = services.state.mutex.lock(&room_id).await; + let content = RoomMessageEventContent::text_markdown(&body); + + let _ = services + .timeline + .build_and_append_pdu_without_retention( + PduBuilder::timeline(&content), + &services.globals.server_user, + &room_id, + &state_lock, + ) + .await; + }); + } + /// Send a text message to the user's admin room and return the event ID. /// This allows adding reactions or further processing. 
pub async fn send_text_with_event_id(&self, user_id: &UserId, body: &str) -> Result { @@ -318,15 +349,25 @@ impl Service { } // Check if this is a media retention confirmation reaction - if emoji == "āœ…" { - // User confirmed deletion - the media service will redact the unused āŒ reaction - if let Err(e) = self.services.media.retention_confirm_by_reaction(sender, relates_to_event).await { - debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āœ… reaction: {e}"); + //todo: maybe dont match for emojis here + match emoji { + "āœ…" => { + if let Err(e) = self.services.media.retention_confirm_by_reaction(sender, relates_to_event).await { + debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āœ… reaction: {e}"); + } + } + "āŒ" => { + if let Err(e) = self.services.media.retention_cancel_by_reaction(sender, relates_to_event).await { + debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āŒ reaction: {e}"); + } + } + "āš™ļø" => { + if let Err(e) = self.services.media.retention_auto_by_reaction(sender, relates_to_event).await { + debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āš™ļø reaction: {e}"); + } } - } else if emoji == "āŒ" { - // User cancelled deletion - the media service will redact the unused āœ… reaction - if let Err(e) = self.services.media.retention_cancel_by_reaction(sender, relates_to_event).await { - debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āŒ reaction: {e}"); + _ => { + debug_warn!("Unknown reaction emoji in user room: {}", emoji); } } } diff --git a/src/user/user.rs b/src/user/user.rs index c262c980f..8dfcbd676 100644 --- a/src/user/user.rs +++ b/src/user/user.rs @@ -40,7 +40,21 @@ mod retention { #[command_dispatch] #[derive(Debug, Subcommand)] pub(crate) enum Cmd { - Confirm { mxc: String }, + Confirm { + mxc: String, + }, + /// Show current 
auto-delete preferences + PrefsShow, + /// Enable auto-delete for encrypted rooms + PrefsEncryptedOn, + /// Disable auto-delete for encrypted rooms + PrefsEncryptedOff, + /// Enable auto-delete for unencrypted rooms + PrefsUnencryptedOn, + /// Disable auto-delete for unencrypted rooms + PrefsUnencryptedOff, + /// Reset all preferences (disable auto-delete for both) + PrefsReset, } #[command] @@ -59,4 +73,123 @@ mod retention { Ok(format!("Confirmed deletion for {mxc}. {summary}")) } + + #[command] + pub(super) async fn prefs_show(&self) -> Result { + let prefs = self + .services + .media + .retention + .get_user_prefs(self.sender.as_str()) + .await; + + Ok(format!( + "Current auto-delete preferences:\n\ + - Encrypted rooms: {}\n\ + - Unencrypted rooms: {}", + if prefs.auto_delete_encrypted { + "enabled āœ…" + } else { + "disabled āŒ" + }, + if prefs.auto_delete_unencrypted { + "enabled āœ…" + } else { + "disabled āŒ" + } + )) + } + + #[command] + pub(super) async fn prefs_encrypted_on(&self) -> Result { + let mut prefs = self + .services + .media + .retention + .get_user_prefs(self.sender.as_str()) + .await; + + prefs.auto_delete_encrypted = true; + + self.services + .media + .retention + .set_user_prefs(self.sender.as_str(), &prefs) + .await?; + + Ok("Enabled auto-delete for encrypted rooms.".to_owned()) + } + + #[command] + pub(super) async fn prefs_encrypted_off(&self) -> Result { + let mut prefs = self + .services + .media + .retention + .get_user_prefs(self.sender.as_str()) + .await; + + prefs.auto_delete_encrypted = false; + + self.services + .media + .retention + .set_user_prefs(self.sender.as_str(), &prefs) + .await?; + + Ok("Disabled auto-delete for encrypted rooms.".to_owned()) + } + + #[command] + pub(super) async fn prefs_unencrypted_on(&self) -> Result { + let mut prefs = self + .services + .media + .retention + .get_user_prefs(self.sender.as_str()) + .await; + + prefs.auto_delete_unencrypted = true; + + self.services + .media + .retention + 
.set_user_prefs(self.sender.as_str(), &prefs) + .await?; + + Ok("Enabled auto-delete for unencrypted rooms.".to_owned()) + } + + #[command] + pub(super) async fn prefs_unencrypted_off(&self) -> Result { + let mut prefs = self + .services + .media + .retention + .get_user_prefs(self.sender.as_str()) + .await; + + prefs.auto_delete_unencrypted = false; + + self.services + .media + .retention + .set_user_prefs(self.sender.as_str(), &prefs) + .await?; + + Ok("Disabled auto-delete for unencrypted rooms.".to_owned()) + } + + #[command] + pub(super) async fn prefs_reset(&self) -> Result { + let prefs = Default::default(); + + self.services + .media + .retention + .set_user_prefs(self.sender.as_str(), &prefs) + .await?; + + Ok("Reset all auto-delete preferences. All auto-delete settings disabled.".to_owned()) + } } From a6daa9215143209b74a9e5cbd9b7339bcd3a5dbf Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Mon, 20 Oct 2025 20:05:24 +0200 Subject: [PATCH 10/17] fmt --- src/service/media/mod.rs | 370 ++++++----- src/service/media/retention.rs | 86 ++- src/service/rooms/timeline/append.rs | 876 ++++++++++++++------------- src/service/rooms/timeline/build.rs | 233 ++++--- src/service/userroom/mod.rs | 66 +- src/user/user.rs | 4 +- 6 files changed, 918 insertions(+), 717 deletions(-) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 232625c91..3a17d29d7 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -27,7 +27,6 @@ use tokio::{ use tuwunel_core::{ Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, - warn, }; pub use self::thumbnail::Dim; @@ -70,7 +69,7 @@ enum CandidateAction { struct RetentionCandidate { mxc: String, room_id: Option, - sender: Option, // user ID who uploaded the media + sender: Option, // user ID who uploaded the media from_encrypted_room: bool, // Was this from m.room.encrypted event? 
} @@ -104,7 +103,7 @@ impl crate::Service for Service { self.create_media_dir().await?; // startup summary for retention configuration - warn!( + debug_warn!( policy = self .services .server @@ -127,13 +126,13 @@ impl crate::Service for Service { ); let retention = self.retention.clone(); let this = self.clone(); - warn!("creating media deletion worker"); + debug_warn!("creating media deletion worker"); tokio::spawn(async move { loop { if let Err(e) = retention.worker_process_queue(&this, grace).await { debug_warn!("media retention worker error: {e}"); } - tokio::time::sleep(Duration::from_secs(10)).await; + tokio::time::sleep(Duration::from_secs(10)).await; //todo: make configurable / sleep for longer } }); @@ -157,18 +156,22 @@ impl Service { .insert_mxcs_on_event(event_id, room_id, sender, mxcs); } - /// Track a media upload for potential association with an upcoming encrypted event. + /// Track a media upload for potential association with an upcoming + /// encrypted event. pub fn retention_track_pending_upload(&self, user_id: &str, mxc: &str) { self.retention.track_pending_upload(user_id, mxc); } - /// Consume pending uploads for a user and return them as MXC refs for an encrypted event. + /// Consume pending uploads for a user and return them as MXC refs for an + /// encrypted event. 
pub async fn retention_consume_pending_uploads( &self, user_id: &str, event_ts: u64, ) -> Vec<(String, bool, String)> { - self.retention.consume_pending_uploads(user_id, event_ts).await + self.retention + .consume_pending_uploads(user_id, event_ts) + .await } pub async fn retention_decrement_on_redaction(&self, event_id: &str) { @@ -185,7 +188,12 @@ impl Service { let mut is_encrypted_event = false; if let Ok(parsed_eid) = EventId::parse(event_id) { - if let Ok(canonical) = self.services.timeline.get_pdu_json(&parsed_eid).await { + if let Ok(canonical) = self + .services + .timeline + .get_pdu_json(&parsed_eid) + .await + { if let Ok(val) = serde_json::to_value(&canonical) { if let Some(event_type) = val.get("type").and_then(|v| v.as_str()) { is_encrypted_event = event_type == "m.room.encrypted"; @@ -201,16 +209,14 @@ impl Service { .await { if !primary.is_empty() { - candidates.extend( - primary - .into_iter() - .map(|(mxc, room_id, sender)| RetentionCandidate { - mxc, - room_id: Some(room_id), - sender, - from_encrypted_room: is_encrypted_event, - }), - ); + candidates.extend(primary.into_iter().map(|(mxc, room_id, sender)| { + RetentionCandidate { + mxc, + room_id: Some(room_id), + sender, + from_encrypted_room: is_encrypted_event, + } + })); } } @@ -223,14 +229,16 @@ impl Service { .get("room_id") .and_then(|v| v.as_str()) .map(str::to_owned); - candidates.extend(discovered.into_iter().map(|mxc| { - RetentionCandidate { - mxc, - room_id: room_id.clone(), - sender: None, - from_encrypted_room: is_encrypted_event, - } - })); + candidates.extend( + discovered + .into_iter() + .map(|mxc| RetentionCandidate { + mxc, + room_id: room_id.clone(), + sender: None, + from_encrypted_room: is_encrypted_event, + }), + ); } } } @@ -265,7 +273,7 @@ impl Service { ); if should_auto_delete { - warn!( + debug_warn!( mxc = %candidate.mxc, sender = ?candidate.sender, from_encrypted = candidate.from_encrypted_room, @@ -273,12 +281,16 @@ impl Service { ); 
self.retention.queue_media_for_deletion( &candidate.mxc, - candidate.sender.as_ref().and_then(|s| UserId::parse(s).ok()).as_deref(), + candidate + .sender + .as_ref() + .and_then(|s| UserId::parse(s).ok()) + .as_deref(), false, // No confirmation needed - None, // No notification - None, // No reactions - None, // No reactions - None, // No auto-delete reaction + None, // No notification + None, // No reactions + None, // No reactions + None, // No auto-delete reaction candidate.from_encrypted_room, ); continue; @@ -303,63 +315,87 @@ impl Service { ); }, | (CandidateAction::AwaitConfirmation, Some(owner)) => { - // Send notification to the uploader's user room (not the room where it was posted!) - let (notification_event_id, confirm_reaction_id, cancel_reaction_id, auto_reaction_id) = - if self.services.globals.user_is_local(owner.as_ref()) { - let body = self.build_retention_notice(&candidate, event_value.as_ref()); - match self - .services - .userroom - .send_text_with_event_id(owner.as_ref(), &body) - .await - { - | Ok(event_id) => { - // add reaction options: - // āœ… to confirm deletion - // āŒ to cancel - // āš™ļø to always auto-delete for room type - let confirm_id = match self.services.userroom.add_reaction(owner.as_ref(), &event_id, "āœ…").await { - | Ok(id) => Some(id.to_string()), - | Err(e) => { - warn!(%event_id, "retention: failed to add āœ… reaction: {e}"); - None - } - }; - let cancel_id = match self.services.userroom.add_reaction(owner.as_ref(), &event_id, "āŒ").await { - | Ok(id) => Some(id.to_string()), - | Err(e) => { - warn!(%event_id, "retention: failed to add āŒ reaction: {e}"); - None - } - }; - let auto_id = match self.services.userroom.add_reaction(owner.as_ref(), &event_id, "āš™ļø").await { - | Ok(id) => Some(id.to_string()), - | Err(e) => { - warn!(%event_id, "retention: failed to add āš™ļø reaction: {e}"); - None - } - }; - - debug_info!( - %event_id, - mxc = %candidate.mxc, - user = owner.as_str(), - "retention: sent user 
confirmation request with reactions to their user room" - ); - (Some(event_id.to_string()), confirm_id, cancel_id, auto_id) - }, - | Err(e) => { - warn!( - mxc = %candidate.mxc, - user = owner.as_str(), - "retention: failed to notify user about pending deletion: {e}", - ); - (None, None, None, None) - }, - } - } else { - (None, None, None, None) - }; + // Send notification to the uploader's user room (not the room where it was + // posted!) + let ( + notification_event_id, + confirm_reaction_id, + cancel_reaction_id, + auto_reaction_id, + ) = if self + .services + .globals + .user_is_local(owner.as_ref()) + { + let body = self.build_retention_notice(&candidate, event_value.as_ref()); + match self + .services + .userroom + .send_text_with_event_id(owner.as_ref(), &body) + .await + { + | Ok(event_id) => { + // add reaction options: + // āœ… to confirm deletion + // āŒ to cancel + // āš™ļø to always auto-delete for room type + let confirm_id = match self + .services + .userroom + .add_reaction(owner.as_ref(), &event_id, "āœ…") + .await + { + | Ok(id) => Some(id.to_string()), + | Err(e) => { + debug_warn!(%event_id, "retention: failed to add āœ… reaction: {e}"); + None + }, + }; + let cancel_id = match self + .services + .userroom + .add_reaction(owner.as_ref(), &event_id, "āŒ") + .await + { + | Ok(id) => Some(id.to_string()), + | Err(e) => { + debug_warn!(%event_id, "retention: failed to add āŒ reaction: {e}"); + None + }, + }; + let auto_id = match self + .services + .userroom + .add_reaction(owner.as_ref(), &event_id, "āš™ļø") + .await + { + | Ok(id) => Some(id.to_string()), + | Err(e) => { + debug_warn!(%event_id, "retention: failed to add āš™ļø reaction: {e}"); + None + }, + }; + + debug_info!( + %event_id, + mxc = %candidate.mxc, + user = owner.as_str(), + "retention: sent user confirmation request with reactions to their user room" + ); + (Some(event_id.to_string()), confirm_id, cancel_id, auto_id) + }, + | Err(e) => { + debug_warn!( + mxc = 
%candidate.mxc, + user = owner.as_str(), + "retention: failed to notify user about pending deletion: {e}", + ); + (None, None, None, None) + }, + } + } else { + (None, None, None, None) + }; // Queue for deletion with the notification and reaction event IDs self.retention.queue_media_for_deletion( @@ -374,7 +410,7 @@ impl Service { ); }, | (CandidateAction::AwaitConfirmation, None) => { - warn!(%event_id, mxc = %candidate.mxc, "retention: confirmation requested but owner is unknown"); + debug_warn!(%event_id, mxc = %candidate.mxc, "retention: confirmation requested but owner is unknown"); }, | (CandidateAction::Skip, _) => { debug!(%event_id, mxc = %candidate.mxc, "retention: skipping deletion for candidate"); @@ -403,12 +439,12 @@ impl Service { .sender .as_deref() .and_then(|s| OwnedUserId::try_from(s.to_owned()).ok()); - + // Fallback to database lookup if sender not in candidate if owner.is_none() { owner = self.db.get_media_owner(&candidate.mxc).await; } - + // Last resort: try to get from event value if owner.is_none() { if let Some(val) = event_value { @@ -477,116 +513,172 @@ impl Service { .unwrap_or_default(); let encryption_warning = if candidate.from_encrypted_room { - "\n\nāš ļø WARNING: This media was detected from an encrypted room based on upload timing. \ - Detection may have false positives since the server cannot read encrypted messages. \ - Use auto-delete at your own risk." + "\n\nāš ļø WARNING: This media was detected from an encrypted room based on upload \ + timing. Detection may have false positives since the server cannot read encrypted \ + messages. Use auto-delete at your own risk." 
} else { "" }; - let room_type = if candidate.from_encrypted_room { "encrypted rooms" } else { "unencrypted rooms" }; + let room_type = if candidate.from_encrypted_room { + "encrypted rooms" + } else { + "unencrypted rooms" + }; format!( - "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending deletion.{encryption_warning}\n\n\ - React with:\n\ - āœ… to confirm deletion\n\ - āŒ to keep it\n\ - āš™ļø to always auto-delete media in {room_type}\n\n\ - (You can also run `!user retention confirm {mxc}` to delete it manually.)", + "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending \ + deletion.{encryption_warning}\n\nReact with:\nāœ… to confirm deletion\nāŒ to keep \ + it\nāš™ļø to always auto-delete media in {room_type}\n\n(You can also run `!user \ + retention confirm {mxc}` to delete it manually.)", mxc = candidate.mxc ) } + pub async fn retention_confirm_deletion(&self, user: &UserId, mxc: &str) -> Result { - let (deleted_bytes, cancel_reaction_id) = self.retention.confirm_candidate(self, mxc, user).await?; - + let (deleted_bytes, cancel_reaction_id) = self + .retention + .confirm_candidate(self, mxc, user) + .await?; + // Redact the unused āŒ reaction to clean up the UI (spawned as background task) if let Some(reaction_id_str) = cancel_reaction_id { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { - self.services.userroom.redact_reaction(user, &reaction_id); + self.services + .userroom + .redact_reaction(user, &reaction_id); } } - + Ok(deleted_bytes) } /// Confirm deletion (āœ… reaction) on the notification message - pub async fn retention_confirm_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result { + pub async fn retention_confirm_by_reaction( + &self, + user: &UserId, + notification_event_id: &EventId, + ) -> Result { // Find the deletion candidate by notification event ID - if let Some(mxc) = self.retention.find_mxc_by_notification_event(notification_event_id.as_str()).await { + 
if let Some(mxc) = self + .retention + .find_mxc_by_notification_event(notification_event_id.as_str()) + .await + { debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user confirmed deletion via āœ… reaction"); - let (deleted_bytes, cancel_reaction_id) = self.retention.confirm_candidate(self, &mxc, user).await?; - + let (deleted_bytes, cancel_reaction_id) = self + .retention + .confirm_candidate(self, &mxc, user) + .await?; + // Redact the unused āŒ reaction to clean up the UI (spawned as background task) if let Some(reaction_id_str) = cancel_reaction_id { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { - self.services.userroom.redact_reaction(user, &reaction_id); + self.services + .userroom + .redact_reaction(user, &reaction_id); } } - + Ok(deleted_bytes) } else { - warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); + debug_warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); Ok(0) } } /// Cancel deletion (āŒ reaction) on the notification message - pub async fn retention_cancel_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result<()> { + pub async fn retention_cancel_by_reaction( + &self, + user: &UserId, + notification_event_id: &EventId, + ) -> Result<()> { // Find the deletion candidate by notification event ID - if let Some(mxc) = self.retention.find_mxc_by_notification_event(notification_event_id.as_str()).await { + if let Some(mxc) = self + .retention + .find_mxc_by_notification_event(notification_event_id.as_str()) + .await + { debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user cancelled deletion via āŒ reaction"); - let confirm_reaction_id = self.retention.cancel_candidate(&mxc, user).await?; - + let confirm_reaction_id = self + .retention + .cancel_candidate(&mxc, user) + .await?; + // Redact the unused āœ… reaction to clean up the 
UI (spawned as background task) if let Some(reaction_id_str) = confirm_reaction_id { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { - self.services.userroom.redact_reaction(user, &reaction_id); + self.services + .userroom + .redact_reaction(user, &reaction_id); } } - + Ok(()) } else { - warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); + debug_warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); Ok(()) } } - /// Auto-delete (āš™ļø reaction) - enable auto-delete for this room type and delete immediately - pub async fn retention_auto_by_reaction(&self, user: &UserId, notification_event_id: &EventId) -> Result { + /// Auto-delete (āš™ļø reaction) - enable auto-delete for this room type and + /// delete immediately + pub async fn retention_auto_by_reaction( + &self, + user: &UserId, + notification_event_id: &EventId, + ) -> Result { // Find the deletion candidate by notification event ID - if let Some(mxc) = self.retention.find_mxc_by_notification_event(notification_event_id.as_str()).await { + if let Some(mxc) = self + .retention + .find_mxc_by_notification_event(notification_event_id.as_str()) + .await + { debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user enabled auto-delete via āš™ļø reaction"); - let (deleted_bytes, confirm_reaction_id, cancel_reaction_id, from_encrypted_room) = - self.retention.auto_delete_candidate(self, &mxc, user).await?; - - let room_type = if from_encrypted_room { "encrypted" } else { "unencrypted" }; - let command = if from_encrypted_room { "prefs-encrypted-off" } else { "prefs-unencrypted-off" }; - + let (deleted_bytes, confirm_reaction_id, cancel_reaction_id, from_encrypted_room) = + self.retention + .auto_delete_candidate(self, &mxc, user) + .await?; + + let room_type = if from_encrypted_room { + "encrypted" + } else { + "unencrypted" + }; + let command = if 
from_encrypted_room { + "prefs-encrypted-off" + } else { + "prefs-unencrypted-off" + }; + // Send confirmation message in background to avoid async recursion self.services.userroom.send_text_background( - user, + user, &format!( - "āœ… Auto-delete enabled for {} rooms.\n\n\ - To disable: `!user retention {}`", + "āœ… Auto-delete enabled for {} rooms.\n\nTo disable: `!user retention {}`", room_type, command - ) + ), ); // Redact both unused reactions to clean up the UI (spawned as background tasks) if let Some(reaction_id_str) = confirm_reaction_id { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { - self.services.userroom.redact_reaction(user, &reaction_id); + self.services + .userroom + .redact_reaction(user, &reaction_id); } } if let Some(reaction_id_str) = cancel_reaction_id { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { - self.services.userroom.redact_reaction(user, &reaction_id); + self.services + .userroom + .redact_reaction(user, &reaction_id); } } - + Ok(deleted_bytes) } else { - warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); + debug_warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); Ok(0) } } @@ -866,7 +958,7 @@ impl Service { deletion_count = deletion_count.saturating_add(1); }, | Err(e) => { - warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); + debug_warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); continue; }, } diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index f95dbe8fe..5a3725998 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -7,7 +7,7 @@ use std::{ use futures::StreamExt; use ruma::UserId; use serde::{Deserialize, Serialize}; -use tuwunel_core::{Result, err, trace, warn}; +use tuwunel_core::{Result, debug_warn, err, trace, warn}; use tuwunel_database::{Cbor, Deserialized, Map, keyval::serialize_val}; use 
super::Service; @@ -73,7 +73,8 @@ pub(crate) struct DeletionCandidate { pub user_id: Option, #[serde(default)] pub awaiting_confirmation: bool, - /// Event ID of the notification message sent to the user (for reaction handling) + /// Event ID of the notification message sent to the user (for reaction + /// handling) #[serde(default)] pub notification_event_id: Option, /// Event ID of the āœ… reaction (for cleanup) @@ -314,8 +315,9 @@ impl Retention { Ok(to_delete) } - /// Track a media upload that might be used in an upcoming encrypted message. - /// These pending uploads will be matched to encrypted events within a time window. + /// Track a media upload that might be used in an upcoming encrypted + /// message. These pending uploads will be matched to encrypted events + /// within a time window. pub(super) fn track_pending_upload(&self, user_id: &str, mxc: &str) { let upload_ts = SystemTime::now() .duration_since(UNIX_EPOCH) @@ -329,7 +331,10 @@ impl Retention { }; let key = Self::key_pending(user_id, upload_ts); - warn!(user_id, mxc, upload_ts, "retention: tracking pending upload for encrypted event association"); + warn!( + user_id, + mxc, upload_ts, "retention: tracking pending upload for encrypted event association" + ); self.cf.raw_put(key, Cbor(&pending)); // Clean up old pending uploads (older than 60 seconds) asynchronously @@ -379,7 +384,8 @@ impl Retention { // Remove consumed/old pending uploads if !to_delete.is_empty() { - self.cf.write_batch_raw(std::iter::empty(), to_delete); + self.cf + .write_batch_raw(std::iter::empty(), to_delete); } found_mxcs @@ -396,7 +402,8 @@ impl Retention { let prefix = Self::pending_prefix(&user_id); let mut to_delete: Vec> = Vec::new(); - let mut stream = cf.stream_raw_prefix::<&str, Cbor, _>(prefix.as_bytes()); + let mut stream = + cf.stream_raw_prefix::<&str, Cbor, _>(prefix.as_bytes()); while let Some(item) = stream.next().await.transpose().ok().flatten() { let (key, Cbor(pending)) = item; @@ -469,9 +476,7 @@ impl 
Retention { return Err(err!(Request(Forbidden("media candidate owned by another user")))); }; if !candidate.awaiting_confirmation { - return Err(err!(Request(InvalidParam( - "media deletion already processed", - )))); + return Err(err!(Request(InvalidParam("media deletion already processed",)))); } // Save the cancel reaction ID to redact it @@ -485,12 +490,20 @@ impl Retention { dels.push(key.into_bytes()); dels.push(Self::key_mref(mxc).into_bytes()); self.cf.write_batch_raw(std::iter::empty(), dels); - warn!(mxc, bytes = deleted_bytes, user = requester.as_str(), "retention: media deletion confirmed by user"); + warn!( + mxc, + bytes = deleted_bytes, + user = requester.as_str(), + "retention: media deletion confirmed by user" + ); Ok((deleted_bytes, cancel_reaction_to_redact)) } /// Find MXC by notification event ID (for reaction-based confirmation) - pub(super) async fn find_mxc_by_notification_event(&self, notification_event_id: &str) -> Option { + pub(super) async fn find_mxc_by_notification_event( + &self, + notification_event_id: &str, + ) -> Option { let prefix = K_QUEUE.as_bytes(); let mut stream = self .cf @@ -510,7 +523,11 @@ impl Retention { /// Cancel a deletion candidate (remove from queue) /// Returns the confirm reaction ID to redact it - pub(super) async fn cancel_candidate(&self, mxc: &str, requester: &UserId) -> Result> { + pub(super) async fn cancel_candidate( + &self, + mxc: &str, + requester: &UserId, + ) -> Result> { let key = Self::key_queue(mxc); match self.cf.get(&key).await { | Ok(handle) => { @@ -520,7 +537,9 @@ impl Retention { return Err(err!(Request(Forbidden("media candidate owner unknown")))); }; if owner != requester.as_str() { - return Err(err!(Request(Forbidden("media candidate owned by another user")))); + return Err(err!(Request(Forbidden( + "media candidate owned by another user" + )))); } // Save the confirm reaction ID to redact it @@ -528,15 +547,20 @@ impl Retention { // Remove from queue self.cf.remove(key.as_str()); - 
warn!(mxc, user = requester.as_str(), "retention: media deletion cancelled by user"); + warn!( + mxc, + user = requester.as_str(), + "retention: media deletion cancelled by user" + ); Ok(confirm_reaction_to_redact) }, | Err(_) => Err(err!(Request(NotFound("no pending deletion for this media")))), } } - /// Enable auto-delete for the room type (encrypted/unencrypted) and confirm deletion - /// Returns: (deleted_bytes, confirm_reaction_id, cancel_reaction_id) to redact unused reactions + /// Enable auto-delete for the room type (encrypted/unencrypted) and confirm + /// deletion Returns: (deleted_bytes, confirm_reaction_id, + /// cancel_reaction_id) to redact unused reactions pub(super) async fn auto_delete_candidate( &self, service: &Service, @@ -552,7 +576,9 @@ impl Retention { return Err(err!(Request(Forbidden("media candidate owner unknown")))); }; if owner != requester.as_str() { - return Err(err!(Request(Forbidden("media candidate owned by another user")))); + return Err(err!(Request(Forbidden( + "media candidate owned by another user" + )))); } let from_encrypted_room = candidate.from_encrypted_room; @@ -565,7 +591,8 @@ impl Retention { prefs.auto_delete_unencrypted = true; warn!(user = %requester, "retention: enabled auto-delete for unencrypted rooms"); } - self.set_user_prefs(requester.as_str(), &prefs).await?; + self.set_user_prefs(requester.as_str(), &prefs) + .await?; let confirm_reaction_to_redact = candidate.confirm_reaction_id.clone(); let cancel_reaction_to_redact = candidate.cancel_reaction_id.clone(); @@ -582,7 +609,12 @@ impl Retention { from_encrypted = from_encrypted_room, "retention: media auto-deleted and preference saved" ); - Ok((deleted_bytes, confirm_reaction_to_redact, cancel_reaction_to_redact, from_encrypted_room)) + Ok(( + deleted_bytes, + confirm_reaction_to_redact, + cancel_reaction_to_redact, + from_encrypted_room, + )) }, | Err(_) => Err(err!(Request(NotFound("no pending deletion for this media")))), } @@ -595,7 +627,7 @@ impl 
Retention { grace: Duration, ) -> Result<()> { let prefix = K_QUEUE.as_bytes(); - warn!(?grace, "retention: worker iteration start"); + debug_warn!(?grace, "retention: worker iteration start"); let mut stream = self .cf .stream_raw_prefix::<&str, Cbor, _>(&prefix); @@ -605,12 +637,12 @@ impl Retention { let (key, Cbor(cand)) = item; let now = now_secs(); if cand.awaiting_confirmation { - warn!(mxc = %cand.mxc, "retention: awaiting user confirmation, skipping candidate"); + debug_warn!(mxc = %cand.mxc, "retention: awaiting user confirmation, skipping candidate"); continue; } if now < cand.enqueued_ts.saturating_add(grace.as_secs()) { - warn!(mxc = %cand.mxc, wait = cand.enqueued_ts + grace.as_secs() - now, "retention: grace period not met yet"); + debug_warn!(mxc = %cand.mxc, wait = cand.enqueued_ts + grace.as_secs() - now, "retention: grace period not met yet"); continue; } @@ -620,9 +652,9 @@ impl Retention { .await .unwrap_or(0); if deleted_bytes > 0 { - warn!(mxc = %cand.mxc, bytes = deleted_bytes, "retention: media deleted"); + debug_warn!(mxc = %cand.mxc, bytes = deleted_bytes, "retention: media deleted"); } else { - warn!(mxc = %cand.mxc, "retention: queued media had no bytes deleted (already gone?)"); + debug_warn!(mxc = %cand.mxc, "retention: queued media had no bytes deleted (already gone?)"); } // remove metadata entries (best-effort) @@ -632,9 +664,9 @@ impl Retention { deleted = deleted.saturating_add(1); } if processed == 0 { - warn!("retention: worker iteration found no deletion candidates"); + debug_warn!("retention: worker iteration found no deletion candidates"); } else { - warn!(processed, deleted, "retention: worker iteration complete"); + debug_warn!(processed, deleted, "retention: worker iteration complete"); } Ok(()) diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 006364121..ab2f4465b 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -1,11 +1,10 @@ use 
std::{ collections::{BTreeMap, HashSet}, + pin::Pin, sync::Arc, }; use futures::StreamExt; -use std::pin::Pin; -use tracing::warn; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedUserId, RoomId, RoomVersionId, UserId, events::{ @@ -19,6 +18,7 @@ use ruma::{ }, push::{Action, Ruleset, Tweak}, }; +use tracing::warn; use tuwunel_core::{ Result, err, error, implement, matrix::{ @@ -98,475 +98,534 @@ impl super::Service { where Leafs: Iterator + Send + 'a, { - // Coalesce database writes for the remainder of this scope. - let _cork = self.db.db.cork_and_flush(); + // Coalesce database writes for the remainder of this scope. + let _cork = self.db.db.cork_and_flush(); - let shortroomid = self - .services - .short - .get_shortroomid(pdu.room_id()) - .await - .map_err(|_| err!(Database("Room does not exist")))?; - - // Make unsigned fields correct. This is not properly documented in the spec, - // but state events need to have previous content in the unsigned field, so - // clients can easily interpret things like membership changes - if let Some(state_key) = pdu.state_key() { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default())) - { - if let Ok(shortstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(pdu.event_id()) - .await + let shortroomid = self + .services + .short + .get_shortroomid(pdu.room_id()) + .await + .map_err(|_| err!(Database("Room does not exist")))?; + + // Make unsigned fields correct. 
This is not properly documented in the spec, + // but state events need to have previous content in the unsigned field, so + // clients can easily interpret things like membership changes + if let Some(state_key) = pdu.state_key() { + if let CanonicalJsonValue::Object(unsigned) = pdu_json + .entry("unsigned".to_owned()) + .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default())) { - if let Ok(prev_state) = self + if let Ok(shortstatehash) = self .services .state_accessor - .state_get(shortstatehash, &pdu.kind().to_string().into(), state_key) + .pdu_shortstatehash(pdu.event_id()) .await { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.get_content_as_value()) - .map_err(|e| { - err!(Database(error!( - "Failed to convert prev_state to canonical JSON: {e}", - ))) - })?, - ), - ); - unsigned.insert( - String::from("prev_sender"), - CanonicalJsonValue::String(prev_state.sender().to_string()), - ); - unsigned.insert( - String::from("replaces_state"), - CanonicalJsonValue::String(prev_state.event_id().to_string()), - ); + if let Ok(prev_state) = self + .services + .state_accessor + .state_get(shortstatehash, &pdu.kind().to_string().into(), state_key) + .await + { + unsigned.insert( + "prev_content".to_owned(), + CanonicalJsonValue::Object( + utils::to_canonical_object(prev_state.get_content_as_value()) + .map_err(|e| { + err!(Database(error!( + "Failed to convert prev_state to canonical JSON: {e}", + ))) + })?, + ), + ); + unsigned.insert( + String::from("prev_sender"), + CanonicalJsonValue::String(prev_state.sender().to_string()), + ); + unsigned.insert( + String::from("replaces_state"), + CanonicalJsonValue::String(prev_state.event_id().to_string()), + ); + } } + } else { + error!("Invalid unsigned type in pdu."); } - } else { - error!("Invalid unsigned type in pdu."); } - } - // We must keep track of all events that have been referenced. 
- self.services - .pdu_metadata - .mark_as_referenced(pdu.room_id(), pdu.prev_events().map(AsRef::as_ref)); + // We must keep track of all events that have been referenced. + self.services + .pdu_metadata + .mark_as_referenced(pdu.room_id(), pdu.prev_events().map(AsRef::as_ref)); - self.services - .state - .set_forward_extremities(pdu.room_id(), leafs, state_lock) - .await; + self.services + .state + .set_forward_extremities(pdu.room_id(), leafs, state_lock) + .await; - let insert_lock = self.mutex_insert.lock(pdu.room_id()).await; - let next_count1 = self.services.globals.next_count(); - let next_count2 = self.services.globals.next_count(); + let insert_lock = self.mutex_insert.lock(pdu.room_id()).await; + let next_count1 = self.services.globals.next_count(); + let next_count2 = self.services.globals.next_count(); - // Mark as read first so the sending client doesn't get a notification even if - // appending fails - self.services - .read_receipt - .private_read_set(pdu.room_id(), pdu.sender(), *next_count2); + // Mark as read first so the sending client doesn't get a notification even if + // appending fails + self.services + .read_receipt + .private_read_set(pdu.room_id(), pdu.sender(), *next_count2); - self.services - .user - .reset_notification_counts(pdu.sender(), pdu.room_id()); + self.services + .user + .reset_notification_counts(pdu.sender(), pdu.room_id()); - let count = PduCount::Normal(*next_count1); - let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count }.into(); + let count = PduCount::Normal(*next_count1); + let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count }.into(); - // Insert pdu - self.append_pdu_json(&pdu_id, pdu, &pdu_json, count); + // Insert pdu + self.append_pdu_json(&pdu_id, pdu, &pdu_json, count); - drop(insert_lock); + drop(insert_lock); - // Don't notify the sender of their own events, and dont send from ignored users - let mut push_target: HashSet<_> = self - .services - .state_cache - 
.active_local_users_in_room(pdu.room_id()) - .map(ToOwned::to_owned) - .ready_filter(|user| *user != pdu.sender()) - .filter_map(async |recipient_user| { - self.services - .users - .user_is_ignored(pdu.sender(), &recipient_user) - .await - .eq(&false) - .then_some(recipient_user) - }) - .collect() - .await; + // Don't notify the sender of their own events, and dont send from ignored users + let mut push_target: HashSet<_> = self + .services + .state_cache + .active_local_users_in_room(pdu.room_id()) + .map(ToOwned::to_owned) + .ready_filter(|user| *user != pdu.sender()) + .filter_map(async |recipient_user| { + self.services + .users + .user_is_ignored(pdu.sender(), &recipient_user) + .await + .eq(&false) + .then_some(recipient_user) + }) + .collect() + .await; - let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1)); - let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1)); + let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1)); + let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1)); - if *pdu.kind() == TimelineEventType::RoomMember { - if let Some(state_key) = pdu.state_key() { - let target_user_id = UserId::parse(state_key)?; + if *pdu.kind() == TimelineEventType::RoomMember { + if let Some(state_key) = pdu.state_key() { + let target_user_id = UserId::parse(state_key)?; - if self - .services - .users - .is_active_local(target_user_id) - .await - { - push_target.insert(target_user_id.to_owned()); + if self + .services + .users + .is_active_local(target_user_id) + .await + { + push_target.insert(target_user_id.to_owned()); + } } } - } - let serialized = pdu.to_format(); - for user in &push_target { - let rules_for_user = self - .services - .account_data - .get_global(user, GlobalAccountDataEventType::PushRules) - .await - .map_or_else( - |_| Ruleset::server_default(user), - |ev: PushRulesEvent| ev.content.global, - ); + let serialized = pdu.to_format(); + for user in 
&push_target { + let rules_for_user = self + .services + .account_data + .get_global(user, GlobalAccountDataEventType::PushRules) + .await + .map_or_else( + |_| Ruleset::server_default(user), + |ev: PushRulesEvent| ev.content.global, + ); - let mut highlight = false; - let mut notify = false; + let mut highlight = false; + let mut notify = false; - let power_levels = self - .services - .state_accessor - .get_power_levels(pdu.room_id()) - .await?; + let power_levels = self + .services + .state_accessor + .get_power_levels(pdu.room_id()) + .await?; - for action in self - .services - .pusher - .get_actions(user, &rules_for_user, &power_levels, &serialized, pdu.room_id()) - .await - { - match action { - | Action::Notify => notify = true, - | Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - }, - | _ => {}, + for action in self + .services + .pusher + .get_actions(user, &rules_for_user, &power_levels, &serialized, pdu.room_id()) + .await + { + match action { + | Action::Notify => notify = true, + | Action::SetTweak(Tweak::Highlight(true)) => { + highlight = true; + }, + | _ => {}, + } + + // Break early if both conditions are true + if notify && highlight { + break; + } } - // Break early if both conditions are true - if notify && highlight { - break; + if notify { + notifies.push(user.clone()); } - } - if notify { - notifies.push(user.clone()); - } + if highlight { + highlights.push(user.clone()); + } - if highlight { - highlights.push(user.clone()); + self.services + .pusher + .get_pushkeys(user) + .ready_for_each(|push_key| { + self.services + .sending + .send_pdu_push(&pdu_id, user, push_key.to_owned()) + .expect("TODO: replace with future"); + }) + .await; } - self.services - .pusher - .get_pushkeys(user) - .ready_for_each(|push_key| { - self.services - .sending - .send_pdu_push(&pdu_id, user, push_key.to_owned()) - .expect("TODO: replace with future"); - }) - .await; - } - - self.increment_notification_counts(pdu.room_id(), notifies, highlights); 
+ self.increment_notification_counts(pdu.room_id(), notifies, highlights); - match *pdu.kind() { - | TimelineEventType::RoomRedaction => { - use RoomVersionId::*; + match *pdu.kind() { + | TimelineEventType::RoomRedaction => { + use RoomVersionId::*; - let room_version_id = self - .services - .state - .get_room_version(pdu.room_id()) - .await?; + let room_version_id = self + .services + .state + .get_room_version(pdu.room_id()) + .await?; - match room_version_id { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = pdu.redacts() { - if self - .services - .state_accessor - .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) - .await? - { - let fut = self + match room_version_id { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + if let Some(redact_id) = pdu.redacts() { + if self .services - .media - .retention_decrement_on_redaction(redact_id.as_str()); - Pin::from(Box::new(fut)).await; - - self.redact_pdu(redact_id, pdu, shortroomid) - .await?; + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? + { + let fut = self + .services + .media + .retention_decrement_on_redaction(redact_id.as_str()); + Pin::from(Box::new(fut)).await; + + self.redact_pdu(redact_id, pdu, shortroomid) + .await?; + } } - } - }, - | _ => { - let content: RoomRedactionEventContent = pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - if self - .services - .state_accessor - .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) - .await? - { - let fut = self + }, + | _ => { + let content: RoomRedactionEventContent = pdu.get_content()?; + if let Some(redact_id) = &content.redacts { + if self .services - .media - .retention_decrement_on_redaction(redact_id.as_str()); - Pin::from(Box::new(fut)).await; - - self.redact_pdu(redact_id, pdu, shortroomid) - .await?; + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? 
+ { + let fut = self + .services + .media + .retention_decrement_on_redaction(redact_id.as_str()); + Pin::from(Box::new(fut)).await; + + self.redact_pdu(redact_id, pdu, shortroomid) + .await?; + } } - } - }, - } - }, - | TimelineEventType::SpaceChild => - if let Some(_state_key) = pdu.state_key() { - self.services - .spaces - .roomid_spacehierarchy_cache - .lock() - .await - .remove(pdu.room_id()); + }, + } }, - | TimelineEventType::RoomMember => { - if let Some(state_key) = pdu.state_key() { - // if the state_key fails - let target_user_id = - UserId::parse(state_key).expect("This state_key was previously validated"); - - let content: RoomMemberEventContent = pdu.get_content()?; - let stripped_state = match content.membership { - | MembershipState::Invite | MembershipState::Knock => self - .services - .state - .summary_stripped(pdu) + | TimelineEventType::SpaceChild => + if let Some(_state_key) = pdu.state_key() { + self.services + .spaces + .roomid_spacehierarchy_cache + .lock() .await - .into(), - | _ => None, - }; + .remove(pdu.room_id()); + }, + | TimelineEventType::RoomMember => { + if let Some(state_key) = pdu.state_key() { + // if the state_key fails + let target_user_id = UserId::parse(state_key) + .expect("This state_key was previously validated"); + + let content: RoomMemberEventContent = pdu.get_content()?; + let stripped_state = match content.membership { + | MembershipState::Invite | MembershipState::Knock => self + .services + .state + .summary_stripped(pdu) + .await + .into(), + | _ => None, + }; + + // Update our membership info, we do this here incase a user is invited or + // knocked and immediately leaves we need the DB to record the invite or + // knock event for auth + self.services + .state_cache + .update_membership( + pdu.room_id(), + target_user_id, + content, + pdu.sender(), + stripped_state, + None, + true, + ) + .await?; + } + }, + | TimelineEventType::RoomMessage => { + let content: ExtractBody = pdu.get_content()?; + if let Some(body) 
= content.body { + self.services + .search + .index_pdu(shortroomid, &pdu_id, &body); - // Update our membership info, we do this here incase a user is invited or - // knocked and immediately leaves we need the DB to record the invite or - // knock event for auth - self.services - .state_cache - .update_membership( - pdu.room_id(), - target_user_id, - content, - pdu.sender(), - stripped_state, - None, - true, - ) - .await?; - } - }, - | TimelineEventType::RoomMessage => { - let content: ExtractBody = pdu.get_content()?; - if let Some(body) = content.body { - self.services - .search - .index_pdu(shortroomid, &pdu_id, &body); + self.services + .admin + .message_hook(&pdu.event_id, &pdu.room_id, &pdu.sender, &body) + .await; - self.services - .admin - .message_hook(&pdu.event_id, &pdu.room_id, &pdu.sender, &body) + self.services + .userroom + .message_hook(&pdu.event_id, &pdu.room_id, &pdu.sender, &body) + .await; + } + // media retention insertion (structured extraction for unencrypted messages) + if let Ok(msg_full) = + pdu.get_content::() + { + warn!(event_id=%pdu.event_id(), msg=?msg_full, "retention: debug message content"); + use ruma::events::room::MediaSource; + let mut mxcs: Vec<(String, bool, String)> = Vec::new(); + let push_media = |mxcs: &mut Vec<(String, bool, String)>, + src: &MediaSource, + label: &str, + this: &super::Service| { + let (maybe_mxc, enc) = match src { + | MediaSource::Plain(m) => (Some(m.to_string()), false), + | MediaSource::Encrypted(f) => (Some(f.url.to_string()), true), + }; + if let Some(uri) = maybe_mxc { + if uri.starts_with("mxc://") { + let local = >::try_from(uri.as_str()) + .map(|p| { + this.services + .globals + .server_is_ours(p.server_name) + }) + .unwrap_or(false); + mxcs.push((uri.clone(), local, label.to_owned())); + if enc { + warn!(event_id=%pdu.event_id(), label=%label, mxc=%uri, local, "retention: extracted encrypted media"); + } else { + warn!(event_id=%pdu.event_id(), label=%label, mxc=%uri, local, "retention: 
extracted plain media"); + } + } + } + }; + match &msg_full.msgtype { + | ruma::events::room::message::MessageType::Image(c) => { + push_media(&mut mxcs, &c.source, "image.source", self); + if let Some(info) = c.info.as_ref() { + if let Some(th) = info.thumbnail_source.as_ref() { + push_media(&mut mxcs, th, "image.thumbnail_source", self); + } + } + }, + | ruma::events::room::message::MessageType::File(c) => { + push_media(&mut mxcs, &c.source, "file.source", self); + if let Some(info) = c.info.as_ref() { + if let Some(th) = info.thumbnail_source.as_ref() { + push_media(&mut mxcs, th, "file.thumbnail_source", self); + } + } + }, + | ruma::events::room::message::MessageType::Video(c) => { + push_media(&mut mxcs, &c.source, "video.source", self); + if let Some(info) = c.info.as_ref() { + if let Some(th) = info.thumbnail_source.as_ref() { + push_media(&mut mxcs, th, "video.thumbnail_source", self); + } + } + }, + | ruma::events::room::message::MessageType::Audio(c) => { + push_media(&mut mxcs, &c.source, "audio.source", self); + }, + | _ => {}, + } + if mxcs.is_empty() { + warn!(event_id=%pdu.event_id(), "retention: no media sources extracted"); + } else { + warn!(event_id=%pdu.event_id(), count=mxcs.len(), "retention: inserting media refs"); + self.services + .media + .retention_insert_mxcs_on_event( + pdu.event_id().as_str(), + pdu.room_id().as_str(), + pdu.sender().as_str(), + &mxcs, + ); + } + } + }, + | TimelineEventType::RoomEncrypted => { + // For encrypted rooms: We can't read the content (it's E2EE), so we can't + // extract MXC URIs directly. However, we CAN associate recent media uploads + // with this encrypted event Strategy: When user uploads media, we track it + // as "pending". When they send an encrypted event within 60 seconds, we + // consume those pending uploads and associate them with this event. 
+ // todo: find a more realistic time window, 60s may be a bit long + + // Get the event timestamp (milliseconds since epoch) + let event_ts: u64 = pdu.origin_server_ts().get().into(); + + // Consume any pending uploads from this user within the last 60 seconds + let pending_mxcs = self + .services + .media + .retention_consume_pending_uploads(pdu.sender().as_str(), event_ts) .await; - self.services - .userroom - .message_hook(&pdu.event_id, &pdu.room_id, &pdu.sender, &body) - .await; - } - // media retention insertion (structured extraction for unencrypted messages) - if let Ok(msg_full) = pdu.get_content::() { - warn!(event_id=%pdu.event_id(), msg=?msg_full, "retention: debug message content"); - use ruma::events::room::MediaSource; - let mut mxcs: Vec<(String,bool,String)> = Vec::new(); - let push_media = |mxcs: &mut Vec<(String,bool,String)>, src: &MediaSource, label: &str, this: &super::Service| { - let (maybe_mxc, enc) = match src { MediaSource::Plain(m) => (Some(m.to_string()), false), MediaSource::Encrypted(f) => (Some(f.url.to_string()), true) }; - if let Some(uri) = maybe_mxc { if uri.starts_with("mxc://") { - let local = >::try_from(uri.as_str()).map(|p| this.services.globals.server_is_ours(p.server_name)).unwrap_or(false); - mxcs.push((uri.clone(), local, label.to_owned())); - if enc { warn!(event_id=%pdu.event_id(), label=%label, mxc=%uri, local, "retention: extracted encrypted media"); } else { warn!(event_id=%pdu.event_id(), label=%label, mxc=%uri, local, "retention: extracted plain media"); } - }} - }; - match &msg_full.msgtype { - ruma::events::room::message::MessageType::Image(c) => { push_media(&mut mxcs, &c.source, "image.source", self); if let Some(info)=c.info.as_ref(){ if let Some(th)=info.thumbnail_source.as_ref(){ push_media(&mut mxcs, th, "image.thumbnail_source", self); } } }, - ruma::events::room::message::MessageType::File(c) => { push_media(&mut mxcs, &c.source, "file.source", self); if let Some(info)=c.info.as_ref(){ if let 
Some(th)=info.thumbnail_source.as_ref(){ push_media(&mut mxcs, th, "file.thumbnail_source", self); } } }, - ruma::events::room::message::MessageType::Video(c) => { push_media(&mut mxcs, &c.source, "video.source", self); if let Some(info)=c.info.as_ref(){ if let Some(th)=info.thumbnail_source.as_ref(){ push_media(&mut mxcs, th, "video.thumbnail_source", self); } } }, - ruma::events::room::message::MessageType::Audio(c) => { push_media(&mut mxcs, &c.source, "audio.source", self); }, - _ => {}, + if !pending_mxcs.is_empty() { + warn!( + event_id=%pdu.event_id(), + sender=%pdu.sender(), + room=%pdu.room_id(), + count=pending_mxcs.len(), + "retention: associated pending uploads with encrypted event" + ); + self.services + .media + .retention_insert_mxcs_on_event( + pdu.event_id().as_str(), + pdu.room_id().as_str(), + pdu.sender().as_str(), + &pending_mxcs, + ); + } else { + warn!( + event_id=%pdu.event_id(), + sender=%pdu.sender(), + room=%pdu.room_id(), + "retention: no pending uploads found for encrypted event" + ); } - if mxcs.is_empty() { warn!(event_id=%pdu.event_id(), "retention: no media sources extracted"); } - else { warn!(event_id=%pdu.event_id(), count=mxcs.len(), "retention: inserting media refs"); self.services.media.retention_insert_mxcs_on_event(pdu.event_id().as_str(), pdu.room_id().as_str(), pdu.sender().as_str(), &mxcs); } - } - }, - | TimelineEventType::RoomEncrypted => { - // For encrypted rooms: We can't read the content (it's E2EE), so we can't extract MXC URIs directly. - // However, we CAN associate recent media uploads with this encrypted event - // Strategy: When user uploads media, we track it as "pending". When they send an encrypted event - // within 60 seconds, we consume those pending uploads and associate them with this event. 
- // todo: find a more realistic time window, 60s may be a bit long - - // Get the event timestamp (milliseconds since epoch) - let event_ts: u64 = pdu.origin_server_ts().get().into(); - - // Consume any pending uploads from this user within the last 60 seconds - let pending_mxcs = self.services - .media - .retention_consume_pending_uploads(pdu.sender().as_str(), event_ts) - .await; + }, + | _ => {}, + } - if !pending_mxcs.is_empty() { - warn!( - event_id=%pdu.event_id(), - sender=%pdu.sender(), - room=%pdu.room_id(), - count=pending_mxcs.len(), - "retention: associated pending uploads with encrypted event" - ); - self.services.media.retention_insert_mxcs_on_event( - pdu.event_id().as_str(), - pdu.room_id().as_str(), - pdu.sender().as_str(), - &pending_mxcs - ); - } else { - warn!( - event_id=%pdu.event_id(), - sender=%pdu.sender(), - room=%pdu.room_id(), - "retention: no pending uploads found for encrypted event" - ); + if let Ok(content) = pdu.get_content::() { + if let Ok(related_pducount) = self + .get_pdu_count(&content.relates_to.event_id) + .await + { + self.services + .pdu_metadata + .add_relation(count, related_pducount); } - }, - | _ => {}, - } - - if let Ok(content) = pdu.get_content::() { - if let Ok(related_pducount) = self - .get_pdu_count(&content.relates_to.event_id) - .await - { - self.services - .pdu_metadata - .add_relation(count, related_pducount); } - } - if let Ok(content) = pdu.get_content::() { - match content.relates_to { - | Relation::Reply { in_reply_to } => { - // We need to do it again here, because replies don't have - // event_id as a top level field - if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await { + if let Ok(content) = pdu.get_content::() { + match content.relates_to { + | Relation::Reply { in_reply_to } => { + // We need to do it again here, because replies don't have + // event_id as a top level field + if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await + { + 
self.services + .pdu_metadata + .add_relation(count, related_pducount); + } + }, + | Relation::Thread(thread) => { self.services - .pdu_metadata - .add_relation(count, related_pducount); - } - }, - | Relation::Thread(thread) => { - self.services - .threads - .add_to_thread(&thread.event_id, pdu) - .await?; - }, - | Relation::Annotation(annotation) => { - self.services - .userroom - .reaction_hook( - pdu.event_id(), - pdu.room_id(), - pdu.sender(), - &annotation.event_id, - &annotation.key, - ) - .await; - }, - | _ => {}, // TODO: Aggregate other types + .threads + .add_to_thread(&thread.event_id, pdu) + .await?; + }, + | Relation::Annotation(annotation) => { + self.services + .userroom + .reaction_hook( + pdu.event_id(), + pdu.room_id(), + pdu.sender(), + &annotation.event_id, + &annotation.key, + ) + .await; + }, + | _ => {}, // TODO: Aggregate other types + } } - } - drop(next_count1); - drop(next_count2); + drop(next_count1); + drop(next_count2); - for appservice in self.services.appservice.read().await.values() { - if self - .services - .state_cache - .appservice_in_room(pdu.room_id(), appservice) - .await - { - self.services - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + for appservice in self.services.appservice.read().await.values() { + if self + .services + .state_cache + .appservice_in_room(pdu.room_id(), appservice) + .await + { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; - continue; - } + continue; + } - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if *pdu.kind() == TimelineEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - let appservice_uid = appservice.registration.sender_localpart.as_str(); - if state_key_uid == &appservice_uid { - self.services - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + // If the RoomMember event has a non-empty state_key, it is targeted at someone. + // If it is our appservice user, we send this PDU to it. + if *pdu.kind() == TimelineEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + let appservice_uid = appservice.registration.sender_localpart.as_str(); + if state_key_uid == &appservice_uid { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; - continue; + continue; + } } } - } - let matching_users = |users: &NamespaceRegex| { - appservice.users.is_match(pdu.sender().as_str()) - || *pdu.kind() == TimelineEventType::RoomMember - && pdu - .state_key - .as_ref() - .is_some_and(|state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: NamespaceRegex| { - self.services - .alias - .local_aliases_for_room(pdu.room_id()) - .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) - }; - - if matching_aliases(appservice.aliases.clone()).await - || appservice.rooms.is_match(pdu.room_id().as_str()) - || matching_users(&appservice.users) - { - self.services - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + let matching_users = |users: &NamespaceRegex| { + appservice.users.is_match(pdu.sender().as_str()) + || *pdu.kind() == TimelineEventType::RoomMember + && pdu + .state_key + .as_ref() + .is_some_and(|state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: NamespaceRegex| { + self.services + .alias + 
.local_aliases_for_room(pdu.room_id()) + .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) + }; + + if matching_aliases(appservice.aliases.clone()).await + || appservice.rooms.is_match(pdu.room_id().as_str()) + || matching_users(&appservice.users) + { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + } } - } Ok(pdu_id) } @@ -649,7 +708,6 @@ fn increment_notification_counts( } } - //TODO: this is an ABA fn increment(db: &Arc, key: &[u8]) { let old = db.get_blocking(key); diff --git a/src/service/rooms/timeline/build.rs b/src/service/rooms/timeline/build.rs index 218482737..c92d3eaa2 100644 --- a/src/service/rooms/timeline/build.rs +++ b/src/service/rooms/timeline/build.rs @@ -30,101 +30,99 @@ impl super::Service { room_id: &RoomId, state_lock: &RoomMutexGuard, ) -> Result { - let (pdu, pdu_json) = self - .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) - .await?; - - //TODO: Use proper room version here - if *pdu.kind() == TimelineEventType::RoomCreate && pdu.room_id().server_name().is_none() { - let _short_id = self - .services - .short - .get_or_create_shortroomid(pdu.room_id()) - .await; - } - - if self - .services - .admin - .is_admin_room(pdu.room_id()) - .await - { - self - .check_pdu_for_admin_room(&pdu, sender) + let (pdu, pdu_json) = self + .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) .await?; - } - // If redaction event is not authorized, do not append it to the timeline - if *pdu.kind() == TimelineEventType::RoomRedaction { - use RoomVersionId::*; - match self + //TODO: Use proper room version here + if *pdu.kind() == TimelineEventType::RoomCreate && pdu.room_id().server_name().is_none() { + let _short_id = self + .services + .short + .get_or_create_shortroomid(pdu.room_id()) + .await; + } + + if self .services - .state - .get_room_version(pdu.room_id()) - .await? 
+ .admin + .is_admin_room(pdu.room_id()) + .await { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = pdu.redacts() { - if !self - .services - .state_accessor - .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) - .await? - { - return Err!(Request(Forbidden("User cannot redact this event."))); + self.check_pdu_for_admin_room(&pdu, sender) + .await?; + } + + // If redaction event is not authorized, do not append it to the timeline + if *pdu.kind() == TimelineEventType::RoomRedaction { + use RoomVersionId::*; + match self + .services + .state + .get_room_version(pdu.room_id()) + .await? + { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + if let Some(redact_id) = pdu.redacts() { + if !self + .services + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? + { + return Err!(Request(Forbidden("User cannot redact this event."))); + } } - } - }, - | _ => { - let content: RoomRedactionEventContent = pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - if !self - .services - .state_accessor - .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) - .await? - { - return Err!(Request(Forbidden("User cannot redact this event."))); + }, + | _ => { + let content: RoomRedactionEventContent = pdu.get_content()?; + if let Some(redact_id) = &content.redacts { + if !self + .services + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? 
+ { + return Err!(Request(Forbidden("User cannot redact this event."))); + } } - } - }, + }, + } } - } - if *pdu.kind() == TimelineEventType::RoomMember { - let content: RoomMemberEventContent = pdu.get_content()?; + if *pdu.kind() == TimelineEventType::RoomMember { + let content: RoomMemberEventContent = pdu.get_content()?; - if content.join_authorized_via_users_server.is_some() - && content.membership != MembershipState::Join - { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for member joins" - ))); - } + if content.join_authorized_via_users_server.is_some() + && content.membership != MembershipState::Join + { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } - if content - .join_authorized_via_users_server - .as_ref() - .is_some_and(|authorising_user| { - !self - .services - .globals - .user_is_local(authorising_user) - }) { - return Err!(Request(InvalidParam( - "Authorising user does not belong to this homeserver" - ))); + if content + .join_authorized_via_users_server + .as_ref() + .is_some_and(|authorising_user| { + !self + .services + .globals + .user_is_local(authorising_user) + }) { + return Err!(Request(InvalidParam( + "Authorising user does not belong to this homeserver" + ))); + } } - } - // We append to state before appending the pdu, so we don't have a moment in - // time with the pdu without it's state. This is okay because append_pdu can't - // fail. - let statehashid = self.services.state.append_to_state(&pdu).await?; + // We append to state before appending the pdu, so we don't have a moment in + // time with the pdu without it's state. This is okay because append_pdu can't + // fail. 
+ let statehashid = self.services.state.append_to_state(&pdu).await?; - let pdu_id = if DO_MEDIA_RETENTION { - self - .append_pdu( + let pdu_id = if DO_MEDIA_RETENTION { + self.append_pdu( &pdu, pdu_json, // Since this PDU references all pdu_leaves we can update the leaves @@ -133,9 +131,8 @@ impl super::Service { state_lock, ) .await? - } else { - self - .append_pdu_without_retention( + } else { + self.append_pdu_without_retention( &pdu, pdu_json, // Since this PDU references all pdu_leaves we can update the leaves @@ -144,42 +141,42 @@ impl super::Service { state_lock, ) .await? - }; + }; - // We set the room state after inserting the pdu, so that we never have a moment - // in time where events in the current room state do not exist - self.services - .state - .set_room_state(pdu.room_id(), statehashid, state_lock); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + self.services + .state + .set_room_state(pdu.room_id(), statehashid, state_lock); - let mut servers: HashSet = self - .services - .state_cache - .room_servers(pdu.room_id()) - .map(ToOwned::to_owned) - .collect() - .await; + let mut servers: HashSet = self + .services + .state_cache + .room_servers(pdu.room_id()) + .map(ToOwned::to_owned) + .collect() + .await; - // In case we are kicking or banning a user, we need to inform their server of - // the change - if *pdu.kind() == TimelineEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(state_key_uid.server_name().to_owned()); + // In case we are kicking or banning a user, we need to inform their server of + // the change + if *pdu.kind() == TimelineEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + 
servers.insert(state_key_uid.server_name().to_owned()); + } } - } - // Remove our server from the server list since it will be added to it by - // room_servers() and/or the if statement above - servers.remove(self.services.globals.server_name()); + // Remove our server from the server list since it will be added to it by + // room_servers() and/or the if statement above + servers.remove(self.services.globals.server_name()); - self.services - .sending - .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) - .await?; + self.services + .sending + .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) + .await?; Ok(pdu.event_id().to_owned()) } @@ -194,8 +191,7 @@ pub async fn build_and_append_pdu( room_id: &RoomId, state_lock: &RoomMutexGuard, ) -> Result { - self - .build_and_append_pdu_inner::(pdu_builder, sender, room_id, state_lock) + self.build_and_append_pdu_inner::(pdu_builder, sender, room_id, state_lock) .await } @@ -208,8 +204,7 @@ pub async fn build_and_append_pdu_without_retention( room_id: &RoomId, state_lock: &RoomMutexGuard, ) -> Result { - self - .build_and_append_pdu_inner::(pdu_builder, sender, room_id, state_lock) + self.build_and_append_pdu_inner::(pdu_builder, sender, room_id, state_lock) .await } diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index d65254e10..3e58b9bd9 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -137,8 +137,8 @@ impl Service { Ok(()) } - /// Send a text message to the user's admin room in the background (non-blocking). - /// This is useful to avoid async recursion. + /// Send a text message to the user's admin room in the background + /// (non-blocking). This is useful to avoid async recursion. pub fn send_text_background(&self, user_id: &UserId, body: &str) { let user_id = user_id.to_owned(); let body = body.to_owned(); @@ -170,7 +170,11 @@ impl Service { /// Send a text message to the user's admin room and return the event ID. 
/// This allows adding reactions or further processing. - pub async fn send_text_with_event_id(&self, user_id: &UserId, body: &str) -> Result { + pub async fn send_text_with_event_id( + &self, + user_id: &UserId, + body: &str, + ) -> Result { if !self.services.globals.user_is_local(user_id) { debug_info!(%user_id, "Skipping user room send for remote user"); return Err(tuwunel_core::err!(Request(Forbidden("User is not local")))); @@ -187,7 +191,8 @@ impl Service { let state_lock = self.services.state.mutex.lock(&room_id).await; let content = RoomMessageEventContent::text_markdown(body); - let event_id = self.services + let event_id = self + .services .timeline .build_and_append_pdu_without_retention( PduBuilder::timeline(&content), @@ -202,7 +207,12 @@ impl Service { /// Add a reaction to an event in the user's admin room /// Returns the event ID of the reaction event - pub async fn add_reaction(&self, user_id: &UserId, event_id: &EventId, emoji: &str) -> Result { + pub async fn add_reaction( + &self, + user_id: &UserId, + event_id: &EventId, + emoji: &str, + ) -> Result { if !self.services.globals.user_is_local(user_id) { return Err(tuwunel_core::err!(Request(Forbidden("User is not local")))); } @@ -219,9 +229,11 @@ impl Service { // Create reaction content use ruma::events::{reaction::ReactionEventContent, relation::Annotation}; - let content = ReactionEventContent::new(Annotation::new(event_id.to_owned(), emoji.to_owned())); + let content = + ReactionEventContent::new(Annotation::new(event_id.to_owned(), emoji.to_owned())); - let reaction_event_id = self.services + let reaction_event_id = self + .services .timeline .build_and_append_pdu_without_retention( PduBuilder::timeline(&content), @@ -304,7 +316,7 @@ impl Service { let Ok(room_id) = services.userroom.get_user_room(&user_id).await else { return; }; - + let server_user = &services.globals.server_user; let state_lock = services.state.mutex.lock(&room_id).await; @@ -351,29 +363,43 @@ impl Service { // Check if 
this is a media retention confirmation reaction //todo: maybe dont match for emojis here match emoji { - "āœ…" => { - if let Err(e) = self.services.media.retention_confirm_by_reaction(sender, relates_to_event).await { + | "āœ…" => { + if let Err(e) = self + .services + .media + .retention_confirm_by_reaction(sender, relates_to_event) + .await + { debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āœ… reaction: {e}"); } - } - "āŒ" => { - if let Err(e) = self.services.media.retention_cancel_by_reaction(sender, relates_to_event).await { + }, + | "āŒ" => { + if let Err(e) = self + .services + .media + .retention_cancel_by_reaction(sender, relates_to_event) + .await + { debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āŒ reaction: {e}"); } - } - "āš™ļø" => { - if let Err(e) = self.services.media.retention_auto_by_reaction(sender, relates_to_event).await { + }, + | "āš™ļø" => { + if let Err(e) = self + .services + .media + .retention_auto_by_reaction(sender, relates_to_event) + .await + { debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āš™ļø reaction: {e}"); } - } - _ => { + }, + | _ => { debug_warn!("Unknown reaction emoji in user room: {}", emoji); - } + }, } } fn get_user_command_system(&self) -> &Arc { - self.user_command_system .get() .expect("user command system empty") diff --git a/src/user/user.rs b/src/user/user.rs index 8dfcbd676..04da11a93 100644 --- a/src/user/user.rs +++ b/src/user/user.rs @@ -84,9 +84,7 @@ mod retention { .await; Ok(format!( - "Current auto-delete preferences:\n\ - - Encrypted rooms: {}\n\ - - Unencrypted rooms: {}", + "Current auto-delete preferences:\n- Encrypted rooms: {}\n- Unencrypted rooms: {}", if prefs.auto_delete_encrypted { "enabled āœ…" } else { From c047d428b45299e248e406d067978e1bdbd5fdcd Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Mon, 20 Oct 2025 22:24:04 +0200 Subject: [PATCH 11/17] 
removed retention worker and made everything event driven --- src/admin/admin.rs | 3 +- src/core/config/mod.rs | 21 ++++--- src/service/command/run_matrix.rs | 11 ++-- src/service/media/mod.rs | 89 ++++++++++------------------ src/service/media/retention.rs | 96 +++++++++++-------------------- src/user/mod.rs | 4 +- src/user/user.rs | 10 +++- tuwunel-example.toml | 17 ++++-- 8 files changed, 109 insertions(+), 142 deletions(-) diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 6ae287a87..14bde5a21 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -10,7 +10,8 @@ use crate::{ }; #[derive(Debug, Parser)] -#[command(name = "tuwunel", version = tuwunel_core::version())] +#[command(name = "admin", version = tuwunel_core::version())] +#[command(arg_required_else_help = true)] #[command_dispatch] pub(super) enum AdminCommand { #[command(subcommand)] diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index f066a70c5..5c00d2dbb 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2140,15 +2140,20 @@ pub struct WellKnownConfig { #[config_example_generator(filename = "tuwunel-example.toml", section = "global.media")] pub struct MediaRetentionConfig { /// What to do with local media when an event referencing it is redacted. - /// keep | delete_if_unreferenced | force_delete_local - /// default: "keep" + /// + /// Options: + /// "keep" - Never delete media (feature disabled) + /// "ask_sender" - Ask the user who sent the message via DM (shows + /// āœ…/āŒ/āš™ļø reactions) + /// "delete_always" - Always delete unreferenced media immediately + /// + /// Default: "keep" + /// + /// Note: Deletion is event-driven and immediate. Users can set + /// per-room-type auto-delete preferences using `!user retention` commands + /// or the āš™ļø reaction when `ask_sender` is enabled. #[serde(default = "default_media_retention_on_redaction")] pub on_redaction: String, - - /// Grace period in seconds before deleting queued media. 
- /// default: 0 - #[serde(default)] - pub grace_period_secs: u64, } fn default_media_retention_on_redaction() -> String { "keep".to_owned() } @@ -2597,8 +2602,6 @@ impl Config { // Media retention helpers pub fn media_retention_on_redaction(&self) -> &str { self.media.on_redaction.as_str() } - - pub fn media_retention_grace_period_secs(&self) -> u64 { self.media.grace_period_secs } } fn true_fn() -> bool { true } diff --git a/src/service/command/run_matrix.rs b/src/service/command/run_matrix.rs index f65526131..8df694c85 100644 --- a/src/service/command/run_matrix.rs +++ b/src/service/command/run_matrix.rs @@ -100,13 +100,14 @@ impl Service { if let Some(capture_level) = capture_level { output.push_str(&format_logs(&result.logs, capture_level)); } - if result.err { - output.push_str("Command completed with error:\n"); - } else { - output.push_str("Command completed:\n"); + if !result.output.starts_with("Usage:") { + if result.err { + output.push_str("Command completed with error:\n"); + } else { + output.push_str("Command completed:\n"); + } } output.push_str(&result.output); - let mut content = RoomMessageEventContent::notice_markdown(output); content.relates_to = Some(Relation::Reply { in_reply_to: InReplyTo { event_id: reply_id.to_owned() }, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 3a17d29d7..4d35f9351 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -6,12 +6,7 @@ mod remote; mod retention; mod tests; mod thumbnail; -use std::{ - collections::HashSet, - path::PathBuf, - sync::Arc, - time::{Duration, SystemTime}, -}; +use std::{collections::HashSet, path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; use base64::{Engine as _, engine::general_purpose}; @@ -109,33 +104,9 @@ impl crate::Service for Service { .server .config .media_retention_on_redaction(), - grace = self - .services - .server - .config - .media_retention_grace_period_secs(), - "retention: startup configuration" + 
"retention: startup configuration (event-driven)" ); - // deletion worker loop (scaffold): runs periodically respecting grace period - let grace = Duration::from_secs( - self.services - .server - .config - .media_retention_grace_period_secs(), - ); - let retention = self.retention.clone(); - let this = self.clone(); - debug_warn!("creating media deletion worker"); - tokio::spawn(async move { - loop { - if let Err(e) = retention.worker_process_queue(&this, grace).await { - debug_warn!("media retention worker error: {e}"); - } - tokio::time::sleep(Duration::from_secs(10)).await; //todo: make configurable / sleep for longer - } - }); - Ok(()) } @@ -277,22 +248,22 @@ impl Service { mxc = %candidate.mxc, sender = ?candidate.sender, from_encrypted = candidate.from_encrypted_room, - "retention: auto-deleting per user preferences" - ); - self.retention.queue_media_for_deletion( - &candidate.mxc, - candidate - .sender - .as_ref() - .and_then(|s| UserId::parse(s).ok()) - .as_deref(), - false, // No confirmation needed - None, // No notification - None, // No reactions - None, // No reactions - None, // No auto-delete reaction - candidate.from_encrypted_room, + "retention: auto-deleting immediately per user preferences" ); + // Delete immediately + let _ = self + .retention + .delete_media_immediately( + self, + &candidate.mxc, + candidate + .sender + .as_ref() + .and_then(|s| UserId::parse(s).ok()) + .as_deref(), + candidate.from_encrypted_room, + ) + .await; continue; } @@ -303,16 +274,16 @@ impl Service { match (decision.action, decision.owner) { | (CandidateAction::DeleteImmediately, owner) => { - self.retention.queue_media_for_deletion( - &candidate.mxc, - owner.as_deref(), - false, - None, - None, - None, - None, - candidate.from_encrypted_room, - ); + // Delete immediately + let _ = self + .retention + .delete_media_immediately( + self, + &candidate.mxc, + owner.as_deref(), + candidate.from_encrypted_room, + ) + .await; }, | (CandidateAction::AwaitConfirmation, 
Some(owner)) => { // Send notification to the uploader's user room (not the room where it was @@ -464,8 +435,8 @@ impl Service { { let action = match policy { | RetentionPolicy::Keep => CandidateAction::Skip, - | RetentionPolicy::DeleteIfUnreferenced - | RetentionPolicy::ForceDeleteLocal => CandidateAction::DeleteImmediately, + | RetentionPolicy::AskSender | RetentionPolicy::DeleteAlways => + CandidateAction::DeleteImmediately, }; return CandidateDecision { action, owner }; } @@ -488,7 +459,7 @@ impl Service { } else { let action = match policy { | RetentionPolicy::Keep => CandidateAction::Skip, - | RetentionPolicy::DeleteIfUnreferenced | RetentionPolicy::ForceDeleteLocal => + | RetentionPolicy::AskSender | RetentionPolicy::DeleteAlways => CandidateAction::DeleteImmediately, }; CandidateDecision { action, owner: None } diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 5a3725998..85acd7c3c 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -1,13 +1,13 @@ use std::{ path::PathBuf, sync::Arc, - time::{Duration, SystemTime, UNIX_EPOCH}, + time::{SystemTime, UNIX_EPOCH}, }; use futures::StreamExt; use ruma::UserId; use serde::{Deserialize, Serialize}; -use tuwunel_core::{Result, debug_warn, err, trace, warn}; +use tuwunel_core::{Result, err, trace, warn}; use tuwunel_database::{Cbor, Deserialized, Map, keyval::serialize_val}; use super::Service; @@ -95,15 +95,15 @@ pub(crate) struct DeletionCandidate { #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(super) enum RetentionPolicy { Keep, - DeleteIfUnreferenced, - ForceDeleteLocal, + AskSender, + DeleteAlways, } impl RetentionPolicy { pub(super) fn from_str(s: &str) -> Self { match s { - | "delete_if_unreferenced" => Self::DeleteIfUnreferenced, - | "force_delete_local" => Self::ForceDeleteLocal, + | "ask_sender" => Self::AskSender, + | "delete_always" => Self::DeleteAlways, | _ => Self::Keep, } } @@ -292,8 +292,8 @@ impl Retention { mr.last_seen_ts = 
now_secs(); let should_queue = match policy { | RetentionPolicy::Keep => false, - | RetentionPolicy::DeleteIfUnreferenced => mr.refcount == 0, - | RetentionPolicy::ForceDeleteLocal => mr.local, + | RetentionPolicy::AskSender => mr.refcount == 0, + | RetentionPolicy::DeleteAlways => mr.local, }; warn!(%event_id, mxc = %mer.mxc, kind = %mer.kind, new_refcount = mr.refcount, should_queue, local = mr.local, sender = ?mer.sender, "retention: redaction updated ref"); let val_mref = serialize_val(Cbor(&mr))?.to_vec(); @@ -450,11 +450,37 @@ impl Retention { awaiting_confirmation, owner = owner.map(UserId::as_str), from_encrypted = from_encrypted_room, - "retention: queue media for deletion" + "retention: queue media for deletion (awaiting user confirmation)" ); self.cf.raw_put(key, Cbor(&cand)); } + /// Delete media immediately (for auto-delete and "delete_always" mode) + /// event-driven + pub(super) async fn delete_media_immediately( + &self, + service: &Service, + mxc: &str, + owner: Option<&UserId>, + from_encrypted_room: bool, + ) -> Result { + let deleted_bytes = self.delete_local_media(service, mxc).await?; + + // Remove metadata entries + let dels = vec![Self::key_mref(mxc).into_bytes()]; + self.cf.write_batch_raw(std::iter::empty(), dels); + + warn!( + mxc, + bytes = deleted_bytes, + owner = owner.map(UserId::as_str), + from_encrypted = from_encrypted_room, + "retention: media deleted immediately (event-driven)" + ); + + Ok(deleted_bytes) + } + pub(super) async fn confirm_candidate( &self, service: &Service, @@ -620,58 +646,6 @@ impl Retention { } } - /// worker: processes queued deletion candidates after grace period. 
- pub(super) async fn worker_process_queue( - &self, - service: &Service, - grace: Duration, - ) -> Result<()> { - let prefix = K_QUEUE.as_bytes(); - debug_warn!(?grace, "retention: worker iteration start"); - let mut stream = self - .cf - .stream_raw_prefix::<&str, Cbor, _>(&prefix); - let mut processed = 0usize; - let mut deleted = 0usize; - while let Some(item) = stream.next().await.transpose()? { - let (key, Cbor(cand)) = item; - let now = now_secs(); - if cand.awaiting_confirmation { - debug_warn!(mxc = %cand.mxc, "retention: awaiting user confirmation, skipping candidate"); - continue; - } - - if now < cand.enqueued_ts.saturating_add(grace.as_secs()) { - debug_warn!(mxc = %cand.mxc, wait = cand.enqueued_ts + grace.as_secs() - now, "retention: grace period not met yet"); - continue; - } - - // attempt deletion of local media files - let deleted_bytes = self - .delete_local_media(service, &cand.mxc) - .await - .unwrap_or(0); - if deleted_bytes > 0 { - debug_warn!(mxc = %cand.mxc, bytes = deleted_bytes, "retention: media deleted"); - } else { - debug_warn!(mxc = %cand.mxc, "retention: queued media had no bytes deleted (already gone?)"); - } - - // remove metadata entries (best-effort) - let dels = vec![key.as_bytes().to_vec(), Self::key_mref(&cand.mxc).into_bytes()]; - self.cf.write_batch_raw(std::iter::empty(), dels); - processed = processed.saturating_add(1); - deleted = deleted.saturating_add(1); - } - if processed == 0 { - debug_warn!("retention: worker iteration found no deletion candidates"); - } else { - debug_warn!(processed, deleted, "retention: worker iteration complete"); - } - - Ok(()) - } - async fn delete_local_media(&self, service: &Service, mxc: &str) -> Result { // delete original + thumbnails (any dimensions) use ruma::Mxc; diff --git a/src/user/mod.rs b/src/user/mod.rs index d1fe241f4..fae5f3e47 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -45,7 +45,9 @@ impl CommandSystem for UserCommandSystem { ) -> Result { let command = match 
UserCommand::try_parse_from(command_line) { | Ok(command) => command, - | Err(error) => return Err!("Failed to parse command:\n{error}"), + | Err(error) => { + return Ok(error.to_string()); + }, }; let Some(sender) = sender else { diff --git a/src/user/user.rs b/src/user/user.rs index 04da11a93..44dc79e48 100644 --- a/src/user/user.rs +++ b/src/user/user.rs @@ -5,12 +5,19 @@ use tuwunel_macros::{command, command_dispatch}; use crate::user::{debug::Cmd as DebugCmd, retention::Cmd as RetentionCmd}; #[derive(Debug, Parser)] -#[command(name = "tuwunel", version = tuwunel_core::version())] +#[command(name = "user", version = tuwunel_core::version())] +#[command( + arg_required_else_help = true, + subcommand_required = true, + subcommand_value_name = "COMMAND" +)] #[command_dispatch] pub(super) enum UserCommand { #[command(subcommand)] + /// Debugging and diagnostic commands Debug(DebugCmd), #[command(subcommand)] + /// Media retention and auto-delete preferences Retention(RetentionCmd), } @@ -22,6 +29,7 @@ mod debug { #[command_dispatch] #[derive(Debug, Subcommand)] pub(crate) enum Cmd { + /// Echo test command Echo {}, } diff --git a/tuwunel-example.toml b/tuwunel-example.toml index 50567827e..98a501928 100644 --- a/tuwunel-example.toml +++ b/tuwunel-example.toml @@ -1828,13 +1828,20 @@ #[global.media] # What to do with local media when an event referencing it is redacted. -# keep | delete_if_unreferenced | force_delete_local # -#on_redaction = "keep" - -# Grace period in seconds before deleting queued media. +# Options: +# "keep" - Never delete media (feature disabled) +# "ask_sender" - Ask the user who sent the message via DM (shows +# āœ…/āŒ/āš™ļø reactions) +# "delete_always" - Always delete unreferenced media immediately +# +# Default: "keep" +# +# Note: Deletion is event-driven and immediate. Users can set +# per-room-type auto-delete preferences using `!user retention` commands +# or the āš™ļø reaction when `ask_sender` is enabled. 
# -#grace_period_secs = 0 +#on_redaction = #[global.blurhashing] From 9617219ff4b37959338116159231656ae68bc225 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Tue, 21 Oct 2025 13:53:11 +0200 Subject: [PATCH 12/17] fix typos, clippy --- docs/deploying/generic.md | 2 +- src/api/client/space.rs | 2 +- src/core/config/mod.rs | 1 + src/core/log/capture.rs | 7 +++++++ src/service/media/retention.rs | 2 +- src/service/rooms/timeline/build.rs | 2 +- src/service/userroom/mod.rs | 1 - src/service/users/mod.rs | 2 +- 8 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 9e32aeb84..f5b098d13 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -166,7 +166,7 @@ sudo systemctl enable --now caddy ### Other Reverse Proxies -As we would prefer our users to use Caddy, we will not provide configuration files for other proxys. +As we would prefer our users to use Caddy, we will not provide configuration files for other proxies. 
You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 55e41502b..48bd8696e 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -44,7 +44,7 @@ pub(crate) async fn get_hierarchy_route( .as_ref() .and_then(|s| PaginationToken::from_str(s).ok()); - // Should prevent unexpeded behaviour in (bad) clients + // Should prevent unexpected behaviour in (bad) clients if let Some(ref token) = key { if token.suggested_only != body.suggested_only || token.max_depth != max_depth { return Err!(Request(InvalidParam( diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5c00d2dbb..a139d7c1f 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2156,6 +2156,7 @@ pub struct MediaRetentionConfig { pub on_redaction: String, } +#[must_use] fn default_media_retention_on_redaction() -> String { "keep".to_owned() } #[derive(Clone, Copy, Debug, Deserialize, Default)] diff --git a/src/core/log/capture.rs b/src/core/log/capture.rs index c8e8dec59..e94e7668f 100644 --- a/src/core/log/capture.rs +++ b/src/core/log/capture.rs @@ -38,7 +38,14 @@ impl EventData { } } +impl Default for CaptureManager { + fn default() -> Self { + Self::new() + } +} + impl CaptureManager { + #[must_use] pub fn new() -> Self { Self { captures: Mutex::new(Vec::new()) } } pub fn start_capture(&self, span_id: &Id) { diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 85acd7c3c..f19aa94b3 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -420,7 +420,7 @@ impl Retention { }); } - /// qeue a media item for deletion (idempotent best-effort). + /// queue a media item for deletion (idempotent best-effort). 
pub(super) fn queue_media_for_deletion( &self, mxc: &str, diff --git a/src/service/rooms/timeline/build.rs b/src/service/rooms/timeline/build.rs index c92d3eaa2..f1c6d4098 100644 --- a/src/service/rooms/timeline/build.rs +++ b/src/service/rooms/timeline/build.rs @@ -21,7 +21,7 @@ use super::RoomMutexGuard; impl super::Service { /// Creates a new persisted data unit and adds it to a room. This function - /// takes a roomid_mutex_state, meaning that only this fnuction is able to + /// takes a roomid_mutex_state, meaning that only this function is able to /// mutate the room state. async fn build_and_append_pdu_inner( &self, diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index 3e58b9bd9..344cc7e02 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -361,7 +361,6 @@ impl Service { } // Check if this is a media retention confirmation reaction - //todo: maybe dont match for emojis here match emoji { | "āœ…" => { if let Err(e) = self diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 79a27d3b3..5dbda06b4 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -261,7 +261,7 @@ impl Service { } /// Sets a new displayname or removes it if displayname is None. You still - /// need to nofify all rooms of this change. + /// need to notify all rooms of this change. 
pub fn set_displayname(&self, user_id: &UserId, displayname: Option) { if let Some(displayname) = displayname { self.db From 0365818da7677303cfbd803c00c598caf320c9e1 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Tue, 21 Oct 2025 14:44:25 +0200 Subject: [PATCH 13/17] changed notifiaction messages, fix clippy --- src/core/config/mod.rs | 6 +-- src/core/log/capture.rs | 6 +-- src/service/media/mod.rs | 80 +++++++++++++++++++++++++--------- src/service/media/retention.rs | 2 +- src/service/userroom/mod.rs | 5 ++- tuwunel-example.toml | 4 +- 6 files changed, 70 insertions(+), 33 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a139d7c1f..7d283486f 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2144,19 +2144,18 @@ pub struct MediaRetentionConfig { /// Options: /// "keep" - Never delete media (feature disabled) /// "ask_sender" - Ask the user who sent the message via DM (shows - /// āœ…/āŒ/āš™ļø reactions) + /// āœ…/āŒ/ā™»ļø reactions) /// "delete_always" - Always delete unreferenced media immediately /// /// Default: "keep" /// /// Note: Deletion is event-driven and immediate. Users can set /// per-room-type auto-delete preferences using `!user retention` commands - /// or the āš™ļø reaction when `ask_sender` is enabled. + /// or the ā™»ļø reaction when `ask_sender` is enabled. 
#[serde(default = "default_media_retention_on_redaction")] pub on_redaction: String, } -#[must_use] fn default_media_retention_on_redaction() -> String { "keep".to_owned() } #[derive(Clone, Copy, Debug, Deserialize, Default)] @@ -2602,6 +2601,7 @@ impl Config { pub fn check(&self) -> Result<(), Error> { check(self) } // Media retention helpers + #[must_use] pub fn media_retention_on_redaction(&self) -> &str { self.media.on_redaction.as_str() } } diff --git a/src/core/log/capture.rs b/src/core/log/capture.rs index e94e7668f..7d15d97c5 100644 --- a/src/core/log/capture.rs +++ b/src/core/log/capture.rs @@ -39,13 +39,11 @@ impl EventData { } impl Default for CaptureManager { - fn default() -> Self { - Self::new() - } + fn default() -> Self { Self::new() } } impl CaptureManager { - #[must_use] + #[must_use] pub fn new() -> Self { Self { captures: Mutex::new(Vec::new()) } } pub fn start_capture(&self, span_id: &Id) { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 4d35f9351..dd0c38afc 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -298,7 +298,9 @@ impl Service { .globals .user_is_local(owner.as_ref()) { - let body = self.build_retention_notice(&candidate, event_value.as_ref()); + let body = self + .build_retention_notice(&candidate, event_value.as_ref()) + .await; match self .services .userroom @@ -309,7 +311,7 @@ impl Service { // add reaction options: // āœ… to confirm deletion // āŒ to cancel - // āš™ļø to always auto-delete for room type + // ā™»ļø to always auto-delete for room type let confirm_id = match self .services .userroom @@ -337,12 +339,12 @@ impl Service { let auto_id = match self .services .userroom - .add_reaction(owner.as_ref(), &event_id, "āš™ļø") + .add_reaction(owner.as_ref(), &event_id, "ā™»ļø") .await { | Ok(id) => Some(id.to_string()), | Err(e) => { - debug_warn!(%event_id, "retention: failed to add āš™ļø reaction: {e}"); + debug_warn!(%event_id, "retention: failed to add ā™»ļø reaction: 
{e}"); None }, }; @@ -466,42 +468,78 @@ impl Service { } } - fn build_retention_notice( + async fn build_retention_notice( &self, candidate: &RetentionCandidate, event_value: Option<&Value>, ) -> String { - let room_segment = candidate - .room_id - .as_deref() - .map(|room| format!(" in room {room}")) - .unwrap_or_default(); + // Try to get the room name instead of just the ID + let room_segment = if let Some(room_id_str) = candidate.room_id.as_deref() { + if let Ok(room_id) = ruma::RoomId::parse(room_id_str) { + // Try to get the room name + if let Ok(name) = self + .services + .state_accessor + .get_name(&room_id) + .await + { + format!(" in room \"{name}\"") + } else { + format!(" in room {room_id_str}") + } + } else { + format!(" in room {room_id_str}") + } + } else { + String::new() + }; + // Format timestamp in a human-readable way let timestamp = event_value .and_then(|val| val.get("origin_server_ts")) .and_then(canonical_json_to_u64) - .map(|ts| format!(" at {ts}")) + .and_then(|ts_millis| { + use std::time::{Duration, UNIX_EPOCH}; + let duration = Duration::from_millis(ts_millis); + let datetime = UNIX_EPOCH + duration; + let now = SystemTime::now(); + + if let Ok(elapsed) = now.duration_since(datetime) { + let secs = elapsed.as_secs(); + if secs < 60 { + Some(format!(" (just now)")) + } else if secs < 3600 { + Some(format!(" ({} minutes ago)", secs / 60)) + } else if secs < 86400 { + Some(format!(" ({} hours ago)", secs / 3600)) + } else { + Some(format!(" ({} days ago)", secs / 86400)) + } + } else { + None + } + }) .unwrap_or_default(); let encryption_warning = if candidate.from_encrypted_room { - "\n\nāš ļø WARNING: This media was detected from an encrypted room based on upload \ + "\n\n**Warning:** This media was uploaded from an encrypted room based on upload \ timing. Detection may have false positives since the server cannot read encrypted \ - messages. Use auto-delete at your own risk." + messages. 
It is accurate, but use auto-delete in encrypted rooms at your own risk." } else { "" }; let room_type = if candidate.from_encrypted_room { - "encrypted rooms" + "**encrypted rooms**" } else { - "unencrypted rooms" + "**unencrypted rooms**" }; format!( - "A piece of media ({mxc}) you uploaded{room_segment}{timestamp} is pending \ - deletion.{encryption_warning}\n\nReact with:\nāœ… to confirm deletion\nāŒ to keep \ - it\nāš™ļø to always auto-delete media in {room_type}\n\n(You can also run `!user \ - retention confirm {mxc}` to delete it manually.)", + "A piece of media ({mxc}) you uploaded{timestamp}{room_segment} is pending deletion \ + because you redacted it.{encryption_warning}\n\nReact with:\nāœ… to confirm \ + deletion\nāŒ to keep it\nā™»ļø to always auto-delete media in {room_type}\n\n(You can \ + also run `!user retention confirm {mxc}` to delete it manually.)", mxc = candidate.mxc ) } @@ -592,7 +630,7 @@ impl Service { } } - /// Auto-delete (āš™ļø reaction) - enable auto-delete for this room type and + /// Auto-delete (ā™»ļø reaction) - enable auto-delete for this room type and /// delete immediately pub async fn retention_auto_by_reaction( &self, @@ -605,7 +643,7 @@ impl Service { .find_mxc_by_notification_event(notification_event_id.as_str()) .await { - debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user enabled auto-delete via āš™ļø reaction"); + debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user enabled auto-delete via ā™»ļø reaction"); let (deleted_bytes, confirm_reaction_id, cancel_reaction_id, from_encrypted_room) = self.retention .auto_delete_candidate(self, &mxc, user) diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index f19aa94b3..b99e8ca0c 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -83,7 +83,7 @@ pub(crate) struct DeletionCandidate { /// Event ID of the āŒ reaction (for cleanup) 
#[serde(default)] pub cancel_reaction_id: Option, - /// Event ID of the āš™ļø reaction (always auto-delete for this room type) + /// Event ID of the ā™»ļø reaction (always auto-delete for this room type) #[serde(default)] pub auto_reaction_id: Option, /// Was this media detected as being from an encrypted room? diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index 344cc7e02..d16ff8122 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -361,6 +361,7 @@ impl Service { } // Check if this is a media retention confirmation reaction + //todo: maybe dont match for emojis here match emoji { | "āœ…" => { if let Err(e) = self @@ -382,14 +383,14 @@ impl Service { debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āŒ reaction: {e}"); } }, - | "āš™ļø" => { + | "ā™»ļø" => { if let Err(e) = self .services .media .retention_auto_by_reaction(sender, relates_to_event) .await { - debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process āš™ļø reaction: {e}"); + debug_warn!(user = %sender, reaction_to = %relates_to_event, "retention: failed to process ā™»ļø reaction: {e}"); } }, | _ => { diff --git a/tuwunel-example.toml b/tuwunel-example.toml index 98a501928..651bdc6de 100644 --- a/tuwunel-example.toml +++ b/tuwunel-example.toml @@ -1832,14 +1832,14 @@ # Options: # "keep" - Never delete media (feature disabled) # "ask_sender" - Ask the user who sent the message via DM (shows -# āœ…/āŒ/āš™ļø reactions) +# āœ…/āŒ/ā™»ļø reactions) # "delete_always" - Always delete unreferenced media immediately # # Default: "keep" # # Note: Deletion is event-driven and immediate. Users can set # per-room-type auto-delete preferences using `!user retention` commands -# or the āš™ļø reaction when `ask_sender` is enabled. +# or the ā™»ļø reaction when `ask_sender` is enabled. 
# #on_redaction = From f776918ef011af0571a8623b8dcf8562fce433c5 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Tue, 21 Oct 2025 14:53:24 +0200 Subject: [PATCH 14/17] automated clippy fixes --- src/service/command/mod.rs | 2 +- src/service/media/mod.rs | 28 +++++++++++++--------------- src/service/media/retention.rs | 21 +++++++-------------- src/service/rooms/timeline/append.rs | 2 +- src/service/userroom/mod.rs | 4 ++-- src/user/user.rs | 2 +- 6 files changed, 25 insertions(+), 34 deletions(-) diff --git a/src/service/command/mod.rs b/src/service/command/mod.rs index de5767922..20651f332 100644 --- a/src/service/command/mod.rs +++ b/src/service/command/mod.rs @@ -16,7 +16,7 @@ pub struct CommandResult { pub struct CompletionTree { pub name: String, - pub nodes: Vec, + pub nodes: Vec, } #[async_trait] diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index dd0c38afc..4fadb8bd6 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -162,7 +162,7 @@ impl Service { if let Ok(canonical) = self .services .timeline - .get_pdu_json(&parsed_eid) + .get_pdu_json(parsed_eid) .await { if let Ok(val) = serde_json::to_value(&canonical) { @@ -194,7 +194,7 @@ impl Service { if candidates.is_empty() { if let Some(ref val) = event_value { let mut discovered = HashSet::new(); - collect_mxcs(&val, &mut discovered); + collect_mxcs(val, &mut discovered); if !discovered.is_empty() { let room_id = val .get("room_id") @@ -259,8 +259,7 @@ impl Service { candidate .sender .as_ref() - .and_then(|s| UserId::parse(s).ok()) - .as_deref(), + .and_then(|s| UserId::parse(s).ok()), candidate.from_encrypted_room, ) .await; @@ -480,7 +479,7 @@ impl Service { if let Ok(name) = self .services .state_accessor - .get_name(&room_id) + .get_name(room_id) .await { format!(" in room \"{name}\"") @@ -507,7 +506,7 @@ impl Service { if let Ok(elapsed) = now.duration_since(datetime) { let secs = elapsed.as_secs(); if secs < 60 { - Some(format!(" (just now)")) + Some(" (just 
now)".to_owned()) } else if secs < 3600 { Some(format!(" ({} minutes ago)", secs / 60)) } else if secs < 86400 { @@ -555,7 +554,7 @@ impl Service { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { self.services .userroom - .redact_reaction(user, &reaction_id); + .redact_reaction(user, reaction_id); } } @@ -585,7 +584,7 @@ impl Service { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { self.services .userroom - .redact_reaction(user, &reaction_id); + .redact_reaction(user, reaction_id); } } @@ -619,7 +618,7 @@ impl Service { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { self.services .userroom - .redact_reaction(user, &reaction_id); + .redact_reaction(user, reaction_id); } } @@ -664,8 +663,7 @@ impl Service { self.services.userroom.send_text_background( user, &format!( - "āœ… Auto-delete enabled for {} rooms.\n\nTo disable: `!user retention {}`", - room_type, command + "āœ… Auto-delete enabled for {room_type} rooms.\n\nTo disable: `!user retention {command}`" ), ); @@ -674,14 +672,14 @@ impl Service { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { self.services .userroom - .redact_reaction(user, &reaction_id); + .redact_reaction(user, reaction_id); } } if let Some(reaction_id_str) = cancel_reaction_id { if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { self.services .userroom - .redact_reaction(user, &reaction_id); + .redact_reaction(user, reaction_id); } } @@ -1079,7 +1077,7 @@ fn parse_user_retention_preference(value: &Value) -> Option Option: => Pendi const K_PREFS: &str = "prefs:"; // prefs: => UserRetentionPrefs #[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Default)] pub struct UserRetentionPrefs { /// Auto-delete media in unencrypted rooms without asking #[serde(default)] @@ -32,14 +33,6 @@ pub struct UserRetentionPrefs { pub auto_delete_encrypted: bool, } -impl Default for UserRetentionPrefs { - fn default() -> Self { - Self { - auto_delete_unencrypted: false, - auto_delete_encrypted: 
false, - } - } -} #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct PendingUpload { @@ -215,7 +208,7 @@ impl Retention { warn!(%event_id, count = mxcs.len(), %room_id, sender=%sender, "retention: inserting media refs for event"); let mut puts: Vec<(Vec, Vec)> = Vec::with_capacity(mxcs.len() * 2); - for (mxc, local, kind) in mxcs.iter() { + for (mxc, local, kind) in mxcs { // update MediaEventRef let mer = MediaEventRef { mxc: mxc.clone(), @@ -275,7 +268,7 @@ impl Retention { let mut to_delete: Vec<(String, String, Option)> = Vec::new(); let mut puts: Vec<(Vec, Vec)> = Vec::new(); let mut dels: Vec> = Vec::new(); - let mut processed = 0usize; + let mut processed = 0_usize; let mut stream = self .cf @@ -349,7 +342,7 @@ impl Retention { user_id: &str, event_ts: u64, // event timestamp in milliseconds ) -> Vec<(String, bool, String)> { - let window_ms = 60_000u64; // 60 seconds + let window_ms = 60_000_u64; // 60 seconds let cutoff_ts = event_ts.saturating_sub(window_ms); let prefix = Self::pending_prefix(user_id); @@ -437,7 +430,7 @@ impl Retention { let cand = DeletionCandidate { mxc: mxc.to_owned(), enqueued_ts: now_secs(), - user_id: owner.map(|u| u.to_string()), + user_id: owner.map(ToString::to_string), awaiting_confirmation, notification_event_id, confirm_reaction_id, @@ -500,7 +493,7 @@ impl Retention { }; if owner != requester.as_str() { return Err(err!(Request(Forbidden("media candidate owned by another user")))); - }; + } if !candidate.awaiting_confirmation { return Err(err!(Request(InvalidParam("media deletion already processed",)))); } @@ -659,7 +652,7 @@ impl Retention { .search_mxc_metadata_prefix(&mxc_parsed) .await .unwrap_or_default(); - let mut total = 0u64; + let mut total = 0_u64; for key in keys { let path = service.get_media_file(&key); total = total.saturating_add(remove_file_tolerant(path)); diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index ab2f4465b..08a674897 100644 --- 
a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -407,7 +407,7 @@ impl super::Service { let push_media = |mxcs: &mut Vec<(String, bool, String)>, src: &MediaSource, label: &str, - this: &super::Service| { + this: &Self| { let (maybe_mxc, enc) = match src { | MediaSource::Plain(m) => (Some(m.to_string()), false), | MediaSource::Encrypted(f) => (Some(f.url.to_string()), true), diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index d16ff8122..d7e387bde 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -53,12 +53,12 @@ impl Service { let server_user = &self.services.globals.server_user; let alias = self.get_user_room_alias(user_id); let name = format!("User Room of {user_id}"); - let topic = format!("eeeeee .-."); + let topic = "eeeeee .-.".to_owned(); let (room_id, state_lock) = self .services .create .create_room( - &server_user, + server_user, None, None, Some(&alias), diff --git a/src/user/user.rs b/src/user/user.rs index 44dc79e48..78f42e155 100644 --- a/src/user/user.rs +++ b/src/user/user.rs @@ -1,6 +1,6 @@ use clap::Parser; use tuwunel_core::Result; -use tuwunel_macros::{command, command_dispatch}; +use tuwunel_macros::command_dispatch; use crate::user::{debug::Cmd as DebugCmd, retention::Cmd as RetentionCmd}; From 29149687bb0e1e21a94e622cd699159ac40854d7 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Tue, 21 Oct 2025 18:48:01 +0200 Subject: [PATCH 15/17] Fix various admin and media service logic issues --- src/admin/mod.rs | 1 + src/api/client/message.rs | 1 + src/service/admin/mod.rs | 8 +-- src/service/media/mod.rs | 78 ++++++++++++++-------------- src/service/media/retention.rs | 23 ++++---- src/service/mod.rs | 1 + src/service/rooms/create/mod.rs | 3 +- src/service/rooms/timeline/append.rs | 4 +- src/service/userroom/mod.rs | 19 ++++--- src/user/mod.rs | 1 + 10 files changed, 72 insertions(+), 67 deletions(-) diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 
7f3c82add..e8cc25ce1 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -1,5 +1,6 @@ #![allow(clippy::wildcard_imports)] #![allow(clippy::enum_glob_use)] +#![allow(clippy::large_futures)] pub(crate) mod admin; mod tests; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index f86b309ee..5990dd07d 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -99,6 +99,7 @@ pub(crate) async fn get_message_events_route( services .timeline .backfill_if_required(room_id, from) + .boxed() .await .log_err() .ok(); diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index cdea05e62..ce557bacb 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -46,12 +46,14 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +#[allow(clippy::enum_variant_names)] enum AdminCommandCheckVerdict { - NotAdminCommand, - AdminEscapeCommand, - AdminRoomCommand, + NotAdminCommand, + AdminEscapeCommand, + AdminRoomCommand, } + impl Service { /// Sends markdown notice to the admin room as the admin user. 
pub async fn notice(&self, body: &str) { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 4fadb8bd6..b72a247e1 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -6,7 +6,7 @@ mod remote; mod retention; mod tests; mod thumbnail; -use std::{collections::HashSet, path::PathBuf, sync::Arc, time::SystemTime}; +use std::{collections::HashSet, path::PathBuf, sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}}; use async_trait::async_trait; use base64::{Engine as _, engine::general_purpose}; @@ -251,7 +251,7 @@ impl Service { "retention: auto-deleting immediately per user preferences" ); // Delete immediately - let _ = self + self .retention .delete_media_immediately( self, @@ -262,7 +262,8 @@ impl Service { .and_then(|s| UserId::parse(s).ok()), candidate.from_encrypted_room, ) - .await; + .await + .unwrap_or_default(); continue; } @@ -274,7 +275,7 @@ impl Service { match (decision.action, decision.owner) { | (CandidateAction::DeleteImmediately, owner) => { // Delete immediately - let _ = self + self .retention .delete_media_immediately( self, @@ -282,7 +283,8 @@ impl Service { owner.as_deref(), candidate.from_encrypted_room, ) - .await; + .await + .unwrap_or_default(); }, | (CandidateAction::AwaitConfirmation, Some(owner)) => { // Send notification to the uploader's user room (not the room where it was @@ -498,24 +500,20 @@ impl Service { .and_then(|val| val.get("origin_server_ts")) .and_then(canonical_json_to_u64) .and_then(|ts_millis| { - use std::time::{Duration, UNIX_EPOCH}; let duration = Duration::from_millis(ts_millis); - let datetime = UNIX_EPOCH + duration; - let now = SystemTime::now(); - - if let Ok(elapsed) = now.duration_since(datetime) { - let secs = elapsed.as_secs(); - if secs < 60 { - Some(" (just now)".to_owned()) - } else if secs < 3600 { - Some(format!(" ({} minutes ago)", secs / 60)) - } else if secs < 86400 { - Some(format!(" ({} hours ago)", secs / 3600)) - } else { - Some(format!(" ({} days ago)", secs / 
86400)) - } + UNIX_EPOCH.checked_add(duration) + }) + .and_then(|event_time| SystemTime::now().duration_since(event_time).ok()) + .map(|elapsed| { + let secs = elapsed.as_secs(); + if secs < 60 { + " (just now)".to_owned() + } else if secs < 3600 { + format!(" ({} minutes ago)", secs / 60) + } else if secs < 86400 { + format!(" ({} hours ago)", secs / 3600) } else { - None + format!(" ({} days ago)", secs / 86400) } }) .unwrap_or_default(); @@ -602,31 +600,31 @@ impl Service { notification_event_id: &EventId, ) -> Result<()> { // Find the deletion candidate by notification event ID - if let Some(mxc) = self + let Some(mxc) = self .retention .find_mxc_by_notification_event(notification_event_id.as_str()) .await - { - debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user cancelled deletion via āŒ reaction"); - let confirm_reaction_id = self - .retention - .cancel_candidate(&mxc, user) - .await?; + else { + debug_warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); + return Ok(()); + }; - // Redact the unused āœ… reaction to clean up the UI (spawned as background task) - if let Some(reaction_id_str) = confirm_reaction_id { - if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { - self.services - .userroom - .redact_reaction(user, reaction_id); - } - } + debug_info!(user = %user, event_id = %notification_event_id, mxc = %mxc, "retention: user cancelled deletion via āŒ reaction"); + let confirm_reaction_id = self + .retention + .cancel_candidate(&mxc, user) + .await?; - Ok(()) - } else { - debug_warn!(user = %user, event_id = %notification_event_id, "retention: no pending deletion found for reaction"); - Ok(()) + // Redact the unused āœ… reaction to clean up the UI (spawned as background task) + if let Some(reaction_id_str) = confirm_reaction_id { + if let Ok(reaction_id) = EventId::parse(&reaction_id_str) { + self.services + .userroom + .redact_reaction(user, 
reaction_id); + } } + + Ok(()) } /// Auto-delete (ā™»ļø reaction) - enable auto-delete for this room type and diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 2eea147e5..157b94149 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -207,7 +207,7 @@ impl Retention { } warn!(%event_id, count = mxcs.len(), %room_id, sender=%sender, "retention: inserting media refs for event"); - let mut puts: Vec<(Vec, Vec)> = Vec::with_capacity(mxcs.len() * 2); + let mut puts: Vec<(Vec, Vec)> = Vec::with_capacity(mxcs.len().saturating_mul(2)); for (mxc, local, kind) in mxcs { // update MediaEventRef let mer = MediaEventRef { @@ -315,7 +315,8 @@ impl Retention { let upload_ts = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap_or_default() - .as_millis() as u64; + .as_millis().try_into() + .unwrap_or_default(); let pending = PendingUpload { mxc: mxc.to_owned(), @@ -505,9 +506,7 @@ impl Retention { candidate.enqueued_ts = now_secs(); let deleted_bytes = self.delete_local_media(service, mxc).await?; - let mut dels = Vec::with_capacity(2); - dels.push(key.into_bytes()); - dels.push(Self::key_mref(mxc).into_bytes()); + let dels = vec![key.into_bytes(), Self::key_mref(mxc).into_bytes()]; self.cf.write_batch_raw(std::iter::empty(), dels); warn!( mxc, @@ -617,9 +616,7 @@ impl Retention { let cancel_reaction_to_redact = candidate.cancel_reaction_id.clone(); let deleted_bytes = self.delete_local_media(service, mxc).await?; - let mut dels = Vec::with_capacity(2); - dels.push(key.into_bytes()); - dels.push(Self::key_mref(mxc).into_bytes()); + let dels = vec![key.into_bytes(), Self::key_mref(mxc).into_bytes()]; self.cf.write_batch_raw(std::iter::empty(), dels); warn!( mxc, @@ -655,9 +652,9 @@ impl Retention { let mut total = 0_u64; for key in keys { let path = service.get_media_file(&key); - total = total.saturating_add(remove_file_tolerant(path)); + total = total.saturating_add(remove_file_tolerant(&path)); let legacy = 
service.get_media_file_b64(&key); - total = total.saturating_add(remove_file_tolerant(legacy)); + total = total.saturating_add(remove_file_tolerant(&legacy)); } warn!("retention: total bytes deleted {total}"); Ok(total) @@ -671,11 +668,11 @@ fn now_secs() -> u64 { .as_secs() } -fn remove_file_tolerant(path: PathBuf) -> u64 { - match std::fs::metadata(&path) { +fn remove_file_tolerant(path: &PathBuf) -> u64 { + match std::fs::metadata(path) { | Ok(meta) => { let len = meta.len(); - if let Err(e) = std::fs::remove_file(&path) { + if let Err(e) = std::fs::remove_file(path) { trace!(?path, "ignore remove error: {e}"); 0 } else { diff --git a/src/service/mod.rs b/src/service/mod.rs index 94f0da835..7d40b602e 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,5 +1,6 @@ #![type_length_limit = "8192"] #![allow(refining_impl_trait)] +#![allow(clippy::large_futures)] mod manager; mod migrations; diff --git a/src/service/rooms/create/mod.rs b/src/service/rooms/create/mod.rs index 7d3b311ab..303d3972a 100644 --- a/src/service/rooms/create/mod.rs +++ b/src/service/rooms/create/mod.rs @@ -141,7 +141,7 @@ impl Service { .await?; // 3. 
Power levels - let power_levels_content = self.build_power_levels_content( + let power_levels_content = Self::build_power_levels_content( &version_rules, power_level_content_override, publish, @@ -472,7 +472,6 @@ impl Service { } fn build_power_levels_content( - &self, version_rules: &RoomVersionRules, power_level_content_override: Option<&Raw>, publish: bool, diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 08a674897..2b1f0dd19 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -28,10 +28,13 @@ use tuwunel_core::{ utils::{self, ReadyExt}, }; use tuwunel_database::{Json, Map}; +use ruma::events::room::MediaSource; use super::{ExtractBody, ExtractRelatesTo, ExtractRelatesToEventId, RoomMutexGuard}; use crate::{appservice::NamespaceRegex, rooms::state_compressor::CompressedState}; + + /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. #[implement(super::Service)] @@ -402,7 +405,6 @@ impl super::Service { pdu.get_content::() { warn!(event_id=%pdu.event_id(), msg=?msg_full, "retention: debug message content"); - use ruma::events::room::MediaSource; let mut mxcs: Vec<(String, bool, String)> = Vec::new(); let push_media = |mxcs: &mut Vec<(String, bool, String)>, src: &MediaSource, diff --git a/src/service/userroom/mod.rs b/src/service/userroom/mod.rs index d7e387bde..9b814d2dc 100644 --- a/src/service/userroom/mod.rs +++ b/src/service/userroom/mod.rs @@ -2,10 +2,14 @@ use std::sync::{Arc, OnceLock}; use ruma::{ EventId, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId, - events::room::{ - guest_access::GuestAccess, - member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, + events::{ + reaction::ReactionEventContent, + relation::Annotation, + room::{ + guest_access::GuestAccess, + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + 
}, }, room::JoinRule, }; @@ -156,7 +160,7 @@ impl Service { let state_lock = services.state.mutex.lock(&room_id).await; let content = RoomMessageEventContent::text_markdown(&body); - let _ = services + let _: Result<_> = services .timeline .build_and_append_pdu_without_retention( PduBuilder::timeline(&content), @@ -228,7 +232,6 @@ impl Service { let state_lock = self.services.state.mutex.lock(&room_id).await; // Create reaction content - use ruma::events::{reaction::ReactionEventContent, relation::Annotation}; let content = ReactionEventContent::new(Annotation::new(event_id.to_owned(), emoji.to_owned())); @@ -269,7 +272,7 @@ impl Service { return; } - let command = &command[1..]; + let command = command.strip_prefix('!').unwrap_or(command); self.services.command.run_command_matrix_detached( self.get_user_command_system(), @@ -321,7 +324,7 @@ impl Service { let state_lock = services.state.mutex.lock(&room_id).await; // Redact the reaction event to remove it from the UI - let _ = services + let _: Result<_> = services .timeline .build_and_append_pdu_without_retention( PduBuilder { diff --git a/src/user/mod.rs b/src/user/mod.rs index fae5f3e47..b6ed8a66a 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -65,6 +65,7 @@ impl CommandSystem for UserCommandSystem { pub(crate) struct Context<'a> { pub services: &'a Services, + #[allow(dead_code)] pub input: &'a str, pub sender: &'a UserId, } From 5b79782f4503f3b97e5c1c4fae97ca5925415c43 Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Tue, 21 Oct 2025 18:48:46 +0200 Subject: [PATCH 16/17] Adjust future-size-threshold to 12288 in clippy config --- clippy.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clippy.toml b/clippy.toml index eec1e7a45..cbca69e8d 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,7 +1,7 @@ array-size-threshold = 4096 cognitive-complexity-threshold = 100 # TODO reduce me ALARA excessive-nesting-threshold = 8 -future-size-threshold = 8192 +future-size-threshold = 12288 
stack-size-threshold = 196608 # TODO reduce me ALARA too-many-lines-threshold = 780 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 From f4debd15e411f36586dc547cf831ff79eff6f59f Mon Sep 17 00:00:00 2001 From: tototomate123 Date: Tue, 21 Oct 2025 18:51:23 +0200 Subject: [PATCH 17/17] reformat --- src/service/admin/mod.rs | 7 +++---- src/service/media/mod.rs | 16 ++++++++++------ src/service/media/retention.rs | 7 +++---- src/service/rooms/timeline/append.rs | 4 +--- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index ce557bacb..4c5128603 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -48,12 +48,11 @@ impl crate::Service for Service { #[allow(clippy::enum_variant_names)] enum AdminCommandCheckVerdict { - NotAdminCommand, - AdminEscapeCommand, - AdminRoomCommand, + NotAdminCommand, + AdminEscapeCommand, + AdminRoomCommand, } - impl Service { /// Sends markdown notice to the admin room as the admin user. 
pub async fn notice(&self, body: &str) { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index b72a247e1..751521d99 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -6,7 +6,12 @@ mod remote; mod retention; mod tests; mod thumbnail; -use std::{collections::HashSet, path::PathBuf, sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}}; +use std::{ + collections::HashSet, + path::PathBuf, + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; use async_trait::async_trait; use base64::{Engine as _, engine::general_purpose}; @@ -251,8 +256,7 @@ impl Service { "retention: auto-deleting immediately per user preferences" ); // Delete immediately - self - .retention + self.retention .delete_media_immediately( self, &candidate.mxc, @@ -275,8 +279,7 @@ impl Service { match (decision.action, decision.owner) { | (CandidateAction::DeleteImmediately, owner) => { // Delete immediately - self - .retention + self.retention .delete_media_immediately( self, &candidate.mxc, @@ -661,7 +664,8 @@ impl Service { self.services.userroom.send_text_background( user, &format!( - "āœ… Auto-delete enabled for {room_type} rooms.\n\nTo disable: `!user retention {command}`" + "āœ… Auto-delete enabled for {room_type} rooms.\n\nTo disable: `!user \ + retention {command}`" ), ); diff --git a/src/service/media/retention.rs b/src/service/media/retention.rs index 157b94149..d261c09b6 100644 --- a/src/service/media/retention.rs +++ b/src/service/media/retention.rs @@ -21,8 +21,7 @@ const K_QUEUE: &str = "qdel:"; // qdel: => DeletionCandidate const K_PENDING: &str = "pending:"; // pending:: => PendingUpload const K_PREFS: &str = "prefs:"; // prefs: => UserRetentionPrefs -#[derive(Clone, Debug, Deserialize, Serialize)] -#[derive(Default)] +#[derive(Clone, Debug, Deserialize, Serialize, Default)] pub struct UserRetentionPrefs { /// Auto-delete media in unencrypted rooms without asking #[serde(default)] @@ -33,7 +32,6 @@ pub struct UserRetentionPrefs { pub 
auto_delete_encrypted: bool, } - #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct PendingUpload { pub mxc: String, @@ -315,7 +313,8 @@ impl Retention { let upload_ts = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap_or_default() - .as_millis().try_into() + .as_millis() + .try_into() .unwrap_or_default(); let pending = PendingUpload { diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index 2b1f0dd19..f69c64d62 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -11,6 +11,7 @@ use ruma::{ GlobalAccountDataEventType, TimelineEventType, push_rules::PushRulesEvent, room::{ + MediaSource, encrypted::Relation, member::{MembershipState, RoomMemberEventContent}, redaction::RoomRedactionEventContent, @@ -28,13 +29,10 @@ use tuwunel_core::{ utils::{self, ReadyExt}, }; use tuwunel_database::{Json, Map}; -use ruma::events::room::MediaSource; use super::{ExtractBody, ExtractRelatesTo, ExtractRelatesToEventId, RoomMutexGuard}; use crate::{appservice::NamespaceRegex, rooms::state_compressor::CompressedState}; - - /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. #[implement(super::Service)]