Skip to content

Commit

Permalink
working on notif handler
Browse files Browse the repository at this point in the history
  • Loading branch information
wildonion committed Aug 14, 2024
1 parent 2132924 commit e12848f
Showing 1 changed file with 54 additions and 16 deletions.
70 changes: 54 additions & 16 deletions src/apis/v1/http/hoop.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@


use context::AppContext;
use deadpool_redis::redis::AsyncCommands;
use middlewares::check_token::check_token;
use models::{event::{EventQuery, EventType, HoopEventForm}, user::UserData};
use salvo::{http::form::FormData, Error};
Expand All @@ -26,10 +27,7 @@ use crate::*;
| left a hoop => DELETE /hoop/user/leave
|
NOTE: authorizing and KYCing process will be done using gem server
users_hoop, hoops, notifs
tables: users_hoop (user events interactions), hoops (events), notifs (platform notifs)
*/

Expand All @@ -54,26 +52,21 @@ pub async fn add_hoop(
let actors = app_ctx.clone().unwrap().actors.unwrap();
let hoop_mutator_actor_controller = actors.clone().cqrs_actors.mutators.hoop_mutator_actor;


let cover = req.file("cover").await.unwrap();
let decoded = serde_json::from_str::
<Vec<std::collections::HashMap<String, i64>>>
(&hoop_info.invitations.clone()).unwrap();



let etype = match hoop_info.etype.as_str(){
"social" => EventType::SocialGathering,
"proposal" => EventType::Proposal,
"streaming" => EventType::Streaming, // use hooper streamer handlers
_ => EventType::None
};


// store cover on vps then on s3 or digispaces
// store hoop info in db by sending the message to the hoop_mutator_actor_controller


res.render("developing...")

}
Expand Down Expand Up @@ -111,18 +104,63 @@ pub async fn get_hoop(
query_params: QueryParam<EventQuery, true> // query param is required, showcasing in swagger ui
){

// extracting necessary structures from the app context
let app_ctx = depot.obtain::<Option<AppContext>>().unwrap(); // extracting shared app context
let redis_pool = app_ctx.clone().unwrap().app_storage.clone().unwrap().get_redis_pool().await.unwrap();
let sea_orm_pool = app_ctx.clone().unwrap().app_storage.clone().unwrap().get_seaorm_pool().await.unwrap();
let actors = app_ctx.clone().unwrap().actors.unwrap();
let hoop_mutator_actor_controller = actors.clone().cqrs_actors.mutators.hoop_mutator_actor;
let redis_conn = redis_pool.get().await.unwrap();

// event actor scheduler (check the endTime of the event constantly to close the event):
// an actor cron scheduler to check the end time of the hoop constantly to update the is_finished field
// loop tokio spawn interval tick
// include!{} hoop_scheduler.rs
// redis exp key
// get live hoops

// trying to get the user data in here
let user_data = depot.get::<UserData>("user_data").unwrap();

/* --------------------------------------------------------------------------------------------------------
event scheduler (check the endTime of the event constantly to close the event)
resource-intensive with regular checking in a loop{}:
1 - an actor task or cron scheduler to check the end time of the hoop constantly to update the is_finished field
2 - loop tokio spawn interval tick then include!{} hoop_scheduler.rs to call the method
the optimal and non-intensive solution would be to use keyspace notifications,
which allow clients to subscribe to Pub/Sub channels in order to receive
events affecting the Redis data set in some way. However, the following
steps must be taken to complete the logic. We're assuming that we have a
user_id as the key and some value stored with an expirable key for 10 minutes
after login time.
let login_time = chrono::Local::now();
let ten_mins_later = login_time + chrono::Duration::minutes(10);
redis_conn.set_exp(user_id, ten_mins_later);
1 - configuring Redis to enable keyspace notifications
2 - when the key expires, Redis publishes its event to a prebuilt expiration channel
3 - we then subscribe to the __keyevent@0__:expired channel
4 - we'll receive the event from the channel
5 - trigger the notification for the related user id (expired key)
6 - publish the triggered notif to the rmq producer using notif_broker_actor
7 - consume the notif from the rmq broker to cache on redis and store in db for future short pollings
8 - send the received notif to the mpsc sender of the ws server
9 - receive the notif from the mpsc channel inside the ws server setup
at this point, to send the notif to the client we can either
cache the notif on Redis or store it in the db, which allows clients to use a
short-polling approach to fetch the notif through an interval process.
alternatively, another approach — more resource intensive, suited to push
notification strategies — is using a channel (MPSC) to send the notif to a
websocket server actor configuration thread, and from there send it to the ws peer actor in realtime.
the ws setup could be an actor based setup, which makes it simpler to send messages to
peer sessions from the ws server through actor concepts like:
atomic syncing with mutex, rwlock and channels, os/async io threads or tasks,
select, mailbox mpsc channels and task scheduler intervals.
*/

// get live hoops (those ones that are not finished or expired)
// get all owner hoops
// get all user joined hoops

let query_params = req.parse_queries::<EventQuery>().unwrap();

res.render("developing...")
}

Expand Down

0 comments on commit e12848f

Please sign in to comment.