From 2243c1a1ab9ef2a76456f5a4c50b48b743cfeb2d Mon Sep 17 00:00:00 2001 From: Beast Date: Fri, 9 Jan 2026 14:35:28 +0800 Subject: [PATCH 1/9] feat: clean models --- src/models/address.rs | 5 ----- src/models/admin.rs | 6 ------ src/models/auth.rs | 4 +--- src/models/task.rs | 37 ------------------------------------- src/repositories/admin.rs | 26 +------------------------- 5 files changed, 2 insertions(+), 76 deletions(-) diff --git a/src/models/address.rs b/src/models/address.rs index b3d8004..44ccaff 100644 --- a/src/models/address.rs +++ b/src/models/address.rs @@ -111,11 +111,6 @@ pub struct AddressInput { pub referral_code: String, } -#[derive(Debug, Clone, Deserialize)] -pub struct NewAddressPayload { - pub quan_address: String, -} - #[derive(Debug, Serialize, Deserialize)] pub struct SyncTransfersResponse { pub success: bool, diff --git a/src/models/admin.rs b/src/models/admin.rs index 7f1fe96..8329535 100644 --- a/src/models/admin.rs +++ b/src/models/admin.rs @@ -29,12 +29,6 @@ impl<'r> FromRow<'r, PgRow> for Admin { } } -#[derive(Debug, Clone)] -pub struct CreateAdmin { - pub username: String, - pub password: String, -} - #[derive(Deserialize)] pub struct AdminLoginPayload { pub username: String, diff --git a/src/models/auth.rs b/src/models/auth.rs index bbc90fb..7a37c68 100644 --- a/src/models/auth.rs +++ b/src/models/auth.rs @@ -8,9 +8,7 @@ pub struct TokenClaims { } #[derive(Debug, Deserialize)] -pub struct RequestChallengeBody { - pub address: Option, -} +pub struct RequestChallengeBody; #[derive(Debug, Serialize)] pub struct RequestChallengeResponse { diff --git a/src/models/task.rs b/src/models/task.rs index 513dcf9..888ed11 100644 --- a/src/models/task.rs +++ b/src/models/task.rs @@ -111,43 +111,6 @@ impl Task { updated_at: None, }) } - - pub fn set_transaction_sent( - &mut self, - reversible_tx_id: String, - send_time: DateTime, - end_time: DateTime, - ) { - self.reversible_tx_id = Some(reversible_tx_id); - self.send_time = Some(send_time); - 
self.end_time = Some(end_time); - self.status = TaskStatus::Pending; - } - - pub fn mark_completed(&mut self) { - self.status = TaskStatus::Completed; - } - - pub fn mark_reversed(&mut self) { - self.status = TaskStatus::Reversed; - } - - pub fn mark_failed(&mut self) { - self.status = TaskStatus::Failed; - } - - pub fn is_ready_for_reversal(&self, early_minutes: i64) -> bool { - if self.status != TaskStatus::Pending { - return false; - } - - if let Some(end_time) = self.end_time { - let reversal_time = end_time - chrono::Duration::minutes(early_minutes); - Utc::now() >= reversal_time - } else { - false - } - } } impl<'r> FromRow<'r, PgRow> for Task { fn from_row(row: &'r PgRow) -> Result { diff --git a/src/repositories/admin.rs b/src/repositories/admin.rs index e262d17..d620d8e 100644 --- a/src/repositories/admin.rs +++ b/src/repositories/admin.rs @@ -1,11 +1,7 @@ use sqlx::{PgPool, Postgres, QueryBuilder}; use uuid::Uuid; -use crate::{ - db_persistence::DbError, - models::admin::{Admin, CreateAdmin}, - repositories::DbResult, -}; +use crate::{models::admin::Admin, repositories::DbResult}; #[derive(Clone, Debug)] pub struct AdminRepository { @@ -20,26 +16,6 @@ impl AdminRepository { Self { pool: pool.clone() } } - pub async fn create(&self, new_admin: &CreateAdmin) -> DbResult { - let created_id = sqlx::query_scalar::<_, String>( - " - INSERT INTO admins (username, password) - VALUES ($1, $2) - RETURNING id - ", - ) - .bind(new_admin.username.clone()) - .bind(new_admin.password.clone()) - .fetch_optional(&self.pool) - .await?; - - if let Some(id) = created_id { - Ok(id) - } else { - Err(DbError::RecordNotFound("Record id is generated".to_string())) - } - } - pub async fn find_by_id(&self, id: &Uuid) -> DbResult> { let mut qb = AdminRepository::create_select_base_query(); qb.push(" WHERE id = "); From 2c4fd5f7e7c4970241288b502e580a82ca4e66fd Mon Sep 17 00:00:00 2001 From: Beast Date: Fri, 9 Jan 2026 17:56:45 +0800 Subject: [PATCH 2/9] feat: clean repositories ---
src/handlers/address.rs | 7 +-- src/handlers/auth.rs | 8 +-- src/lib.rs | 2 +- src/repositories/eth_association.rs | 38 +------------ src/repositories/opt_in.rs | 53 +++++++----------- src/repositories/task.rs | 48 ---------------- src/repositories/tweet_pull_usage.rs | 52 ++++++++--------- src/repositories/x_association.rs | 82 --------------------------- src/services/reverser.rs | 84 ---------------------------- 9 files changed, 54 insertions(+), 320 deletions(-) diff --git a/src/handlers/address.rs b/src/handlers/address.rs index b3e58d2..813b0a9 100644 --- a/src/handlers/address.rs +++ b/src/handlers/address.rs @@ -905,10 +905,9 @@ mod tests { assert_eq!(response.status(), StatusCode::NO_CONTENT); // Verification in DB - let saved_assoc = state - .db - .x_associations - .find_by_username("twitter_pro_101") + let saved_assoc = sqlx::query_as::<_, XAssociation>("SELECT * FROM x_associations WHERE username = $1") + .bind(new_association.username) + .fetch_optional(&state.db.pool) .await .unwrap(); diff --git a/src/handlers/auth.rs b/src/handlers/auth.rs index 8037c5f..b0ab229 100644 --- a/src/handlers/auth.rs +++ b/src/handlers/auth.rs @@ -344,6 +344,7 @@ mod tests { use crate::{ handlers::auth::handle_x_oauth_callback, http_server::AppState, + models::x_association::XAssociation, routes::auth::auth_routes, utils::{ test_app_state::create_test_app_state, @@ -527,10 +528,9 @@ mod tests { assert!(location.contains(&format!("payload={}", expected_username))); // Check DB Side Effects - let saved_assoc = state - .db - .x_associations - .find_by_username(expected_username) + let saved_assoc = sqlx::query_as::<_, XAssociation>("SELECT * FROM x_associations WHERE username = $1") + .bind(expected_username) + .fetch_optional(&state.db.pool) .await .unwrap(); diff --git a/src/lib.rs b/src/lib.rs index 4ec8764..863d184 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -24,7 +24,7 @@ pub use config::Config; pub use errors::{AppError, AppResult}; pub use http_server::AppState; pub 
use services::graphql_client::{GraphqlClient, SyncStats, Transfer}; -pub use services::reverser::{ReversalStats, ReverserService}; +pub use services::reverser::ReverserService; pub use services::task_generator::TaskGenerator; pub use services::transaction_manager::TransactionManager; diff --git a/src/repositories/eth_association.rs b/src/repositories/eth_association.rs index 48df5aa..0de90ae 100644 --- a/src/repositories/eth_association.rs +++ b/src/repositories/eth_association.rs @@ -1,10 +1,7 @@ use sqlx::PgPool; use crate::{ - models::{ - address::QuanAddress, - eth_association::{EthAddress, EthAssociation}, - }, + models::{address::QuanAddress, eth_association::EthAssociation}, repositories::DbResult, }; @@ -43,15 +40,6 @@ impl EthAssociationRepository { Ok(association) } - pub async fn find_by_eth_address(&self, eth_address: &EthAddress) -> DbResult> { - let association = sqlx::query_as::<_, EthAssociation>("SELECT * FROM eth_associations WHERE eth_address = $1") - .bind(ð_address.0) - .fetch_optional(&self.pool) - .await?; - - Ok(association) - } - pub async fn update_eth_address(&self, new_association: &EthAssociation) -> DbResult { let association = sqlx::query_as::<_, EthAssociation>( r#" @@ -133,30 +121,6 @@ mod tests { assert_eq!(found.eth_address.0, "0x00000000219ab540356cBB839Cbe05303d7705Fa"); } - #[tokio::test] - async fn test_find_by_eth_address() { - let (address_repo, eth_repo) = setup_test_repositories().await; - - let address = create_persisted_address(&address_repo, "user_02").await; - - let input = EthAssociationInput { - quan_address: address.quan_address.0.clone(), - eth_address: "0x00000000219ab540356cBB839Cbe05303d7705Fa".to_string(), - }; - let new_association = EthAssociation::new(input).unwrap(); - eth_repo.create(&new_association).await.unwrap(); - - // Find by ETH Address - let found = eth_repo - .find_by_eth_address(&new_association.eth_address) - .await - .unwrap(); - - assert!(found.is_some()); - let found = found.unwrap(); - 
assert_eq!(found.quan_address.0, address.quan_address.0); - } - #[tokio::test] async fn test_update_eth_address() { let (address_repo, eth_repo) = setup_test_repositories().await; diff --git a/src/repositories/opt_in.rs b/src/repositories/opt_in.rs index ca5b3d2..a7c59cd 100644 --- a/src/repositories/opt_in.rs +++ b/src/repositories/opt_in.rs @@ -55,14 +55,6 @@ impl OptInRepository { Ok(opt_ins) } - - pub async fn count(&self) -> DbResult { - let count = sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM opt_ins") - .fetch_one(&self.pool) - .await?; - - Ok(count) - } } #[cfg(test)] @@ -75,6 +67,7 @@ mod tests { use crate::repositories::address::AddressRepository; use crate::utils::test_db::reset_database; use sqlx::{postgres::PgPoolOptions, PgPool}; + use sqlx::{Pool, Postgres}; use std::time::Duration; use tokio::time::sleep; @@ -94,6 +87,14 @@ mod tests { (opt_in_repo, address_repo, pool) } + async fn count_records(pool: &Pool) -> DbResult { + let count = sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM opt_ins") + .fetch_one(pool) + .await?; + + Ok(count) + } + fn create_test_address(id: &str) -> Address { let input = AddressInput { quan_address: format!("qz_test_{}", id), @@ -108,7 +109,7 @@ mod tests { let address = create_test_address("test_create_001"); address_repo.create(&address).await.unwrap(); - let count = opt_in_repo.count().await.unwrap(); + let count = count_records(&_pool).await.unwrap(); let opt_in = opt_in_repo.create(&address.quan_address.0).await.unwrap(); assert_eq!(opt_in.quan_address.0, address.quan_address.0); @@ -129,7 +130,6 @@ mod tests { let address = create_test_address("test_delete_001"); address_repo.create(&address).await.unwrap(); - let count_before = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&address.quan_address.0).await.unwrap(); assert!(opt_in_repo @@ -159,15 +159,12 @@ mod tests { address_repo.create(&addr2).await.unwrap(); address_repo.create(&addr3).await.unwrap(); - let count = 
opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr1.quan_address.0).await.unwrap(); sleep(Duration::from_millis(10)).await; - let count = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr2.quan_address.0).await.unwrap(); sleep(Duration::from_millis(10)).await; - let count = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr3.quan_address.0).await.unwrap(); let all = opt_in_repo.get_all_ordered(100).await.unwrap(); @@ -189,15 +186,12 @@ mod tests { address_repo.create(&addr2).await.unwrap(); address_repo.create(&addr3).await.unwrap(); - let count = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr1.quan_address.0).await.unwrap(); sleep(Duration::from_millis(10)).await; - let count = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr2.quan_address.0).await.unwrap(); sleep(Duration::from_millis(10)).await; - let count = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr3.quan_address.0).await.unwrap(); let limited = opt_in_repo.get_all_ordered(2).await.unwrap(); @@ -210,7 +204,7 @@ mod tests { async fn test_count() { let (opt_in_repo, address_repo, _pool) = setup_test_repository().await; - assert_eq!(opt_in_repo.count().await.unwrap(), 0); + assert_eq!(count_records(&_pool).await.unwrap(), 0); let addr1 = create_test_address("test_count_001"); let addr2 = create_test_address("test_count_002"); @@ -220,20 +214,17 @@ mod tests { address_repo.create(&addr2).await.unwrap(); address_repo.create(&addr3).await.unwrap(); - let count = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr1.quan_address.0).await.unwrap(); - assert_eq!(opt_in_repo.count().await.unwrap(), 1); + assert_eq!(count_records(&_pool).await.unwrap(), 1); - let count = opt_in_repo.count().await.unwrap(); opt_in_repo.create(&addr2.quan_address.0).await.unwrap(); - assert_eq!(opt_in_repo.count().await.unwrap(), 2); + assert_eq!(count_records(&_pool).await.unwrap(), 2); - let count = opt_in_repo.count().await.unwrap(); 
opt_in_repo.create(&addr3.quan_address.0).await.unwrap(); - assert_eq!(opt_in_repo.count().await.unwrap(), 3); + assert_eq!(count_records(&_pool).await.unwrap(), 3); opt_in_repo.delete(&addr2.quan_address.0).await.unwrap(); - assert_eq!(opt_in_repo.count().await.unwrap(), 2); + assert_eq!(count_records(&_pool).await.unwrap(), 2); } #[tokio::test] @@ -243,13 +234,11 @@ mod tests { address_repo.create(&address).await.unwrap(); - let count = opt_in_repo.count().await.unwrap(); let opt_in1 = opt_in_repo.create(&address.quan_address.0).await.unwrap(); let first_created_at = opt_in1.created_at; sleep(Duration::from_millis(10)).await; - let count = opt_in_repo.count().await.unwrap(); let opt_in2 = opt_in_repo.create(&address.quan_address.0).await.unwrap(); assert_eq!(opt_in2.quan_address.0, address.quan_address.0); @@ -285,22 +274,19 @@ mod tests { address_repo.create(&addr2).await.unwrap(); address_repo.create(&addr3).await.unwrap(); - assert_eq!(opt_in_repo.count().await.unwrap(), 0); + assert_eq!(count_records(&_pool).await.unwrap(), 0); - let count = opt_in_repo.count().await.unwrap(); let opt_in1 = opt_in_repo.create(&addr1.quan_address.0).await.unwrap(); assert_eq!(opt_in1.opt_in_number, 1); - assert_eq!(opt_in_repo.count().await.unwrap(), 1); + assert_eq!(count_records(&_pool).await.unwrap(), 1); - let count = opt_in_repo.count().await.unwrap(); let opt_in2 = opt_in_repo.create(&addr2.quan_address.0).await.unwrap(); assert_eq!(opt_in2.opt_in_number, 2); - assert_eq!(opt_in_repo.count().await.unwrap(), 2); + assert_eq!(count_records(&_pool).await.unwrap(), 2); - let count = opt_in_repo.count().await.unwrap(); let opt_in3 = opt_in_repo.create(&addr3.quan_address.0).await.unwrap(); assert_eq!(opt_in3.opt_in_number, 3); - assert_eq!(opt_in_repo.count().await.unwrap(), 3); + assert_eq!(count_records(&_pool).await.unwrap(), 3); let all = opt_in_repo.get_all_ordered(100).await.unwrap(); assert_eq!(all[0].opt_in_number, 1); @@ -314,7 +300,6 @@ mod tests { let address = 
create_test_address("test_timestamp_001"); address_repo.create(&address).await.unwrap(); - let count = opt_in_repo.count().await.unwrap(); let opt_in = opt_in_repo.create(&address.quan_address.0).await.unwrap(); assert!(!opt_in.created_at.to_rfc3339().is_empty()); diff --git a/src/repositories/task.rs b/src/repositories/task.rs index b299af0..a4a5210 100644 --- a/src/repositories/task.rs +++ b/src/repositories/task.rs @@ -104,14 +104,6 @@ impl TaskRepository { Ok(()) } - pub async fn get_tasks_by_status(&self, status: TaskStatus) -> DbResult> { - let tasks = sqlx::query_as::<_, Task>("SELECT * FROM tasks WHERE status = $1 ORDER BY created_at") - .bind(status.to_string()) - .fetch_all(&self.pool) - .await?; - Ok(tasks) - } - pub async fn get_tasks_ready_for_reversal(&self, early_minutes: i64) -> DbResult> { let cutoff_time = Utc::now() + chrono::Duration::minutes(early_minutes); @@ -156,28 +148,6 @@ impl TaskRepository { Ok(counts) } - - pub async fn get_address_stats(&self) -> DbResult> { - let stats = sqlx::query_as::<_, (String, i64)>( - r#" - SELECT - a.quan_address, - COUNT(t.id) as task_count - FROM - addresses a - LEFT JOIN - tasks t ON a.quan_address = t.quan_address - GROUP BY - a.quan_address - ORDER BY - a.quan_address - "#, - ) - .fetch_all(&self.pool) - .await?; - - Ok(stats) - } } #[cfg(test)] @@ -265,24 +235,6 @@ mod tests { assert!(updated_task.end_time.is_some()); } - #[tokio::test] - async fn test_get_tasks_by_status() { - let (address_repo, task_repo) = setup_test_repositories().await; - let address = create_persisted_address(&address_repo, "004").await; - - let mut task1 = create_mock_task_object(&address.quan_address.0); - task1.status = TaskStatus::Pending; - task_repo.create(&task1).await.unwrap(); - - let mut task2 = create_mock_task_object(&address.quan_address.0); - task2.status = TaskStatus::Completed; - task_repo.create(&task2).await.unwrap(); - - let pending_tasks = task_repo.get_tasks_by_status(TaskStatus::Pending).await.unwrap(); - 
assert_eq!(pending_tasks.len(), 1); - assert_eq!(pending_tasks[0].task_id, task1.task_id); - } - #[tokio::test] async fn test_get_tasks_ready_for_reversal() { let (address_repo, task_repo) = setup_test_repositories().await; diff --git a/src/repositories/tweet_pull_usage.rs b/src/repositories/tweet_pull_usage.rs index 1813819..d1adcd8 100644 --- a/src/repositories/tweet_pull_usage.rs +++ b/src/repositories/tweet_pull_usage.rs @@ -53,32 +53,11 @@ impl TweetPullUsageRepository { } } - pub async fn get_current_usage(&self, reset_day: u32) -> Result { - let period = Self::get_current_period(reset_day); - self.get_usage_for_period(&period).await - } - pub async fn increment_usage(&self, amount: i32, reset_day: u32) -> Result { let period = Self::get_current_period(reset_day); self.increment_usage_for_period(amount, &period).await } - /// Internal helper to get usage for a specific period string. - async fn get_usage_for_period(&self, period: &str) -> Result { - let usage = sqlx::query_as::<_, TweetPullUsage>( - "INSERT INTO tweet_pull_usage (period, tweet_count) - VALUES ($1, 0) - ON CONFLICT (period) DO UPDATE SET period = EXCLUDED.period - RETURNING *", - ) - .bind(period) - .fetch_one(&self.pool) - .await - .map_err(DbError::Database)?; - - Ok(usage) - } - /// Internal helper to increment usage for a specific period string. async fn increment_usage_for_period(&self, amount: i32, period: &str) -> Result { let usage = sqlx::query_as::<_, TweetPullUsage>( @@ -119,21 +98,42 @@ mod tests { use crate::utils::test_app_state::create_test_app_state; use crate::utils::test_db::reset_database; use chrono::{TimeZone, Utc}; + use sqlx::{Pool, Postgres}; + + async fn get_current_usage(pool: &Pool, reset_day: u32) -> Result { + let period = TweetPullUsageRepository::get_current_period(reset_day); + get_usage_for_period(pool, &period).await + } + + /// Internal helper to get usage for a specific period string. 
+ async fn get_usage_for_period(pool: &Pool, period: &str) -> Result { + let usage = sqlx::query_as::<_, TweetPullUsage>( + "INSERT INTO tweet_pull_usage (period, tweet_count) + VALUES ($1, 0) + ON CONFLICT (period) DO UPDATE SET period = EXCLUDED.period + RETURNING *", + ) + .bind(period) + .fetch_one(pool) + .await + .map_err(DbError::Database)?; + + Ok(usage) + } #[tokio::test] async fn test_get_current_usage_integration() { let state = create_test_app_state().await; reset_database(&state.db.pool).await; - let repo = &state.db.tweet_pull_usage; let reset_day = 1; // 1. Initial call should create a record with 0 - let usage = repo.get_current_usage(reset_day).await.unwrap(); + let usage = get_current_usage(&state.db.pool, reset_day).await.unwrap(); assert_eq!(usage.tweet_count, 0); // 2. Subsequent call should return the same record - let usage2 = repo.get_current_usage(reset_day).await.unwrap(); + let usage2 = get_current_usage(&state.db.pool, reset_day).await.unwrap(); assert_eq!(usage2.tweet_count, 0); assert_eq!(usage.period, usage2.period); } @@ -171,8 +171,8 @@ mod tests { repo.increment_usage_for_period(50, period_b).await.unwrap(); // 3. 
Verify they are separate - let usage_a = repo.get_usage_for_period(period_a).await.unwrap(); - let usage_b = repo.get_usage_for_period(period_b).await.unwrap(); + let usage_a = get_usage_for_period(&state.db.pool, period_a).await.unwrap(); + let usage_b = get_usage_for_period(&state.db.pool, period_b).await.unwrap(); assert_eq!(usage_a.tweet_count, 100); assert_eq!(usage_b.tweet_count, 50); diff --git a/src/repositories/x_association.rs b/src/repositories/x_association.rs index d95c63b..d99a88b 100644 --- a/src/repositories/x_association.rs +++ b/src/repositories/x_association.rs @@ -42,32 +42,6 @@ impl XAssociationRepository { Ok(association) } - pub async fn find_by_username(&self, username: &str) -> DbResult> { - let association = sqlx::query_as::<_, XAssociation>("SELECT * FROM x_associations WHERE username = $1") - .bind(username) - .fetch_optional(&self.pool) - .await?; - - Ok(association) - } - - pub async fn update_username(&self, quan_address: &QuanAddress, new_username: &str) -> DbResult { - let association = sqlx::query_as::<_, XAssociation>( - r#" - UPDATE x_associations - SET username = $2 - WHERE quan_address = $1 - RETURNING * - "#, - ) - .bind(&quan_address.0) - .bind(new_username) - .fetch_one(&self.pool) - .await?; - - Ok(association) - } - pub async fn delete(&self, quan_address: &QuanAddress) -> DbResult<()> { sqlx::query("DELETE FROM x_associations WHERE quan_address = $1") .bind(&quan_address.0) @@ -128,54 +102,6 @@ mod tests { assert_eq!(found.username, "x_user_01"); } - #[tokio::test] - async fn test_find_by_username() { - let (address_repo, x_repo) = setup_test_repositories().await; - - let address = create_persisted_address(&address_repo, "user_02").await; - - let input = XAssociationInput { - quan_address: address.quan_address.0.clone(), - username: "unique_handler_123".to_string(), - }; - let new_association = XAssociation::new(input).unwrap(); - x_repo.create(&new_association).await.unwrap(); - - // Find by Username - let found = 
x_repo.find_by_username("unique_handler_123").await.unwrap(); - - assert!(found.is_some()); - let found = found.unwrap(); - assert_eq!(found.quan_address.0, address.quan_address.0); - } - - #[tokio::test] - async fn test_update_username() { - let (address_repo, x_repo) = setup_test_repositories().await; - - let address = create_persisted_address(&address_repo, "user_03").await; - - // Initial Create - let input = XAssociationInput { - quan_address: address.quan_address.0.clone(), - username: "old_username".to_string(), - }; - let new_association = XAssociation::new(input).unwrap(); - x_repo.create(&new_association).await.unwrap(); - - // Update - let updated = x_repo - .update_username(&address.quan_address, "new_cool_username") - .await - .unwrap(); - - assert_eq!(updated.username, "new_cool_username"); - - // Verify in DB - let found = x_repo.find_by_address(&address.quan_address).await.unwrap().unwrap(); - assert_eq!(found.username, "new_cool_username"); - } - #[tokio::test] async fn test_delete_association() { let (address_repo, x_repo) = setup_test_repositories().await; @@ -199,12 +125,4 @@ mod tests { let found = x_repo.find_by_address(&address.quan_address).await.unwrap(); assert!(found.is_none()); } - - #[tokio::test] - async fn test_find_non_existent() { - let (_address_repo, x_repo) = setup_test_repositories().await; - - let result = x_repo.find_by_username("ghost_user").await.unwrap(); - assert!(result.is_none()); - } } diff --git a/src/services/reverser.rs b/src/services/reverser.rs index 4fd3b6d..ef65357 100644 --- a/src/services/reverser.rs +++ b/src/services/reverser.rs @@ -134,37 +134,6 @@ impl ReverserService { Ok(()) } - /// Get statistics about tasks that need attention - pub async fn get_reversal_stats(&self) -> ReverserResult { - let pending_tasks = self.db.tasks.get_tasks_by_status(TaskStatus::Pending).await?; - let tasks_ready_for_reversal = self - .db - .tasks - .get_tasks_ready_for_reversal(self.early_reversal_minutes) - .await?; - - let 
mut tasks_expiring_soon = 0; - let mut tasks_already_expired = 0; - let now = chrono::Utc::now(); - - for task in &pending_tasks { - if let Some(end_time) = task.end_time { - if end_time <= now { - tasks_already_expired += 1; - } else if end_time <= now + chrono::Duration::minutes(self.early_reversal_minutes) { - tasks_expiring_soon += 1; - } - } - } - - Ok(ReversalStats { - total_pending: pending_tasks.len(), - ready_for_reversal: tasks_ready_for_reversal.len(), - expiring_soon: tasks_expiring_soon, - already_expired: tasks_already_expired, - }) - } - /// Manual trigger for reversal check (useful for testing or admin endpoints) pub async fn trigger_reversal_check(&self) -> ReverserResult { let tasks_to_reverse = self @@ -183,14 +152,6 @@ impl ReverserService { } } -#[derive(Debug, Clone, serde::Serialize)] -pub struct ReversalStats { - pub total_pending: usize, - pub ready_for_reversal: usize, - pub expiring_soon: usize, - pub already_expired: usize, -} - /// Start the reverser service in a background task pub async fn start_reverser_service( db: Arc, @@ -335,49 +296,4 @@ mod tests { let not_reversed_task = db.tasks.get_task(&task.task_id).await.unwrap().unwrap(); assert_eq!(not_reversed_task.status, TaskStatus::Pending); } - - #[tokio::test] - async fn chain_test_get_reversal_stats() { - let (reverser, _tm, db) = setup_test_reverser().await; - - // We will manually create tasks with specific timings for this test. 
- let now = Utc::now(); - let early_reversal_window = ChronoDuration::minutes(reverser.early_reversal_minutes); - - // Task 1: Already expired (should be ready for reversal) - let task1 = create_reversable_task(&db, &reverser.transaction_manager, "stats_01").await; - sqlx::query("UPDATE tasks SET end_time = $1 WHERE task_id = $2") - .bind(now - ChronoDuration::minutes(10)) - .bind(&task1.task_id) - .execute(&db.pool) - .await - .unwrap(); - - // Task 2: Expiring soon (inside the window, also ready for reversal) - let task2 = create_reversable_task(&db, &reverser.transaction_manager, "stats_02").await; - sqlx::query("UPDATE tasks SET end_time = $1 WHERE task_id = $2") - .bind(now + early_reversal_window - ChronoDuration::minutes(1)) - .bind(&task2.task_id) - .execute(&db.pool) - .await - .unwrap(); - - // Task 3: Pending, but not expiring soon (outside the window) - let task3 = create_reversable_task(&db, &reverser.transaction_manager, "stats_03").await; - sqlx::query("UPDATE tasks SET end_time = $1 WHERE task_id = $2") - .bind(now + early_reversal_window + ChronoDuration::minutes(10)) - .bind(&task3.task_id) - .execute(&db.pool) - .await - .unwrap(); - - // Act: Get the stats - let stats = reverser.get_reversal_stats().await.unwrap(); - - // Assert - assert_eq!(stats.total_pending, 3); - assert_eq!(stats.ready_for_reversal, 2); // Expired + Expiring Soon - assert_eq!(stats.expiring_soon, 1); // Only task2 - assert_eq!(stats.already_expired, 1); // Only task1 - } } From 2770639799fda3178bb348af0c5066151693ffe4 Mon Sep 17 00:00:00 2001 From: Beast Date: Sat, 10 Jan 2026 00:25:09 +0800 Subject: [PATCH 3/9] feat: clean services --- src/lib.rs | 3 +- src/services/ethereum_service.rs | 7 ----- src/services/graphql_client.rs | 53 -------------------------------- src/services/mod.rs | 1 - src/services/reverser.rs | 17 ---------- src/services/task_generator.rs | 24 ++++++--------- 6 files changed, 10 insertions(+), 95 deletions(-) delete mode 100644 
src/services/ethereum_service.rs diff --git a/src/lib.rs b/src/lib.rs index 863d184..4b46ccd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,14 +23,13 @@ pub mod utils; pub use config::Config; pub use errors::{AppError, AppResult}; pub use http_server::AppState; -pub use services::graphql_client::{GraphqlClient, SyncStats, Transfer}; +pub use services::graphql_client::{GraphqlClient, Transfer}; pub use services::reverser::ReverserService; pub use services::task_generator::TaskGenerator; pub use services::transaction_manager::TransactionManager; // Re-export errors pub use db_persistence::DbError; -pub use services::ethereum_service::EthAddressAssociation; pub use services::graphql_client::GraphqlError; pub use services::reverser::ReverserError; pub use services::task_generator::TaskGeneratorError; diff --git a/src/services/ethereum_service.rs b/src/services/ethereum_service.rs deleted file mode 100644 index 5382087..0000000 --- a/src/services/ethereum_service.rs +++ /dev/null @@ -1,7 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EthAddressAssociation { - pub quan_address: String, - pub eth_address: String, -} diff --git a/src/services/graphql_client.rs b/src/services/graphql_client.rs index b3c319f..78abb71 100644 --- a/src/services/graphql_client.rs +++ b/src/services/graphql_client.rs @@ -270,21 +270,6 @@ impl GraphqlClient { Ok((transfer_count, address_count as usize)) } - /// Get statistics about stored transfers and addresses - pub async fn get_sync_stats(&self) -> GraphqlResult { - // Note: This would require additional database queries to get counts - // For now, we'll return basic stats from the current sync - let transfers = self.fetch_transfers().await?; - let unique_addresses: std::collections::HashSet<&String> = - transfers.iter().flat_map(|t| [&t.from.id, &t.to.id]).collect(); - - Ok(SyncStats { - total_transfers: transfers.len(), - unique_addresses: unique_addresses.len(), - 
last_sync_time: chrono::Utc::now(), - }) - } - pub async fn get_address_stats(&self, id: String) -> GraphqlResult { const GET_STATS_QUERY: &str = r#" query GetStatsById($id: String!) { @@ -460,13 +445,6 @@ query GetEventCountByIds($ids: [String!]!) { } } -#[derive(Debug, Serialize, Deserialize)] -pub struct SyncStats { - pub total_transfers: usize, - pub unique_addresses: usize, - pub last_sync_time: chrono::DateTime, -} - #[derive(Debug, Serialize, Deserialize)] pub struct AddressStats { pub total_transactions: u64, @@ -1099,37 +1077,6 @@ query GetEventCountByIds($ids: [String!]!) { assert!(debug_str.contains("1000")); } - // ============================================================================ - // SyncStats Tests - // ============================================================================ - - #[test] - fn test_sync_stats_serialization() { - let stats = SyncStats { - total_transfers: 10, - unique_addresses: 15, - last_sync_time: chrono::Utc::now(), - }; - - let json = serde_json::to_string(&stats).unwrap(); - assert!(json.contains("total_transfers")); - assert!(json.contains("unique_addresses")); - assert!(json.contains("last_sync_time")); - } - - #[test] - fn test_sync_stats_deserialization() { - let json = r#"{ - "total_transfers": 5, - "unique_addresses": 8, - "last_sync_time": "2024-01-01T00:00:00Z" - }"#; - - let stats: SyncStats = serde_json::from_str(json).unwrap(); - assert_eq!(stats.total_transfers, 5); - assert_eq!(stats.unique_addresses, 8); - } - // ============================================================================ // Edge Cases // ============================================================================ diff --git a/src/services/mod.rs b/src/services/mod.rs index 3c674ec..6e69bcf 100644 --- a/src/services/mod.rs +++ b/src/services/mod.rs @@ -1,5 +1,4 @@ pub mod alert_service; -pub mod ethereum_service; pub mod graphql_client; pub mod raid_leaderboard_service; pub mod reverser; diff --git a/src/services/reverser.rs 
b/src/services/reverser.rs index ef65357..e746a7e 100644 --- a/src/services/reverser.rs +++ b/src/services/reverser.rs @@ -133,23 +133,6 @@ impl ReverserService { Ok(()) } - - /// Manual trigger for reversal check (useful for testing or admin endpoints) - pub async fn trigger_reversal_check(&self) -> ReverserResult { - let tasks_to_reverse = self - .db - .tasks - .get_tasks_ready_for_reversal(self.early_reversal_minutes) - .await?; - - let count = tasks_to_reverse.len(); - - if count > 0 { - self.check_and_reverse_tasks().await?; - } - - Ok(count) - } } /// Start the reverser service in a background task diff --git a/src/services/task_generator.rs b/src/services/task_generator.rs index 47d6282..5e32cf8 100644 --- a/src/services/task_generator.rs +++ b/src/services/task_generator.rs @@ -14,8 +14,6 @@ pub enum TaskGeneratorError { ValidationError, #[error("No candidates available")] NoCandidates, - #[error("Not enough candidates for selection")] - InsufficientCandidates, #[error("CSV error: {0}")] Database(#[from] DbError), #[error("HTTP error: {0}")] @@ -199,11 +197,6 @@ impl TaskGenerator { self.candidates.len() } - /// Get current candidates list (for debugging/status) - pub fn get_candidates(&self) -> &[String] { - &self.candidates - } - /// Check for duplicate task URLs to avoid collisions pub async fn ensure_unique_task_urls(&self, tasks: &mut [Task]) -> TaskGeneratorResult<()> { for task in tasks { @@ -248,6 +241,11 @@ mod tests { TaskGenerator::new(db) } + /// Get current candidates list (for debugging/status) + fn get_candidates(task_generator: &TaskGenerator) -> &[String] { + &task_generator.candidates + } + #[tokio::test] async fn test_generate_random_quan_amount() { let generator = setup_test_generator().await; @@ -290,8 +288,8 @@ mod tests { generator.refresh_candidates_from_db().await.unwrap(); assert_eq!(generator.candidates_count(), 2); - assert!(generator.get_candidates().contains(&addr1.quan_address.0)); - 
assert!(generator.get_candidates().contains(&addr2.quan_address.0)); + assert!(get_candidates(&generator).contains(&addr1.quan_address.0)); + assert!(get_candidates(&generator).contains(&addr2.quan_address.0)); } #[tokio::test] @@ -320,12 +318,8 @@ mod tests { // Assert that only valid candidates were added. assert_eq!(generator.candidates_count(), 2); - assert!(generator - .get_candidates() - .contains(&"qz_a_valid_test_address_1".to_string())); - assert!(generator - .get_candidates() - .contains(&"qz_a_valid_test_address_2".to_string())); + assert!(get_candidates(&generator).contains(&"qz_a_valid_test_address_1".to_string())); + assert!(get_candidates(&generator).contains(&"qz_a_valid_test_address_2".to_string())); } #[tokio::test] From 2a02973bca5fb2f924e11ecb993c29014aa78b3f Mon Sep 17 00:00:00 2001 From: Beast Date: Sat, 10 Jan 2026 00:35:48 +0800 Subject: [PATCH 4/9] feat: clean rest --- src/config.rs | 4 ---- src/db_persistence.rs | 39 +------------------------------------ src/errors.rs | 2 +- src/http_server.rs | 17 ---------------- src/main.rs | 39 ------------------------------------- src/utils/test_app_state.rs | 6 ++---- 6 files changed, 4 insertions(+), 103 deletions(-) diff --git a/src/config.rs b/src/config.rs index 244a4fa..d13330b 100644 --- a/src/config.rs +++ b/src/config.rs @@ -144,10 +144,6 @@ impl Config { tokio::time::Duration::from_secs(self.candidates.refresh_interval_minutes * 60) } - pub fn get_task_generation_duration(&self) -> tokio::time::Duration { - tokio::time::Duration::from_secs(self.task_generation.generation_interval_minutes * 60) - } - pub fn get_reverser_check_duration(&self) -> tokio::time::Duration { tokio::time::Duration::from_secs(self.reverser.check_interval_seconds) } diff --git a/src/db_persistence.rs b/src/db_persistence.rs index 77dc591..09b353d 100644 --- a/src/db_persistence.rs +++ b/src/db_persistence.rs @@ -24,8 +24,6 @@ pub enum DbError { TaskNotFound(String), #[error("Address not found: {0}")] 
AddressNotFound(String), - #[error("Invalid task status: {0}")] - InvalidStatus(String), #[error("Record not found: {0}")] RecordNotFound(String), #[error("Conflict error: {0}")] @@ -48,6 +46,7 @@ pub struct DbPersistence { pub raid_leaderboards: RaidLeaderboardRepository, pub tweet_pull_usage: TweetPullUsageRepository, + #[allow(unused_variables)] pub pool: PgPool, } @@ -88,40 +87,4 @@ impl DbPersistence { tweet_pull_usage, }) } - - #[cfg(test)] - pub async fn new_unmigrated(database_url: &str) -> DbResult { - let pool = PgPoolOptions::new().max_connections(5).connect(database_url).await?; - - let tasks = TaskRepository::new(&pool); - let addresses = AddressRepository::new(&pool); - let referrals = ReferralRepository::new(&pool); - let opt_ins = OptInRepository::new(&pool); - let x_associations = XAssociationRepository::new(&pool); - let eth_associations = EthAssociationRepository::new(&pool); - let admin = AdminRepository::new(&pool); - let relevant_tweets = RelevantTweetRepository::new(&pool); - let tweet_authors = TweetAuthorRepository::new(&pool); - let raid_quests = RaidQuestRepository::new(&pool); - let raid_submissions = RaidSubmissionRepository::new(&pool); - let raid_leaderboards = RaidLeaderboardRepository::new(&pool); - let tweet_pull_usage = TweetPullUsageRepository::new(pool.clone()); - - Ok(Self { - pool, - tasks, - addresses, - referrals, - opt_ins, - x_associations, - eth_associations, - admin, - relevant_tweets, - tweet_authors, - raid_quests, - raid_submissions, - raid_leaderboards, - tweet_pull_usage, - }) - } } diff --git a/src/errors.rs b/src/errors.rs index 3cd734f..b9e478a 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -183,7 +183,7 @@ fn map_db_error(err: DbError) -> (StatusCode, String) { } } - DbError::InvalidStatus(_) | DbError::Migration(_) => ( + DbError::Migration(_) => ( StatusCode::INTERNAL_SERVER_ERROR, "An internal server error occurred".to_string(), ), diff --git a/src/http_server.rs b/src/http_server.rs index af4bc6d..2ba1ab7 
100644 --- a/src/http_server.rs +++ b/src/http_server.rs @@ -14,7 +14,6 @@ use crate::{ metrics::{metrics_handler, track_metrics, Metrics}, models::task::TaskStatus, routes::api_routes, - services::alert_service::AlertService, Config, GraphqlClient, }; use chrono::{DateTime, Utc}; @@ -30,7 +29,6 @@ pub struct AppState { pub oauth_sessions: Arc>>, pub twitter_oauth_tokens: Arc>>, pub twitter_gateway: Arc, - pub alert_client: Arc, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -117,7 +115,6 @@ pub async fn start_server( db: Arc, graphql_client: Arc, twitter_gateway: Arc, - alert_client: Arc, bind_address: &str, config: Arc, ) -> Result<(), Box> { @@ -125,7 +122,6 @@ pub async fn start_server( db, metrics: Arc::new(Metrics::new()), graphql_client, - alert_client: alert_client, config, twitter_gateway, challenges: Arc::new(RwLock::new(HashMap::new())), @@ -141,16 +137,3 @@ pub async fn start_server( Ok(()) } - -#[cfg(test)] -mod tests { - use crate::utils::test_app_state::create_test_app_state; - - use super::*; - - async fn test_app() -> axum::Router { - let state = create_test_app_state().await; - - create_router(state) - } -} diff --git a/src/main.rs b/src/main.rs index 37a5fcb..ca198e6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -277,13 +277,11 @@ async fn main() -> AppResult<()> { let server_addr_clone = server_address.clone(); let server_config = Arc::new(config.clone()); let server_twitter_gateway = twitter_gateway.clone(); - let server_alert_service = alert_service.clone(); let server_task = tokio::spawn(async move { http_server::start_server( server_db, graphql_client, server_twitter_gateway, - server_alert_service, &server_addr_clone, server_config, ) @@ -408,43 +406,6 @@ async fn start_candidates_refresh_task( }) } -async fn start_task_generation_task( - task_generator: TaskGenerator, - transaction_manager: Arc, - taskees_per_round: usize, - generation_interval: Duration, -) -> tokio::task::JoinHandle> { - tokio::spawn(async move { - let mut interval 
= tokio::time::interval(generation_interval); - - loop { - interval.tick().await; - - info!("Generating new batch of {} tasks...", taskees_per_round); - - let tasks = match task_generator.generate_and_save_tasks(taskees_per_round).await { - Ok(tasks) => tasks, - Err(e) => { - error!("Failed to generate tasks: {}", e); - return Err(AppError::TaskGenerator(e)); - } - }; - - info!("Generated {} tasks, processing transactions...", tasks.len()); - - match transaction_manager.process_task_batch(tasks).await { - Ok(processed) => { - info!("Successfully processed {} transactions", processed.len()); - } - Err(e) => { - error!("Failed to process transaction batch: {}", e); - return Err(AppError::Transaction(e)); - } - } - } - }) -} - fn init_logging(level: &str) -> AppResult<()> { let log_level = match level.to_lowercase().as_str() { "error" => tracing::Level::ERROR, diff --git a/src/utils/test_app_state.rs b/src/utils/test_app_state.rs index 8b94b84..8ffe053 100644 --- a/src/utils/test_app_state.rs +++ b/src/utils/test_app_state.rs @@ -1,6 +1,6 @@ use crate::{ - db_persistence::DbPersistence, http_server::AppState, metrics::Metrics, models::auth::TokenClaims, - services::alert_service::AlertService, Config, GraphqlClient, + db_persistence::DbPersistence, http_server::AppState, metrics::Metrics, models::auth::TokenClaims, Config, + GraphqlClient, }; use jsonwebtoken::{encode, EncodingKey, Header}; use rusx::RusxGateway; @@ -13,13 +13,11 @@ pub async fn create_test_app_state() -> AppState { let graphql_client = GraphqlClient::new(db.clone(), config.candidates.graphql_url.clone()); let db = Arc::new(db); - let alert_client = Arc::new(AlertService::new(config.clone(), db.tweet_pull_usage.clone())); return AppState { db, metrics: Arc::new(Metrics::new()), graphql_client: Arc::new(graphql_client), - alert_client, config: Arc::new(config), twitter_gateway: Arc::new(twitter_gateway), oauth_sessions: Arc::new(Mutex::new(std::collections::HashMap::new())), From 
6d637f9089da8eabd9bae7395fbc55b2f7425606 Mon Sep 17 00:00:00 2001 From: Beast Date: Sat, 10 Jan 2026 01:28:09 +0800 Subject: [PATCH 5/9] fix: allow directive --- src/db_persistence.rs | 2 +- src/main.rs | 9 --------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/src/db_persistence.rs b/src/db_persistence.rs index 09b353d..7f9aea4 100644 --- a/src/db_persistence.rs +++ b/src/db_persistence.rs @@ -46,7 +46,7 @@ pub struct DbPersistence { pub raid_leaderboards: RaidLeaderboardRepository, pub tweet_pull_usage: TweetPullUsageRepository, - #[allow(unused_variables)] + #[allow(dead_code)] pub pool: PgPool, } diff --git a/src/main.rs b/src/main.rs index ca198e6..84aad06 100644 --- a/src/main.rs +++ b/src/main.rs @@ -336,15 +336,6 @@ async fn main() -> AppResult<()> { error!("Raid leaderboard synchronizer exited: {:?}", result); result??; } - // result = start_task_generation_task( - // task_generator.clone(), - // transaction_manager.clone(), - // config.task_generation.taskees_per_round, - // config.get_task_generation_duration(), - // ) => { - // error!("Task generation task exited: {:?}", result); - // result.await??; - // } result = start_reverser_service( db.clone(), transaction_manager.clone(), From cb030ece0052c113572117f808dae9bd99f0739d Mon Sep 17 00:00:00 2001 From: Beast Date: Mon, 12 Jan 2026 14:33:50 +0800 Subject: [PATCH 6/9] feat: clean up more unused file/code --- src/config.rs | 38 --- src/db_persistence.rs | 9 +- src/errors.rs | 31 +-- src/handlers/address.rs | 33 +-- src/handlers/mod.rs | 7 +- src/handlers/task.rs | 139 ----------- src/http_server.rs | 41 +--- src/lib.rs | 7 +- src/main.rs | 260 +------------------- src/models/address.rs | 8 - src/models/mod.rs | 1 - src/models/task.rs | 157 ------------ src/repositories/address.rs | 1 + src/repositories/mod.rs | 1 - src/repositories/task.rs | 294 ----------------------- src/routes/address.rs | 3 +- src/routes/mod.rs | 4 +- src/routes/task.rs | 16 -- src/services/graphql_client.rs | 11 - 
src/services/mod.rs | 3 - src/services/reverser.rs | 282 ---------------------- src/services/task_generator.rs | 359 ---------------------------- src/services/transaction_manager.rs | 298 ----------------------- 23 files changed, 16 insertions(+), 1987 deletions(-) delete mode 100644 src/handlers/task.rs delete mode 100644 src/models/task.rs delete mode 100644 src/repositories/task.rs delete mode 100644 src/routes/task.rs delete mode 100644 src/services/reverser.rs delete mode 100644 src/services/task_generator.rs delete mode 100644 src/services/transaction_manager.rs diff --git a/src/config.rs b/src/config.rs index d13330b..7ff2ad5 100644 --- a/src/config.rs +++ b/src/config.rs @@ -7,8 +7,6 @@ pub struct Config { pub server: ServerConfig, pub blockchain: BlockchainConfig, pub candidates: CandidatesConfig, - pub task_generation: TaskGenerationConfig, - pub reverser: ReverserConfig, pub data: DataConfig, pub logging: LoggingConfig, pub jwt: JwtConfig, @@ -42,18 +40,6 @@ pub struct CandidatesConfig { pub refresh_interval_minutes: u64, } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TaskGenerationConfig { - pub generation_interval_minutes: u64, - pub taskees_per_round: usize, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ReverserConfig { - pub early_reversal_minutes: u64, - pub check_interval_seconds: u64, -} - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DataConfig { pub database_url: String, @@ -140,22 +126,6 @@ impl Config { &self.server.base_api_url } - pub fn get_candidates_refresh_duration(&self) -> tokio::time::Duration { - tokio::time::Duration::from_secs(self.candidates.refresh_interval_minutes * 60) - } - - pub fn get_reverser_check_duration(&self) -> tokio::time::Duration { - tokio::time::Duration::from_secs(self.reverser.check_interval_seconds) - } - - pub fn get_reversal_period_duration(&self) -> chrono::Duration { - chrono::Duration::hours(self.blockchain.reversal_period_hours as i64) - } - - pub fn 
get_early_reversal_duration(&self) -> chrono::Duration { - chrono::Duration::minutes(self.reverser.early_reversal_minutes as i64) - } - pub fn get_jwt_expiration(&self) -> chrono::Duration { chrono::Duration::hours(self.jwt.exp_in_hours) } @@ -196,14 +166,6 @@ impl Default for Config { graphql_url: "http://localhost:4000/graphql".to_string(), refresh_interval_minutes: 30, }, - task_generation: TaskGenerationConfig { - generation_interval_minutes: 60, - taskees_per_round: 5, - }, - reverser: ReverserConfig { - early_reversal_minutes: 2, - check_interval_seconds: 30, - }, data: DataConfig { database_url: "postgres://postgres:postgres@127.0.0.1:5432/task_master".to_string(), }, diff --git a/src/db_persistence.rs b/src/db_persistence.rs index 7f9aea4..cf822bb 100644 --- a/src/db_persistence.rs +++ b/src/db_persistence.rs @@ -10,9 +10,7 @@ use crate::repositories::tweet_author::TweetAuthorRepository; use crate::repositories::tweet_pull_usage::TweetPullUsageRepository; use crate::repositories::x_association::XAssociationRepository; use crate::repositories::DbResult; -use crate::repositories::{ - address::AddressRepository, opt_in::OptInRepository, referral::ReferralRepository, task::TaskRepository, -}; +use crate::repositories::{address::AddressRepository, opt_in::OptInRepository, referral::ReferralRepository}; #[derive(Debug, thiserror::Error)] pub enum DbError { @@ -20,8 +18,6 @@ pub enum DbError { Database(#[from] sqlx::Error), #[error("Migration error: {0}")] Migration(#[from] sqlx::migrate::MigrateError), - #[error("Task not found: {0}")] - TaskNotFound(String), #[error("Address not found: {0}")] AddressNotFound(String), #[error("Record not found: {0}")] @@ -32,7 +28,6 @@ pub enum DbError { #[derive(Debug, Clone)] pub struct DbPersistence { - pub tasks: TaskRepository, pub addresses: AddressRepository, pub referrals: ReferralRepository, pub opt_ins: OptInRepository, @@ -56,7 +51,6 @@ impl DbPersistence { sqlx::migrate!("./migrations").run(&pool).await?; - let tasks 
= TaskRepository::new(&pool); let addresses = AddressRepository::new(&pool); let referrals = ReferralRepository::new(&pool); let opt_ins = OptInRepository::new(&pool); @@ -72,7 +66,6 @@ impl DbPersistence { Ok(Self { pool, - tasks, addresses, referrals, opt_ins, diff --git a/src/errors.rs b/src/errors.rs index b9e478a..4c7b5f5 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -9,15 +9,9 @@ use tracing::error; use crate::{ db_persistence::DbError, - handlers::{ - address::AddressHandlerError, auth::AuthHandlerError, referral::ReferralHandlerError, task::TaskHandlerError, - HandlerError, - }, + handlers::{address::AddressHandlerError, auth::AuthHandlerError, referral::ReferralHandlerError, HandlerError}, models::ModelError, - services::{ - graphql_client::GraphqlError, reverser::ReverserError, task_generator::TaskGeneratorError, - transaction_manager::TransactionError, - }, + services::graphql_client::GraphqlError, }; #[derive(Debug, thiserror::Error)] @@ -30,12 +24,6 @@ pub enum AppError { Model(#[from] ModelError), #[error("Database error: {0}")] Database(#[from] DbError), - #[error("Transaction manager error: {0}")] - Transaction(#[from] TransactionError), - #[error("Task generator error: {0}")] - TaskGenerator(#[from] TaskGeneratorError), - #[error("Reverser error: {0}")] - Reverser(#[from] ReverserError), #[error("Server error: {0}")] Server(String), #[error("Join error: {0}")] @@ -74,10 +62,7 @@ impl IntoResponse for AppError { AppError::Database(err) => map_db_error(err), // --- Everything else --- - e @ (AppError::Transaction(_) - | AppError::TaskGenerator(_) - | AppError::Reverser(_) - | AppError::Join(_) + e @ (AppError::Join(_) | AppError::Graphql(_) | AppError::Config(_) | AppError::Http(_) @@ -150,21 +135,13 @@ fn map_handler_error(err: HandlerError) -> (StatusCode, String) { ReferralHandlerError::InvalidReferral(err) => (StatusCode::BAD_REQUEST, err), ReferralHandlerError::DuplicateReferral(err) => (StatusCode::CONFLICT, err), }, - - 
HandlerError::Task(err) => match err { - TaskHandlerError::TaskNotFound(err) => (StatusCode::NOT_FOUND, err.message.clone()), - TaskHandlerError::InvalidTaskUrl(err) => (StatusCode::BAD_REQUEST, err.message.clone()), - TaskHandlerError::StatusConflict(err) => (StatusCode::CONFLICT, err.message.clone()), - }, } } fn map_db_error(err: DbError) -> (StatusCode, String) { match err { DbError::UniqueViolation(err) => (StatusCode::CONFLICT, err), - DbError::RecordNotFound(err) | DbError::AddressNotFound(err) | DbError::TaskNotFound(err) => { - (StatusCode::NOT_FOUND, err) - } + DbError::RecordNotFound(err) | DbError::AddressNotFound(err) => (StatusCode::NOT_FOUND, err), DbError::Database(err) => { error!("Database error: {}", err); diff --git a/src/handlers/address.rs b/src/handlers/address.rs index 813b0a9..0fced40 100644 --- a/src/handlers/address.rs +++ b/src/handlers/address.rs @@ -15,7 +15,7 @@ use crate::{ address::{ Address, AddressFilter, AddressSortColumn, AddressStatsResponse, AddressWithOptInAndAssociations, AddressWithRank, AggregateStatsQueryParams, AssociatedAccountsResponse, OptedInPositionResponse, - RewardProgramStatusPayload, SyncTransfersResponse, + RewardProgramStatusPayload, }, admin::Admin, eth_association::{ @@ -355,37 +355,6 @@ pub async fn retrieve_associated_accounts( })) } -pub async fn sync_transfers(State(state): State) -> Result, AppError> { - tracing::info!("Received request to sync transfers from GraphQL endpoint"); - - match state.graphql_client.sync_transfers_and_addresses().await { - Ok((transfer_count, address_count)) => { - tracing::info!( - "Transfer sync completed successfully: {} transfers, {} addresses", - transfer_count, - address_count - ); - - let response = SyncTransfersResponse { - success: true, - message: format!( - "Successfully processed {} transfers and stored {} addresses", - transfer_count, address_count - ), - transfers_processed: Some(transfer_count), - addresses_stored: Some(address_count), - }; - - 
Ok(Json(response)) - } - Err(e) => { - tracing::error!("Failed to sync transfers: {}", e); - - Err(AppError::Graphql(e)) - } - } -} - pub async fn handle_get_opted_in_users( State(state): State, ) -> Result>>, AppError> { diff --git a/src/handlers/mod.rs b/src/handlers/mod.rs index d065696..a9340d4 100644 --- a/src/handlers/mod.rs +++ b/src/handlers/mod.rs @@ -3,9 +3,7 @@ use serde::{Deserialize, Serialize}; use std::fmt::Display; use crate::{ - handlers::{ - address::AddressHandlerError, auth::AuthHandlerError, referral::ReferralHandlerError, task::TaskHandlerError, - }, + handlers::{address::AddressHandlerError, auth::AuthHandlerError, referral::ReferralHandlerError}, AppError, }; @@ -14,13 +12,10 @@ pub mod auth; pub mod raid_quest; pub mod referral; pub mod relevant_tweet; -pub mod task; pub mod tweet_author; #[derive(Debug, thiserror::Error)] pub enum HandlerError { - #[error("Task handler error")] - Task(#[from] TaskHandlerError), #[error("Referral handler error")] Referral(#[from] ReferralHandlerError), #[error("Address handler error")] diff --git a/src/handlers/task.rs b/src/handlers/task.rs deleted file mode 100644 index 8ea11bb..0000000 --- a/src/handlers/task.rs +++ /dev/null @@ -1,139 +0,0 @@ -use axum::{ - extract::{Path, State}, - Json, -}; - -use crate::{ - db_persistence::DbError, - handlers::HandlerError, - http_server::AppState, - models::task::{CompleteTaskRequest, CompleteTaskResponse, Task, TaskStatus}, - AppError, -}; - -use super::SuccessResponse; - -#[derive(Debug, thiserror::Error)] -pub enum TaskHandlerError { - #[error("Task not found")] - TaskNotFound(Json), - #[error("Invalid task URL format")] - InvalidTaskUrl(Json), - #[error("Invalid task status")] - StatusConflict(Json), -} - -pub async fn list_all_tasks(State(state): State) -> Result>>, AppError> { - let tasks = state.db.tasks.get_all_tasks().await?; - - Ok(SuccessResponse::new(tasks)) -} - -pub async fn get_task( - State(state): State, - Path(task_id): Path, -) -> Result>, 
AppError> { - let task = state.db.tasks.get_task(&task_id).await?; - - match task { - Some(task) => Ok(SuccessResponse::new(task)), - None => Err(AppError::Database(DbError::TaskNotFound("".to_string()))), - } -} - -pub async fn complete_task( - State(state): State, - Json(payload): Json, -) -> Result, AppError> { - tracing::info!("Received task completion request for URL: {}", payload.task_url); - - // Validate task URL format (12 digits) - if payload.task_url.len() != 12 || !payload.task_url.chars().all(|c| c.is_ascii_digit()) { - let response = CompleteTaskResponse { - success: false, - message: format!("Invalid task URL format: {}", payload.task_url), - task_id: None, - }; - return Err(AppError::Handler(HandlerError::Task(TaskHandlerError::InvalidTaskUrl( - Json(response), - )))); - } - - // Find task by URL - let task = match state.db.tasks.find_task_by_url(&payload.task_url).await { - Ok(Some(task)) => task, - Ok(None) => { - let response = CompleteTaskResponse { - success: false, - message: format!("Task not found with URL: {}", payload.task_url), - task_id: None, - }; - return Err(AppError::Handler(HandlerError::Task(TaskHandlerError::TaskNotFound( - Json(response), - )))); - } - Err(db_err) => { - return Err(AppError::Database(db_err)); - } - }; - - // Check if task is in a valid state for completion - match task.status { - TaskStatus::Pending => { - // Task can be completed - } - TaskStatus::Completed => { - let response = CompleteTaskResponse { - success: false, - message: "Task is already completed".to_string(), - task_id: Some(task.task_id.clone()), - }; - return Err(AppError::Handler(HandlerError::Task(TaskHandlerError::StatusConflict( - Json(response), - )))); - } - TaskStatus::Reversed => { - let response = CompleteTaskResponse { - success: false, - message: "Task has already been reversed".to_string(), - task_id: Some(task.task_id.clone()), - }; - return Err(AppError::Handler(HandlerError::Task(TaskHandlerError::StatusConflict( - Json(response), - 
)))); - } - TaskStatus::Failed => { - let response = CompleteTaskResponse { - success: false, - message: "Task has failed and cannot be completed".to_string(), - task_id: Some(task.task_id.clone()), - }; - return Err(AppError::Handler(HandlerError::Task(TaskHandlerError::StatusConflict( - Json(response), - )))); - } - } - - // Mark task as completed - match state - .db - .tasks - .update_task_status(&task.task_id, TaskStatus::Completed) - .await - { - Ok(()) => { - tracing::info!("Task {} marked as completed", task.task_id); - let response = CompleteTaskResponse { - success: true, - message: "Task completed successfully".to_string(), - task_id: Some(task.task_id.clone()), - }; - Ok(Json(response)) - } - Err(e) => { - tracing::error!("Failed to update task {}: {}", task.task_id, e); - - return Err(AppError::Database(e)); - } - } -} diff --git a/src/http_server.rs b/src/http_server.rs index 2ba1ab7..2a00812 100644 --- a/src/http_server.rs +++ b/src/http_server.rs @@ -1,4 +1,4 @@ -use axum::{extract::State, http::StatusCode, middleware, response::Json, routing::get, Router}; +use axum::{middleware, response::Json, routing::get, Router}; use rusx::{PkceCodeVerifier, TwitterGateway}; use serde::{Deserialize, Serialize}; use std::{ @@ -12,7 +12,6 @@ use tower_http::{cors::CorsLayer, trace::TraceLayer}; use crate::{ db_persistence::DbPersistence, metrics::{metrics_handler, track_metrics, Metrics}, - models::task::TaskStatus, routes::api_routes, Config, GraphqlClient, }; @@ -38,16 +37,6 @@ pub struct Challenge { pub created_at: DateTime, } -#[derive(Debug, Serialize)] -pub struct StatusResponse { - pub status: String, - pub total_tasks: usize, - pub pending_tasks: usize, - pub completed_tasks: usize, - pub reversed_tasks: usize, - pub failed_tasks: usize, -} - #[derive(Debug, Serialize)] pub struct HealthResponse { pub healthy: bool, @@ -60,7 +49,6 @@ pub struct HealthResponse { pub fn create_router(state: AppState) -> Router { Router::new() .route("/health", 
get(health_check)) - .route("/status", get(get_status)) .route("/metrics", get(metrics_handler)) .nest("/api", api_routes(state.clone())) .layer(middleware::from_fn(track_metrics)) @@ -83,33 +71,6 @@ async fn health_check() -> Json { }) } -/// Get service status and task counts -async fn get_status(State(state): State) -> Result, StatusCode> { - let status_counts = state - .db - .tasks - .status_counts() - .await - .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; - let total_tasks = state - .db - .tasks - .task_count() - .await - .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; - - let response = StatusResponse { - status: "running".to_string(), - total_tasks: total_tasks as usize, - pending_tasks: status_counts.get(&TaskStatus::Pending).copied().unwrap_or(0), - completed_tasks: status_counts.get(&TaskStatus::Completed).copied().unwrap_or(0), - reversed_tasks: status_counts.get(&TaskStatus::Reversed).copied().unwrap_or(0), - failed_tasks: status_counts.get(&TaskStatus::Failed).copied().unwrap_or(0), - }; - - Ok(Json(response)) -} - /// Start the HTTP server pub async fn start_server( db: Arc, diff --git a/src/lib.rs b/src/lib.rs index 4b46ccd..0f925d1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -24,16 +24,11 @@ pub use config::Config; pub use errors::{AppError, AppResult}; pub use http_server::AppState; pub use services::graphql_client::{GraphqlClient, Transfer}; -pub use services::reverser::ReverserService; -pub use services::task_generator::TaskGenerator; -pub use services::transaction_manager::TransactionManager; // Re-export errors pub use db_persistence::DbError; pub use services::graphql_client::GraphqlError; -pub use services::reverser::ReverserError; -pub use services::task_generator::TaskGeneratorError; -pub use services::transaction_manager::TransactionError; +pub use services::signature_service::SigServiceError; /// Library version pub const VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/src/main.rs b/src/main.rs index 84aad06..77033d6 100644 
--- a/src/main.rs +++ b/src/main.rs @@ -2,11 +2,9 @@ use crate::{ args::Args, db_persistence::DbPersistence, errors::{AppError, AppResult}, - models::task::{Task, TaskInput}, services::{ alert_service::AlertService, graphql_client::GraphqlClient, raid_leaderboard_service::RaidLeaderboardService, - reverser::start_reverser_service, task_generator::TaskGenerator, telegram_service::TelegramService, - transaction_manager::TransactionManager, tweet_synchronizer_service::TweetSynchronizerService, + telegram_service::TelegramService, tweet_synchronizer_service::TweetSynchronizerService, }, }; @@ -14,8 +12,8 @@ use clap::Parser; use rusx::RusxGateway; use sp_core::crypto::{self, Ss58AddressFormat}; use std::sync::Arc; -use tokio::time::Duration; -use tracing::{error, info, warn}; + +use tracing::{error, info}; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; mod args; @@ -66,9 +64,6 @@ async fn main() -> AppResult<()> { info!("Database URL: {}", db_url); let db = Arc::new(DbPersistence::new(db_url).await?); - let initial_task_count = db.tasks.task_count().await?; - info!("Loaded {} existing tasks from database", initial_task_count); - // Initialize graphql client let graphql_client = GraphqlClient::new((*db).clone(), config.candidates.graphql_url.clone()); @@ -82,186 +77,6 @@ async fn main() -> AppResult<()> { return Ok(()); } - if args.test_selection { - info!("Running in test-selection mode"); - let mut task_generator = TaskGenerator::new(db.clone()); - - // Load candidates from database - if let Err(e) = task_generator.refresh_candidates_from_db().await { - error!("Failed to refresh candidates from database: {}", e); - return Err(AppError::TaskGenerator(e)); - } - - info!("Loaded {} candidates from database", task_generator.candidates_count()); - - // Test generating tasks - let test_count = 5; // Generate 5 test tasks - match task_generator.generate_tasks(test_count).await { - Ok(tasks) => { - info!("Successfully generated {} test tasks:", 
tasks.len()); - for task in &tasks { - info!( - " Task {}: {} -> {} QUAN (URL: {})", - task.task_id, task.quan_address.0, task.quan_amount.0, task.task_url - ); - } - - // Optionally save the tasks to database - info!("Saving test tasks to database..."); - if let Err(e) = task_generator.save_tasks(tasks).await { - error!("Failed to save test tasks: {}", e); - return Err(AppError::TaskGenerator(e)); - } - info!("Test tasks saved successfully!"); - } - Err(e) => { - error!("Failed to generate test tasks: {}", e); - return Err(AppError::TaskGenerator(e)); - } - } - - return Ok(()); - } - - if args.test_transaction { - info!("Running in test-transaction mode"); - // Initialize transaction manager for testing - info!("Connecting to Quantus node..."); - let transaction_manager = Arc::new( - TransactionManager::new( - &config.blockchain.node_url, - &config.blockchain.wallet_name, - &config.blockchain.wallet_password, - db.clone(), - config.get_reversal_period_duration(), - ) - .await?, - ); - - // Perform health check - if let Err(e) = transaction_manager.health_check().await { - error!("Node health check failed: {}", e); - return Err(AppError::Transaction(e)); - } - - let node_info = transaction_manager.get_node_info().await?; - info!("โœ… Connected to: {}", node_info); - info!("Wallet address: {}", transaction_manager.get_wallet_address()); - - // Check wallet balance - match transaction_manager.get_wallet_balance().await { - Ok(balance) => info!("Wallet balance: {} units", balance), - Err(e) => warn!("Could not check wallet balance: {}", e), - } - - // Create or get test task - let (task_id, destination_address, amount) = if let (Some(dest), Some(amt)) = (&args.destination, args.amount) { - // Create a temporary task for testing with custom parameters - let task_input = TaskInput { - quan_address: dest.clone(), - quan_amount: amt, - task_url: format!("test-{}", rand::random::()), - }; - - let test_task = Task::new(task_input)?; - - info!( - "Creating temporary test 
task: {} -> {} (amount: {})", - test_task.task_id, dest, amt - ); - - // Add the task to database - db.tasks.create(&test_task).await?; - - (test_task.task_id, dest.clone(), amt) - } else { - // Use existing task from database - let tasks = db.tasks.get_all_tasks().await?; - if tasks.is_empty() { - error!("No tasks found in database. Run --test-selection first to create some tasks, or provide --destination and --amount arguments."); - return Err(AppError::Server("No tasks available for testing".to_string())); - } - - let test_task = &tasks[0]; - ( - test_task.task_id.clone(), - test_task.quan_address.0.clone(), - test_task.quan_amount.0 as u64, - ) - }; - - info!( - "Testing transaction with task: {} -> {} (amount: {})", - task_id, destination_address, amount - ); - - // Send a reversible transaction - match transaction_manager.send_reversible_transaction(&task_id).await { - Ok(tx_hash) => { - info!("โœ… Reversible transaction sent successfully!"); - info!("Transaction hash: {}", tx_hash); - info!("Task ID: {}", task_id); - info!("Recipient: {}", destination_address); - info!("Amount: {} QUAN", amount); - } - Err(e) => { - error!("โŒ Failed to send reversible transaction: {}", e); - return Err(AppError::Transaction(e)); - } - } - - return Ok(()); - } - - // Initialize transaction manager - info!("Connecting to Quantus node..."); - let transaction_manager = Arc::new( - TransactionManager::new( - &config.blockchain.node_url, - &config.blockchain.wallet_name, - &config.blockchain.wallet_password, - db.clone(), - config.get_reversal_period_duration(), - ) - .await?, - ); - - // Perform health check - if let Err(e) = transaction_manager.health_check().await { - error!("Node health check failed: {}", e); - return Err(AppError::Transaction(e)); - } - - let node_info = transaction_manager.get_node_info().await?; - info!("โœ… Connected to: {}", node_info); - info!("Wallet address: {}", transaction_manager.get_wallet_address()); - - // Check wallet balance - match 
transaction_manager.get_wallet_balance().await { - Ok(balance) => info!("Wallet balance: {} units", balance), - Err(e) => warn!("Could not check wallet balance: {}", e), - } - - // Initialize task generator - let mut task_generator = TaskGenerator::new(db.clone()); - - // Initial candidate refresh - info!("Fetching initial candidates..."); - if let Err(e) = task_generator.refresh_candidates(&config.candidates.graphql_url).await { - error!("Failed to fetch initial candidates: {}", e); - return Err(AppError::TaskGenerator(e)); - } - info!("Loaded {} candidates", task_generator.candidates_count()); - - if args.run_once { - info!("Running in single-run mode"); - return run_once(config, task_generator, transaction_manager).await; - } - - // Start the reverser service - info!("Starting reverser service..."); - // Tasks will be started directly in the tokio::select! macro - // Start HTTP server let server_address = config.get_server_address(); info!("Starting HTTP server on {}", server_address); @@ -291,10 +106,7 @@ async fn main() -> AppResult<()> { info!("๐ŸŽฏ TaskMaster is now running!"); info!("HTTP API available at: http://{}", server_address); - info!( - "Task generation interval: {} minutes", - config.task_generation.generation_interval_minutes - ); + info!( "Candidates refresh interval: {} minutes", config.candidates.refresh_interval_minutes @@ -320,14 +132,6 @@ async fn main() -> AppResult<()> { error!("HTTP server exited: {:?}", result); result??; } - result = start_candidates_refresh_task( - task_generator.clone(), - config.candidates.graphql_url.clone(), - config.get_candidates_refresh_duration(), - ) => { - error!("Candidates refresh task exited: {:?}", result); - result.await??; - } result = tweet_synchronizer.spawn_tweet_synchronizer() => { error!("Tweet synchronizer exited: {:?}", result); result??; @@ -336,67 +140,11 @@ async fn main() -> AppResult<()> { error!("Raid leaderboard synchronizer exited: {:?}", result); result??; } - result = 
start_reverser_service( - db.clone(), - transaction_manager.clone(), - config.get_reverser_check_duration(), - config.get_early_reversal_duration().num_minutes(), - ) => { - error!("Reverser service exited: {:?}", result); - result.await?.map_err(AppError::Reverser)?; - } } Ok(()) } -async fn run_once( - config: Config, - task_generator: TaskGenerator, - transaction_manager: Arc, -) -> AppResult<()> { - info!("Generating {} tasks...", config.task_generation.taskees_per_round); - - let tasks = task_generator - .generate_and_save_tasks(config.task_generation.taskees_per_round) - .await?; - - info!("Generated {} tasks", tasks.len()); - - info!("Processing transactions..."); - let processed = transaction_manager.process_task_batch(tasks).await?; - - info!("Successfully processed {} transactions", processed.len()); - info!("Single run completed successfully"); - - Ok(()) -} - -async fn start_candidates_refresh_task( - mut task_generator: TaskGenerator, - graphql_url: String, - refresh_interval: Duration, -) -> tokio::task::JoinHandle> { - tokio::spawn(async move { - let mut interval = tokio::time::interval(refresh_interval); - - loop { - interval.tick().await; - - info!("Refreshing candidates..."); - match task_generator.refresh_candidates(&graphql_url).await { - Ok(()) => { - info!("Candidates refreshed: {} available", task_generator.candidates_count()); - } - Err(e) => { - error!("Failed to refresh candidates: {}", e); - return Err(AppError::TaskGenerator(e)); - } - } - } - }) -} - fn init_logging(level: &str) -> AppResult<()> { let log_level = match level.to_lowercase().as_str() { "error" => tracing::Level::ERROR, diff --git a/src/models/address.rs b/src/models/address.rs index 44ccaff..f999c78 100644 --- a/src/models/address.rs +++ b/src/models/address.rs @@ -111,14 +111,6 @@ pub struct AddressInput { pub referral_code: String, } -#[derive(Debug, Serialize, Deserialize)] -pub struct SyncTransfersResponse { - pub success: bool, - pub message: String, - pub 
transfers_processed: Option, - pub addresses_stored: Option, -} - #[derive(Debug, Clone, Deserialize)] pub struct RewardProgramStatusPayload { pub new_status: bool, diff --git a/src/models/mod.rs b/src/models/mod.rs index a5b3e32..bd4e57a 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -18,7 +18,6 @@ pub mod raid_quest; pub mod raid_submission; pub mod referrals; pub mod relevant_tweet; -pub mod task; pub mod tweet_author; pub mod tweet_pull_usage; pub mod x_association; diff --git a/src/models/task.rs b/src/models/task.rs deleted file mode 100644 index 888ed11..0000000 --- a/src/models/task.rs +++ /dev/null @@ -1,157 +0,0 @@ -use chrono::{DateTime, Utc}; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use sqlx::{postgres::PgRow, FromRow, Row}; -use uuid::Uuid; - -use crate::models::{address::QuanAddress, ModelError, ModelResult}; - -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum TaskStatus { - #[serde(rename = "pending")] - Pending, - #[serde(rename = "completed")] - Completed, - #[serde(rename = "reversed")] - Reversed, - #[serde(rename = "failed")] - Failed, -} - -impl std::fmt::Display for TaskStatus { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - TaskStatus::Pending => write!(f, "pending"), - TaskStatus::Completed => write!(f, "completed"), - TaskStatus::Reversed => write!(f, "reversed"), - TaskStatus::Failed => write!(f, "failed"), - } - } -} - -impl std::str::FromStr for TaskStatus { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "pending" => Ok(TaskStatus::Pending), - "completed" => Ok(TaskStatus::Completed), - "reversed" => Ok(TaskStatus::Reversed), - "failed" => Ok(TaskStatus::Failed), - _ => Err(format!("Invalid task status: {}", s)), - } - } -} - -#[derive(Debug, Deserialize, Serialize, Clone, sqlx::Type)] -#[sqlx(transparent)] -pub struct TokenAmount(pub i64); -impl TokenAmount { - pub fn from(input: i64) -> Result 
{ - if input <= 0 { - return Err(String::from("Token amount can't be less or equal 0.")); - } - - Ok(TokenAmount(input)) - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Task { - pub id: Option, - pub task_id: String, - pub quan_address: QuanAddress, - pub quan_amount: TokenAmount, - pub usdc_amount: i64, - pub task_url: String, - pub status: TaskStatus, - pub reversible_tx_id: Option, - pub send_time: Option>, - pub end_time: Option>, - pub created_at: Option>, - pub updated_at: Option>, -} - -impl Task { - pub fn new(input: TaskInput) -> ModelResult { - let quan_address = match QuanAddress::from(&input.quan_address) { - Ok(name) => name, - Err(e) => { - tracing::error!(error = %e, "Invalid quan address input for task"); - return Err(ModelError::InvalidInput); - } - }; - - let quan_amount = match TokenAmount::from(input.quan_amount as i64) { - Ok(quan_amount) => quan_amount, - Err(e) => { - tracing::error!(error = %e, "Invalid token amount input for task"); - return Err(ModelError::InvalidInput); - } - }; - - let task_url = input.task_url; - - let mut rng = rand::rng(); - let usdc_amount = rng.random_range(1..=25); - let task_id = Uuid::new_v4().to_string(); - - Ok(Task { - id: None, - task_id, - quan_address, - quan_amount, - usdc_amount, - task_url, - status: TaskStatus::Pending, - reversible_tx_id: None, - send_time: None, - end_time: None, - created_at: None, - updated_at: None, - }) - } -} -impl<'r> FromRow<'r, PgRow> for Task { - fn from_row(row: &'r PgRow) -> Result { - let status_str: String = row.try_get("status")?; - let status = status_str - .parse::() - .map_err(|e| sqlx::Error::Decode(Box::new(std::io::Error::new(std::io::ErrorKind::InvalidData, e))))?; - - Ok(Task { - id: row.try_get("id")?, - task_id: row.try_get("task_id")?, - quan_address: row.try_get("quan_address")?, - quan_amount: row.try_get("quan_amount")?, - usdc_amount: row.try_get("usdc_amount")?, - task_url: row.try_get("task_url")?, - status, - reversible_tx_id: 
row.try_get("reversible_tx_id")?, - send_time: row.try_get("send_time")?, - end_time: row.try_get("end_time")?, - created_at: row.try_get("created_at")?, - updated_at: row.try_get("updated_at")?, - }) - } -} - -// An unvalidated version that we can deserialize directly from JSON -#[derive(Debug, Deserialize)] -pub struct TaskInput { - pub quan_address: String, - pub quan_amount: u64, - pub task_url: String, -} - -#[derive(Debug, Deserialize)] -pub struct CompleteTaskRequest { - pub task_url: String, -} - -#[derive(Debug, Serialize)] -pub struct CompleteTaskResponse { - pub success: bool, - pub message: String, - pub task_id: Option, -} diff --git a/src/repositories/address.rs b/src/repositories/address.rs index 0566202..1164143 100644 --- a/src/repositories/address.rs +++ b/src/repositories/address.rs @@ -200,6 +200,7 @@ impl AddressRepository { Ok(total_items) } + #[allow(dead_code)] pub async fn find_all(&self) -> DbResult> { let addresses = sqlx::query_as::<_, Address>("SELECT * FROM addresses") .fetch_all(&self.pool) diff --git a/src/repositories/mod.rs b/src/repositories/mod.rs index 2d893df..547105c 100644 --- a/src/repositories/mod.rs +++ b/src/repositories/mod.rs @@ -13,7 +13,6 @@ pub mod raid_quest; pub mod raid_submission; pub mod referral; pub mod relevant_tweet; -pub mod task; pub mod tweet_author; pub mod tweet_pull_usage; pub mod x_association; diff --git a/src/repositories/task.rs b/src/repositories/task.rs deleted file mode 100644 index a4a5210..0000000 --- a/src/repositories/task.rs +++ /dev/null @@ -1,294 +0,0 @@ -use std::{collections::HashMap, str::FromStr}; - -use chrono::{DateTime, Utc}; -use sqlx::{PgPool, Row}; - -use crate::{ - db_persistence::DbError, - models::task::{Task, TaskStatus}, - repositories::DbResult, -}; - -#[derive(Clone, Debug)] -pub struct TaskRepository { - pool: PgPool, -} -impl TaskRepository { - pub fn new(pool: &PgPool) -> Self { - Self { pool: pool.clone() } - } - - pub async fn create(&self, new_task: &Task) -> 
DbResult { - let created_task_id = sqlx::query_scalar::<_, String>( - " - INSERT INTO tasks ( - task_id, quan_address, quan_amount, usdc_amount, task_url, - status, reversible_tx_id, send_time, end_time - ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) - RETURNING task_id - ", - ) - .bind(&new_task.task_id) - .bind(&new_task.quan_address.0) - .bind(new_task.quan_amount.0) - .bind(new_task.usdc_amount) - .bind(&new_task.task_url) - .bind(new_task.status.to_string()) - .bind(&new_task.reversible_tx_id) - .bind(new_task.send_time) - .bind(new_task.end_time) - .fetch_one(&self.pool) - .await?; - - Ok(created_task_id) - } - - pub async fn get_task(&self, task_id: &str) -> DbResult> { - let task = sqlx::query_as::<_, Task>("SELECT * FROM tasks WHERE task_id = $1") - .bind(task_id) - .fetch_optional(&self.pool) - .await?; - Ok(task) - } - - pub async fn find_task_by_url(&self, task_url: &str) -> DbResult> { - let task = sqlx::query_as::<_, Task>("SELECT * FROM tasks WHERE task_url = $1") - .bind(task_url) - .fetch_optional(&self.pool) - .await?; - Ok(task) - } - - pub async fn update_task_status(&self, task_id: &str, status: TaskStatus) -> DbResult<()> { - let result = sqlx::query("UPDATE tasks SET status = $1, updated_at = NOW() WHERE task_id = $2") - .bind(status.to_string()) - .bind(task_id) - .execute(&self.pool) - .await?; - - if result.rows_affected() == 0 { - return Err(DbError::TaskNotFound(task_id.to_string())); - } - - Ok(()) - } - - pub async fn update_task_transaction( - &self, - task_id: &str, - reversible_tx_id: &str, - send_time: DateTime, - end_time: DateTime, - ) -> DbResult<()> { - let result = sqlx::query( - " - UPDATE tasks - SET reversible_tx_id = $1, send_time = $2, end_time = $3, - status = $4, updated_at = NOW() - WHERE task_id = $5 - ", - ) - .bind(reversible_tx_id) - .bind(send_time) - .bind(end_time) - .bind(TaskStatus::Pending.to_string()) // Assuming you want to set it to pending - .bind(task_id) - .execute(&self.pool) - .await?; - - if 
result.rows_affected() == 0 { - return Err(DbError::TaskNotFound(task_id.to_string())); - } - - Ok(()) - } - - pub async fn get_tasks_ready_for_reversal(&self, early_minutes: i64) -> DbResult> { - let cutoff_time = Utc::now() + chrono::Duration::minutes(early_minutes); - - let tasks = sqlx::query_as::<_, Task>( - "SELECT * FROM tasks WHERE status = $1 AND end_time IS NOT NULL AND end_time <= $2", - ) - .bind(TaskStatus::Pending.to_string()) - .bind(cutoff_time) - .fetch_all(&self.pool) - .await?; - Ok(tasks) - } - - pub async fn get_all_tasks(&self) -> DbResult> { - let tasks = sqlx::query_as::<_, Task>("SELECT * FROM tasks ORDER BY created_at DESC") - .fetch_all(&self.pool) - .await?; - Ok(tasks) - } - - pub async fn task_count(&self) -> DbResult { - let count = sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM tasks") - .fetch_one(&self.pool) - .await?; - Ok(count) - } - - pub async fn status_counts(&self) -> DbResult> { - let rows = sqlx::query("SELECT status, COUNT(*) as count FROM tasks GROUP BY status") - .fetch_all(&self.pool) - .await?; - - let mut counts = HashMap::new(); - for row in rows { - let status_str: String = row.get("status"); - let count: i64 = row.get("count"); - - if let Ok(status) = TaskStatus::from_str(&status_str) { - counts.insert(status, count as usize); - } - } - - Ok(counts) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - config::Config, - models::task::TaskInput, - repositories::address::AddressRepository, - utils::test_db::{create_persisted_address, reset_database}, - }; - use uuid::Uuid; - - // Helper to set up repositories and clean all tables. 
- async fn setup_test_repositories() -> (AddressRepository, TaskRepository) { - let config = Config::load_test_env().expect("Failed to load configuration for tests"); - let pool = PgPool::connect(config.get_database_url()) - .await - .expect("Failed to create pool."); - - reset_database(&pool).await; - - (AddressRepository::new(&pool), TaskRepository::new(&pool)) - } - - // Helper to create a mock Task object. - fn create_mock_task_object(quan_address: &str) -> Task { - let input = TaskInput { - quan_address: quan_address.to_string(), - quan_amount: 1000, - task_url: format!("http://example.com/task/{}", Uuid::new_v4()), - }; - Task::new(input).unwrap() - } - - #[tokio::test] - async fn test_create_and_get_task() { - let (address_repo, task_repo) = setup_test_repositories().await; - let address = create_persisted_address(&address_repo, "001").await; - let new_task = create_mock_task_object(&address.quan_address.0); - - let created_id = task_repo.create(&new_task).await.unwrap(); - assert_eq!(created_id, new_task.task_id); - - let fetched_task = task_repo.get_task(&created_id).await.unwrap().unwrap(); - assert_eq!(fetched_task.task_id, new_task.task_id); - assert_eq!(fetched_task.status, TaskStatus::Pending); - } - - #[tokio::test] - async fn test_update_task_status() { - let (address_repo, task_repo) = setup_test_repositories().await; - let address = create_persisted_address(&address_repo, "002").await; - let new_task = create_mock_task_object(&address.quan_address.0); - task_repo.create(&new_task).await.unwrap(); - - task_repo - .update_task_status(&new_task.task_id, TaskStatus::Completed) - .await - .unwrap(); - - let fetched_task = task_repo.get_task(&new_task.task_id).await.unwrap().unwrap(); - assert_eq!(fetched_task.status, TaskStatus::Completed); - } - - #[tokio::test] - async fn test_update_task_transaction() { - let (address_repo, task_repo) = setup_test_repositories().await; - let address = create_persisted_address(&address_repo, "003").await; - let 
new_task = create_mock_task_object(&address.quan_address.0); - task_repo.create(&new_task).await.unwrap(); - - let tx_id = "0x123abc"; - let send_time = Utc::now(); - let end_time = send_time + chrono::Duration::hours(1); - - task_repo - .update_task_transaction(&new_task.task_id, tx_id, send_time, end_time) - .await - .unwrap(); - - let updated_task = task_repo.get_task(&new_task.task_id).await.unwrap().unwrap(); - assert_eq!(updated_task.reversible_tx_id, Some(tx_id.to_string())); - assert!(updated_task.send_time.is_some()); - assert!(updated_task.end_time.is_some()); - } - - #[tokio::test] - async fn test_get_tasks_ready_for_reversal() { - let (address_repo, task_repo) = setup_test_repositories().await; - let address = create_persisted_address(&address_repo, "005").await; - - // This task's end time is soon, so it should be picked up - let task1 = create_mock_task_object(&address.quan_address.0); - task_repo.create(&task1).await.unwrap(); - let end_time1 = Utc::now() + chrono::Duration::minutes(5); - task_repo - .update_task_transaction(&task1.task_id, "tx1", Utc::now(), end_time1) - .await - .unwrap(); - - // This task's end time is far in the future - let task2 = create_mock_task_object(&address.quan_address.0); - task_repo.create(&task2).await.unwrap(); - let end_time2 = Utc::now() + chrono::Duration::minutes(30); - task_repo - .update_task_transaction(&task2.task_id, "tx2", Utc::now(), end_time2) - .await - .unwrap(); - - // Looking for tasks ending within the next 10 minutes - let reversible_tasks = task_repo.get_tasks_ready_for_reversal(10).await.unwrap(); - assert_eq!(reversible_tasks.len(), 1); - assert_eq!(reversible_tasks[0].task_id, task1.task_id); - } - - #[tokio::test] - async fn test_counts() { - let (address_repo, task_repo) = setup_test_repositories().await; - let address = create_persisted_address(&address_repo, "006").await; - - let mut task1 = create_mock_task_object(&address.quan_address.0); - task1.status = TaskStatus::Pending; - 
task_repo.create(&task1).await.unwrap(); - - let mut task2 = create_mock_task_object(&address.quan_address.0); - task2.status = TaskStatus::Pending; - task_repo.create(&task2).await.unwrap(); - - let mut task3 = create_mock_task_object(&address.quan_address.0); - task3.status = TaskStatus::Completed; - task_repo.create(&task3).await.unwrap(); - - // Test total count - let total = task_repo.task_count().await.unwrap(); - assert_eq!(total, 3); - - // Test status counts - let counts = task_repo.status_counts().await.unwrap(); - assert_eq!(counts.get(&TaskStatus::Pending), Some(&2)); - assert_eq!(counts.get(&TaskStatus::Completed), Some(&1)); - assert_eq!(counts.get(&TaskStatus::Failed), None); - } -} diff --git a/src/routes/address.rs b/src/routes/address.rs index a69b8f4..41f45f1 100644 --- a/src/routes/address.rs +++ b/src/routes/address.rs @@ -10,7 +10,7 @@ use crate::{ associate_eth_address, associate_x_handle, dissociate_eth_address, dissociate_x_account, handle_aggregate_address_stats, handle_get_address_reward_status_by_id, handle_get_address_stats, handle_get_addresses, handle_get_leaderboard, handle_get_opted_in_position, handle_get_opted_in_users, - handle_update_reward_program_status, retrieve_associated_accounts, sync_transfers, update_eth_address, + handle_update_reward_program_status, retrieve_associated_accounts, update_eth_address, }, http_server::AppState, middlewares::jwt_auth, @@ -62,5 +62,4 @@ pub fn address_routes(state: AppState) -> Router { post(associate_x_handle.layer(middleware::from_fn_with_state(state.clone(), jwt_auth::jwt_auth))) .delete(dissociate_x_account.layer(middleware::from_fn_with_state(state, jwt_auth::jwt_auth))), ) - .route("/addresses/sync-transfers", post(sync_transfers)) } diff --git a/src/routes/mod.rs b/src/routes/mod.rs index 00e12e0..2b8c773 100644 --- a/src/routes/mod.rs +++ b/src/routes/mod.rs @@ -6,7 +6,7 @@ use crate::{ http_server::AppState, routes::{ address::address_routes, raid_quest::raid_quest_routes, 
relevant_tweet::relevant_tweet_routes, - task::task_routes, tweet_author::tweet_author_routes, + tweet_author::tweet_author_routes, }, }; @@ -15,7 +15,6 @@ pub mod auth; pub mod raid_quest; pub mod referral; pub mod relevant_tweet; -pub mod task; pub mod tweet_author; pub fn api_routes(state: AppState) -> Router { @@ -23,7 +22,6 @@ pub fn api_routes(state: AppState) -> Router { .merge(referral_routes(state.clone())) .merge(address_routes(state.clone())) .merge(auth_routes(state.clone())) - .merge(task_routes(state.clone())) .merge(relevant_tweet_routes(state.clone())) .merge(tweet_author_routes(state.clone())) .merge(raid_quest_routes(state)) diff --git a/src/routes/task.rs b/src/routes/task.rs deleted file mode 100644 index 3c73607..0000000 --- a/src/routes/task.rs +++ /dev/null @@ -1,16 +0,0 @@ -use axum::{ - routing::{get, put}, - Router, -}; - -use crate::{ - handlers::task::{complete_task, get_task, list_all_tasks}, - http_server::AppState, -}; - -pub fn task_routes(_: AppState) -> Router { - Router::new() - .route("/tasks", get(list_all_tasks)) - .route("/tasks/complete", put(complete_task)) - .route("/tasks/:task_id", get(get_task)) -} diff --git a/src/services/graphql_client.rs b/src/services/graphql_client.rs index 78abb71..16b8481 100644 --- a/src/services/graphql_client.rs +++ b/src/services/graphql_client.rs @@ -884,17 +884,6 @@ query GetEventCountByIds($ids: [String!]!) 
{ assert_eq!(err.to_string(), "GraphQL response error: Query failed"); } - #[test] - fn test_graphql_error_from_db_error() { - let db_err = DbError::TaskNotFound("task-123".to_string()); - let graphql_err: GraphqlError = db_err.into(); - - match graphql_err { - GraphqlError::DatabaseError(_) => (), - _ => panic!("Expected DatabaseError conversion"), - } - } - #[test] fn test_graphql_error_from_json_error() { let json_err = serde_json::from_str::("invalid json").unwrap_err(); diff --git a/src/services/mod.rs b/src/services/mod.rs index 6e69bcf..dc7bd65 100644 --- a/src/services/mod.rs +++ b/src/services/mod.rs @@ -1,9 +1,6 @@ pub mod alert_service; pub mod graphql_client; pub mod raid_leaderboard_service; -pub mod reverser; pub mod signature_service; -pub mod task_generator; pub mod telegram_service; -pub mod transaction_manager; pub mod tweet_synchronizer_service; diff --git a/src/services/reverser.rs b/src/services/reverser.rs deleted file mode 100644 index e746a7e..0000000 --- a/src/services/reverser.rs +++ /dev/null @@ -1,282 +0,0 @@ -use crate::{ - db_persistence::{DbError, DbPersistence}, - models::task::TaskStatus, - services::transaction_manager::TransactionManager, -}; -use std::sync::Arc; -use subxt::error::TransactionError; -use tokio::time::{interval, Duration}; - -#[derive(Debug, thiserror::Error)] -pub enum ReverserError { - #[error("Database error: {0}")] - Database(#[from] DbError), - #[error("Transaction error: {0}")] - Transaction(#[from] TransactionError), - #[error("Reverser service error: {0}")] - Service(String), -} - -pub type ReverserResult = Result; - -pub struct ReverserService { - db: Arc, - transaction_manager: Arc, - check_interval: Duration, - early_reversal_minutes: i64, -} - -impl ReverserService { - pub fn new( - db: Arc, - transaction_manager: Arc, - check_interval: Duration, - early_reversal_minutes: i64, - ) -> Self { - Self { - db, - transaction_manager, - check_interval, - early_reversal_minutes, - } - } - - /// Start the 
reverser service monitoring loop - pub async fn start(&self) -> ReverserResult<()> { - tracing::info!( - "Starting reverser service with {} minute early reversal and {} second check interval", - self.early_reversal_minutes, - self.check_interval.as_secs() - ); - - let mut interval_timer = interval(self.check_interval); - - loop { - interval_timer.tick().await; - - if let Err(e) = self.check_and_reverse_tasks().await { - tracing::error!("Error in reverser service: {}", e); - // For now, log and die as requested - return Err(e); - } - } - } - - /// Check for tasks that need to be reversed and reverse them - async fn check_and_reverse_tasks(&self) -> ReverserResult<()> { - let tasks_to_reverse = self - .db - .tasks - .get_tasks_ready_for_reversal(self.early_reversal_minutes) - .await?; - - if tasks_to_reverse.is_empty() { - tracing::debug!("No tasks ready for reversal"); - return Ok(()); - } - - tracing::info!("Found {} tasks ready for reversal", tasks_to_reverse.len()); - - let mut reversal_count = 0; - let mut error_count = 0; - - for task in tasks_to_reverse { - tracing::info!( - "Reversing task {} (quan_address: {}, quan_amount: {}, usdc_amount: {}, tx: {})", - task.task_id, - task.quan_address.0, - task.quan_amount.0, - task.usdc_amount, - task.reversible_tx_id.as_deref().unwrap_or("none") - ); - - match self.transaction_manager.reverse_transaction(&task.task_id).await { - Ok(()) => { - reversal_count += 1; - tracing::info!("Successfully reversed task {}", task.task_id); - } - Err(e) => { - error_count += 1; - tracing::error!("Failed to reverse task {}: {}", task.task_id, e); - - // Mark task as failed if reversal failed - if let Err(db_err) = self - .db - .tasks - .update_task_status(&task.task_id, TaskStatus::Failed) - .await - { - tracing::error!( - "Failed to mark task {} as failed after reversal error: {}", - task.task_id, - db_err - ); - } - } - } - } - - tracing::info!( - "Reversal batch completed: {} successful, {} errors", - reversal_count, - error_count 
- ); - - // If there were any errors, return an error to trigger the "log and die" behavior - if error_count > 0 { - return Err(ReverserError::Service(format!( - "Failed to reverse {} out of {} tasks", - error_count, - reversal_count + error_count - ))); - } - - Ok(()) - } -} - -/// Start the reverser service in a background task -pub async fn start_reverser_service( - db: Arc, - transaction_manager: Arc, - check_interval: Duration, - early_reversal_minutes: i64, -) -> tokio::task::JoinHandle> { - let reverser = ReverserService::new(db, transaction_manager, check_interval, early_reversal_minutes); - - tokio::spawn(async move { reverser.start().await }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - config::Config, - db_persistence::DbPersistence, - models::address::{Address, AddressInput}, - models::task::{Task, TaskInput, TaskStatus}, - services::transaction_manager::TransactionManager, - utils::generate_referral_code::generate_referral_code, - utils::test_db::reset_database, - }; - use chrono::{Duration as ChronoDuration, Utc}; - use quantus_cli::wallet::WalletManager; - use uuid::Uuid; - - // Helper to set up a full test environment with a DB, TransactionManager, and ReverserService. - // NOTE: Requires a local Quantus node running. 
- async fn setup_test_reverser() -> (ReverserService, Arc, Arc) { - let config = Config::load_test_env().expect("Failed to load test configuration"); - std::env::set_var("TASKMASTER_USE_DEV_ALICE", "1"); - let db = Arc::new(DbPersistence::new(config.get_database_url()).await.unwrap()); - - reset_database(&db.pool).await; - - let wallet_name = "//Alice"; - let transaction_manager = Arc::new( - TransactionManager::new( - &config.blockchain.node_url, - &wallet_name, - "password", - db.clone(), - ChronoDuration::seconds(60), - ) - .await - .unwrap(), - ); - - let reverser = ReverserService::new( - db.clone(), - transaction_manager.clone(), - Duration::from_secs(10), - 5, // 5 minute early reversal window for tests - ); - - (reverser, transaction_manager, db) - } - - // Helper to create a task that is ready for reversal - async fn create_reversable_task( - db: &DbPersistence, - tm: &TransactionManager, - id: &str, // Used to keep task_url unique - ) -> Task { - let wallet_manager = WalletManager::new().unwrap(); - let recipient_wallet_name = format!("test_recipient_{}", Uuid::new_v4()); - let recipient_info = wallet_manager - .create_wallet(&recipient_wallet_name, Some("password")) - .await - .unwrap(); - // This is a real, valid SS58 address that the node will accept. - let quan_address = recipient_info.address; - - // Create and save the Address and Task objects using the valid address. - let referral_code = generate_referral_code(quan_address.clone()).await.unwrap(); - let address = Address::new(AddressInput { - quan_address, - referral_code, - }) - .unwrap(); - db.addresses.create(&address).await.unwrap(); - - let task = Task::new(TaskInput { - quan_address: address.quan_address.0, - quan_amount: 1000, - task_url: format!("http://example.com/{}", id), - }) - .unwrap(); - let task_id = db.tasks.create(&task).await.unwrap(); - - tm.send_reversible_transaction(&task_id).await.unwrap(); - - // Manually update the task's end_time to be within the reversal window. 
- let new_end_time = Utc::now() + ChronoDuration::minutes(2); - sqlx::query("UPDATE tasks SET end_time = $1 WHERE task_id = $2") - .bind(new_end_time) - .bind(&task.task_id) - .execute(&db.pool) - .await - .unwrap(); - - // Return the fully prepared task. - db.tasks.get_task(&task_id).await.unwrap().unwrap() - } - - #[tokio::test] - async fn chain_test_check_and_reverse_tasks_success() { - let (reverser, tm, db) = setup_test_reverser().await; - - // Arrange: Create a task that is ready to be reversed. - let task = create_reversable_task(&db, &tm, "001").await; - assert_eq!(task.status, TaskStatus::Pending); - - // Act: Run the reversal check. - reverser.check_and_reverse_tasks().await.unwrap(); - - // Assert: The task status in the DB should now be 'Reversed'. - let reversed_task = db.tasks.get_task(&task.task_id).await.unwrap().unwrap(); - assert_eq!(reversed_task.status, TaskStatus::Reversed); - } - - #[tokio::test] - async fn chain_test_check_and_reverse_does_nothing_if_no_tasks_ready() { - let (reverser, tm, db) = setup_test_reverser().await; - - // Arrange: Create a task, send its transaction, but its end_time is far in the future. - let task = create_reversable_task(&db, &tm, "002").await; - let future_end_time = Utc::now() + ChronoDuration::hours(1); - sqlx::query("UPDATE tasks SET end_time = $1 WHERE task_id = $2") - .bind(future_end_time) - .bind(&task.task_id) - .execute(&db.pool) - .await - .unwrap(); - - // Act: Run the reversal check. - reverser.check_and_reverse_tasks().await.unwrap(); - - // Assert: The task should not have been reversed. 
- let not_reversed_task = db.tasks.get_task(&task.task_id).await.unwrap().unwrap(); - assert_eq!(not_reversed_task.status, TaskStatus::Pending); - } -} diff --git a/src/services/task_generator.rs b/src/services/task_generator.rs deleted file mode 100644 index 5e32cf8..0000000 --- a/src/services/task_generator.rs +++ /dev/null @@ -1,359 +0,0 @@ -use crate::{ - db_persistence::{DbError, DbPersistence}, - models::{ - address::{Address, AddressInput, QuanAddress}, - task::{Task, TaskInput}, - }, - utils::generate_referral_code::generate_referral_code, -}; -use rand::prelude::*; - -#[derive(Debug, thiserror::Error)] -pub enum TaskGeneratorError { - #[error("Task input data contain one of more invalid value")] - ValidationError, - #[error("No candidates available")] - NoCandidates, - #[error("CSV error: {0}")] - Database(#[from] DbError), - #[error("HTTP error: {0}")] - Http(#[from] reqwest::Error), - #[error("JSON parsing error: {0}")] - Json(#[from] serde_json::Error), -} - -pub type TaskGeneratorResult = Result; - -#[derive(Debug, Clone)] -pub struct TaskGenerator { - candidates: Vec, - db: std::sync::Arc, - http_client: reqwest::Client, -} - -impl TaskGenerator { - pub fn new(db: std::sync::Arc) -> Self { - Self { - candidates: Vec::new(), - db, - http_client: reqwest::Client::new(), - } - } - - /// Fetch candidates from GraphQL endpoint - pub async fn refresh_candidates(&mut self, graphql_url: &str) -> TaskGeneratorResult<()> { - tracing::info!("Refreshing candidates from: {}", graphql_url); - - // Simple GraphQL query - adjust this based on your actual schema - let query = serde_json::json!({ - "query": "{ - accounts { - id - } - }"}); - - let response = self.http_client.post(graphql_url).json(&query).send().await?; - - if !response.status().is_success() { - tracing::error!("GraphQL request failed with status: {}", response.status()); - return Err(TaskGeneratorError::Http(reqwest::Error::from( - response.error_for_status().unwrap_err(), - ))); - } - - let 
response_json: serde_json::Value = response.json().await?; - - // Extract candidates array from GraphQL response - let candidates = response_json - .get("data") - .and_then(|data| data.get("accounts")) - .and_then(|accounts| accounts.as_array()) - .ok_or_else(|| { - TaskGeneratorError::Json(serde_json::Error::io(std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Invalid GraphQL response format", - ))) - })?; - - let mut new_candidates = Vec::new(); - for candidate in candidates { - if let Some(address) = candidate.get("id").and_then(|id| id.as_str()) { - // Validate that it's a proper quantus address (starts with qz) - if let Ok(valid_address) = QuanAddress::from(address) { - new_candidates.push(valid_address.0.to_string()); - } else { - tracing::warn!("Invalid candidate address format: {}", address); - } - } - } - - self.candidates = new_candidates; - tracing::info!("Refreshed {} candidates", self.candidates.len()); - Ok(()) - } - - /// Refresh candidates from local database addresses - pub async fn refresh_candidates_from_db(&mut self) -> TaskGeneratorResult<()> { - tracing::info!("Refreshing candidates from local database"); - - let addresses = self.db.addresses.find_all().await?; - - let mut new_candidates = Vec::new(); - for address in addresses { - // Validate that it's a proper quantus address (starts with qz) - - new_candidates.push(address.quan_address.0); - } - - self.candidates = new_candidates; - tracing::info!("Refreshed {} candidates from database", self.candidates.len()); - Ok(()) - } - - /// Generate tasks by randomly selecting taskees - pub async fn generate_tasks(&self, count: usize) -> TaskGeneratorResult> { - if self.candidates.is_empty() { - return Err(TaskGeneratorError::NoCandidates); - } - - if self.candidates.len() < count { - tracing::warn!( - "Requested {} taskees but only have {} candidates", - count, - self.candidates.len() - ); - } - - let mut rng = rand::rng(); - let selection_count = count.min(self.candidates.len()); - - // 
Randomly select unique candidates - let selected_candidates: Vec = self - .candidates - .choose_multiple(&mut rng, selection_count) - .cloned() - .collect(); - - let mut tasks = Vec::new(); - - for quan_address in selected_candidates { - let quan_amount = self.generate_random_quan_amount(); - let task_url = self.generate_task_url(); - let task_input = TaskInput { - quan_address, - quan_amount, - task_url, - }; - - if let Ok(task) = Task::new(task_input) { - tasks.push(task); - } else { - return Err(TaskGeneratorError::ValidationError); - }; - } - - tracing::info!("Generated {} new tasks", tasks.len()); - Ok(tasks) - } - - /// Save generated tasks to database - pub async fn save_tasks(&self, tasks: Vec) -> TaskGeneratorResult<()> { - for task in tasks { - tracing::debug!( - "Saving task: {} -> {} (quan_amount: {}, usdc_amount: {}, url: {})", - task.task_id, - task.quan_address.0, - task.quan_amount.0, - task.usdc_amount, - task.task_url - ); - - if let Ok(referral_code) = generate_referral_code(task.quan_address.0.clone()).await { - if let Ok(address) = Address::new(AddressInput { - quan_address: task.quan_address.0.clone(), - referral_code, - }) { - // Ensure address exists in database - self.db.addresses.create(&address).await?; - self.db.tasks.create(&task).await?; - } else { - return Err(TaskGeneratorError::ValidationError); - } - } - } - Ok(()) - } - - /// Generate and save tasks in one operation - pub async fn generate_and_save_tasks(&self, count: usize) -> TaskGeneratorResult> { - let mut tasks = self.generate_tasks(count).await?; - self.ensure_unique_task_urls(&mut tasks).await?; - self.save_tasks(tasks.clone()).await?; - Ok(tasks) - } - - /// Get current candidates count - pub fn candidates_count(&self) -> usize { - self.candidates.len() - } - - /// Check for duplicate task URLs to avoid collisions - pub async fn ensure_unique_task_urls(&self, tasks: &mut [Task]) -> TaskGeneratorResult<()> { - for task in tasks { - // Keep checking if URL exists and 
regenerate if needed - while let Some(_existing_task) = self.db.tasks.find_task_by_url(&task.task_url).await? { - tracing::warn!("Task URL collision detected, regenerating: {}", task.task_url); - task.task_url = self.generate_task_url(); - } - } - - Ok(()) - } - - fn generate_random_quan_amount(&self) -> u64 { - let mut rng = rand::rng(); - rng.random_range(1000..=9999) - } - - fn generate_task_url(&self) -> String { - let mut rng = rand::rng(); - // Generate 12 digit random number - let task_url: u64 = rng.random_range(100_000_000_000..=999_999_999_999); - task_url.to_string() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::config::Config; - use crate::utils::test_db::reset_database; - use std::sync::Arc; - use wiremock::{matchers::method, Mock, MockServer, ResponseTemplate}; - - // Helper to set up a test generator with a real PostgreSQL test database. - async fn setup_test_generator() -> TaskGenerator { - let config = Config::load_test_env().expect("Failed to load test configuration"); - let db = Arc::new(DbPersistence::new(config.get_database_url()).await.unwrap()); - - reset_database(&db.pool).await; - - TaskGenerator::new(db) - } - - /// Get current candidates list (for debugging/status) - fn get_candidates(task_generator: &TaskGenerator) -> &[String] { - &task_generator.candidates - } - - #[tokio::test] - async fn test_generate_random_quan_amount() { - let generator = setup_test_generator().await; - for _ in 0..100 { - let amount = generator.generate_random_quan_amount(); - assert!((1000..=9999).contains(&amount)); - } - } - - #[tokio::test] - async fn test_generate_task_url() { - let generator = setup_test_generator().await; - for _ in 0..100 { - let url = generator.generate_task_url(); - assert_eq!(url.len(), 12); - assert!(url.chars().all(|c| c.is_ascii_digit())); - } - } - - #[tokio::test] - async fn test_refresh_candidates_from_db() { - let mut generator = setup_test_generator().await; - - // Create and save some addresses to the DB. 
- // The dummy addresses must be > 10 characters to pass validation. - let addr1 = Address::new(AddressInput { - quan_address: "qz_a_valid_test_address_1".to_string(), - referral_code: "REF1".to_string(), - }) - .unwrap(); - let addr2 = Address::new(AddressInput { - quan_address: "qz_a_valid_test_address_2".to_string(), - referral_code: "REF2".to_string(), - }) - .unwrap(); - generator.db.addresses.create(&addr1).await.unwrap(); - generator.db.addresses.create(&addr2).await.unwrap(); - - // Refresh candidates from the database. - generator.refresh_candidates_from_db().await.unwrap(); - - assert_eq!(generator.candidates_count(), 2); - assert!(get_candidates(&generator).contains(&addr1.quan_address.0)); - assert!(get_candidates(&generator).contains(&addr2.quan_address.0)); - } - - #[tokio::test] - async fn test_refresh_candidates_with_mock_server() { - // Start a mock server. - let server = MockServer::start().await; - let mut generator = setup_test_generator().await; - - // Create a mock GraphQL response. - let mock_response = serde_json::json!({ - "data": { - "accounts": [ - { "id": "qz_a_valid_test_address_1" }, - { "id": "invalid_addr" }, // Should be filtered out - { "id": "qz_a_valid_test_address_2" } - ] - } - }); - Mock::given(method("POST")) - .respond_with(ResponseTemplate::new(200).set_body_json(mock_response)) - .mount(&server) - .await; - - // Call the function with the mock server's URI. - generator.refresh_candidates(&server.uri()).await.unwrap(); - - // Assert that only valid candidates were added. - assert_eq!(generator.candidates_count(), 2); - assert!(get_candidates(&generator).contains(&"qz_a_valid_test_address_1".to_string())); - assert!(get_candidates(&generator).contains(&"qz_a_valid_test_address_2".to_string())); - } - - #[tokio::test] - async fn test_generate_tasks_and_save() { - let mut generator = setup_test_generator().await; - - // Populate candidates manually for the test. 
- generator.candidates = vec![ - "qz_a_valid_test_address_1".to_string(), - "qz_a_valid_test_address_2".to_string(), - "qz_a_valid_test_address_3".to_string(), - ]; - - // Generate and save 2 tasks. - let tasks = generator.generate_and_save_tasks(2).await.unwrap(); - assert_eq!(tasks.len(), 2); - - // Verify the state after the first call. - let db_tasks = generator.db.tasks.get_all_tasks().await.unwrap(); - assert_eq!(db_tasks.len(), 2); - - // Generate and save 3 more tasks (capped by the 3 candidates). - generator.generate_and_save_tasks(5).await.unwrap(); - let db_tasks_total = generator.db.tasks.get_all_tasks().await.unwrap(); - - // The database now contains the original 2 tasks PLUS the 3 new ones. - // The total should be 5. - assert_eq!(db_tasks_total.len(), 5); - } - - #[tokio::test] - async fn test_no_candidates_error() { - let generator = setup_test_generator().await; // Candidates list is empty. - let result = generator.generate_tasks(1).await; - assert!(matches!(result, Err(TaskGeneratorError::NoCandidates))); - } -} diff --git a/src/services/transaction_manager.rs b/src/services/transaction_manager.rs deleted file mode 100644 index 3ff3eb3..0000000 --- a/src/services/transaction_manager.rs +++ /dev/null @@ -1,298 +0,0 @@ -use crate::db_persistence::DbPersistence; -use crate::models::task::{Task, TaskStatus}; -use chrono::Utc; -use quantus_cli::chain::client::QuantusClient; -use quantus_cli::cli::reversible::{cancel_transaction, schedule_transfer}; -use quantus_cli::qp_dilithium_crypto::crystal_alice; -use quantus_cli::wallet::{QuantumKeyPair, WalletManager}; -use std::sync::Arc; -use tokio::sync::RwLock; - -#[derive(Debug, thiserror::Error)] -pub enum TransactionError { - #[error("Quantus client error: {0}")] - QuantusClient(#[from] quantus_cli::error::QuantusError), - #[error("Wallet error: {0}")] - Wallet(#[from] quantus_cli::error::WalletError), - #[error("CSV error: {0}")] - Database(#[from] crate::db_persistence::DbError), - #[error("Transaction 
not found: {0}")] - TransactionNotFound(String), - #[error("Invalid transaction state: {0}")] - InvalidState(String), -} - -pub type TransactionResult = Result; - -pub struct TransactionManager { - client: Arc>, - keypair: QuantumKeyPair, - db: Arc, - reversal_period: chrono::Duration, -} - -impl TransactionManager { - pub async fn new( - node_url: &str, - wallet_name: &str, - wallet_password: &str, - db: Arc, - reversal_period: chrono::Duration, - ) -> TransactionResult { - // Connect to Quantus node - let client = QuantusClient::new(node_url).await?; - let client = Arc::new(RwLock::new(client)); - - // Initialize wallet manager - let wallet_manager = WalletManager::new()?; - - // Support dev keypair - let keypair = if wallet_name.starts_with("//Alice") { - tracing::info!("Using dev URI keypair: {}", wallet_name); - QuantumKeyPair::from_resonance_pair(&crystal_alice()) - } else { - // Load or create wallet - match wallet_manager.load_wallet(wallet_name, wallet_password) { - Ok(wallet_data) => { - tracing::info!("Loaded existing wallet: {}", wallet_name); - wallet_data.keypair - } - Err(_) => { - tracing::info!("Creating new wallet: {}", wallet_name); - let wallet_info = wallet_manager.create_wallet(wallet_name, Some(wallet_password)).await?; - tracing::info!("Created wallet with address: {}", wallet_info.address); - - // Load the newly created wallet - wallet_manager.load_wallet(wallet_name, wallet_password)?.keypair - } - } - }; - - tracing::info!( - "Transaction manager initialized with wallet address: {}", - keypair.to_account_id_ss58check() - ); - - Ok(Self { - client, - keypair, - db, - reversal_period, - }) - } - - /// Send a reversible transaction for a task - pub async fn send_reversible_transaction(&self, task_id: &str) -> TransactionResult { - let task = self - .db - .tasks - .get_task(task_id) - .await? 
- .ok_or_else(|| TransactionError::TransactionNotFound(task_id.to_string()))?; - - tracing::info!( - "Sending reversible transaction for task {} to {} (quan_amount: {})", - task_id, - task.quan_address.0, - task.quan_amount.0 - ); - - // Send the transaction - let client = self.client.read().await; - let tx_hash = schedule_transfer( - &*client, - &self.keypair, - &task.quan_address.0, - task.quan_amount.0 as u128, // Convert to u128 for quantus-cli - false, - ) - .await?; - - drop(client); - - // Calculate end time (current time + reversal period) - let send_time = Utc::now(); - let end_time = send_time + self.reversal_period; - - // Update task with transaction details - self.db - .tasks - .update_task_transaction(task_id, &format!("0x{:x}", tx_hash), send_time, end_time) - .await?; - - let tx_hash_string = format!("0x{:x}", tx_hash); - - tracing::info!( - "Transaction sent successfully. Hash: {}, End time: {}", - tx_hash_string, - end_time.format("%Y-%m-%d %H:%M:%S UTC") - ); - - Ok(tx_hash_string) - } - - /// Cancel/reverse a transaction - pub async fn reverse_transaction(&self, task_id: &str) -> TransactionResult<()> { - let task = self - .db - .tasks - .get_task(task_id) - .await? 
- .ok_or_else(|| TransactionError::TransactionNotFound(task_id.to_string()))?; - - let reversible_tx_id = task.reversible_tx_id.as_ref().ok_or_else(|| { - TransactionError::InvalidState("Task has no reversible transaction ID to reverse".to_string()) - })?; - - // Remove "0x" prefix if present for the cancel call - let tx_hash_str = reversible_tx_id.strip_prefix("0x").unwrap_or(reversible_tx_id); - - tracing::info!("Reversing transaction for task {} (tx: {})", task_id, reversible_tx_id); - - let client = self.client.read().await; - let cancel_tx_hash = cancel_transaction(&*client, &self.keypair, tx_hash_str, false).await?; - - drop(client); - - // Update task status - self.db.tasks.update_task_status(task_id, TaskStatus::Reversed).await?; - - tracing::info!("Transaction reversed successfully. Cancel tx: 0x{:x}", cancel_tx_hash); - - Ok(()) - } - - /// Process a batch of tasks for transaction sending - pub async fn process_task_batch(&self, tasks: Vec) -> TransactionResult> { - let mut processed_tasks = Vec::new(); - let task_count = tasks.len(); - - for task in tasks { - match self.send_reversible_transaction(&task.task_id).await { - Ok(tx_hash) => { - processed_tasks.push(task.task_id.clone()); - tracing::info!("Successfully processed task: {} with tx: {}", task.task_id, tx_hash); - } - Err(e) => { - tracing::error!("Failed to process task {}: {}", task.task_id, e); - - // Mark task as failed - if let Err(db_err) = self - .db - .tasks - .update_task_status(&task.task_id, TaskStatus::Failed) - .await - { - tracing::error!("Failed to mark task as failed: {}", db_err); - } - } - } - } - - tracing::info!( - "Batch processing completed. 
{}/{} tasks processed successfully", - processed_tasks.len(), - task_count - ); - - Ok(processed_tasks) - } - - /// Get wallet balance - pub async fn get_wallet_balance(&self) -> TransactionResult { - let client = self.client.read().await; - let account_id = self.keypair.to_account_id_32(); - - // Convert to subxt AccountId32 - let account_bytes: [u8; 32] = *account_id.as_ref(); - let subxt_account_id = subxt::utils::AccountId32::from(account_bytes); - - use quantus_cli::chain::quantus_subxt::api; - let storage_addr = api::storage().system().account(subxt_account_id); - let account_info = client - .client() - .storage() - .at_latest() - .await - .map_err(|e| { - TransactionError::QuantusClient(quantus_cli::error::QuantusError::NetworkError(e.to_string())) - })? - .fetch_or_default(&storage_addr) - .await - .map_err(|e| { - TransactionError::QuantusClient(quantus_cli::error::QuantusError::NetworkError(e.to_string())) - })?; - - Ok(account_info.data.free) - } - - /// Get wallet address - pub fn get_wallet_address(&self) -> String { - self.keypair.to_account_id_ss58check() - } - - /// Health check - verify connection to node - pub async fn health_check(&self) -> TransactionResult { - let client = self.client.read().await; - match client.get_runtime_version().await { - Ok(_) => { - tracing::debug!("Health check passed - connected to Quantus node"); - Ok(true) - } - Err(e) => { - tracing::error!("Health check failed: {}", e); - Err(TransactionError::QuantusClient(e)) - } - } - } - - /// Get node info for debugging - pub async fn get_node_info(&self) -> TransactionResult { - let client = self.client.read().await; - let runtime_version = client.get_runtime_version().await?; - Ok(format!( - "Quantus Node - Runtime Version: {}.{}", - runtime_version.0, runtime_version.1 - )) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::config::Config; - - #[tokio::test] - async fn chain_test_new_manager_creates_and_loads_wallet() { - // This test requires filesystem 
access to create a wallet. - let config = Config::load_test_env().expect("Failed to load test configuration"); - let db = Arc::new(DbPersistence::new(config.get_database_url()).await.unwrap()); - let wallet_name = "//Alice"; // use dev key for local node - - // First, create the manager, which should create a new wallet. - let manager1 = TransactionManager::new( - &config.blockchain.node_url, - &wallet_name, - "password", - db.clone(), - chrono::Duration::hours(12), - ) - .await - .unwrap(); - let addr1 = manager1.get_wallet_address(); - - // Now, create another manager with the same name to ensure it loads the existing wallet. - let manager2 = TransactionManager::new( - &config.blockchain.node_url, - &wallet_name, - "password", - db.clone(), - chrono::Duration::hours(12), - ) - .await - .unwrap(); - let addr2 = manager2.get_wallet_address(); - - assert_eq!(addr1, addr2); - } -} From de0208d42d01af4647df13ccc4c4fb13a5a8f59b Mon Sep 17 00:00:00 2001 From: Beast Date: Mon, 12 Jan 2026 14:40:46 +0800 Subject: [PATCH 7/9] feat: add migration to clean up unused task table, remove task table related code --- migrations/010_drop_tasks_table.sql | 2 + src/lib.rs | 8 +- src/utils/test_db.rs | 2 +- test.db | Bin 102400 -> 0 bytes test_endpoints.sh | 123 ---------------------------- 5 files changed, 7 insertions(+), 128 deletions(-) create mode 100644 migrations/010_drop_tasks_table.sql delete mode 100644 test.db delete mode 100755 test_endpoints.sh diff --git a/migrations/010_drop_tasks_table.sql b/migrations/010_drop_tasks_table.sql new file mode 100644 index 0000000..859252f --- /dev/null +++ b/migrations/010_drop_tasks_table.sql @@ -0,0 +1,2 @@ +-- Drop tasks table and its associated indexes and triggers +DROP TABLE IF EXISTS tasks CASCADE; diff --git a/src/lib.rs b/src/lib.rs index 0f925d1..aea43e6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,9 +1,9 @@ //! # TaskMaster Library //! -//! A task management server that creates reversible blockchain transactions -//! 
using the Quantus Network. This library provides the core functionality -//! for managing tasks, interacting with the blockchain, and handling -//! HTTP API requests. +//! A reward management server that monitors social media interactions and +//! integrates with the Quantus Network. This library provides the core +//! functionality for managing rewards, interacting with the blockchain, +//! and handling HTTP API requests. pub mod args; pub mod config; diff --git a/src/utils/test_db.rs b/src/utils/test_db.rs index 8ea2673..5ad3044 100644 --- a/src/utils/test_db.rs +++ b/src/utils/test_db.rs @@ -15,7 +15,7 @@ use crate::{ }; pub async fn reset_database(pool: &PgPool) { - sqlx::query("TRUNCATE tasks, referrals, opt_ins, addresses, admins, eth_associations, x_associations, relevant_tweets, tweet_authors, raid_quests, raid_submissions, tweet_pull_usage RESTART IDENTITY CASCADE") + sqlx::query("TRUNCATE referrals, opt_ins, addresses, admins, eth_associations, x_associations, relevant_tweets, tweet_authors, raid_quests, raid_submissions, tweet_pull_usage RESTART IDENTITY CASCADE") .execute(pool) .await .expect("Failed to truncate tables for tests"); diff --git a/test.db b/test.db deleted file mode 100644 index 78d3b254d7896c47275a58eb9f1277d577538fd2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 102400 zcmeFa3AppvT_5cG-gmwC+McX$)|r>AGbA@$?a~k+vMkwS*@`7ui$!3f)mkEJ_abOv z(zFdNP}VkWA}%h=GkEBDp0CeWTKjqK z@5;ye|IS~(v*?`vIkH8*@8$MACrU#-zX#PGs2v@hz3JY)TCMhA_4nfHZ~MAk9bDhO zss10`cl>mR57yq0etEt5o3;Aw_tYBS*Zj6da{BuXrt#9#uQ+8-e)nW?^1R#cx$~1x zXMRXOC~#2VpujTYdW8Q$M};@X5XEHM7&KGQQV(tn(N*x;GRa8<4{I9!rm3zy92^Gc)z# zRR;G)v^u)Sy}A06`u)p~y=j)~Z+n|mcP$ieXHq1M0K~{wKTKzY-p7NQ!u+3uS)#JGr+FSRx&Ye8+ zN%fm|#q*<@H?{0VB~&%Mdt5K_X#M{2r;717@YIxsd3x(EarfwnD z3Vr&xZdqyN_d4ZlBlBZin^y1bKJnh0xqIuA?>&6K{UghhTX(64j-Gh=RVEu|JA3?X zdX_()*>AJW+_tWgPya%0S06puVwL69NB16n65Hc?{)l|%=3U_KQFfK{<>jsL@gjZO z`rXN2$n41{-FI1AyGP{PJ9+J?wJxbO!JVh!-JWzG?>6a?;jNo@&+i@uPw}@ZgS@yb 
zh}%!YdU?uyoR!eL<@vM7QE1^5IaDPj= zM@pkNKgLy7gQ|{S*4>rW2e<6q#i{ zuK#6cti7m8^Dl8@qt27s_EoceZCvT^O((Ct%rLdr-NmZ!$rszJyZk8I7Su<3fy)n` zF5OIRy{gz%Rs5*MRlZNtT@}ldF14g zM{eG{`w4fSt|M7jj>p0Ezgv&o_@AE&tWUi;JidANb+0=buWR0>d&=yju%}gjc-dQR z-(G)GZG6gy4^Q>betmW7BTr;Ux9&dl(9z#O)|^*O|LWk{lX$L@_paIZ>+_e?D?4|$ z-Q@bL%i8d4O8u!_*GKEi$s@qEn|JAlp00GRF7k2U>hQQ4Kdk2PPkP;`-rZ>a+$!xw z&EIPNm*xkXzu5e_`WMx^XMb|`4QJNbi%x&^^fOQ4lMkJI$;t5K`kimR^R_$pZomKb zJ8!@7)`xF>Kr2Yg(SZQf-11OpD zLm!F7S$LF_{iQ5)21~+!^+Rg|FG#&@wt+(na~yhL*Op>@8-o z1uT&fw~Gtda!nIZh~;FQg=I3)dw@-}2f3FqjuRk~PCLPp9D8}2Y4yk)D$`wD7hA4~ z%O<0sC#g7A2ytHmDP}$r4I4+NjGLFFjkQWeqq5W$WRPcfaedsDYuXKD#*f;G3#7Th zv$(FOMkpxcWC9^`-WpoPxT7L7EW^iu>g?ir!xa~UP@_@4=u!^a1$92_6dBIXV{t4M zsM0Y)Jg|r{R1)G$PEBkV*ZCC}VGaQ=#Rbn$2NV=K1vX=tK^ZUFqLtC(B>?m_TkWQ# z-Ql@#7uV~zTv1EPLDvn&qZyna!CWEZEO%`oN!tsjD7AQ!+k}GlVHfViVSg9b>#n%W zgc@^wCL)D$uE~IeqY%rgG$G~P_An=6I`0z{Lbh^)Qd{#~TpzpTijyoMM@7)hBr;RI zS)0aMj%Y`EfT==``-`;QiwPl0I@zKi^j%z!Y`LO^rc9T)7SG!qFsCWY>8E%AFqj_M zU^bpO6W>j3DKOfEKB1IdTn}%#Vi*NwVC?DY4DfAXruN1vpOR#c&PpT-`Xjf6PGlfo zst!4q$Gf;*yXA@smg~v=J^~Lkt7lH(ZUz@J2FM*6v<9K=fSu`F%6pM-(5NWw;(E=N zYszE22U^wCWlKQ51e$)Mmy{3tG8Sc;FrsIChKf6 zZ@aKR?)N7d<#rU`pS6eDq9;0@)PqI2JMG1;w#%!Jc&RvnJldds3p6&Ub*F(y0KjKf|-!GF+71;dD_N7vSZEp2%%*yCY;0y zC9>D*i%ZJhS&pyRas|bL>$fyzF$l(-4D;O?HSgd9IEL&z%>&PL2|9;c6c=z_Uuo^~ z>tnWD(bCnlVRz7Ca<3N;2ui~omSh=QZTG=BV#TnhWlHRnK+9GvWEa=HEtjv9SzhLg zx!!lWEX|=*($(7o871ub1a#$|ms5Q|B*oqj#ImN(Xk(MQMunK@%0h9+E5aVgU9O zSCU<#HDffntcqpFedy&|u7IRP(IaHS*RBZobYo#P_dWlO+E1r!~n(@6(fQuFR~mtQa4a)pfO z_#)}sny!NIR5KQtff-1YP{irh?Y^oN z9SV#f4^*K*?tx{}+vV4bwp{TXYRBP}wgZF@(vB|?tf;s>SuML%FoszN3T0Il6sm-a zDw?p1>+Y5-p!$O`%>)wQnP53n@T5gUeV(P2k|Ktnl`M6V;;2OzP4ILYiKwgFcW{eHBL>xK?QC!8VTS}Mg>DeHI;^8j8Ua;kgXgQc+<9SX3jFU`8 zvIk`@X{&PvHYhDb{l$0?%zVG?yIBWZ?&5m>71y$yGQ4PWW2DuY$&NJk=PfV;d2vj) zGu~f}lo{8hs03XQ_#ofK^}H>YpP9=U<^cUgoC`%?%9pb=R#0~8S}aCqcxs~@wqSy` zQSdoQ@8WvymTS5YC>2j+R$Qj^ps$(>+o3vM&^9xBKimZna>D-rgj+q|Va!oNX9U3UZkEp6N zdsdI>o2Y?@0R&WAlm&9UY$nUChSXKE!qmlslNvDn>DoNc*c 
zk&q|jg2;NZ7Z#~L2%zOeg+m?6XO5DqqO|nkS&52a&&cDQ<#>9$ zBd5B*%dgv8u9zkQ8y&Pci}H$jIt)iek8-*?;Fz{333;m{wyP$#6qZB5GD}yzeL(#s& zrfI^5Dc83_5tmXu@#Y=_FvtR_^VN z_~!g3dF!`to}SdNzxw#wYJYtF-`@Vh#`|u3+1VqvUwbsU`E{p%`^Hb#exT93{--y8 z?OJhsxB1=IzV_C4)PMfgn{WU5>381xoAs|~{6_QT4Z41G1YH08dU*YlkG}Nm@mnuG z{;iY$bjn{j_)HFvrVHp#%1k<372t<8(&Lf1`S_Y^= zb!IGp(7ux9I6@A%WX$FCCNGm1YIZdkRecwrXI3AbcZ?#WyNZjdvZ*Z!djJ={%WGYD zq&e-)b;12eg#C_~x9skO0GG4bFha5jRbpevsEpH5VwM>9(5}T8;()re3ZpT08or&2?#h;7>dV zHVqxX3DC;jK7m)R!d5uB4LfB6iywE0Lrrn#{Lu5Xt$0OcX z7Feoq$n1@mRsmMP6AiK)0U-VA&Jna`K*{lXAImM33ke71dtG(hM@cd4GLw(Lw3<$q zRAf%uemhKnd3Be8TMoeBWUurmbT=w=oE;^MF+qZGG0(k6FRkJkQzj)C+r2qr(`<=i zNxN<3QrpE7JHTujWtL)|iG2@-j6urrUL&G?2$89&$mK5(fctX|-q25x%tAGrS0Gto95fZmw&;kRh?H4M0^`+IGL9thDJlrq*^~z0N`vtmQvAM4OLRuelX|-qY z>fXk~{lZiaZSvYLAobuTul+(#FTb?fGsyI^ORGJ@NiW@4?H4$D$)(kvA)*&wTJ0GO zdeNoTo?)N6msWcQcwTsEwPz^j1skjVf;P{;wAwQ~^Sn!|Jp(b%y|mghvKNzYxZqjfeXMD{fy} z?HQK1b!qi<0f=h0_>RYlIoRaoVHEb<08JTko{aB%5Lqm zE7KQWn#L`0NCDMAQxMP~C=xau4SXs@VouPyGdQQDz>GRoOw^u%1(Qk@EOZ-1cKTn?$-XaGQE1$tD|ep zpRZNx{y%?}csMvHa8Tf&z(Ik70tW>S3LF$TC~#2VpujXRBAG&t+7{JkLC&1NfHUEF$#xK>X_5N>cJf|L?{rK4%PCxtPH&56*UvuZyZTr^u z-+IN(&#X>3ybcN+6gVhwP~fkU0#7{nK)ohKca9!e_4*KWK>)-B1Q6#JX!YC_Y&yib z0ocYldSO`@K$r$>5=j|W8!@yP58Wg~Bf^n&mq;MbAPu!EdxJ>`sw6>mVh-hKM6l3n zxt-}gYAoR9fk07*tYF+ z0tNJQ6LoCUzCcje!Jegq5P_pG0z(k-bRJgzeRL=PrPJWW6Av|>qm^AEEtGCiabTbb2v)7y#--Rnq5#ZC0ghSl~HsK}1!`CX8bM z?$|c4w};o)9ufe4QF(~&?rT8sFBbAD>4}%$zI5=lmwgQZ5e7s+=(z!2pyx1#0_TKo z>Q!UWZP>9bz>4^t#tQ@>{ z^y7>8K0}ytEQWJ4AqjNscKZb} zRE-6t^z~)RpfjK4TO|fgr%Lj`SJwd$0IE&DVAWw?{KaN`Q57&zDRy7W-X=Zq;Podt zsDqAy!31z_ILJj+2aQTW(5bYvtGgv?n+^_xm4o|jY<%_mUxZXOSOYTz&gqbLGERe!jrg4dqpAb{AqS=~)mJxtZj zU{yubs~gNYNa!%?T zFPnVhXPRZ>PUFuTVEvcxw>cai6gVhwP~f1zL4kt;2L%oa927Vxa8Tf&z|$!3@Ue9C zOd{_OtWJC;5%A{f#AgzSZmdpxCh_3<>cnRfmpxmZ_)Nm1r>hg6NtE(rb>cG#7T#H% z_)LO!w^t`Vljzy4)rrp}h;?&y;xh>*-B_LYOoBSs*FHXj;L7pp#Ag!Zxc1s(>CQ6= zHr!uJ*Yp3z57nAq-u$WN`>Lk^zPIs1%@;M_)Lb?`+`QiSK=bv@NpsZvocnDK#|H%t 
z3LF$TC~#2Vpuj>fNR|)rTrZ`?D+`oC@TJ!kTckbUTaDN%C=l@Nl z*8I=SUugdO<_}cg0r+*zf71No=4UnkS~F?d&2e+kB&!n+uY&>y1r7=v6gVhwP~f1z zL4kt;2L%oa927Vx@PC^Er^hcn+B~!Hzj!e@A%ru@k{S)e7?W9F6aMWRcn5@`CpsA-u#v32bw?I{K@9O zYyM#K`J$3LF$TC~#2VpujxTtBWIJ+Ol23K}b@ui$J2rz<#F!JQS{Ucs#u++4wp6eP6gVhwP~f1zL4kt;2L%oa927Vxa8TfXdj+oc&L8&w@45f~=!JJa{6Ag) zuf8APA2wESgz!+Vq-M^YNzEl$ygP+hnRI1>V@inqc$1`Ec`< z%?Fz=Za%-+Y@Rf4G;57NZu~*xw;R9N_>IP|Hh!V;bB&*B{8;0M8sFde?#8z_zPa%Y zjjwIIr}0l3U()yojW1|?PUD@8ztQ;Xjb$TmL=C@TH{RO#VH!I!}{;k|6Bb-^;J6&PwQV=e^>nr>z`Zytol3ZpHVOCd3{=U>qh-8b-6yQ_v-EXC)V*g zSbu%}HT8S-m(-tMZ`5zsubutL*&m#J`0T%&eemoT&wlRgC(nNL?BASy&)K)1ebd?3 zpMCY&KRx@Bv%i1#d1rs;>>X!+{cLfTp1tkNIeY7wb~ZZW&bnuz0?1G`k~WbJ^jGx&z%0m=?|ZN|LJ#~e(UKsp1$|=J*QuO z`j1Y(==5_>-+B7>(@#I0oyMpBsdf6Pr|PM2+CODZ-*o!uDSY~d(}z!Aar&~;7oI+F zdUAUGPMW$uFP$hm-f8{MgA4o_z1gzdHGrlW#cr=O z?;1a{#y?!+AFT2B*Z6yD{M|MF&KiGvjUQg)Z>{maukpXF@i*7_U)T6w*7%>-_@Ooa z#u|TpjsIzlA6($(zp%y+tnojt@jtBb=hygiYy8&PG zdX4X2<4>*eC)fDz*Z31_{P8vZ*c$)c8h><+KeEOjUgHm~@dwxVZ`b&5*7yT!{Qfn5 z-x|MnjsJR$-?PT=UgLMI@jKV}uh#e-Yy9>#e%l(qb&daWjo-4yZ(id!t?~a^<2SDH zeQW%NHU5h=e*GH1ZjJ9<mCv;uGizzQx_@Np}6!wSw<@cI?JZUrB^f=5>H@CshLg4e9z)hl?_ z3SPN_SFGS;R&Z|x53S(A6})@}FI&M&SMZV*ym$pKTEX2Fyl@3CSi$pG@Vpf~cLmQ` z!T-wj|FbWyHQv|wv{_(eDV6F)tS3LF$TC~#2VpuqoO3ap<0Kg|FC4=dEe z`i^l8g zzg+*KdbfJk{Tt4#vlpHI=;>#k!Y3a(`I73%@auQJ_0HSw+`Ik$+wZ*n##g@p(7e*FSRoz1QD*{rSf~c>L+d7uP;`?OoUU*J?-KbmShrto9RC zMmt_T_+~AtLA9ieRA~fG88{F`S4oQHe4#H9OyuW-l$c_;E=@rnrzZesD^S|D!3z*M zzaY*5?A`_XD1bZ)6Fq3l6(Z?~5u79_rmiFPCosZFON$#o$&?@Z=+sDNMFhek?8OVv z3U+aUS6qw9bO@4QI3VQCupcaZ&DMsN(eaWl%BME8w54TlF^er=iHx{iT)>uVnt(zq zC*v$ElZoB~Y@$8Ly^L|30FiXs36|v8%iBz=N9ItO?&7-Gaz$J=83jE_#j!$&`w~bo z^O0!SI67tAyd-U`RVo^lrLG`@JiCkQQjq*j8a?mcQ^I50JaDE<(W2r!ujuGO4MU0`65NC2~ zV!OD`ueb8N&?9c+nQEj2D?ySQF=#bqYcnCmkUDU@?f1|%GX zSXQM8Detz2IT6!&pP&%3l^c}Wn(yNJ*ezF_WC=Mcf^H^}nd;5jG}dxNJJJJ86>{8P zr0rfz2vO3>76qa2;(BDu6)iMny3DnB-tK@oO<7Jq#RGuB^vDLY@x+< zrR?H*c*_;TC@2GCPgiGvZwoWEH&*$SBztsLB2myExh-@e1Nl;Q$hkb;#r4`PS5&ZE 
zPww{-c%WH5a|(AexR5bG?$Dq$2yF-KOy^SGi+qDdMQIn;YqnfdCL^e+iuQZ`$t)ko zzT^T+x=AIzTnc&Mq;odfSyx}Z<%$5mT*fn5XOnr`h5d2AKglS!qxk-; zJ=7LG(eb1nEXv(!FLt$Ee!XhT6-+V(W;6~-7ZMH%tX1f=od(Vz&eceXu@dJ=1e(cY zX-z;a*~RtBE!WhI<*FCVgv5>E3CzmVHV%>C75r74R+Fy>^K@6M=s2Oq#OWanufc&1CxIozVSfb;rFYnNXiv*n7GuBHvUgBFu} zy?8)S8s@Mh%h+nW56%%QhCMA)Vy6UJwqhZ>xbAJae5K6tGGEN~zSCuC4yBT=-X6#( zVb3R^EBCzILYM&(Px+}g6L)bvwB-u?mZAxHNH0Ug$>6!!!vNia7l4!@VUO>}CLhO4 zF9;BeNwuARJ-Fra-8M6n;xI}FcF;v>iuXYi6~P{oEAnCh_7qo=U7|H(G`XybWygK! zD!vFg78!`7MXz= zNR&{>7zWTuD>QqVH3D&Gt_Xg%i|ZvFs7M6 z0z4BeX9}LQXsFM#v{F*U5VVq|PEs7T=mJWw)OYIj!Yx-qw^UcA)S;ncBNE8VFu;tl zp_Yh4=OBu!cy&wZl07{eWKcZZb&W6Baz(Tp%&_r1CjrJuCL`H{GMBW~IRhJ%7NY)Q zJP2mK-}c?C11@)QJ^zYpSxy;Vw7D_T>da(E8vFAWn1Q@FrrR0sFGk9Y>rzyLE(m;( z@8Wvimdnq~&4uky9WQ8`nLVI&hy>_V%3K!gLx^EQz2ZVF!i6b+h6XK$N4n~?tICN;$)-Js)gT6F zAejmJGJ;@u?7_RP@np*t%`+I|-H9&?m@IHgm=*Daf+DQeruF*&v%{#i9Bm z+MGps#XKE`qoPMST^(>tTa<*n)e+lO6I%+)plDx$mmy1C_=da9Mdn#`eS-(k}<;lq^c+n|U`sh)Uq zj{z8Dfl4u|VwAK!48uF(d^iVHt~2d`*bL+*?P|ywqZu9wyxxUC4_-tr zzH=OX{fdhS3rc`!n<;`pI#gykGap+cvV*`dEcW}Er~zz;ELq+t9TuE(M=4RBxpDMCdGuD4dXx z6+t#KMp8V}LDA6qAm7%Nz9;6v&TBl{az&9gF+!T{F6BraWOI#XBstLAJSbz^3ktDuA=j~hzz8LEVv*q?f~BTY zZLZ|L(P{6jqmwOH(4J*p!T-!8bw?vIz_s3ZyhEp+ zv*n6p#~KNSAyg9=z`?x5An9tVBSZ6fdpP!lkmz;MzJ*#iI!u?wE-rh^HC^C+L}c8_ zj11=zXTU_JwTMz*W)6f-^hKU>9X-SVl=8DNu(O}HuDGgtNDy?3!(^sCBIZO`LX?np zrgpmnP)Uiy$Pg%D$W=zAG2VIqF}GZyftA6C6DCj)NtQ_#y@f6bbcl^mi=Qt-G-@R( z7-;^mbo$xO`?Yb!HKMT;5R%c1w9JwZwWQO^tYMp|`ceRbK`<%C8KHPEPRm1i=QZkE zuBoQ=HPPZjQP)@Z8_N^N3z4m+ve;DRJ2ny+B{qjhvaNGLGUty``T7xbuaAw?nJ zY)Yc!a0m<_%bkP}-Nl_gR2Hzy4@XE_4~7!9i|g?%S4>uspOE9TvFQ&yvTz)NnHz1F zKz**C=Teu)28FU5$W&J+z}YUYPu+5bzE+qEWCEqM8%lIEXs0O8BYN3x4>BiNWVq#( zHo)_-)E*}X$xu(5Yf`gsO98yw-@CP#!V|tP8NS*4=>!4m%<`W`9Elp&)7P^z` zEnBVtkaMQRTG&`}kq}I#Eu)Y-DnQE~s|$TOo zF0ugCIg}zE!}mp$nyF}x1*V_**31mOvCCq?&iSmgF~bTp zi{l9=>OMC|M2#Av(>NGnV{=!%A8on9?wEoxUFrdvj?y}gVPs&<;UzoqhD*qj`C`df zat_T0zUg2)?~CG=D{x{$nJngW#~Dz=;k+N|9c&0JZ6$E$19r$-NjSBJKsBy&m0o+7 
zU&59vc2bphhca5iEs|e^06Q`Xjl->J2I-J(#&f65!z`ND~EASVGj@abOIGiN?x~y6b-38*aJ6Y7W)rRSDGMKIn`j%E`Ku zz9lN%o<1^Vg%1}nS5Ph8*I_U9c3tC}w_Fio27C%uldqYdiY8rl<&I7$dV4yD0$Os| zR-bn>q1O)buEOkWL);ZtBnXsPdR=_!Su#}SF#%&z8INpbHe+niW6?I)Ru=w@f=A1p z^X$PDmmyj`n(T#<7YM{~(3@~G-4Y1M<{5v?_o-;)JCNwugVZ0i zq~4^OBEYy)ntf}S~W&jk`gK#?zG~5oznUSQ4XY@zPLRn%=mmJJ`qZUE$ ztP|ZWSH#V^(qj^@?DUy*q@!$S=$cU%>8F9EcWp76GmIiDofsZj`p$7qXUjEpX$YL* zu*I{HPmMGS6YwC;$GxRTsKKoh#MgnI7$qZub|Wb6!~EDJ;&RFxY2l$_ zR)QhE^ucf$PG{oIdjCmRTnR2oES*>J!jVvNlr)Cr126PGN@$h@C2RtHM6(!<9Hc)JTakzCymw$*u6W$F!Et*Q=&Uiq ztB|8#f_MxYEtJi@FlUGDumA@b%=UYo#ggBp7kb6TDOdn#1!A&F5)&QW3Kz03AYPHQ z7P6~B1DIoN--irRK)O5EFp(|Sbic17*x}dx^Z%1;-(PFh-d$^cSM!}szWLI|2OD2s z?Ez>t&W_5Xv!f5yfBO1q{oQrDqLfAIR(Ute5r9sl(4yN}&t;MyNoPOV?)ada&y z11WJ)$HJwc>jAB$!*Swhuoxq4OAer+FccD53SFQ}x5o?1kB)9`%pk5>KNYZ!Ptl=h zF9uXANQT|zOmobk9ddp!PSif&7hNsFS+4f;mu7w-bvdJ8i+oUy#xq^c#o-X1gGn|M zfQm&AblOmL~*L`@>Ccq+fPqOb|L~&}If0GE@Vjhj&uRSs>`toskkXfJ@gj z<*?gt)qZ=En;Xx2)v~CEl{01`Ep&`Uhh3?I0iZzvZF;g8^d+-h^{s+u(fCnwlbZrN zLrj{A4o4-(GEzbeH}Py1%%}KdR7{WwLM;nf7~&RQErlK3+2l4Q5D=L%y29zAZxIDE zkl_~V^C+STuIpQ3uBb(|d_afEyJq5v02&xQxJU)$uCd%4yHgFxwbxO}z@wvD^@X9;*Ts;9 z(rVAia7?(6WD$uCO{ne=?4S|ODKXc<$78>XC%h!ib2YAg%jGw2>=_bx>!pt&8dnnm zWCZvYfYN3l3+`|}p!m?lWOAA4DU;HP?8`=U)|`|U}AHkm=1hL@hvRp~LU zD-aJ#hKt2ku^6TFtYp&C4n$itmJpywEL8ikO=kNA?(3UEi_>c9wnGtyPqEav3NY(R z)|!sW0KhZ`DBVG4s%N@kvV81%;nDLhGuv~NKyIw|iwS)4#%jOS>{o29_L_(Bo649H z3s;*&9O`tno*F7GoQcGtnDTtP-BuIV>E%E$%#E3qXGvK5s*PC`DX`B45|%)bS;eiV zGcr(}g-knqY&fcvj8ZLVEgaPo_?B7w*IP4a>^TK5foS9oAgY>I6o@ni%iOKHmJWz? 
zN*L@`PC$a)i{jdMY|MgDJ_LD;!!@Xi^G)IbqDKYZB}zen7`locIPGd}DahTfSgn57 zzHFn&eka;qN=$<6}gwqYDct2S2qP3pys)qde2ZF6CU+jdNW9}@Ev<#?MMw5U89`#6Rgmf9N4!?7bnaNBc zJJseiR+=(whtPetsP35|1Pn7&rH!^=hCvCytmj%m8^|_R`>iwG+PF9d0atiKOqr(mg7^s>SYFpD_ zeKrVc-?_Oowu81J%~Ct?U_6rP&?k#96(e5<2O2tIs!=pW*boBG!<^3CPv2PWw~%za zagnJSkr;SvOTO3&q|rozH=tIyTGa$`noCL6g%%3zXj z^&z?#69g_*eSl;y+NeZTW7=g-5FJ}D6~n9jtL})ribsv%VR^BlN;|_vhY1IwS@j*6ZIFLt!Y6)3~;M#jP zneDf4I+m(P(93wcq-|TeemL5eFU6!z~(_;d99;ht=$x_aQEEf@u%T zHJ+KGh18blDn8WdPNGPaYyW2B<9>VJ9^6{(6{&dR#%jN{1ZiV6MrNZhndKRs&q;{r z_u1*#lys@Yh^z&77riby;l#o6E=L;OVn}JibTZ&&slWpP z6vp8~$@RsM^Vv}~l4ffEYLnT1TNLkYGE=5t&rDhwoaf^zoEU0M72Asgb>dX39Oy_$ zNua1IgM^Ol{(MyX+HGcg?R|Uk#!4Dg9saNtLjx|xXM^?vV1~(XPA?*^-O~_tQ1!-M zFwFqIL-)|)N%Q~eP3!1aYS+{2AAkME@sAz9>sUX2-L>Dl_MOe|tbXZl*nG)4A~0{f zsd1F8I^9z6Yx)AwC_(P??wKCPep%*mHkzaj@*`&)PZ z_|6a9`Fq#id}n;;6}Nx$_BY@DjN6ahe(tURcbehj&A<&%`d$9mYc7+@!=ca zcH`|g`Zr#9{gjC+*;{8jpaQpPX{i^DymVY!ZZsUJ>Xj0cp^sw zd2He#7j!9c5FlHty~5p{jn#gEv`05`*m(7@!Gt%osI88`YI`P3&Izv*c1W|@>Q@?B z6`u1wjm5a&(xTdLlmbxJQ3x~0!6n8!zKYW=uapIl=~c1a1+tt~TTl^TX!hdb=mi@y zf5{p}g3ZdgNERT}a>e8WE1H zyF>zc25G2W*&9qkP$dbf6LTm>BZ7q6Q+Ial4ac;yyu zh)J+8hLbJ^Y4fo*fsRf$xwSx%^@}Mhn0z@dZPS4l2IC7|B{osJ3JUmWB2&r01CoLs zhP8KX%ntE_|1aVNAxJ`#)}RwcToRQ1wve#%@em0+AeUPp#T#^wA8@S%!!a8&<=Wdf z>Q_PNnT!Tp7%uu!VOXRa;>J{&5emmGv3X}gw@i~uXC&IAV!!s^Htl7<%?+P&nc1{Q zb|)kXd05pC58T9L<<0=aB5TkxLa)WC_K0vs4h_lVyb5I0et(nMeow@F+{S9ZXA@qt zaglLadYHy&)Sh;*PFg)BQ&^Eeie_i#^%A28IK4qiqunW1#3QKo>svFd+E&WhOPp4; zjF94hgfeHUw4$Yv16*}4l;dGAWIR&qVO^s3?yZk|J)iT2jTJTT_4y@lWT8l~)8#m> zR(mHMOrPc>Fz-ZglEQjt#J8u6ubT9yZCs?Jp4CZd#@4fHnNJ0BB3Wor5T+9f!Izv2 zX*+U1z;*W%d(9GN-~0Js{>b|Rj%y>Tm&6$ z=8;n(>;&$}u*72EfkD=H$50WqX+W9yDyMtZrjfKqx&W0%i7@eMzq`4#{T?zp-}p$h zqftKXFiWY6F6}(@(RpiHR;#WwZ!U%-$yDY8O(W(iCb`kP_EQ@(T(W@xE-`UVz^pHE z$S~}6`UM;UX~&q+GHsb`4u~Nea&)NI-m~#>zs<&*zIckpd>lEg0c1PFHsoncg7ssj zdfGr^hA8gZ7-~aMG=OM_WESJvpKW|ZDS~FDxZs8%k{CAPiwRbER9D8TmAXYyas#-{ z^L$5J*q~L^estsGew&y#-@d))(kHcX5w7+*Sj%|W5~I$TVtccJ49xM#M4Iq3*ADb0 
z;0E3(_Qr~zw}IL}+PDZYI#vv9Icfoc@2k~D{0=Augw-2TQdu=F1ujRlhj1eT2TZBl zWCnC+^rCuZis!>P7o{PO!HZTEU{6h!91$#wcG58k@LpBgJe$@&>oT)FH*5?yR(n2f zxUt&rA&i%7ta43(tto`6@ItrULN{V=VB#JR35!Lwfw?mmkQNk3_Oh(jq|xnoBAp{` zHEPjlzco`kf-thkZe(;?Bemmud4G=1DU~jG8rLDGYG(i7QpGS$y47yLpdU!-e8}lN z8sEq-#2}hd zS?wbmvnX#^aRZud;*o=@nI~F zXX3-y9#0{~9@}G&YqpZdkn2!rqI7mHTJf)FyhJC>QU=z@|0?<#jY%f%o`t2tmnZdN=4HWDX`EX97h z5A#(epn0=m-P)kf?RL%Y7ZF`$%S}*vcqnwcNyo5hV8o-fIL5xG&k>DKy6YEnKsje& z+k%m9=d$ljWMS>@Mk{-CiaoAqv(nm;w95-=7J4fS`&<2DXdV1N-Y>Q}*BrM}&X%ZD zoTSFPc-Qb*pec?lTCT(Gvc_cj-pai1Q&8Ev3MGnoHB_pq?4L>?>L8VQh z=S8W5GUVYnr?5ZMFQz!{Num^Mn|h*GYT&p{lMZ*DgMR0R+)O1BFqID0XgbmL*{Uh} z#S}5vt68^LWNncazSwL!iWPJ|J`t7}=VAy{Tq*c34OSyjJze$1%0!uU$I*ciqr!1^ zRgg$k?t`<+Ug-?aa_xMXX@zGo=Nd?WK@sKe^s7lL@bw-DWSx@)o=y(ixv&AW9FFsK zzXtp3*Wo-&#*=9sZzTcymHyGtScB^!O3pep7lq~dSe(d45>8#|S5rAy=|GEjUKo>; zCNMRS43KD;A|Ogt8)yjoiwlM6R2e zc-FE1-Y>>7x;>qaXNeh?`>^X~G1qE1=XO-WPB_bQ1 zDRtPM&wyPHD)SmHN5yE_@gBvF7Id^*@;i18=oQ#+40r2iKcDgcC)meO_5Z(k|3mlb zd!N7e!}m1M)BpJ0`0m^8eDcl*?~u1Ycl#r^!K}wy|KL`1>#eW+^H*Mb1;6^Ws~@~t zfj;fWUS9ck|5>`eh#1psWpZ|RKG(c|Qr32C>uR9X8i@lj$_X%byKup+xloj+xH^Fn zWUenN#uV*90by21*B2RMg7%Jpf?afs3ECAG3U(1PCTQQ?P_T=VF=MvNRU>pUt)!!8 zJMR|7dC9Bz*sSZ4ZFa6eJ5j82Q-e)LP!`tp3n`-?OS5=fyFf=KMx4;5?piIo<`|Eu z^KMx$wb9v5Rx5!e&U+UsZ@s=~84+9b(8s)p8Dkcy9RLNps2THIEF3S@_sz+vgv_ND zdXSh_y9f01?YUva`C+`o*{w{cr*I5qAYQ+aGagTAMg_UJS#+J|?c&rmaqtqWq@=C0 zWM0TYeJD+P6t93#1WNb6zUUcawDtw`F)xC~7_CtZ1$&`rjQ5Q|0ZGHk9hXbe0!ATo z!URlnd2V#8sVbYBXzX#j<8fAH*O-YG5BLK7dZC%NG~sDo2RtY$YU5_E&+@P+9G{w$hp{5&>Iupp zyuJtD{MfHIw-O6`h~JF6J!At z?RU-YM2l{CZDY(MwP>MW z7ja{*ww7g;O2g=lgNp~IS&bUGTI9=x9hxru0FZdTp6;NNm}3xo-4@l9QW)So+&(aqw+o>chVSuz@R?pq-sznSJL4 zOGG+9!mz$$z)Dj;!WSm!(wthNasy9Q#ig>4jw{)*Hk?S#+Hy|_oD5o9RIS0VJ}_Wq zYB0^bQPLgP4h~8Y8nttI~y38X>`@4lTi_6pu9l>)fQA&bsVq`Xx&9vUiE-g zjQ0jzELP3jXrvU7h@5%V%{1*aszF6^#HQ6zn9nS?XqD~k4CVh<0~TLGCPnMo<20qdtsuq9rjMNwIlXQvE&=>JeLU?pf5Z&+UWfQ2}) z4u+KtSO>JOISeZuut5FTY{*n>`Pf=BX_f+~r16LjbDz)rCXB{YR-n>|SH`FL9Io3- 
z2CR65#P)#Y_5J}XRpy&S(||t0=G3#@J+rDZ-I!bvm60BjaM|MRSzdXf!tkRQ%CnCL ztOTtv3d`#-V5Qqm41vs_U0MyPPPxeBHDj_hJX>9@=C|1)v<%mi#eWVV@nsaNS;H_)KzrRbLgB z$VV$_JlU)(yjT@yvt=p6nbttUA&^>%z5YDM49zF$O+yrtbJ|z-DYn}MQ-yZE3+(0I zOhbFmn3hh?@{IC=t*Ll6*Usd|WTlv(SH&Kz2Leua(sDdfm zHV3*FYmNgC3v1af_b2mw^<{ln+DbT(p=^`}C>uATvDCOw;m0L6<)^J)DWSIko{rsm z4LX;sdiAsqGelg?RQn)%_A=jA`zn_7aR`hRm2(p*TV=M)=UH9PHw13y)Kx##7STwi zSN7b{fu8H?eiwA0w4Gkv?|)_9$OeW|YI%xGlE|wgo>Ja;nuz*Q1e8cxN+fwU9q-q? z99Jh5`}Fg_VnmG{)kmRy10IGbB<~NWk3w_FdutzsrWKcmC?v;+O&_Jx%?TGeTThx@ z!O(;aD%3|>*|=xclAX2`h|^)A&N_0Ora&>+|H4$Q+g39|XX=r`okPPHIZj_yEJ5yR zd38F{QN8DI)!}BK9X-W9*M})b%UwtdZ7&Cf#&0sS8y9iN&{0-qZfI|0JD$;n(Pi{( z>oVBi?Q=vsOTB+sjM;gT8@X&ZcfyUycg#6dly2j&lW;c|#d663Ei0flm^F}vWB=~? zVu;4S(>@B#mM7?=&0GAe`&qK~x|=>j-V%ckf0tfjBHy$<|RDq1sBT_N2D z`$WGOntWdzqL564-`%GuAGS4PRh!gvmfL;E$%(XL)E!eedy3BzAk;jxUIq;M*cqcs z*q8b+w8|g$U44#dCWmkCqtNVrzp+mdt)+qe^dz01nO zHu6pEfA^1u=3>D2ixIS&?{&TP#OY{sJgz*xO&5(=YuO0A`$t2o)P6_5*kR?i^vqubva?ei_u!mJ@wfw(M){Cw)cRZ= z9HZb8YYtwN;5PC<`miWf*C|&>GQ&=zLgAD39=uF}3mZpOcr6uSdT`1)IS!h!FXh-5 z`yA1#eW#c1)_wo~BiO4y`s()8Cr^L+Y4!A-PyW@D4?lU&H6X@l%Q##VHK3?i^UM2 zexYc7pkOaNhR~RjP_T>1P>iP3go0gMh7vR)8x-tfGgOSGUL>bM?#mLsn(UPFjyLTF|ev!NJGxCsTjxD6Fkg9vrfUWj{}$jRNR zQ}$dX%#z3#9FH8WWAVt1Wt9egzbt!%g1xXCLQ~E|!7hG72^!%M3iiTqDCF_++?gyI za=Q<3P+?|9omTH-Z6OhqVK&>$1fI9gynh?1gBk^1UmS;GG_g4}vy0_Wyg-sOL&08n z4x#DfpkOadhcYBX11Q+VbqJ9?3g zPhf2!iJ1)*PvC@-i|T>RF4}4LRcuP?$QZWzwq9Vp^N{vM* z*kl09wxUi0j+h4wTt!7YtSDTj4$H}Oz97#^q7`Gpj?A-oQreb=8{uA$2QVPANAV(n zW;PnYQncec6zts>Ftkz*6zrP@FvN}u6zq=-UcpmTit3Q4mHMAt~{M11BwB!Rr)9ZVK=AsV16Yctoq}feH3L|TX6^$8d&dO~&9DOs_SFMe)*oxz@$J}@c69dX2oHr z?d@6QFM#%@45L$(h@*+A8EoloYEZiI1nBsT>$hLPsvxMwJToSy4Ve8j2R^u(67pr_ zERSuu+bD*}){Ryrt8pD$@T2=z3}E2<5XA)vx@T`2z)}H8SP2FDLjxG1H3byxtpiwg z+$eyobXw2!Vp=In(KA^v#-}A5bM52l*_q9@Ykwrpv!fJBFoN96H}B{C|Nmd_|F2zW zfiapX9Bd0;yU+qNv@$3R?E1nB%+LzBP_P%oKs2=`6zswbOhz<;197unt|wK^8D5k* zoCPMggz839aEA5*wORlI3QqVP1wWNusDUY3Nei0Ug&T<2Oo4)3$borYa5b3Z*o?|a 
zxf+*3nZ<-5;h@nEM&R)J3XIc{8|55rx~WaW-$)mBV20M#fM#}~2Lf*>6agX>?7|O3 z^reJ?T?m2+nm`E(c3}u2%Il$E7m8rcG4nGahzB(!LK$3ucDhyTH0f_~`M`UgeyX`s zN=%Ju3B+X3(OB0Pjv%5M9GcmMBv?#L`ABf$4zSBDjVY{6y^gu1QUH^9PgBBPjAj=K1$#jgM5`M^!7e<({7A4$X-$_U(b74GMN)3MOct5m2xTRWLUv8!&Xo_nJbn?Mb(h2! z5>;hT274k%%A|>T0>m`cbWK97yRR=?!3>R~1I_FOSrDzw4+VRPE!YZ%1$YD;b!boV zUVChD3W*iL7 z>{|ygL}nlq?0pw7`{?4$^ym|S_DmmGD-Z*c_qN6M$Om@a0Kd>U&1Um*;!`6EcKDXA z|I`2m2oossXJ}@BasbQGSfo&}ZyCU{3V16X z{Ln;zuQ~%gBbfqr`<8Bw=*cnIo3S0)9@_{KC!5fr#Mj0ER*=rw9?MFw&1i?(k9KRB z+eyJJX@t5y91V>aZOza-W}f6ZlNg%WdH{>j99N-W`T&-q8R$U4v;izevp$7_sRLMs zMqYt}DFYZ7A%J2L3KDw7a`P7{+Jo(6z{gZb;{;m68{`eF3 zrN`g%*nIrf#35Tc+N7*H}teQ9Na8juSxZw?Td18q=R`t;G43t z+xf1n51F#$mU67O?bVz5u#*=Ty4dl%F!xIF6#G@BnG4Rufi|Q)0wYs{I9b%BBZM)I z7grDaux@FGvcP}=B`cf(W98-xZ7CIbN=3dauPk0GK(~MkRz$eM+vU}ur9vy}VZYU{ zhE_eo{_FEF#L^D-oBe8Nr4sDF^kHbF2ki5Gk_^&S9{crvHMH9-_8kUSUQ5RLs(ABM(_!hWU?LnB9EAL}0tOTt_Uo3UhBvx0n%9pcxO@)Pg6jq05SC@8Z9ks@6qv%*kI9)aeW6?_q zb2QH$>@W5?qP45BzuxDFHnUahqtGTea(#+uIglN+&XU`_lu$V$zXrBUkq(7 z9??gkO#>DB6wzqH*w6N1XnY6kqkWEItY@J8=ugf-5_GH+f8U8lc3y5dn(t2H$e9Yl zcAFg3bWW+=(>@B#S0U}AcsllpB?kr`M#rX9lJ!wy1JM};U~q+@)iW@}iws7s)+*X= z7u9scv zV2DLUDA<<|UZ3`&a#cel1o2mc>ySVKlnqNV{UjB}H*8hL!z0YD`+TVv>eebKk|LL#Ze+zK+ zI{{a}`%nJGz0ZP)fBPq+$G`pf6QJ{NJ?s2~zW)av36I|J@G}ozfB61~#6#@CFFg4E z2k(3EuKQoS|C9GWcz^ja{>|r`9k|(nn;p2>ftww;*@55l9eCcuK)g0yy`>LByE$FG zvG17NIQ8;mJkgpPr{3z|opwoNhYK>M%>J+H#;Nz>)O#ztD!S~vwbHiU<_$L=*F5V5 za!JJ_fpju|T-p0ze+Wz8h{5nlXU|$+_q^$sHPw_r~wv zY>apHEC{yZ***t++q1C{=gVM!q5qwsX^ycU>T^6UR+1^p_;k7Tq+L1{tLf1sN9LR# zd&|TR#&h7DSxec$W+3H~@*5Zsuv&W95_UAN_`kBXeY6XZ@9^HfJF0%7>h z^hAUPGnq0ap0i_v&_)){a(ufgxo__G5Y6HU`^SA4+Rgi__ae{s2WKSdS+S=1Fq`D^ zRJC_>-leJwX3P?L)r}LXJ&cn!Nl!ZUC;G+EhJ;~1&~M1DVC|JGWQ+!=Wah}Ah{/dev/null || echo "$body")" - fi - else - echo -e "${RED}โŒ Status: $status (expected $expected_status)${NC}" - echo "๐Ÿ“„ Response: $body" - fi - echo "" -} - -# Wait for server to be ready -echo "โณ Checking if TaskMaster server is running..." 
-max_attempts=10 -attempt=1 - -while [ $attempt -le $max_attempts ]; do - if curl -s "$BASE_URL/health" > /dev/null 2>&1; then - echo -e "${GREEN}โœ… Server is ready!${NC}" - break - else - echo " Attempt $attempt/$max_attempts - waiting for server..." - sleep 2 - ((attempt++)) - fi -done - -if [ $attempt -gt $max_attempts ]; then - echo -e "${RED}โŒ Server is not responding. Please start TaskMaster first.${NC}" - echo " Run: cargo run" - exit 1 -fi - -echo "" - -# Test 1: Health Check -test_endpoint "GET" "/health" "" 200 - -# Test 2: Status Check -test_endpoint "GET" "/status" "" 200 - -# Test 3: List All Tasks -test_endpoint "GET" "/tasks" "" 200 - -# Test 4: Complete Task (should fail - task doesn't exist) -task_completion_data='{"task_url": "999999999999"}' -test_endpoint "POST" "/complete" "$task_completion_data" 404 - -# Test 5: Complete Task with invalid format -invalid_task_data='{"task_url": "invalid"}' -test_endpoint "POST" "/complete" "$invalid_task_data" 400 - -# Test 6: Get Non-existent Task -test_endpoint "GET" "/tasks/nonexistent-task-id" "" 404 - -echo "๐ŸŽ‰ API endpoint testing completed!" -echo "" - -# Additional functionality tests -echo "๐Ÿ“Š Additional Server Information" -echo "===============================" - -# Get current status -echo "Current Status:" -curl -s "$BASE_URL/status" | jq -C . 2>/dev/null || curl -s "$BASE_URL/status" -echo "" - -echo "Health Check:" -curl -s "$BASE_URL/health" | jq -C . 
2>/dev/null || curl -s "$BASE_URL/health" -echo "" - -# Show example of how to complete a task when one exists -echo "๐Ÿ’ก To complete a task when one exists:" -echo " curl -X POST $BASE_URL/complete \\" -echo ' -H "Content-Type: application/json" \' -echo ' -d '"'"'{"task_url": "123456789012"}'"'" -echo "" - -echo "๐Ÿ“ To monitor the CSV file:" -echo " tail -f tasks.csv" -echo "" - -echo "๐Ÿ” To check logs with debug level:" -echo " TASKMASTER_LOGGING__LEVEL=debug cargo run" From 3421b6c1fc7ae2444761b5deee5852ad16de72e5 Mon Sep 17 00:00:00 2001 From: Beast Date: Mon, 12 Jan 2026 14:50:57 +0800 Subject: [PATCH 8/9] chore: add more detail in log --- src/services/tweet_synchronizer_service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/services/tweet_synchronizer_service.rs b/src/services/tweet_synchronizer_service.rs index 416041c..16063e7 100644 --- a/src/services/tweet_synchronizer_service.rs +++ b/src/services/tweet_synchronizer_service.rs @@ -164,7 +164,7 @@ impl TweetSynchronizerService { tracing::info!("๐Ÿ”„ Background Worker: Starting Twitter Sync..."); match service.sync_relevant_tweets().await { - Ok(_) => tracing::info!("โœ… Sync Complete."), + Ok(_) => tracing::info!("โœ… Sync Complete. Relevant tweets synced."), Err(e) => tracing::error!("โŒ Sync Failed: {:?}", e), } } From e4d723e9a22d86cd6acda572911e146f08d14b37 Mon Sep 17 00:00:00 2001 From: Beast Date: Mon, 12 Jan 2026 15:01:35 +0800 Subject: [PATCH 9/9] fix: test error because of unit struct --- src/models/auth.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/models/auth.rs b/src/models/auth.rs index 7a37c68..965a625 100644 --- a/src/models/auth.rs +++ b/src/models/auth.rs @@ -8,7 +8,7 @@ pub struct TokenClaims { } #[derive(Debug, Deserialize)] -pub struct RequestChallengeBody; +pub struct RequestChallengeBody {} #[derive(Debug, Serialize)] pub struct RequestChallengeResponse {