浏览代码

Run format

Mauricio Cassola 2 年之前
父节点
当前提交
f96b297d6a
共有 5 个文件被更改，包括 61 次插入和 50 次删除
  1. 18 9
      src/db.rs
  2. 20 26
      src/db/jobs.rs
  3. 1 1
      src/handlers.rs
  4. 17 10
      src/handlers/jobs.rs
  5. 5 4
      src/main.rs

+ 18 - 9
src/db.rs

@@ -1,14 +1,14 @@
+use crate::db::jobs::*;
+use crate::handlers::jobs::handle_job;
 use anyhow::Context as _;
 use native_tls::{Certificate, TlsConnector};
 use postgres_native_tls::MakeTlsConnector;
 use std::sync::{Arc, Mutex};
 use tokio::sync::{OwnedSemaphorePermit, Semaphore};
 use tokio_postgres::Client as DbClient;
-use crate::db::jobs::*;
-use crate::handlers::jobs::handle_job;
 
-pub mod jobs;
 pub mod issue_data;
+pub mod jobs;
 pub mod notifications;
 pub mod rustc_commits;
 
@@ -196,22 +196,31 @@ pub async fn run_scheduled_jobs(db: &DbClient) -> anyhow::Result<()> {
                 tracing::trace!("job succesfully executed (id={})", job.id);
 
                 if let Some(frequency) = job.frequency {
-                    let duration = get_duration_from_cron(frequency, job.frequency_unit.as_ref().unwrap());
+                    let duration =
+                        get_duration_from_cron(frequency, job.frequency_unit.as_ref().unwrap());
                     let new_expected_time = job.expected_time.checked_add_signed(duration).unwrap();
 
-                    insert_job(&db, &job.name, &new_expected_time, &Some(frequency), &job.frequency_unit, &job.metadata).await?;
+                    insert_job(
+                        &db,
+                        &job.name,
+                        &new_expected_time,
+                        &Some(frequency),
+                        &job.frequency_unit,
+                        &job.metadata,
+                    )
+                    .await?;
                     println!("job succesfully reinserted (name={})", job.name);
                     tracing::trace!("job succesfully reinserted (name={})", job.name);
                 }
 
                 delete_job(&db, &job.id).await?;
-            },
+            }
             Err(e) => {
                 println!("job failed on execution (id={:?}, error={:?})", job.id, e);
                 tracing::trace!("job failed on execution (id={:?}, error={:?})", job.id, e);
 
                 update_job_error_message(&db, &job.id, &e.to_string()).await?;
-            },
+            }
         }
     }
 
@@ -255,7 +264,7 @@ CREATE TABLE issue_data (
     PRIMARY KEY (repo, issue_number, key)
 );
 ",
-"
+    "
 CREATE TYPE frequency_unit AS ENUM ('days', 'hours', 'minutes', 'seconds');
 ",
     "
@@ -275,5 +284,5 @@ CREATE UNIQUE INDEX jobs_name_expected_time_unique_index
     ON jobs (
         name, expected_time
     );
-"
+",
 ];

+ 20 - 26
src/db/jobs.rs

@@ -1,10 +1,10 @@
 //! The `jobs` table provides a way to have scheduled jobs
-use anyhow::{Result, Context as _};
-use chrono::{DateTime, FixedOffset, Duration};
-use tokio_postgres::{Client as DbClient};
-use uuid::Uuid;
+use anyhow::{Context as _, Result};
+use chrono::{DateTime, Duration, FixedOffset};
+use postgres_types::{FromSql, ToSql};
 use serde::{Deserialize, Serialize};
-use postgres_types::{ToSql, FromSql};
+use tokio_postgres::Client as DbClient;
+use uuid::Uuid;
 
 #[derive(Serialize, Deserialize, Debug)]
 pub struct Job {
@@ -32,15 +32,15 @@ pub enum FrequencyUnit {
 }
 
 pub async fn insert_job(
-    db: &DbClient, 
+    db: &DbClient,
     name: &String,
     expected_time: &DateTime<FixedOffset>,
     frequency: &Option<i32>,
     frequency_unit: &Option<FrequencyUnit>,
-    metadata: &serde_json::Value
+    metadata: &serde_json::Value,
 ) -> Result<()> {
     tracing::trace!("insert_job(name={})", name);
-    
+
     db.execute(
         "INSERT INTO jobs (name, expected_time, frequency, frequency_unit, metadata) VALUES ($1, $2, $3, $4, $5) 
             ON CONFLICT (name, expected_time) DO UPDATE SET metadata = EXCLUDED.metadata",
@@ -54,20 +54,17 @@ pub async fn insert_job(
 
 pub async fn delete_job(db: &DbClient, id: &Uuid) -> Result<()> {
     tracing::trace!("delete_job(id={})", id);
-    
-    db.execute(
-        "DELETE FROM jobs WHERE id = $1",
-        &[&id],
-    )
-    .await
-    .context("Deleting job")?;
+
+    db.execute("DELETE FROM jobs WHERE id = $1", &[&id])
+        .await
+        .context("Deleting job")?;
 
     Ok(())
 }
 
 pub async fn update_job_error_message(db: &DbClient, id: &Uuid, message: &String) -> Result<()> {
     tracing::trace!("update_job_error_message(id={})", id);
-    
+
     db.execute(
         "UPDATE jobs SET error_message = $2 WHERE id = $1",
         &[&id, &message],
@@ -80,21 +77,18 @@ pub async fn update_job_error_message(db: &DbClient, id: &Uuid, message: &String
 
 pub async fn update_job_executed_at(db: &DbClient, id: &Uuid) -> Result<()> {
     tracing::trace!("update_job_executed_at(id={})", id);
-    
-    db.execute(
-        "UPDATE jobs SET executed_at = now() WHERE id = $1",
-        &[&id],
-    )
-    .await
-    .context("Updating job executed at")?;
+
+    db.execute("UPDATE jobs SET executed_at = now() WHERE id = $1", &[&id])
+        .await
+        .context("Updating job executed at")?;
 
     Ok(())
 }
 
 // Selects all jobs with:
-//  - expected_time in the past 
+//  - expected_time in the past
 //  - error_message is null or executed_at is at least 60 minutes ago (intended to make repeat executions rare enough)
-pub async fn get_jobs_to_execute(db: &DbClient) -> Result<Vec<Job>>  {
+pub async fn get_jobs_to_execute(db: &DbClient) -> Result<Vec<Job>> {
     let jobs = db
         .query(
             "
@@ -123,7 +117,7 @@ pub async fn get_jobs_to_execute(db: &DbClient) -> Result<Vec<Job>>  {
             frequency_unit,
             metadata,
             executed_at,
-            error_message
+            error_message,
         });
     }
 

+ 1 - 1
src/handlers.rs

@@ -28,6 +28,7 @@ mod autolabel;
 mod close;
 mod github_releases;
 mod glacier;
+pub mod jobs;
 mod major_change;
 mod mentions;
 mod milestone_prs;
@@ -43,7 +44,6 @@ mod review_submitted;
 mod rfc_helper;
 mod rustc_commits;
 mod shortcut;
-pub mod jobs;
 
 pub async fn handle(ctx: &Context, event: &Event) -> Vec<HandlerError> {
     let config = config::get(&ctx.github, event.repo()).await;

+ 17 - 10
src/handlers/jobs.rs

@@ -1,23 +1,23 @@
 // Function to match the scheduled job function with its corresponding handler.
-// In case you want to add a new one, just add a new clause to the match with 
+// In case you want to add a new one, just add a new clause to the match with
 // the job name and the corresponding function.
 
 // The metadata is a serde_json::Value
 // Please refer to https://docs.rs/serde_json/latest/serde_json/value/fn.from_value.html
 // on how to interpret it as an instance of type T, implementing Serialize/Deserialize.
 
-// For example, if we want to sends a Zulip message every Friday at 11:30am ET into #t-release 
+// For example, if we want to sends a Zulip message every Friday at 11:30am ET into #t-release
 // with a @T-release meeting! content, we should create some Job like:
-//    
+//
 //    #[derive(Serialize, Deserialize)]
 //    struct ZulipMetadata {
 //      pub message: String
 //    }
-//    
+//
 //    let metadata = serde_json::value::to_value(ZulipMetadata {
 //      message: "@T-release meeting!".to_string()
 //     }).unwrap();
-// 
+//
 //    Job {
 //      name: "send_zulip_message",
 //      expected_time: "2022-09-30T11:30:00+10:00",
@@ -25,18 +25,25 @@
 //      frequency_unit: Some(FrequencyUnit::Days),
 //      metadata: metadata
 //    }
-// 
+//
 // ... and add the corresponding "send_zulip_message" handler.
 
 pub async fn handle_job(name: &String, metadata: &serde_json::Value) -> anyhow::Result<()> {
     match name {
-      _ => default(&name, &metadata)
+        _ => default(&name, &metadata),
     }
 }
 
 fn default(name: &String, metadata: &serde_json::Value) -> anyhow::Result<()> {
-  println!("handle_job fell into default case: (name={:?}, metadata={:?})", name, metadata);
-  tracing::trace!("handle_job fell into default case: (name={:?}, metadata={:?})", name, metadata);
+    println!(
+        "handle_job fell into default case: (name={:?}, metadata={:?})",
+        name, metadata
+    );
+    tracing::trace!(
+        "handle_job fell into default case: (name={:?}, metadata={:?})",
+        name,
+        metadata
+    );
 
-  Ok(())
+    Ok(())
 }

+ 5 - 4
src/main.rs

@@ -7,11 +7,11 @@ use hyper::{header, Body, Request, Response, Server, StatusCode};
 use reqwest::Client;
 use route_recognizer::Router;
 use std::{env, net::SocketAddr, sync::Arc, time::Duration};
+use tokio::{task, time::sleep};
 use tower::{Service, ServiceExt};
 use tracing as log;
 use tracing::Instrument;
 use triagebot::{db, github, handlers::Context, notification_listing, payload, EventName};
-use tokio::{task, time::sleep};
 
 const JOB_PROCESSING_CADENCE_IN_SECS: u64 = 60;
 
@@ -238,17 +238,18 @@ async fn run_server(addr: SocketAddr) -> anyhow::Result<()> {
     let pool = db::ClientPool::new();
     db::run_migrations(&*pool.get().await)
         .await
-        .context("database migrations")?;  
+        .context("database migrations")?;
 
     // spawning a background task that will run the scheduled jobs
     // every JOB_PROCESSING_CADENCE_IN_SECS
     task::spawn(async move {
         let pool = db::ClientPool::new();
 
-        loop { 
+        loop {
             db::run_scheduled_jobs(&*pool.get().await)
                 .await
-                .context("run database scheduled jobs").unwrap(); 
+                .context("run database scheduled jobs")
+                .unwrap();
 
             sleep(Duration::from_secs(JOB_PROCESSING_CADENCE_IN_SECS)).await;
         }