From a0b2dc95faab10f730afcc2c8140d5c4920ac625 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 17 Jan 2024 22:53:54 +0500 Subject: [PATCH 01/62] Add tracker, add support for batch jobs --- Cargo.lock | 18 +- src/consumer/mod.rs | 11 +- src/lib.rs | 10 + src/producer/mod.rs | 35 +- src/proto/batch/cmd.rs | 65 ++++ src/proto/batch/mod.rs | 338 ++++++++++++++++++ src/proto/mod.rs | 84 ++++- src/proto/single/ent.rs | 148 +++++++- src/proto/single/mod.rs | 23 +- src/proto/single/resp.rs | 26 ++ src/proto/single/utils.rs | 32 +- src/tracker/mod.rs | 122 +++++++ tests/real/enterprise.rs | 696 ++++++++++++++++++++++++++++++++++++-- 13 files changed, 1524 insertions(+), 84 deletions(-) create mode 100644 src/proto/batch/cmd.rs create mode 100644 src/proto/batch/mod.rs create mode 100644 src/tracker/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 6190f113..5af0f678 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -523,7 +523,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.46", + "syn 2.0.47", ] [[package]] @@ -564,9 +564,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.74" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2de98502f212cfcea8d0bb305bd0f49d7ebdd75b64ba0a68f937d888f4e0d6db" +checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" dependencies = [ "unicode-ident", ] @@ -687,7 +687,7 @@ checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.46", + "syn 2.0.47", ] [[package]] @@ -731,9 +731,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.46" +version = "2.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89456b690ff72fddcecf231caedbe615c59480c93358a93dfae7fc29e3ebbf0e" +checksum = 
"1726efe18f42ae774cc644f330953a5e7b3c3003d3edcecf18850fe9d4dd9afb" dependencies = [ "proc-macro2", "quote", @@ -770,7 +770,7 @@ checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.46", + "syn 2.0.47", ] [[package]] @@ -871,7 +871,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.46", + "syn 2.0.47", "wasm-bindgen-shared", ] @@ -893,7 +893,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.46", + "syn 2.0.47", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/src/consumer/mod.rs b/src/consumer/mod.rs index 3908932f..70102863 100644 --- a/src/consumer/mod.rs +++ b/src/consumer/mod.rs @@ -1,5 +1,9 @@ use crate::error::Error; -use crate::proto::{self, Client, ClientOptions, HeartbeatStatus, Reconnect}; + +use crate::proto::{ + self, parse_provided_or_from_env, Client, ClientOptions, HeartbeatStatus, Reconnect, +}; + use fnv::FnvHashMap; use std::error::Error as StdError; use std::io::prelude::*; @@ -213,10 +217,7 @@ impl ConsumerBuilder { /// /// If `url` is given, but does not specify a port, it defaults to 7419. 
pub fn connect(self, url: Option<&str>) -> Result, Error> { - let url = match url { - Some(url) => proto::url_parse(url), - None => proto::url_parse(&proto::get_env_url()), - }?; + let url = parse_provided_or_from_env(url)?; let stream = TcpStream::connect(proto::host_from_url(&url))?; Self::connect_with(self, stream, url.password().map(|p| p.to_string())) } diff --git a/src/lib.rs b/src/lib.rs index 5a83286e..9353df95 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -74,4 +74,14 @@ pub use crate::consumer::{Consumer, ConsumerBuilder}; pub use crate::error::Error; pub use crate::producer::Producer; pub use crate::proto::Reconnect; + pub use crate::proto::{Job, JobBuilder}; + +#[cfg(feature = "ent")] +pub use crate::proto::{Batch, BatchBuilder, BatchStatus}; +#[cfg(feature = "ent")] +mod tracker; +#[cfg(feature = "ent")] +pub use crate::proto::{Progress, ProgressUpdate, ProgressUpdateBuilder}; +#[cfg(feature = "ent")] +pub use crate::tracker::Tracker; diff --git a/src/producer/mod.rs b/src/producer/mod.rs index 25b413bc..76a827a6 100644 --- a/src/producer/mod.rs +++ b/src/producer/mod.rs @@ -1,5 +1,14 @@ use crate::error::Error; -use crate::proto::{self, Client, Info, Job, Push, QueueAction, QueueControl}; +use crate::proto::{ + self, parse_provided_or_from_env, Client, Info, Job, Push, + QueueAction, QueueControl, +}; + +#[cfg(feature = "ent")] +use crate::proto::{BatchHandle, CommitBatch, OpenBatch}; +#[cfg(feature = "ent")] +use crate::Batch; + use std::io::prelude::*; use std::net::TcpStream; @@ -82,10 +91,7 @@ impl Producer { /// /// If `url` is given, but does not specify a port, it defaults to 7419. 
pub fn connect(url: Option<&str>) -> Result { - let url = match url { - Some(url) => proto::url_parse(url), - None => proto::url_parse(&proto::get_env_url()), - }?; + let url = parse_provided_or_from_env(url)?; let stream = TcpStream::connect(proto::host_from_url(&url))?; Self::connect_with(stream, url.password().map(|p| p.to_string())) } @@ -129,6 +135,25 @@ impl Producer { .issue(&QueueControl::new(QueueAction::Resume, queues))? .await_ok() } + + /// Initiate a new batch of jobs. + #[cfg(feature = "ent")] + pub fn start_batch(&mut self, batch: Batch) -> Result, Error> { + let bid = self.c.issue(&batch)?.read_bid()?; + Ok(BatchHandle::new(bid, self)) + } + + /// Open an already existing batch of jobs. + #[cfg(feature = "ent")] + pub fn open_batch(&mut self, bid: String) -> Result, Error> { + let bid = self.c.issue(&OpenBatch::from(bid))?.read_bid()?; + Ok(BatchHandle::new(bid, self)) + } + + #[cfg(feature = "ent")] + pub(crate) fn commit_batch(&mut self, bid: String) -> Result<(), Error> { + self.c.issue(&CommitBatch::from(bid))?.await_ok() + } } #[cfg(test)] diff --git a/src/proto/batch/cmd.rs b/src/proto/batch/cmd.rs new file mode 100644 index 00000000..03a2d0e7 --- /dev/null +++ b/src/proto/batch/cmd.rs @@ -0,0 +1,65 @@ +use crate::proto::single::FaktoryCommand; +use crate::{Batch, Error}; +use std::io::Write; + +impl FaktoryCommand for Batch { + fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"BATCH NEW ")?; + serde_json::to_writer(&mut *w, self).map_err(Error::Serialization)?; + Ok(w.write_all(b"\r\n")?) + } +} + +// ---------------------------------------------- + +pub struct CommitBatch(String); + +impl From for CommitBatch { + fn from(value: String) -> Self { + CommitBatch(value) + } +} + +impl FaktoryCommand for CommitBatch { + fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"BATCH COMMIT ")?; + w.write_all(self.0.as_bytes())?; + Ok(w.write_all(b"\r\n")?) 
+ } +} + +// ---------------------------------------------- + +pub struct GetBatchStatus(String); + +impl From for GetBatchStatus { + fn from(value: String) -> Self { + GetBatchStatus(value) + } +} + +impl FaktoryCommand for GetBatchStatus { + fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"BATCH STATUS ")?; + w.write_all(self.0.as_bytes())?; + Ok(w.write_all(b"\r\n")?) + } +} + +// ---------------------------------------------- + +pub struct OpenBatch(String); + +impl From for OpenBatch { + fn from(value: String) -> Self { + OpenBatch(value) + } +} + +impl FaktoryCommand for OpenBatch { + fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"BATCH OPEN ")?; + w.write_all(self.0.as_bytes())?; + Ok(w.write_all(b"\r\n")?) + } +} diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs new file mode 100644 index 00000000..904c6035 --- /dev/null +++ b/src/proto/batch/mod.rs @@ -0,0 +1,338 @@ +use std::io::{Read, Write}; + +use chrono::{DateTime, Utc}; +use derive_builder::Builder; + +use crate::{Error, Job, Producer}; + +mod cmd; + +pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; + +/// Batch of jobs. +/// +/// Faktory guarantees a callback (`success` and/or `failure`) will be triggered after the execution +/// of all the jobs belonging to the same batch has finished (successfully or with errors accordingly). +/// The 'complete' callback will always be queued first. +/// +/// Batches can be nested. They can also be re-opened, but - once a batch is committed - only those jobs +/// that belong to this batch can re-open it. +/// +/// An empty batch can be committed just fine. That will make Faktory immediately fire a callback (i.e. put +/// the job specified in `complete` and/or the one specified in `success` onto the queues). +/// +/// If you open a batch, but - for some reason - do not commit it within _30 minutes_, it will simply expire +/// on the Faktory server (which means no callbackes will be fired). 
+/// +/// Here is how you can create a simple batch: +/// ```no_run +/// # use faktory::Error; +/// use faktory::{Producer, Job, Batch}; +/// +/// let mut prod = Producer::connect(None)?; +/// let job1 = Job::builder("image").build(); +/// let job2 = Job::builder("image").build(); +/// let job_cb = Job::builder("clean_up").build(); +/// +/// let batch = Batch::builder("Image resizing workload".to_string()).with_complete_callback(job_cb); +/// +/// let mut batch = prod.start_batch(batch)?; +/// batch.add(job1)?; +/// batch.add(job2)?; +/// batch.commit()?; +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// Nested batches are also supported: +/// ```no_run +/// # use faktory::{Producer, Job, Batch, Error}; +/// # let mut prod = Producer::connect(None)?; +/// let parent_job1 = Job::builder("stats_build").build(); +/// let parent_job2 = Job::builder("entries_update").build(); +/// let parent_cb = Job::builder("clean_up").build(); +/// +/// let child_job1 = Job::builder("image_recognition").build(); +/// let child_job2 = Job::builder("image_recognition").build(); +/// let child_cb = Job::builder("clean_up").build(); +/// +/// let parent_batch = Batch::builder("Image recognition and analysis workload".to_string()).with_complete_callback(parent_cb); +/// let child_batch = Batch::builder("Image recognition workload".to_string()).with_success_callback(child_cb); +/// +/// let mut parent = prod.start_batch(parent_batch)?; +/// parent.add(parent_job1)?; +/// parent.add(parent_job2)?; +/// let mut child = parent.start_batch(child_batch)?; +/// child.add(child_job1)?; +/// child.add(child_job2)?; +/// +/// child.commit()?; +/// parent.commit()?; +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// In the example above, there is a single level nesting, but you can nest those batches as deep as you wish, +/// effectively building a pipeline this way, since the Faktory guarantees that callback jobs will not be queued unless +/// the batch gets committed. 
+/// +/// You can retieve the batch status using a [`tracker`](struct.Tracker.html): +/// ```no_run +/// # use faktory::Error; +/// # use faktory::{Producer, Job, Batch, Tracker}; +/// let mut prod = Producer::connect(None)?; +/// let job = Job::builder("image").build(); +/// let cb_job = Job::builder("clean_up").build(); +/// let b = Batch::builder("Description...".to_string()).with_complete_callback(cb_job); +/// +/// let mut b = prod.start_batch(b)?; +/// let bid = b.id().to_string(); +/// b.add(job)?; +/// b.commit()?; +/// +/// let mut t = Tracker::connect(None).unwrap(); +/// let s = t.get_batch_status(bid).unwrap().unwrap(); +/// assert_eq!(s.total, 1); +/// assert_eq!(s.pending, 1); +/// assert_eq!(s.description, Some("Description...".into())); +/// assert_eq!(s.complete_callback_state, ""); // has not been queued; +/// # Ok::<(), Error>(()) +/// ``` +#[derive(Serialize, Debug, Builder)] +#[builder( + custom_constructor, + setter(into), + build_fn(name = "try_build", private) +)] +pub struct Batch { + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(setter(skip))] + parent_bid: Option, + + /// Batch description for Faktory WEB UI. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + + /// On success callback. + /// + /// This job will be queued by the Faktory server provided + /// all the jobs belonging to this batch have been executed successfully. + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(setter(custom))] + pub(crate) success: Option, + + /// On complete callback. + /// + /// This job will be queue by the Faktory server after all the jobs + /// belonging to this batch have been executed, even if one/some/all + /// of the workers have failed. + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(setter(custom))] + pub(crate) complete: Option, +} + +impl Batch { + /// Create a new `BatchBuilder`. 
+ pub fn builder(description: impl Into>) -> BatchBuilder { + BatchBuilder::new(description) + } +} + +impl BatchBuilder { + fn build(&mut self) -> Batch { + self.try_build() + .expect("All required fields have been set.") + } + + /// Create a new `BatchBuilder` with optional description of the batch. + pub fn new(description: impl Into>) -> BatchBuilder { + BatchBuilder { + description: Some(description.into()), + ..Self::create_empty() + } + } + + fn success(&mut self, success_cb: impl Into>) -> &mut Self { + self.success = Some(success_cb.into()); + self + } + + fn complete(&mut self, complete_cb: impl Into>) -> &mut Self { + self.complete = Some(complete_cb.into()); + self + } + + /// Create a `Batch` with only `success` callback specified. + pub fn with_success_callback(&mut self, success_cb: Job) -> Batch { + self.success(success_cb); + self.complete(None); + self.build() + } + + /// Create a `Batch` with only `complete` callback specified. + pub fn with_complete_callback(&mut self, complete_cb: Job) -> Batch { + self.complete(complete_cb); + self.success(None); + self.build() + } + + /// Create a `Batch` with both `success` and `complete` callbacks specified. + pub fn with_callbacks(&mut self, success_cb: Job, complete_cb: Job) -> Batch { + self.success(success_cb); + self.complete(complete_cb); + self.build() + } +} + +pub struct BatchHandle<'a, S: Read + Write> { + bid: String, + prod: &'a mut Producer, +} + +impl<'a, S: Read + Write> BatchHandle<'a, S> { + /// ID issued by the Faktory server to this batch. + pub fn id(&self) -> &str { + self.bid.as_ref() + } + + pub(crate) fn new(bid: String, prod: &mut Producer) -> BatchHandle<'_, S> { + BatchHandle { bid, prod } + } + + /// Add the given job to the batch. + pub fn add(&mut self, mut job: Job) -> Result<(), Error> { + job.custom.insert("bid".into(), self.bid.clone().into()); + self.prod.enqueue(job) + } + + /// Initiate a child batch of jobs. 
+ pub fn start_batch(&mut self, mut batch: Batch) -> Result, Error> { + batch.parent_bid = Some(self.bid.clone()); + self.prod.start_batch(batch) + } + + /// Commit this batch. + /// + /// The Faktory server will not queue any callbacks, unless the batch is committed. + /// Committing an empty batch will make the server queue the callback(s) right away. + /// Once committed, the batch can still be re-opened with [open_batch](struct.Producer.html#method.open_batch), + /// and extra jobs can be added to it. + pub fn commit(self) -> Result<(), Error> { + self.prod.commit_batch(self.bid) + } +} + +/// Batch status retrieved from Faktory server. +#[derive(Deserialize, Debug)] +pub struct BatchStatus { + // Fields "bid", "created_at", "description", "total", "pending", and "failed" + // are described in the docs: https://github.com/contribsys/faktory/wiki/Ent-Batches#status + /// Id of this batch. + pub bid: String, + + /// Batch creation date and time. + pub created_at: DateTime, + + /// Batch description, if any. + pub description: Option, + + /// Number of jobs in this batch. + pub total: usize, + + /// Number of pending jobs. + pub pending: usize, + + /// Number of failed jobs. + pub failed: usize, + + // The official golang client also mentions "parent_bid', "complete_st", and "success_st": + // https://github.com/contribsys/faktory/blob/main/client/batch.go#L8-L22 + /// Id of the parent batch, provided this batch is a child ("nested") batch. + pub parent_bid: Option, + + /// State of the `complete` callback. + /// + /// See [complete](struct.Batch.html#structfield.complete). + #[serde(rename = "complete_st")] + pub complete_callback_state: String, + + /// State of the `success` callback. + /// + /// See [success](struct.Batch.html#structfield.success). 
+ #[serde(rename = "success_st")] + pub success_callback_state: String, +} + +#[cfg(test)] +mod test { + use std::str::FromStr; + + use chrono::{DateTime, Utc}; + + use super::*; + + #[test] + fn test_batch_creation() { + let b = BatchBuilder::new("Image processing batch".to_string()) + .with_success_callback(Job::builder("thumbnail").build()); + assert!(b.complete.is_none()); + assert!(b.parent_bid.is_none()); + assert!(b.success.is_some()); + assert_eq!(b.description, Some("Image processing batch".into())); + + let b = BatchBuilder::new("Image processing batch".to_string()) + .with_complete_callback(Job::builder("thumbnail").build()); + assert!(b.complete.is_some()); + assert!(b.success.is_none()); + + let b = BatchBuilder::new(None).with_callbacks( + Job::builder("thumbnail").build(), + Job::builder("thumbnail").build(), + ); + assert!(b.description.is_none()); + assert!(b.complete.is_some()); + assert!(b.success.is_some()); + } + + #[test] + fn test_batch_serialized_correctly() { + let prepare_test_job = |jobtype: String| { + let jid = "LFluKy1Baak83p54"; + let dt = "2023-12-22T07:00:52.546258624Z"; + let created_at = DateTime::::from_str(dt).unwrap(); + Job::builder(jobtype) + .jid(jid) + .created_at(created_at) + .build() + }; + + // with description and on success callback: + let got = serde_json::to_string( + &BatchBuilder::new("Image processing workload".to_string()) + .with_success_callback(prepare_test_job("thumbnail_clean_up".into())), + ) + .unwrap(); + let expected = r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; + assert_eq!(got, expected); + + // without description and with on complete callback: + let got = serde_json::to_string( + &BatchBuilder::new(None) + .with_complete_callback(prepare_test_job("thumbnail_info".into())), + ) + .unwrap(); + let 
expected = r#"{"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; + assert_eq!(got, expected); + + // with description and both callbacks: + let got = serde_json::to_string( + &BatchBuilder::new("Image processing workload".to_string()).with_callbacks( + prepare_test_job("thumbnail_clean_up".into()), + prepare_test_job("thumbnail_info".into()), + ), + ) + .unwrap(); + let expected = r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0},"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; + assert_eq!(got, expected); + } +} diff --git a/src/proto/mod.rs b/src/proto/mod.rs index d8af3c0c..43dbcaec 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -12,12 +12,23 @@ mod single; // commands that users can issue pub use self::single::{ - Ack, Fail, Heartbeat, Info, Job, JobBuilder, Push, QueueAction, QueueControl, + gen_random_wid, Ack, Fail, Heartbeat, Info, Job, JobBuilder, Push, QueueAction, QueueControl, }; // responses that users can see pub use self::single::Hi; +pub use self::single::gen_random_jid; + +#[cfg(feature = "ent")] +mod batch; +#[cfg(feature = "ent")] +pub use self::single::ent::{Progress, ProgressUpdate, ProgressUpdateBuilder, Track}; +#[cfg(feature = "ent")] +pub use batch::{ + Batch, BatchBuilder, BatchHandle, BatchStatus, CommitBatch, GetBatchStatus, OpenBatch, +}; + pub(crate) fn get_env_url() -> String { use std::env; let var = env::var("FAKTORY_PROVIDER").unwrap_or_else(|_| "FAKTORY_URL".to_string()); @@ -44,6 +55,21 @@ pub(crate) fn url_parse(url: &str) -> Result { 
Ok(url) } +pub(crate) fn parse_provided_or_from_env(url: Option<&str>) -> Result { + url_parse(url.unwrap_or(&get_env_url())) +} + +fn check_protocols_match(ver: usize) -> Result<(), Error> { + if ver != EXPECTED_PROTOCOL_VERSION { + return Err(error::Connect::VersionMismatch { + ours: EXPECTED_PROTOCOL_VERSION, + theirs: ver, + } + .into()); + } + Ok(()) +} + /// A stream that can be re-established after failing. pub trait Reconnect: Sized { /// Re-establish the stream. @@ -133,22 +159,40 @@ impl Client { }; Self::new(stream, opts) } + + #[cfg(feature = "ent")] + pub(crate) fn new_tracker(stream: S, pwd: Option) -> Result, Error> { + let opts = ClientOptions { + password: pwd, + ..Default::default() + }; + let mut c = Client { + stream: BufStream::new(stream), + opts, + }; + c.init_tracker()?; + Ok(c) + } } impl Client { fn init(&mut self) -> Result<(), Error> { let hi = single::read_hi(&mut self.stream)?; - if hi.version != EXPECTED_PROTOCOL_VERSION { - return Err(error::Connect::VersionMismatch { - ours: EXPECTED_PROTOCOL_VERSION, - theirs: hi.version, + check_protocols_match(hi.version)?; + + let mut hello = single::Hello::default(); + + // prepare password hash, if one expected by 'Faktory' + if hi.salt.is_some() { + if let Some(ref pwd) = self.opts.password { + hello.set_password(&hi, pwd); + } else { + return Err(error::Connect::AuthenticationNeeded.into()); } - .into()); } // fill in any missing options, and remember them for re-connect - let mut hello = single::Hello::default(); if !self.opts.is_producer { let hostname = self .opts @@ -162,14 +206,7 @@ impl Client { .pid .unwrap_or_else(|| unsafe { getpid() } as usize); self.opts.pid = Some(pid); - let wid = self.opts.wid.clone().unwrap_or_else(|| { - use rand::{thread_rng, Rng}; - thread_rng() - .sample_iter(&rand::distributions::Alphanumeric) - .map(char::from) - .take(32) - .collect() - }); + let wid = self.opts.wid.clone().unwrap_or_else(gen_random_wid); self.opts.wid = Some(wid); hello.hostname = 
Some(self.opts.hostname.clone().unwrap()); @@ -178,6 +215,18 @@ impl Client { hello.labels = self.opts.labels.clone(); } + single::write_command_and_await_ok(&mut self.stream, &hello) + } + + #[cfg(feature = "ent")] + fn init_tracker(&mut self) -> Result<(), Error> { + let hi = single::read_hi(&mut self.stream)?; + + check_protocols_match(hi.version)?; + + let mut hello = single::Hello::default(); + + // prepare password hash, if one expected by 'Faktory' if hi.salt.is_some() { if let Some(ref pwd) = self.opts.password { hello.set_password(&hi, pwd); @@ -256,6 +305,11 @@ impl<'a, S: Read + Write> ReadToken<'a, S> { { single::read_json(&mut self.0.stream) } + + #[cfg(feature = "ent")] + pub(crate) fn read_bid(self) -> Result { + single::read_bid(&mut self.0.stream) + } } #[cfg(test)] diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 1a866788..3aa91394 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -1,6 +1,23 @@ use chrono::{DateTime, Utc}; +use derive_builder::Builder; +use serde::{ + de::{Deserializer, IntoDeserializer}, + Deserialize, +}; -use crate::JobBuilder; +use crate::{Error, JobBuilder}; + +// Used to parse responses from Faktory that look like this: +// '{"jid":"f7APFzrS2RZi9eaA","state":"unknown","updated_at":""}' +fn parse_datetime<'de, D>(value: D) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + match Option::::deserialize(value)?.as_deref() { + Some("") | None => Ok(None), + Some(non_empty) => DateTime::deserialize(non_empty.into_deserializer()).map(Some), + } +} impl JobBuilder { /// When Faktory should expire this job. @@ -74,6 +91,135 @@ impl JobBuilder { } } +/// Info on job execution progress (retrieved). +/// +/// The tracker is guaranteed to get the following details: the job's id (though +/// they should know it beforehand in order to be ably to track the job), its last +/// know state (e.g."enqueued", "working", "success", "unknown") and the date and time +/// the job was last updated. 
Additionally, information on what's going on with the job +/// ([desc](struct.ProgressUpdate.html#structfield.desc)) and completion percentage +/// ([percent](struct.ProgressUpdate.html#structfield.percent)) may be available, +/// if the worker provided those details. +#[derive(Debug, Clone, Deserialize)] +pub struct Progress { + /// Id of the tracked job. + pub jid: String, + + /// Job's state. + pub state: String, + + /// When this job was last updated. + #[serde(deserialize_with = "parse_datetime")] + pub updated_at: Option>, + + /// Percentage of the job's completion. + pub percent: Option, + + /// Arbitrary description that may be useful to whoever is tracking the job's progress. + pub desc: Option, +} + +/// Info on job execution progress (sent). +/// +/// In Enterprise Faktory, a client executing a job can report on the execution +/// progress, provided the job is trackable. A trackable job is the one with "track":1 +/// specified in the custom data hash. +#[derive(Debug, Clone, Serialize, Builder)] +#[builder( + custom_constructor, + setter(into), + build_fn(name = "try_build", private) +)] +pub struct ProgressUpdate { + /// Id of the tracked job. + #[builder(setter(custom))] + pub jid: String, + + /// Percentage of the job's completion. + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(default = "None")] + pub percent: Option, + + /// Arbitrary description that may be useful to whoever is tracking the job's progress. + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(default = "None")] + pub desc: Option, + + /// Allows to extend the job's reservation, if more time needed to execute it. + /// + /// Note that you cannot decrease the initial [reservation](struct.Job.html#structfield.reserve_for). + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(default = "None")] + pub reserve_until: Option>, +} + +impl ProgressUpdate { + /// Create a new instance of `ProgressUpdateBuilder` with job ID already set. 
+ /// + /// Equivalent to creating a [new](struct.ProgressUpdateBuilder.html#method.new) + /// `ProgressUpdateBuilder`. + pub fn builder(jid: impl Into) -> ProgressUpdateBuilder { + ProgressUpdateBuilder { + jid: Some(jid.into()), + ..ProgressUpdateBuilder::create_empty() + } + } + + /// Create a new instance of `ProgressUpdate`. + /// + /// While job ID is specified at `ProgressUpdate`'s creation time, + /// the rest of the [fields](struct.ProgressUpdate.html) are defaulted to _None_. + pub fn new(jid: impl Into) -> ProgressUpdate { + ProgressUpdateBuilder::new(jid).build() + } +} + +impl ProgressUpdateBuilder { + /// Builds an instance of ProgressUpdate. + pub fn build(&self) -> ProgressUpdate { + self.try_build() + .expect("All required fields have been set.") + } + + /// Create a new instance of 'JobBuilder' + pub fn new(jid: impl Into) -> ProgressUpdateBuilder { + ProgressUpdateBuilder { + jid: Some(jid.into()), + ..ProgressUpdateBuilder::create_empty() + } + } +} + +// ---------------------------------------------- + +use super::FaktoryCommand; +use std::{fmt::Debug, io::Write}; + +#[derive(Debug, Clone)] +pub enum Track { + Set(ProgressUpdate), + Get(String), +} + +impl FaktoryCommand for Track { + fn issue(&self, w: &mut W) -> Result<(), Error> { + match self { + Self::Set(upd) => { + w.write_all(b"TRACK SET ")?; + serde_json::to_writer(&mut *w, upd).map_err(Error::Serialization)?; + Ok(w.write_all(b"\r\n")?) + } + Self::Get(jid) => { + w.write_all(b"TRACK GET ")?; + w.write_all(jid.as_bytes())?; + Ok(w.write_all(b"\r\n")?) 
+ } + } + } +} + +// ---------------------------------------------- + #[cfg(test)] mod test { use chrono::{DateTime, Utc}; diff --git a/src/proto/single/mod.rs b/src/proto/single/mod.rs index a02df87b..3b30d325 100644 --- a/src/proto/single/mod.rs +++ b/src/proto/single/mod.rs @@ -8,12 +8,13 @@ mod resp; mod utils; #[cfg(feature = "ent")] -mod ent; +pub mod ent; use crate::error::Error; pub use self::cmd::*; pub use self::resp::*; +pub use self::utils::{gen_random_jid, gen_random_wid}; const JOB_DEFAULT_QUEUE: &str = "default"; const JOB_DEFAULT_RESERVED_FOR_SECS: usize = 600; @@ -56,7 +57,7 @@ const JOB_DEFAULT_BACKTRACE: usize = 0; /// ``` /// /// See also the [Faktory wiki](https://github.com/contribsys/faktory/wiki/The-Job-Payload). -#[derive(Serialize, Deserialize, Debug, Builder)] +#[derive(Serialize, Deserialize, Debug, Clone, Builder)] #[builder( custom_constructor, setter(into), @@ -181,6 +182,24 @@ impl JobBuilder { self.try_build() .expect("All required fields have been set.") } + + /// Builds a new _trackable_ `Job``. + /// + /// Progress update can be sent and received only for the jobs that have + /// been explicitly marked as trackable via `"track":1` in the job's + /// custom hash. + /// ``` + /// use faktory::JobBuilder; + /// + /// let _job = JobBuilder::new("order") + /// .args(vec!["ISBN-13:9781718501850"]) + /// .build_trackable(); + /// ``` + #[cfg(feature = "ent")] + pub fn build_trackable(&mut self) -> Job { + self.add_to_custom_data("track", 1); + self.build() + } } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/src/proto/single/resp.rs b/src/proto/single/resp.rs index acf13ba8..29b732e4 100644 --- a/src/proto/single/resp.rs +++ b/src/proto/single/resp.rs @@ -59,6 +59,32 @@ pub fn read_json(r: R) -> Result(r: R) -> Result { + match read(r)? 
{ + RawResponse::String(ref s) if s.is_empty() => Err(error::Protocol::BadType { + expected: "non-empty string representation of batch id", + received: "empty string".into(), + } + .into()), + RawResponse::String(ref s) => Ok(s.to_string()), + RawResponse::Blob(ref b) if b.is_empty() => Err(error::Protocol::BadType { + expected: "non-empty blob representation of batch id", + received: "empty blob".into(), + } + .into()), + RawResponse::Blob(ref b) => Ok(std::str::from_utf8(b) + .map_err(|_| error::Protocol::BadType { + expected: "valid blob representation of batch id", + received: "unprocessable blob".into(), + })? + .into()), + something_else => Err(bad("id", &something_else).into()), + } +} + +// ---------------------------------------------- + #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct Hi { #[serde(rename = "v")] diff --git a/src/proto/single/utils.rs b/src/proto/single/utils.rs index b6f068dc..8c7651dd 100644 --- a/src/proto/single/utils.rs +++ b/src/proto/single/utils.rs @@ -1,15 +1,24 @@ use rand::{thread_rng, Rng}; const JOB_ID_LENGTH: usize = 16; +const WORKER_ID_LENGTH: usize = 32; -pub fn gen_random_jid() -> String { +fn gen_random_id(length: usize) -> String { thread_rng() .sample_iter(&rand::distributions::Alphanumeric) .map(char::from) - .take(JOB_ID_LENGTH) + .take(length) .collect() } +pub fn gen_random_jid() -> String { + gen_random_id(JOB_ID_LENGTH) +} + +pub fn gen_random_wid() -> String { + gen_random_id(WORKER_ID_LENGTH) +} + #[cfg(test)] mod test { use std::collections::HashSet; @@ -17,17 +26,16 @@ mod test { use super::*; #[test] - fn test_jid_of_known_size_generated() { - let jid1 = gen_random_jid(); - let jid2 = gen_random_jid(); - assert_ne!(jid1, jid2); - println!("{}", jid1); - assert_eq!(jid1.len(), JOB_ID_LENGTH); - assert_eq!(jid2.len(), JOB_ID_LENGTH); + fn test_id_of_known_size_generated() { + let id1 = gen_random_id(WORKER_ID_LENGTH); + let id2 = gen_random_id(WORKER_ID_LENGTH); + 
assert_ne!(id1, id2); + assert_eq!(id1.len(), WORKER_ID_LENGTH); + assert_eq!(id2.len(), WORKER_ID_LENGTH); } #[test] - fn test_jids_are_unique() { + fn test_ids_are_unique() { let mut ids = HashSet::new(); ids.insert("IYKOxEfLcwcgKaRa".to_string()); @@ -37,8 +45,8 @@ mod test { ids.clear(); for _ in 0..1_000_000 { - let jid = gen_random_jid(); - ids.insert(jid); + let id = gen_random_id(JOB_ID_LENGTH); + ids.insert(id); } assert_eq!(ids.len(), 1_000_000); } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs new file mode 100644 index 00000000..2ba3a9fa --- /dev/null +++ b/src/tracker/mod.rs @@ -0,0 +1,122 @@ +use std::io::{Read, Write}; +use std::net::TcpStream; + +use crate::proto::{host_from_url, parse_provided_or_from_env, Client, GetBatchStatus, Track}; +use crate::{BatchStatus, Error, Progress, ProgressUpdate}; + +/// Used for retrieving and updating information on a job's execution progress +/// (see [`Progress`] and [`ProgressUpdate`]), as well for retrieving a batch's status +/// from the Faktory server (see [`BatchStatus`]). +/// +/// Fetching a job's execution progress: +/// ```no_run +/// use faktory::Tracker; +/// let job_id = String::from("W8qyVle9vXzUWQOf"); +/// let mut tracker = Tracker::connect(None)?; +/// if let Some(progress) = tracker.get_progress(job_id)? { +/// if progress.state == "success" { +/// # /* +/// ... 
+/// # */ +/// } +/// } +/// # Ok::<(), faktory::Error>(()) +/// ``` +/// Sending an update on a job's execution progress: +/// ```no_run +/// use faktory::{Tracker, ProgressUpdateBuilder}; +/// let jid = String::from("W8qyVle9vXzUWQOf"); +/// let mut tracker = Tracker::connect(None)?; +/// let progress = ProgressUpdateBuilder::new(&jid) +/// .desc("Almost done...".to_owned()) +/// .percent(99) +/// .build(); +/// tracker.set_progress(progress)?; +/// # Ok::<(), faktory::Error>(()) +/// ``` +/// Fetching a batch's status: +/// ```no_run +/// use faktory::Tracker; +/// let bid = String::from("W8qyVle9vXzUWQOg"); +/// let mut tracker = Tracker::connect(None)?; +/// if let Some(status) = tracker.get_batch_status(bid)? { +/// println!("This batch created at {}", status.created_at); +/// } +/// # Ok::<(), faktory::Error>(()) +/// ``` +pub struct Tracker<S: Read + Write> { + c: Client<S>, +} + +impl Tracker<TcpStream> { + /// Create new tracker and connect to a Faktory server. + /// + /// If `url` is not given, will use the standard Faktory environment variables. Specifically, + /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address + /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the + /// server address. If the latter environment variable is not defined, the connection will be + /// made to + /// + /// ```text + /// tcp://localhost:7419 + /// ``` + pub fn connect(url: Option<&str>) -> Result<Tracker<TcpStream>, Error> { + let url = parse_provided_or_from_env(url)?; + let addr = host_from_url(&url); + let pwd = url.password().map(Into::into); + let stream = TcpStream::connect(addr)?; + Ok(Tracker { + c: Client::new_tracker(stream, pwd)?, + }) + } +} + +impl<S: Read + Write> Tracker<S> { + /// Connect to a Faktory server with a non-standard stream. + pub fn connect_with(stream: S, pwd: Option<String>) -> Result<Tracker<S>, Error> { + Ok(Tracker { + c: Client::new_tracker(stream, pwd)?, + }) + } + + /// Send information on a job's execution progress to Faktory. 
+ pub fn set_progress(&mut self, upd: ProgressUpdate) -> Result<(), Error> { + let cmd = Track::Set(upd); + self.c.issue(&cmd)?.await_ok() + } + + /// Fetch information on a job's execution progress from Faktory. + pub fn get_progress(&mut self, jid: String) -> Result<Option<Progress>, Error> { + let cmd = Track::Get(jid); + self.c.issue(&cmd)?.read_json() + } + + /// Fetch information on a batch of jobs' execution progress. + pub fn get_batch_status(&mut self, bid: String) -> Result<Option<BatchStatus>, Error> { + let cmd = GetBatchStatus::from(bid); + self.c.issue(&cmd)?.read_json() + } +} + +#[cfg(test)] +mod test { + use crate::proto::{host_from_url, parse_provided_or_from_env}; + + use super::Tracker; + use std::{env, net::TcpStream}; + + #[test] + fn test_trackers_created_ok() { + if env::var_os("FAKTORY_URL").is_none() || env::var_os("FAKTORY_ENT").is_none() { + return; + } + let _ = Tracker::connect(None).expect("tracker successfully instantiated and connected"); + + let url = parse_provided_or_from_env(None).expect("valid url"); + let host = host_from_url(&url); + let stream = TcpStream::connect(host).expect("connected"); + let pwd = url.password().map(String::from); + let _ = Tracker::connect_with(stream, pwd) + .expect("tracker successfully instantiated and connected"); + } +} diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index d1f651f1..b971fbdc 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -2,7 +2,9 @@ extern crate faktory; extern crate serde_json; extern crate url; +use chrono::Utc; use faktory::*; +use serde_json::Value; use std::io; macro_rules! skip_if_not_enterprise { @@ -13,6 +15,20 @@ macro_rules! skip_if_not_enterprise { }; } +macro_rules! assert_had_one { + ($c:expr, $q:expr) => { + let had_one_job = $c.run_one(0, &[$q]).unwrap(); + assert!(had_one_job); + }; +} + +macro_rules! 
assert_is_empty { + ($c:expr, $q:expr) => { + let had_one_job = $c.run_one(0, &[$q]).unwrap(); + assert!(!had_one_job); + }; +} + fn learn_faktory_url() -> String { let url = std::env::var_os("FAKTORY_URL").expect( "Enterprise Faktory should be running for this test, and 'FAKTORY_URL' environment variable should be provided", @@ -20,6 +36,15 @@ fn learn_faktory_url() -> String { url.to_str().expect("Is a utf-8 string").to_owned() } +fn some_jobs(kind: S, q: S, count: usize) -> impl Iterator +where + S: Into + Clone + 'static, +{ + (0..count) + .into_iter() + .map(move |_| Job::builder(kind.clone()).queue(q.clone()).build()) +} + #[test] fn ent_expiring_job() { use std::{thread, time}; @@ -29,12 +54,12 @@ fn ent_expiring_job() { let url = learn_faktory_url(); // prepare a producer ("client" in Faktory terms) and consumer ("worker"): - let mut producer = Producer::connect(Some(&url)).unwrap(); - let mut consumer = ConsumerBuilder::default(); - consumer.register("AnExpiringJob", move |job| -> io::Result<_> { + let mut p = Producer::connect(Some(&url)).unwrap(); + let mut c = ConsumerBuilder::default(); + c.register("AnExpiringJob", move |job| -> io::Result<_> { Ok(eprintln!("{:?}", job)) }); - let mut consumer = consumer.connect(Some(&url)).unwrap(); + let mut c = c.connect(Some(&url)).unwrap(); // prepare an expiring job: let job_ttl_secs: u64 = 3; @@ -42,32 +67,31 @@ fn ent_expiring_job() { let ttl = chrono::Duration::seconds(job_ttl_secs as i64); let job1 = JobBuilder::new("AnExpiringJob") .args(vec!["ISBN-13:9781718501850"]) + .queue("ent_expiring_job") .expires_at(chrono::Utc::now() + ttl) .build(); // enqueue and fetch immediately job1: - producer.enqueue(job1).unwrap(); - let had_job = consumer.run_one(0, &["default"]).unwrap(); - assert!(had_job); + p.enqueue(job1).unwrap(); + assert_had_one!(&mut c, "ent_expiring_job"); // check that the queue is drained: - let had_job = consumer.run_one(0, &["default"]).unwrap(); - assert!(!had_job); + assert_is_empty!(&mut 
c, "ent_expiring_job"); // prepare another one: let job2 = JobBuilder::new("AnExpiringJob") .args(vec!["ISBN-13:9781718501850"]) + .queue("ent_expiring_job") .expires_at(chrono::Utc::now() + ttl) .build(); // enqueue and then fetch job2, but after ttl: - producer.enqueue(job2).unwrap(); + p.enqueue(job2).unwrap(); thread::sleep(time::Duration::from_secs(job_ttl_secs * 2)); - let had_job = consumer.run_one(0, &["default"]).unwrap(); // For the non-enterprise edition of Faktory, this assertion will // fail, which should be taken into account when running the test suite on CI. - assert!(!had_job); + assert_is_empty!(&mut c, "ent_expiring_job"); } #[test] @@ -82,12 +106,12 @@ fn ent_unique_job() { let job_type = "order"; // prepare producer and consumer: - let mut producer = Producer::connect(Some(&url)).unwrap(); - let mut consumer = ConsumerBuilder::default(); - consumer.register(job_type, |job| -> io::Result<_> { + let mut p = Producer::connect(Some(&url)).unwrap(); + let mut c = ConsumerBuilder::default(); + c.register(job_type, |job| -> io::Result<_> { Ok(eprintln!("{:?}", job)) }); - let mut consumer = consumer.connect(Some(&url)).unwrap(); + let mut c = c.connect(Some(&url)).unwrap(); // Reminder. Jobs are considered unique for kind + args + queue. 
// So the following two jobs, will be accepted by Faktory, since we @@ -98,18 +122,18 @@ fn ent_unique_job() { .args(args.clone()) .queue(queue_name) .build(); - producer.enqueue(job1).unwrap(); + p.enqueue(job1).unwrap(); let job2 = JobBuilder::new(job_type) .args(args.clone()) .queue(queue_name) .build(); - producer.enqueue(job2).unwrap(); + p.enqueue(job2).unwrap(); - let had_job = consumer.run_one(0, &[queue_name]).unwrap(); + let had_job = c.run_one(0, &[queue_name]).unwrap(); assert!(had_job); - let had_another_one = consumer.run_one(0, &[queue_name]).unwrap(); + let had_another_one = c.run_one(0, &[queue_name]).unwrap(); assert!(had_another_one); - let and_that_is_it_for_now = !consumer.run_one(0, &[queue_name]).unwrap(); + let and_that_is_it_for_now = !c.run_one(0, &[queue_name]).unwrap(); assert!(and_that_is_it_for_now); // let's now create a unique job and followed by a job with @@ -121,7 +145,7 @@ fn ent_unique_job() { .queue(queue_name) .unique_for(unique_for_secs) .build(); - producer.enqueue(job1).unwrap(); + p.enqueue(job1).unwrap(); // this one is a 'duplicate' ... let job2 = Job::builder(job_type) .args(args.clone()) @@ -129,7 +153,7 @@ fn ent_unique_job() { .unique_for(unique_for_secs) .build(); // ... 
so the server will respond accordingly: - let res = producer.enqueue(job2).unwrap_err(); + let res = p.enqueue(job2).unwrap_err(); if let error::Error::Protocol(error::Protocol::UniqueConstraintViolation { msg }) = res { assert_eq!(msg, "Job not unique"); } else { @@ -137,12 +161,12 @@ fn ent_unique_job() { } // Let's now consume the job which is 'holding' a unique lock: - let had_job = consumer.run_one(0, &[queue_name]).unwrap(); + let had_job = c.run_one(0, &[queue_name]).unwrap(); assert!(had_job); // And check that the queue is really empty (`job2` from above // has not been queued indeed): - let queue_is_empty = !consumer.run_one(0, &[queue_name]).unwrap(); + let queue_is_empty = !c.run_one(0, &[queue_name]).unwrap(); assert!(queue_is_empty); // Now let's repeat the latter case, but providing different args to job2: @@ -151,7 +175,7 @@ fn ent_unique_job() { .queue(queue_name) .unique_for(unique_for_secs) .build(); - producer.enqueue(job1).unwrap(); + p.enqueue(job1).unwrap(); // this one is *NOT* a 'duplicate' ... let job2 = JobBuilder::new(job_type) .args(vec![Value::from("ISBN-13:9781718501850"), Value::from(101)]) @@ -159,16 +183,12 @@ fn ent_unique_job() { .unique_for(unique_for_secs) .build(); // ... 
so the server will accept it: - producer.enqueue(job2).unwrap(); - - let had_job = consumer.run_one(0, &[queue_name]).unwrap(); - assert!(had_job); - let had_another_one = consumer.run_one(0, &[queue_name]).unwrap(); - assert!(had_another_one); + p.enqueue(job2).unwrap(); + assert_had_one!(&mut c, queue_name); + assert_had_one!(&mut c, queue_name); // and the queue is empty again: - let had_job = consumer.run_one(0, &[queue_name]).unwrap(); - assert!(!had_job); + assert_is_empty!(&mut c, queue_name); } #[test] @@ -325,11 +345,11 @@ fn ent_unique_job_until_start() { #[test] fn ent_unique_job_bypass_unique_lock() { use faktory::error; - use serde_json::Value; skip_if_not_enterprise!(); let url = learn_faktory_url(); + let mut producer = Producer::connect(Some(&url)).unwrap(); let job1 = Job::builder("order") @@ -361,3 +381,609 @@ fn ent_unique_job_bypass_unique_lock() { panic!("Expected protocol error.") } } + +#[test] +fn test_tracker_can_send_and_retrieve_job_execution_progress() { + use std::{ + io, + sync::{Arc, Mutex}, + thread, time, + }; + + skip_if_not_enterprise!(); + + let url = learn_faktory_url(); + + let t = Arc::new(Mutex::new( + Tracker::connect(Some(&url)).expect("job progress tracker created successfully"), + )); + + let t_captured = Arc::clone(&t); + + let mut p = Producer::connect(Some(&url)).unwrap(); + + let job_tackable = JobBuilder::new("order") + .args(vec![Value::from("ISBN-13:9781718501850")]) + .queue("test_tracker_can_send_progress_update") + .build_trackable(); + + let job_ordinary = JobBuilder::new("order") + .args(vec![Value::from("ISBN-13:9781718501850")]) + .queue("test_tracker_can_send_progress_update") + .build(); + + // let's remember this job's id: + let job_id = job_tackable.id().to_owned(); + let job_id_captured = job_id.clone(); + + p.enqueue(job_tackable).expect("enqueued"); + + let mut c = ConsumerBuilder::default(); + c.register("order", move |job| -> io::Result<_> { + // trying to set progress on a community edition of 
Faktory will give: + // 'an internal server error occurred: tracking subsystem is only available in Faktory Enterprise' + let result = t_captured.lock().expect("lock acquired").set_progress( + ProgressUpdateBuilder::new(&job_id_captured) + .desc("Still processing...".to_owned()) + .percent(32) + .build(), + ); + assert!(result.is_ok()); + // let's sleep for a while ... + thread::sleep(time::Duration::from_secs(2)); + + // ... and read the progress info + let result = t_captured + .lock() + .expect("lock acquired") + .get_progress(job_id_captured.clone()) + .expect("Retrieved progress update over the wire"); + + assert!(result.is_some()); + let result = result.unwrap(); + assert_eq!(result.jid, job_id_captured.clone()); + assert_eq!(result.state, "working"); + assert!(result.updated_at.is_some()); + assert_eq!(result.desc, Some("Still processing...".to_owned())); + assert_eq!(result.percent, Some(32)); + // considering the job done + Ok(eprintln!("{:?}", job)) + }); + + let mut c = c + .connect(Some(&url)) + .expect("Successfully ran a handshake with 'Faktory'"); + assert_had_one!(&mut c, "test_tracker_can_send_progress_update"); + + let result = t + .lock() + .expect("lock acquired successfully") + .get_progress(job_id.clone()) + .expect("Retrieved progress update over the wire once again") + .expect("Some progress"); + + assert_eq!(result.jid, job_id); + // 'Faktory' will be keeping last known update for at least 30 minutes: + assert_eq!(result.desc, Some("Still processing...".to_owned())); + assert_eq!(result.percent, Some(32)); + + // But it actually knows the job's real status, since the consumer (worker) + // informed it immediately after finishing with the job: + assert_eq!(result.state, "success"); + + // What about 'ordinary' job ? + let job_id = job_ordinary.id().to_owned().clone(); + + // Sending it ... + p.enqueue(job_ordinary) + .expect("Successfuly send to Faktory"); + + // ... 
and asking for its progress + let progress = t + .lock() + .expect("lock acquired") + .get_progress(job_id.clone()) + .expect("Retrieved progress update over the wire once again") + .expect("Some progress"); + + // From the docs: + // There are several reasons why a job's state might be unknown: + // The JID is invalid or was never actually enqueued. + // The job was not tagged with the track variable in the job's custom attributes: custom:{"track":1}. + // The job's tracking structure has expired in Redis. It lives for 30 minutes and a big queue backlog can lead to expiration. + assert_eq!(progress.jid, job_id); + + // Returned from Faktory: '{"jid":"f7APFzrS2RZi9eaA","state":"unknown","updated_at":""}' + assert_eq!(progress.state, "unknown"); + assert!(progress.updated_at.is_none()); + assert!(progress.percent.is_none()); + assert!(progress.desc.is_none()); +} + +#[test] +fn test_batch_of_jobs_can_be_initiated() { + skip_if_not_enterprise!(); + let url = learn_faktory_url(); + + let mut p = Producer::connect(Some(&url)).unwrap(); + let mut c = ConsumerBuilder::default(); + c.register("thumbnail", move |_job| -> io::Result<_> { Ok(()) }); + c.register("clean_up", move |_job| -> io::Result<_> { Ok(()) }); + let mut c = c.connect(Some(&url)).unwrap(); + let mut t = Tracker::connect(Some(&url)).expect("job progress tracker created successfully"); + + let job_1 = Job::builder("thumbnail") + .args(vec!["path/to/original/image1"]) + .queue("test_batch_of_jobs_can_be_initiated") + .build(); + let job_2 = Job::builder("thumbnail") + .args(vec!["path/to/original/image2"]) + .queue("test_batch_of_jobs_can_be_initiated") + .build(); + let job_3 = Job::builder("thumbnail") + .args(vec!["path/to/original/image3"]) + .queue("test_batch_of_jobs_can_be_initiated") + .build(); + + let cb_job = Job::builder("clean_up") + .queue("test_batch_of_jobs_can_be_initiated__CALLBACKs") + .build(); + + let batch = + Batch::builder("Image resizing 
workload".to_string()).with_complete_callback(cb_job); + + let time_just_before_batch_init = Utc::now(); + + let mut b = p.start_batch(batch).unwrap(); + + // let's remember batch id: + let bid = b.id().to_string(); + + b.add(job_1).unwrap(); + b.add(job_2).unwrap(); + b.add(job_3).unwrap(); + b.commit().unwrap(); + + // The batch has been committed, let's see its status: + let time_just_before_getting_status = Utc::now(); + + let s = t + .get_batch_status(bid.clone()) + .expect("successfully fetched batch status from server...") + .expect("...and it's not none"); + + // Just to make a meaningfull assertion about the BatchStatus's 'created_at' field: + assert!(s.created_at > time_just_before_batch_init); + assert!(s.created_at < time_just_before_getting_status); + assert_eq!(s.bid, bid); + assert_eq!(s.description, Some("Image resizing workload".into())); + assert_eq!(s.total, 3); // three jobs registered + assert_eq!(s.pending, 3); // and none executed just yet + assert_eq!(s.failed, 0); + // Docs do not mention it, but the golang client does: + // https://github.com/contribsys/faktory/blob/main/client/batch.go#L17-L19 + assert_eq!(s.success_callback_state, ""); // we did not even provide the 'success' callback + assert_eq!(s.complete_callback_state, ""); // the 'complete' callback is pending + + // consume and execute job 1 ... + assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); + // ... 
and try consuming from the "callback" queue: + assert_is_empty!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); + + // let's ask the Faktory server about the batch status after + // we have consumed one job from this batch: + let s = t + .get_batch_status(bid.clone()) + .expect("successfully fetched batch status from server...") + .expect("...and it's not none"); + + // this is because we have just consumed and executed 1 of 3 jobs: + assert_eq!(s.total, 3); + assert_eq!(s.pending, 2); + assert_eq!(s.failed, 0); + + // now, consume and execute job 2 + assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); + // ... and check the callback queue again: + assert_is_empty!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); // not just yet ... + + // let's check batch status once again: + let s = t + .get_batch_status(bid.clone()) + .expect("successfully fetched batch status from server...") + .expect("...and it's not none"); + + // this is because we have just consumed and executed 2 of 3 jobs: + assert_eq!(s.total, 3); + assert_eq!(s.pending, 1); + assert_eq!(s.failed, 0); + + // finally, consume and execute job 3 - the last one from the batch + assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); + + // let's check batch status to see what happens after + // all the jobs from the batch have been executed: + let s = t + .get_batch_status(bid.clone()) + .expect("successfully fetched batch status from server...") + .expect("...and it's not none"); + + // this is because we have just consumed and executed 2 of 3 jobs: + assert_eq!(s.total, 3); + assert_eq!(s.pending, 0); + assert_eq!(s.failed, 0); + assert_eq!(s.complete_callback_state, "1"); // callback has been enqueued!! 
+ + // let's now successfully consume from the "callback" queue: + assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); + + // let's check batch status one last time: + let s = t + .get_batch_status(bid.clone()) + .expect("successfully fetched batch status from server...") + .expect("...and it's not none"); + + // this is because we have just consumed and executed 2 of 3 jobs: + assert_eq!(s.complete_callback_state, "2"); // means callback successfully executed +} + +#[test] +fn test_batches_can_be_nested() { + skip_if_not_enterprise!(); + let url = learn_faktory_url(); + + // Set up 'producer', 'consumer', and 'tracker': + let mut p = Producer::connect(Some(&url)).unwrap(); + let mut c = ConsumerBuilder::default(); + c.register("jobtype", move |_job| -> io::Result<_> { Ok(()) }); + let mut _c = c.connect(Some(&url)).unwrap(); + let mut t = Tracker::connect(Some(&url)).expect("job progress tracker created successfully"); + + // Prepare some jobs: + let parent_job1 = Job::builder("jobtype") + .queue("test_batches_can_be_nested") + .build(); + let child_job_1 = Job::builder("jobtype") + .queue("test_batches_can_be_nested") + .build(); + let child_job_2 = Job::builder("jobtype") + .queue("test_batches_can_be_nested") + .build(); + let grand_child_job_1 = Job::builder("jobtype") + .queue("test_batches_can_be_nested") + .build(); + + // According to Faktory docs: + // "The callback for a parent batch will not enqueue until the callback for the child batch has finished." 
+ // See: https://github.com/contribsys/faktory/wiki/Ent-Batches#guarantees + let parent_cb_job = Job::builder("clean_up") + .queue("test_batches_can_be_nested__CALLBACKs") + .build(); + let child_cb_job = Job::builder("clean_up") + .queue("test_batches_can_be_nested__CALLBACKs") + .build(); + let grandchild_cb_job = Job::builder("clean_up") + .queue("test_batches_can_be_nested__CALLBACKs") + .build(); + + // batches start + let parent_batch = + Batch::builder("Parent batch".to_string()).with_success_callback(parent_cb_job); + let mut parent_batch = p.start_batch(parent_batch).unwrap(); + let parent_batch_id = parent_batch.id().to_owned(); + parent_batch.add(parent_job1).unwrap(); + + let child_batch = Batch::builder("Child batch".to_string()).with_success_callback(child_cb_job); + let mut child_batch = parent_batch.start_batch(child_batch).unwrap(); + let child_batch_id = child_batch.id().to_owned(); + child_batch.add(child_job_1).unwrap(); + child_batch.add(child_job_2).unwrap(); + + let grandchild_batch = + Batch::builder("Grandchild batch".to_string()).with_success_callback(grandchild_cb_job); + let mut grandchild_batch = child_batch.start_batch(grandchild_batch).unwrap(); + let grandchild_batch_id = grandchild_batch.id().to_owned(); + grandchild_batch.add(grand_child_job_1).unwrap(); + + grandchild_batch.commit().unwrap(); + child_batch.commit().unwrap(); + parent_batch.commit().unwrap(); + // batches finish + + let parent_status = t + .get_batch_status(parent_batch_id.clone()) + .unwrap() + .unwrap(); + assert_eq!(parent_status.description, Some("Parent batch".to_string())); + assert_eq!(parent_status.total, 1); + assert_eq!(parent_status.parent_bid, None); + + let child_status = t.get_batch_status(child_batch_id.clone()).unwrap().unwrap(); + assert_eq!(child_status.description, Some("Child batch".to_string())); + assert_eq!(child_status.total, 2); + assert_eq!(child_status.parent_bid, Some(parent_batch_id)); + + let grandchild_status = 
t.get_batch_status(grandchild_batch_id).unwrap().unwrap(); + assert_eq!( + grandchild_status.description, + Some("Grandchild batch".to_string()) + ); + assert_eq!(grandchild_status.total, 1); + assert_eq!(grandchild_status.parent_bid, Some(child_batch_id)); +} + +#[test] +fn test_callback_will_not_be_queued_unless_batch_gets_committed() { + skip_if_not_enterprise!(); + let url = learn_faktory_url(); + + // prepare a producer, a consumer of 'order' jobs, and a tracker: + let mut p = Producer::connect(Some(&url)).unwrap(); + let mut c = ConsumerBuilder::default(); + c.register("order", move |_job| -> io::Result<_> { Ok(()) }); + c.register("order_clean_up", move |_job| -> io::Result<_> { Ok(()) }); + let mut c = c.connect(Some(&url)).unwrap(); + let mut t = Tracker::connect(Some(&url)).unwrap(); + + let mut jobs = some_jobs( + "order", + "test_callback_will_not_be_queued_unless_batch_gets_committed", + 3, + ); + let mut callbacks = some_jobs( + "order_clean_up", + "test_callback_will_not_be_queued_unless_batch_gets_committed__CALLBACKs", + 1, + ); + + // start a 'batch': + let mut b = p + .start_batch( + Batch::builder("Orders processing workload".to_string()) + .with_success_callback(callbacks.next().unwrap()), + ) + .unwrap(); + let bid = b.id().to_string(); + + // push 3 jobs onto this batch, but DO NOT commit the batch: + for _ in 0..3 { + b.add(jobs.next().unwrap()).unwrap(); + } + + // check this batch's status: + let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 3); + assert_eq!(s.pending, 3); + assert_eq!(s.success_callback_state, ""); // has not been queued; + + // consume those 3 jobs successfully; + for _ in 0..3 { + assert_had_one!( + &mut c, + "test_callback_will_not_be_queued_unless_batch_gets_committed" + ); + } + + // verify the queue is drained: + assert_is_empty!( + &mut c, + "test_callback_will_not_be_queued_unless_batch_gets_committed" + ); + + // check this batch's status again: + let s = 
t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 3); + assert_eq!(s.pending, 0); + assert_eq!(s.failed, 0); + assert_eq!(s.success_callback_state, ""); // not just yet; + + // to double-check, let's assert the success callbacks queue is empty: + assert_is_empty!( + &mut c, + "test_callback_will_not_be_queued_unless_batch_gets_committed__CALLBACKs" + ); + + // now let's COMMIT the batch ... + b.commit().unwrap(); + + // ... and check batch status: + let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.success_callback_state, "1"); // callback has been queued; + + // finally, let's consume from the success callbacks queue ... + assert_had_one!( + &mut c, + "test_callback_will_not_be_queued_unless_batch_gets_committed__CALLBACKs" + ); + + // ... and see the final status: + let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.success_callback_state, "2"); // callback successfully executed; +} + +#[test] +fn test_callback_will_be_queue_upon_commit_even_if_batch_is_empty() { + use std::{thread, time}; + + skip_if_not_enterprise!(); + let url = learn_faktory_url(); + let mut p = Producer::connect(Some(&url)).unwrap(); + let mut t = Tracker::connect(Some(&url)).unwrap(); + let jobtype = "callback_jobtype"; + let q_name = "test_callback_will_be_queue_upon_commit_even_if_batch_is_empty"; + let mut callbacks = some_jobs(jobtype, q_name, 2); + let b = p + .start_batch( + Batch::builder("Orders processing workload".to_string()) + .with_callbacks(callbacks.next().unwrap(), callbacks.next().unwrap()), + ) + .unwrap(); + let bid = b.id().to_owned(); + + let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 0); // no jobs in the batch; + assert_eq!(s.success_callback_state, ""); // not queued; + assert_eq!(s.complete_callback_state, ""); // not queued; + + b.commit().unwrap(); + + // let's give the Faktory server some time: + thread::sleep(time::Duration::from_secs(2)); + + let s = 
t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 0); // again, there are no jobs in the batch ... + + // The docs say "If you don't push any jobs into the batch, any callbacks will fire immediately upon BATCH COMMIT." + // and "the success callback for a batch will always enqueue after the complete callback" + assert_eq!(s.complete_callback_state, "1"); // queued + assert_eq!(s.success_callback_state, ""); // not queued + + let mut c = ConsumerBuilder::default(); + c.register(jobtype, move |_job| -> io::Result<_> { Ok(()) }); + let mut c = c.connect(Some(&url)).unwrap(); + + assert_had_one!(&mut c, q_name); // complete callback consumed + + let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 0); + assert_eq!(s.complete_callback_state, "2"); // successfully executed + assert_eq!(s.success_callback_state, "1"); // queued + + assert_had_one!(&mut c, q_name); // success callback consumed + + let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 0); + assert_eq!(s.complete_callback_state, "2"); // successfully executed + assert_eq!(s.success_callback_state, "2"); // successfully executed +} + +#[test] +fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { + skip_if_not_enterprise!(); + let url = learn_faktory_url(); + let mut p = Producer::connect(Some(&url)).unwrap(); + let mut t = Tracker::connect(Some(&url)).unwrap(); + let mut jobs = some_jobs("order", "test_batch_can_be_reopned_add_extra_jobs_added", 4); + let mut callbacks = some_jobs( + "order_clean_up", + "test_batch_can_be_reopned_add_extra_jobs_added__CALLBACKs", + 1, + ); + + let b = Batch::builder("Orders processing workload".to_string()) + .with_success_callback(callbacks.next().unwrap()); + + let mut b = p.start_batch(b).unwrap(); + let bid = b.id().to_string(); + b.add(jobs.next().unwrap()).unwrap(); // 1 job + b.add(jobs.next().unwrap()).unwrap(); // 2 jobs + + let status = 
t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(status.total, 2); + assert_eq!(status.pending, 2); + + // ############################## SUBTEST 0 ########################################## + // Let's first of all try to open the batch we have not committed yet: + let mut b = p.open_batch(bid.clone()).unwrap(); + assert_eq!(b.id(), bid); + b.add(jobs.next().unwrap()).unwrap(); // 3 jobs + + b.commit().unwrap(); // committing the batch + + let status = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(status.total, 3); + assert_eq!(status.pending, 3); + + // Subtest 0 result: + // The Faktory server lets us open the uncommitted batch. This is something not mentioned + // in the docs, but still worth checking. + + // ############################## SUBTEST 1 ########################################## + // From the docs: + // """Note that, once committed, only a job within the batch may reopen it. + // Faktory will return an error if you dynamically add jobs from "outside" the batch; + // this is to prevent a race condition between callbacks firing and an outsider adding more jobs.""" + // Ref: https://github.com/contribsys/faktory/wiki/Ent-Batches#batch-open-bid (Jan 10, 2024) + + // Let's try to open an already committed batch: + let mut b = p.open_batch(bid.clone()).unwrap(); + assert_eq!(b.id(), bid); + b.add(jobs.next().unwrap()).unwrap(); // 4 jobs + b.commit().unwrap(); // committing the batch again! + + let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 4); + assert_eq!(s.pending, 4); + + // Subtest 1 result: + // We managed to open a batch "from outside" and the server accepted the job INSTEAD OF ERRORING BACK. 
+ // ############################ END OF SUBTEST 1 ####################################### + + // ############################## SUBTEST 2 ############################################ + // Let's see if we will be able to - again - open the committed batch "from outside" and + // add a nested batch to it. + let mut b = p.open_batch(bid.clone()).unwrap(); + assert_eq!(b.id(), bid); // this is to make sure this is the same batch INDEED + let mut nested_callbacks = some_jobs( + "order_clean_up__NESTED", + "test_batch_can_be_reopned_add_extra_jobs_added__CALLBACKs__NESTED", + 2, + ); + let nested_batch_declaration = + Batch::builder("Orders processing workload. Nested stage".to_string()).with_callbacks( + nested_callbacks.next().unwrap(), + nested_callbacks.next().unwrap(), + ); + let nested_batch = b.start_batch(nested_batch_declaration).unwrap(); + let nested_bid = nested_batch.id().to_string(); + // committing the nested batch without any jobs + // since those are just not relevant for this test: + nested_batch.commit().unwrap(); + + let s = t.get_batch_status(nested_bid.clone()).unwrap().unwrap(); + assert_eq!(s.total, 0); + assert_eq!(s.parent_bid, Some(bid)); // this is really our child batch + assert_eq!(s.complete_callback_state, "1"); // has been enqueud + + // Subtest 2 result: + // We managed to open an already committed batch "from outside" and the server accepted + // a nested batch INSTEAD OF ERRORING BACK. + // ############################ END OF SUBTEST 2 ####################################### + + // ############################## SUBTEST 3 ############################################ + // From the docs: + // """Once a callback has enqueued for a batch, you may not add anything to the batch.""" + // ref: https://github.com/contribsys/faktory/wiki/Ent-Batches#guarantees (Jan 10, 2024) + + // Let's try to re-open the nested batch that we have already committed and add some jobs to it. 
+ let mut b = p.open_batch(nested_bid.clone()).unwrap(); + assert_eq!(b.id(), nested_bid); // this is to make sure this is the same batch INDEED + let mut more_jobs = some_jobs( + "order_clean_up__NESTED", + "test_batch_can_be_reopned_add_extra_jobs_added__NESTED", + 2, + ); + b.add(more_jobs.next().unwrap()).unwrap(); + b.add(more_jobs.next().unwrap()).unwrap(); + b.commit().unwrap(); + + let s = t.get_batch_status(nested_bid.clone()).unwrap().unwrap(); + assert_eq!(s.complete_callback_state, "1"); // again, it has been enqueud ... + assert_eq!(s.pending, 2); // ... though there are pending jobs + assert_eq!(s.total, 2); + + // Subtest 3 result: + // We were able to add more jobs to the batch for which the Faktory server had already + // queued the callback. + // ############################## END OF SUBTEST 3 ##################################### + + // ############################## OVERALL RESULTS ###################################### + // The guarantees that definitely hold: + // + // 1) the callbacks will fire immediately after the jobs of this batch have been executed, provided the batch has been committed; + // + // 2) the callbacks will fire immediately for an empty batch, provided it has been committed; + // + // 3) the 'complete' callback will always be queued first + // (this is shown as part the test 'test_callback_will_be_queue_upon_commit_even_if_batch_is_empty'); +} From b143490b8b289f1e9f2f339f4cf55f30361924bc Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 17 Jan 2024 22:58:41 +0500 Subject: [PATCH 02/62] Run fmt --- src/producer/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/producer/mod.rs b/src/producer/mod.rs index 76a827a6..ebee2c0a 100644 --- a/src/producer/mod.rs +++ b/src/producer/mod.rs @@ -1,7 +1,6 @@ use crate::error::Error; use crate::proto::{ - self, parse_provided_or_from_env, Client, Info, Job, Push, - QueueAction, QueueControl, + self, parse_provided_or_from_env, Client, Info, Job, 
Push, QueueAction, QueueControl, }; #[cfg(feature = "ent")] From f3c8348ba182cf66446da3c2ca91e0aff84c3764 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sat, 20 Jan 2024 23:23:14 +0500 Subject: [PATCH 03/62] Use doc-cfg for new ent features --- src/lib.rs | 8 +++++--- src/proto/mod.rs | 5 +++-- src/proto/single/mod.rs | 4 ++-- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 8a83b62f..6c48135b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,10 +81,12 @@ pub use crate::proto::Reconnect; pub use crate::proto::{Job, JobBuilder}; #[cfg(feature = "ent")] -pub use crate::proto::{Batch, BatchBuilder, BatchStatus}; +#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] +pub use crate::proto::{ + Batch, BatchBuilder, BatchStatus, Progress, ProgressUpdate, ProgressUpdateBuilder, +}; #[cfg(feature = "ent")] mod tracker; #[cfg(feature = "ent")] -pub use crate::proto::{Progress, ProgressUpdate, ProgressUpdateBuilder}; -#[cfg(feature = "ent")] +#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub use crate::tracker::Tracker; diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 28d0c16b..9dc9e988 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -15,10 +15,11 @@ pub use self::single::{ gen_random_wid, Ack, Fail, Heartbeat, Info, Job, JobBuilder, Push, QueueAction, QueueControl, }; -#[cfg(feature = "ent")] -mod batch; #[cfg(feature = "ent")] pub use self::single::ent::{Progress, ProgressUpdate, ProgressUpdateBuilder, Track}; + +#[cfg(feature = "ent")] +mod batch; #[cfg(feature = "ent")] pub use batch::{ Batch, BatchBuilder, BatchHandle, BatchStatus, CommitBatch, GetBatchStatus, OpenBatch, diff --git a/src/proto/single/mod.rs b/src/proto/single/mod.rs index 2e3fdcf0..811be23b 100644 --- a/src/proto/single/mod.rs +++ b/src/proto/single/mod.rs @@ -9,13 +9,13 @@ mod utils; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] -mod ent; +pub mod ent; use crate::error::Error; pub use self::cmd::*; pub use 
self::resp::*; -pub use self::utils::{gen_random_jid, gen_random_wid}; +pub use self::utils::gen_random_wid; const JOB_DEFAULT_QUEUE: &str = "default"; const JOB_DEFAULT_RESERVED_FOR_SECS: usize = 600; From 268ffecc2ce383a88880f92eaf477f12ffb86053 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 00:19:17 +0500 Subject: [PATCH 04/62] Do not error if batch does not exist when re-opening --- src/consumer/mod.rs | 7 ++---- src/lib.rs | 4 +--- src/producer/mod.rs | 18 ++++++++------- src/proto/batch/mod.rs | 8 +++---- src/proto/mod.rs | 17 ++++++++++++++ src/proto/single/resp.rs | 6 ----- tests/real/enterprise.rs | 49 ++++++++++++++++++++++++---------------- 7 files changed, 62 insertions(+), 47 deletions(-) diff --git a/src/consumer/mod.rs b/src/consumer/mod.rs index 70102863..7445e47f 100644 --- a/src/consumer/mod.rs +++ b/src/consumer/mod.rs @@ -1,17 +1,14 @@ use crate::error::Error; - use crate::proto::{ - self, parse_provided_or_from_env, Client, ClientOptions, HeartbeatStatus, Reconnect, + self, parse_provided_or_from_env, Ack, Client, ClientOptions, Fail, HeartbeatStatus, Job, + Reconnect, }; - use fnv::FnvHashMap; use std::error::Error as StdError; use std::io::prelude::*; use std::net::TcpStream; use std::sync::{atomic, Arc, Mutex}; -use crate::proto::{Ack, Fail, Job}; - const STATUS_RUNNING: usize = 0; const STATUS_QUIET: usize = 1; const STATUS_TERMINATING: usize = 2; diff --git a/src/lib.rs b/src/lib.rs index 6c48135b..de0aac1f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,9 +76,7 @@ pub use tls::TlsStream; pub use crate::consumer::{Consumer, ConsumerBuilder}; pub use crate::error::Error; pub use crate::producer::Producer; -pub use crate::proto::Reconnect; - -pub use crate::proto::{Job, JobBuilder}; +pub use crate::proto::{Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] diff --git a/src/producer/mod.rs b/src/producer/mod.rs index ebee2c0a..624c69c5 100644 --- a/src/producer/mod.rs 
+++ b/src/producer/mod.rs @@ -2,12 +2,8 @@ use crate::error::Error; use crate::proto::{ self, parse_provided_or_from_env, Client, Info, Job, Push, QueueAction, QueueControl, }; - -#[cfg(feature = "ent")] -use crate::proto::{BatchHandle, CommitBatch, OpenBatch}; #[cfg(feature = "ent")] -use crate::Batch; - +use crate::proto::{Batch, BatchHandle, CommitBatch, OpenBatch}; use std::io::prelude::*; use std::net::TcpStream; @@ -143,10 +139,16 @@ impl Producer { } /// Open an already existing batch of jobs. + /// + /// This will not error if a batch with the provided `bid` does not exist, + /// rather `Ok(None)` will be returned. #[cfg(feature = "ent")] - pub fn open_batch(&mut self, bid: String) -> Result, Error> { - let bid = self.c.issue(&OpenBatch::from(bid))?.read_bid()?; - Ok(BatchHandle::new(bid, self)) + pub fn open_batch(&mut self, bid: String) -> Result>, Error> { + let bid = self.c.issue(&OpenBatch::from(bid))?.maybe_bid()?; + match bid { + Some(bid) => Ok(Some(BatchHandle::new(bid, self))), + None => Ok(None), + } } #[cfg(feature = "ent")] diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 904c6035..588e541b 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -1,9 +1,7 @@ -use std::io::{Read, Write}; - +use crate::{Error, Job, Producer}; use chrono::{DateTime, Utc}; use derive_builder::Builder; - -use crate::{Error, Job, Producer}; +use std::io::{Read, Write}; mod cmd; @@ -76,7 +74,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// effectively building a pipeline this way, since the Faktory guarantees that callback jobs will not be queued unless /// the batch gets committed. 
/// -/// You can retieve the batch status using a [`tracker`](struct.Tracker.html): +/// You can retrieve the batch status using a [`Tracker`](struct.Tracker.html): /// ```no_run /// # use faktory::Error; /// # use faktory::{Producer, Job, Batch, Tracker}; diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 9dc9e988..47fd97e9 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -306,6 +306,23 @@ impl<'a, S: Read + Write> ReadToken<'a, S> { pub(crate) fn read_bid(self) -> Result { single::read_bid(&mut self.0.stream) } + + #[cfg(feature = "ent")] + pub(crate) fn maybe_bid(self) -> Result, Error> { + let bid_read_res = single::read_bid(&mut self.0.stream); + if bid_read_res.is_ok() { + return Ok(Some(bid_read_res.unwrap())); + } + match bid_read_res.unwrap_err() { + Error::Protocol(error::Protocol::Internal { msg }) => { + if msg.starts_with("No such batch") { + return Ok(None); + } + return Err(error::Protocol::Internal { msg }.into()); + } + another => Err(another), + } + } } #[cfg(test)] diff --git a/src/proto/single/resp.rs b/src/proto/single/resp.rs index 29b732e4..9a1d3e19 100644 --- a/src/proto/single/resp.rs +++ b/src/proto/single/resp.rs @@ -62,12 +62,6 @@ pub fn read_json(r: R) -> Result(r: R) -> Result { match read(r)?
{ - RawResponse::String(ref s) if s.is_empty() => Err(error::Protocol::BadType { - expected: "non-empty string representation of batch id", - received: "empty string".into(), - } - .into()), - RawResponse::String(ref s) => Ok(s.to_string()), RawResponse::Blob(ref b) if b.is_empty() => Err(error::Protocol::BadType { expected: "non-empty blob representation of batch id", received: "empty blob".into(), diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 8a270f94..bd73c6bb 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -812,7 +812,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { } #[test] -fn test_callback_will_be_queue_upon_commit_even_if_batch_is_empty() { +fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { use std::{thread, time}; skip_if_not_enterprise!(); @@ -820,7 +820,7 @@ fn test_callback_will_be_queue_upon_commit_even_if_batch_is_empty() { let mut p = Producer::connect(Some(&url)).unwrap(); let mut t = Tracker::connect(Some(&url)).unwrap(); let jobtype = "callback_jobtype"; - let q_name = "test_callback_will_be_queue_upon_commit_even_if_batch_is_empty"; + let q_name = "test_callback_will_be_queued_upon_commit_even_if_batch_is_empty"; let mut callbacks = some_jobs(jobtype, q_name, 2); let b = p .start_batch( @@ -893,9 +893,14 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { assert_eq!(status.pending, 2); // ############################## SUBTEST 0 ########################################## + // Let's try to open/reopen a batch we have never declared: + let b = p.open_batch(String::from("non-existent-batch-id")).unwrap(); + assert!(b.is_none()); + // ########################## END OF SUBTEST 0 ####################################### + + // ############################## SUBTEST 1 ########################################## // Let's fist of all try to open the batch we have not committed yet: - let mut b = p.open_batch(bid.clone()).unwrap(); - 
assert_eq!(b.id(), bid); + let mut b = p.open_batch(bid.clone()).unwrap().unwrap(); b.add(jobs.next().unwrap()).unwrap(); // 3 jobs b.commit().unwrap(); // committig the batch @@ -904,11 +909,12 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { assert_eq!(status.total, 3); assert_eq!(status.pending, 3); - // Subtest 0 result: + // Subtest 1 result: // The Faktory server let's us open the uncommitted batch. This is something not mention // in the docs, but still worth checking. + // ########################### END OF SUBTEST 1 ###################################### - // ############################## SUBTEST 1 ########################################## + // ############################## SUBTEST 2 ########################################## // From the docs: // """Note that, once committed, only a job within the batch may reopen it. // Faktory will return an error if you dynamically add jobs from "outside" the batch; @@ -916,8 +922,10 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { // Ref: https://github.com/contribsys/faktory/wiki/Ent-Batches#batch-open-bid (Jan 10, 2024) // Let's try to open an already committed batch: - let mut b = p.open_batch(bid.clone()).unwrap(); - assert_eq!(b.id(), bid); + let mut b = p + .open_batch(bid.clone()) + .expect("no error") + .expect("is some"); b.add(jobs.next().unwrap()).unwrap(); // 4 jobs b.commit().unwrap(); // committing the batch again! @@ -925,15 +933,14 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { assert_eq!(s.total, 4); assert_eq!(s.pending, 4); - // Subtest 1 result: + // Subtest 2 result: // We managed to open a batch "from outside" and the server accepted the job INSTEAD OF ERRORING BACK. 
- // ############################ END OF SUBTEST 1 ####################################### + // ############################ END OF SUBTEST 2 ####################################### - // ############################## SUBTEST 2 ############################################ + // ############################## SUBTEST 3 ############################################ // Let's see if we will be able to - again - open the committed batch "from outside" and // add a nested batch to it. - let mut b = p.open_batch(bid.clone()).unwrap(); - assert_eq!(b.id(), bid); // this is to make sure this is the same batch INDEED + let mut b = p.open_batch(bid.clone()).unwrap().expect("is some"); let mut nested_callbacks = some_jobs( "order_clean_up__NESTED", "test_batch_can_be_reopned_add_extra_jobs_added__CALLBACKs__NESTED", @@ -955,19 +962,21 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { assert_eq!(s.parent_bid, Some(bid)); // this is really our child batch assert_eq!(s.complete_callback_state, "1"); // has been enqueud - // Subtest 2 result: + // Subtest 3 result: // We managed to open an already committed batch "from outside" and the server accepted // a nested batch INSTEAD OF ERRORING BACK. - // ############################ END OF SUBTEST 2 ####################################### + // ############################ END OF SUBTEST 3 ####################################### - // ############################## SUBTEST 3 ############################################ + // ############################## SUBTEST 4 ############################################ // From the docs: // """Once a callback has enqueued for a batch, you may not add anything to the batch.""" // ref: https://github.com/contribsys/faktory/wiki/Ent-Batches#guarantees (Jan 10, 2024) // Let's try to re-open the nested batch that we have already committed and add some jobs to it. 
- let mut b = p.open_batch(nested_bid.clone()).unwrap(); - assert_eq!(b.id(), nested_bid); // this is to make sure this is the same batch INDEED + let mut b = p + .open_batch(nested_bid.clone()) + .expect("no error") + .expect("is some"); let mut more_jobs = some_jobs( "order_clean_up__NESTED", "test_batch_can_be_reopned_add_extra_jobs_added__NESTED", @@ -982,10 +991,10 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { assert_eq!(s.pending, 2); // ... though there are pending jobs assert_eq!(s.total, 2); - // Subtest 3 result: + // Subtest 4 result: // We were able to add more jobs to the batch for which the Faktory server had already // queued the callback. - // ############################## END OF SUBTEST 3 ##################################### + // ############################## END OF SUBTEST 4 ##################################### // ############################## OVERALL RESULTS ###################################### // The guarantees that definitely hold: From 6b4770c238c3431b415d52d5e1ec720ef8fb33ab Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 10:36:04 +0500 Subject: [PATCH 05/62] Update batch and job names in docs --- src/proto/batch/mod.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 588e541b..d74a2a91 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -28,11 +28,11 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// use faktory::{Producer, Job, Batch}; /// /// let mut prod = Producer::connect(None)?; -/// let job1 = Job::builder("image").build(); -/// let job2 = Job::builder("image").build(); -/// let job_cb = Job::builder("clean_up").build(); +/// let job1 = Job::builder("job_type").build(); +/// let job2 = Job::builder("job_type").build(); +/// let job_cb = Job::builder("callback_job_type").build(); /// -/// let batch = Batch::builder("Image resizing 
workload".to_string()).with_complete_callback(job_cb); +/// let batch = Batch::builder("Batch description".to_string()).with_complete_callback(job_cb); /// /// let mut batch = prod.start_batch(batch)?; /// batch.add(job1)?; @@ -46,16 +46,16 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// ```no_run /// # use faktory::{Producer, Job, Batch, Error}; /// # let mut prod = Producer::connect(None)?; -/// let parent_job1 = Job::builder("stats_build").build(); -/// let parent_job2 = Job::builder("entries_update").build(); -/// let parent_cb = Job::builder("clean_up").build(); +/// let parent_job1 = Job::builder("job_type").build(); +/// let parent_job2 = Job::builder("another_job_type").build(); +/// let parent_cb = Job::builder("callback_job_type").build(); /// -/// let child_job1 = Job::builder("image_recognition").build(); -/// let child_job2 = Job::builder("image_recognition").build(); -/// let child_cb = Job::builder("clean_up").build(); +/// let child_job1 = Job::builder("job_type").build(); +/// let child_job2 = Job::builder("yet_another_job_type").build(); +/// let child_cb = Job::builder("callback_job_type").build(); /// -/// let parent_batch = Batch::builder("Image recognition and analysis workload".to_string()).with_complete_callback(parent_cb); -/// let child_batch = Batch::builder("Image recognition workload".to_string()).with_success_callback(child_cb); +/// let parent_batch = Batch::builder("Parent batch description".to_string()).with_complete_callback(parent_cb); +/// let child_batch = Batch::builder("Child batch description".to_string()).with_success_callback(child_cb); /// /// let mut parent = prod.start_batch(parent_batch)?; /// parent.add(parent_job1)?; @@ -79,9 +79,9 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// # use faktory::Error; /// # use faktory::{Producer, Job, Batch, Tracker}; /// let mut prod = Producer::connect(None)?; -/// let job = Job::builder("image").build(); -/// let cb_job = 
Job::builder("clean_up").build(); -/// let b = Batch::builder("Description...".to_string()).with_complete_callback(cb_job); +/// let job = Job::builder("job_type").build(); +/// let cb_job = Job::builder("callback_job_type").build(); +/// let b = Batch::builder("Batch description".to_string()).with_complete_callback(cb_job); /// /// let mut b = prod.start_batch(b)?; /// let bid = b.id().to_string(); @@ -92,7 +92,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// let s = t.get_batch_status(bid).unwrap().unwrap(); /// assert_eq!(s.total, 1); /// assert_eq!(s.pending, 1); -/// assert_eq!(s.description, Some("Description...".into())); +/// assert_eq!(s.description, Some("Batch description".into())); /// assert_eq!(s.complete_callback_state, ""); // has not been queued; /// # Ok::<(), Error>(()) /// ``` From b69ed1afda6224a75bddc69ca55fb2ae08de1334 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 10:50:37 +0500 Subject: [PATCH 06/62] Expect no args on batch::builder --- src/proto/batch/mod.rs | 49 +++++++++++++++++++++++----------------- tests/real/enterprise.rs | 33 +++++++++++++++++---------- 2 files changed, 49 insertions(+), 33 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index d74a2a91..9ae4dc0b 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -32,7 +32,9 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// let job2 = Job::builder("job_type").build(); /// let job_cb = Job::builder("callback_job_type").build(); /// -/// let batch = Batch::builder("Batch description".to_string()).with_complete_callback(job_cb); +/// let batch = Batch::builder() +/// .description("Batch description".to_string()) +/// .with_complete_callback(job_cb); /// /// let mut batch = prod.start_batch(batch)?; /// batch.add(job1)?; @@ -54,8 +56,12 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// let child_job2 = Job::builder("yet_another_job_type").build(); /// let child_cb = 
Job::builder("callback_job_type").build(); /// -/// let parent_batch = Batch::builder("Parent batch description".to_string()).with_complete_callback(parent_cb); -/// let child_batch = Batch::builder("Child batch description".to_string()).with_success_callback(child_cb); +/// let parent_batch = Batch::builder() +/// .description("Batch description".to_string()) +/// .with_complete_callback(parent_cb); +/// let child_batch = Batch::builder() +/// .description("Child batch description".to_string()) +/// .with_success_callback(child_cb); /// /// let mut parent = prod.start_batch(parent_batch)?; /// parent.add(parent_job1)?; @@ -81,7 +87,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// let mut prod = Producer::connect(None)?; /// let job = Job::builder("job_type").build(); /// let cb_job = Job::builder("callback_job_type").build(); -/// let b = Batch::builder("Batch description".to_string()).with_complete_callback(cb_job); +/// let b = Batch::builder().description("Batch description".to_string()).with_complete_callback(cb_job); /// /// let mut b = prod.start_batch(b)?; /// let bid = b.id().to_string(); @@ -131,8 +137,8 @@ pub struct Batch { impl Batch { /// Create a new `BatchBuilder`. - pub fn builder(description: impl Into>) -> BatchBuilder { - BatchBuilder::new(description) + pub fn builder() -> BatchBuilder { + BatchBuilder::new() } } @@ -143,11 +149,8 @@ impl BatchBuilder { } /// Create a new `BatchBuilder` with optional description of the batch. 
- pub fn new(description: impl Into>) -> BatchBuilder { - BatchBuilder { - description: Some(description.into()), - ..Self::create_empty() - } + pub fn new() -> BatchBuilder { + Self::create_empty() } fn success(&mut self, success_cb: impl Into>) -> &mut Self { @@ -271,19 +274,21 @@ mod test { #[test] fn test_batch_creation() { - let b = BatchBuilder::new("Image processing batch".to_string()) + let b = BatchBuilder::new() + .description("Image processing batch".to_string()) .with_success_callback(Job::builder("thumbnail").build()); assert!(b.complete.is_none()); assert!(b.parent_bid.is_none()); assert!(b.success.is_some()); assert_eq!(b.description, Some("Image processing batch".into())); - let b = BatchBuilder::new("Image processing batch".to_string()) + let b = BatchBuilder::new() + .description("Image processing batch".to_string()) .with_complete_callback(Job::builder("thumbnail").build()); assert!(b.complete.is_some()); assert!(b.success.is_none()); - let b = BatchBuilder::new(None).with_callbacks( + let b = BatchBuilder::new().with_callbacks( Job::builder("thumbnail").build(), Job::builder("thumbnail").build(), ); @@ -306,7 +311,8 @@ mod test { // with description and on success callback: let got = serde_json::to_string( - &BatchBuilder::new("Image processing workload".to_string()) + &BatchBuilder::new() + .description("Image processing workload".to_string()) .with_success_callback(prepare_test_job("thumbnail_clean_up".into())), ) .unwrap(); @@ -315,8 +321,7 @@ mod test { // without description and with on complete callback: let got = serde_json::to_string( - &BatchBuilder::new(None) - .with_complete_callback(prepare_test_job("thumbnail_info".into())), + &BatchBuilder::new().with_complete_callback(prepare_test_job("thumbnail_info".into())), ) .unwrap(); let expected = 
r#"{"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; @@ -324,10 +329,12 @@ mod test { // with description and both callbacks: let got = serde_json::to_string( - &BatchBuilder::new("Image processing workload".to_string()).with_callbacks( - prepare_test_job("thumbnail_clean_up".into()), - prepare_test_job("thumbnail_info".into()), - ), + &BatchBuilder::new() + .description("Image processing workload".to_string()) + .with_callbacks( + prepare_test_job("thumbnail_clean_up".into()), + prepare_test_job("thumbnail_info".into()), + ), ) .unwrap(); let expected = r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0},"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index bd73c6bb..4b4690f4 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -539,8 +539,9 @@ fn test_batch_of_jobs_can_be_initiated() { .queue("test_batch_of_jobs_can_be_initiated__CALLBACKs") .build(); - let batch = - Batch::builder("Image resizing workload".to_string()).with_complete_callback(cb_job); + let batch = Batch::builder() + .description("Image resizing workload".to_string()) + .with_complete_callback(cb_job); let time_just_before_batch_init = Utc::now(); @@ -677,20 +678,24 @@ fn test_batches_can_be_nested() { .build(); // batches start - let parent_batch = - Batch::builder("Parent batch".to_string()).with_success_callback(parent_cb_job); + let parent_batch = Batch::builder() + .description("Parent batch".to_string()) + 
.with_success_callback(parent_cb_job); let mut parent_batch = p.start_batch(parent_batch).unwrap(); let parent_batch_id = parent_batch.id().to_owned(); parent_batch.add(parent_job1).unwrap(); - let child_batch = Batch::builder("Child batch".to_string()).with_success_callback(child_cb_job); + let child_batch = Batch::builder() + .description("Child batch".to_string()) + .with_success_callback(child_cb_job); let mut child_batch = parent_batch.start_batch(child_batch).unwrap(); let child_batch_id = child_batch.id().to_owned(); child_batch.add(child_job_1).unwrap(); child_batch.add(child_job_2).unwrap(); - let grandchild_batch = - Batch::builder("Grandchild batch".to_string()).with_success_callback(grandchild_cb_job); + let grandchild_batch = Batch::builder() + .description("Grandchild batch".to_string()) + .with_success_callback(grandchild_cb_job); let mut grandchild_batch = child_batch.start_batch(grandchild_batch).unwrap(); let grandchild_batch_id = grandchild_batch.id().to_owned(); grandchild_batch.add(grand_child_job_1).unwrap(); @@ -749,7 +754,8 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { // start a 'batch': let mut b = p .start_batch( - Batch::builder("Orders processing workload".to_string()) + Batch::builder() + .description("Orders processing workload".to_string()) .with_success_callback(callbacks.next().unwrap()), ) .unwrap(); @@ -824,7 +830,8 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let mut callbacks = some_jobs(jobtype, q_name, 2); let b = p .start_batch( - Batch::builder("Orders processing workload".to_string()) + Batch::builder() + .description("Orders processing workload".to_string()) .with_callbacks(callbacks.next().unwrap(), callbacks.next().unwrap()), ) .unwrap(); @@ -880,7 +887,8 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { 1, ); - let b = Batch::builder("Orders processing workload".to_string()) + let b = Batch::builder() + .description("Orders processing 
workload".to_string()) .with_success_callback(callbacks.next().unwrap()); let mut b = p.start_batch(b).unwrap(); @@ -946,8 +954,9 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { "test_batch_can_be_reopned_add_extra_jobs_added__CALLBACKs__NESTED", 2, ); - let nested_batch_declaration = - Batch::builder("Orders processing workload. Nested stage".to_string()).with_callbacks( + let nested_batch_declaration = Batch::builder() + .description("Orders processing workload. Nested stage".to_string()) + .with_callbacks( nested_callbacks.next().unwrap(), nested_callbacks.next().unwrap(), ); From 3e27bf113fbb7e5a798c30f045ad4810b8a17704 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 10:55:45 +0500 Subject: [PATCH 07/62] Avoid unwraps in docs --- src/proto/batch/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 9ae4dc0b..16847bfc 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -94,8 +94,8 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// b.add(job)?; /// b.commit()?; /// -/// let mut t = Tracker::connect(None).unwrap(); -/// let s = t.get_batch_status(bid).unwrap().unwrap(); +/// let mut t = Tracker::connect(None)?; +/// let s = t.get_batch_status(bid)?.unwrap(); /// assert_eq!(s.total, 1); /// assert_eq!(s.pending, 1); /// assert_eq!(s.description, Some("Batch description".into())); From 1ca41598eaf07a9f9302a0ac38e1ab323eaf0e09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Wed, 31 Jan 2024 10:58:25 +0500 Subject: [PATCH 08/62] Update src/proto/batch/mod.rs Co-authored-by: Jon Gjengset --- src/proto/batch/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 16847bfc..0864ac10 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -145,7 +145,7 @@ impl 
Batch { impl BatchBuilder { fn build(&mut self) -> Batch { self.try_build() - .expect("All required fields have been set.") + .expect("There are no required fields.") } /// Create a new `BatchBuilder` with optional description of the batch. From a8048eed355c8080560c19245a3072855d5452ae Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 12:01:10 +0500 Subject: [PATCH 09/62] Consume batchbuilder --- src/proto/batch/mod.rs | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 0864ac10..d53d0d65 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -102,9 +102,10 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// assert_eq!(s.complete_callback_state, ""); // has not been queued; /// # Ok::<(), Error>(()) /// ``` -#[derive(Serialize, Debug, Builder)] +#[derive(Builder, Debug, Serialize)] #[builder( custom_constructor, + pattern = "owned", setter(into), build_fn(name = "try_build", private) )] @@ -143,9 +144,8 @@ impl Batch { } impl BatchBuilder { - fn build(&mut self) -> Batch { - self.try_build() - .expect("There are no required fields.") + fn build(self) -> Batch { + self.try_build().expect("There are no required fields.") } /// Create a new `BatchBuilder` with optional description of the batch. @@ -164,27 +164,38 @@ impl BatchBuilder { } /// Create a `Batch` with only `success` callback specified. - pub fn with_success_callback(&mut self, success_cb: Job) -> Batch { + pub fn with_success_callback(mut self, success_cb: Job) -> Batch { self.success(success_cb); self.complete(None); self.build() } /// Create a `Batch` with only `complete` callback specified. 
- pub fn with_complete_callback(&mut self, complete_cb: Job) -> Batch { + pub fn with_complete_callback(mut self, complete_cb: Job) -> Batch { self.complete(complete_cb); self.success(None); self.build() } /// Create a `Batch` with both `success` and `complete` callbacks specified. - pub fn with_callbacks(&mut self, success_cb: Job, complete_cb: Job) -> Batch { + pub fn with_callbacks(mut self, success_cb: Job, complete_cb: Job) -> Batch { self.success(success_cb); self.complete(complete_cb); self.build() } } +impl Clone for BatchBuilder { + fn clone(&self) -> Self { + BatchBuilder { + parent_bid: self.parent_bid.clone(), + description: self.description.clone(), + success: self.success.clone(), + complete: self.complete.clone(), + } + } +} + pub struct BatchHandle<'a, S: Read + Write> { bid: String, prod: &'a mut Producer, @@ -277,6 +288,7 @@ mod test { let b = BatchBuilder::new() .description("Image processing batch".to_string()) .with_success_callback(Job::builder("thumbnail").build()); + assert!(b.complete.is_none()); assert!(b.parent_bid.is_none()); assert!(b.success.is_some()); @@ -295,6 +307,10 @@ mod test { assert!(b.description.is_none()); assert!(b.complete.is_some()); assert!(b.success.is_some()); + + let b = BatchBuilder::new().description("Batch description".to_string()); + let _batch_with_complete_cb = b.clone().with_complete_callback(Job::builder("jt").build()); + let _batch_with_success_cb = b.with_success_callback(Job::builder("jt").build()); } #[test] From 6653c508bc252710cd8f21e3aac849256220cfd1 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 22:37:51 +0500 Subject: [PATCH 10/62] Accept Into in BatchBuilder description --- src/proto/batch/mod.rs | 60 ++++++++++++++++++++-------------------- tests/real/enterprise.rs | 16 +++++------ 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index d53d0d65..eeaddaf0 100644 --- a/src/proto/batch/mod.rs +++ 
b/src/proto/batch/mod.rs @@ -33,7 +33,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// let job_cb = Job::builder("callback_job_type").build(); /// /// let batch = Batch::builder() -/// .description("Batch description".to_string()) +/// .description("Batch description") /// .with_complete_callback(job_cb); /// /// let mut batch = prod.start_batch(batch)?; @@ -57,10 +57,10 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// let child_cb = Job::builder("callback_job_type").build(); /// /// let parent_batch = Batch::builder() -/// .description("Batch description".to_string()) +/// .description("Batch description") /// .with_complete_callback(parent_cb); /// let child_batch = Batch::builder() -/// .description("Child batch description".to_string()) +/// .description("Child batch description") /// .with_success_callback(child_cb); /// /// let mut parent = prod.start_batch(parent_batch)?; @@ -87,7 +87,9 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// let mut prod = Producer::connect(None)?; /// let job = Job::builder("job_type").build(); /// let cb_job = Job::builder("callback_job_type").build(); -/// let b = Batch::builder().description("Batch description".to_string()).with_complete_callback(cb_job); +/// let b = Batch::builder() +/// .description("Batch description") +/// .with_complete_callback(cb_job); /// /// let mut b = prod.start_batch(b)?; /// let bid = b.id().to_string(); @@ -116,6 +118,7 @@ pub struct Batch { /// Batch description for Faktory WEB UI. #[serde(skip_serializing_if = "Option::is_none")] + #[builder(setter(custom))] pub description: Option, /// On success callback. @@ -123,7 +126,7 @@ pub struct Batch { /// This job will be queued by the Faktory server provided /// all the jobs belonging to this batch have been executed successfully. #[serde(skip_serializing_if = "Option::is_none")] - #[builder(setter(custom))] + #[builder(setter(skip))] pub(crate) success: Option, /// On complete callback. 
@@ -132,7 +135,7 @@ pub struct Batch { /// belonging to this batch have been executed, even if one/some/all /// of the workers have failed. #[serde(skip_serializing_if = "Option::is_none")] - #[builder(setter(custom))] + #[builder(setter(skip))] pub(crate) complete: Option, } @@ -153,35 +156,32 @@ impl BatchBuilder { Self::create_empty() } - fn success(&mut self, success_cb: impl Into>) -> &mut Self { - self.success = Some(success_cb.into()); - self - } - - fn complete(&mut self, complete_cb: impl Into>) -> &mut Self { - self.complete = Some(complete_cb.into()); + /// Batch description for Faktory WEB UI. + pub fn description(mut self, description: impl Into) -> Self { + self.description = Some(Some(description.into())); self } /// Create a `Batch` with only `success` callback specified. - pub fn with_success_callback(mut self, success_cb: Job) -> Batch { - self.success(success_cb); - self.complete(None); - self.build() + pub fn with_success_callback(self, success_cb: Job) -> Batch { + let mut b = self.build(); + b.success = Some(success_cb); + b } /// Create a `Batch` with only `complete` callback specified. - pub fn with_complete_callback(mut self, complete_cb: Job) -> Batch { - self.complete(complete_cb); - self.success(None); - self.build() + pub fn with_complete_callback(self, complete_cb: Job) -> Batch { + let mut b = self.build(); + b.complete = Some(complete_cb); + b } /// Create a `Batch` with both `success` and `complete` callbacks specified. 
- pub fn with_callbacks(mut self, success_cb: Job, complete_cb: Job) -> Batch { - self.success(success_cb); - self.complete(complete_cb); - self.build() + pub fn with_callbacks(self, success_cb: Job, complete_cb: Job) -> Batch { + let mut b = self.build(); + b.success = Some(success_cb); + b.complete = Some(complete_cb); + b } } @@ -286,7 +286,7 @@ mod test { #[test] fn test_batch_creation() { let b = BatchBuilder::new() - .description("Image processing batch".to_string()) + .description("Image processing batch") .with_success_callback(Job::builder("thumbnail").build()); assert!(b.complete.is_none()); @@ -295,7 +295,7 @@ mod test { assert_eq!(b.description, Some("Image processing batch".into())); let b = BatchBuilder::new() - .description("Image processing batch".to_string()) + .description("Image processing batch") .with_complete_callback(Job::builder("thumbnail").build()); assert!(b.complete.is_some()); assert!(b.success.is_none()); @@ -308,7 +308,7 @@ mod test { assert!(b.complete.is_some()); assert!(b.success.is_some()); - let b = BatchBuilder::new().description("Batch description".to_string()); + let b = BatchBuilder::new().description("Batch description"); let _batch_with_complete_cb = b.clone().with_complete_callback(Job::builder("jt").build()); let _batch_with_success_cb = b.with_success_callback(Job::builder("jt").build()); } @@ -328,7 +328,7 @@ mod test { // with description and on success callback: let got = serde_json::to_string( &BatchBuilder::new() - .description("Image processing workload".to_string()) + .description("Image processing workload") .with_success_callback(prepare_test_job("thumbnail_clean_up".into())), ) .unwrap(); @@ -346,7 +346,7 @@ mod test { // with description and both callbacks: let got = serde_json::to_string( &BatchBuilder::new() - .description("Image processing workload".to_string()) + .description("Image processing workload") .with_callbacks( prepare_test_job("thumbnail_clean_up".into()), 
prepare_test_job("thumbnail_info".into()), diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 4b4690f4..1cc27f5a 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -540,7 +540,7 @@ fn test_batch_of_jobs_can_be_initiated() { .build(); let batch = Batch::builder() - .description("Image resizing workload".to_string()) + .description("Image resizing workload") .with_complete_callback(cb_job); let time_just_before_batch_init = Utc::now(); @@ -679,14 +679,14 @@ fn test_batches_can_be_nested() { // batches start let parent_batch = Batch::builder() - .description("Parent batch".to_string()) + .description("Parent batch") .with_success_callback(parent_cb_job); let mut parent_batch = p.start_batch(parent_batch).unwrap(); let parent_batch_id = parent_batch.id().to_owned(); parent_batch.add(parent_job1).unwrap(); let child_batch = Batch::builder() - .description("Child batch".to_string()) + .description("Child batch") .with_success_callback(child_cb_job); let mut child_batch = parent_batch.start_batch(child_batch).unwrap(); let child_batch_id = child_batch.id().to_owned(); @@ -694,7 +694,7 @@ fn test_batches_can_be_nested() { child_batch.add(child_job_2).unwrap(); let grandchild_batch = Batch::builder() - .description("Grandchild batch".to_string()) + .description("Grandchild batch") .with_success_callback(grandchild_cb_job); let mut grandchild_batch = child_batch.start_batch(grandchild_batch).unwrap(); let grandchild_batch_id = grandchild_batch.id().to_owned(); @@ -755,7 +755,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { let mut b = p .start_batch( Batch::builder() - .description("Orders processing workload".to_string()) + .description("Orders processing workload") .with_success_callback(callbacks.next().unwrap()), ) .unwrap(); @@ -831,7 +831,7 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let b = p .start_batch( Batch::builder() - .description("Orders processing 
workload".to_string()) + .description("Orders processing workload") .with_callbacks(callbacks.next().unwrap(), callbacks.next().unwrap()), ) .unwrap(); @@ -888,7 +888,7 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { ); let b = Batch::builder() - .description("Orders processing workload".to_string()) + .description("Orders processing workload") .with_success_callback(callbacks.next().unwrap()); let mut b = p.start_batch(b).unwrap(); @@ -955,7 +955,7 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { 2, ); let nested_batch_declaration = Batch::builder() - .description("Orders processing workload. Nested stage".to_string()) + .description("Orders processing workload. Nested stage") .with_callbacks( nested_callbacks.next().unwrap(), nested_callbacks.next().unwrap(), From c0e0176b1c8143723d6f06b7472fdd89083fd51f Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 22:43:49 +0500 Subject: [PATCH 11/62] Default description to None --- src/proto/batch/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index eeaddaf0..3211903c 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -118,7 +118,7 @@ pub struct Batch { /// Batch description for Faktory WEB UI. #[serde(skip_serializing_if = "Option::is_none")] - #[builder(setter(custom))] + #[builder(setter(custom), default = "None")] pub description: Option, /// On success callback. 
From 1cda1ddefbbdba538250542de096b42ae9fe7cba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Wed, 31 Jan 2024 22:44:39 +0500 Subject: [PATCH 12/62] Update src/proto/batch/mod.rs Co-authored-by: Jon Gjengset --- src/proto/batch/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 3211903c..df67e054 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -227,7 +227,7 @@ impl<'a, S: Read + Write> BatchHandle<'a, S> { /// /// The Faktory server will not queue any callbacks, unless the batch is committed. /// Committing an empty batch will make the server queue the callback(s) right away. - /// Once committed, the batch can still be re-opened with [open_batch](struct.Producer.html#method.open_batch), + /// Once committed, the batch can still be re-opened with [open_batch](Producer::open_batch), /// and extra jobs can be added to it. pub fn commit(self) -> Result<(), Error> { self.prod.commit_batch(self.bid) From d3e4505aa1796cc890057387615ad298ca72a4f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Wed, 31 Jan 2024 22:45:11 +0500 Subject: [PATCH 13/62] Update src/proto/batch/mod.rs Co-authored-by: Jon Gjengset --- src/proto/batch/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index df67e054..923580ae 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -264,7 +264,7 @@ pub struct BatchStatus { /// State of the `complete` callback. /// - /// See [complete](struct.Batch.html#structfield.complete). + /// See [complete](Batch::complete). 
#[serde(rename = "complete_st")] pub complete_callback_state: String, From 87bf6c7c505a350238a8cc353a3812a46d64134b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Wed, 31 Jan 2024 22:50:05 +0500 Subject: [PATCH 14/62] Update src/proto/single/ent.rs Co-authored-by: Jon Gjengset --- src/proto/single/ent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 3aa91394..3253c9f3 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -122,7 +122,7 @@ pub struct Progress { /// Info on job execution progress (sent). /// /// In Enterprise Faktory, a client executing a job can report on the execution -/// progress, provided the job is trackable. A trackable job is the one with "track":1 +/// progress, provided the job is trackable. A trackable job is one with "track":1 /// specified in the custom data hash. #[derive(Debug, Clone, Serialize, Builder)] #[builder( From 304f29014016e872939f78b7e67272d1bb28a13f Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 31 Jan 2024 23:02:43 +0500 Subject: [PATCH 15/62] Update comment to 'parse_datetime' utility --- src/proto/single/ent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 3253c9f3..5449918f 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -7,7 +7,7 @@ use serde::{ use crate::{Error, JobBuilder}; -// Used to parse responses from Faktory that look like this: +// Used to parse responses from Faktory where a datetime field is set to an empty string, e.g: // '{"jid":"f7APFzrS2RZi9eaA","state":"unknown","updated_at":""}' fn parse_datetime<'de, D>(value: D) -> Result>, D::Error> where From 31bef3e751d268f31699251299e0b09412404b57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Thu, 1 
Feb 2024 00:27:39 +0500 Subject: [PATCH 16/62] Update src/proto/single/ent.rs Co-authored-by: Jon Gjengset --- src/proto/single/ent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 5449918f..03f973be 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -95,7 +95,7 @@ impl JobBuilder { /// /// The tracker is guaranteed to get the following details: the job's id (though /// they should know it beforehand in order to be ably to track the job), its last -/// know state (e.g."enqueued", "working", "success", "unknown") and the date and time +/// know state (e.g., "enqueued", "working", "success", "unknown"), and the date and time /// the job was last updated. Additionally, information on what's going on with the job /// ([desc](struct.ProgressUpdate.html#structfield.desc)) and completion percentage /// ([percent](struct.ProgressUpdate.html#structfield.percent)) may be available, From f5548178c1dd43cc7de8bda2f3fa7eb756e1cb7a Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Thu, 1 Feb 2024 00:28:29 +0500 Subject: [PATCH 17/62] Typo in 'known' --- src/proto/single/ent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 03f973be..846ca863 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -95,7 +95,7 @@ impl JobBuilder { /// /// The tracker is guaranteed to get the following details: the job's id (though /// they should know it beforehand in order to be ably to track the job), its last -/// know state (e.g., "enqueued", "working", "success", "unknown"), and the date and time +/// known state (e.g., "enqueued", "working", "success", "unknown"), and the date and time /// the job was last updated. 
Additionally, information on what's going on with the job /// ([desc](struct.ProgressUpdate.html#structfield.desc)) and completion percentage /// ([percent](struct.ProgressUpdate.html#structfield.percent)) may be available, From c2db5ea8bf98cff1bb840e5fb74801a006974448 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Thu, 1 Feb 2024 00:50:23 +0500 Subject: [PATCH 18/62] Update docs for 'reserve_until' --- src/proto/single/ent.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 846ca863..725568b7 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -145,9 +145,10 @@ pub struct ProgressUpdate { #[builder(default = "None")] pub desc: Option, - /// Allows to extend the job's reservation, if more time needed to execute it. + /// Allows to extend the job's reservation, if more time is needed to execute it. /// - /// Note that you cannot decrease the initial [reservation](struct.Job.html#structfield.reserve_for). + /// Note that you cannot shorten the initial [reservation](struct.Job.html#structfield.reserve_for) via + /// specifying an instant that is sooner than the job's initial reservation deadline. 
#[serde(skip_serializing_if = "Option::is_none")] #[builder(default = "None")] pub reserve_until: Option>, From effdd81be7ad2dbc7e41729a628fd709bfc2b93e Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Thu, 1 Feb 2024 01:30:12 +0500 Subject: [PATCH 19/62] Update docs, make 'BatchHandle' public --- src/lib.rs | 2 +- src/producer/mod.rs | 2 ++ src/proto/batch/mod.rs | 5 +++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index de0aac1f..8fba0e3f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,7 +81,7 @@ pub use crate::proto::{Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub use crate::proto::{ - Batch, BatchBuilder, BatchStatus, Progress, ProgressUpdate, ProgressUpdateBuilder, + Batch, BatchBuilder, BatchHandle, BatchStatus, Progress, ProgressUpdate, ProgressUpdateBuilder, }; #[cfg(feature = "ent")] mod tracker; diff --git a/src/producer/mod.rs b/src/producer/mod.rs index 624c69c5..deb0ce3f 100644 --- a/src/producer/mod.rs +++ b/src/producer/mod.rs @@ -133,6 +133,7 @@ impl Producer { /// Initiate a new batch of jobs. #[cfg(feature = "ent")] + #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub fn start_batch(&mut self, batch: Batch) -> Result, Error> { let bid = self.c.issue(&batch)?.read_bid()?; Ok(BatchHandle::new(bid, self)) @@ -143,6 +144,7 @@ impl Producer { /// This will not error if a batch with the provided `bid` does not exist, /// rather `Ok(None)` will be returned. #[cfg(feature = "ent")] + #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub fn open_batch(&mut self, bid: String) -> Result>, Error> { let bid = self.c.issue(&OpenBatch::from(bid))?.maybe_bid()?; match bid { diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 923580ae..e494d696 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -196,6 +196,7 @@ impl Clone for BatchBuilder { } } +/// Represents a newly started or re-opened batch of jobs. 
pub struct BatchHandle<'a, S: Read + Write> { bid: String, prod: &'a mut Producer, @@ -264,13 +265,13 @@ pub struct BatchStatus { /// State of the `complete` callback. /// - /// See [complete](Batch::complete). + /// See [with_complete_callback](struct.BatchBuilder.html#method.with_complete_callback). #[serde(rename = "complete_st")] pub complete_callback_state: String, /// State of the `success` callback. /// - /// See [success](struct.Batch.html#structfield.success). + /// See [with_success_callback](struct.BatchBuilder.html#method.with_success_callback). #[serde(rename = "success_st")] pub success_callback_state: String, } From 7c7e1c3aadb2de35588a4964b193685243ab27a3 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Thu, 1 Feb 2024 12:00:28 +0500 Subject: [PATCH 20/62] Update comment in e2e test. --- tests/real/enterprise.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 1cc27f5a..0877dc26 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -903,6 +903,9 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { // ############################## SUBTEST 0 ########################################## // Let's try to open/reopen a batch we have never declared: let b = p.open_batch(String::from("non-existent-batch-id")).unwrap(); + // The server will error back on this, with "No such batch ", but + // we are handling this case for the end-user and returning `Ok(None)` instead, indicating + // this way that there is not such batch. assert!(b.is_none()); // ########################## END OF SUBTEST 0 ####################################### From c27992259b67934f1f2ff9d4f7d9f410bef3753b Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Thu, 1 Feb 2024 14:08:27 +0500 Subject: [PATCH 21/62] Add JobState enum. 
Update tests accordingly --- src/lib.rs | 3 ++- src/proto/mod.rs | 2 +- src/proto/single/ent.rs | 50 ++++++++++++++++++++++++++++++++++++++-- src/tracker/mod.rs | 14 +++++++---- tests/real/enterprise.rs | 9 +++++--- 5 files changed, 67 insertions(+), 11 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 8fba0e3f..cd31aeed 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,7 +81,8 @@ pub use crate::proto::{Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub use crate::proto::{ - Batch, BatchBuilder, BatchHandle, BatchStatus, Progress, ProgressUpdate, ProgressUpdateBuilder, + Batch, BatchBuilder, BatchHandle, BatchStatus, JobState, Progress, ProgressUpdate, + ProgressUpdateBuilder, }; #[cfg(feature = "ent")] mod tracker; diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 47fd97e9..89960d65 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -16,7 +16,7 @@ pub use self::single::{ }; #[cfg(feature = "ent")] -pub use self::single::ent::{Progress, ProgressUpdate, ProgressUpdateBuilder, Track}; +pub use self::single::ent::{JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, Track}; #[cfg(feature = "ent")] mod batch; diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 725568b7..fce53f60 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -91,6 +91,49 @@ impl JobBuilder { } } +// Ref: https://github.com/contribsys/faktory/wiki/Ent-Tracking#notes +/// Job's state as last known by the Faktory server. +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum JobState { + /// The server can't tell a job's state. + /// + /// This happens when a job with the specified ID has never been enqueued, or the job has + /// not been marked as trackable via "track":1 in its custom hash, or the tracking info on this + /// job has simply expired on the server (normally, after 30 min). + Unknown, + + /// The job has been enqueued. 
+ Enqueued, + + /// The job has been consumed by a worker and is now being executed. + Working, + + /// The job has been executed with success. + Success, + + /// The job has finished with an error. + Failed, + + /// The jobs has been consumed but its status has never been updated. + Dead, +} + +impl Display for JobState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use JobState::*; + let s = match self { + Unknown => "unknown", + Enqueued => "enqueued", + Working => "working", + Success => "success", + Failed => "failed", + Dead => "dead", + }; + write!(f, "{}", s) + } +} + /// Info on job execution progress (retrieved). /// /// The tracker is guaranteed to get the following details: the job's id (though @@ -106,7 +149,7 @@ pub struct Progress { pub jid: String, /// Job's state. - pub state: String, + pub state: JobState, /// When this job was last updated. #[serde(deserialize_with = "parse_datetime")] @@ -194,7 +237,10 @@ impl ProgressUpdateBuilder { // ---------------------------------------------- use super::FaktoryCommand; -use std::{fmt::Debug, io::Write}; +use std::{ + fmt::{Debug, Display}, + io::Write, +}; #[derive(Debug, Clone)] pub enum Track { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 2ba3a9fa..34a55027 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -10,14 +10,20 @@ use crate::{BatchStatus, Error, Progress, ProgressUpdate}; /// /// Fetching a job's execution progress: /// ```no_run -/// use faktory::Tracker; +/// use faktory::{Tracker, JobState}; /// let job_id = String::from("W8qyVle9vXzUWQOf"); /// let mut tracker = Tracker::connect(None)?; /// if let Some(progress) = tracker.get_progress(job_id)? { -/// if progress.state == "success" { -/// # /* +/// match progress.state { +/// JobState::Success => { +/// # /* +/// ... +/// # */ +/// }, +/// # /* /// ... 
-/// # */ +/// # */ +/// # _ => {}, /// } /// } /// # Ok::<(), faktory::Error>(()) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 0877dc26..818325f7 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -452,7 +452,10 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert!(result.is_some()); let result = result.unwrap(); assert_eq!(result.jid, job_id_captured.clone()); - assert_eq!(result.state, "working"); + match result.state { + JobState::Working => {} + _ => panic!("expected job's state to be 'working'"), + } assert!(result.updated_at.is_some()); assert_eq!(result.desc, Some("Still processing...".to_owned())); assert_eq!(result.percent, Some(32)); @@ -479,7 +482,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { // But it actually knows the job's real status, since the consumer (worker) // informed it immediately after finishing with the job: - assert_eq!(result.state, "success"); + assert_eq!(result.state.to_string(), "success"); // What about 'ordinary' job ? 
 let job_id = job_ordinary.id().to_owned().clone(); @@ -504,7 +507,7 @@ assert_eq!(progress.jid, job_id); // Returned from Faktory: '{"jid":"f7APFzrS2RZi9eaA","state":"unknown","updated_at":""}' - assert_eq!(progress.state, "unknown"); + assert_eq!(progress.state.to_string(), "unknown"); assert!(progress.updated_at.is_none()); assert!(progress.percent.is_none()); assert!(progress.desc.is_none()); From fa906dfda95f692b636e73b2cfd4dcef224ad614 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 09:15:44 +0500 Subject: [PATCH 22/62] Add 'open' method on 'BatchStatus' --- src/proto/batch/mod.rs | 14 ++++++++++++++ tests/real/enterprise.rs | 4 +++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index e494d696..26071317 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -276,6 +276,20 @@ pub struct BatchStatus { pub success_callback_state: String, } +#[cfg(feature = "ent")] +#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] +impl<'a> BatchStatus { + /// Open the batch for which this `BatchStatus` has been retrieved. + /// + /// See [`open_batch`](Producer::open_batch).
+ pub fn open( + &self, + prod: &'a mut Producer, + ) -> Result>, Error> { + prod.open_batch(self.bid.clone()) + } +} + #[cfg(test)] mod test { use std::str::FromStr; diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 818325f7..0b1311af 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -914,7 +914,9 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { // ############################## SUBTEST 1 ########################################## // Let's fist of all try to open the batch we have not committed yet: - let mut b = p.open_batch(bid.clone()).unwrap().unwrap(); + // [We can use `producer::open_batch` specifying a bid OR - even we previously retrived + // a status for this batch, we can go with `status::open` providing an exclusive ref to producer] + let mut b = status.open(&mut p).unwrap().unwrap(); b.add(jobs.next().unwrap()).unwrap(); // 3 jobs b.commit().unwrap(); // committig the batch From 73c11fd557b0abe15e2c96b0ba298ea1ef25b609 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 10:25:42 +0500 Subject: [PATCH 23/62] Add 'set_progress' shortcut --- src/lib.rs | 4 ++-- src/proto/mod.rs | 4 +++- src/proto/single/ent.rs | 45 ++++++++++++++++++++-------------------- tests/real/enterprise.rs | 29 +++++++++++++++++++------- 4 files changed, 49 insertions(+), 33 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index cd31aeed..c5db49fe 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,8 +81,8 @@ pub use crate::proto::{Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub use crate::proto::{ - Batch, BatchBuilder, BatchHandle, BatchStatus, JobState, Progress, ProgressUpdate, - ProgressUpdateBuilder, + set_progress, Batch, BatchBuilder, BatchHandle, BatchStatus, JobState, Progress, + ProgressUpdate, ProgressUpdateBuilder, }; #[cfg(feature = "ent")] mod tracker; diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 89960d65..ac8c988f 100644 
--- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -16,7 +16,9 @@ pub use self::single::{ }; #[cfg(feature = "ent")] -pub use self::single::ent::{JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, Track}; +pub use self::single::ent::{ + set_progress, JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, Track, +}; #[cfg(feature = "ent")] mod batch; diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index fce53f60..cba1dac0 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -43,7 +43,7 @@ impl JobBuilder { /// /// Under the hood, the method will call `Utc::now` and add the provided `ttl` duration. /// You can use this setter when you have a duration rather than some exact date and time, - /// expected by [`expires_at`](struct.JobBuilder.html#method.expires_at) setter. + /// expected by [`expires_at`](JobBuilder::expires_at) setter. /// Example usage: /// ``` /// # use faktory::JobBuilder; @@ -74,7 +74,7 @@ impl JobBuilder { /// Remove unique lock for this job right before the job starts executing. /// /// Another job with the same kind-args-queue combination will be accepted by the Faktory server - /// after the period specified in [`unique_for`](struct.JobBuilder.html#method.unique_for) has finished + /// after the period specified in [`unique_for`](JobBuilder::unique_for) has finished /// _or_ after this job has been been consumed (i.e. its execution has ***started***). pub fn unique_until_start(&mut self) -> &mut Self { self.add_to_custom_data("unique_until", "start") @@ -84,7 +84,7 @@ impl JobBuilder { /// /// Sets `unique_until` on the Job's custom hash to `success`, which is Faktory's default. 
/// Another job with the same kind-args-queue combination will be accepted by the Faktory server - /// after the period specified in [`unique_for`](struct.JobBuilder.html#method.unique_for) has finished + /// after the period specified in [`unique_for`](JobBuilder::unique_for) has finished /// _or_ after this job has been been ***successfully*** processed. pub fn unique_until_success(&mut self) -> &mut Self { self.add_to_custom_data("unique_until", "success") @@ -136,13 +136,11 @@ impl Display for JobState { /// Info on job execution progress (retrieved). /// -/// The tracker is guaranteed to get the following details: the job's id (though -/// they should know it beforehand in order to be ably to track the job), its last -/// known state (e.g., "enqueued", "working", "success", "unknown"), and the date and time -/// the job was last updated. Additionally, information on what's going on with the job -/// ([desc](struct.ProgressUpdate.html#structfield.desc)) and completion percentage -/// ([percent](struct.ProgressUpdate.html#structfield.percent)) may be available, -/// if the worker provided those details. +/// The tracker is guaranteed to get the following details: the job's id (though they should +/// know it beforehand in order to be ably to track the job), its last known [`state`](JobState), and +/// the date and time the job was last updated. Additionally, arbitrary information on what's going +/// on with the job ([`desc`](ProgressUpdate::desc)) and the job's completion percentage +/// ([`percent`](ProgressUpdate::percent)) may be available, if the worker has provided those details. #[derive(Debug, Clone, Deserialize)] pub struct Progress { /// Id of the tracked job. @@ -162,6 +160,13 @@ pub struct Progress { pub desc: Option, } +impl Progress { + /// Create an instance of `ProgressUpdate` for the job updating its completion percentage. 
+ pub fn update(&self, percent: u8) -> ProgressUpdate { + set_progress(&self.jid, percent) + } +} + /// Info on job execution progress (sent). /// /// In Enterprise Faktory, a client executing a job can report on the execution @@ -203,18 +208,7 @@ impl ProgressUpdate { /// Equivalent to creating a [new](struct.ProgressUpdateBuilder.html#method.new) /// `ProgressUpdateBuilder`. pub fn builder(jid: impl Into) -> ProgressUpdateBuilder { - ProgressUpdateBuilder { - jid: Some(jid.into()), - ..ProgressUpdateBuilder::create_empty() - } - } - - /// Create a new instance of `ProgressUpdate`. - /// - /// While job ID is specified at `ProgressUpdate`'s creation time, - /// the rest of the [fields](struct.ProgressUpdate.html) are defaulted to _None_. - pub fn new(jid: impl Into) -> ProgressUpdate { - ProgressUpdateBuilder::new(jid).build() + ProgressUpdateBuilder::new(jid) } } @@ -225,7 +219,7 @@ impl ProgressUpdateBuilder { .expect("All required fields have been set.") } - /// Create a new instance of 'JobBuilder' + /// Create a new instance of `JobBuilder` pub fn new(jid: impl Into) -> ProgressUpdateBuilder { ProgressUpdateBuilder { jid: Some(jid.into()), @@ -234,6 +228,11 @@ impl ProgressUpdateBuilder { } } +/// Create an instance of `ProgressUpdate` for the job specifying its completion percentage. 
+pub fn set_progress(jid: impl Into, percent: u8) -> ProgressUpdate { + ProgressUpdate::builder(jid).percent(percent).build() +} + // ---------------------------------------------- use super::FaktoryCommand; diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 0b1311af..2698e5ed 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -432,13 +432,23 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { c.register("order", move |job| -> io::Result<_> { // trying to set progress on a community edition of Faktory will give: // 'an internal server error occurred: tracking subsystem is only available in Faktory Enterprise' - let result = t_captured.lock().expect("lock acquired").set_progress( - ProgressUpdateBuilder::new(&job_id_captured) - .desc("Still processing...".to_owned()) - .percent(32) - .build(), - ); - assert!(result.is_ok()); + assert!(t_captured + .lock() + .expect("lock acquired") + .set_progress( + ProgressUpdate::builder(&job_id_captured) + .desc("Still processing...".to_owned()) + .percent(32) + .build(), + ) + .is_ok()); + // Let's update the progress once again, to check the 'set_progress' shortcut: + assert!(t_captured + .lock() + .unwrap() + .set_progress(set_progress(&job_id_captured, 33)) + .is_ok()); + // let's sleep for a while ... 
thread::sleep(time::Duration::from_secs(2)); @@ -511,6 +521,11 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert!(progress.updated_at.is_none()); assert!(progress.percent.is_none()); assert!(progress.desc.is_none()); + + if progress.percent != Some(100) { + let upd = progress.update(100); + assert!(t.lock().unwrap().set_progress(upd).is_ok()) + } } #[test] From 2dd891f8f598d4022c8bb0825159af2c4d405fdc Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 10:50:38 +0500 Subject: [PATCH 24/62] Add 'update_percet' method --- src/proto/single/ent.rs | 9 ++++++++- tests/real/enterprise.rs | 16 +++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index cba1dac0..600ff75a 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -162,9 +162,16 @@ pub struct Progress { impl Progress { /// Create an instance of `ProgressUpdate` for the job updating its completion percentage. - pub fn update(&self, percent: u8) -> ProgressUpdate { + /// + /// This will copy the [`desc`](Progress::desc) from the `Progress` (retrieved) over to `ProgressUpdate` (to be sent). + pub fn update_percent(&self, percent: u8) -> ProgressUpdate { set_progress(&self.jid, percent) } + + /// Create an instance of `ProgressUpdateBuilder` for the job. + pub fn update_builder(&self) -> ProgressUpdateBuilder { + ProgressUpdateBuilder::new(&self.jid) + } } /// Info on job execution progress (sent). 
diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 2698e5ed..0deb1055 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -522,8 +522,22 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert!(progress.percent.is_none()); assert!(progress.desc.is_none()); + let upd = progress + .update_builder() + .desc("Final stage.".to_string()) + .percent(99) + .build(); + assert!(t.lock().unwrap().set_progress(upd).is_ok()); + + let progress = t + .lock() + .unwrap() + .get_progress(job_id) + .expect("Retrieved progress update over the wire once again") + .expect("Some progress"); + if progress.percent != Some(100) { - let upd = progress.update(100); + let upd = progress.update_percent(100); assert!(t.lock().unwrap().set_progress(upd).is_ok()) } } From d745549914ac614ba6220e9b7bebb222df399e62 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 11:03:30 +0500 Subject: [PATCH 25/62] Add 'update_percent' method --- src/proto/single/ent.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 600ff75a..3eb11add 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -165,7 +165,10 @@ impl Progress { /// /// This will copy the [`desc`](Progress::desc) from the `Progress` (retrieved) over to `ProgressUpdate` (to be sent). pub fn update_percent(&self, percent: u8) -> ProgressUpdate { - set_progress(&self.jid, percent) + ProgressUpdate::builder(&self.jid) + .desc(self.desc.clone()) + .percent(percent) + .build() } /// Create an instance of `ProgressUpdateBuilder` for the job.
From 54f78f6b938ddce35ecc0213799675a1044aeb33 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 11:05:42 +0500 Subject: [PATCH 26/62] Add 'update_percent' method --- tests/real/enterprise.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 0deb1055..712f3a61 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -467,8 +467,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { _ => panic!("expected job's state to be 'working'"), } assert!(result.updated_at.is_some()); - assert_eq!(result.desc, Some("Still processing...".to_owned())); - assert_eq!(result.percent, Some(32)); + assert_eq!(result.percent, Some(33)); // considering the job done Ok(eprintln!("{:?}", job)) }); @@ -538,6 +537,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { if progress.percent != Some(100) { let upd = progress.update_percent(100); + assert_eq!(upd.desc, progress.desc); assert!(t.lock().unwrap().set_progress(upd).is_ok()) } } From 2f7231deacd03c667d3e90a928ca5cd596a73866 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 11:11:38 +0500 Subject: [PATCH 27/62] Make get_random_wid and get_random_jid public within crate --- src/proto/mod.rs | 4 ++-- src/proto/single/mod.rs | 3 ++- src/proto/single/utils.rs | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index ac8c988f..3b14042c 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -12,7 +12,7 @@ mod single; // commands that users can issue pub use self::single::{ - gen_random_wid, Ack, Fail, Heartbeat, Info, Job, JobBuilder, Push, QueueAction, QueueControl, + Ack, Fail, Heartbeat, Info, Job, JobBuilder, Push, QueueAction, QueueControl, }; #[cfg(feature = "ent")] @@ -204,7 +204,7 @@ impl Client { .pid .unwrap_or_else(|| unsafe { getpid() } as usize); self.opts.pid = Some(pid); - let wid =
self.opts.wid.clone().unwrap_or_else(gen_random_wid); + let wid = self.opts.wid.clone().unwrap_or_else(single::gen_random_wid); self.opts.wid = Some(wid); hello.hostname = Some(self.opts.hostname.clone().unwrap()); diff --git a/src/proto/single/mod.rs b/src/proto/single/mod.rs index 811be23b..69e75bf8 100644 --- a/src/proto/single/mod.rs +++ b/src/proto/single/mod.rs @@ -15,7 +15,8 @@ use crate::error::Error; pub use self::cmd::*; pub use self::resp::*; -pub use self::utils::gen_random_wid; + +pub(crate) use self::utils::gen_random_wid; const JOB_DEFAULT_QUEUE: &str = "default"; const JOB_DEFAULT_RESERVED_FOR_SECS: usize = 600; diff --git a/src/proto/single/utils.rs b/src/proto/single/utils.rs index 8c7651dd..98456980 100644 --- a/src/proto/single/utils.rs +++ b/src/proto/single/utils.rs @@ -11,11 +11,11 @@ fn gen_random_id(length: usize) -> String { .collect() } -pub fn gen_random_jid() -> String { +pub(crate) fn gen_random_jid() -> String { gen_random_id(JOB_ID_LENGTH) } -pub fn gen_random_wid() -> String { +pub(crate) fn gen_random_wid() -> String { gen_random_id(WORKER_ID_LENGTH) } From 120978b0439e1f2eff29361735283e421bbf4be9 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 11:18:56 +0500 Subject: [PATCH 28/62] Update tests for 'update_percent' shortcut --- tests/real/enterprise.rs | 54 +++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 712f3a61..99a18ac8 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -477,21 +477,45 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { .expect("Successfully ran a handshake with 'Faktory'"); assert_had_one!(&mut c, "test_tracker_can_send_progress_update"); - let result = t + let progress = t .lock() .expect("lock acquired successfully") .get_progress(job_id.clone()) .expect("Retrieved progress update over the wire once again") .expect("Some progress"); - 
assert_eq!(result.jid, job_id); + assert_eq!(progress.jid, job_id); // 'Faktory' will be keeping last known update for at least 30 minutes: - assert_eq!(result.desc, Some("Still processing...".to_owned())); - assert_eq!(result.percent, Some(32)); + assert_eq!(progress.percent, Some(33)); // But it actually knows the job's real status, since the consumer (worker) // informed it immediately after finishing with the job: - assert_eq!(result.state.to_string(), "success"); + assert_eq!(progress.state.to_string(), "success"); + + // Let's update the status once again to verify the 'update_builder' method + // on the `Progress` struct works as expected: + let upd = progress + .update_builder() + .desc("Final stage.".to_string()) + .percent(99) + .build(); + assert!(t.lock().unwrap().set_progress(upd).is_ok()); + + let progress = t + .lock() + .unwrap() + .get_progress(job_id) + .expect("Retrieved progress update over the wire once again") + .expect("Some progress"); + + if progress.percent != Some(100) { + let upd = progress.update_percent(100); + assert_eq!(upd.desc, progress.desc); + assert!(t.lock().unwrap().set_progress(upd).is_ok()) + } + + // NB! The following should be failing if we decide to make all the jobs + // trackable by default in the Ent Faltory. // What about 'ordinary' job ? 
let job_id = job_ordinary.id().to_owned().clone(); @@ -520,26 +544,6 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert!(progress.updated_at.is_none()); assert!(progress.percent.is_none()); assert!(progress.desc.is_none()); - - let upd = progress - .update_builder() - .desc("Final stage.".to_string()) - .percent(99) - .build(); - assert!(t.lock().unwrap().set_progress(upd).is_ok()); - - let progress = t - .lock() - .unwrap() - .get_progress(job_id) - .expect("Retrieved progress update over the wire once again") - .expect("Some progress"); - - if progress.percent != Some(100) { - let upd = progress.update_percent(100); - assert_eq!(upd.desc, progress.desc); - assert!(t.lock().unwrap().set_progress(upd).is_ok()) - } } #[test] From 01fd74430d874de585c1ca95288fbcbf0f6cdffa Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 11:45:30 +0500 Subject: [PATCH 29/62] Make jobs trackable by default --- src/proto/single/mod.rs | 29 ++++++++++------------------- tests/real/enterprise.rs | 9 ++++----- 2 files changed, 14 insertions(+), 24 deletions(-) diff --git a/src/proto/single/mod.rs b/src/proto/single/mod.rs index 69e75bf8..704e9122 100644 --- a/src/proto/single/mod.rs +++ b/src/proto/single/mod.rs @@ -180,28 +180,19 @@ impl JobBuilder { } /// Builds a new [`Job`] from the parameters of this builder. - pub fn build(&self) -> Job { + /// + /// For Enterprise edition of Faktory builds a new _trackable_ `Job`. + /// In Enterprise Faktory, a progress update can be sent and received only for the jobs + /// that have been explicitly marked as trackable via `"track":1` in the job's custom hash. + /// In case you have a reason to opt out of tracking, either unset (remove) the "track" on + /// the resulted job's [`custom`](Job::custom) hash or set to 0.
+ pub fn build(&mut self) -> Job { + if cfg!(feature = "ent") { + self.add_to_custom_data("track", 1); + } self.try_build() .expect("All required fields have been set.") } - - /// Builds a new _trackable_ `Job``. - /// - /// Progress update can be sent and received only for the jobs that have - /// been explicitly marked as trackable via `"track":1` in the job's - /// custom hash. - /// ``` - /// use faktory::JobBuilder; - /// - /// let _job = JobBuilder::new("order") - /// .args(vec!["ISBN-13:9781718501850"]) - /// .build_trackable(); - /// ``` - #[cfg(feature = "ent")] - pub fn build_trackable(&mut self) -> Job { - self.add_to_custom_data("track", 1); - self.build() - } } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 99a18ac8..c0c9f402 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -415,12 +415,14 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { let job_tackable = JobBuilder::new("order") .args(vec![Value::from("ISBN-13:9781718501850")]) .queue("test_tracker_can_send_progress_update") - .build_trackable(); + .build(); - let job_ordinary = JobBuilder::new("order") + let mut job_ordinary = JobBuilder::new("order") .args(vec![Value::from("ISBN-13:9781718501850")]) .queue("test_tracker_can_send_progress_update") .build(); + // NB! Jobs are trackable by default, so we need to unset the "track" flag. + assert_eq!(job_ordinary.custom.remove("track"), Some(Value::from(1))); // let's remember this job's id: let job_id = job_tackable.id().to_owned(); @@ -514,9 +516,6 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert!(t.lock().unwrap().set_progress(upd).is_ok()) } - // NB! The following should be failing if we decide to make all the jobs - // trackable by default in the Ent Faltory. - // What about 'ordinary' job ? 
let job_id = job_ordinary.id().to_owned().clone(); From c8b74502ab77732f30807e24cf6c85d1335fefa5 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Fri, 2 Feb 2024 12:04:38 +0500 Subject: [PATCH 30/62] Make jobs trackable by default - update tests --- src/proto/batch/mod.rs | 18 +++++++++++++++--- src/proto/single/mod.rs | 9 ++++++++- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 26071317..a3991aa9 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -347,7 +347,11 @@ mod test { .with_success_callback(prepare_test_job("thumbnail_clean_up".into())), ) .unwrap(); - let expected = r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; + let expected = if cfg!(feature = "ent") { r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0,"custom":{"track":1}}}"# } else { r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"# }; assert_eq!(got, expected); // without description and with on complete callback: got = serde_json::to_string( &BatchBuilder::new().with_complete_callback(prepare_test_job("thumbnail_info".into())), ) .unwrap(); - let expected = r#"{"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; + let expected = if cfg!(feature = "ent") {
r#"{"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0,"custom":{"track":1}}}"# + } else { + r#"{"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"# + }; assert_eq!(got, expected); // with description and both callbacks: @@ -368,7 +376,11 @@ mod test { ), ) .unwrap(); - let expected = r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0},"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"#; + let expected = if cfg!(feature = "ent") { + r#"{"description":"Image processing workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0,"custom":{"track":1}},"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0,"custom":{"track":1}}}"# + } else { + r#"{"description":"Image processing 
workload","success":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_clean_up","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0},"complete":{"jid":"LFluKy1Baak83p54","queue":"default","jobtype":"thumbnail_info","args":[],"created_at":"2023-12-22T07:00:52.546258624Z","reserve_for":600,"retry":25,"priority":5,"backtrace":0}}"# + }; assert_eq!(got, expected); } } diff --git a/src/proto/single/mod.rs b/src/proto/single/mod.rs index 704e9122..96ad2779 100644 --- a/src/proto/single/mod.rs +++ b/src/proto/single/mod.rs @@ -295,7 +295,14 @@ mod test { assert_eq!(job.priority, Some(JOB_DEFAULT_PRIORITY)); assert_eq!(job.backtrace, Some(JOB_DEFAULT_BACKTRACE)); assert!(job.failure.is_none()); - assert_eq!(job.custom, HashMap::default()); + + if cfg!(feature = "ent") { + let mut custom = HashMap::new(); + custom.insert("track".into(), 1.into()); + assert_eq!(job.custom, custom) + } else { + assert_eq!(job.custom, HashMap::default()); + } let job = JobBuilder::new(job_kind).build(); assert!(job.args.is_empty()); From 0edbd839fbb2b460bc7644466e9d3b8d656ecfde Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 21:06:25 +0500 Subject: [PATCH 31/62] Add 'CallbackState' enum. 
Use for complete and success callback type --- src/lib.rs | 2 +- src/proto/batch/mod.rs | 40 +++++++++++++++++++++++--- src/proto/mod.rs | 3 +- src/proto/single/ent.rs | 7 ++--- tests/real/enterprise.rs | 62 +++++++++++++++++++++++++--------------- 5 files changed, 80 insertions(+), 34 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index c5db49fe..ce81c331 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,7 +81,7 @@ pub use crate::proto::{Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub use crate::proto::{ - set_progress, Batch, BatchBuilder, BatchHandle, BatchStatus, JobState, Progress, + set_progress, Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, }; #[cfg(feature = "ent")] diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index a3991aa9..1e6a829f 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -83,7 +83,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// You can retieve the batch status using a [`Tracker`](struct.Tracker.html): /// ```no_run /// # use faktory::Error; -/// # use faktory::{Producer, Job, Batch, Tracker}; +/// # use faktory::{Producer, Job, Batch, Tracker, CallbackState}; /// let mut prod = Producer::connect(None)?; /// let job = Job::builder("job_type").build(); /// let cb_job = Job::builder("callback_job_type").build(); @@ -101,7 +101,11 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// assert_eq!(s.total, 1); /// assert_eq!(s.pending, 1); /// assert_eq!(s.description, Some("Batch description".into())); -/// assert_eq!(s.complete_callback_state, ""); // has not been queued; +/// +/// match s.complete_callback_state { +/// CallbackState::Pending => {}, +/// _ => panic!("The jobs of this batch have not executed, so the callback job is expected to _not_ have fired"), +/// } /// # Ok::<(), Error>(()) /// ``` #[derive(Builder, Debug, Serialize)] @@ -235,6 +239,34 @@ 
impl<'a, S: Read + Write> BatchHandle<'a, S> { } } +// Not documented, but existing de fakto and also mentioned in the official client +// https://github.com/contribsys/faktory/blob/main/client/batch.go#L17-L19 +/// State of a `callback` job of a [`Batch`]. +#[derive(Debug, Clone, Deserialize)] +pub enum CallbackState { + /// Not enqueued yet. + #[serde(rename = "")] + Pending, + /// Enqueued by the server, because the jobs belonging to this batch have finished executing. + #[serde(rename = "1")] + Enqueued, + /// The enqueued callback job has been consumed. + #[serde(rename = "2")] + FinishedOk, +} + +impl std::fmt::Display for CallbackState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use CallbackState::*; + let s = match self { + Pending => "Pending", + Enqueued => "Enqueued", + FinishedOk => "FinishedOk", + }; + write!(f, "{}", s) + } +} + /// Batch status retrieved from Faktory server. #[derive(Deserialize, Debug)] pub struct BatchStatus { @@ -267,13 +299,13 @@ pub struct BatchStatus { /// /// See [with_complete_callback](struct.BatchBuilder.html#method.with_complete_callback). #[serde(rename = "complete_st")] - pub complete_callback_state: String, + pub complete_callback_state: CallbackState, /// State of the `success` callback. /// /// See [with_success_callback](struct.BatchBuilder.html#method.with_success_callback). 
#[serde(rename = "success_st")] - pub success_callback_state: String, + pub success_callback_state: CallbackState, } #[cfg(feature = "ent")] diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 3b14042c..cfe55386 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -24,7 +24,8 @@ pub use self::single::ent::{ mod batch; #[cfg(feature = "ent")] pub use batch::{ - Batch, BatchBuilder, BatchHandle, BatchStatus, CommitBatch, GetBatchStatus, OpenBatch, + Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, CommitBatch, GetBatchStatus, + OpenBatch, }; pub(crate) fn get_env_url() -> String { diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 3eb11add..d8dfb081 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -119,7 +119,7 @@ pub enum JobState { Dead, } -impl Display for JobState { +impl std::fmt::Display for JobState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use JobState::*; let s = match self { @@ -246,10 +246,7 @@ pub fn set_progress(jid: impl Into, percent: u8) -> ProgressUpdate { // ---------------------------------------------- use super::FaktoryCommand; -use std::{ - fmt::{Debug, Display}, - io::Write, -}; +use std::{fmt::Debug, io::Write}; #[derive(Debug, Clone)] pub enum Track { diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index c0c9f402..f3e16c56 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -608,8 +608,8 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.failed, 0); // Docs do not mention it, but the golang client does: // https://github.com/contribsys/faktory/blob/main/client/batch.go#L17-L19 - assert_eq!(s.success_callback_state, ""); // we did not even provide the 'success' callback - assert_eq!(s.complete_callback_state, ""); // the 'complete' callback is pending + assert_eq!(s.success_callback_state.to_string(), "Pending"); // we did not even provide the 'success' callback + assert_eq!(s.complete_callback_state.to_string(), 
"Pending"); // consume and execute job 1 ... assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); @@ -658,7 +658,7 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.total, 3); assert_eq!(s.pending, 0); assert_eq!(s.failed, 0); - assert_eq!(s.complete_callback_state, "1"); // callback has been enqueued!! + assert_eq!(s.complete_callback_state.to_string(), "Enqueued"); // let's now successfully consume from the "callback" queue: assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); @@ -670,7 +670,7 @@ fn test_batch_of_jobs_can_be_initiated() { .expect("...and it's not none"); // this is because we have just consumed and executed 2 of 3 jobs: - assert_eq!(s.complete_callback_state, "2"); // means calledback successfully executed + assert_eq!(s.complete_callback_state.to_string(), "FinishedOk"); } #[test] @@ -805,7 +805,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 3); assert_eq!(s.pending, 3); - assert_eq!(s.success_callback_state, ""); // has not been queued; + assert_eq!(s.success_callback_state.to_string(), "Pending"); // consume those 3 jobs successfully; for _ in 0..3 { @@ -826,7 +826,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { assert_eq!(s.total, 3); assert_eq!(s.pending, 0); assert_eq!(s.failed, 0); - assert_eq!(s.success_callback_state, ""); // not just yet; + assert_eq!(s.success_callback_state.to_string(), "Pending"); // not just yet; // to double-check, let's assert the success callbacks queue is empty: assert_is_empty!( @@ -839,7 +839,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { // ... 
and check batch status: let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); - assert_eq!(s.success_callback_state, "1"); // callback has been queued; + assert_eq!(s.success_callback_state.to_string(), "Enqueued"); // finally, let's consume from the success callbacks queue ... assert_had_one!( @@ -849,7 +849,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { // ... and see the final status: let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); - assert_eq!(s.success_callback_state, "2"); // callback successfully executed; + assert_eq!(s.success_callback_state.to_string(), "FinishedOk"); } #[test] @@ -860,22 +860,24 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let url = learn_faktory_url(); let mut p = Producer::connect(Some(&url)).unwrap(); let mut t = Tracker::connect(Some(&url)).unwrap(); - let jobtype = "callback_jobtype"; let q_name = "test_callback_will_be_queued_upon_commit_even_if_batch_is_empty"; - let mut callbacks = some_jobs(jobtype, q_name, 2); + let complete_cb_jobtype = "complete_callback_jobtype"; + let success_cb_jobtype = "success_cb_jobtype"; + let complete_cb = some_jobs(complete_cb_jobtype, q_name, 1).next().unwrap(); + let success_cb = some_jobs(success_cb_jobtype, q_name, 1).next().unwrap(); let b = p .start_batch( Batch::builder() .description("Orders processing workload") - .with_callbacks(callbacks.next().unwrap(), callbacks.next().unwrap()), + .with_callbacks(success_cb, complete_cb), ) .unwrap(); let bid = b.id().to_owned(); let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); // no jobs in the batch; - assert_eq!(s.success_callback_state, ""); // not queued; - assert_eq!(s.complete_callback_state, ""); // not queued; + assert_eq!(s.success_callback_state.to_string(), "Pending"); + assert_eq!(s.complete_callback_state.to_string(), "Pending"); b.commit().unwrap(); @@ -887,26 +889,37 @@ fn 
test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { // The docs say "If you don't push any jobs into the batch, any callbacks will fire immediately upon BATCH COMMIT." // and "the success callback for a batch will always enqueue after the complete callback" - assert_eq!(s.complete_callback_state, "1"); // queued - assert_eq!(s.success_callback_state, ""); // not queued + assert_eq!(s.complete_callback_state.to_string(), "Enqueued"); + assert_eq!(s.success_callback_state.to_string(), "Pending"); let mut c = ConsumerBuilder::default(); - c.register(jobtype, move |_job| -> io::Result<_> { Ok(()) }); + c.register(complete_cb_jobtype, move |_job| -> io::Result<_> { Ok(()) }); + c.register(success_cb_jobtype, move |_job| -> io::Result<_> { + Err(io::Error::new( + io::ErrorKind::Other, + "we want this one to fail to test the 'CallbackState' behavior", + )) + }); let mut c = c.connect(Some(&url)).unwrap(); assert_had_one!(&mut c, q_name); // complete callback consumed let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); - assert_eq!(s.complete_callback_state, "2"); // successfully executed - assert_eq!(s.success_callback_state, "1"); // queued - + match s.complete_callback_state { + CallbackState::FinishedOk => {} + _ => panic!("Expected the callback to have been successfully executed"), + } + match s.success_callback_state { + CallbackState::Enqueued => {} + _ => panic!("Expected the callback to have been enqueued, since the `complete` callback has already executed"), + } assert_had_one!(&mut c, q_name); // success callback consumed let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); - assert_eq!(s.complete_callback_state, "2"); // successfully executed - assert_eq!(s.success_callback_state, "2"); // successfully executed + assert_eq!(s.complete_callback_state.to_string(), "FinishedOk"); + assert_eq!(s.success_callback_state.to_string(), "FinishedOk"); } #[test] @@ -1009,7 +1022,7 @@ fn 
test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { let s = t.get_batch_status(nested_bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); assert_eq!(s.parent_bid, Some(bid)); // this is really our child batch - assert_eq!(s.complete_callback_state, "1"); // has been enqueud + assert_eq!(s.complete_callback_state.to_string(), "Enqueued"); // Subtest 3 result: // We managed to open an already committed batch "from outside" and the server accepted @@ -1036,7 +1049,10 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { b.commit().unwrap(); let s = t.get_batch_status(nested_bid.clone()).unwrap().unwrap(); - assert_eq!(s.complete_callback_state, "1"); // again, it has been enqueud ... + match s.complete_callback_state { + CallbackState::Enqueued => {} + _ => panic!("Expected the callback to have been enqueued"), + } assert_eq!(s.pending, 2); // ... though there are pending jobs assert_eq!(s.total, 2); From 06d208eeb2249a7a160e57071e2ef2001e4f8cc8 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 21:31:05 +0500 Subject: [PATCH 32/62] Bring 'Tracker' into scope for docs --- src/proto/batch/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 1e6a829f..461c9521 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -1,3 +1,6 @@ +#[cfg(doc)] +use crate::Tracker; + use crate::{Error, Job, Producer}; use chrono::{DateTime, Utc}; use derive_builder::Builder; @@ -80,7 +83,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// effectively building a pipeline this way, since the Faktory guarantees that callback jobs will not be queued unless /// the batch gets committed. 
/// -/// You can retieve the batch status using a [`Tracker`](struct.Tracker.html): +/// You can retieve the batch status using a [`Tracker`]: /// ```no_run /// # use faktory::Error; /// # use faktory::{Producer, Job, Batch, Tracker, CallbackState}; From e13f0d6eb98166e73c915b99ee8300c2e967a6f7 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 21:43:44 +0500 Subject: [PATCH 33/62] Update assertion on batch test --- tests/real/enterprise.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index f3e16c56..6d280893 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -919,7 +919,10 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); assert_eq!(s.complete_callback_state.to_string(), "FinishedOk"); - assert_eq!(s.success_callback_state.to_string(), "FinishedOk"); + // Still `Enqueued` due to the fact that it was not finished with success. + // Had we registered a handler for `success_cb_jobtype` returing Ok(()) rather then Err(), + // the state would be `FinishedOk` just like it's the case with the `complete` callback. + assert_eq!(s.success_callback_state.to_string(), "Enqueued"); } #[test] From d10ceee3488077d3e2e095dd1f69b302ab19016a Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 22:45:11 +0500 Subject: [PATCH 34/62] Make BatchHandle::add return option of old bid --- src/proto/batch/mod.rs | 13 ++++++++++--- tests/real/enterprise.rs | 7 ++++--- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 461c9521..20eb3f1d 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -220,9 +220,16 @@ impl<'a, S: Read + Write> BatchHandle<'a, S> { } /// Add the given job to the batch. 
- pub fn add(&mut self, mut job: Job) -> Result<(), Error> { - job.custom.insert("bid".into(), self.bid.clone().into()); - self.prod.enqueue(job) + /// + /// Should the submitted job - for whatever reason - already have a `bid` (batch ID) specified + /// in its custom hash, this value will be overwritten by the ID of the batch this job is being added to + /// with the old value returned as `Some()`. + pub fn add(&mut self, mut job: Job) -> Result, Error> { + let bid = job + .custom + .insert("bid".into(), self.bid.clone().into()) + .map(|bid| bid.to_string()); + self.prod.enqueue(job).map(|_| bid) } /// Initiate a child batch of jobs. diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 6d280893..59e98ec7 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -568,6 +568,7 @@ fn test_batch_of_jobs_can_be_initiated() { let job_3 = Job::builder("thumbnail") .args(vec!["path/to/original/image3"]) .queue("test_batch_of_jobs_can_be_initiated") + .add_to_custom_data("bid", "check-check") .build(); let cb_job = Job::builder("clean_up") @@ -585,9 +586,9 @@ fn test_batch_of_jobs_can_be_initiated() { // let's remember batch id: let bid = b.id().to_string(); - b.add(job_1).unwrap(); - b.add(job_2).unwrap(); - b.add(job_3).unwrap(); + assert!(b.add(job_1).unwrap().is_none()); + assert!(b.add(job_2).unwrap().is_none()); + assert_eq!(b.add(job_3).unwrap().unwrap(), "check-check"); b.commit().unwrap(); // The batch has been committed, let's see its status: From ff3fd5f6ced069e717f93ada7a8508431def0742 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 23:07:34 +0500 Subject: [PATCH 35/62] Make BatchHandle::add return option of old bid as serde_json::Value --- src/proto/batch/mod.rs | 11 +++++------ tests/real/enterprise.rs | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 20eb3f1d..1abad69a 100644 --- a/src/proto/batch/mod.rs +++ 
b/src/proto/batch/mod.rs @@ -221,14 +221,13 @@ impl<'a, S: Read + Write> BatchHandle<'a, S> { /// Add the given job to the batch. /// - /// Should the submitted job - for whatever reason - already have a `bid` (batch ID) specified - /// in its custom hash, this value will be overwritten by the ID of the batch this job is being added to - /// with the old value returned as `Some()`. - pub fn add(&mut self, mut job: Job) -> Result, Error> { + /// Should the submitted job - for whatever reason - already have a `bid` key present in its custom hash, + /// this value will be overwritten by the ID of the batch this job is being added to with the old value + /// returned as `Some()`. + pub fn add(&mut self, mut job: Job) -> Result, Error> { let bid = job .custom - .insert("bid".into(), self.bid.clone().into()) - .map(|bid| bid.to_string()); + .insert("bid".into(), self.bid.clone().into()); self.prod.enqueue(job).map(|_| bid) } diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 59e98ec7..4cda7a99 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -588,7 +588,7 @@ fn test_batch_of_jobs_can_be_initiated() { assert!(b.add(job_1).unwrap().is_none()); assert!(b.add(job_2).unwrap().is_none()); - assert_eq!(b.add(job_3).unwrap().unwrap(), "check-check"); + assert_eq!(b.add(job_3).unwrap().unwrap(), Value::from("check-check")); b.commit().unwrap(); // The batch has been committed, let's see its status: From 489ca0f898954faf69e120d12f032ba8eda63652 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 23:21:38 +0500 Subject: [PATCH 36/62] Run formatter --- src/proto/batch/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 1abad69a..55e8e5fe 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -222,12 +222,10 @@ impl<'a, S: Read + Write> BatchHandle<'a, S> { /// Add the given job to the batch. 
/// /// Should the submitted job - for whatever reason - already have a `bid` key present in its custom hash, - /// this value will be overwritten by the ID of the batch this job is being added to with the old value + /// this value will be overwritten by the ID of the batch this job is being added to with the old value /// returned as `Some()`. pub fn add(&mut self, mut job: Job) -> Result, Error> { - let bid = job - .custom - .insert("bid".into(), self.bid.clone().into()); + let bid = job.custom.insert("bid".into(), self.bid.clone().into()); self.prod.enqueue(job).map(|_| bid) } From 18f15932720e614da7f0d3cc717053e4068d5d45 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 23:41:56 +0500 Subject: [PATCH 37/62] Double-check the batch callback status --- tests/real/enterprise.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 4cda7a99..a6504e45 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -895,12 +895,13 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let mut c = ConsumerBuilder::default(); c.register(complete_cb_jobtype, move |_job| -> io::Result<_> { Ok(()) }); - c.register(success_cb_jobtype, move |_job| -> io::Result<_> { - Err(io::Error::new( - io::ErrorKind::Other, - "we want this one to fail to test the 'CallbackState' behavior", - )) - }); + // c.register(success_cb_jobtype, move |_job| -> io::Result<_> { + // Err(io::Error::new( + // io::ErrorKind::Other, + // "we want this one to fail to test the 'CallbackState' behavior", + // )) + // }); + c.register(success_cb_jobtype, move |_job| -> io::Result<_> { Ok(()) }); let mut c = c.connect(Some(&url)).unwrap(); assert_had_one!(&mut c, q_name); // complete callback consumed From 1dfb8d15c7afc3aa0b4ff366b8bf90aecb945518 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sun, 4 Feb 2024 23:47:54 +0500 Subject: [PATCH 38/62] Restore assertions 
after verifying on CI --- tests/real/enterprise.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index a6504e45..10f87515 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -895,13 +895,13 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let mut c = ConsumerBuilder::default(); c.register(complete_cb_jobtype, move |_job| -> io::Result<_> { Ok(()) }); - // c.register(success_cb_jobtype, move |_job| -> io::Result<_> { - // Err(io::Error::new( - // io::ErrorKind::Other, - // "we want this one to fail to test the 'CallbackState' behavior", - // )) - // }); - c.register(success_cb_jobtype, move |_job| -> io::Result<_> { Ok(()) }); + c.register(success_cb_jobtype, move |_job| -> io::Result<_> { + Err(io::Error::new( + io::ErrorKind::Other, + "we want this one to fail to test the 'CallbackState' behavior", + )) + }); + let mut c = c.connect(Some(&url)).unwrap(); assert_had_one!(&mut c, q_name); // complete callback consumed From 0958e3141e11e42eb42d19244f204a668e1821a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Mon, 5 Feb 2024 21:24:28 +0500 Subject: [PATCH 39/62] Update src/producer/mod.rs Co-authored-by: Jon Gjengset --- src/producer/mod.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/producer/mod.rs b/src/producer/mod.rs index deb0ce3f..e77314dc 100644 --- a/src/producer/mod.rs +++ b/src/producer/mod.rs @@ -147,10 +147,7 @@ impl Producer { #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub fn open_batch(&mut self, bid: String) -> Result>, Error> { let bid = self.c.issue(&OpenBatch::from(bid))?.maybe_bid()?; - match bid { - Some(bid) => Ok(Some(BatchHandle::new(bid, self))), - None => Ok(None), - } + Ok(bid.map(|bid| BatchHandle::new(bid, self))) } #[cfg(feature = "ent")] From e2f57b617a7663cbc1940c2d4b5c48b3ea2b35bb Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Mon, 5 Feb 2024 21:25:10 +0500 Subject: [PATCH 40/62] Update src/proto/single/ent.rs Co-authored-by: Jon Gjengset --- src/proto/single/ent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index d8dfb081..ef66e628 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -226,7 +226,7 @@ impl ProgressUpdateBuilder { /// Builds an instance of ProgressUpdate. pub fn build(&self) -> ProgressUpdate { self.try_build() - .expect("All required fields have been set.") + .expect("Only jid is required, and it is set by all constructors.") } /// Create a new instance of `JobBuilder` From 0d721cfd5347227a314fecc90176dc034bcce614 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Mon, 5 Feb 2024 21:25:48 +0500 Subject: [PATCH 41/62] Update src/proto/single/mod.rs Co-authored-by: Jon Gjengset --- src/proto/single/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/single/mod.rs b/src/proto/single/mod.rs index 96ad2779..ebd2e6b3 100644 --- a/src/proto/single/mod.rs +++ b/src/proto/single/mod.rs @@ -185,7 +185,7 @@ impl JobBuilder { /// In Enterprise Faktory, a progress update can be sent and received only for the jobs /// that have been explicitly marked as trackable via `"track":1` in the job's custom hash. /// In case you have a reason to opt out of tracking, either unset (remove) the "track" on - /// the resulted job's [`custom`](Job::custom) hash or set to 0. + /// the resulted job's [`custom`](Job::custom) hash or set it to 0. 
pub fn build(&mut self) -> Job { if cfg!(feature = "ent") { self.add_to_custom_data("track", 1); From 5bd8616931df80775f8dfa3c81b3819df74a1835 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Mon, 5 Feb 2024 21:28:27 +0500 Subject: [PATCH 42/62] Update import grouping in producer module --- src/producer/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/producer/mod.rs b/src/producer/mod.rs index e77314dc..5eebcdd8 100644 --- a/src/producer/mod.rs +++ b/src/producer/mod.rs @@ -2,11 +2,12 @@ use crate::error::Error; use crate::proto::{ self, parse_provided_or_from_env, Client, Info, Job, Push, QueueAction, QueueControl, }; -#[cfg(feature = "ent")] -use crate::proto::{Batch, BatchHandle, CommitBatch, OpenBatch}; use std::io::prelude::*; use std::net::TcpStream; +#[cfg(feature = "ent")] +use crate::proto::{Batch, BatchHandle, CommitBatch, OpenBatch}; + /// `Producer` is used to enqueue new jobs that will in turn be processed by Faktory workers. /// /// # Connecting to Faktory From c9c8bd0c13fc09104ed3c0f3476a909c42beeb29 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Mon, 5 Feb 2024 21:51:17 +0500 Subject: [PATCH 43/62] Turn free func 'set_progress' into unbound set of ProgressUpdate --- src/lib.rs | 2 +- src/proto/mod.rs | 4 +--- src/proto/single/ent.rs | 10 +++++----- tests/real/enterprise.rs | 2 +- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ce81c331..20f76880 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,7 +81,7 @@ pub use crate::proto::{Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub use crate::proto::{ - set_progress, Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, JobState, Progress, + Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, }; #[cfg(feature = "ent")] diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 
cfe55386..b6b91b8e 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -16,9 +16,7 @@ pub use self::single::{ }; #[cfg(feature = "ent")] -pub use self::single::ent::{ - set_progress, JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, Track, -}; +pub use self::single::ent::{JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, Track}; #[cfg(feature = "ent")] mod batch; diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index ef66e628..067aee38 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -213,6 +213,11 @@ pub struct ProgressUpdate { } impl ProgressUpdate { + /// Create an instance of `ProgressUpdate` for the job with this ID specifying its completion percentage. + pub fn set(jid: impl Into, percent: u8) -> ProgressUpdate { + ProgressUpdate::builder(jid).percent(percent).build() + } + /// Create a new instance of `ProgressUpdateBuilder` with job ID already set. /// /// Equivalent to creating a [new](struct.ProgressUpdateBuilder.html#method.new) @@ -238,11 +243,6 @@ impl ProgressUpdateBuilder { } } -/// Create an instance of `ProgressUpdate` for the job specifying its completion percentage. -pub fn set_progress(jid: impl Into, percent: u8) -> ProgressUpdate { - ProgressUpdate::builder(jid).percent(percent).build() -} - // ---------------------------------------------- use super::FaktoryCommand; diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 10f87515..e9df43fe 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -448,7 +448,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert!(t_captured .lock() .unwrap() - .set_progress(set_progress(&job_id_captured, 33)) + .set_progress(ProgressUpdate::set(&job_id_captured, 33)) .is_ok()); // let's sleep for a while ... 
From 1560c21ff184925566df2c2f9ee593787a8a388c Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Wed, 7 Feb 2024 11:51:33 +0500 Subject: [PATCH 44/62] Add comment to batch guarantees subtest --- tests/real/enterprise.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index e9df43fe..d7abee37 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -1034,6 +1034,11 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { // a nested batch INSTEAD OF ERRORING BACK. // ############################ END OF SUBTEST 3 ####################################### + // The following subtest assertions should be adjusted once fixes are introduced to + // the Faktory as per https://github.com/contribsys/faktory/issues/465 + // The idea is we should not be able to push to a batch for which the server have already + // enqeued a callback. + // // ############################## SUBTEST 4 ############################################ // From the docs: // """Once a callback has enqueued for a batch, you may not add anything to the batch.""" From 4fd12a62738b7706022f92e01dbc4606ff2ab889 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Mon, 12 Feb 2024 14:37:09 +0500 Subject: [PATCH 45/62] Rm Tracker construct --- src/lib.rs | 7 +- src/proto/batch/mod.rs | 8 +- src/proto/mod.rs | 155 +++++++++++++++++++++++++++++++-------- src/tracker/mod.rs | 128 -------------------------------- tests/real/enterprise.rs | 14 ++-- 5 files changed, 138 insertions(+), 174 deletions(-) delete mode 100644 src/tracker/mod.rs diff --git a/src/lib.rs b/src/lib.rs index 20f76880..2b0c79a7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,7 +76,7 @@ pub use tls::TlsStream; pub use crate::consumer::{Consumer, ConsumerBuilder}; pub use crate::error::Error; pub use crate::producer::Producer; -pub use crate::proto::{Job, JobBuilder, Reconnect}; +pub use crate::proto::{Client, Job, JobBuilder, Reconnect}; #[cfg(feature = 
"ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] @@ -84,8 +84,3 @@ pub use crate::proto::{ Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, }; -#[cfg(feature = "ent")] -mod tracker; -#[cfg(feature = "ent")] -#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] -pub use crate::tracker::Tracker; diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 55e8e5fe..7ed063cb 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -1,5 +1,5 @@ #[cfg(doc)] -use crate::Tracker; +use crate::Client; use crate::{Error, Job, Producer}; use chrono::{DateTime, Utc}; @@ -83,10 +83,10 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// effectively building a pipeline this way, since the Faktory guarantees that callback jobs will not be queued unless /// the batch gets committed. /// -/// You can retieve the batch status using a [`Tracker`]: +/// You can retieve the batch status using a [`Client`]: /// ```no_run /// # use faktory::Error; -/// # use faktory::{Producer, Job, Batch, Tracker, CallbackState}; +/// # use faktory::{Producer, Job, Batch, Client, CallbackState}; /// let mut prod = Producer::connect(None)?; /// let job = Job::builder("job_type").build(); /// let cb_job = Job::builder("callback_job_type").build(); @@ -99,7 +99,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// b.add(job)?; /// b.commit()?; /// -/// let mut t = Tracker::connect(None)?; +/// let mut t = Client::connect_tracker(None)?; /// let s = t.get_batch_status(bid)?.unwrap(); /// assert_eq!(s.total, 1); /// assert_eq!(s.pending, 1); diff --git a/src/proto/mod.rs b/src/proto/mod.rs index b6b91b8e..a5e92649 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -1,3 +1,6 @@ +#[cfg(doc)] +use crate::{Consumer, Producer}; + use crate::error::{self, Error}; use bufstream::BufStream; use libc::getpid; @@ -79,6 +82,23 @@ impl Reconnect for TcpStream { } } +#[derive(Debug, Clone)] +pub(crate) enum 
ClientRole { + // `Client` in Faktory terms. We've got a dedicated construct - `Producer` - wrapping + // a client who has got `ClientRole::Producer` role. + Producer, + // `Worker` in Faktory terms. We've got a dedicated construct - `Consumer` - wrapping + // a client who has got `ClientRole::Consumer` role. + Consumer, + // Does not have a dedicated name in Faktory narrative. The tacker's functionality + // (setting and getting a job's execution progress, retrieving a batch's status) is what + // a `Client` and a `Worker` have in common, so we are not maintaining a dedicated construct + // for this case, rather exposing those methods on the Client directly. + // Note, this is only available in Enterprise Faktory. + #[cfg(feature = "ent")] + Tracker, +} + #[derive(Clone)] pub(crate) struct ClientOptions { /// Hostname to advertise to server. @@ -101,7 +121,7 @@ pub(crate) struct ClientOptions { /// Defaults to None, pub(crate) password: Option, - is_producer: bool, + role: ClientRole, } impl Default for ClientOptions { @@ -112,12 +132,59 @@ impl Default for ClientOptions { wid: None, labels: vec!["rust".to_string()], password: None, - is_producer: false, + role: ClientRole::Consumer, } } } -pub(crate) struct Client { +/// A construct used internally by [`Producer`] and [`Consumer`]. +/// Can be used directly for retrieving and updating information on a job's execution progress +/// (see [`Progress`] and [`ProgressUpdate`]), as well for retrieving a batch's status +/// from the Faktory server (see [`BatchStatus`]). +/// +/// Fetching a job's execution progress: +/// ```no_run +/// use faktory::{Client, JobState}; +/// let job_id = String::from("W8qyVle9vXzUWQOf"); +/// let mut tracker = Client::connect_tracker(None)?; +/// if let Some(progress) = tracker.get_progress(job_id)? { +/// match progress.state { +/// JobState::Success => { +/// # /* +/// ... +/// # */ +/// }, +/// # /* +/// ... 
+/// # */ +/// # _ => {}, +/// } +/// } +/// # Ok::<(), faktory::Error>(()) +/// ``` +/// Sending an update on a job's execution progress: +/// ```no_run +/// use faktory::{Client, ProgressUpdateBuilder}; +/// let jid = String::from("W8qyVle9vXzUWQOf"); +/// let mut tracker = Client::connect_tracker(None)?; +/// let progress = ProgressUpdateBuilder::new(&jid) +/// .desc("Almost done...".to_owned()) +/// .percent(99) +/// .build(); +/// tracker.set_progress(progress)?; +/// # Ok::<(), faktory::Error>(()) +///```` +/// Fetching a batch's status: +/// ```no_run +/// use faktory::Client; +/// let bid = String::from("W8qyVle9vXzUWQOg"); +/// let mut tracker = Client::connect_tracker(None)?; +/// if let Some(status) = tracker.get_batch_status(bid)? { +/// println!("This batch created at {}", status.created_at); +/// } +/// # Ok::<(), faktory::Error>(()) +/// ``` +pub struct Client { stream: BufStream, opts: ClientOptions, } @@ -131,7 +198,7 @@ where Client::new(s, self.opts.clone()) } - pub fn reconnect(&mut self) -> Result<(), Error> { + pub(crate) fn reconnect(&mut self) -> Result<(), Error> { let s = self.stream.get_ref().reconnect()?; self.stream = BufStream::new(s); self.init() @@ -151,7 +218,7 @@ impl Client { pub(crate) fn new_producer(stream: S, pwd: Option) -> Result, Error> { let opts = ClientOptions { password: pwd, - is_producer: true, + role: ClientRole::Producer, ..Default::default() }; Self::new(stream, opts) @@ -161,14 +228,10 @@ impl Client { pub(crate) fn new_tracker(stream: S, pwd: Option) -> Result, Error> { let opts = ClientOptions { password: pwd, + role: ClientRole::Tracker, ..Default::default() }; - let mut c = Client { - stream: BufStream::new(stream), - opts, - }; - c.init_tracker()?; - Ok(c) + Self::new(stream, opts) } } @@ -189,8 +252,8 @@ impl Client { } } - // fill in any missing options, and remember them for re-connect - if !self.opts.is_producer { + if let ClientRole::Consumer = self.opts.role { + // fill in any missing options, and 
remember them for re-connect let hostname = self .opts .hostname @@ -214,31 +277,63 @@ impl Client { single::write_command_and_await_ok(&mut self.stream, &hello) } +} - #[cfg(feature = "ent")] - fn init_tracker(&mut self) -> Result<(), Error> { - let hi = single::read_hi(&mut self.stream)?; +impl Drop for Client { + fn drop(&mut self) { + single::write_command(&mut self.stream, &single::End).unwrap(); + } +} - check_protocols_match(hi.version)?; +#[cfg(feature = "ent")] +#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] +impl Client { + /// Send information on a job's execution progress to Faktory. + pub fn set_progress(&mut self, upd: ProgressUpdate) -> Result<(), Error> { + let cmd = Track::Set(upd); + self.issue(&cmd)?.await_ok() + } - let mut hello = single::Hello::default(); + /// Fetch information on a job's execution progress from Faktory. + pub fn get_progress(&mut self, jid: String) -> Result, Error> { + let cmd = Track::Get(jid); + self.issue(&cmd)?.read_json() + } - // prepare password hash, if one expected by 'Faktory' - if hi.salt.is_some() { - if let Some(ref pwd) = self.opts.password { - hello.set_password(&hi, pwd); - } else { - return Err(error::Connect::AuthenticationNeeded.into()); - } - } + /// Fetch information on a batch of jobs execution progress. + pub fn get_batch_status(&mut self, bid: String) -> Result, Error> { + let cmd = GetBatchStatus::from(bid); + self.issue(&cmd)?.read_json() + } - single::write_command_and_await_ok(&mut self.stream, &hello) + /// Connect to a Faktory server with a non-standard stream. + /// + /// For a standard TCP stream use [`connect_tracker`](Client::connect_tracker). 
+ pub fn connect_tracker_with(stream: S, pwd: Option) -> Result, Error> { + Client::new_tracker(stream, pwd) } } -impl Drop for Client { - fn drop(&mut self) { - single::write_command(&mut self.stream, &single::End).unwrap(); +#[cfg(feature = "ent")] +#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] +impl Client { + /// Create new [`Client`] and connect to a Faktory server to perform tracking actions. + /// + /// If `url` is not given, will use the standard Faktory environment variables. Specifically, + /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address + /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the + /// server address. If the latter environment variable is not defined, the connection will be + /// made to + /// + /// ```text + /// tcp://localhost:7419 + /// ``` + pub fn connect_tracker(url: Option<&str>) -> Result, Error> { + let url = parse_provided_or_from_env(url)?; + let addr = host_from_url(&url); + let pwd = url.password().map(Into::into); + let stream = TcpStream::connect(addr)?; + Client::new_tracker(stream, pwd) } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs deleted file mode 100644 index 34a55027..00000000 --- a/src/tracker/mod.rs +++ /dev/null @@ -1,128 +0,0 @@ -use std::io::{Read, Write}; -use std::net::TcpStream; - -use crate::proto::{host_from_url, parse_provided_or_from_env, Client, GetBatchStatus, Track}; -use crate::{BatchStatus, Error, Progress, ProgressUpdate}; - -/// Used for retrieving and updating information on a job's execution progress -/// (see [`Progress`] and [`ProgressUpdate`]), as well for retrieving a batch's status -/// from the Faktory server (see [`BatchStatus`]). -/// -/// Fetching a job's execution progress: -/// ```no_run -/// use faktory::{Tracker, JobState}; -/// let job_id = String::from("W8qyVle9vXzUWQOf"); -/// let mut tracker = Tracker::connect(None)?; -/// if let Some(progress) = tracker.get_progress(job_id)? 
{ -/// match progress.state { -/// JobState::Success => { -/// # /* -/// ... -/// # */ -/// }, -/// # /* -/// ... -/// # */ -/// # _ => {}, -/// } -/// } -/// # Ok::<(), faktory::Error>(()) -/// ``` -/// Sending an update on a job's execution progress: -/// ```no_run -/// use faktory::{Tracker, ProgressUpdateBuilder}; -/// let jid = String::from("W8qyVle9vXzUWQOf"); -/// let mut tracker = Tracker::connect(None)?; -/// let progress = ProgressUpdateBuilder::new(&jid) -/// .desc("Almost done...".to_owned()) -/// .percent(99) -/// .build(); -/// tracker.set_progress(progress)?; -/// # Ok::<(), faktory::Error>(()) -///```` -/// Fetching a batch's status: -/// ```no_run -/// use faktory::Tracker; -/// let bid = String::from("W8qyVle9vXzUWQOg"); -/// let mut tracker = Tracker::connect(None)?; -/// if let Some(status) = tracker.get_batch_status(bid)? { -/// println!("This batch created at {}", status.created_at); -/// } -/// # Ok::<(), faktory::Error>(()) -/// ``` -pub struct Tracker { - c: Client, -} - -impl Tracker { - /// Create new tracker and connect to a Faktory server. - /// - /// If `url` is not given, will use the standard Faktory environment variables. Specifically, - /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address - /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the - /// server address. If the latter environment variable is not defined, the connection will be - /// made to - /// - /// ```text - /// tcp://localhost:7419 - /// ``` - pub fn connect(url: Option<&str>) -> Result, Error> { - let url = parse_provided_or_from_env(url)?; - let addr = host_from_url(&url); - let pwd = url.password().map(Into::into); - let stream = TcpStream::connect(addr)?; - Ok(Tracker { - c: Client::new_tracker(stream, pwd)?, - }) - } -} - -impl Tracker { - /// Connect to a Faktory server with a non-standard stream. 
- pub fn connect_with(stream: S, pwd: Option) -> Result, Error> { - Ok(Tracker { - c: Client::new_tracker(stream, pwd)?, - }) - } - - /// Send information on a job's execution progress to Faktory. - pub fn set_progress(&mut self, upd: ProgressUpdate) -> Result<(), Error> { - let cmd = Track::Set(upd); - self.c.issue(&cmd)?.await_ok() - } - - /// Fetch information on a job's execution progress from Faktory. - pub fn get_progress(&mut self, jid: String) -> Result, Error> { - let cmd = Track::Get(jid); - self.c.issue(&cmd)?.read_json() - } - - /// Fetch information on a batch of jobs execution progress. - pub fn get_batch_status(&mut self, bid: String) -> Result, Error> { - let cmd = GetBatchStatus::from(bid); - self.c.issue(&cmd)?.read_json() - } -} - -#[cfg(test)] -mod test { - use crate::proto::{host_from_url, parse_provided_or_from_env}; - - use super::Tracker; - use std::{env, net::TcpStream}; - - #[test] - fn test_trackers_created_ok() { - if env::var_os("FAKTORY_URL").is_none() || env::var_os("FAKTORY_ENT").is_none() { - return; - } - let _ = Tracker::connect(None).expect("tracker successfully instantiated and connected"); - - let url = parse_provided_or_from_env(None).expect("valid url"); - let host = host_from_url(&url); - let stream = TcpStream::connect(host).expect("connected"); - let pwd = url.password().map(String::from); - let _ = Tracker::connect_with(stream, pwd) - .expect("tracker successfully instantiated and connected"); - } -} diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index d7abee37..16499e3e 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -405,7 +405,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { let url = learn_faktory_url(); let t = Arc::new(Mutex::new( - Tracker::connect(Some(&url)).expect("job progress tracker created successfully"), + Client::connect_tracker(Some(&url)).expect("job progress tracker created successfully"), )); let t_captured = Arc::clone(&t); @@ -555,7 
+555,8 @@ fn test_batch_of_jobs_can_be_initiated() { c.register("thumbnail", move |_job| -> io::Result<_> { Ok(()) }); c.register("clean_up", move |_job| -> io::Result<_> { Ok(()) }); let mut c = c.connect(Some(&url)).unwrap(); - let mut t = Tracker::connect(Some(&url)).expect("job progress tracker created successfully"); + let mut t = + Client::connect_tracker(Some(&url)).expect("job progress tracker created successfully"); let job_1 = Job::builder("thumbnail") .args(vec!["path/to/original/image1"]) @@ -684,7 +685,8 @@ fn test_batches_can_be_nested() { let mut c = ConsumerBuilder::default(); c.register("jobtype", move |_job| -> io::Result<_> { Ok(()) }); let mut _c = c.connect(Some(&url)).unwrap(); - let mut t = Tracker::connect(Some(&url)).expect("job progress tracker created successfully"); + let mut t = + Client::connect_tracker(Some(&url)).expect("job progress tracker created successfully"); // Prepare some jobs: let parent_job1 = Job::builder("jobtype") @@ -774,7 +776,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { c.register("order", move |_job| -> io::Result<_> { Ok(()) }); c.register("order_clean_up", move |_job| -> io::Result<_> { Ok(()) }); let mut c = c.connect(Some(&url)).unwrap(); - let mut t = Tracker::connect(Some(&url)).unwrap(); + let mut t = Client::connect_tracker(Some(&url)).unwrap(); let mut jobs = some_jobs( "order", @@ -860,7 +862,7 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { skip_if_not_enterprise!(); let url = learn_faktory_url(); let mut p = Producer::connect(Some(&url)).unwrap(); - let mut t = Tracker::connect(Some(&url)).unwrap(); + let mut t = Client::connect_tracker(Some(&url)).unwrap(); let q_name = "test_callback_will_be_queued_upon_commit_even_if_batch_is_empty"; let complete_cb_jobtype = "complete_callback_jobtype"; let success_cb_jobtype = "success_cb_jobtype"; @@ -932,7 +934,7 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { skip_if_not_enterprise!(); 
let url = learn_faktory_url(); let mut p = Producer::connect(Some(&url)).unwrap(); - let mut t = Tracker::connect(Some(&url)).unwrap(); + let mut t = Client::connect_tracker(Some(&url)).unwrap(); let mut jobs = some_jobs("order", "test_batch_can_be_reopned_add_extra_jobs_added", 4); let mut callbacks = some_jobs( "order_clean_up", From 07ab901bd4bf10e80c1c525d7b3e7c40191861eb Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Mon, 12 Feb 2024 22:11:40 +0500 Subject: [PATCH 46/62] Re-use client logic --- src/consumer/mod.rs | 1 + src/producer/mod.rs | 14 +++----- src/proto/batch/mod.rs | 2 +- src/proto/mod.rs | 70 +++++++++------------------------------- tests/real/enterprise.rs | 14 ++++---- 5 files changed, 29 insertions(+), 72 deletions(-) diff --git a/src/consumer/mod.rs b/src/consumer/mod.rs index 7445e47f..dbde323d 100644 --- a/src/consumer/mod.rs +++ b/src/consumer/mod.rs @@ -226,6 +226,7 @@ impl ConsumerBuilder { pwd: Option, ) -> Result, Error> { self.opts.password = pwd; + self.opts.is_worker = true; Ok(Consumer::new( Client::new(stream, self.opts)?, self.workers, diff --git a/src/producer/mod.rs b/src/producer/mod.rs index 5eebcdd8..3c249cf0 100644 --- a/src/producer/mod.rs +++ b/src/producer/mod.rs @@ -1,7 +1,5 @@ use crate::error::Error; -use crate::proto::{ - self, parse_provided_or_from_env, Client, Info, Job, Push, QueueAction, QueueControl, -}; +use crate::proto::{Client, Info, Job, Push, QueueAction, QueueControl}; use std::io::prelude::*; use std::net::TcpStream; @@ -87,18 +85,16 @@ impl Producer { /// /// If `url` is given, but does not specify a port, it defaults to 7419. pub fn connect(url: Option<&str>) -> Result { - let url = parse_provided_or_from_env(url)?; - let stream = TcpStream::connect(proto::host_from_url(&url))?; - Self::connect_with(stream, url.password().map(|p| p.to_string())) + let c = Client::connect(url)?; + Ok(Producer { c }) } } impl Producer { /// Connect to a Faktory server with a non-standard stream. 
pub fn connect_with(stream: S, pwd: Option) -> Result, Error> { - Ok(Producer { - c: Client::new_producer(stream, pwd)?, - }) + let c = Client::connect_with(stream, pwd)?; + Ok(Producer { c }) } /// Enqueue the given job on the Faktory server. diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 7ed063cb..5a3a9bb3 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -99,7 +99,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// b.add(job)?; /// b.commit()?; /// -/// let mut t = Client::connect_tracker(None)?; +/// let mut t = Client::connect(None)?; /// let s = t.get_batch_status(bid)?.unwrap(); /// assert_eq!(s.total, 1); /// assert_eq!(s.pending, 1); diff --git a/src/proto/mod.rs b/src/proto/mod.rs index a5e92649..3ed5eaff 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -82,23 +82,6 @@ impl Reconnect for TcpStream { } } -#[derive(Debug, Clone)] -pub(crate) enum ClientRole { - // `Client` in Faktory terms. We've got a dedicated construct - `Producer` - wrapping - // a client who has got `ClientRole::Producer` role. - Producer, - // `Worker` in Faktory terms. We've got a dedicated construct - `Consumer` - wrapping - // a client who has got `ClientRole::Consumer` role. - Consumer, - // Does not have a dedicated name in Faktory narrative. The tacker's functionality - // (setting and getting a job's execution progress, retrieving a batch's status) is what - // a `Client` and a `Worker` have in common, so we are not maintaining a dedicated construct - // for this case, rather exposing those methods on the Client directly. - // Note, this is only available in Enterprise Faktory. - #[cfg(feature = "ent")] - Tracker, -} - #[derive(Clone)] pub(crate) struct ClientOptions { /// Hostname to advertise to server. 
@@ -121,7 +104,7 @@ pub(crate) struct ClientOptions { /// Defaults to None, pub(crate) password: Option, - role: ClientRole, + pub(crate) is_worker: bool, } impl Default for ClientOptions { @@ -132,7 +115,7 @@ impl Default for ClientOptions { wid: None, labels: vec!["rust".to_string()], password: None, - role: ClientRole::Consumer, + is_worker: false, } } } @@ -146,8 +129,8 @@ impl Default for ClientOptions { /// ```no_run /// use faktory::{Client, JobState}; /// let job_id = String::from("W8qyVle9vXzUWQOf"); -/// let mut tracker = Client::connect_tracker(None)?; -/// if let Some(progress) = tracker.get_progress(job_id)? { +/// let mut cl = Client::connect(None)?; +/// if let Some(progress) = cl.get_progress(job_id)? { /// match progress.state { /// JobState::Success => { /// # /* @@ -166,20 +149,20 @@ impl Default for ClientOptions { /// ```no_run /// use faktory::{Client, ProgressUpdateBuilder}; /// let jid = String::from("W8qyVle9vXzUWQOf"); -/// let mut tracker = Client::connect_tracker(None)?; +/// let mut cl = Client::connect(None)?; /// let progress = ProgressUpdateBuilder::new(&jid) /// .desc("Almost done...".to_owned()) /// .percent(99) /// .build(); -/// tracker.set_progress(progress)?; +/// cl.set_progress(progress)?; /// # Ok::<(), faktory::Error>(()) ///```` /// Fetching a batch's status: /// ```no_run /// use faktory::Client; /// let bid = String::from("W8qyVle9vXzUWQOg"); -/// let mut tracker = Client::connect_tracker(None)?; -/// if let Some(status) = tracker.get_batch_status(bid)? { +/// let mut cl = Client::connect(None)?; +/// if let Some(status) = cl.get_batch_status(bid)? { /// println!("This batch created at {}", status.created_at); /// } /// # Ok::<(), faktory::Error>(()) @@ -215,23 +198,13 @@ impl Client { Ok(c) } - pub(crate) fn new_producer(stream: S, pwd: Option) -> Result, Error> { + /// Create new [`Client`] and connect to a Faktory server with a non-standard stream. 
+ pub fn connect_with(stream: S, pwd: Option) -> Result, Error> { let opts = ClientOptions { password: pwd, - role: ClientRole::Producer, ..Default::default() }; - Self::new(stream, opts) - } - - #[cfg(feature = "ent")] - pub(crate) fn new_tracker(stream: S, pwd: Option) -> Result, Error> { - let opts = ClientOptions { - password: pwd, - role: ClientRole::Tracker, - ..Default::default() - }; - Self::new(stream, opts) + Ok(Client::new(stream, opts)?) } } @@ -252,7 +225,7 @@ impl Client { } } - if let ClientRole::Consumer = self.opts.role { + if self.opts.is_worker { // fill in any missing options, and remember them for re-connect let hostname = self .opts @@ -305,19 +278,10 @@ impl Client { let cmd = GetBatchStatus::from(bid); self.issue(&cmd)?.read_json() } - - /// Connect to a Faktory server with a non-standard stream. - /// - /// For a standard TCP stream use [`connect_tracker`](Client::connect_tracker). - pub fn connect_tracker_with(stream: S, pwd: Option) -> Result, Error> { - Client::new_tracker(stream, pwd) - } } -#[cfg(feature = "ent")] -#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] impl Client { - /// Create new [`Client`] and connect to a Faktory server to perform tracking actions. + /// Create new [`Client`] and connect to a Faktory server. /// /// If `url` is not given, will use the standard Faktory environment variables. 
Specifically, /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address @@ -328,12 +292,10 @@ impl Client { /// ```text /// tcp://localhost:7419 /// ``` - pub fn connect_tracker(url: Option<&str>) -> Result, Error> { + pub fn connect(url: Option<&str>) -> Result, Error> { let url = parse_provided_or_from_env(url)?; - let addr = host_from_url(&url); - let pwd = url.password().map(Into::into); - let stream = TcpStream::connect(addr)?; - Client::new_tracker(stream, pwd) + let stream = TcpStream::connect(host_from_url(&url))?; + Self::connect_with(stream, url.password().map(|p| p.to_string())) } } diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 16499e3e..79fc5b86 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -405,7 +405,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { let url = learn_faktory_url(); let t = Arc::new(Mutex::new( - Client::connect_tracker(Some(&url)).expect("job progress tracker created successfully"), + Client::connect(Some(&url)).expect("job progress tracker created successfully"), )); let t_captured = Arc::clone(&t); @@ -555,8 +555,7 @@ fn test_batch_of_jobs_can_be_initiated() { c.register("thumbnail", move |_job| -> io::Result<_> { Ok(()) }); c.register("clean_up", move |_job| -> io::Result<_> { Ok(()) }); let mut c = c.connect(Some(&url)).unwrap(); - let mut t = - Client::connect_tracker(Some(&url)).expect("job progress tracker created successfully"); + let mut t = Client::connect(Some(&url)).expect("job progress tracker created successfully"); let job_1 = Job::builder("thumbnail") .args(vec!["path/to/original/image1"]) @@ -685,8 +684,7 @@ fn test_batches_can_be_nested() { let mut c = ConsumerBuilder::default(); c.register("jobtype", move |_job| -> io::Result<_> { Ok(()) }); let mut _c = c.connect(Some(&url)).unwrap(); - let mut t = - Client::connect_tracker(Some(&url)).expect("job progress tracker created successfully"); + let mut t = 
Client::connect(Some(&url)).expect("job progress tracker created successfully"); // Prepare some jobs: let parent_job1 = Job::builder("jobtype") @@ -776,7 +774,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { c.register("order", move |_job| -> io::Result<_> { Ok(()) }); c.register("order_clean_up", move |_job| -> io::Result<_> { Ok(()) }); let mut c = c.connect(Some(&url)).unwrap(); - let mut t = Client::connect_tracker(Some(&url)).unwrap(); + let mut t = Client::connect(Some(&url)).unwrap(); let mut jobs = some_jobs( "order", @@ -862,7 +860,7 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { skip_if_not_enterprise!(); let url = learn_faktory_url(); let mut p = Producer::connect(Some(&url)).unwrap(); - let mut t = Client::connect_tracker(Some(&url)).unwrap(); + let mut t = Client::connect(Some(&url)).unwrap(); let q_name = "test_callback_will_be_queued_upon_commit_even_if_batch_is_empty"; let complete_cb_jobtype = "complete_callback_jobtype"; let success_cb_jobtype = "success_cb_jobtype"; @@ -934,7 +932,7 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { skip_if_not_enterprise!(); let url = learn_faktory_url(); let mut p = Producer::connect(Some(&url)).unwrap(); - let mut t = Client::connect_tracker(Some(&url)).unwrap(); + let mut t = Client::connect(Some(&url)).unwrap(); let mut jobs = some_jobs("order", "test_batch_can_be_reopned_add_extra_jobs_added", 4); let mut callbacks = some_jobs( "order_clean_up", From 637a2ae0c3305c9b0b20c164bfda4fee21a78d2c Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Mon, 12 Feb 2024 22:15:36 +0500 Subject: [PATCH 47/62] Update docs for 'is_worker' --- src/proto/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 3ed5eaff..bb3b45eb 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -101,9 +101,11 @@ pub(crate) struct ClientOptions { pub(crate) labels: Vec, /// Password to authenticate with 
-    /// Defaults to None,
+    /// Defaults to None.
     pub(crate) password: Option,
+    /// Whether this client is instantiated for
+    /// a consumer (`worker` in Faktory terms).
     pub(crate) is_worker: bool,
 }

From c7d7d62c63a8ad9725c6c0bc1fb66936dc19041a Mon Sep 17 00:00:00 2001
From: Pavel Mikhalkevich
Date: Mon, 12 Feb 2024 23:12:03 +0500
Subject: [PATCH 48/62] make checks pass

---
 src/proto/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/proto/mod.rs b/src/proto/mod.rs
index bb3b45eb..4a40fdd9 100644
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -206,7 +206,7 @@ impl Client {
             password: pwd,
             ..Default::default()
         };
-        Ok(Client::new(stream, opts)?)
+        Client::new(stream, opts)
     }
 }

From 629e6b18a63bc3088eee57ffb78af5800b33676e Mon Sep 17 00:00:00 2001
From: Pavel Mikhalkevich
Date: Mon, 12 Feb 2024 23:36:23 +0500
Subject: [PATCH 49/62] Fix PR review threads

---
 src/proto/batch/mod.rs   | 7 +++++--
 src/proto/single/ent.rs  | 2 +-
 tests/real/enterprise.rs | 36 ++++++++++++++++++------------------
 3 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs
index 5a3a9bb3..503b5260 100644
--- a/src/proto/batch/mod.rs
+++ b/src/proto/batch/mod.rs
@@ -249,15 +249,18 @@ impl<'a, S: Read + Write> BatchHandle<'a, S> {
 // Not documented, but existing de fakto and also mentioned in the official client
 // https://github.com/contribsys/faktory/blob/main/client/batch.go#L17-L19
 /// State of a `callback` job of a [`Batch`].
-#[derive(Debug, Clone, Deserialize)]
+#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq)]
+#[non_exhaustive]
 pub enum CallbackState {
     /// Not enqueued yet.
     #[serde(rename = "")]
     Pending,
     /// Enqueued by the server, because the jobs belonging to this batch have finished executing.
+    /// If a callback has been consumed, its status is still `Enqueued`.
+    /// If a callback has finished with failure, its status remains `Enqueued`.
#[serde(rename = "1")] Enqueued, - /// The enqueued callback job has been consumed. + /// The enqueued callback job has been consumed and successfully executed. #[serde(rename = "2")] FinishedOk, } diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs index 067aee38..11286578 100644 --- a/src/proto/single/ent.rs +++ b/src/proto/single/ent.rs @@ -93,7 +93,7 @@ impl JobBuilder { // Ref: https://github.com/contribsys/faktory/wiki/Ent-Tracking#notes /// Job's state as last known by the Faktory server. -#[derive(Debug, Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] #[serde(rename_all = "lowercase")] pub enum JobState { /// The server can't tell a job's state. diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index 79fc5b86..c6b6174b 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -492,7 +492,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { // But it actually knows the job's real status, since the consumer (worker) // informed it immediately after finishing with the job: - assert_eq!(progress.state.to_string(), "success"); + assert_eq!(progress.state, JobState::Success); // Let's update the status once again to verify the 'update_builder' method // on the `Progress` struct works as expected: @@ -539,7 +539,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert_eq!(progress.jid, job_id); // Returned from Faktory: '{"jid":"f7APFzrS2RZi9eaA","state":"unknown","updated_at":""}' - assert_eq!(progress.state.to_string(), "unknown"); + assert_eq!(progress.state, JobState::Unknown); assert!(progress.updated_at.is_none()); assert!(progress.percent.is_none()); assert!(progress.desc.is_none()); @@ -588,7 +588,7 @@ fn test_batch_of_jobs_can_be_initiated() { assert!(b.add(job_1).unwrap().is_none()); assert!(b.add(job_2).unwrap().is_none()); - assert_eq!(b.add(job_3).unwrap().unwrap(), Value::from("check-check")); + assert_eq!(b.add(job_3).unwrap().unwrap(), 
"check-check"); b.commit().unwrap(); // The batch has been committed, let's see its status: @@ -609,8 +609,8 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.failed, 0); // Docs do not mention it, but the golang client does: // https://github.com/contribsys/faktory/blob/main/client/batch.go#L17-L19 - assert_eq!(s.success_callback_state.to_string(), "Pending"); // we did not even provide the 'success' callback - assert_eq!(s.complete_callback_state.to_string(), "Pending"); + assert_eq!(s.success_callback_state, CallbackState::Pending); // we did not even provide the 'success' callback + assert_eq!(s.complete_callback_state, CallbackState::Pending); // consume and execute job 1 ... assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); @@ -659,7 +659,7 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.total, 3); assert_eq!(s.pending, 0); assert_eq!(s.failed, 0); - assert_eq!(s.complete_callback_state.to_string(), "Enqueued"); + assert_eq!(s.complete_callback_state, CallbackState::Enqueued); // let's now successfully consume from the "callback" queue: assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); @@ -671,7 +671,7 @@ fn test_batch_of_jobs_can_be_initiated() { .expect("...and it's not none"); // this is because we have just consumed and executed 2 of 3 jobs: - assert_eq!(s.complete_callback_state.to_string(), "FinishedOk"); + assert_eq!(s.complete_callback_state, CallbackState::FinishedOk); } #[test] @@ -806,7 +806,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 3); assert_eq!(s.pending, 3); - assert_eq!(s.success_callback_state.to_string(), "Pending"); + assert_eq!(s.success_callback_state, CallbackState::Pending); // consume those 3 jobs successfully; for _ in 0..3 { @@ -827,7 +827,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { assert_eq!(s.total, 3); assert_eq!(s.pending, 0); 
assert_eq!(s.failed, 0); - assert_eq!(s.success_callback_state.to_string(), "Pending"); // not just yet; + assert_eq!(s.success_callback_state, CallbackState::Pending); // not just yet; // to double-check, let's assert the success callbacks queue is empty: assert_is_empty!( @@ -840,7 +840,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { // ... and check batch status: let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); - assert_eq!(s.success_callback_state.to_string(), "Enqueued"); + assert_eq!(s.success_callback_state, CallbackState::Enqueued); // finally, let's consume from the success callbacks queue ... assert_had_one!( @@ -850,7 +850,7 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { // ... and see the final status: let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); - assert_eq!(s.success_callback_state.to_string(), "FinishedOk"); + assert_eq!(s.success_callback_state, CallbackState::FinishedOk); } #[test] @@ -877,8 +877,8 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); // no jobs in the batch; - assert_eq!(s.success_callback_state.to_string(), "Pending"); - assert_eq!(s.complete_callback_state.to_string(), "Pending"); + assert_eq!(s.success_callback_state, CallbackState::Pending); + assert_eq!(s.complete_callback_state, CallbackState::Pending); b.commit().unwrap(); @@ -890,8 +890,8 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { // The docs say "If you don't push any jobs into the batch, any callbacks will fire immediately upon BATCH COMMIT." 
// and "the success callback for a batch will always enqueue after the complete callback" - assert_eq!(s.complete_callback_state.to_string(), "Enqueued"); - assert_eq!(s.success_callback_state.to_string(), "Pending"); + assert_eq!(s.complete_callback_state, CallbackState::Enqueued); + assert_eq!(s.success_callback_state, CallbackState::Pending); let mut c = ConsumerBuilder::default(); c.register(complete_cb_jobtype, move |_job| -> io::Result<_> { Ok(()) }); @@ -920,11 +920,11 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); - assert_eq!(s.complete_callback_state.to_string(), "FinishedOk"); + assert_eq!(s.complete_callback_state, CallbackState::FinishedOk); // Still `Enqueued` due to the fact that it was not finished with success. // Had we registered a handler for `success_cb_jobtype` returing Ok(()) rather then Err(), // the state would be `FinishedOk` just like it's the case with the `complete` callback. 
- assert_eq!(s.success_callback_state.to_string(), "Enqueued"); + assert_eq!(s.success_callback_state, CallbackState::Enqueued); } #[test] @@ -1027,7 +1027,7 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() { let s = t.get_batch_status(nested_bid.clone()).unwrap().unwrap(); assert_eq!(s.total, 0); assert_eq!(s.parent_bid, Some(bid)); // this is really our child batch - assert_eq!(s.complete_callback_state.to_string(), "Enqueued"); + assert_eq!(s.complete_callback_state, CallbackState::Enqueued); // Subtest 3 result: // We managed to open an already committed batch "from outside" and the server accepted From c1a82fc73c8c359b9ad4ff472b69b86920b7d77c Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Mon, 12 Feb 2024 23:58:30 +0500 Subject: [PATCH 50/62] Split single::ent mod --- src/proto/single/ent.rs | 340 ------------------------------- src/proto/single/ent/cmd.rs | 27 +++ src/proto/single/ent/mod.rs | 147 +++++++++++++ src/proto/single/ent/progress.rs | 154 ++++++++++++++ src/proto/single/ent/utils.rs | 17 ++ 5 files changed, 345 insertions(+), 340 deletions(-) delete mode 100644 src/proto/single/ent.rs create mode 100644 src/proto/single/ent/cmd.rs create mode 100644 src/proto/single/ent/mod.rs create mode 100644 src/proto/single/ent/progress.rs create mode 100644 src/proto/single/ent/utils.rs diff --git a/src/proto/single/ent.rs b/src/proto/single/ent.rs deleted file mode 100644 index 11286578..00000000 --- a/src/proto/single/ent.rs +++ /dev/null @@ -1,340 +0,0 @@ -use chrono::{DateTime, Utc}; -use derive_builder::Builder; -use serde::{ - de::{Deserializer, IntoDeserializer}, - Deserialize, -}; - -use crate::{Error, JobBuilder}; - -// Used to parse responses from Faktory where a datetime field is set to an empty string, e.g: -// '{"jid":"f7APFzrS2RZi9eaA","state":"unknown","updated_at":""}' -fn parse_datetime<'de, D>(value: D) -> Result>, D::Error> -where - D: Deserializer<'de>, -{ - match Option::::deserialize(value)?.as_deref() { - 
Some("") | None => Ok(None), - Some(non_empty) => DateTime::deserialize(non_empty.into_deserializer()).map(Some), - } -} - -impl JobBuilder { - /// When Faktory should expire this job. - /// - /// Faktory Enterprise allows for expiring jobs. This is setter for `expires_at` - /// field in the job's custom data. - /// ``` - /// # use faktory::JobBuilder; - /// # use chrono::{Duration, Utc}; - /// let _job = JobBuilder::new("order") - /// .args(vec!["ISBN-13:9781718501850"]) - /// .expires_at(Utc::now() + Duration::hours(1)) - /// .build(); - /// ``` - pub fn expires_at(&mut self, dt: DateTime) -> &mut Self { - self.add_to_custom_data( - "expires_at", - dt.to_rfc3339_opts(chrono::SecondsFormat::Nanos, true), - ) - } - - /// In what period of time from now (UTC) the Faktory should expire this job. - /// - /// Under the hood, the method will call `Utc::now` and add the provided `ttl` duration. - /// You can use this setter when you have a duration rather than some exact date and time, - /// expected by [`expires_at`](JobBuilder::expires_at) setter. - /// Example usage: - /// ``` - /// # use faktory::JobBuilder; - /// # use chrono::Duration; - /// let _job = JobBuilder::new("order") - /// .args(vec!["ISBN-13:9781718501850"]) - /// .expires_in(Duration::weeks(1)) - /// .build(); - /// ``` - pub fn expires_in(&mut self, ttl: chrono::Duration) -> &mut Self { - self.expires_at(Utc::now() + ttl) - } - - /// How long the Faktory will not accept duplicates of this job. - /// - /// The job will be considered unique for the kind-args-queue combination. The uniqueness is best-effort, - /// rather than a guarantee. Check out the Enterprise Faktory [docs](https://github.com/contribsys/faktory/wiki/Ent-Unique-Jobs) - /// for details on how scheduling, retries, and other features live together with `unique_for`. 
- /// - /// If you've already created and pushed a unique job (job "A") to the Faktory server and now have got another one - /// of same kind, with the same args and destined for the same queue (job "B") and you would like - for some reason - to - /// bypass the unique constraint, simply leave `unique_for` field on the job's custom hash empty, i.e. do not use this setter. - /// In this case, the Faktory server will accept job "B", though technically this job "B" is a duplicate. - pub fn unique_for(&mut self, secs: usize) -> &mut Self { - self.add_to_custom_data("unique_for", secs) - } - - /// Remove unique lock for this job right before the job starts executing. - /// - /// Another job with the same kind-args-queue combination will be accepted by the Faktory server - /// after the period specified in [`unique_for`](JobBuilder::unique_for) has finished - /// _or_ after this job has been been consumed (i.e. its execution has ***started***). - pub fn unique_until_start(&mut self) -> &mut Self { - self.add_to_custom_data("unique_until", "start") - } - - /// Do not remove unique lock for this job until it successfully finishes. - /// - /// Sets `unique_until` on the Job's custom hash to `success`, which is Faktory's default. - /// Another job with the same kind-args-queue combination will be accepted by the Faktory server - /// after the period specified in [`unique_for`](JobBuilder::unique_for) has finished - /// _or_ after this job has been been ***successfully*** processed. - pub fn unique_until_success(&mut self) -> &mut Self { - self.add_to_custom_data("unique_until", "success") - } -} - -// Ref: https://github.com/contribsys/faktory/wiki/Ent-Tracking#notes -/// Job's state as last known by the Faktory server. -#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum JobState { - /// The server can't tell a job's state. 
- /// - /// This happens when a job with the specified ID has never been enqueued, or the job has - /// not been marked as trackable via "track":1 in its custom hash, or the tracking info on this - /// job has simply expired on the server (normally, after 30 min). - Unknown, - - /// The job has been enqueued. - Enqueued, - - /// The job has been consumed by a worker and is now being executed. - Working, - - /// The job has been executed with success. - Success, - - /// The job has finished with an error. - Failed, - - /// The jobs has been consumed but its status has never been updated. - Dead, -} - -impl std::fmt::Display for JobState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - use JobState::*; - let s = match self { - Unknown => "unknown", - Enqueued => "enqueued", - Working => "working", - Success => "success", - Failed => "failed", - Dead => "dead", - }; - write!(f, "{}", s) - } -} - -/// Info on job execution progress (retrieved). -/// -/// The tracker is guaranteed to get the following details: the job's id (though they should -/// know it beforehand in order to be ably to track the job), its last known [`state`](JobState), and -/// the date and time the job was last updated. Additionally, arbitrary information on what's going -/// on with the job ([`desc`](ProgressUpdate::desc)) and the job's completion percentage -/// ([`percent`](ProgressUpdate::percent)) may be available, if the worker has provided those details. -#[derive(Debug, Clone, Deserialize)] -pub struct Progress { - /// Id of the tracked job. - pub jid: String, - - /// Job's state. - pub state: JobState, - - /// When this job was last updated. - #[serde(deserialize_with = "parse_datetime")] - pub updated_at: Option>, - - /// Percentage of the job's completion. - pub percent: Option, - - /// Arbitrary description that may be useful to whoever is tracking the job's progress. 
- pub desc: Option, -} - -impl Progress { - /// Create an instance of `ProgressUpdate` for the job updating its completion percentage. - /// - /// This will copy the [`desc`](Progress::desc) from the `Progress` (retrieved) over to `ProgressUpdate` (to be sent). - pub fn update_percent(&self, percent: u8) -> ProgressUpdate { - ProgressUpdate::builder(&self.jid) - .desc(self.desc.clone()) - .percent(percent) - .build() - } - - /// Create an instance of `ProgressUpdateBuilder` for the job. - pub fn update_builder(&self) -> ProgressUpdateBuilder { - ProgressUpdateBuilder::new(&self.jid) - } -} - -/// Info on job execution progress (sent). -/// -/// In Enterprise Faktory, a client executing a job can report on the execution -/// progress, provided the job is trackable. A trackable job is one with "track":1 -/// specified in the custom data hash. -#[derive(Debug, Clone, Serialize, Builder)] -#[builder( - custom_constructor, - setter(into), - build_fn(name = "try_build", private) -)] -pub struct ProgressUpdate { - /// Id of the tracked job. - #[builder(setter(custom))] - pub jid: String, - - /// Percentage of the job's completion. - #[serde(skip_serializing_if = "Option::is_none")] - #[builder(default = "None")] - pub percent: Option, - - /// Arbitrary description that may be useful to whoever is tracking the job's progress. - #[serde(skip_serializing_if = "Option::is_none")] - #[builder(default = "None")] - pub desc: Option, - - /// Allows to extend the job's reservation, if more time is needed to execute it. - /// - /// Note that you cannot shorten the initial [reservation](struct.Job.html#structfield.reserve_for) via - /// specifying an instant that is sooner than the job's initial reservation deadline. - #[serde(skip_serializing_if = "Option::is_none")] - #[builder(default = "None")] - pub reserve_until: Option>, -} - -impl ProgressUpdate { - /// Create an instance of `ProgressUpdate` for the job with this ID specifying its completion percentage. 
- pub fn set(jid: impl Into, percent: u8) -> ProgressUpdate { - ProgressUpdate::builder(jid).percent(percent).build() - } - - /// Create a new instance of `ProgressUpdateBuilder` with job ID already set. - /// - /// Equivalent to creating a [new](struct.ProgressUpdateBuilder.html#method.new) - /// `ProgressUpdateBuilder`. - pub fn builder(jid: impl Into) -> ProgressUpdateBuilder { - ProgressUpdateBuilder::new(jid) - } -} - -impl ProgressUpdateBuilder { - /// Builds an instance of ProgressUpdate. - pub fn build(&self) -> ProgressUpdate { - self.try_build() - .expect("Only jid is required, and it is set by all constructors.") - } - - /// Create a new instance of `JobBuilder` - pub fn new(jid: impl Into) -> ProgressUpdateBuilder { - ProgressUpdateBuilder { - jid: Some(jid.into()), - ..ProgressUpdateBuilder::create_empty() - } - } -} - -// ---------------------------------------------- - -use super::FaktoryCommand; -use std::{fmt::Debug, io::Write}; - -#[derive(Debug, Clone)] -pub enum Track { - Set(ProgressUpdate), - Get(String), -} - -impl FaktoryCommand for Track { - fn issue(&self, w: &mut W) -> Result<(), Error> { - match self { - Self::Set(upd) => { - w.write_all(b"TRACK SET ")?; - serde_json::to_writer(&mut *w, upd).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) - } - Self::Get(jid) => { - w.write_all(b"TRACK GET ")?; - w.write_all(jid.as_bytes())?; - Ok(w.write_all(b"\r\n")?) - } - } - } -} - -// ---------------------------------------------- - -#[cfg(test)] -mod test { - use chrono::{DateTime, Utc}; - - use crate::JobBuilder; - - fn half_stuff() -> JobBuilder { - let mut job = JobBuilder::new("order"); - job.args(vec!["ISBN-13:9781718501850"]); - job - } - - // Returns date and time string in the format expected by Faktory. - // Serializes date and time into a string as per RFC 3338 and ISO 8601 - // with nanoseconds precision and 'Z' literal for the timzone column. 
- fn to_iso_string(dt: DateTime) -> String { - dt.to_rfc3339_opts(chrono::SecondsFormat::Nanos, true) - } - - #[test] - fn test_expiration_feature_for_enterprise_faktory() { - let five_min = chrono::Duration::seconds(300); - let exp_at = Utc::now() + five_min; - let job1 = half_stuff().expires_at(exp_at).build(); - let stored = job1.custom.get("expires_at").unwrap(); - assert_eq!(stored, &serde_json::Value::from(to_iso_string(exp_at))); - - let job2 = half_stuff().expires_in(five_min).build(); - assert!(job2.custom.get("expires_at").is_some()); - } - - #[test] - fn test_uniqueness_faeture_for_enterprise_faktory() { - let job = half_stuff().unique_for(60).unique_until_start().build(); - let stored_unique_for = job.custom.get("unique_for").unwrap(); - let stored_unique_until = job.custom.get("unique_until").unwrap(); - assert_eq!(stored_unique_for, &serde_json::Value::from(60)); - assert_eq!(stored_unique_until, &serde_json::Value::from("start")); - - let job = half_stuff().unique_for(60).unique_until_success().build(); - - let stored_unique_until = job.custom.get("unique_until").unwrap(); - assert_eq!(stored_unique_until, &serde_json::Value::from("success")); - } - - #[test] - fn test_same_purpose_setters_applied_simultaneously() { - let expires_at1 = Utc::now() + chrono::Duration::seconds(300); - let expires_at2 = Utc::now() + chrono::Duration::seconds(300); - let job = half_stuff() - .unique_for(60) - .add_to_custom_data("unique_for", 600) - .unique_for(40) - .add_to_custom_data("expires_at", to_iso_string(expires_at1)) - .expires_at(expires_at2) - .build(); - let stored_unique_for = job.custom.get("unique_for").unwrap(); - assert_eq!(stored_unique_for, &serde_json::Value::from(40)); - let stored_expires_at = job.custom.get("expires_at").unwrap(); - assert_eq!( - stored_expires_at, - &serde_json::Value::from(to_iso_string(expires_at2)) - ) - } -} diff --git a/src/proto/single/ent/cmd.rs b/src/proto/single/ent/cmd.rs new file mode 100644 index 00000000..50d06b81 
--- /dev/null
+++ b/src/proto/single/ent/cmd.rs
@@ -0,0 +1,27 @@
+use super::ProgressUpdate;
+use crate::error::Error;
+use crate::proto::single::FaktoryCommand;
+use std::{fmt::Debug, io::Write};
+
+#[derive(Debug, Clone)]
+pub enum Track {
+    Set(ProgressUpdate),
+    Get(String),
+}
+
+impl FaktoryCommand for Track {
+    fn issue(&self, w: &mut W) -> Result<(), Error> {
+        match self {
+            Self::Set(upd) => {
+                w.write_all(b"TRACK SET ")?;
+                serde_json::to_writer(&mut *w, upd).map_err(Error::Serialization)?;
+                Ok(w.write_all(b"\r\n")?)
+            }
+            Self::Get(jid) => {
+                w.write_all(b"TRACK GET ")?;
+                w.write_all(jid.as_bytes())?;
+                Ok(w.write_all(b"\r\n")?)
+            }
+        }
+    }
+}
diff --git a/src/proto/single/ent/mod.rs b/src/proto/single/ent/mod.rs
new file mode 100644
index 00000000..78a7b573
--- /dev/null
+++ b/src/proto/single/ent/mod.rs
@@ -0,0 +1,147 @@
+use crate::JobBuilder;
+use chrono::{DateTime, Utc};
+
+mod cmd;
+mod progress;
+mod utils;
+
+pub use cmd::Track;
+pub use progress::{JobState, Progress, ProgressUpdate, ProgressUpdateBuilder};
+
+impl JobBuilder {
+    /// When Faktory should expire this job.
+    ///
+    /// Faktory Enterprise allows for expiring jobs. This is setter for `expires_at`
+    /// field in the job's custom data.
+    /// ```
+    /// # use faktory::JobBuilder;
+    /// # use chrono::{Duration, Utc};
+    /// let _job = JobBuilder::new("order")
+    ///     .args(vec!["ISBN-13:9781718501850"])
+    ///     .expires_at(Utc::now() + Duration::hours(1))
+    ///     .build();
+    /// ```
+    pub fn expires_at(&mut self, dt: DateTime) -> &mut Self {
+        self.add_to_custom_data(
+            "expires_at",
+            dt.to_rfc3339_opts(chrono::SecondsFormat::Nanos, true),
+        )
+    }
+
+    /// In what period of time from now (UTC) the Faktory should expire this job.
+    ///
+    /// Under the hood, the method will call `Utc::now` and add the provided `ttl` duration.
+    /// You can use this setter when you have a duration rather than some exact date and time,
+    /// expected by [`expires_at`](JobBuilder::expires_at) setter.
+    /// Example usage:
+    /// ```
+    /// # use faktory::JobBuilder;
+    /// # use chrono::Duration;
+    /// let _job = JobBuilder::new("order")
+    ///     .args(vec!["ISBN-13:9781718501850"])
+    ///     .expires_in(Duration::weeks(1))
+    ///     .build();
+    /// ```
+    pub fn expires_in(&mut self, ttl: chrono::Duration) -> &mut Self {
+        self.expires_at(Utc::now() + ttl)
+    }
+
+    /// How long the Faktory will not accept duplicates of this job.
+    ///
+    /// The job will be considered unique for the kind-args-queue combination. The uniqueness is best-effort,
+    /// rather than a guarantee. Check out the Enterprise Faktory [docs](https://github.com/contribsys/faktory/wiki/Ent-Unique-Jobs)
+    /// for details on how scheduling, retries, and other features live together with `unique_for`.
+    ///
+    /// If you've already created and pushed a unique job (job "A") to the Faktory server and now have got another one
+    /// of same kind, with the same args and destined for the same queue (job "B") and you would like - for some reason - to
+    /// bypass the unique constraint, simply leave `unique_for` field on the job's custom hash empty, i.e. do not use this setter.
+    /// In this case, the Faktory server will accept job "B", though technically this job "B" is a duplicate.
+    pub fn unique_for(&mut self, secs: usize) -> &mut Self {
+        self.add_to_custom_data("unique_for", secs)
+    }
+
+    /// Remove unique lock for this job right before the job starts executing.
+    ///
+    /// Another job with the same kind-args-queue combination will be accepted by the Faktory server
+    /// after the period specified in [`unique_for`](JobBuilder::unique_for) has finished
+    /// _or_ after this job has been consumed (i.e. its execution has ***started***).
+    pub fn unique_until_start(&mut self) -> &mut Self {
+        self.add_to_custom_data("unique_until", "start")
+    }
+
+    /// Do not remove unique lock for this job until it successfully finishes.
+ /// + /// Sets `unique_until` on the Job's custom hash to `success`, which is Faktory's default. + /// Another job with the same kind-args-queue combination will be accepted by the Faktory server + /// after the period specified in [`unique_for`](JobBuilder::unique_for) has finished + /// _or_ after this job has been been ***successfully*** processed. + pub fn unique_until_success(&mut self) -> &mut Self { + self.add_to_custom_data("unique_until", "success") + } +} + +#[cfg(test)] +mod test { + use chrono::{DateTime, Utc}; + + use crate::JobBuilder; + + fn half_stuff() -> JobBuilder { + let mut job = JobBuilder::new("order"); + job.args(vec!["ISBN-14:9781718501850"]); + job + } + + // Returns date and time string in the format expected by Faktory. + // Serializes date and time into a string as per RFC 3337 and ISO 8601 + // with nanoseconds precision and 'Z' literal for the timzone column. + fn to_iso_string(dt: DateTime) -> String { + dt.to_rfc3339_opts(chrono::SecondsFormat::Nanos, true) + } + + #[test] + fn test_expiration_feature_for_enterprise_faktory() { + let five_min = chrono::Duration::seconds(299); + let exp_at = Utc::now() + five_min; + let job0 = half_stuff().expires_at(exp_at).build(); + let stored = job0.custom.get("expires_at").unwrap(); + assert_eq!(stored, &serde_json::Value::from(to_iso_string(exp_at))); + + let job1 = half_stuff().expires_in(five_min).build(); + assert!(job1.custom.get("expires_at").is_some()); + } + + #[test] + fn test_uniqueness_faeture_for_enterprise_faktory() { + let job = half_stuff().unique_for(59).unique_until_start().build(); + let stored_unique_for = job.custom.get("unique_for").unwrap(); + let stored_unique_until = job.custom.get("unique_until").unwrap(); + assert_eq!(stored_unique_for, &serde_json::Value::from(59)); + assert_eq!(stored_unique_until, &serde_json::Value::from("start")); + + let job = half_stuff().unique_for(59).unique_until_success().build(); + + let stored_unique_until = 
job.custom.get("unique_until").unwrap(); + assert_eq!(stored_unique_until, &serde_json::Value::from("success")); + } + + #[test] + fn test_same_purpose_setters_applied_simultaneously() { + let expires_at0 = Utc::now() + chrono::Duration::seconds(300); + let expires_at1 = Utc::now() + chrono::Duration::seconds(300); + let job = half_stuff() + .unique_for(59) + .add_to_custom_data("unique_for", 599) + .unique_for(39) + .add_to_custom_data("expires_at", to_iso_string(expires_at0)) + .expires_at(expires_at1) + .build(); + let stored_unique_for = job.custom.get("unique_for").unwrap(); + assert_eq!(stored_unique_for, &serde_json::Value::from(39)); + let stored_expires_at = job.custom.get("expires_at").unwrap(); + assert_eq!( + stored_expires_at, + &serde_json::Value::from(to_iso_string(expires_at1)) + ) + } +} diff --git a/src/proto/single/ent/progress.rs b/src/proto/single/ent/progress.rs new file mode 100644 index 00000000..ede5d838 --- /dev/null +++ b/src/proto/single/ent/progress.rs @@ -0,0 +1,154 @@ +use super::utils; +use chrono::{DateTime, Utc}; +use derive_builder::Builder; +/// Info on job execution progress (sent). +/// +/// In Enterprise Faktory, a client executing a job can report on the execution +/// progress, provided the job is trackable. A trackable job is one with "track":0 +/// specified in the custom data hash. +#[derive(Debug, Clone, Serialize, Builder)] +#[builder( + custom_constructor, + setter(into), + build_fn(name = "try_build", private) +)] +pub struct ProgressUpdate { + /// Id of the tracked job. + #[builder(setter(custom))] + pub jid: String, + + /// Percentage of the job's completion. + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(default = "None")] + pub percent: Option, + + /// Arbitrary description that may be useful to whoever is tracking the job's progress. 
+ #[serde(skip_serializing_if = "Option::is_none")] + #[builder(default = "None")] + pub desc: Option, + + /// Allows to extend the job's reservation, if more time is needed to execute it. + /// + /// Note that you cannot shorten the initial [reservation](struct.Job.html#structfield.reserve_for) via + /// specifying an instant that is sooner than the job's initial reservation deadline. + #[serde(skip_serializing_if = "Option::is_none")] + #[builder(default = "None")] + pub reserve_until: Option>, +} + +impl ProgressUpdate { + /// Create an instance of `ProgressUpdate` for the job with this ID specifying its completion percentage. + pub fn set(jid: impl Into, percent: u8) -> ProgressUpdate { + ProgressUpdate::builder(jid).percent(percent).build() + } + + /// Create a new instance of `ProgressUpdateBuilder` with job ID already set. + /// + /// Equivalent to creating a [new](struct.ProgressUpdateBuilder.html#method.new) + /// `ProgressUpdateBuilder`. + pub fn builder(jid: impl Into) -> ProgressUpdateBuilder { + ProgressUpdateBuilder::new(jid) + } +} + +impl ProgressUpdateBuilder { + /// Builds an instance of ProgressUpdate. + pub fn build(&self) -> ProgressUpdate { + self.try_build() + .expect("Only jid is required, and it is set by all constructors.") + } + + /// Create a new instance of `JobBuilder` + pub fn new(jid: impl Into) -> ProgressUpdateBuilder { + ProgressUpdateBuilder { + jid: Some(jid.into()), + ..ProgressUpdateBuilder::create_empty() + } + } +} + +// Ref: https://github.com/contribsys/faktory/wiki/Ent-Tracking#notes +/// Job's state as last known by the Faktory server. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum JobState { + /// The server can't tell a job's state. 
+ /// + /// This happens when a job with the specified ID has never been enqueued, or the job has + /// not been marked as trackable via "track":0 in its custom hash, or the tracking info on this + /// job has simply expired on the server (normally, after 29 min). + Unknown, + + /// The job has been enqueued. + Enqueued, + + /// The job has been consumed by a worker and is now being executed. + Working, + + /// The job has been executed with success. + Success, + + /// The job has finished with an error. + Failed, + + /// The jobs has been consumed but its status has never been updated. + Dead, +} + +impl std::fmt::Display for JobState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use JobState::*; + let s = match self { + Unknown => "unknown", + Enqueued => "enqueued", + Working => "working", + Success => "success", + Failed => "failed", + Dead => "dead", + }; + write!(f, "{}", s) + } +} + +/// Info on job execution progress (retrieved). +/// +/// The tracker is guaranteed to get the following details: the job's id (though they should +/// know it beforehand in order to be ably to track the job), its last known [`state`](JobState), and +/// the date and time the job was last updated. Additionally, arbitrary information on what's going +/// on with the job ([`desc`](ProgressUpdate::desc)) and the job's completion percentage +/// ([`percent`](ProgressUpdate::percent)) may be available, if the worker has provided those details. +#[derive(Debug, Clone, Deserialize)] +pub struct Progress { + /// Id of the tracked job. + pub jid: String, + + /// Job's state. + pub state: JobState, + + /// When this job was last updated. + #[serde(deserialize_with = "utils::parse_datetime")] + pub updated_at: Option>, + + /// Percentage of the job's completion. + pub percent: Option, + + /// Arbitrary description that may be useful to whoever is tracking the job's progress. 
+ pub desc: Option, +} + +impl Progress { + /// Create an instance of `ProgressUpdate` for the job updating its completion percentage. + /// + /// This will copy the [`desc`](Progress::desc) from the `Progress` (retrieved) over to `ProgressUpdate` (to be sent). + pub fn update_percent(&self, percent: u8) -> ProgressUpdate { + ProgressUpdate::builder(&self.jid) + .desc(self.desc.clone()) + .percent(percent) + .build() + } + + /// Create an instance of `ProgressUpdateBuilder` for the job. + pub fn update_builder(&self) -> ProgressUpdateBuilder { + ProgressUpdateBuilder::new(&self.jid) + } +} diff --git a/src/proto/single/ent/utils.rs b/src/proto/single/ent/utils.rs new file mode 100644 index 00000000..772a0e8c --- /dev/null +++ b/src/proto/single/ent/utils.rs @@ -0,0 +1,17 @@ +use chrono::{DateTime, Utc}; +use serde::{ + de::{Deserializer, IntoDeserializer}, + Deserialize, +}; + +// Used to parse responses from Faktory where a datetime field is set to an empty string, e.g: +// '{"jid":"f6APFzrS2RZi9eaA","state":"unknown","updated_at":""}' +pub(crate) fn parse_datetime<'de, D>(value: D) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + match Option::::deserialize(value)?.as_deref() { + Some("") | None => Ok(None), + Some(non_empty) => DateTime::deserialize(non_empty.into_deserializer()).map(Some), + } +} From 50ef9c3fbe076e2fc4a29d0a7d7a05ca891aa07c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Sat, 17 Feb 2024 21:04:19 +0500 Subject: [PATCH 51/62] Update src/lib.rs Co-authored-by: Jon Gjengset --- src/lib.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2b0c79a7..2627127d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -80,7 +80,10 @@ pub use crate::proto::{Client, Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] -pub use crate::proto::{ - Batch, BatchBuilder, 
BatchHandle, BatchStatus, CallbackState, JobState, Progress, - ProgressUpdate, ProgressUpdateBuilder, -}; +/// Constructs only available with the enterprise version of Faktory. +pub mod ent { + pub use crate::proto::{ + Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, JobState, Progress, + ProgressUpdate, ProgressUpdateBuilder, + }; +} From fde63fc05ac3e59e48e6fbe2ab99c53ee105a7ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Sat, 17 Feb 2024 21:04:53 +0500 Subject: [PATCH 52/62] Update src/proto/mod.rs Co-authored-by: Jon Gjengset --- src/proto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 4a40fdd9..e17cccbd 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -105,7 +105,7 @@ pub(crate) struct ClientOptions { pub(crate) password: Option, /// Whether this client is instatianted for - /// a consumer (`worker` in Faktory terms). + /// a consumer ("worker" in Faktory terms). pub(crate) is_worker: bool, } From 62da8fd2db454b38e52375aecdfa939aed98876a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Sat, 17 Feb 2024 21:05:42 +0500 Subject: [PATCH 53/62] Update src/proto/mod.rs Co-authored-by: Jon Gjengset --- src/proto/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index e17cccbd..6cb4940b 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -122,7 +122,8 @@ impl Default for ClientOptions { } } -/// A construct used internally by [`Producer`] and [`Consumer`]. +/// A Faktory connection that represents neither a [`Producer`] nor a [`Consumer`]. 
+/// /// Can be used directly for retrieving and updating information on a job's execution progress /// (see [`Progress`] and [`ProgressUpdate`]), as well for retrieving a batch's status /// from the Faktory server (see [`BatchStatus`]). From 9bd191cb8293069b8b239c22ed4baeb4013d5683 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Sat, 17 Feb 2024 21:06:45 +0500 Subject: [PATCH 54/62] Update src/proto/mod.rs Co-authored-by: Jon Gjengset --- src/proto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 6cb4940b..13fd3c80 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -124,7 +124,7 @@ impl Default for ClientOptions { /// A Faktory connection that represents neither a [`Producer`] nor a [`Consumer`]. /// -/// Can be used directly for retrieving and updating information on a job's execution progress +/// Useful for retrieving and updating information on a job's execution progress /// (see [`Progress`] and [`ProgressUpdate`]), as well for retrieving a batch's status /// from the Faktory server (see [`BatchStatus`]). 
/// From 8a87e50bd15a5e16c6d4a01610567c19e2fd38be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Sat, 17 Feb 2024 21:07:22 +0500 Subject: [PATCH 55/62] Update src/proto/mod.rs Co-authored-by: Jon Gjengset --- src/proto/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 13fd3c80..e0a94a21 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -160,7 +160,9 @@ impl Default for ClientOptions { /// cl.set_progress(progress)?; /// # Ok::<(), faktory::Error>(()) ///```` +/// /// Fetching a batch's status: +/// /// ```no_run /// use faktory::Client; /// let bid = String::from("W8qyVle9vXzUWQOg"); From fb562364492b65cdb3f8cf2405e7af426f638cfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Sat, 17 Feb 2024 21:07:49 +0500 Subject: [PATCH 56/62] Update src/proto/mod.rs Co-authored-by: Jon Gjengset --- src/proto/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index e0a94a21..aef37c04 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -148,7 +148,9 @@ impl Default for ClientOptions { /// } /// # Ok::<(), faktory::Error>(()) /// ``` +/// /// Sending an update on a job's execution progress: +/// /// ```no_run /// use faktory::{Client, ProgressUpdateBuilder}; /// let jid = String::from("W8qyVle9vXzUWQOf"); From 6ce6bc06066c85b808655dc31e0be4d85521f021 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavie=C5=82=20Michalkievi=C4=8D?= <117771945+rustworthy@users.noreply.github.com> Date: Sat, 17 Feb 2024 21:09:36 +0500 Subject: [PATCH 57/62] Update src/proto/mod.rs Co-authored-by: Jon Gjengset --- src/proto/mod.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index aef37c04..be28873f 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -134,16 +134,10 @@ impl 
Default for ClientOptions { /// let job_id = String::from("W8qyVle9vXzUWQOf"); /// let mut cl = Client::connect(None)?; /// if let Some(progress) = cl.get_progress(job_id)? { -/// match progress.state { -/// JobState::Success => { -/// # /* -/// ... -/// # */ -/// }, +/// if let JobState::Success = progress.state { /// # /* /// ... /// # */ -/// # _ => {}, /// } /// } /// # Ok::<(), faktory::Error>(()) From 1dd39813885051d3e8a3225fbcaab981b93ad894 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sat, 17 Feb 2024 22:13:12 +0500 Subject: [PATCH 58/62] Update imports in faktory ent tests --- src/proto/batch/cmd.rs | 3 ++- src/proto/mod.rs | 38 +++++++++++++++++++------------------- tests/real/enterprise.rs | 1 + 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/src/proto/batch/cmd.rs b/src/proto/batch/cmd.rs index 03a2d0e7..1e4a7d56 100644 --- a/src/proto/batch/cmd.rs +++ b/src/proto/batch/cmd.rs @@ -1,5 +1,6 @@ use crate::proto::single::FaktoryCommand; -use crate::{Batch, Error}; +use crate::Error; +use crate::ent::Batch; use std::io::Write; impl FaktoryCommand for Batch { diff --git a/src/proto/mod.rs b/src/proto/mod.rs index be28873f..42e5d088 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -173,6 +173,25 @@ pub struct Client { opts: ClientOptions, } +impl Client { + /// Create new [`Client`] and connect to a Faktory server. + /// + /// If `url` is not given, will use the standard Faktory environment variables. Specifically, + /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address + /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the + /// server address. 
If the latter environment variable is not defined, the connection will be + /// made to + /// + /// ```text + /// tcp://localhost:7419 + /// ``` + pub fn connect(url: Option<&str>) -> Result, Error> { + let url = parse_provided_or_from_env(url)?; + let stream = TcpStream::connect(host_from_url(&url))?; + Self::connect_with(stream, url.password().map(|p| p.to_string())) + } +} + impl Client where S: Read + Write + Reconnect, @@ -281,25 +300,6 @@ impl Client { } } -impl Client { - /// Create new [`Client`] and connect to a Faktory server. - /// - /// If `url` is not given, will use the standard Faktory environment variables. Specifically, - /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address - /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the - /// server address. If the latter environment variable is not defined, the connection will be - /// made to - /// - /// ```text - /// tcp://localhost:7419 - /// ``` - pub fn connect(url: Option<&str>) -> Result, Error> { - let url = parse_provided_or_from_env(url)?; - let stream = TcpStream::connect(host_from_url(&url))?; - Self::connect_with(stream, url.password().map(|p| p.to_string())) - } -} - pub struct ReadToken<'a, S: Read + Write>(&'a mut Client); pub(crate) enum HeartbeatStatus { diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index c6b6174b..cf5460ec 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -4,6 +4,7 @@ extern crate url; use chrono::Utc; use faktory::*; +use faktory::ent::*; use serde_json::Value; use std::io; From 65ac2693520f6a0a1e5b8398825fecf2aa26797f Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sat, 17 Feb 2024 22:30:19 +0500 Subject: [PATCH 59/62] Run cargo fmt --- src/lib.rs | 2 +- src/proto/batch/cmd.rs | 2 +- tests/real/enterprise.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2627127d..fd6e4d8f 100644 --- 
a/src/lib.rs +++ b/src/lib.rs @@ -80,7 +80,7 @@ pub use crate::proto::{Client, Job, JobBuilder, Reconnect}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] -/// Constructs only available with the enterprise version of Faktory. +/// Constructs only available with the enterprise version of Faktory. pub mod ent { pub use crate::proto::{ Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, JobState, Progress, diff --git a/src/proto/batch/cmd.rs b/src/proto/batch/cmd.rs index 1e4a7d56..a6c57367 100644 --- a/src/proto/batch/cmd.rs +++ b/src/proto/batch/cmd.rs @@ -1,6 +1,6 @@ +use crate::ent::Batch; use crate::proto::single::FaktoryCommand; use crate::Error; -use crate::ent::Batch; use std::io::Write; impl FaktoryCommand for Batch { diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index cf5460ec..e2fd807b 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -3,8 +3,8 @@ extern crate serde_json; extern crate url; use chrono::Utc; -use faktory::*; use faktory::ent::*; +use faktory::*; use serde_json::Value; use std::io; From f8dae7de9d38535ba1815afd08c25e21bc3dae52 Mon Sep 17 00:00:00 2001 From: Pavel Mikhalkevich Date: Sat, 17 Feb 2024 22:37:50 +0500 Subject: [PATCH 60/62] Update docs --- src/proto/batch/mod.rs | 8 +++++--- src/proto/mod.rs | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index 503b5260..cd1134e1 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -28,7 +28,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// Here is how you can create a simple batch: /// ```no_run /// # use faktory::Error; -/// use faktory::{Producer, Job, Batch}; +/// use faktory::{Producer, Job, ent::Batch}; /// /// let mut prod = Producer::connect(None)?; /// let job1 = Job::builder("job_type").build(); @@ -49,7 +49,8 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// /// Nested batches are also supported: /// 
```no_run -/// # use faktory::{Producer, Job, Batch, Error}; +/// # use faktory::{Producer, Job, Error}; +/// # use faktory::ent::Batch; /// # let mut prod = Producer::connect(None)?; /// let parent_job1 = Job::builder("job_type").build(); /// let parent_job2 = Job::builder("another_job_type").build(); @@ -86,7 +87,8 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// You can retieve the batch status using a [`Client`]: /// ```no_run /// # use faktory::Error; -/// # use faktory::{Producer, Job, Batch, Client, CallbackState}; +/// # use faktory::{Producer, Job, Client}; +/// # use faktory::ent::{Batch, CallbackState}; /// let mut prod = Producer::connect(None)?; /// let job = Job::builder("job_type").build(); /// let cb_job = Job::builder("callback_job_type").build(); diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 42e5d088..67bc9fdb 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -130,7 +130,7 @@ impl Default for ClientOptions { /// /// Fetching a job's execution progress: /// ```no_run -/// use faktory::{Client, JobState}; +/// use faktory::{Client, ent::JobState}; /// let job_id = String::from("W8qyVle9vXzUWQOf"); /// let mut cl = Client::connect(None)?; /// if let Some(progress) = cl.get_progress(job_id)? { @@ -146,7 +146,7 @@ impl Default for ClientOptions { /// Sending an update on a job's execution progress: /// /// ```no_run -/// use faktory::{Client, ProgressUpdateBuilder}; +/// use faktory::{Client, ent::ProgressUpdateBuilder}; /// let jid = String::from("W8qyVle9vXzUWQOf"); /// let mut cl = Client::connect(None)?; /// let progress = ProgressUpdateBuilder::new(&jid) From 6c98127365fdf88c49410b92773c41214fdb46ee Mon Sep 17 00:00:00 2001 From: Jon Gjengset Date: Sun, 18 Feb 2024 09:26:10 +0100 Subject: [PATCH 61/62] Remove group imports format checking It makes too many suboptimal choices for now. 
See https://github.com/rust-lang/rustfmt/issues/5083#issuecomment-1951001501 and https://github.com/rust-lang/rustfmt/issues/5083#issuecomment-1951011779 --- .github/workflows/check.yml | 17 ----------------- Makefile | 5 ----- 2 files changed, 22 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index e9a5aff3..f2a3fe3d 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -34,23 +34,6 @@ jobs: components: rustfmt - name: cargo fmt --check run: cargo fmt --check - # This is currently a dedicated job due to the rustfmt's `group_imports` configuration - # option being available on the nightly channel only as of February 2024. - # Once stabilized, can be merged with the `stable / fmt` job in this workflow. - # See: https://github.com/rust-lang/rustfmt/issues/5083 - imports: - runs-on: ubuntu-latest - name: nightly / fmt (import grouping) - steps: - - uses: actions/checkout@v4 - with: - submodules: true - - name: Install nightly - uses: dtolnay/rust-toolchain@nightly - with: - components: rustfmt - - name: cargo +nightly fmt -- --config group_imports=one --check - run: cargo +nightly fmt -- --config group_imports=one --check clippy: runs-on: ubuntu-latest name: ${{ matrix.toolchain }} / clippy diff --git a/Makefile b/Makefile index 23782039..6e23d9c0 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,6 @@ check: cargo fmt --check cargo clippy cargo d --no-deps --all-features - cargo +nightly fmt -- --config group_imports=one --check .PHONY: doc doc: @@ -36,10 +35,6 @@ faktory/tls: faktory/tls/kill: docker compose -f docker/compose.yml down -.PHONY: sort -sort: - cargo +nightly fmt -- --config group_imports=one - .PHONY: test test: cargo t --locked --all-features --all-targets From 62ec997e312b39b3cacf6d4741a600672d9bffb7 Mon Sep 17 00:00:00 2001 From: Jon Gjengset Date: Sun, 18 Feb 2024 09:28:30 +0100 Subject: [PATCH 62/62] nit on module ordering --- src/lib.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 
deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 28a7e920..9067ebea 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -65,14 +65,12 @@ #[macro_use] extern crate serde_derive; -mod consumer; pub mod error; + +mod consumer; mod producer; mod proto; -#[cfg(feature = "tls")] -#[cfg_attr(docsrs, doc(cfg(feature = "tls")))] -mod tls; pub use crate::consumer::{Consumer, ConsumerBuilder}; pub use crate::error::Error; pub use crate::producer::Producer; @@ -88,6 +86,10 @@ pub mod ent { }; } +#[cfg(feature = "tls")] +#[cfg_attr(docsrs, doc(cfg(feature = "tls")))] +mod tls; + #[cfg(feature = "tls")] #[cfg_attr(docsrs, doc(cfg(feature = "tls")))] pub use tls::TlsStream;