Azure Queue Storage support (#65)
Add support for Azure Queue Storage.

Note that blocking receives are not possible in Azure Queue
Storage; as such, calling receive on an empty queue returns
immediately with QueueError::NoData.
jaymell authored Apr 12, 2024
1 parent a00efe9 commit b48d4ad
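
A minimal consumer-side sketch of what that behavior implies (not part of this commit): the drain helper, the 200 ms retry delay, and the exact error shape of Delivery::ack are illustrative assumptions layered on omniqueue's public API.

// Hypothetical helper, not part of this commit: poll an AqsConsumer and treat
// QueueError::NoData as "the queue is currently empty" rather than a failure.
use std::time::Duration;

use omniqueue::{backends::azure_queue_storage::AqsConsumer, QueueError};

async fn drain(consumer: &mut AqsConsumer) -> omniqueue::Result<()> {
    loop {
        match consumer.receive().await {
            Ok(delivery) => {
                // Process the message, then delete it via its pop receipt.
                // Ignoring ack errors here for brevity; a failed ack means the
                // message reappears after its visibility timeout.
                let _ = delivery.ack().await;
            }
            // No blocking receive in Azure Queue Storage: an empty queue
            // surfaces as NoData immediately instead of waiting.
            Err(QueueError::NoData) => {
                tokio::time::sleep(Duration::from_millis(200)).await;
            }
            Err(e) => return Err(e),
        }
    }
}
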
Showing 8 changed files with 503 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,2 +1,3 @@
Cargo.lock
target
.vscode
3 changes: 3 additions & 0 deletions omniqueue/Cargo.toml
@@ -13,6 +13,8 @@ edition = "2021"
async-trait = "0.1"
aws-config = { version = "1.1.5", features = ["behavior-version-latest"], optional = true }
aws-sdk-sqs = { version = "1.13.0", optional = true }
azure_storage = { version = "0.19.0", optional = true }
azure_storage_queues = { version = "0.19.0", optional = true }
bb8 = { version = "0.8", optional = true }
bb8-redis = { version = "0.14.0", optional = true }
futures-util = { version = "0.3.28", default-features = false, features = ["async-await", "std"], optional = true }
@@ -46,3 +48,4 @@ rabbitmq-with-message-ids = ["rabbitmq", "dep:time", "dep:svix-ksuid"]
redis = ["dep:bb8", "dep:bb8-redis", "dep:redis", "dep:svix-ksuid"]
redis_cluster = ["redis", "redis/cluster-async"]
sqs = ["dep:aws-config", "dep:aws-sdk-sqs"]
azure_queue_storage = ["dep:azure_storage", "dep:azure_storage_queues"]
225 changes: 225 additions & 0 deletions omniqueue/src/backends/azure_queue_storage.rs
@@ -0,0 +1,225 @@
use std::time::Duration;

use async_trait::async_trait;
use azure_storage::StorageCredentials;
use azure_storage_queues::{
    operations::Message, PopReceipt, QueueClient, QueueServiceClientBuilder,
};
use serde::Serialize;

use crate::{
    builder::Static, queue::Acker, Delivery, QueueBackend, QueueBuilder, QueueError, Result,
};

fn get_client(cfg: &AqsConfig) -> QueueClient {
    let AqsConfig {
        queue_name,
        storage_account,
        credentials,
        cloud_uri,
        ..
    } = cfg;
    let mut builder = QueueServiceClientBuilder::new(storage_account, credentials.clone());
    if let Some(cloud_uri) = cloud_uri {
        builder = builder.cloud_location(azure_storage::CloudLocation::Custom {
            account: cfg.storage_account.clone(),
            uri: cloud_uri.clone(),
        });
    }
    builder.build().queue_client(queue_name)
}

/// Note that blocking receives are not supported by Azure Queue Storage and
/// that message order is not guaranteed.
#[non_exhaustive]
pub struct AqsBackend;

impl AqsBackend {
    /// Creates a new Azure Queue Storage builder with the given
    /// configuration.
    pub fn builder(cfg: impl Into<AqsConfig>) -> QueueBuilder<Self, Static> {
        QueueBuilder::new(cfg.into())
    }
}

const DEFAULT_RECV_TIMEOUT: Duration = Duration::from_secs(180);
const DEFAULT_EMPTY_RECV_DELAY: Duration = Duration::from_millis(200);

#[derive(Clone)]
pub struct AqsConfig {
    pub queue_name: String,
    pub empty_receive_delay: Option<Duration>,
    pub message_ttl: Duration,
    pub storage_account: String,
    pub credentials: StorageCredentials,
    pub cloud_uri: Option<String>,
    pub receive_timeout: Option<Duration>,
}

impl QueueBackend for AqsBackend {
    type Config = AqsConfig;

    type PayloadIn = String;
    type PayloadOut = String;

    type Producer = AqsProducer;
    type Consumer = AqsConsumer;

    async fn new_pair(config: Self::Config) -> Result<(AqsProducer, AqsConsumer)> {
        let client = get_client(&config);
        Ok((
            AqsProducer {
                client: client.clone(),
                config: config.clone(),
            },
            AqsConsumer {
                client: client.clone(),
                config: config.clone(),
            },
        ))
    }

    async fn producing_half(config: Self::Config) -> Result<AqsProducer> {
        let client = get_client(&config);
        Ok(AqsProducer { client, config })
    }

    async fn consuming_half(config: Self::Config) -> Result<AqsConsumer> {
        let client = get_client(&config);
        Ok(AqsConsumer { client, config })
    }
}

pub struct AqsProducer {
    client: QueueClient,
    config: AqsConfig,
}

impl AqsProducer {
    pub async fn send_raw(&self, payload: &str) -> Result<()> {
        self.send_raw_scheduled(payload, Duration::ZERO).await
    }

    pub async fn send_raw_scheduled(&self, payload: &str, delay: Duration) -> Result<()> {
        self.client
            .put_message(payload)
            .visibility_timeout(delay)
            .ttl(self.config.message_ttl)
            .await
            .map_err(QueueError::generic)
            .map(|_| ())
    }

    pub async fn send_serde_json<P: Serialize + Sync>(&self, payload: &P) -> Result<()> {
        let payload = serde_json::to_string(payload)?;
        self.send_raw(&payload).await
    }

    pub async fn send_serde_json_scheduled<P: Serialize + Sync>(
        &self,
        payload: &P,
        delay: Duration,
    ) -> Result<()> {
        let payload = serde_json::to_string(payload)?;
        self.send_raw_scheduled(&payload, delay).await
    }
}

impl_queue_producer!(AqsProducer, String);
impl_scheduled_queue_producer!(AqsProducer, String);

/// Note that blocking receives are not supported by Azure Queue Storage and
/// that message order is not guaranteed.
pub struct AqsConsumer {
    client: QueueClient,
    config: AqsConfig,
}

struct AqsAcker {
    client: QueueClient,
    already_acked_or_nacked: bool,
    pop_receipt: PopReceipt,
}

#[async_trait]
impl Acker for AqsAcker {
    async fn ack(&mut self) -> Result<()> {
        if self.already_acked_or_nacked {
            return Err(QueueError::CannotAckOrNackTwice);
        }
        self.already_acked_or_nacked = true;
        self.client
            .pop_receipt_client(self.pop_receipt.clone())
            .delete()
            .await
            .map_err(QueueError::generic)
            .map(|_| ())
    }

    async fn nack(&mut self) -> Result<()> {
        Ok(())
    }
}

impl AqsConsumer {
    fn wrap_message(&self, message: &Message) -> Delivery {
        Delivery {
            acker: Box::new(AqsAcker {
                client: self.client.clone(),
                pop_receipt: message.pop_receipt(),
                already_acked_or_nacked: false,
            }),
            payload: Some(message.message_text.as_bytes().to_owned()),
        }
    }

    /// Note that blocking receives are not supported by Azure Queue Storage.
    /// Calls to this method will return immediately if no messages are
    /// available for delivery in the queue.
    pub async fn receive(&mut self) -> Result<Delivery> {
        self.client
            .get_messages()
            .visibility_timeout(self.config.receive_timeout.unwrap_or(DEFAULT_RECV_TIMEOUT))
            .await
            .map_err(QueueError::generic)
            .and_then(|m| m.messages.into_iter().next().ok_or(QueueError::NoData))
            .map(|m| self.wrap_message(&m))
    }

    pub async fn receive_all(
        &mut self,
        max_messages: usize,
        deadline: Duration,
    ) -> Result<Vec<Delivery>> {
        let end = std::time::Instant::now() + deadline;
        let mut interval = tokio::time::interval(
            self.config
                .empty_receive_delay
                .unwrap_or(DEFAULT_EMPTY_RECV_DELAY),
        );
        loop {
            interval.tick().await;
            let msgs = self
                .client
                .get_messages()
                .number_of_messages(max_messages.try_into().unwrap_or(u8::MAX))
                .visibility_timeout(self.config.receive_timeout.unwrap_or(DEFAULT_RECV_TIMEOUT))
                .await
                .map_err(QueueError::generic)
                .map(|m| {
                    m.messages
                        .iter()
                        .map(|m| self.wrap_message(m))
                        .collect::<Vec<_>>()
                })?;
            if !msgs.is_empty() {
                return Ok(msgs);
            }
            if std::time::Instant::now() > end {
                return Ok(vec![]);
            }
        }
    }
}

impl_queue_consumer!(AqsConsumer, String);
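
With the azure_queue_storage feature enabled, wiring the new backend end to end might look roughly like the sketch below (not part of this commit). The account name, key, and queue name are placeholders, and StorageCredentials::access_key is assumed from the azure_storage crate; substitute whichever credential constructor matches your setup.

// Hypothetical usage sketch built on the types added in this commit.
use std::time::Duration;

use azure_storage::StorageCredentials;
use omniqueue::{
    backends::azure_queue_storage::{AqsBackend, AqsConfig},
    QueueBackend,
};

async fn example() -> omniqueue::Result<()> {
    let cfg = AqsConfig {
        queue_name: "my-queue".to_owned(),
        empty_receive_delay: None, // fall back to the 200 ms default
        message_ttl: Duration::from_secs(90_000),
        storage_account: "mystorageaccount".to_owned(),
        // Placeholder credentials; assumed azure_storage constructor.
        credentials: StorageCredentials::access_key(
            "mystorageaccount".to_owned(),
            "base64-account-key".to_owned(),
        ),
        cloud_uri: None,       // or Some(custom_uri) when targeting e.g. Azurite
        receive_timeout: None, // fall back to the 180 s visibility timeout
    };

    let (producer, mut consumer) = AqsBackend::new_pair(cfg).await?;
    producer.send_raw("hello").await?;

    // receive_all polls roughly every empty_receive_delay until messages
    // arrive or the deadline passes; an empty queue yields an empty Vec.
    let deliveries = consumer.receive_all(10, Duration::from_secs(5)).await?;
    for delivery in deliveries {
        // Process the payload, then acknowledge to delete the message.
        let _ = delivery.ack().await;
    }
    Ok(())
}
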
4 changes: 4 additions & 0 deletions omniqueue/src/backends/mod.rs
@@ -1,3 +1,5 @@
#[cfg(feature = "azure_queue_storage")]
pub mod azure_queue_storage;
#[cfg(feature = "gcp_pubsub")]
pub mod gcp_pubsub;
#[cfg(feature = "in_memory")]
@@ -9,6 +11,8 @@ pub mod redis;
#[cfg(feature = "sqs")]
pub mod sqs;

#[cfg(feature = "azure_queue_storage")]
pub use azure_queue_storage::{AqsBackend, AqsConfig, AqsConsumer, AqsProducer};
#[cfg(feature = "gcp_pubsub")]
pub use gcp_pubsub::{GcpPubSubBackend, GcpPubSubConfig, GcpPubSubConsumer, GcpPubSubProducer};
#[cfg(feature = "in_memory")]
1 change: 1 addition & 0 deletions omniqueue/src/lib.rs
@@ -17,6 +17,7 @@
//! * RabbitMQ
//! * Redis
//! * Amazon SQS
//! * Azure Queue Storage
//!
//! ## How to Use Omniqueue
//!