feature/IO-3096-GlobalNotifications - Checkpoint: finished testing the queue; adjusted timeouts to be pegged to one base variable.

This commit is contained in:
Dave Richer
2025-02-20 12:21:09 -05:00
parent 29f7144e72
commit cc5fea9410
2 changed files with 55 additions and 34 deletions

View File

@@ -1,5 +1,14 @@
const { Queue, Worker } = require("bullmq");
// Base time-related constant (in milliseconds)
const CONSOLIDATION_DELAY = 60000; // 1 minute (base timeout)
// Derived time-related constants based on CONSOLIDATION_DELAY / DO NOT TOUCH, these are pegged to CONSOLIDATION_DELAY
const NOTIFICATION_STORAGE_EXPIRATION = CONSOLIDATION_DELAY * 1.5; // 1.5 minutes (90s, for notification storage)
const CONSOLIDATION_FLAG_EXPIRATION = CONSOLIDATION_DELAY * 1.5; // 1.5 minutes (90s, buffer for consolidation flag)
const LOCK_EXPIRATION = CONSOLIDATION_DELAY * 0.25; // 15 seconds (quarter of base, for lock duration)
const RATE_LIMITER_DURATION = CONSOLIDATION_DELAY * 0.1; // 6 seconds (tenth of base, for rate limiting)
let addQueue;
let consolidateQueue;
@@ -21,8 +30,8 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
// Create queue for adding notifications
addQueue = new Queue("notificationsAdd", {
connection: pubClient,
prefix: "{BULLMQ}", // Namespace prefix for BullMQ in Redis
defaultJobOptions: { removeOnComplete: true, removeOnFail: true } // Cleanup jobs after success/failure
prefix: "{BULLMQ}",
defaultJobOptions: { removeOnComplete: true, removeOnFail: true }
});
// Create queue for consolidating notifications
@@ -49,26 +58,30 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
const existingNotifications = await pubClient.get(userKey);
const notifications = existingNotifications ? JSON.parse(existingNotifications) : [];
notifications.push(notification);
// Set with 40-second expiration to avoid stale data
await pubClient.set(userKey, JSON.stringify(notifications), "EX", 40);
// Set with expiration to avoid stale data
await pubClient.set(userKey, JSON.stringify(notifications), "EX", NOTIFICATION_STORAGE_EXPIRATION / 1000); // Convert to seconds
logger.logger.debug(`Stored notification for ${user} under ${userKey}: ${JSON.stringify(notifications)}`);
}
const consolidateKey = `app:consolidate:${jobId}`;
// setnx ensures only one consolidation job is scheduled (atomic operation)
const flagSet = await pubClient.setnx(consolidateKey, "pending");
logger.logger.debug(`Consolidation flag set for jobId ${jobId}: ${flagSet}`);
if (flagSet) {
// Schedule consolidation job to run after a 5-second delay
// Schedule consolidation job with delay and retries
await consolidateQueue.add(
"consolidate-notifications",
{ jobId, recipients },
{ jobId: `consolidate:${jobId}`, delay: 5000 }
{
jobId: `consolidate:${jobId}`,
delay: CONSOLIDATION_DELAY,
attempts: 3, // Retry up to 3 times
backoff: LOCK_EXPIRATION // Retry delay matches lock expiration (15s)
}
);
logger.logger.info(`Scheduled consolidation for jobId ${jobId}`);
// Set expiration on flag to clean up after 5 minutes
await pubClient.expire(consolidateKey, 300);
// Set expiration on flag
await pubClient.expire(consolidateKey, CONSOLIDATION_FLAG_EXPIRATION / 1000); // Convert to seconds
} else {
logger.logger.debug(`Consolidation already scheduled for jobId ${jobId}`);
}
@@ -76,7 +89,7 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
{
connection: pubClient,
prefix: "{BULLMQ}",
concurrency: 5 // Process up to 5 jobs concurrently
concurrency: 5
}
);
@@ -89,14 +102,13 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
const redisKeyPrefix = `app:notifications:${jobId}`;
const lockKey = `lock:consolidate:${jobId}`;
// Acquire a lock to prevent concurrent consolidation (NX = set if not exists)
const lockAcquired = await pubClient.set(lockKey, "locked", "NX", "EX", 10);
// Acquire a lock to prevent concurrent consolidation
const lockAcquired = await pubClient.set(lockKey, "locked", "NX", "EX", LOCK_EXPIRATION / 1000); // Convert to seconds
logger.logger.debug(`Lock acquisition for jobId ${jobId}: ${lockAcquired}`);
if (lockAcquired) {
try {
const allNotifications = {};
// Get unique user IDs to avoid duplicate processing
const uniqueUsers = [...new Set(recipients.map((r) => r.user))];
logger.logger.debug(`Unique users for jobId ${jobId}: ${uniqueUsers}`);
@@ -113,7 +125,7 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
allNotifications[user] = allNotifications[user] || {};
allNotifications[user][bodyShopId] = parsedNotifications;
}
await pubClient.del(userKey); // Clean up after retrieval
await pubClient.del(userKey);
logger.logger.debug(`Deleted Redis key ${userKey}`);
} else {
logger.logger.warn(`No notifications found for ${user} under ${userKey}`);
@@ -152,13 +164,12 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
}
}
// Clean up consolidation flag after processing
await pubClient.del(`app:consolidate:${jobId}`);
} catch (err) {
logger.logger.error(`Consolidation error for jobId ${jobId}: ${err.message}`, { error: err });
throw err; // Re-throw to trigger BullMQ's failed event
throw err; // Trigger retry if attempts remain
} finally {
await pubClient.del(lockKey); // Release lock regardless of success/failure
await pubClient.del(lockKey);
}
} else {
logger.logger.info(`Skipped consolidation for jobId ${jobId} - lock held by another worker`);
@@ -167,8 +178,8 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
{
connection: pubClient,
prefix: "{BULLMQ}",
concurrency: 1, // Single concurrency to avoid race conditions
limiter: { max: 1, duration: 5000 } // Rate limit: 1 job every 5 seconds
concurrency: 1,
limiter: { max: 1, duration: RATE_LIMITER_DURATION }
}
);
@@ -192,11 +203,11 @@ const loadAppQueue = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
logger.logger.info("App queue workers closed");
};
process.on("SIGTERM", shutdown); // Handle termination signal
process.on("SIGINT", shutdown); // Handle interrupt signal (e.g., Ctrl+C)
process.on("SIGTERM", shutdown);
process.on("SIGINT", shutdown);
}
return addQueue; // Return queue for external use
return addQueue;
};
/**
@@ -223,7 +234,6 @@ const dispatchAppsToQueue = async ({ appsToDispatch, logger }) => {
for (const app of appsToDispatch) {
const { jobId, bodyShopId, key, variables, recipients } = app;
// Unique jobId with timestamp to avoid duplicates
await appQueue.add(
"add-notification",
{ jobId, bodyShopId, key, variables, recipients },