feature/IO-3092-imgproxy - Merge release to take care of PR conflicts.

This commit is contained in:
Dave Richer
2025-03-25 14:57:17 -04:00
184 changed files with 19851 additions and 19376 deletions

191
server.js
View File

@@ -5,7 +5,7 @@ require("dotenv").config({
});
if (process.env.NODE_ENV) {
const tracer = require("dd-trace").init({
require("dd-trace").init({
profiling: true,
env: process.env.NODE_ENV,
service: "bodyshop-api"
@@ -22,18 +22,24 @@ const cookieParser = require("cookie-parser");
const { Server } = require("socket.io");
const { createAdapter } = require("@socket.io/redis-adapter");
const { instrument } = require("@socket.io/admin-ui");
const { isString, isEmpty } = require("lodash");
const { isString, isEmpty, isFunction } = require("lodash");
const logger = require("./server/utils/logger");
const { applyRedisHelpers } = require("./server/utils/redisHelpers");
const { applyIOHelpers } = require("./server/utils/ioHelpers");
const { redisSocketEvents } = require("./server/web-sockets/redisSocketEvents");
const { ElastiCacheClient, DescribeCacheClustersCommand } = require("@aws-sdk/client-elasticache");
const {
ElastiCacheClient,
DescribeCacheClustersCommand,
DescribeReplicationGroupsCommand
} = require("@aws-sdk/client-elasticache");
const { InstanceRegion } = require("./server/utils/instanceMgr");
const StartStatusReporter = require("./server/utils/statusReporter");
const { registerCleanupTask, initializeCleanupManager } = require("./server/utils/cleanupManager");
const { loadEmailQueue } = require("./server/notifications/queues/emailQueue");
const { loadAppQueue } = require("./server/notifications/queues/appQueue");
const cleanupTasks = [];
let isShuttingDown = false;
const CLUSTER_RETRY_BASE_DELAY = 100;
const CLUSTER_RETRY_MAX_DELAY = 5000;
const CLUSTER_RETRY_JITTER = 100;
@@ -58,7 +64,7 @@ const SOCKETIO_CORS_ORIGIN = [
"https://beta.test.imex.online",
"https://www.beta.test.imex.online",
"https://beta.imex.online",
"https://www.beta.imex.online",
"https://www.beta.imex.online",
"https://www.test.promanager.web-est.com",
"https://test.promanager.web-est.com",
"https://www.promanager.web-est.com",
@@ -124,26 +130,48 @@ const applyRoutes = ({ app }) => {
* @returns {Promise<string[]>}
*/
// Discover the Redis node endpoints for the ElastiCache replication group
// named by REDIS_CLUSTER_ID: look up the group's member cache clusters, then
// describe each cluster and collect its first node's "address:port".
// Returns [] (after an ERROR log) when the group has no member clusters;
// rethrows any AWS SDK error after logging it.
const getRedisNodesFromAWS = async () => {
  const client = new ElastiCacheClient({ region: InstanceRegion() });
  try {
    const describeReplicationGroupCommand = new DescribeReplicationGroupsCommand({
      ReplicationGroupId: process.env.REDIS_CLUSTER_ID
    });
    const describeReplicationGroupResponse = await client.send(describeReplicationGroupCommand);
    // TODO: add checking to make sure there's only 1 replication group in the response.
    const cacheClusterIds = describeReplicationGroupResponse.ReplicationGroups[0].MemberClusters;
    // Ensure the member-cluster id list exists and is a non-empty array.
    if (!cacheClusterIds || !Array.isArray(cacheClusterIds) || cacheClusterIds.length === 0) {
      logger.log(`No cache clusters found for cluster id ${process.env.REDIS_CLUSTER_ID}`, "ERROR", "redis", "api");
      return [];
    }
    const nodeEndpointAddresses = [];
    // Process each member cluster and resolve its node endpoint.
    for (const cluster of cacheClusterIds) {
      const params = { CacheClusterId: cluster, ShowCacheNodeInfo: true };
      const command = new DescribeCacheClustersCommand(params);
      const response = await client.send(command);
      if (response.CacheClusters && Array.isArray(response.CacheClusters)) {
        // Map the node to an address string.
        // TODO: What happens if we have more shards? Only the first node of the
        // first returned cluster entry is used here.
        const nodeAddress = `${response.CacheClusters[0].CacheNodes[0].Endpoint.Address}:${response.CacheClusters[0].CacheNodes[0].Endpoint.Port}`;
        // Debug log node addresses
        logger.log(`Cluster node addresses: ${nodeAddress}`, "DEBUG", "redis", "api");
        nodeEndpointAddresses.push(nodeAddress);
      }
    }
    return nodeEndpointAddresses;
  } catch (err) {
    logger.log(`Error fetching Redis nodes from AWS:`, "ERROR", "redis", "api", {
      message: err?.message,
      stack: err?.stack
    });
    throw err;
  }
};
@@ -167,7 +195,10 @@ const connectToRedisCluster = async () => {
try {
redisServers = JSON.parse(process.env.REDIS_URL);
} catch (error) {
logger.log(`Failed to parse REDIS_URL: ${error.message}. Exiting...`, "ERROR", "redis", "api");
logger.log(`Failed to parse REDIS_URL: ${error.message}. Exiting...`, "ERROR", "redis", "api", {
message: error?.message,
stack: error?.stack
});
process.exit(1);
}
}
@@ -193,11 +224,22 @@ const connectToRedisCluster = async () => {
return new Promise((resolve, reject) => {
redisCluster.on("ready", () => {
logger.log(`Redis cluster connection established.`, "INFO", "redis", "api");
resolve(redisCluster);
if (process.env.NODE_ENV === "development" && process.env?.CLEAR_REDIS_ON_START === "true") {
logger.log("[Development] Flushing Redis Cluster on Service start...", "INFO", "redis", "api");
const master = redisCluster.nodes("master");
Promise.all(master.map((node) => node.flushall())).then(() => {
resolve(redisCluster);
});
} else {
resolve(redisCluster);
}
});
redisCluster.on("error", (err) => {
logger.log(`Redis cluster connection failed: ${err.message}`, "ERROR", "redis", "api");
logger.log(`Redis cluster connection failed:`, "ERROR", "redis", "api", {
message: err?.message,
stack: err?.stack
});
reject(err);
});
});
@@ -219,17 +261,24 @@ const applySocketIO = async ({ server, app }) => {
const pubClient = redisCluster;
const subClient = pubClient.duplicate();
pubClient.on("error", (err) => logger.log(`Redis pubClient error: ${err}`, "ERROR", "redis"));
subClient.on("error", (err) => logger.log(`Redis subClient error: ${err}`, "ERROR", "redis"));
pubClient.on("error", (err) =>
logger.log(`Redis pubClient error: ${err}`, "ERROR", "redis", "api", {
message: err?.message,
stack: err?.stack
})
);
subClient.on("error", (err) =>
logger.log(`Redis subClient error: ${err}`, "ERROR", "redis", "api", {
message: err?.message,
stack: err?.stack
})
);
process.on("SIGINT", async () => {
// Register Redis cleanup
registerCleanupTask(async () => {
logger.log("Closing Redis connections...", "INFO", "redis", "api");
try {
await Promise.all([pubClient.disconnect(), subClient.disconnect()]);
logger.log("Redis connections closed. Process will exit.", "INFO", "redis", "api");
} catch (error) {
logger.log(`Error closing Redis connections: ${error.message}`, "ERROR", "redis", "api");
}
await Promise.all([pubClient.disconnect(), subClient.disconnect()]);
logger.log("Redis connections closed.", "INFO", "redis", "api");
});
const ioRedis = new Server(server, {
@@ -287,6 +336,34 @@ const applySocketIO = async ({ server, app }) => {
return api;
};
/**
 * Initialize the email and in-app notification queues in parallel and attach
 * error listeners that route queue failures into the shared logger.
 * @param {Object} options - Queue configuration options
 * @param {Redis.Cluster} options.pubClient - Redis client for publishing
 * @param {Object} options.logger - Logger instance
 * @param {Object} options.redisHelpers - Redis helper functions
 * @param {Server} options.ioRedis - Socket.IO server instance
 * @returns {Promise<void>}
 */
const loadQueues = async ({ pubClient, logger, redisHelpers, ioRedis }) => {
  const settings = { pubClient, logger, redisHelpers, ioRedis };
  // Both loaders return Promises; start them together and wait for both.
  const [emailsQueue, appQueue] = await Promise.all([loadEmailQueue(settings), loadAppQueue(settings)]);
  // Surface queue-level failures through the logger instead of crashing.
  emailsQueue.on("error", (error) => {
    logger.log(`Error in notificationsEmailsQueue: ${error}`, "ERROR", "queue", "api", null, { error: error?.message });
  });
  appQueue.on("error", (error) => {
    logger.log(`Error in notificationsAppQueue: ${error}`, "ERROR", "queue", "api", null, { error: error?.message });
  });
};
/**
* Main function to start the server
* @returns {Promise<void>}
@@ -297,6 +374,9 @@ const main = async () => {
const server = http.createServer(app);
// Initialize cleanup manager with signal handlers
initializeCleanupManager();
const { pubClient, ioRedis } = await applySocketIO({ server, app });
const redisHelpers = applyRedisHelpers({ pubClient, app, logger });
const ioHelpers = applyIOHelpers({ app, redisHelpers, ioRedis, logger });
@@ -304,24 +384,25 @@ const main = async () => {
// Legacy Socket Events
require("./server/web-sockets/web-socket");
// Initialize Queues
await loadQueues({ pubClient: pubClient, logger, redisHelpers, ioRedis });
applyMiddleware({ app });
applyRoutes({ app });
redisSocketEvents({ io: ioRedis, redisHelpers, ioHelpers, logger });
const StatusReporter = StartStatusReporter();
registerCleanupTask(async () => {
StatusReporter.end();
if (isFunction(StatusReporter?.end)) {
StatusReporter.end();
}
});
// Add SIGTERM signal handler
process.on("SIGTERM", handleSigterm);
process.on("SIGINT", handleSigterm); // Optional: Handle Ctrl+C
try {
await server.listen(port);
logger.log(`Server started on port ${port}`, "INFO", "api");
} catch (error) {
logger.log(`Server failed to start on port ${port}`, "ERROR", "api", error);
logger.log(`Server failed to start on port ${port}`, "ERROR", "api", null, { error: error.message });
}
};
@@ -335,33 +416,3 @@ main().catch((error) => {
// Note: If we want the app to crash on all uncaught async operations, we would
// need to put a `process.exit(1);` here
});
// Register a cleanup task to be run during graceful shutdown.
// Tasks run in registration order; each may be async — handleSigterm awaits
// them one at a time before exiting.
function registerCleanupTask(task) {
cleanupTasks.push(task);
}
// SIGTERM handler: runs every registered cleanup task sequentially, exactly
// once (repeat signals during shutdown are ignored), then exits the process.
async function handleSigterm() {
  // All shutdown progress messages share the same log shape; centralize it.
  const warn = (message) => logger.log("sigterm-api", "WARN", null, null, { message });
  if (isShuttingDown) {
    warn("Shutdown already in progress, ignoring signal.");
    return;
  }
  isShuttingDown = true;
  warn("SIGTERM Received. Starting graceful shutdown.");
  try {
    // Await each task in turn so cleanup stays ordered and sequential.
    for (const cleanup of cleanupTasks) {
      warn(`Running cleanup task: ${cleanup.name}`);
      await cleanup();
    }
    warn(`All cleanup tasks completed.`);
  } catch (error) {
    logger.log("sigterm-api-error", "ERROR", null, null, { message: error.message, stack: error.stack });
  }
  process.exit(0);
}