diff --git a/server.js b/server.js
index ef480a7ea..c0c5539ab 100644
--- a/server.js
+++ b/server.js
@@ -28,7 +28,11 @@ const logger = require("./server/utils/logger");
 const { applyRedisHelpers } = require("./server/utils/redisHelpers");
 const { applyIOHelpers } = require("./server/utils/ioHelpers");
 const { redisSocketEvents } = require("./server/web-sockets/redisSocketEvents");
-const { ElastiCacheClient, DescribeCacheClustersCommand } = require("@aws-sdk/client-elasticache");
+const {
+  ElastiCacheClient,
+  DescribeCacheClustersCommand,
+  DescribeReplicationGroupsCommand
+} = require("@aws-sdk/client-elasticache");
 const { InstanceRegion } = require("./server/utils/instanceMgr");
 const StartStatusReporter = require("./server/utils/statusReporter");
 
@@ -124,26 +128,57 @@ const applyRoutes = ({ app }) => {
  * @returns {Promise}
  */
 const getRedisNodesFromAWS = async () => {
-  const client = new ElastiCacheClient({
-    region: InstanceRegion()
-  });
-
-  const params = {
-    ReplicationGroupId: process.env.REDIS_CLUSTER_ID,
-    ShowCacheNodeInfo: true
-  };
+  const client = new ElastiCacheClient({ region: InstanceRegion() });
 
   try {
-    // Fetch the cache clusters associated with the replication group
-    const command = new DescribeCacheClustersCommand(params);
-    const response = await client.send(command);
-    const cacheClusters = response.CacheClusters;
+    // Look up the replication group to discover its member cache clusters.
+    const describeReplicationGroupCommand = new DescribeReplicationGroupsCommand({
+      ReplicationGroupId: process.env.REDIS_CLUSTER_ID
+    });
+    const describeReplicationGroupResponse = await client.send(describeReplicationGroupCommand);
 
-    return cacheClusters.flatMap((cluster) =>
-      cluster.CacheNodes.map((node) => `${node.Endpoint.Address}:${node.Endpoint.Port}`)
-    );
+    // Guard the [0] access: an unknown/deleted group yields no ReplicationGroups.
+    const replicationGroups = describeReplicationGroupResponse.ReplicationGroups;
+    if (!Array.isArray(replicationGroups) || replicationGroups.length === 0) {
+      logger.log(`No replication group found for id ${process.env.REDIS_CLUSTER_ID}`, "ERROR", "redis", "api");
+      return [];
+    }
+
+    const cacheClusterIds = replicationGroups[0].MemberClusters;
+    if (!Array.isArray(cacheClusterIds) || cacheClusterIds.length === 0) {
+      logger.log(`No cache clusters found for cluster id ${process.env.REDIS_CLUSTER_ID}`, "ERROR", "redis", "api");
+      return [];
+    }
+
+    // Describe every member cluster in parallel; the lookups are independent.
+    const clusterResponses = await Promise.all(
+      cacheClusterIds.map((clusterId) => {
+        const command = new DescribeCacheClustersCommand({
+          CacheClusterId: clusterId,
+          ShowCacheNodeInfo: true
+        });
+        return client.send(command);
+      })
+    );
+
+    // Collect every node endpoint from every member cluster (not only the
+    // first node of the first cluster), matching the old flatMap behavior.
+    const nodeEndpointAddresses = [];
+    for (const response of clusterResponses) {
+      for (const cluster of response.CacheClusters || []) {
+        for (const node of cluster.CacheNodes || []) {
+          const nodeAddress = `${node.Endpoint.Address}:${node.Endpoint.Port}`;
+          logger.log(`Cluster node addresses: ${nodeAddress}`, "DEBUG", "redis", "api");
+          nodeEndpointAddresses.push(nodeAddress);
+        }
+      }
+    }
+
+    return nodeEndpointAddresses;
   } catch (err) {
-    logger.log(`Error fetching Redis nodes from AWS: ${err.message}`, "ERROR", "redis", "api");
+    logger.log("Error fetching Redis nodes from AWS", "ERROR", "redis", "api", {
+      message: err?.message,
+      stack: err?.stack
+    });
     throw err;
   }
 };