From d50f3348039d9980ce090881cfb6e9327a029752 Mon Sep 17 00:00:00 2001 From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr> Date: Mon, 9 Feb 2026 11:08:49 +0100 Subject: [PATCH 1/2] added tests to our backend --- backend/package.json | 2 +- backend/src/clients.js | 33 +++++- backend/src/services/scaler.js | 2 +- backend/tests/clusterService.test.js | 92 ++++++++++++++++ backend/tests/hashRoutes.test.js | 79 ++++++++++++++ backend/tests/scaler.test.js | 155 +++++++++++++++++++++++++++ 6 files changed, 360 insertions(+), 3 deletions(-) create mode 100644 backend/tests/clusterService.test.js create mode 100644 backend/tests/hashRoutes.test.js create mode 100644 backend/tests/scaler.test.js diff --git a/backend/package.json b/backend/package.json index 39a9e7c..5c793f0 100644 --- a/backend/package.json +++ b/backend/package.json @@ -6,7 +6,7 @@ "scripts": { "start": "node src/index.js", "dev": "nodemon src/index.js", - "test": "node tests/sample.test.js" + "test": "NODE_ENV=test node --experimental-test-coverage --test --test-coverage-include=src/**/*.js tests/**/*.test.js" }, "dependencies": { "axios": "^1.7.9", diff --git a/backend/src/clients.js b/backend/src/clients.js index 715e072..6ea9e8e 100644 --- a/backend/src/clients.js +++ b/backend/src/clients.js @@ -6,5 +6,36 @@ import Redis from "ioredis"; import Docker from "dockerode"; import { REDIS_URL, DOCKER_SOCKET } from "./config.js"; -export const redis = new Redis(REDIS_URL); +const isTest = process.env.NODE_ENV === "test"; + +/** + * Redis client + * - In normal mode: real ioredis client pointing at REDIS_URL + * - In test mode (NODE_ENV === "test"): lightweight in-memory stub so tests + * don't require a running Redis instance or a resolvable hostname. + */ +export const redis = isTest + ? 
{ + // Queue: list of pending jobs + async lpush() { + return 0; + }, + // Status / results hashes + async hset() { + return "OK"; + }, + async hget() { + return null; + }, + // Queue length + async llen() { + return 0; + }, + // In-progress jobs set size + async scard() { + return 0; + }, + } + : new Redis(REDIS_URL); + export const docker = new Docker({ socketPath: DOCKER_SOCKET }); diff --git a/backend/src/services/scaler.js b/backend/src/services/scaler.js index 126a69f..91136f5 100644 --- a/backend/src/services/scaler.js +++ b/backend/src/services/scaler.js @@ -13,7 +13,7 @@ import { let intervalId = null; // this will evaluate the current load, and decide if we need to scale up or down -async function evaluate(docker, redis) { +export async function evaluate(docker, redis) { try { const [pendingCount, inProgressCount] = await Promise.all([ redis.llen(REDIS_KEYS.JOBS_PENDING), diff --git a/backend/tests/clusterService.test.js b/backend/tests/clusterService.test.js new file mode 100644 index 0000000..f172789 --- /dev/null +++ b/backend/tests/clusterService.test.js @@ -0,0 +1,92 @@ +import test from "node:test"; +import assert from "node:assert/strict"; + +import { scaleToReplicas } from "../src/services/clusterService.js"; + +test("scaleToReplicas successfully updates the service replicas", async () => { + let updatedSpec = null; + + const docker = { + getService(id) { + assert.equal(id, "svc-1"); + return { + async update(spec) { + updatedSpec = spec; + }, + }; + }, + }; + + const workerServiceInfo = { + ID: "svc-1", + Version: { Index: 42 }, + Spec: { + Mode: { + Replicated: { + Replicas: 2, + }, + }, + }, + }; + + const result = await scaleToReplicas(docker, workerServiceInfo, 5); + + assert.deepEqual(result, { ok: true, workerReplicas: 5 }); + assert.ok(updatedSpec, "service.update should have been called"); + assert.equal(updatedSpec.version, 42); + assert.equal( + updatedSpec.Mode.Replicated.Replicas, + 5, + "replica count should be updated to desired 
value", + ); +}); + +test("scaleToReplicas returns not_swarm when Docker is not a swarm manager", async () => { + const docker = { + getService() { + return { + async update() { + throw new Error("This node is not a swarm manager"); + }, + }; + }, + }; + + const workerServiceInfo = { + ID: "svc-1", + Version: { Index: 1 }, + Spec: {}, + }; + + const result = await scaleToReplicas(docker, workerServiceInfo, 3); + + assert.deepEqual(result, { ok: false, reason: "not_swarm" }); +}); + +test("scaleToReplicas surfaces other errors", async () => { + const docker = { + getService() { + return { + async update() { + throw new Error("unexpected error"); + }, + }; + }, + }; + + const workerServiceInfo = { + ID: "svc-1", + Version: { Index: 1 }, + Spec: {}, + }; + + const result = await scaleToReplicas(docker, workerServiceInfo, 3); + + assert.deepEqual(result, { + ok: false, + reason: "error", + error: "unexpected error", + }); +} +); + diff --git a/backend/tests/hashRoutes.test.js b/backend/tests/hashRoutes.test.js new file mode 100644 index 0000000..9737e4c --- /dev/null +++ b/backend/tests/hashRoutes.test.js @@ -0,0 +1,79 @@ +import test, { mock } from "node:test"; +import assert from "node:assert/strict"; + +import hashRouter from "../src/routes/hash.js"; +import { redis } from "../src/clients.js"; + +function findRouteHandler(router, method, path) { + const targetMethod = method.toLowerCase(); + + for (const layer of router.stack) { + const route = layer.route; + if (!route) continue; + + if ( + route.path === path && + route.methods && + route.methods[targetMethod] === true + ) { + // Simple routes have a single layer in route.stack + const stack = route.stack || []; + if (stack.length > 0) { + return stack[0].handle; + } + } + } + + throw new Error(`Route handler for [${method.toUpperCase()} ${path}] not found`); +} + +function createMockRes() { + return { + statusCode: 200, + body: undefined, + status(code) { + this.statusCode = code; + return this; + }, + 
json(payload) { + this.body = payload; + return this; + }, + }; +} + +test("POST /hash/manual validates that hash is required", async () => { + const handler = findRouteHandler(hashRouter, "post", "/manual"); + + const req = { body: {} }; + const res = createMockRes(); + + const lpushMock = mock.method(redis, "lpush"); + const hsetMock = mock.method(redis, "hset"); + + await handler(req, res); + + assert.equal(res.statusCode, 400); + assert.deepEqual(res.body, { error: "hash is required" }); + assert.equal(lpushMock.mock.callCount(), 0); + assert.equal(hsetMock.mock.callCount(), 0); +}); + +test("POST /hash/manual enqueues a job and returns 202 with an id", async () => { + const handler = findRouteHandler(hashRouter, "post", "/manual"); + + const req = { body: { hash: "abc123" } }; + const res = createMockRes(); + + const lpushMock = mock.method(redis, "lpush", async () => 1); + const hsetMock = mock.method(redis, "hset", async () => "OK"); + + await handler(req, res); + + assert.equal(res.statusCode, 202); + assert.ok(res.body.id, "response should contain a job id"); + + assert.equal(lpushMock.mock.callCount(), 1); + assert.equal(hsetMock.mock.callCount(), 1); +}); + diff --git a/backend/tests/scaler.test.js b/backend/tests/scaler.test.js new file mode 100644 index 0000000..9a70a0d --- /dev/null +++ b/backend/tests/scaler.test.js @@ -0,0 +1,155 @@ +import test from "node:test"; +import assert from "node:assert/strict"; + +import { evaluate } from "../src/services/scaler.js"; +import { + WORKER_SERVICE_NAME, + SCALER_MAX_REPLICAS, + SCALER_MIN_REPLICAS, + SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE, + SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW, +} from "../src/config.js"; + +function createDocker({ currentReplicas, onUpdate }) { + return { + async listServices() { + return [ + { + ID: "svc-1", + Version: { Index: 1 }, + Spec: { + Name: WORKER_SERVICE_NAME, + Mode: { + Replicated: { + Replicas: currentReplicas, + }, + }, + }, + }, + ]; + }, + getService() { + 
return { + async update(spec) { + onUpdate(spec.Mode.Replicated.Replicas, spec); + }, + }; + }, + }; +} + +test("evaluate scales up when jobs per worker is above threshold", async () => { + const currentReplicas = 2; + const updates = []; + + const docker = createDocker({ + currentReplicas, + onUpdate: (replicas) => updates.push(replicas), + }); + + const pendingJobs = + (SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE + 0.1) * currentReplicas; + + const redis = { + async llen() { + return pendingJobs; + }, + async scard() { + return 0; + }, + }; + + await evaluate(docker, redis); + + assert.equal(updates.length, 1, "should perform a single scale operation"); + const expected = Math.min(SCALER_MAX_REPLICAS, currentReplicas + 1); + assert.equal( + updates[0], + expected, + "should scale up by one replica (respecting max)", + ); +}); + +test("evaluate scales down when jobs per worker is below or equal threshold and no jobs in progress", async () => { + const currentReplicas = 3; + const updates = []; + + const docker = createDocker({ + currentReplicas, + onUpdate: (replicas) => updates.push(replicas), + }); + + const pendingJobs = + (SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW - 0.1) * currentReplicas; + + const redis = { + async llen() { + return pendingJobs; + }, + async scard() { + return 0; + }, + }; + + await evaluate(docker, redis); + + assert.equal(updates.length, 1, "should perform a single scale operation"); + const expected = Math.max(SCALER_MIN_REPLICAS, currentReplicas - 1); + assert.equal( + updates[0], + expected, + "should scale down by one replica (respecting min)", + ); +}); + +test("evaluate does not scale down when there are jobs in progress", async () => { + const currentReplicas = 3; + const updates = []; + + const docker = createDocker({ + currentReplicas, + onUpdate: (replicas) => updates.push(replicas), + }); + + const pendingJobs = + (SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW - 0.1) * currentReplicas; + + const redis = { + async llen() { + 
return pendingJobs;
+    },
+    async scard() {
+      return 5;
+    },
+  };
+
+  await evaluate(docker, redis);
+
+  assert.equal(
+    updates.length,
+    0,
+    "should not scale down when jobs are in progress",
+  );
+});
+
+test("evaluate is a no-op when worker service is not found", async () => {
+  // docker that returns no services
+  const docker = {
+    async listServices() {
+      return [];
+    },
+  };
+
+  const redis = {
+    async llen() {
+      return 10;
+    },
+    async scard() {
+      return 0;
+    },
+  };
+
+  // Should not throw
+  await assert.doesNotReject(() => evaluate(docker, redis));
+});
-- 
GitLab


From bb1f2b0d71b4a156f6a8587fdc9bb0a61bb50189 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Mon, 9 Feb 2026 11:09:16 +0100
Subject: [PATCH 2/2] added a gitlab CI file and a coverage badge into our
 readme, updated todos

---
 .gitlab-ci.yml | 35 +++++++++++++++++++++++++++++++++++
 README.md      |  2 ++
 todo.txt       |  6 ++++--
 3 files changed, 41 insertions(+), 2 deletions(-)
 create mode 100644 .gitlab-ci.yml

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..26877bc
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,35 @@
+# GitLab CI – build, test, and Docker image build
+# Mirrors the GitHub Actions workflow (build-and-test + docker-build)
+
+stages:
+  - test
+  - docker
+
+variables:
+  NODE_VERSION: "22"
+
+# Backend + frontend: install deps, run tests, build frontend
+build-and-test:
+  stage: test
+  image: node:${NODE_VERSION}-alpine
+  script:
+    - cd backend && npm ci && npm test
+    - cd frontend && npm ci && npm run build
+  coverage: '/all files\s+\|\s+(\d+\.\d+)\s+\|/'
+
+# Build Docker images (runs after test stage passes)
+docker-build:
+  stage: docker
+  image: docker:24
+  services:
+    - docker:24-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  before_script:
+    - until docker info 2>/dev/null; do echo "Waiting for Docker..."; sleep 1; done
+  script:
+    - docker build -t md5-swarm-backend:ci ./backend
+    - docker build -t md5-swarm-worker:ci ./worker
+    - docker build -t md5-swarm-frontend:ci ./frontend
+  needs:
+    - build-and-test
diff --git a/README.md b/README.md
index 257eb41..ce1e095 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
 ## Projet Docker Swarm – Bruteforce MD5
 
+![coverage](https://gitlab.com/your-group/your-project/badges/main/coverage.svg?job=build-and-test)
+
 Ce dépôt contient une petite infrastructure Docker Swarm capable de bruteforcer des hash MD5 de manière scalable, ainsi qu’une application web pour piloter et monitorer le cluster.
 
 ### Sous-projets
diff --git a/todo.txt b/todo.txt
index d4dfe2d..be2ecf0 100644
--- a/todo.txt
+++ b/todo.txt
@@ -1,6 +1,8 @@
 * implement the CI using gitlab tool chain.
-
+* add tests
+* add a cache of the already computed hashes, using redis since we already integrated it in the backend.
+* currently when we refresh the page, all the history gets cleared out, that is not good.
+* write the flow of the project to flow.txt or maybe just record my screen and send it to the teacher.
 * review how we use async/await, and if we do really need those awaits everywhere ?
-
 * review the code, simplify and simplify!
-- 
GitLab