diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..26877bcbbaa43c8d0324d9ae84988a557a2d931b
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,35 @@
+# GitLab CI – build, test, and Docker image build
+# Mirrors the GitHub Actions workflow (build-and-test + docker-build)
+
+stages:
+  - test
+  - docker
+
+variables:
+  NODE_VERSION: "22"
+
+# Backend + frontend: install deps, run tests, build frontend
+build-and-test:
+  stage: test
+  image: node:${NODE_VERSION}-alpine
+  script:
+    - cd backend && npm ci && npm test
+    - cd frontend && npm ci && npm run build
+  coverage: '/all files\s+\|\s+(\d+\.\d+)\s+\|/'
+
+# Build Docker images (runs after test stage passes)
+docker-build:
+  stage: docker
+  image: docker:24
+  services:
+    - docker:24-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  before_script:
+    - until docker info 2>/dev/null; do echo "Waiting for Docker..."; sleep 1; done
+  script:
+    - docker build -t md5-swarm-backend:ci ./backend
+    - docker build -t md5-swarm-worker:ci ./worker
+    - docker build -t md5-swarm-frontend:ci ./frontend
+  needs:
+    - build-and-test
diff --git a/README.md b/README.md
index 257eb41fefd4bd5f88529e4aec5d7a8d81094246..ce1e0951b43a482e12e7972251d5f70e7a2a07c4 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
 ## Projet Docker Swarm – Bruteforce MD5
 
+![coverage](https://gitlab.com/your-group/your-project/badges/main/coverage.svg?job=build-and-test)
+
 Ce dépôt contient une petite infrastructure Docker Swarm capable de bruteforcer des hash MD5 de manière scalable, ainsi qu’une application web pour piloter et monitorer le cluster.
 
 ### Sous-projets
diff --git a/backend/package.json b/backend/package.json
index 39a9e7c588196a08bcc8706a8182cd04633173b4..5c793f01e1921d5beb90f27f9b91b852ba30e894 100644
--- a/backend/package.json
+++ b/backend/package.json
@@ -6,7 +6,7 @@
   "scripts": {
     "start": "node src/index.js",
     "dev": "nodemon src/index.js",
-    "test": "node tests/sample.test.js"
+    "test": "NODE_ENV=test node --experimental-test-coverage --test --test-coverage-include=src/**/*.js tests/**/*.test.js"
   },
   "dependencies": {
     "axios": "^1.7.9",
diff --git a/backend/src/clients.js b/backend/src/clients.js
index 715e0721efc6b47f10115216abe24dfbb5cee6a9..6ea9e8e41471724a06a4dc85831fb15a07b549fb 100644
--- a/backend/src/clients.js
+++ b/backend/src/clients.js
@@ -6,5 +6,36 @@ import Redis from "ioredis";
 import Docker from "dockerode";
 import { REDIS_URL, DOCKER_SOCKET } from "./config.js";
 
-export const redis = new Redis(REDIS_URL);
+const isTest = process.env.NODE_ENV === "test";
+
+/**
+ * Redis client
+ * - In normal mode: real ioredis client pointing at REDIS_URL
+ * - In test mode (NODE_ENV === "test"): lightweight in-memory stub so tests
+ *   don't require a running Redis instance or a resolvable hostname.
+ */
+export const redis = isTest
+  ? {
+      // Queue: list of pending jobs
+      async lpush() {
+        return 0;
+      },
+      // Status / results hashes
+      async hset() {
+        return "OK";
+      },
+      async hget() {
+        return null;
+      },
+      // Queue length
+      async llen() {
+        return 0;
+      },
+      // In-progress jobs set size
+      async scard() {
+        return 0;
+      },
+    }
+  : new Redis(REDIS_URL);
+
 export const docker = new Docker({ socketPath: DOCKER_SOCKET });
diff --git a/backend/src/services/scaler.js b/backend/src/services/scaler.js
index 126a69f6186621d8e03c6f9d20409ede381865e7..91136f51b545f6ee7c0903aff4cab05ae50682bc 100644
--- a/backend/src/services/scaler.js
+++ b/backend/src/services/scaler.js
@@ -13,7 +13,7 @@ import {
 let intervalId = null;
 
 // this will evaluate the current load, and decide if we need to scale up or down
-async function evaluate(docker, redis) {
+export async function evaluate(docker, redis) {
   try {
     const [pendingCount, inProgressCount] = await Promise.all([
       redis.llen(REDIS_KEYS.JOBS_PENDING),
diff --git a/backend/tests/clusterService.test.js b/backend/tests/clusterService.test.js
new file mode 100644
index 0000000000000000000000000000000000000000..f172789638b990f7ce4ae92aa0fc1eaf4b864550
--- /dev/null
+++ b/backend/tests/clusterService.test.js
@@ -0,0 +1,92 @@
+import test from "node:test";
+import assert from "node:assert/strict";
+
+import { scaleToReplicas } from "../src/services/clusterService.js";
+
+test("scaleToReplicas successfully updates the service replicas", async () => {
+  let updatedSpec = null;
+
+  const docker = {
+    getService(id) {
+      assert.equal(id, "svc-1");
+      return {
+        async update(spec) {
+          updatedSpec = spec;
+        },
+      };
+    },
+  };
+
+  const workerServiceInfo = {
+    ID: "svc-1",
+    Version: { Index: 42 },
+    Spec: {
+      Mode: {
+        Replicated: {
+          Replicas: 2,
+        },
+      },
+    },
+  };
+
+  const result = await scaleToReplicas(docker, workerServiceInfo, 5);
+
+  assert.deepEqual(result, { ok: true, workerReplicas: 5 });
+  assert.ok(updatedSpec, "service.update should have been called");
+  assert.equal(updatedSpec.version, 42);
+  assert.equal(
+    updatedSpec.Mode.Replicated.Replicas,
+    5,
+    "replica count should be updated to desired value",
+  );
+});
+
+test("scaleToReplicas returns not_swarm when Docker is not a swarm manager", async () => {
+  const docker = {
+    getService() {
+      return {
+        async update() {
+          throw new Error("This node is not a swarm manager");
+        },
+      };
+    },
+  };
+
+  const workerServiceInfo = {
+    ID: "svc-1",
+    Version: { Index: 1 },
+    Spec: {},
+  };
+
+  const result = await scaleToReplicas(docker, workerServiceInfo, 3);
+
+  assert.deepEqual(result, { ok: false, reason: "not_swarm" });
+});
+
+test("scaleToReplicas surfaces other errors", async () => {
+  const docker = {
+    getService() {
+      return {
+        async update() {
+          throw new Error("unexpected error");
+        },
+      };
+    },
+  };
+
+  const workerServiceInfo = {
+    ID: "svc-1",
+    Version: { Index: 1 },
+    Spec: {},
+  };
+
+  const result = await scaleToReplicas(docker, workerServiceInfo, 3);
+
+  assert.deepEqual(result, {
+    ok: false,
+    reason: "error",
+    error: "unexpected error",
+  });
+}
+);
+
diff --git a/backend/tests/hashRoutes.test.js b/backend/tests/hashRoutes.test.js
new file mode 100644
index 0000000000000000000000000000000000000000..9737e4c18af827a9a661292013ba3ac867381659
--- /dev/null
+++ b/backend/tests/hashRoutes.test.js
@@ -0,0 +1,79 @@
+import test, { mock } from "node:test";
+import assert from "node:assert/strict";
+
+import hashRouter from "../src/routes/hash.js";
+import { redis } from "../src/clients.js";
+
+function findRouteHandler(router, method, path) {
+  const targetMethod = method.toLowerCase();
+
+  for (const layer of router.stack) {
+    const route = layer.route;
+    if (!route) continue;
+
+    if (
+      route.path === path &&
+      route.methods &&
+      route.methods[targetMethod] === true
+    ) {
+      // Simple routes have a single layer in route.stack
+      const stack = route.stack || [];
+      if (stack.length > 0) {
+        return stack[0].handle;
+      }
+    }
+  }
+
+  throw new Error(`Route handler for [${method.toUpperCase()} ${path}] not found`);
+}
+
+function createMockRes() {
+  return {
+    statusCode: 200,
+    body: undefined,
+    status(code) {
+      this.statusCode = code;
+      return this;
+    },
+    json(payload) {
+      this.body = payload;
+      return this;
+    },
+  };
+}
+
+test("POST /hash/manual validates that hash is required", async () => {
+  const handler = findRouteHandler(hashRouter, "post", "/manual");
+
+  const req = { body: {} };
+  const res = createMockRes();
+
+  const lpushMock = mock.method(redis, "lpush");
+  const hsetMock = mock.method(redis, "hset");
+
+  await handler(req, res);
+
+  assert.equal(res.statusCode, 400);
+  assert.deepEqual(res.body, { error: "hash is required" });
+  assert.equal(lpushMock.mock.callCount(), 0);
+  assert.equal(hsetMock.mock.callCount(), 0);
+});
+
+test("POST /hash/manual enqueues a job and returns 202 with an id", async () => {
+  const handler = findRouteHandler(hashRouter, "post", "/manual");
+
+  const req = { body: { hash: "abc123" } };
+  const res = createMockRes();
+
+  const lpushMock = mock.method(redis, "lpush", async () => 1);
+  const hsetMock = mock.method(redis, "hset", async () => "OK");
+
+  await handler(req, res);
+
+  assert.equal(res.statusCode, 202);
+  assert.ok(res.body.id, "response should contain a job id");
+
+  assert.equal(lpushMock.mock.callCount(), 1);
+  assert.equal(hsetMock.mock.callCount(), 1);
+});
+
diff --git a/backend/tests/scaler.test.js b/backend/tests/scaler.test.js
new file mode 100644
index 0000000000000000000000000000000000000000..9a70a0d4b91e77d0a6227c3787632d05ab92fa48
--- /dev/null
+++ b/backend/tests/scaler.test.js
@@ -0,0 +1,155 @@
+import test from "node:test";
+import assert from "node:assert/strict";
+
+import { evaluate } from "../src/services/scaler.js";
+import {
+  WORKER_SERVICE_NAME,
+  SCALER_MAX_REPLICAS,
+  SCALER_MIN_REPLICAS,
+  SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE,
+  SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW,
+} from "../src/config.js";
+
+function createDocker({ currentReplicas, onUpdate }) {
+  return {
+    async listServices() {
+      return [
+        {
+          ID: "svc-1",
+          Version: { Index: 1 },
+          Spec: {
+            Name: WORKER_SERVICE_NAME,
+            Mode: {
+              Replicated: {
+                Replicas: currentReplicas,
+              },
+            },
+          },
+        },
+      ];
+    },
+    getService() {
+      return {
+        async update(spec) {
+          onUpdate(spec.Mode.Replicated.Replicas, spec);
+        },
+      };
+    },
+  };
+}
+
+test("evaluate scales up when jobs per worker is above threshold", async () => {
+  const currentReplicas = 2;
+  const updates = [];
+
+  const docker = createDocker({
+    currentReplicas,
+    onUpdate: (replicas) => updates.push(replicas),
+  });
+
+  const pendingJobs =
+    (SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE + 0.1) * currentReplicas;
+
+  const redis = {
+    async llen() {
+      return pendingJobs;
+    },
+    async scard() {
+      return 0;
+    },
+  };
+
+  await evaluate(docker, redis);
+
+  assert.equal(updates.length, 1, "should perform a single scale operation");
+  const expected = Math.min(SCALER_MAX_REPLICAS, currentReplicas + 1);
+  assert.equal(
+    updates[0],
+    expected,
+    "should scale up by one replica (respecting max)",
+  );
+});
+
+test("evaluate scales down when jobs per worker is below or equal threshold and no jobs in progress", async () => {
+  const currentReplicas = 3;
+  const updates = [];
+
+  const docker = createDocker({
+    currentReplicas,
+    onUpdate: (replicas) => updates.push(replicas),
+  });
+
+  const pendingJobs =
+    (SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW - 0.1) * currentReplicas;
+
+  const redis = {
+    async llen() {
+      return pendingJobs;
+    },
+    async scard() {
+      return 0;
+    },
+  };
+
+  await evaluate(docker, redis);
+
+  assert.equal(updates.length, 1, "should perform a single scale operation");
+  const expected = Math.max(SCALER_MIN_REPLICAS, currentReplicas - 1);
+  assert.equal(
+    updates[0],
+    expected,
+    "should scale down by one replica (respecting min)",
+  );
+});
+
+test("evaluate does not scale down when there are jobs in progress", async () => {
+  const currentReplicas = 3;
+  const updates = [];
+
+  const docker = createDocker({
+    currentReplicas,
+    onUpdate: (replicas) => updates.push(replicas),
+  });
+
+  const pendingJobs =
+    (SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW - 0.1) * currentReplicas;
+
+  const redis = {
+    async llen() {
+      return pendingJobs;
+    },
+    async scard() {
+      return 5;
+    },
+  };
+
+  await evaluate(docker, redis);
+
+  assert.equal(
+    updates.length,
+    0,
+    "should not scale down when jobs are in progress",
+  );
+});
+
+test("evaluate is a no-op when worker service is not found", async () => {
+  // docker that returns no services
+  const docker = {
+    async listServices() {
+      return [];
+    },
+  };
+
+  const redis = {
+    async llen() {
+      return 10;
+    },
+    async scard() {
+      return 0;
+    },
+  };
+
+  // Should not throw
+  await assert.doesNotReject(() => evaluate(docker, redis));
+});
+
diff --git a/todo.txt b/todo.txt
index d4dfe2d5a114bb0557902c7e3cc2927fb0d52dad..be2ecf0f11d7467eed03aa42b90366103b423882 100644
--- a/todo.txt
+++ b/todo.txt
@@ -1,6 +1,8 @@
 * implement the CI using gitlab tool chain.
-
+* add tests
+* add a cache of the already computed hashes, using redis since we already integrated it in the backend.
+* currently when we refresh the page, all the history gets cleared out, that is not good.
+* write the flow of the projet to flow.txt or maybe just record my screen and send it to the teacher.
 * review how we use async/await, and if we do really need those awaits everywhere ?
-
 * review the code, simplify and simplify!
 