diff --git a/worker/src/index.js b/worker/src/index.js
index a9d7f22..6b6a60d 100644
--- a/worker/src/index.js
+++ b/worker/src/index.js
@@ -33,13 +33,16 @@ async function bruteforce(hash) {
}
async function processJobs() {
+ let job;
while (true) {
+ job = null;
try {
const res = await redis.brpop("jobs:pending", 0);
if (!res) continue;
const [, payload] = res;
- const job = JSON.parse(payload);
+ job = JSON.parse(payload);
+ await redis.sadd("jobs:in_progress", job.id);
const result = await bruteforce(job.hash);
@@ -58,11 +61,32 @@ async function processJobs() {
job.id,
JSON.stringify({ status: "done" })
);
+ await redis.srem("jobs:in_progress", job.id);
console.log(`Job ${job.id} processed, found=${result.found}`);
} catch (err) {
console.error("Worker error:", err);
- await new Promise((resolve) => setTimeout(resolve, 1000));
+ if (job?.id) {
+ await redis.hset(
+ "jobs:results",
+ job.id,
+ JSON.stringify({
+ id: job.id,
+ hash: job.hash,
+ found: false,
+ plaintext: null,
+ error: true,
+ completedAt: Date.now(),
+ })
+ );
+ await redis.hset(
+ "jobs:status",
+ job.id,
+ JSON.stringify({ status: "failed" })
+ );
+ await redis.srem("jobs:in_progress", job.id);
+ }
+ console.log(`Job ${job?.id ?? "?"} failed (error)`);
}
}
}
--
GitLab
From dc3abcb98f5731f5711d8738bbf57939329aa276 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Fri, 6 Feb 2026 21:51:32 +0100
Subject: [PATCH 04/19] Added a todo file that explains the upcoming changes
and updates
---
todo.txt | 9 +++++++++
1 file changed, 9 insertions(+)
create mode 100644 todo.txt
diff --git a/todo.txt b/todo.txt
new file mode 100644
index 0000000..3335881
--- /dev/null
+++ b/todo.txt
@@ -0,0 +1,9 @@
+* read the code to review if the queue is properly managed and if the UI state is proprely handled as well. Then simplify.
+* make the UI a bit nicer, add timers and loaders, (ex; refresh cluster in 2s)
+* add a stepper to the UI instead of our radio buttons
+* add an export button to export the inputs and the outputs of automatic hash generation and submitting, make the input and
+ output files be seperate this way the teacher can inject the inputs to his program and check if the outputs match
+* implement the CI using gitlab tool chain.
+
+* review the code, simplify and simplify!
+
--
GitLab
From 3497789fc3d6388345979f167db68dd7a3554d39 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 15:12:17 +0100
Subject: [PATCH 05/19] Review the backend code. Added some notes/todos for
later on. Small refactoring, and retested the whole system. Everything is
still working just fine!
---
backend/src/index.js | 12 +++++++++++-
backend/src/routes/cluster.js | 24 +++++++++++++++---------
backend/src/routes/hash.js | 15 +++++++++++----
backend/src/services/clusterService.js | 19 +++++--------------
backend/src/services/scaler.js | 15 +++++++++++----
5 files changed, 53 insertions(+), 32 deletions(-)
diff --git a/backend/src/index.js b/backend/src/index.js
index 0d9982b..5e2c0ec 100644
--- a/backend/src/index.js
+++ b/backend/src/index.js
@@ -7,7 +7,17 @@ import { startScaler } from "./services/scaler.js";
const app = express();
const port = process.env.PORT || 8080;
-app.use(cors({origin: "*"}));
+/**
+ * @todo:
+ * use helmet for server wide input sanitization
+ */
+
+
+/**@note */
+// this is too permissive, but it is not a real-world project, so... we do not care as much!
+// ideally, the best thing to do is to pinpoint the exact address of the frontend server,
+// but this also means that we have to pass it as an env var for the backend server as well, (we can't hardcode it)
+app.use(cors({origin: "*"}));
app.use(express.json());
app.get("/health", (req, res) => {
diff --git a/backend/src/routes/cluster.js b/backend/src/routes/cluster.js
index e1a012e..c6be2e5 100644
--- a/backend/src/routes/cluster.js
+++ b/backend/src/routes/cluster.js
@@ -21,18 +21,18 @@ router.get("/state", async (req, res) => {
redis.llen("jobs:pending"),
]);
- const workerService = services.find(
+ const workerServiceInfo = services.find(
(s) => s.Spec && s.Spec.Name === workerServiceName
);
let workerReplicas = 0;
if (
- workerService &&
- workerService.Spec &&
- workerService.Spec.Mode &&
- workerService.Spec.Mode.Replicated
+ workerServiceInfo &&
+ workerServiceInfo.Spec &&
+ workerServiceInfo.Spec.Mode &&
+ workerServiceInfo.Spec.Mode.Replicated
) {
- workerReplicas = workerService.Spec.Mode.Replicated.Replicas || 0;
+ workerReplicas = workerServiceInfo.Spec.Mode.Replicated.Replicas || 0;
}
res.json({
@@ -48,14 +48,19 @@ router.get("/state", async (req, res) => {
// POST /cluster/scale - scale up/down des workers
router.post("/scale", async (req, res) => {
const desired = Number(req.body.replicas);
+
if (Number.isNaN(desired) || desired < 0) {
- return res.status(400).json({ error: "invalid replicas count" });
+ return res.status(400).json(
+ {
+ error: "invalid replicas count, replicas count should be a numerical value > 0."
+ }
+ );
}
const result = await scaleToReplicas(docker, workerServiceName, desired);
if (result.ok) {
- return res.json({ workerReplicas: result.workerReplicas });
+ return res.status(200).json({ workerReplicas: result.workerReplicas });
}
if (result.reason === "service_not_found") {
return res.status(404).json({ error: "worker service not found" });
@@ -63,8 +68,9 @@ router.post("/scale", async (req, res) => {
if (result.reason === "not_swarm") {
return res.status(503).json({ error: "Docker is not in Swarm mode" });
}
+
console.error("Error scaling workers", result.error);
- res.status(500).json({ error: "failed to scale workers" });
+ return res.status(500).json({ error: "failed to scale workers" });
});
export default router;
diff --git a/backend/src/routes/hash.js b/backend/src/routes/hash.js
index b329679..1ef1425 100644
--- a/backend/src/routes/hash.js
+++ b/backend/src/routes/hash.js
@@ -7,7 +7,7 @@ const router = express.Router();
const redisUrl = process.env.REDIS_URL || "redis://redis:6379";
const redis = new Redis(redisUrl);
-// POST /hash/manual - envoyer un hash MD5 à bruteforcer
+// POST /hash/manual - queing the hash to be bruteforced by the worker
router.post("/manual", async (req, res) => {
const { hash } = req.body;
if (!hash) {
@@ -17,21 +17,28 @@ router.post("/manual", async (req, res) => {
const jobId = uuidv4();
const job = { id: jobId, hash, createdAt: Date.now() };
+ /**@note */
+ // Consider using Promise.then rather then two awaits, this
+ // will leave the nodejs runtime schedule them when it is
+ // appropriate regarding the current process load. Double
+ // awaits forces the event loop to consider these over other
+ // stuff, it is okey for critical stuff, but this is not !
await redis.lpush("jobs:pending", JSON.stringify(job));
await redis.hset("jobs:status", jobId, JSON.stringify({ status: "queued" }));
- res.status(202).json({ id: jobId });
+ return res.status(202).json({ id: jobId });
});
-// GET /hash/:id - récupérer le résultat d’un bruteforce
+// GET /hash/:id - get the status of a specific job
router.get("/:id", async (req, res) => {
const { id } = req.params;
const raw = await redis.hget("jobs:results", id);
+
if (!raw) {
return res.status(404).json({ error: "result not found" });
}
- res.json(JSON.parse(raw));
+ return res.json(JSON.parse(raw));
});
export default router;
diff --git a/backend/src/services/clusterService.js b/backend/src/services/clusterService.js
index 1c454b1..aa09dde 100644
--- a/backend/src/services/clusterService.js
+++ b/backend/src/services/clusterService.js
@@ -6,25 +6,16 @@
* @param {number} desiredReplicas - Desired replica count
* @returns {Promise<{ ok: boolean, workerReplicas?: number, reason?: string, error?: string }>}
*/
-export async function scaleToReplicas(docker, workerServiceName, desiredReplicas) {
+export async function scaleToReplicas(docker, workerServiceInfo, desiredReplicas) {
try {
- const services = await docker.listServices();
- const serviceInfo = services.find(
- (s) => s.Spec && s.Spec.Name === workerServiceName
- );
-
- if (!serviceInfo) {
- return { ok: false, reason: "service_not_found" };
- }
-
- const service = docker.getService(serviceInfo.ID);
- const spec = { ...serviceInfo.Spec };
+ const workerService = docker.getService(workerServiceInfo.ID);
+ const spec = { ...workerServiceInfo.Spec };
spec.Mode = spec.Mode || {};
spec.Mode.Replicated = spec.Mode.Replicated || {};
spec.Mode.Replicated.Replicas = desiredReplicas;
- await service.update({
- version: serviceInfo.Version.Index,
+ await workerService.update({
+ version: workerServiceInfo.Version.Index,
...spec,
});
diff --git a/backend/src/services/scaler.js b/backend/src/services/scaler.js
index c0ac122..818c37f 100644
--- a/backend/src/services/scaler.js
+++ b/backend/src/services/scaler.js
@@ -14,6 +14,7 @@ const SCALER_SCALE_DOWN_WHEN_JOBS_BELOW = Number(process.env.SCALER_SCALE_DOWN_W
let intervalId = null;
+// this will evaluate the current load, and decide if we need to scale up or down
async function evaluate(docker, redis) {
try {
const [pendingCount, inProgressCount] = await Promise.all([
@@ -32,19 +33,25 @@ async function evaluate(docker, redis) {
throw err;
}
- const workerService = services.find(
+ const workerServiceInfo = services.find(
(s) => s.Spec && s.Spec.Name === workerServiceName
);
+ if (!workerServiceInfo) {
+ console.warn("Scaler: scale failed, service_not_found.");
+ return;
+ }
+
let current = 0;
- if (workerService?.Spec?.Mode?.Replicated != null) {
- current = workerService.Spec.Mode.Replicated.Replicas ?? 0;
+ if (workerServiceInfo?.Spec?.Mode?.Replicated != null) {
+ current = workerServiceInfo.Spec.Mode.Replicated.Replicas ?? 0;
}
let desired = -1;
if (pendingCount >= SCALER_SCALE_UP_WHEN_JOBS_ABOVE) {
desired = Math.min(SCALER_MAX_REPLICAS, current + 1);
+
} else if (
pendingCount <= SCALER_SCALE_DOWN_WHEN_JOBS_BELOW &&
inProgressCount === 0
@@ -54,7 +61,7 @@ async function evaluate(docker, redis) {
if (desired === -1) return;
- const result = await scaleToReplicas(docker, workerServiceName, desired);
+ const result = await scaleToReplicas(docker, workerServiceInfo, desired);
if (result.ok) {
console.log(`Scaler: scaled workers from ${current} to ${desired} (pending jobs: ${pendingCount})`);
--
GitLab
From dd3c638dfb2fc0cde033b1adc21771c8825f8784 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 15:51:42 +0100
Subject: [PATCH 06/19] Reviewed the worker's script, refactored, added some
comments and notes, retested the whole system; everything works just fine!
---
worker/src/index.js | 55 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/worker/src/index.js b/worker/src/index.js
index 6b6a60d..2987da1 100644
--- a/worker/src/index.js
+++ b/worker/src/index.js
@@ -10,11 +10,31 @@ const maxLength = Number(process.env.MAX_LENGTH || 4);
async function bruteforce(hash) {
const start = Date.now();
+
+ // this is a generator, which is a little bit advanced,
+ // but the idea is simple, we basically go over all
+ // possible words in an on-demand like fashion, this
+ // allows us to have a lazy computation workflow that
+ // allows us to satisfy our need without pre-computing
+ // all the possible words before hand which requires
+ // much more memory to store them all!
+
+ // If you draw the execution path of this generator
+ // function, you'll see that the recursive calls generate
+ // the words in a tree like fashion.
function* generateStrings(maxLen, prefix = "") {
+ // base case
+ if (prefix.length === maxLen){
+ yield prefix;
+ return;
+ };
+
+ // yield current state/word
if (prefix.length > 0) {
yield prefix;
}
- if (prefix.length === maxLen) return;
+
+ // generate next words
for (const ch of charset) {
yield* generateStrings(maxLen, prefix + ch);
}
@@ -32,20 +52,34 @@ async function bruteforce(hash) {
return { found: false, plaintext: null, elapsedMs };
}
+
+// this is the main function of the worker
async function processJobs() {
let job;
+
while (true) {
+
job = null;
+
try {
+
+ // poll a pending job from the queue
const res = await redis.brpop("jobs:pending", 0);
if (!res) continue;
const [, payload] = res;
job = JSON.parse(payload);
+
+ // register its id in the in_progress queue
+ // this is important insights for the scaler!
await redis.sadd("jobs:in_progress", job.id);
+
+ // solve the job hash
const result = await bruteforce(job.hash);
+
+ // save the results
await redis.hset(
"jobs:results",
job.id,
@@ -56,17 +90,31 @@ async function processJobs() {
completedAt: Date.now(),
})
);
+
+ // save the status
await redis.hset(
"jobs:status",
job.id,
JSON.stringify({ status: "done" })
);
+
+ // unregister the job id from the in_progress
+ // so now worker goes idle, which is important
+ // info for the scaler
await redis.srem("jobs:in_progress", job.id);
console.log(`Job ${job.id} processed, found=${result.found}`);
+
+
} catch (err) {
console.error("Worker error:", err);
+
+
+      // check if the error occurred after the job polling
+ // if that is true, then job should not be undefined
if (job?.id) {
+
+ // register the results after error
await redis.hset(
"jobs:results",
job.id,
@@ -79,13 +127,18 @@ async function processJobs() {
completedAt: Date.now(),
})
);
+
+ // register the status after error
await redis.hset(
"jobs:status",
job.id,
JSON.stringify({ status: "failed" })
);
+
+        // do not forget to remove it from the in_progress queue
await redis.srem("jobs:in_progress", job.id);
}
+
console.log(`Job ${job?.id ?? "?"} failed (error)`);
}
}
--
GitLab
From c1f815b2e7451bf5173aabf8f142b35f9100949e Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 16:44:05 +0100
Subject: [PATCH 07/19] Reviewed the frontend code, added some comments and
notes, and a small logic change between the frontend and the backend; now we
do not get the noisy 404 error in the UI console when the requested job is
still in queue — instead we return 200 with a payload containing status=queued
---
backend/src/routes/hash.js | 3 ++-
frontend/src/App.jsx | 18 ++++++++----------
2 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/backend/src/routes/hash.js b/backend/src/routes/hash.js
index 1ef1425..d59590d 100644
--- a/backend/src/routes/hash.js
+++ b/backend/src/routes/hash.js
@@ -35,7 +35,8 @@ router.get("/:id", async (req, res) => {
const raw = await redis.hget("jobs:results", id);
if (!raw) {
- return res.status(404).json({ error: "result not found" });
+ const jobStatus = await redis.hget("jobs:status", id);
+ return res.status(200).json(JSON.parse(jobStatus));
}
return res.json(JSON.parse(raw));
diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
index c0a212e..0a314b4 100644
--- a/frontend/src/App.jsx
+++ b/frontend/src/App.jsx
@@ -2,16 +2,13 @@ import React, { useEffect, useState } from "react";
import CryptoJS from "crypto-js";
const MODES = {
+//mode: delay(ms)
gentle: 5000,
normal: 2000,
aggressive: 500,
};
async function apiRequest(path, options = {}) {
- console.log("API_URL: ", import.meta.env.VITE_API_URL);
- console.log("path: ", path);
- console.log("options: ", options);
-
const res = await fetch(`${import.meta.env.VITE_API_URL || "http://localhost:8080"}${path}`, {
headers: { "Content-Type": "application/json" },
...options,
@@ -30,7 +27,6 @@ function App() {
const [jobs, setJobs] = useState([]);
const [clusterState, setClusterState] = useState(null);
- // Envoi manuel
const sendManual = async () => {
if (!hashInput) return;
try {
@@ -59,10 +55,15 @@ function App() {
// Trigger the API calls
Promise.all(
currentJobs.map(async (job) => {
+
+ // if the job obj already has a result key
+ // then skip
if (job.result) return job;
+
+ // otherwise, request the backend
try {
const res = await apiRequest(`/hash/${job.id}`);
- return res.status === "processing" ? job : { ...job, result: res };
+ return res.status === "queued" ? job : { ...job, result: res };
} catch {
return job;
}
@@ -102,8 +103,7 @@ function App() {
// Envoi auto de hash aléatoires selon le mode
useEffect(() => {
- console.log("autoRunning: ", autoRunning);
- console.log("mode: ", mode);
+
if (!autoRunning) return;
const delay = MODES[mode] || MODES.normal;
@@ -112,8 +112,6 @@ function App() {
const randomText = Math.random().toString(36).slice(2, 6);
const hash = CryptoJS.MD5(randomText).toString(CryptoJS.enc.Hex);
- console.log("auto send", delay, hash);
-
try {
const res = await apiRequest("/hash/manual", {
method: "POST",
--
GitLab
From 0085a4ab2c67335f5ea8ea190f5f45a077773027 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 16:44:22 +0100
Subject: [PATCH 08/19] updated todo file
---
flow.txt | 0
todo.txt | 11 ++++++++++-
2 files changed, 10 insertions(+), 1 deletion(-)
create mode 100644 flow.txt
diff --git a/flow.txt b/flow.txt
new file mode 100644
index 0000000..e69de29
diff --git a/todo.txt b/todo.txt
index 3335881..b5eeeb9 100644
--- a/todo.txt
+++ b/todo.txt
@@ -1,9 +1,18 @@
* read the code to review if the queue is properly managed and if the UI state is proprely handled as well. Then simplify.
-* make the UI a bit nicer, add timers and loaders, (ex; refresh cluster in 2s)
+* centralize the constants, and export them rather then redeclaring them on every file
+* make the scaler work with loads rather then hard numerical value based thresholds (scale if 4 jobs
+per worker rather then scale if pending_jobs_count > 5)
+* make the UI a bit nicer, add timers and loaders, (ex; add the text "refreshing the cluster state
+in 2seconds" to the view)
+* change the 404 status code to 200 with a body informing the frontend that the job is currently on
+wait, this will remove the noise from the frontend consle, (update the /hash/:id)
* add a stepper to the UI instead of our radio buttons
* add an export button to export the inputs and the outputs of automatic hash generation and submitting, make the input and
output files be seperate this way the teacher can inject the inputs to his program and check if the outputs match
* implement the CI using gitlab tool chain.
+* review how we use async/await, and if we do really need those awaits everywhere ?
+* see what the jobs:status redis channel used for.
+
* review the code, simplify and simplify!
--
GitLab
From e1c0c7ea8599e21dbb86667ed5f5ec5a6f563455 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 17:00:52 +0100
Subject: [PATCH 09/19] centralized the constants and the magic values into
config.js
---
backend/src/config.js | 29 +++++++++++++++++++++++
backend/src/index.js | 8 +++----
backend/src/routes/cluster.js | 42 ++++++++++++++++++++++------------
backend/src/routes/hash.js | 15 ++++++------
backend/src/services/scaler.js | 31 +++++++++++++------------
5 files changed, 83 insertions(+), 42 deletions(-)
create mode 100644 backend/src/config.js
diff --git a/backend/src/config.js b/backend/src/config.js
new file mode 100644
index 0000000..3a9dbe3
--- /dev/null
+++ b/backend/src/config.js
@@ -0,0 +1,29 @@
+/**
+ * Centralized configuration for the backend.
+ * All env-based constants and Redis key names live here.
+ */
+
+// Redis
+export const REDIS_URL = process.env.REDIS_URL || "redis://redis:6379";
+
+export const REDIS_KEYS = {
+ JOBS_PENDING: "jobs:pending",
+ JOBS_RESULTS: "jobs:results",
+ JOBS_STATUS: "jobs:status",
+ JOBS_IN_PROGRESS: "jobs:in_progress",
+};
+
+// Docker
+export const DOCKER_SOCKET = process.env.DOCKER_SOCKET || "/var/run/docker.sock";
+export const WORKER_SERVICE_NAME = process.env.WORKER_SERVICE_NAME || "md5_hash_worker";
+
+// Server
+export const PORT = Number(process.env.PORT) || 8080;
+
+// Scaler
+export const SCALER_ENABLED = process.env.SCALER_ENABLED === "true";
+export const SCALER_INTERVAL_MS = Number(process.env.SCALER_INTERVAL_MS) || 10000;
+export const SCALER_MIN_REPLICAS = Number(process.env.SCALER_MIN_REPLICAS) || 1;
+export const SCALER_MAX_REPLICAS = Number(process.env.SCALER_MAX_REPLICAS) || 10;
+export const SCALER_SCALE_UP_WHEN_JOBS_ABOVE = Number(process.env.SCALER_SCALE_UP_WHEN_JOBS_ABOVE) || 5;
+export const SCALER_SCALE_DOWN_WHEN_JOBS_BELOW = Number(process.env.SCALER_SCALE_DOWN_WHEN_JOBS_BELOW) || 3;
diff --git a/backend/src/index.js b/backend/src/index.js
index 5e2c0ec..b27b723 100644
--- a/backend/src/index.js
+++ b/backend/src/index.js
@@ -3,9 +3,9 @@ import cors from "cors";
import hashRouter from "./routes/hash.js";
import clusterRouter from "./routes/cluster.js";
import { startScaler } from "./services/scaler.js";
+import { PORT, SCALER_ENABLED } from "./config.js";
const app = express();
-const port = process.env.PORT || 8080;
/**
* @todo:
@@ -27,9 +27,9 @@ app.get("/health", (req, res) => {
app.use("/hash", hashRouter);
app.use("/cluster", clusterRouter);
-app.listen(port, () => {
- console.log(`Backend API listening on port ${port}`);
- if (process.env.SCALER_ENABLED === "true") {
+app.listen(PORT, () => {
+ console.log(`Backend API listening on port ${PORT}`);
+ if (SCALER_ENABLED) {
startScaler();
}
});
diff --git a/backend/src/routes/cluster.js b/backend/src/routes/cluster.js
index c6be2e5..8d16531 100644
--- a/backend/src/routes/cluster.js
+++ b/backend/src/routes/cluster.js
@@ -2,27 +2,22 @@ import express from "express";
import Docker from "dockerode";
import Redis from "ioredis";
import { scaleToReplicas } from "../services/clusterService.js";
+import { DOCKER_SOCKET, REDIS_URL, WORKER_SERVICE_NAME, REDIS_KEYS } from "../config.js";
const router = express.Router();
-
-const dockerSocketPath = process.env.DOCKER_SOCKET || "/var/run/docker.sock";
-const docker = new Docker({ socketPath: dockerSocketPath });
-
-const redisUrl = process.env.REDIS_URL || "redis://redis:6379";
-const redis = new Redis(redisUrl);
-
-const workerServiceName = process.env.WORKER_SERVICE_NAME || "md5_hash_worker";
+const docker = new Docker({ socketPath: DOCKER_SOCKET });
+const redis = new Redis(REDIS_URL);
// GET /cluster/state - état simple du cluster
router.get("/state", async (req, res) => {
try {
const [services, pendingCount] = await Promise.all([
docker.listServices(),
- redis.llen("jobs:pending"),
+ redis.llen(REDIS_KEYS.JOBS_PENDING),
]);
const workerServiceInfo = services.find(
- (s) => s.Spec && s.Spec.Name === workerServiceName
+ (s) => s.Spec && s.Spec.Name === WORKER_SERVICE_NAME
);
let workerReplicas = 0;
@@ -45,19 +40,36 @@ router.get("/state", async (req, res) => {
}
});
-// POST /cluster/scale - scale up/down des workers
+// POST /cluster/scale - scale up/down workers on-deman manually
router.post("/scale", async (req, res) => {
const desired = Number(req.body.replicas);
-
+
if (Number.isNaN(desired) || desired < 0) {
return res.status(400).json(
- {
- error: "invalid replicas count, replicas count should be a numerical value > 0."
+ {
+ error: "invalid replicas count, replicas count should be a numerical value > 0."
}
);
}
- const result = await scaleToReplicas(docker, workerServiceName, desired);
+ let services;
+ try {
+ services = await docker.listServices();
+ } catch (err) {
+ const msg = err?.message || String(err);
+ if (msg.includes("not a swarm manager") || msg.includes("This node is not a swarm manager")) {
+ return res.status(503).json({ error: "Docker is not in Swarm mode" });
+ }
+ throw err;
+ }
+ const workerServiceInfo = services.find(
+ (s) => s.Spec && s.Spec.Name === WORKER_SERVICE_NAME
+ );
+ if (!workerServiceInfo) {
+ return res.status(404).json({ error: "worker service not found" });
+ }
+
+ const result = await scaleToReplicas(docker, workerServiceInfo, desired);
if (result.ok) {
return res.status(200).json({ workerReplicas: result.workerReplicas });
diff --git a/backend/src/routes/hash.js b/backend/src/routes/hash.js
index d59590d..1d3ed4e 100644
--- a/backend/src/routes/hash.js
+++ b/backend/src/routes/hash.js
@@ -1,11 +1,10 @@
import express from "express";
import Redis from "ioredis";
import { v4 as uuidv4 } from "uuid";
+import { REDIS_URL, REDIS_KEYS } from "../config.js";
const router = express.Router();
-
-const redisUrl = process.env.REDIS_URL || "redis://redis:6379";
-const redis = new Redis(redisUrl);
+const redis = new Redis(REDIS_URL);
// POST /hash/manual - queing the hash to be bruteforced by the worker
router.post("/manual", async (req, res) => {
@@ -23,8 +22,8 @@ router.post("/manual", async (req, res) => {
// appropriate regarding the current process load. Double
// awaits forces the event loop to consider these over other
// stuff, it is okey for critical stuff, but this is not !
- await redis.lpush("jobs:pending", JSON.stringify(job));
- await redis.hset("jobs:status", jobId, JSON.stringify({ status: "queued" }));
+ await redis.lpush(REDIS_KEYS.JOBS_PENDING, JSON.stringify(job));
+ await redis.hset(REDIS_KEYS.JOBS_STATUS, jobId, JSON.stringify({ status: "queued" }));
return res.status(202).json({ id: jobId });
});
@@ -32,10 +31,10 @@ router.post("/manual", async (req, res) => {
// GET /hash/:id - get the status of a specific job
router.get("/:id", async (req, res) => {
const { id } = req.params;
- const raw = await redis.hget("jobs:results", id);
-
+ const raw = await redis.hget(REDIS_KEYS.JOBS_RESULTS, id);
+
if (!raw) {
- const jobStatus = await redis.hget("jobs:status", id);
+ const jobStatus = await redis.hget(REDIS_KEYS.JOBS_STATUS, id);
return res.status(200).json(JSON.parse(jobStatus));
}
diff --git a/backend/src/services/scaler.js b/backend/src/services/scaler.js
index 818c37f..d209b9e 100644
--- a/backend/src/services/scaler.js
+++ b/backend/src/services/scaler.js
@@ -1,16 +1,17 @@
import Docker from "dockerode";
import Redis from "ioredis";
import { scaleToReplicas } from "./clusterService.js";
-
-const dockerSocketPath = process.env.DOCKER_SOCKET || "/var/run/docker.sock";
-const redisUrl = process.env.REDIS_URL || "redis://redis:6379";
-const workerServiceName = process.env.WORKER_SERVICE_NAME || "md5_hash_worker";
-
-const SCALER_INTERVAL_MS = Number(process.env.SCALER_INTERVAL_MS) || 10000;
-const SCALER_MIN_REPLICAS = Number(process.env.SCALER_MIN_REPLICAS) || 1;
-const SCALER_MAX_REPLICAS = Number(process.env.SCALER_MAX_REPLICAS) || 10;
-const SCALER_SCALE_UP_WHEN_JOBS_ABOVE = Number(process.env.SCALER_SCALE_UP_WHEN_JOBS_ABOVE) || 5;
-const SCALER_SCALE_DOWN_WHEN_JOBS_BELOW = Number(process.env.SCALER_SCALE_DOWN_WHEN_JOBS_BELOW) || 3;
+import {
+ DOCKER_SOCKET,
+ REDIS_URL,
+ WORKER_SERVICE_NAME,
+ REDIS_KEYS,
+ SCALER_INTERVAL_MS,
+ SCALER_MIN_REPLICAS,
+ SCALER_MAX_REPLICAS,
+ SCALER_SCALE_UP_WHEN_JOBS_ABOVE,
+ SCALER_SCALE_DOWN_WHEN_JOBS_BELOW,
+} from "../config.js";
let intervalId = null;
@@ -18,8 +19,8 @@ let intervalId = null;
async function evaluate(docker, redis) {
try {
const [pendingCount, inProgressCount] = await Promise.all([
- redis.llen("jobs:pending"),
- redis.scard("jobs:in_progress"),
+ redis.llen(REDIS_KEYS.JOBS_PENDING),
+ redis.scard(REDIS_KEYS.JOBS_IN_PROGRESS),
]);
let services;
@@ -34,7 +35,7 @@ async function evaluate(docker, redis) {
}
const workerServiceInfo = services.find(
- (s) => s.Spec && s.Spec.Name === workerServiceName
+ (s) => s.Spec && s.Spec.Name === WORKER_SERVICE_NAME
);
if (!workerServiceInfo) {
@@ -79,8 +80,8 @@ export function startScaler() {
return;
}
- const redis = new Redis(redisUrl);
- const docker = new Docker({ socketPath: dockerSocketPath });
+ const redis = new Redis(REDIS_URL);
+ const docker = new Docker({ socketPath: DOCKER_SOCKET });
intervalId = setInterval(() => evaluate(docker, redis), SCALER_INTERVAL_MS);
--
GitLab
From 7d77b9c76063a5734820659eccd145436491639a Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 17:02:10 +0100
Subject: [PATCH 10/19] Removed the manual scaling endpoint since our
automatic scaler now works as expected
---
backend/src/routes/cluster.js | 45 -----------------------------------
1 file changed, 45 deletions(-)
diff --git a/backend/src/routes/cluster.js b/backend/src/routes/cluster.js
index 8d16531..e9cab3d 100644
--- a/backend/src/routes/cluster.js
+++ b/backend/src/routes/cluster.js
@@ -1,7 +1,6 @@
import express from "express";
import Docker from "dockerode";
import Redis from "ioredis";
-import { scaleToReplicas } from "../services/clusterService.js";
import { DOCKER_SOCKET, REDIS_URL, WORKER_SERVICE_NAME, REDIS_KEYS } from "../config.js";
const router = express.Router();
@@ -40,50 +39,6 @@ router.get("/state", async (req, res) => {
}
});
-// POST /cluster/scale - scale up/down workers on-deman manually
-router.post("/scale", async (req, res) => {
- const desired = Number(req.body.replicas);
-
- if (Number.isNaN(desired) || desired < 0) {
- return res.status(400).json(
- {
- error: "invalid replicas count, replicas count should be a numerical value > 0."
- }
- );
- }
-
- let services;
- try {
- services = await docker.listServices();
- } catch (err) {
- const msg = err?.message || String(err);
- if (msg.includes("not a swarm manager") || msg.includes("This node is not a swarm manager")) {
- return res.status(503).json({ error: "Docker is not in Swarm mode" });
- }
- throw err;
- }
- const workerServiceInfo = services.find(
- (s) => s.Spec && s.Spec.Name === WORKER_SERVICE_NAME
- );
- if (!workerServiceInfo) {
- return res.status(404).json({ error: "worker service not found" });
- }
-
- const result = await scaleToReplicas(docker, workerServiceInfo, desired);
-
- if (result.ok) {
- return res.status(200).json({ workerReplicas: result.workerReplicas });
- }
- if (result.reason === "service_not_found") {
- return res.status(404).json({ error: "worker service not found" });
- }
- if (result.reason === "not_swarm") {
- return res.status(503).json({ error: "Docker is not in Swarm mode" });
- }
-
- console.error("Error scaling workers", result.error);
- return res.status(500).json({ error: "failed to scale workers" });
-});
export default router;
--
GitLab
From 93f561740a343c8d85d9281107369eef7f2dd459 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 17:10:28 +0100
Subject: [PATCH 11/19] centralized our docker and redis handles instantiation
---
backend/src/clients.js | 10 ++++++++++
backend/src/routes/cluster.js | 7 ++-----
backend/src/routes/hash.js | 5 ++---
backend/src/services/scaler.js | 10 ++--------
4 files changed, 16 insertions(+), 16 deletions(-)
create mode 100644 backend/src/clients.js
diff --git a/backend/src/clients.js b/backend/src/clients.js
new file mode 100644
index 0000000..715e072
--- /dev/null
+++ b/backend/src/clients.js
@@ -0,0 +1,10 @@
+/**
+ * Shared Redis and Docker client instances.
+ * Single connection per process instead of one per route/scaler.
+ */
+import Redis from "ioredis";
+import Docker from "dockerode";
+import { REDIS_URL, DOCKER_SOCKET } from "./config.js";
+
+export const redis = new Redis(REDIS_URL);
+export const docker = new Docker({ socketPath: DOCKER_SOCKET });
diff --git a/backend/src/routes/cluster.js b/backend/src/routes/cluster.js
index e9cab3d..e69f5e5 100644
--- a/backend/src/routes/cluster.js
+++ b/backend/src/routes/cluster.js
@@ -1,11 +1,8 @@
import express from "express";
-import Docker from "dockerode";
-import Redis from "ioredis";
-import { DOCKER_SOCKET, REDIS_URL, WORKER_SERVICE_NAME, REDIS_KEYS } from "../config.js";
+import { WORKER_SERVICE_NAME, REDIS_KEYS } from "../config.js";
+import { docker, redis } from "../clients.js";
const router = express.Router();
-const docker = new Docker({ socketPath: DOCKER_SOCKET });
-const redis = new Redis(REDIS_URL);
// GET /cluster/state - état simple du cluster
router.get("/state", async (req, res) => {
diff --git a/backend/src/routes/hash.js b/backend/src/routes/hash.js
index 1d3ed4e..d203a59 100644
--- a/backend/src/routes/hash.js
+++ b/backend/src/routes/hash.js
@@ -1,10 +1,9 @@
import express from "express";
-import Redis from "ioredis";
import { v4 as uuidv4 } from "uuid";
-import { REDIS_URL, REDIS_KEYS } from "../config.js";
+import { REDIS_KEYS } from "../config.js";
+import { redis } from "../clients.js";
const router = express.Router();
-const redis = new Redis(REDIS_URL);
// POST /hash/manual - queing the hash to be bruteforced by the worker
router.post("/manual", async (req, res) => {
diff --git a/backend/src/services/scaler.js b/backend/src/services/scaler.js
index d209b9e..184d281 100644
--- a/backend/src/services/scaler.js
+++ b/backend/src/services/scaler.js
@@ -1,9 +1,6 @@
-import Docker from "dockerode";
-import Redis from "ioredis";
import { scaleToReplicas } from "./clusterService.js";
+import { docker, redis } from "../clients.js";
import {
- DOCKER_SOCKET,
- REDIS_URL,
WORKER_SERVICE_NAME,
REDIS_KEYS,
SCALER_INTERVAL_MS,
@@ -79,10 +76,7 @@ export function startScaler() {
console.log("Scaler: already started");
return;
}
-
- const redis = new Redis(REDIS_URL);
- const docker = new Docker({ socketPath: DOCKER_SOCKET });
-
+
intervalId = setInterval(() => evaluate(docker, redis), SCALER_INTERVAL_MS);
console.log(`Scaler: started (interval ${SCALER_INTERVAL_MS}ms, min=${SCALER_MIN_REPLICAS}, max=${SCALER_MAX_REPLICAS})`);
--
GitLab
From 77f6f9797a7f4b0621d916c8a153d4c4f4506eee Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Sun, 8 Feb 2026 17:11:34 +0100
Subject: [PATCH 12/19] updated todos
---
todo.txt | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/todo.txt b/todo.txt
index b5eeeb9..fbb1aee 100644
--- a/todo.txt
+++ b/todo.txt
@@ -1,18 +1,17 @@
-* read the code to review if the queue is properly managed and if the UI state is proprely handled as well. Then simplify.
-* centralize the constants, and export them rather then redeclaring them on every file
-* make the scaler work with loads rather then hard numerical value based thresholds (scale if 4 jobs
-per worker rather then scale if pending_jobs_count > 5)
+* add a stepper to the UI instead of our radio buttons
+
* make the UI a bit nicer, add timers and loaders, (ex; add the text "refreshing the cluster state
in 2seconds" to the view)
-* change the 404 status code to 200 with a body informing the frontend that the job is currently on
-wait, this will remove the noise from the frontend consle, (update the /hash/:id)
-* add a stepper to the UI instead of our radio buttons
+
* add an export button to export the inputs and the outputs of automatic hash generation and submitting, make the input and
output files be seperate this way the teacher can inject the inputs to his program and check if the outputs match
+
+* make the scaler work with loads rather then hard numerical value based thresholds (scale if 4 jobs
+per worker rather then scale if pending_jobs_count > 5)
+
* implement the CI using gitlab tool chain.
* review how we use async/await, and if we do really need those awaits everywhere ?
-* see what the jobs:status redis channel used for.
* review the code, simplify and simplify!
--
GitLab
From 0f83f79fff584a62f8da11700209feb5e11dbd4a Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Mon, 9 Feb 2026 00:38:13 +0100
Subject: [PATCH 13/19] Make the scaler work with loads rather than hard
 numerical value based thresholds (scale if 4 jobs per worker rather than
 scale if pending_jobs_count > 5)
---
README.md | 6 +++---
backend/src/config.js | 7 +++++--
backend/src/services/scaler.js | 15 +++++++++------
3 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/README.md b/README.md
index a5948bc..257eb41 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ Ce dépôt contient une petite infrastructure Docker Swarm capable de bruteforce
### Auto-scaling
-Un **scaler** tourne dans le processus du backend. Il lit périodiquement la taille de la file Redis (`jobs:pending`) et ajuste le nombre de réplicas du service worker Swarm entre un min et un max configurables. Seuils avec hysteresis : scale up si les jobs en attente dépassent un seuil, scale down s’ils tombent en dessous d’un seuil plus bas.
+Un **scaler** tourne dans le processus du backend. Il lit périodiquement la file Redis (`jobs:pending`) et le nombre de workers, puis ajuste le nombre de réplicas du service worker Swarm entre un min et un max configurables. Les seuils sont basés sur la charge (jobs par worker) : scale up si (jobs en attente / workers) dépasse un seuil, scale down si ça tombe en dessous d’un seuil plus bas (et qu’aucun job n’est en cours).
Variables d’environnement (optionnelles, sur le service `api_backend`) :
@@ -27,8 +27,8 @@ Variables d’environnement (optionnelles, sur le service `api_backend`) :
| `SCALER_INTERVAL_MS` | `10000` | Période entre deux évaluations (ms). |
| `SCALER_MIN_REPLICAS` | `1` | Nombre minimum de workers. |
| `SCALER_MAX_REPLICAS` | `10` | Nombre maximum de workers. |
-| `SCALER_SCALE_UP_WHEN_JOBS_ABOVE` | `5` | Scale up d’un réplica si jobs en attente ≥ cette valeur. |
-| `SCALER_SCALE_DOWN_WHEN_JOBS_BELOW` | `1` | Scale down d’un réplica si jobs en attente ≤ cette valeur. |
+| `SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE` | `4` | Scale up si (jobs en attente / workers) ≥ cette valeur. |
+| `SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW` | `1` | Scale down si (jobs en attente / workers) ≤ cette valeur (et aucun job en cours). |
### Démarrage rapide (esquisse)
diff --git a/backend/src/config.js b/backend/src/config.js
index 3a9dbe3..f4c6dac 100644
--- a/backend/src/config.js
+++ b/backend/src/config.js
@@ -25,5 +25,8 @@ export const SCALER_ENABLED = process.env.SCALER_ENABLED === "true";
export const SCALER_INTERVAL_MS = Number(process.env.SCALER_INTERVAL_MS) || 10000;
export const SCALER_MIN_REPLICAS = Number(process.env.SCALER_MIN_REPLICAS) || 1;
export const SCALER_MAX_REPLICAS = Number(process.env.SCALER_MAX_REPLICAS) || 10;
-export const SCALER_SCALE_UP_WHEN_JOBS_ABOVE = Number(process.env.SCALER_SCALE_UP_WHEN_JOBS_ABOVE) || 5;
-export const SCALER_SCALE_DOWN_WHEN_JOBS_BELOW = Number(process.env.SCALER_SCALE_DOWN_WHEN_JOBS_BELOW) || 3;
+// Load-based: scale by jobs per worker (pending / current replicas)
+export const SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE =
+ Number(process.env.SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE) || 4;
+export const SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW =
+ Number(process.env.SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW) || 1;
diff --git a/backend/src/services/scaler.js b/backend/src/services/scaler.js
index 184d281..b9190f8 100644
--- a/backend/src/services/scaler.js
+++ b/backend/src/services/scaler.js
@@ -6,8 +6,8 @@ import {
SCALER_INTERVAL_MS,
SCALER_MIN_REPLICAS,
SCALER_MAX_REPLICAS,
- SCALER_SCALE_UP_WHEN_JOBS_ABOVE,
- SCALER_SCALE_DOWN_WHEN_JOBS_BELOW,
+ SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE,
+ SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW,
} from "../config.js";
let intervalId = null;
@@ -45,13 +45,16 @@ async function evaluate(docker, redis) {
current = workerServiceInfo.Spec.Mode.Replicated.Replicas ?? 0;
}
+ // set lower bound to 1 to avoid div/0
+ const effectiveWorkers = Math.max(1, current);
+ const jobsPerWorker = pendingCount / effectiveWorkers;
+
let desired = -1;
- if (pendingCount >= SCALER_SCALE_UP_WHEN_JOBS_ABOVE) {
+ if (jobsPerWorker >= SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE) {
desired = Math.min(SCALER_MAX_REPLICAS, current + 1);
-
} else if (
- pendingCount <= SCALER_SCALE_DOWN_WHEN_JOBS_BELOW &&
+ jobsPerWorker <= SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW &&
inProgressCount === 0
) {
desired = Math.max(SCALER_MIN_REPLICAS, current - 1);
@@ -62,7 +65,7 @@ async function evaluate(docker, redis) {
const result = await scaleToReplicas(docker, workerServiceInfo, desired);
if (result.ok) {
- console.log(`Scaler: scaled workers from ${current} to ${desired} (pending jobs: ${pendingCount})`);
+ console.log(`Scaler: scaled workers from ${current} to ${desired} (pending: ${pendingCount}, jobs/worker: ${jobsPerWorker.toFixed(1)})`);
} else {
console.warn("Scaler: scale failed", result.reason, result.error || "");
}
--
GitLab
From 46d671bc99d9df4d8881a45d333355093c8ef573 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Mon, 9 Feb 2026 00:39:36 +0100
Subject: [PATCH 14/19] updated env vars for the new scaler logic
---
infra/stack.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/infra/stack.yml b/infra/stack.yml
index 6b80792..ffc1763 100644
--- a/infra/stack.yml
+++ b/infra/stack.yml
@@ -16,8 +16,8 @@ services:
- SCALER_INTERVAL_MS=10000
- SCALER_MIN_REPLICAS=1
- SCALER_MAX_REPLICAS=10
- - SCALER_SCALE_UP_WHEN_JOBS_ABOVE=5
- - SCALER_SCALE_DOWN_WHEN_JOBS_BELOW=3
+ - SCALER_SCALE_UP_WHEN_JOBS_PER_WORKER_ABOVE=4
+ - SCALER_SCALE_DOWN_WHEN_JOBS_PER_WORKER_BELOW=1
volumes:
- /var/run/docker.sock:/var/run/docker.sock
networks:
--
GitLab
From 2216eb16111924cf5b744ecef26af629f7a6a7c7 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Mon, 9 Feb 2026 00:40:03 +0100
Subject: [PATCH 15/19] updated todos
---
todo.txt | 3 ---
1 file changed, 3 deletions(-)
diff --git a/todo.txt b/todo.txt
index fbb1aee..1cbf248 100644
--- a/todo.txt
+++ b/todo.txt
@@ -6,9 +6,6 @@ in 2seconds" to the view)
* add an export button to export the inputs and the outputs of automatic hash generation and submitting, make the input and
output files be seperate this way the teacher can inject the inputs to his program and check if the outputs match
-* make the scaler work with loads rather then hard numerical value based thresholds (scale if 4 jobs
-per worker rather then scale if pending_jobs_count > 5)
-
* implement the CI using gitlab tool chain.
* review how we use async/await, and if we do really need those awaits everywhere ?
--
GitLab
From 5dbe7aa144d277fa8645a45af8c963d3c118eae0 Mon Sep 17 00:00:00 2001
From: Massiles Ghernaout <749-gm213204@users.noreply.www-apps.univ-lehavre.fr>
Date: Mon, 9 Feb 2026 00:53:00 +0100
Subject: [PATCH 16/19] added a stepper-like mode change mechanism with more
 choices
---
frontend/src/App.jsx | 117 +++++++++++++++++++++++++++++--------------
1 file changed, 79 insertions(+), 38 deletions(-)
diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
index 0a314b4..a2acc52 100644
--- a/frontend/src/App.jsx
+++ b/frontend/src/App.jsx
@@ -2,10 +2,23 @@ import React, { useEffect, useState } from "react";
import CryptoJS from "crypto-js";
const MODES = {
-//mode: delay(ms)
- gentle: 5000,
- normal: 2000,
+ veryGentle: 5000, // nice for quick debugging
+ gentle: 3000,
+ normal: 1500,
+ fast: 1000,
aggressive: 500,
+ veryAggressive: 250,
+};
+
+const MODE_KEYS = ["veryGentle", "gentle", "normal", "fast", "aggressive", "veryAggressive"];
+const intervalLabel = (ms) => (ms >= 1000 ? `${ms / 1000}s` : `${ms}ms`);
+const MODE_LABELS = {
+ veryGentle: `Très gentil (${intervalLabel(MODES.veryGentle)})`,
+ gentle: `Gentil (${intervalLabel(MODES.gentle)})`,
+ normal: `Normal (${intervalLabel(MODES.normal)})`,
+ fast: `Rapide (${intervalLabel(MODES.fast)})`,
+ aggressive: `Agressif (${intervalLabel(MODES.aggressive)})`,
+ veryAggressive: `Très agressif (${intervalLabel(MODES.veryAggressive)})`,
};
async function apiRequest(path, options = {}) {
@@ -22,7 +35,10 @@ async function apiRequest(path, options = {}) {
function App() {
const [hashInput, setHashInput] = useState("");
- const [mode, setMode] = useState("gentle");
+
+ const [modeStep, setModeStep] = useState(2); // default: normal (2s)
+ const mode = MODE_KEYS[modeStep];
+
const [autoRunning, setAutoRunning] = useState(false);
const [jobs, setJobs] = useState([]);
const [clusterState, setClusterState] = useState(null);
@@ -151,40 +167,65 @@ function App() {
Mode automatique
-
-
-
-
-
-
-
+
+
+
+
+ {MODE_LABELS[mode]}
+
+