[FEATURE] Adjust error handling and bump versions
This commit is contained in:
parent
d47a8c3cc4
commit
360dd32860
19 changed files with 1052 additions and 455 deletions
|
@ -7,6 +7,7 @@ import { INFO, OK, logInfo } from "./util/logging.js";
|
|||
// Import Core Modules
|
||||
import buildRoutes from "./server/router.js";
|
||||
import injectSockets from "./server/sockets.js";
|
||||
import pg from "./database/postgres.js";
|
||||
|
||||
// Constants
|
||||
const title = "MCL";
|
||||
|
@ -23,6 +24,7 @@ export default class Minecluster {
|
|||
logInfo(fig.textSync(title, "Larry 3D"));
|
||||
INFO("INIT", "Initializing...");
|
||||
this.app = express();
|
||||
this.pg = pg;
|
||||
this.server = http.createServer(this.app);
|
||||
this.sockets = injectSockets(this.server, this.jobs);
|
||||
this.routes = buildRoutes(this.sockets);
|
||||
|
@ -31,11 +33,12 @@ export default class Minecluster {
|
|||
}
|
||||
|
||||
async _connect() {
|
||||
// await this.pg.connect();
|
||||
await this.pg.connect();
|
||||
}
|
||||
|
||||
start() {
|
||||
const mcl = this;
|
||||
|
||||
return new Promise(async function init(res) {
|
||||
mcl._preinitialize();
|
||||
await mcl._connect();
|
||||
|
|
8
lib/database/migrations/1_create_servers_table.sql
Normal file
8
lib/database/migrations/1_create_servers_table.sql
Normal file
|
@ -0,0 +1,8 @@
|
|||
/*CREATE SEQUENCE servers_id_seq;
|
||||
CREATE TABLE servers (
|
||||
id bigint NOT NULL DEFAULT nextval('servers_id_seq') PRIMARY KEY,
|
||||
name varchar(255) DEFAULT NULL,
|
||||
host varchar(255) DEFAULT NULL,
|
||||
CONSTRAINT unique_host UNIQUE(host)
|
||||
);
|
||||
ALTER SEQUENCE servers_id_seq OWNED BY servers.id;*/
|
121
lib/database/pg-query.js
Normal file
121
lib/database/pg-query.js
Normal file
|
@ -0,0 +1,121 @@
|
|||
// Shallow-copy an entry object, dropping every column whose value is
// undefined so it never leaks into generated SQL.
const buildPostgresEntry = (entry) =>
  Object.fromEntries(
    Object.entries(entry).filter(([, value]) => value !== undefined),
  );
|
||||
|
||||
// Serialize a JS value into a SQL literal fragment:
// - null and "empty" values become the SQL keyword string "null"
// - strings are single-quoted with embedded quotes doubled
// - non-empty string arrays become an `ARRAY [...]` literal
// - everything else (numbers, booleans) passes through untouched
export const buildPostgresValue = (jsVar) => {
  if (jsVar === null) return "null";
  if (typeof jsVar === "string") return buildPostgresString(jsVar);
  if (Array.isArray(jsVar) && jsVar.length === 0) return "null";
  if (Array.isArray(jsVar) && isTypeArray(jsVar, "string"))
    return buildPostgresStringArray(jsVar);
  return jsVar;
};

// Build an `ARRAY ['a','b']` literal.
// BUG FIX: elements are now quote-escaped; previously an embedded
// apostrophe terminated the literal early (malformed SQL / injection).
const buildPostgresStringArray = (jsonArray) => {
  if (jsonArray.length === 0) return null;
  const elements = jsonArray.map((e) => `'${e.replaceAll("'", "''")}'`);
  return `ARRAY [${elements.join(",")}]`;
};

// True when every element of the array has the given typeof type.
const isTypeArray = (jsonArray, type) =>
  jsonArray.every((e) => typeof e === type);

// Quote a string literal, doubling embedded single quotes.
// BUG FIX: an empty string now maps to the "null" keyword string; the old
// `|| null` fallback returned JS null, which Array.join() renders as an
// empty slot and silently produced malformed VALUES(...) lists.
const buildPostgresString = (jsonString) =>
  jsonString ? `'${jsonString.replaceAll("'", "''")}'` : "null";
|
||||
|
||||
// Build `INSERT INTO table("a","b")\nVALUES(1,'x')` from a plain object.
// Throws when the entry is not an object.
export const insertQuery = (table, jsEntry) => {
  if (typeof jsEntry !== "object") throw Error("PG Inserts must be objects!");
  const entry = buildPostgresEntry(jsEntry);
  const columnNames = [];
  const values = [];
  for (const col of Object.keys(entry)) {
    columnNames.push(`"${col}"`);
    values.push(buildPostgresValue(entry[col]));
  }
  return `INSERT INTO ${table}(${columnNames.join(",")})\nVALUES(${values.join(",")})`;
};
|
||||
|
||||
// Build a DELETE statement that ANDs every given column as a conditional;
// null-valued columns compare with `IS NULL` (SQL `= null` never matches).
export const deleteQuery = (table, jsEntry) => {
  if (typeof jsEntry !== "object")
    throw Error("PG Delete conditionals must be an object!");
  const entry = buildPostgresEntry(jsEntry);
  const conditionals = Object.keys(entry).map((col) => {
    const value = buildPostgresValue(entry[col]);
    return value === "null" ? `x.${col} IS NULL` : `x.${col}=${value}`;
  });
  return `DELETE FROM ${table} x WHERE ${conditionals.join(" AND ")}`;
};
|
||||
// Build the `ON CONFLICT (...) DO UPDATE SET ...` clause appended to an
// insert to get upsert semantics.
export const onConflictUpdate = (conflicts, updates) => {
  if (!Array.isArray(conflicts)) throw Error("PG Conflicts must be an array!");
  if (typeof updates !== "object") throw Error("PG Updates must be objects!");
  const entry = buildPostgresEntry(updates);
  const assignments = Object.keys(entry)
    .map((c) => `${c}=${buildPostgresValue(entry[c])}`)
    .join(",");
  return `ON CONFLICT (${conflicts.join(",")}) DO UPDATE SET\n${assignments}`;
};
|
||||
// TRUNCATE the given table (removes every row).
export const clearTableQuery = (table) => `TRUNCATE ${table}`;
|
||||
|
||||
// Build `SELECT * FROM table AS x WHERE\nx.a=1<joinWith>x.b='y'`.
// `joinWith` is the boolean joiner, e.g. " AND " or " OR ".
export const selectWhereQuery = (table, jsEntry, joinWith) => {
  if (typeof jsEntry !== "object") throw Error("PG Where must be an object!");
  const where = buildPostgresEntry(jsEntry);
  const clauses = Object.keys(where).map(
    (c) => `x.${c}=${buildPostgresValue(where[c])}`,
  );
  return `SELECT * FROM ${table} AS x WHERE\n${clauses.join(joinWith)}`;
};
|
||||
// Build `UPDATE table\nSET a = 1 WHERE b = 'x'<joinWith>...`.
// `joinWith` joins the WHERE conditionals (" AND " / " OR ").
export const updateWhereQuery = (table, updates, wheres, joinWith) => {
  if (typeof updates !== "object") throw Error("PG Updates must be an object!");
  if (typeof wheres !== "object") throw Error("PG Wheres must be an object!");
  const update = buildPostgresEntry(updates);
  const where = buildPostgresEntry(wheres);
  const setClause = Object.keys(update)
    .map((c) => `${c} = ${buildPostgresValue(update[c])}`)
    .join(",");
  const whereClause = Object.keys(where)
    .map((c) => `${c} = ${buildPostgresValue(where[c])}`)
    .join(joinWith);
  return `UPDATE ${table}\nSET ${setClause} WHERE ${whereClause}`;
};
|
||||
// Convenience wrappers that fix the boolean joiner for the generic
// update/select builders above.
export function updateWhereAnyQuery(table, updates, wheres) {
  return updateWhereQuery(table, updates, wheres, " OR ");
}
export function updateWhereAllQuery(table, updates, wheres) {
  return updateWhereQuery(table, updates, wheres, " AND ");
}
export function selectWhereAnyQuery(table, where) {
  return selectWhereQuery(table, where, " OR ");
}
export function selectWhereAllQuery(table, where) {
  return selectWhereQuery(table, where, " AND ");
}
|
||||
|
||||
// Aggregate default export so consumers can `import query from "./pg-query.js"`
// in addition to the individual named exports above.
export default {
  selectWhereAnyQuery,
  selectWhereAllQuery,
  updateWhereAnyQuery,
  updateWhereAllQuery,
  insertQuery,
  deleteQuery,
  buildPostgresValue,
  onConflictUpdate,
  clearTableQuery,
};
|
63
lib/database/postgres.js
Normal file
63
lib/database/postgres.js
Normal file
|
@ -0,0 +1,63 @@
|
|||
// Imports
|
||||
import path from "node:path";
|
||||
import { URL } from "node:url";
|
||||
import { migrate } from "postgres-migrations";
|
||||
import createPgp from "pg-promise";
|
||||
import moment from "moment";
|
||||
import { INFO, WARN, OK, VERB } from "../util/logging.js";
|
||||
|
||||
// Environment Variables
// All knobs are read from MCL_POSTGRES_* and fall back to local-dev
// defaults below.
const {
  MCL_POSTGRES_DATABASE: database,
  MCL_POSTGRES_ENABLED: pgEnabled,
  MCL_POSTGRES_HOST: host,
  MCL_POSTGRES_PASSWORD: password,
  MCL_POSTGRES_PORT: port,
  MCL_POSTGRES_USER: user,
} = process.env;

// Postgres-promise Configuration
// Ensure dates get saved as UTC date strings
// This prevents the parser from doing strange datetime operations
const pgp = createPgp();
// OID 1114 is TIMESTAMP WITHOUT TIME ZONE; parse it as a UTC string.
pgp.pg.types.setTypeParser(1114, (str) => moment.utc(str).format());

// Database Config
// Defaults target a local development postgres; override via MCL_* env vars.
// NOTE(review): `port` stays a string when set from the environment —
// presumably the driver coerces it; confirm.
const dbConfig = {
  database: database ?? "minecluster",
  user: user ?? "postgres",
  password: password ?? "postgres",
  host: host ?? "localhost",
  port: port ?? 5432,
  ensureDatabaseExists: true,
};

// Resolve the migrations directory relative to this module's own file.
const databaseDir = new URL(".", import.meta.url).pathname;
const migrationsDir = path.resolve(databaseDir, "migrations/");

// Stand-in query used until (or unless) a real connection is established.
const queryMock = (str) => INFO("POSTGRES MOCK", str);
|
||||
|
||||
// Produce the async connect() installed on the mock pg object by
// buildPostgres(). On call it runs pending migrations, then swaps the
// mock's methods for a real pg-promise instance and connects.
const connect = (pg) => async () => {
  if (pgEnabled === "false") {
    // Opt-out via MCL_POSTGRES_ENABLED=false: keep the logging mock.
    WARN("POSTGRES", "Postgres Disabled!");
    return { query: queryMock };
  }
  VERB("POSTGRES", "Migrating...");
  await migrate(dbConfig, migrationsDir);
  // Override fake methods
  // Copy every enumerable member of the real pg-promise database onto the
  // shared `pg` facade so existing references pick up the real methods
  // (this also replaces pg.connect itself).
  const pgInstance = pgp(dbConfig);
  for (var k in pgInstance) pg[k] = pgInstance[k];
  VERB("POSTGRES", "Migrated Successfully!");
  // NOTE(review): by this point pg.connect is pg-promise's own connect()
  // (overwritten by the loop above) — confirm that is intentional.
  await pg.connect();
  VERB("POSTGRES", "Postgres connected Successfully!");

  OK("POSTGRES", `Connected to database ${dbConfig.database}!`);
};
|
||||
|
||||
// Build the exported pg facade: it starts life as a logging mock whose
// connect() replaces its own methods with a real pg-promise instance.
const buildPostgres = () => {
  const facade = { query: queryMock };
  facade.connect = connect(facade);
  return facade;
};

// Singleton shared by the whole application.
export default buildPostgres();
|
|
@ -18,7 +18,7 @@ export default async function liveLogging(socket, serverNamespace) {
|
|||
const log = new k8s.Log(kc);
|
||||
const logStream = new stream.PassThrough();
|
||||
logStream.on("data", (chunk) =>
|
||||
socket.emit("push", Buffer.from(chunk).toString())
|
||||
socket.emit("push", Buffer.from(chunk).toString()),
|
||||
);
|
||||
log
|
||||
.log(serverNamespace, mcsPods[0], containerName, logStream, {
|
||||
|
|
|
@ -8,21 +8,35 @@ const k8sCore = kc.makeApiClient(k8s.CoreV1Api);
|
|||
const k8sMetrics = new k8s.Metrics(kc);
|
||||
const namespace = process.env.MCL_SERVER_NAMESPACE;
|
||||
|
||||
// Look up the Deployment backing a server; deployments are named
// `mcl-<serverName>` inside the configured namespace.
// Returns undefined both when no such deployment exists AND when the
// list call itself fails (the failure is only logged).
async function findDeployment(serverName) {
  try {
    const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
    return deploymentRes.body.items.find(
      (i) => i.metadata.name === `mcl-${serverName}`,
    );
  } catch (e) {
    // NOTE(review): the error is swallowed after logging, so callers
    // cannot distinguish "not found" from "API failure" — confirm intended.
    ERR("SERVER CONTROL", `Error finding deployment: mcl-${serverName}`);
  }
}
|
||||
|
||||
export async function startServer(req, res) {
|
||||
const serverSpec = req.body;
|
||||
if (!serverSpec) return res.sendStatus(400);
|
||||
if (!serverSpec.name) return res.status(400).send("Server name required!");
|
||||
const { name } = serverSpec;
|
||||
const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
|
||||
const dep = deploymentRes.body.items.find(
|
||||
(i) => i.metadata.name === `mcl-${name}`
|
||||
);
|
||||
if (!dep) return res.status(409).send("Server does not exist!");
|
||||
const dep = await findDeployment(name);
|
||||
|
||||
if (!dep || !dep.spec) return res.status(409).send("Server does not exist!");
|
||||
if (dep.spec.replicas === 1)
|
||||
return res.status(409).send("Server already started!");
|
||||
dep.spec.replicas = 1;
|
||||
k8sDeps.replaceNamespacedDeployment(`mcl-${name}`, namespace, dep);
|
||||
res.sendStatus(200);
|
||||
k8sDeps
|
||||
.replaceNamespacedDeployment(`mcl-${name}`, namespace, dep)
|
||||
.then(() => res.sendStatus(200))
|
||||
.catch((e) => {
|
||||
ERR("SERVER CONTROL", e);
|
||||
res.status(500).send("Error updating server!");
|
||||
});
|
||||
}
|
||||
|
||||
export async function stopServer(req, res) {
|
||||
|
@ -32,7 +46,7 @@ export async function stopServer(req, res) {
|
|||
const { name } = serverSpec;
|
||||
const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
|
||||
const dep = deploymentRes.body.items.find(
|
||||
(i) => i.metadata.name === `mcl-${name}`
|
||||
(i) => i.metadata.name === `mcl-${name}`,
|
||||
);
|
||||
if (!dep) return res.status(409).send("Server does not exist!");
|
||||
if (dep.spec.replicas === 0)
|
||||
|
@ -56,7 +70,7 @@ export async function getServers(req, res) {
|
|||
const podMetricsResponse = await k8sMetrics.getPodMetrics(namespace);
|
||||
// TODO Add an annotation and manage using that
|
||||
const serverDeployments = deployments.filter((d) =>
|
||||
d.metadata.name.startsWith("mcl-")
|
||||
d.metadata.name.startsWith("mcl-"),
|
||||
);
|
||||
var name, metrics, started;
|
||||
const servers = serverDeployments.map((s) => {
|
||||
|
@ -68,10 +82,10 @@ export async function getServers(req, res) {
|
|||
});
|
||||
if (pod) {
|
||||
const podCpus = pod.containers.map(
|
||||
({ usage }) => parseInt(usage.cpu) / 1_000_000
|
||||
({ usage }) => parseInt(usage.cpu) / 1_000_000,
|
||||
);
|
||||
const podMems = pod.containers.map(
|
||||
({ usage }) => parseInt(usage.memory) / 1024
|
||||
({ usage }) => parseInt(usage.memory) / 1024,
|
||||
);
|
||||
metrics = {
|
||||
cpu: Math.ceil(podCpus.reduce((a, b) => a + b)),
|
||||
|
|
|
@ -30,7 +30,7 @@ function payloadFilter(req, res) {
|
|||
function createRconSecret(serverSpec) {
|
||||
const { name } = serverSpec;
|
||||
const rconYaml = yaml.load(
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/rcon-secret.yml"), "utf8")
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/rcon-secret.yml"), "utf8"),
|
||||
);
|
||||
|
||||
// TODO: Dynamic rconPassword
|
||||
|
@ -45,7 +45,7 @@ function createRconSecret(serverSpec) {
|
|||
function createServerVolume(serverSpec) {
|
||||
const { name } = serverSpec;
|
||||
const volumeYaml = yaml.load(
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/server-pvc.yml"), "utf8")
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/server-pvc.yml"), "utf8"),
|
||||
);
|
||||
volumeYaml.metadata.labels.service = `mcl-${name}-server`;
|
||||
volumeYaml.metadata.name = `mcl-${name}-volume`;
|
||||
|
@ -72,8 +72,8 @@ function createServerDeploy(serverSpec) {
|
|||
const deployYaml = yaml.load(
|
||||
fs.readFileSync(
|
||||
path.resolve("lib/k8s/configs/server-deployment.yml"),
|
||||
"utf8"
|
||||
)
|
||||
"utf8",
|
||||
),
|
||||
);
|
||||
deployYaml.metadata.name = `mcl-${name}`;
|
||||
deployYaml.metadata.namespace = namespace;
|
||||
|
@ -83,30 +83,25 @@ function createServerDeploy(serverSpec) {
|
|||
deployYaml.spec.template.spec.containers.splice(0, 1); //TODO: Currently removing backup container
|
||||
const serverContainer = deployYaml.spec.template.spec.containers[0];
|
||||
|
||||
// Find a container env entry by its `name` field.
const findEnv = (k) => serverContainer.env.find(({ name: n }) => n === k);
// Set a container env var's value.
// BUG FIX: was `findEnv.value = v`, which assigned a property on the
// findEnv function object itself — every updateEnv() call silently did
// nothing and the deployment kept the template's env values.
const updateEnv = (k, v) => (findEnv(k).value = v);
|
||||
// Environment variables
|
||||
serverContainer.env.find(({ name: n }) => n === "TYPE").value = serverType;
|
||||
serverContainer.env.find(({ name: n }) => n === "VERSION").value = version;
|
||||
serverContainer.env.find(({ name: n }) => n === "DIFFICULTY").value =
|
||||
difficulty;
|
||||
serverContainer.env.find(({ name: n }) => n === "MODE").value = gamemode;
|
||||
serverContainer.env.find(({ name: n }) => n === "MOTD").value = motd;
|
||||
serverContainer.env.find(({ name: n }) => n === "MAX_PLAYERS").value =
|
||||
maxPlayers;
|
||||
serverContainer.env.find(({ name: n }) => n === "SEED").value = seed;
|
||||
serverContainer.env.find(({ name: n }) => n === "OPS").value = ops;
|
||||
serverContainer.env.find(({ name: n }) => n === "WHITELIST").value =
|
||||
whitelist;
|
||||
serverContainer.env.find(
|
||||
({ name: n }) => n === "MEMORY"
|
||||
).value = `${memory}M`;
|
||||
if (version !== "VANILLA")
|
||||
delete serverContainer.env.find(({ name: n }) => n === "MODPACK").value;
|
||||
else
|
||||
serverContainer.env.find(({ name: n }) => n === "MODPACK").value = modpack;
|
||||
updateEnv("TYPE", serverType);
|
||||
updateEnv("VERSION", version);
|
||||
updateEnv("DIFFICULTY", difficulty);
|
||||
updateEnv("MODE", gamemode);
|
||||
updateEnv("MOTD", motd);
|
||||
updateEnv("MAX_PLAYERS", maxPlayers);
|
||||
updateEnv("SEED", seed);
|
||||
updateEnv("OPS", ops);
|
||||
updateEnv("WHITELIST", whitelist);
|
||||
updateEnv("MEMORY", `${memory}M`);
|
||||
|
||||
if (version !== "VANILLA") delete findEnv("MODPACK").value;
|
||||
else updateEnv("MODPACK", modpack);
|
||||
findEnv("RCON_PASSWORD").valueFrom.secretKeyRef.name =
|
||||
`mcl-${name}-rcon-secret`;
|
||||
|
||||
serverContainer.env.find(
|
||||
({ name }) => name === "RCON_PASSWORD"
|
||||
).valueFrom.secretKeyRef.name = `mcl-${name}-rcon-secret`;
|
||||
// Server Container Name
|
||||
serverContainer.name = `mcl-${name}`;
|
||||
// Resources
|
||||
|
@ -114,7 +109,7 @@ function createServerDeploy(serverSpec) {
|
|||
// serverContainer.resources.limits.memory = `${memory}Mi`; // TODO Allow for limits beyond initial startup
|
||||
// Volumes
|
||||
deployYaml.spec.template.spec.volumes.find(
|
||||
({ name }) => name === "datadir"
|
||||
({ name }) => name === "datadir",
|
||||
).persistentVolumeClaim.claimName = `mcl-${name}-volume`;
|
||||
deployYaml.spec.template.spec.containers[0] = serverContainer;
|
||||
return deployYaml;
|
||||
|
@ -123,7 +118,7 @@ function createServerDeploy(serverSpec) {
|
|||
function createServerService(serverSpec) {
|
||||
const { name, url } = serverSpec;
|
||||
const serviceYaml = yaml.load(
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/server-svc.yml"), "utf8")
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/server-svc.yml"), "utf8"),
|
||||
);
|
||||
serviceYaml.metadata.annotations["ingress.qumine.io/hostname"] = url;
|
||||
serviceYaml.metadata.labels.app = `mcl-${name}-app`;
|
||||
|
@ -136,7 +131,7 @@ function createServerService(serverSpec) {
|
|||
function createRconService(serverSpec) {
|
||||
const { name, url } = serverSpec;
|
||||
const rconSvcYaml = yaml.load(
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/rcon-svc.yml"), "utf8")
|
||||
fs.readFileSync(path.resolve("lib/k8s/configs/rcon-svc.yml"), "utf8"),
|
||||
);
|
||||
rconSvcYaml.metadata.labels.app = `mcl-${name}-app`;
|
||||
rconSvcYaml.metadata.name = `mcl-${name}-rcon`;
|
||||
|
|
|
@ -25,24 +25,24 @@ export default async function deleteServer(req, res) {
|
|||
// Delete in reverse order
|
||||
const deleteDeploy = k8sDeps.deleteNamespacedDeployment(
|
||||
`mcl-${serverSpec.name}`,
|
||||
namespace
|
||||
namespace,
|
||||
);
|
||||
const deleteService = k8sCore.deleteNamespacedService(
|
||||
`mcl-${name}-server`,
|
||||
namespace
|
||||
namespace,
|
||||
);
|
||||
const deleteRconService = k8sCore.deleteNamespacedService(
|
||||
`mcl-${name}-rcon`,
|
||||
namespace
|
||||
namespace,
|
||||
);
|
||||
await deleteDeploy.catch(deleteError(res));
|
||||
const deleteRconSecret = k8sCore.deleteNamespacedSecret(
|
||||
`mcl-${name}-rcon-secret`,
|
||||
namespace
|
||||
namespace,
|
||||
);
|
||||
const deleteVolume = k8sCore.deleteNamespacedPersistentVolumeClaim(
|
||||
`mcl-${name}-volume`,
|
||||
namespace
|
||||
namespace,
|
||||
);
|
||||
Promise.all([
|
||||
deleteService,
|
||||
|
|
17
lib/routes/error-route.js
Normal file
17
lib/routes/error-route.js
Normal file
|
@ -0,0 +1,17 @@
|
|||
// Express error middleware: log the stack trace, then forward the error
// to the next error handler in the chain.
export function logErrors(err, req, res, next) {
  console.error(err.stack);
  next(err);
}
|
||||
|
||||
// Express error middleware: XHR requests get a generic JSON 500 payload;
// all other requests fall through to the next error handler.
export function clientErrorHandler(err, req, res, next) {
  if (!req.xhr) {
    next(err);
    return;
  }
  res.status(500).send({ error: "Something failed!" });
}
|
||||
|
||||
// Final fallback error middleware: respond 500 and render the "error"
// view with the thrown error.
// NOTE: `next` is unused but required — Express recognizes error
// middleware by its 4-argument signature.
export function errorHandler(err, req, res, next) {
  res.status(500);
  res.render("error", { error: err });
}
|
2
lib/routes/react-route.js
vendored
2
lib/routes/react-route.js
vendored
|
@ -3,6 +3,6 @@ import path from "path";
|
|||
const router = Router();
|
||||
router.use("/", express.static(path.resolve("./build")));
|
||||
router.get("/*", (req, res) =>
|
||||
res.sendFile(path.resolve("./build/index.html"))
|
||||
res.sendFile(path.resolve("./build/index.html")),
|
||||
);
|
||||
export default router;
|
||||
|
|
|
@ -7,7 +7,7 @@ kc.loadFromDefault();
|
|||
const k8sApi = kc.makeApiClient(k8s.CoreV1Api);
|
||||
// Get Routes
|
||||
router.get("/available", (req, res) => {
|
||||
return res.json({cpu: 8000, memory: 16000});
|
||||
return res.json({ cpu: 8000, memory: 16000 });
|
||||
// TODO Workaround to detect available
|
||||
k8sApi.listNode().then((nodeRes) => {
|
||||
const nodeAllocatable = nodeRes.body.items.map((i) => i.status.allocatable);
|
||||
|
|
|
@ -13,7 +13,7 @@ export default async function rconInterface(socket) {
|
|||
const rconRes = await k8sCore.readNamespacedSecret(rconSecret, namespace);
|
||||
const rconPassword = Buffer.from(
|
||||
rconRes.body.data["rcon-password"],
|
||||
"base64"
|
||||
"base64",
|
||||
).toString("utf8");
|
||||
const rconHost = `mcl-${socket.mcs.serverName}-rcon`;
|
||||
const rcon = new RconClient({
|
||||
|
|
|
@ -6,6 +6,11 @@ import vitals from "../routes/vitals-route.js";
|
|||
import systemRoute from "../routes/system-route.js";
|
||||
import serverRoute from "../routes/server-route.js";
|
||||
import reactRoute from "../routes/react-route.js";
|
||||
import {
|
||||
logErrors,
|
||||
clientErrorHandler,
|
||||
errorHandler,
|
||||
} from "../routes/error-route.js";
|
||||
|
||||
export default function buildRoutes(pg, skio) {
|
||||
const router = express.Router();
|
||||
|
@ -18,7 +23,10 @@ export default function buildRoutes(pg, skio) {
|
|||
// Routes
|
||||
router.use("/api/system", systemRoute);
|
||||
router.use("/api/server", serverRoute);
|
||||
router.use(["/mcl","/mcl/*"], reactRoute); // Static Build Route
|
||||
router.use(["/mcl", "/mcl/*"], reactRoute); // Static Build Route
|
||||
/*router.use(logErrors);
|
||||
router.use(clientErrorHandler);
|
||||
router.use(errorHandler);*/
|
||||
|
||||
return router;
|
||||
}
|
||||
|
|
|
@ -32,4 +32,3 @@ const storage = multerS3({
|
|||
});
|
||||
|
||||
export const upload = multer({ storage });
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue