(feature) Update UI & Resource Availability
parent 11d8229eb5
commit 929193d272
44 changed files with 4747 additions and 27 deletions

lib/Minecluster.js (Normal file, 48 additions)
@@ -0,0 +1,48 @@
// Imports
import fig from "figlet";
import http from "http";
import express from "express";
import { INFO, OK, logInfo } from "./util/logging.js";

// Import Core Modules
import buildRoutes from "./server/router.js";
import injectSockets from "./server/sockets.js";

// Constants
const title = "MCL";
const port = process.env.MCL_DEV_PORT ?? 52000;

// Class
export default class Minecluster {
  constructor(options = {}) {
    for (var k in options) this[k] = options[k];
    this.port = options.port ?? port;
  }

  async _preinitialize() {
    logInfo(fig.textSync(title, "Larry 3D"));
    INFO("INIT", "Initializing...");
    this.app = express();
    this.server = http.createServer(this.app);
    this.sockets = injectSockets(this.server, this.jobs);
    this.routes = buildRoutes(this.sockets);
    this.app.use(this.routes);
    OK("INIT", "Initialized!");
  }

  async _connect() {
    // await this.pg.connect();
  }

  start() {
    const mcl = this;
    return new Promise(async function init(res) {
      mcl._preinitialize();
      await mcl._connect();
      mcl.server.listen(mcl.port, function onStart() {
        OK("SERVER", `Running on ${mcl.port}`);
        res();
      });
    });
  }
}

lib/index.js (Normal file, 0 additions)
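
lib/index.js lands empty in this commit. If it is meant to become the package entrypoint, a minimal sketch (an assumption, not part of the diff) would simply boot the class added above:

// Hypothetical entrypoint wiring, not part of this commit
import Minecluster from "./Minecluster.js";

new Minecluster().start();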

lib/k8s.js (Normal file, 8 additions)
@@ -0,0 +1,8 @@
import k8s from "@kubernetes/client-node";
const kc = new k8s.KubeConfig();
kc.loadFromDefault();

const k8sApi = kc.makeApiClient(k8s.CoreV1Api);
k8sApi.listNamespacedPod("mc-garden-default").then((res) => {
  console.log(res.body);
});

lib/k8s/configs/rcon-secret.yml (Normal file, 10 additions)
@@ -0,0 +1,10 @@
apiVersion: v1
data:
  rcon-password: UEphT3V2aGJlQjNvc3M0dElwQU5YTUZrSkltR1RsRVl0ZGx3elFqZjJLdVZrZXNtV0hja1VhUUd3bmZDcElpbA==
kind: Secret
metadata:
  labels:
    app: changeme-app-label
  name: changeme-rcon-secret
  namespace: changeme-namespace
type: Opaque

lib/k8s/configs/rcon-svc.yml (Normal file, 22 additions)
@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
  labels:
    app: changeme-app
  name: changeme-rcon
  namespace: changeme-namespace
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  ports:
    - name: rcon
      port: 25575
      protocol: TCP
      targetPort: rcon
  selector:
    app: changeme-app
  sessionAffinity: None
  type: ClusterIP

lib/k8s/configs/server-deployment.yml (Normal file, 215 additions)
@@ -0,0 +1,215 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: changeme-name
  namespace: changeme-namespace
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: changeme-app
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: changeme-app
    spec:
      containers:
        - env:
            - name: SRC_DIR
              value: /data
            - name: BACKUP_NAME
              value: world
            - name: INITIAL_DELAY
              value: 2m
            - name: BACKUP_INTERVAL
              value: 24h
            - name: PRUNE_BACKUPS_DAYS
              value: "2"
            - name: PAUSE_IF_NO_PLAYERS
              value: "true"
            - name: SERVER_PORT
              value: "25565"
            - name: RCON_HOST
              value: localhost
            - name: RCON_PORT
              value: "25575"
            - name: RCON_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: rcon-password
                  name: changeme-rcon-secret
            - name: RCON_RETRIES
              value: "5"
            - name: RCON_RETRY_INTERVAL
              value: 10s
            - name: EXCLUDES
              value: "*.jar,cache,logs"
            - name: BACKUP_METHOD
              value: rclone
            - name: DEST_DIR
              value: /backups
            - name: LINK_LATEST
              value: "false"
            - name: TAR_COMPRESS_METHOD
              value: gzip
            - name: ZSTD_PARAMETERS
              value: -3 --long=25 --single-thread
            - name: RCLONE_REMOTE
              value: mc-dunemask-net
            - name: RCLONE_DEST_DIR
              value: /minecraft-backups/deltasmp-backups
            - name: RCLONE_COMPRESS_METHOD
              value: gzip
          image: itzg/mc-backup:latest
          imagePullPolicy: IfNotPresent
          name: mcs-deltasmp-minecraft-mc-backup
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /data
              name: datadir
              readOnly: true
            - mountPath: /backups
              name: backupdir
            - mountPath: /config/rclone
              name: rclone-config
        - env:
            - name: EULA
              value: "TRUE"
            - name: TYPE
              value: VANILLA
            - name: VERSION
              value: "latest"
            - name: DIFFICULTY
              value: easy
            - name: WHITELIST
            - name: OPS
            - name: ICON
            - name: MAX_PLAYERS
              value: "20"
            - name: MAX_WORLD_SIZE
              value: "10000"
            - name: ALLOW_NETHER
              value: "true"
            - name: ANNOUNCE_PLAYER_ACHIEVEMENTS
              value: "true"
            - name: ENABLE_COMMAND_BLOCK
              value: "true"
            - name: FORCE_GAMEMODE
              value: "false"
            - name: GENERATE_STRUCTURES
              value: "true"
            - name: HARDCORE
              value: "false"
            - name: MAX_BUILD_HEIGHT
              value: "256"
            - name: MAX_TICK_TIME
              value: "60000"
            - name: SPAWN_ANIMALS
              value: "true"
            - name: SPAWN_MONSTERS
              value: "true"
            - name: SPAWN_NPCS
              value: "true"
            - name: SPAWN_PROTECTION
              value: "16"
            - name: VIEW_DISTANCE
              value: "10"
            - name: SEED
            - name: MODE
              value: survival
            - name: MOTD
              value: §6Minecluster Hosting
            - name: PVP
              value: "true"
            - name: LEVEL_TYPE
              value: DEFAULT
            - name: GENERATOR_SETTINGS
            - name: LEVEL
              value: world
            - name: MODPACK
            - name: ONLINE_MODE
              value: "true"
            - name: MEMORY
              value: 1024M
            - name: JVM_OPTS
            - name: JVM_XX_OPTS
            - name: OVERRIDE_SERVER_PROPERTIES
              value: "true"
            - name: ENABLE_RCON
              value: "true"
            - name: RCON_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: rcon-password
                  name: changeme-rcon-secret
          image: itzg/minecraft-server:latest
          imagePullPolicy: IfNotPresent
          livenessProbe:
            exec:
              command:
                - mc-health
            failureThreshold: 20
            initialDelaySeconds: 30
            periodSeconds: 5
            successThreshold: 1
            timeoutSeconds: 1
          name: changeme-name
          ports:
            - containerPort: 25565
              name: minecraft
              protocol: TCP
            - containerPort: 25575
              name: rcon
              protocol: TCP
          readinessProbe:
            exec:
              command:
                - mc-health
            failureThreshold: 20
            initialDelaySeconds: 30
            periodSeconds: 5
            successThreshold: 1
            timeoutSeconds: 1
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
          stdin: true
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          tty: true
          volumeMounts:
            - mountPath: /data
              name: datadir
            - mountPath: /backups
              name: backupdir
              readOnly: true
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 2000
        runAsUser: 1000
      terminationGracePeriodSeconds: 30
      volumes:
        - name: datadir
          persistentVolumeClaim:
            claimName: changeme-pvc-name
        - emptyDir: {}
          name: backupdir
        - name: rclone-config
          secret:
            defaultMode: 420
            items:
              - key: rclone.conf
                path: rclone.conf
            secretName: rclone-config

lib/k8s/configs/server-pvc.yml (Normal file, 13 additions)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    service: changeme-service-name
  name: changeme-pvc-name
  namespace: changeme-namespace
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi

lib/k8s/configs/server-svc.yml (Normal file, 24 additions)
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    ingress.qumine.io/hostname: changeme-url
    ingress.qumine.io/portname: minecraft
  labels:
    app: changeme-app
  name: changeme-name
  namespace: changeme-namespace
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  ports:
    - name: minecraft
      port: 25565
      protocol: TCP
      targetPort: minecraft
  selector:
    app: changeme-app
  sessionAffinity: None
  type: ClusterIP

lib/k8s/live-logging.js (Normal file, 30 additions)
@@ -0,0 +1,30 @@
import stream from "stream";
import k8s from "@kubernetes/client-node";
import { ERR } from "../util/logging.js";

const kc = new k8s.KubeConfig();
kc.loadFromDefault();
const k8sApi = kc.makeApiClient(k8s.CoreV1Api);
export default async function liveLogging(socket, serverNamespace) {
  const containerName = `mcl-${socket.mcs.serverName}`;
  const podResponse = await k8sApi.listNamespacedPod(serverNamespace);
  const pods = podResponse.body.items.map((vp1) => vp1.metadata.name);
  const mcsPods = pods.filter((p) => p.startsWith(containerName));
  if (mcsPods.length === 0)
    throw Error(`Could not find a pod that starts with ${containerName}`);
  if (mcsPods.length > 1)
    throw Error(`Multiple pods match the name ${containerName}`);

  const log = new k8s.Log(kc);
  const logStream = new stream.PassThrough();
  logStream.on("data", (chunk) =>
    socket.emit("push", Buffer.from(chunk).toString())
  );
  log
    .log(serverNamespace, mcsPods[0], containerName, logStream, {
      follow: true,
      pretty: false,
      timestamps: false,
    })
    .catch((e) => ERR("K8S", e));
}

lib/k8s/server-control.js (Normal file, 95 additions)
@@ -0,0 +1,95 @@
import k8s from "@kubernetes/client-node";
import { ERR } from "../util/logging.js";
const kc = new k8s.KubeConfig();
kc.loadFromDefault();

const k8sDeps = kc.makeApiClient(k8s.AppsV1Api);
const k8sCore = kc.makeApiClient(k8s.CoreV1Api);
const k8sMetrics = new k8s.Metrics(kc);
const namespace = process.env.MCL_SERVER_NAMESPACE;

export async function startServer(req, res) {
  const serverSpec = req.body;
  if (!serverSpec) return res.sendStatus(400);
  if (!serverSpec.name) return res.status(400).send("Server name required!");
  const { name } = serverSpec;
  const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
  const dep = deploymentRes.body.items.find(
    (i) => i.metadata.name === `mcl-${name}`
  );
  if (!dep) return res.status(409).send("Server does not exist!");
  if (dep.spec.replicas === 1)
    return res.status(409).send("Server already started!");
  dep.spec.replicas = 1;
  k8sDeps.replaceNamespacedDeployment(`mcl-${name}`, namespace, dep);
  res.sendStatus(200);
}

export async function stopServer(req, res) {
  const serverSpec = req.body;
  if (!serverSpec) return res.sendStatus(400);
  if (!serverSpec.name) return res.status(400).send("Server name required!");
  const { name } = serverSpec;
  const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
  const dep = deploymentRes.body.items.find(
    (i) => i.metadata.name === `mcl-${name}`
  );
  if (!dep) return res.status(409).send("Server does not exist!");
  if (dep.spec.replicas === 0)
    return res.status(409).send("Server already stopped!");
  dep.spec.replicas = 0;
  k8sDeps.replaceNamespacedDeployment(`mcl-${name}`, namespace, dep);
  res.sendStatus(200);
}

export async function serverList(req, res) {
  const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
  const deployments = deploymentRes.body.items.map((i) => i.metadata.name);
  // TODO: Add an annotation and manage using that
  const serverDeployments = deployments.filter((d) => d.startsWith("mcl-"));
  res.json(serverDeployments.map((sd) => sd.substring(4)));
}

export async function getServers(req, res) {
  const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
  const deployments = deploymentRes.body.items;
  const podMetricsResponse = await k8sMetrics.getPodMetrics(namespace);
  // TODO: Add an annotation and manage using that
  const serverDeployments = deployments.filter((d) =>
    d.metadata.name.startsWith("mcl-")
  );
  var name, metrics, started;
  const servers = serverDeployments.map((s) => {
    name = s.metadata.name.substring(4);
    metrics = null;
    started = !!s.spec.replicas;
    const pod = podMetricsResponse.items.find(({ metadata: md }) => {
      return md.labels && md.labels.app && md.labels.app === `mcl-${name}-app`;
    });
    if (pod) {
      const podCpus = pod.containers.map(
        ({ usage }) => parseInt(usage.cpu) / 1_000_000
      );
      const podMems = pod.containers.map(
        ({ usage }) => parseInt(usage.memory) / 1024
      );
      metrics = {
        cpu: Math.ceil(podCpus.reduce((a, b) => a + b)),
        memory: Math.ceil(podMems.reduce((a, b) => a + b)),
      };
    }

    return { name, metrics, started };
  });
  var clusterMetrics = { cpu: 0, memory: 0 };
  if (servers.length > 0) {
    const clusterCpu = servers
      .map(({ metrics }) => (metrics ? metrics.cpu : 0))
      .reduce((a, b) => a + b);
    const clusterMem = servers
      .map(({ metrics }) => (metrics ? metrics.memory : 0))
      .reduce((a, b) => a + b);
    clusterMetrics = { cpu: clusterCpu, memory: clusterMem };
  }
  res.json({ servers, clusterMetrics });
}
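
Based on getServers above (bound to GET /api/server/instances in lib/routes/server-route.js further down), the response body looks roughly like the following sketch. The server names and numbers are made up; metrics is null when no pod metrics are available for that server.

// Illustrative response shape for GET /api/server/instances (values are hypothetical)
const exampleInstancesResponse = {
  servers: [
    { name: "deltasmp", metrics: { cpu: 120, memory: 900 }, started: true },
    { name: "creative", metrics: null, started: false },
  ],
  clusterMetrics: { cpu: 120, memory: 900 },
};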

lib/k8s/server-create.js (Normal file, 171 additions)
@@ -0,0 +1,171 @@
import { v4 as uuidv4 } from "uuid";
import bcrypt from "bcrypt";
import k8s from "@kubernetes/client-node";
import yaml from "js-yaml";
import fs from "node:fs";
import path from "node:path";
const kc = new k8s.KubeConfig();
kc.loadFromDefault();
const k8sDeps = kc.makeApiClient(k8s.AppsV1Api);
const k8sCore = kc.makeApiClient(k8s.CoreV1Api);
const namespace = process.env.MCL_SERVER_NAMESPACE;

function payloadFilter(req, res) {
  const serverSpec = req.body;
  if (!serverSpec) return res.sendStatus(400);
  const { name, url, version, serverType, difficulty, gamemode, memory } =
    serverSpec;
  if (!name) return res.status(400).send("Server name is required!");
  if (!url) return res.status(400).send("Server url is required!");
  if (!version) return res.status(400).send("Server version is required!");
  if (!difficulty)
    return res.status(400).send("Server difficulty is required!");
  if (!serverType) return res.status(400).send("Server type is required!");
  if (!gamemode) return res.status(400).send("Server gamemode is required!");
  if (!memory) return res.status(400).send("Memory is required!");
  req.body.name = req.body.name.toLowerCase();
  return "filtered";
}

function createRconSecret(serverSpec) {
  const { name } = serverSpec;
  const rconYaml = yaml.load(
    fs.readFileSync(path.resolve("lib/k8s/configs/rcon-secret.yml"), "utf8")
  );

  // TODO: Dynamic rconPassword
  const rconPassword = bcrypt.hashSync(uuidv4(), 10);
  rconYaml.data["rcon-password"] = Buffer.from(rconPassword).toString("base64");
  rconYaml.metadata.labels.app = `mcl-${name}-app`;
  rconYaml.metadata.name = `mcl-${name}-rcon-secret`;
  rconYaml.metadata.namespace = namespace;
  return rconYaml;
}

function createServerVolume(serverSpec) {
  const { name } = serverSpec;
  const volumeYaml = yaml.load(
    fs.readFileSync(path.resolve("lib/k8s/configs/server-pvc.yml"), "utf8")
  );
  volumeYaml.metadata.labels.service = `mcl-${name}-server`;
  volumeYaml.metadata.name = `mcl-${name}-volume`;
  volumeYaml.metadata.namespace = namespace;
  volumeYaml.spec.resources.requests.storage = "1Gi"; // TODO: Changeme
  return volumeYaml;
}

function createServerDeploy(serverSpec) {
  const {
    name,
    version,
    serverType,
    difficulty,
    gamemode,
    memory,
    motd,
    maxPlayers,
    seed,
    modpack,
    ops,
    whitelist,
  } = serverSpec;
  const deployYaml = yaml.load(
    fs.readFileSync(
      path.resolve("lib/k8s/configs/server-deployment.yml"),
      "utf8"
    )
  );
  deployYaml.metadata.name = `mcl-${name}`;
  deployYaml.metadata.namespace = namespace;
  deployYaml.spec.replicas = 0; // TODO: User control for autostart
  deployYaml.spec.selector.matchLabels.app = `mcl-${name}-app`;
  deployYaml.spec.template.metadata.labels.app = `mcl-${name}-app`;
  deployYaml.spec.template.spec.containers.splice(0, 1); // TODO: Currently removing the backup container
  const serverContainer = deployYaml.spec.template.spec.containers[0];

  // Environment variables
  serverContainer.env.find(({ name: n }) => n === "TYPE").value = serverType;
  serverContainer.env.find(({ name: n }) => n === "VERSION").value = version;
  serverContainer.env.find(({ name: n }) => n === "DIFFICULTY").value =
    difficulty;
  serverContainer.env.find(({ name: n }) => n === "MODE").value = gamemode;
  serverContainer.env.find(({ name: n }) => n === "MOTD").value = motd;
  serverContainer.env.find(({ name: n }) => n === "MAX_PLAYERS").value =
    maxPlayers;
  serverContainer.env.find(({ name: n }) => n === "SEED").value = seed;
  serverContainer.env.find(({ name: n }) => n === "OPS").value = ops;
  serverContainer.env.find(({ name: n }) => n === "WHITELIST").value =
    whitelist;
  serverContainer.env.find(
    ({ name: n }) => n === "MEMORY"
  ).value = `${memory}M`;
  if (serverType === "VANILLA")
    delete serverContainer.env.find(({ name: n }) => n === "MODPACK").value;
  else
    serverContainer.env.find(({ name: n }) => n === "MODPACK").value = modpack;

  serverContainer.env.find(
    ({ name }) => name === "RCON_PASSWORD"
  ).valueFrom.secretKeyRef.name = `mcl-${name}-rcon-secret`;
  // Server Container Name
  serverContainer.name = `mcl-${name}`;
  // Resources
  serverContainer.resources.requests.memory = `${memory}Mi`;
  // serverContainer.resources.limits.memory = `${memory}Mi`; // TODO: Allow for limits beyond initial startup
  // Volumes
  deployYaml.spec.template.spec.volumes.find(
    ({ name }) => name === "datadir"
  ).persistentVolumeClaim.claimName = `mcl-${name}-volume`;
  deployYaml.spec.template.spec.containers[0] = serverContainer;
  return deployYaml;
}

function createServerService(serverSpec) {
  const { name, url } = serverSpec;
  const serviceYaml = yaml.load(
    fs.readFileSync(path.resolve("lib/k8s/configs/server-svc.yml"), "utf8")
  );
  serviceYaml.metadata.annotations["ingress.qumine.io/hostname"] = url;
  serviceYaml.metadata.labels.app = `mcl-${name}-app`;
  serviceYaml.metadata.name = `mcl-${name}-server`;
  serviceYaml.metadata.namespace = namespace;
  serviceYaml.spec.selector.app = `mcl-${name}-app`;
  return serviceYaml;
}

function createRconService(serverSpec) {
  const { name, url } = serverSpec;
  const rconSvcYaml = yaml.load(
    fs.readFileSync(path.resolve("lib/k8s/configs/rcon-svc.yml"), "utf8")
  );
  rconSvcYaml.metadata.labels.app = `mcl-${name}-app`;
  rconSvcYaml.metadata.name = `mcl-${name}-rcon`;
  rconSvcYaml.metadata.namespace = namespace;
  rconSvcYaml.spec.selector.app = `mcl-${name}-app`;
  return rconSvcYaml;
}

export default async function createServer(req, res) {
  if (payloadFilter(req, res) !== "filtered") return;
  const serverSpec = req.body;
  const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
  const deployments = deploymentRes.body.items.map((i) => i.metadata.name);
  if (deployments.includes(`mcl-${serverSpec.name}`))
    return res.status(409).send("Server already exists!");
  const pvcRes = await k8sCore.listNamespacedPersistentVolumeClaim(namespace);
  const pvcs = pvcRes.body.items.map((i) => i.metadata.name);
  if (pvcs.includes(`mcl-${serverSpec.name}-volume`))
    return res.status(409).send("Server PVC already exists!");
  const rconSecret = createRconSecret(serverSpec);
  const serverVolume = createServerVolume(serverSpec);
  const serverDeploy = createServerDeploy(serverSpec);
  const serverService = createServerService(serverSpec);
  const rconService = createRconService(serverSpec);
  k8sCore.createNamespacedPersistentVolumeClaim(namespace, serverVolume);
  k8sCore.createNamespacedSecret(namespace, rconSecret);
  k8sCore.createNamespacedService(namespace, serverService);
  k8sCore.createNamespacedService(namespace, rconService);
  k8sDeps.createNamespacedDeployment(namespace, serverDeploy);

  res.sendStatus(200);
}

lib/k8s/server-delete.js (Normal file, 55 additions)
@@ -0,0 +1,55 @@
import k8s from "@kubernetes/client-node";
import { ERR } from "../util/logging.js";
const kc = new k8s.KubeConfig();
kc.loadFromDefault();

const k8sDeps = kc.makeApiClient(k8s.AppsV1Api);
const k8sCore = kc.makeApiClient(k8s.CoreV1Api);
const namespace = process.env.MCL_SERVER_NAMESPACE;

const deleteError = (res) => (err) => {
  res.status(500).send("Error deleting a resource!");
  ERR("K8S", "An error occurred while deleting a resource", err);
};

export default async function deleteServer(req, res) {
  const serverSpec = req.body;
  if (!serverSpec) return res.sendStatus(400);
  if (!serverSpec.name) return res.status(400).send("Server name required!");
  const { name } = serverSpec;
  // Ensure deployment exists
  const deploymentRes = await k8sDeps.listNamespacedDeployment(namespace);
  const deployments = deploymentRes.body.items.map((i) => i.metadata.name);
  if (!deployments.includes(`mcl-${serverSpec.name}`))
    return res.status(409).send("Server does not exist!");
  // Delete in reverse order
  const deleteDeploy = k8sDeps.deleteNamespacedDeployment(
    `mcl-${serverSpec.name}`,
    namespace
  );
  const deleteService = k8sCore.deleteNamespacedService(
    `mcl-${name}-server`,
    namespace
  );
  const deleteRconService = k8sCore.deleteNamespacedService(
    `mcl-${name}-rcon`,
    namespace
  );
  await deleteDeploy.catch(deleteError(res));
  const deleteRconSecret = k8sCore.deleteNamespacedSecret(
    `mcl-${name}-rcon-secret`,
    namespace
  );
  const deleteVolume = k8sCore.deleteNamespacedPersistentVolumeClaim(
    `mcl-${name}-volume`,
    namespace
  );
  Promise.all([
    deleteService,
    deleteRconService,
    deleteRconSecret,
    deleteVolume,
  ])
    .then(() => res.sendStatus(200))
    .catch(deleteError(res));
}

lib/routes/server-route.js (Normal file, 19 additions)
@@ -0,0 +1,19 @@
import { Router, json as jsonMiddleware } from "express";
import {
  startServer,
  stopServer,
  serverList,
  getServers,
} from "../k8s/server-control.js";
import createServer from "../k8s/server-create.js";
import deleteServer from "../k8s/server-delete.js";
const router = Router();
router.use(jsonMiddleware());
// Routes
router.post("/create", createServer);
router.delete("/delete", deleteServer);
router.post("/start", startServer);
router.post("/stop", stopServer);
router.get("/list", serverList);
router.get("/instances", getServers);
export default router;
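
These routes are mounted under /api/server by lib/server/router.js further down. A hedged client-side sketch of the create call follows; the field names come from payloadFilter in lib/k8s/server-create.js, while the host, port (the MCL_DEV_PORT default from lib/Minecluster.js), and the example values are assumptions.

// Assumed client call; host/port and field values are hypothetical
const res = await fetch("http://localhost:52000/api/server/create", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    name: "deltasmp",
    url: "deltasmp.example.com",
    version: "latest",
    serverType: "VANILLA",
    difficulty: "easy",
    gamemode: "survival",
    memory: 1024,
  }),
});
console.log(res.status); // 200 on success, 400/409 on validation or conflict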

lib/routes/system-route.js (Normal file, 27 additions)
@@ -0,0 +1,27 @@
import { Router } from "express";
import k8s from "@kubernetes/client-node";
import { WARN } from "../util/logging.js";
const router = Router();
const kc = new k8s.KubeConfig();
kc.loadFromDefault();
const k8sApi = kc.makeApiClient(k8s.CoreV1Api);
// Get Routes
router.get("/available", (req, res) => {
  k8sApi.listNode().then((nodeRes) => {
    const nodeAllocatable = nodeRes.body.items.map((i) => i.status.allocatable);
    const nodeResources = nodeAllocatable.map(({ cpu, memory }) => ({
      cpu,
      memory,
    }));
    const { cpu: clusterCpu, memory: clusterMemory } = nodeResources[0];
    const isIdentical = ({ cpu, memory }) =>
      clusterMemory === memory && clusterCpu === cpu;
    if (!nodeResources.every(isIdentical))
      WARN("ROUTES", "Warning, node resources were inconsistent");
    const availableCpu = parseInt(clusterCpu) * 1000;
    const availableMemory = parseInt(clusterMemory) / 1024;
    res.json({ cpu: availableCpu, memory: availableMemory });
  });
});

export default router;
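
Per the handler above (mounted at /api/system/available by lib/server/router.js), the allocatable CPU of the first node is converted to millicores and its memory from Ki to MiB. An illustrative response, with made-up numbers:

// Illustrative response for GET /api/system/available (values are hypothetical)
// cpu: cores * 1000 (millicores), memory: allocatable Ki / 1024 (MiB)
const exampleAvailableResponse = { cpu: 4000, memory: 16384 };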

lib/routes/vitals-route.js (Normal file, 6 additions)
@@ -0,0 +1,6 @@
import { Router } from "express";
const router = Router();
// Get Routes
router.get("/healthz", (req, res) => res.sendStatus(200));

export default router;

lib/server/rcon.js (Normal file, 31 additions)
@@ -0,0 +1,31 @@
import k8s from "@kubernetes/client-node";
import { Rcon as RconClient } from "rcon-client";
import { ERR, VERB } from "../util/logging.js";
const kc = new k8s.KubeConfig();
kc.loadFromDefault();
const k8sCore = kc.makeApiClient(k8s.CoreV1Api);
const namespace = process.env.MCL_SERVER_NAMESPACE;

export default async function rconInterface(socket) {
  if (socket.rconClient)
    return VERB("RCON", "Socket already connected to RCON");
  const rconSecret = `mcl-${socket.mcs.serverName}-rcon-secret`;
  const rconRes = await k8sCore.readNamespacedSecret(rconSecret, namespace);
  const rconPassword = Buffer.from(
    rconRes.body.data["rcon-password"],
    "base64"
  ).toString("utf8");
  const rconHost = `mcl-${socket.mcs.serverName}-rcon`;
  const rcon = new RconClient({
    host: rconHost,
    port: 25575,
    password: rconPassword,
  });
  rcon.on("error", (error) => socket.emit("push", error));
  try {
    await rcon.connect();
  } catch (error) {
    ERR("RCON", `Could not connect to 'mcl-${socket.mcs.serverName}-rcon'`);
  }
  socket.rconClient = rcon;
}

lib/server/router.js (Normal file, 23 additions)
@@ -0,0 +1,23 @@
// Imports
import express from "express";

// Routes
import vitals from "../routes/vitals-route.js";
import systemRoute from "../routes/system-route.js";
import serverRoute from "../routes/server-route.js";

export default function buildRoutes(pg, skio) {
  const router = express.Router();
  // Special Routes
  router.use(vitals);
  router.all("/", (req, res) => res.redirect("/mcl"));

  // Middlewares

  // Routes
  router.use("/api/system", systemRoute);
  router.use("/api/server", serverRoute);
  // router.use("/mcl", react); // Static Build Route

  return router;
}

lib/server/sockets.js (Normal file, 46 additions)
@@ -0,0 +1,46 @@
import { Server as Skio } from "socket.io";
import { VERB, WARN, ERR } from "../util/logging.js";
import liveLogging from "../k8s/live-logging.js";
import rconInterface from "./rcon.js";

const namespace = process.env.MCL_SERVER_NAMESPACE;

async function rconSend(socket, m) {
  if (!socket.rconClient)
    return WARN("RCON", "Message sent before RCON connected!");
  try {
    const r = await socket.rconClient.send(m);
    socket.emit("push", `[RCON]: ${r}`);
  } catch (error) {
    WARN("RCON", error);
  }
}

const socketConnect = async (io, socket) => {
  VERB("WS", "Websocket connecting");
  socket.mcs = { serverName: socket.handshake.query.serverName };
  try {
    await liveLogging(socket, namespace);
    await rconInterface(socket);
    socket.on("msg", (m) => rconSend(socket, m));
  } catch (err) {
    ERR("SOCKETS", err);
    socket.emit("push", err);
    socket.disconnect();
  }
};

const socketAuth = (socket, next) => {
  const { token } = socket.handshake.auth;
  // next(new Error("Bad Token"));
  next();
};

const applySockets = (server) => {
  const io = new Skio(server);
  io.on("connection", (socket) => socketConnect(io, socket));
  VERB("WS", "Configured Websockets");
  return io;
};

export default applySockets;
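
A hedged sketch of the matching client side: the "push" and "msg" event names and the serverName handshake query come from socketConnect above, while the URL, server name, and token value are assumptions (socketAuth is defined above but not yet registered with io.use).

// Assumed client usage; URL, serverName, and token are hypothetical
import { io } from "socket.io-client";

const socket = io("http://localhost:52000", {
  query: { serverName: "deltasmp" },
  auth: { token: "dev-token" },
});
socket.on("push", (line) => console.log(line)); // live pod logs and RCON replies
socket.emit("msg", "list"); // forwarded to RCON via rconSend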

lib/util/logging.js (Normal file, 28 additions)
@@ -0,0 +1,28 @@
// Imports
import { Chalk } from "chalk";
const { redBright, greenBright, yellowBright, cyanBright, magentaBright } =
  new Chalk({ level: 2 });

// Logging
const logColor = (color, header, ...args) =>
  console.log(color(header), ...args);

export const logError = (...args) => logColor(redBright, ...args);

export const logConfirm = (...args) => logColor(greenBright, ...args);

export const logWarn = (...args) => logColor(yellowBright, ...args);

export const logInfo = (...args) => logColor(cyanBright, ...args);

export const logVerbose = (...args) => logColor(magentaBright, ...args);

export const ERR = (header, ...args) => logError(`[${header}]`, ...args);

export const OK = (header, ...args) => logConfirm(`[${header}]`, ...args);

export const WARN = (header, ...args) => logWarn(`[${header}]`, ...args);

export const INFO = (header, ...args) => logInfo(`[${header}]`, ...args);

export const VERB = (header, ...args) => logVerbose(`[${header}]`, ...args);
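
For reference, a minimal usage sketch of these helpers, matching how the other modules in this commit call them (header first, then message; the import path assumes a caller one directory below lib/):

import { INFO, OK, WARN, ERR, VERB } from "../util/logging.js";

INFO("INIT", "Initializing...");  // cyan [INIT] prefix
OK("SERVER", "Running on 52000"); // green [SERVER] prefix
WARN("RCON", "Message sent before RCON connected!");
ERR("K8S", new Error("example"));
VERB("WS", "Configured Websockets");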