Daemon: Implement Mount/Drive data collection

Christian Kahlau 3 years ago
parent
commit
750cf14f18

+ 2 - 2
.gitignore

@@ -3,7 +3,7 @@ package-lock.json
 
 daemon/data/
 daemon/dist/
+daemon/.env
 
 server/public/
-server/dist/
-server/data/
+server/dist/server/data/

+ 16 - 1
common/types/buffered-data.d.ts

@@ -1 +1,16 @@
-type BufferedData = { time: Date; cpu: number; ram: { used: number; max: number } };
+type BufferedData = {
+  time: Date;
+  cpu: number;
+  ram: {
+    used: number;
+    max: number;
+  };
+  hdd?: BufferedDriveData;
+};
+
+type BufferedDriveData = {
+  [mount: string]: {
+    used: number;
+    max: number;
+  };
+};
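
For illustration, a BufferedData sample with one monitored mount might look like this (all values hypothetical):

const sample: BufferedData = {
  time: new Date(),
  cpu: 12.34,                                 // CPU load in percent
  ram: { used: 536870912, max: 1073741824 },  // bytes (512 MiB of 1 GiB)
  hdd: {
    '/': { used: 50026188800, max: 239519862784 } // bytes, keyed by mount point
  }
};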

+ 13 - 9
common/types/reduced-data.d.ts

@@ -1,12 +1,16 @@
 type ReducedData = {
   time: Date;
-  cpu: {
-    avg: number;
-    peak: number;
-  };
-  ram: {
-    avg: number;
-    peak: number;
-    max: number;
-  };
+  cpu: ReducedValuesPerc;
+  ram: ReducedValuesMinMax;
+  hdd?: ReducedDriveData;
+};
+
+type ReducedValuesPerc = {
+  avg: number;
+  peak: number;
+};
+
+type ReducedValuesMinMax = ReducedValuesPerc & { max: number };
+type ReducedDriveData = {
+  [mount: string]: ReducedValuesMinMax;
 };
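
Correspondingly, a hypothetical ReducedData entry for one reduce group and one mount:

const entry: ReducedData = {
  time: new Date(),
  cpu: { avg: 10.5, peak: 25.0 },                            // ReducedValuesPerc
  ram: { avg: 536870912, peak: 805306368, max: 1073741824 }, // ReducedValuesMinMax
  hdd: { '/': { avg: 50026188800, peak: 50030000000, max: 239519862784 } }
};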

+ 1 - 1
common/util/logger.class.ts

@@ -3,7 +3,7 @@ export class Logger {
   public static get LOG_LEVELS(): Array<LogLevel> {
     return ['ERROR', 'WARNING', 'INFO', 'DEBUG'];
   }
-  private static levels: Array<LogLevel>;
+  private static levels: Array<LogLevel> = this.LOG_LEVELS.slice(0, 3);
 
   public static set logLevel(logLevel: LogLevel) {
     Logger.levels = Logger.LOG_LEVELS.slice(0, Logger.LOG_LEVELS.indexOf(logLevel) + 1);
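
The default now covers ERROR, WARNING and INFO, so messages logged before logLevel is assigned are no longer dropped. A sketch of the effect (assuming the existing Logger.info/Logger.debug helpers check Logger.levels):

Logger.info('visible even before configuration');   // 'INFO' is inside the default slice
Logger.debug('suppressed until Logger.logLevel = "DEBUG" is set');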

+ 0 - 3
daemon/.env

@@ -1,3 +0,0 @@
-DATA_DIR=data
-LOG_LEVEL=INFO
-WEB_PORT=8890

+ 4 - 0
daemon/.env.default

@@ -0,0 +1,4 @@
+DATA_DIR=data
+LOG_LEVEL=INFO
+WEB_PORT=8890
+MONITOR_MOUNTS=/
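
The collector splits MONITOR_MOUNTS on ':', so several mount points can be listed PATH-style, e.g. (hypothetical paths):

MONITOR_MOUNTS=/:/home:/mnt/backup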

+ 35 - 0
daemon/hdd.sh

@@ -0,0 +1,35 @@
+#!/bin/bash
+
+if [ -z "$1" ]; then
+  echo "Prints <used>/<available> space at <MOUNT_DIR> in full bytes"
+  echo "    Usage: $0 <MOUNT_DIR>"
+  exit 1
+fi
+
+FACTORS=('' 'K' 'M' 'G' 'T' 'P')
+BLKSIZE=$(df | sed -n '1 p' | awk '{print $2}' | sed 's/-blocks//') # e.g. "1K", "2M", "512K"
+
+REG='([0-9]+)([KMGTP]?)'
+[[ $BLKSIZE =~ $REG ]]
+BLKBASE="${BASH_REMATCH[1]}"
+BLKEXP="${BASH_REMATCH[2]}"
+
+for i in "${!FACTORS[@]}"; do
+  if [[ "${FACTORS[$i]}" = "${BLKEXP}" ]]; then
+    BLKEXP=$i
+  fi
+done
+
+BLKSIZE=$(( BLKBASE * (2 ** (10 * BLKEXP)) ))
+
+MOUNT=$1
+DF_LINE=( $(df | tail -n +2 | grep "$MOUNT\$" | awk '{printf "%s %s", $3, $2}') ) # used / total blocks
+
+if [ -z "${DF_LINE[0]}" ]; then
+  exit 0
+fi
+
+USED=$(( DF_LINE[0] * BLKSIZE ))
+TOTAL=$(( DF_LINE[1] * BLKSIZE ))
+
+echo "$USED/$TOTAL"
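
A hypothetical invocation (actual numbers depend on the host):

$ ./hdd.sh /
50026188800/239519862784

i.e. roughly 46.6 GiB used of a 223 GiB filesystem, both reported in full bytes.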

+ 99 - 13
daemon/src/collector.class.ts

@@ -15,6 +15,10 @@ const TIMESTAMP_FORMAT = `YYYY-MM-DD[T]HH:mm:ss.SSSZZ`;
 const REDUCE_INTERVAL_MINUTES = 5;
 const REDUCE_GROUP_MINUTES = 1;
 
+const MONITOR_MOUNTS = process.env.MONITOR_MOUNTS ? process.env.MONITOR_MOUNTS.split(':') : [];
+
+Logger.info('[INFO] Monitoring Drives:', MONITOR_MOUNTS);
+
 const CSV_COLS = {
   buffer: {
     time: 0,
@@ -65,7 +69,18 @@ export class Collector {
       const time = now.format(TIMESTAMP_FORMAT);
       const cpu = (await exec(`./cpu.sh`)).trim();
       const ram = (await exec(`./ram.sh`)).trim();
-      const data = `${time};${cpu};${ram}\n`;
+
+      const hdd: string[] = [];
+      for (const mount of MONITOR_MOUNTS) {
+        try {
+          const stats = (await exec(`./hdd.sh "${mount}"`)).trim();
+          if (stats?.length) hdd.push(`${mount} ${stats}`);
+        } catch (err) {
+          Logger.warn('[WARN] Error while getting space usage of mount', mount, ':', err);
+        }
+      }
+
+      const data = `${time};${cpu};${ram}${hdd.length ? `;${hdd.join(';')}` : ''}\n`;
 
       // Time to reduce buffer?
       const firstBufferTime = await this.getFirstBufferTime();
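
With one monitored mount, a buffered CSV line would then look like this (illustrative values; the RAM field format matches toBufferString below):

2022-05-01T12:00:00.000+0000;12.34;512.00/1024.00 MiB;/ 50026188800/239519862784
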
@@ -156,7 +171,10 @@ export class Collector {
         const firstTime = moment(valueBuffer[0].time);
         const currentTime = moment(data.time);
         if (moment.duration(currentTime.diff(firstTime)).abs().asMinutes() >= REDUCE_GROUP_MINUTES) {
-          const { cpu, ram, count } = valueBuffer.reduce(
+          type IntermediateValues = { sum: number; peak: number; max: number };
+          type IntermediateDriveData = { [mount: string]: IntermediateValues };
+          type IntermediateSums = { ram: IntermediateValues; cpu: IntermediateValues; hdd?: IntermediateDriveData; count: number };
+          const { cpu, ram, count, hdd } = valueBuffer.reduce(
             (res, cur) => {
               res.count++;
               res.cpu.sum += cur.cpu;
@@ -164,9 +182,20 @@ export class Collector {
               res.ram.sum += cur.ram.used;
               res.ram.peak = Math.max(res.ram.peak, cur.ram.used);
               res.ram.max = cur.ram.max;
+
+              if (cur.hdd && Object.keys(cur.hdd).length) {
+                res.hdd = Object.keys(cur.hdd).reduce((res_hdd, mount) => {
+                  if (!res_hdd[mount]) res_hdd[mount] = { sum: 0, peak: 0, max: 0 };
+                  res_hdd[mount].sum += cur.hdd[mount].used;
+                  res_hdd[mount].peak = Math.max(res_hdd[mount].peak, cur.hdd[mount].used);
+                  res_hdd[mount].max = cur.hdd[mount].max; // capacity: assign, don't sum
+                  return res_hdd;
+                }, res.hdd ?? ({} as IntermediateDriveData)); // carry the accumulator across samples
+              }
+
               return res;
             },
-            { ram: { sum: 0, peak: 0, max: 0 }, cpu: { sum: 0, peak: 0 }, count: 0 }
+            { ram: { sum: 0, peak: 0, max: 0 }, cpu: { sum: 0, peak: 0 }, count: 0 } as IntermediateSums
           );
 
           reduced.push({
@@ -179,7 +208,17 @@ export class Collector {
               avg: ram.sum / count,
               peak: ram.peak,
               max: ram.max
-            }
+            },
+            hdd: hdd
+              ? Object.keys(hdd).reduce((res, mount) => {
+                  res[mount] = {
+                    avg: hdd[mount].sum / count,
+                    peak: hdd[mount].peak,
+                    max: hdd[mount].max
+                  };
+                  return res;
+                }, {} as ReducedDriveData)
+              : undefined
           });
 
           Logger.debug('[DEBUG] ReducedData:', JSON.stringify(reduced[reduced.length - 1]));
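
As a quick sanity check of the grouping: two samples for '/' with used = 100 and 120 bytes against a 1000-byte capacity leave the accumulator at sum = 220, peak = 120, max = 1000, so the pushed entry holds avg = 220 / 2 = 110, peak = 120 and max = 1000.
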
@@ -209,25 +248,64 @@ export class Collector {
   }
 
   private parseBufferedData(line: string[]): BufferedData {
-    const cpu = Number(line[CSV_COLS.buffer.cpu]);
+    // TIMESTAMP
     const time = moment(line[CSV_COLS.buffer.time], TIMESTAMP_FORMAT).toDate();
-    let ramSplit = line[CSV_COLS.buffer.ram].split(' ');
-    const unit = ramSplit[1];
-    ramSplit = ramSplit[0].split('/');
-    const [used, max] = ramSplit;
+
+    // CPU
+    const cpu = Number(line[CSV_COLS.buffer.cpu]);
+
+    // RAM
+    const [stats, unit] = line[CSV_COLS.buffer.ram].split(' ');
+    const [used, max] = stats.split('/');
     const factor = this.parseByteUnit(unit);
 
+    const lastCol = CSV_COLS.buffer.ram;
+
+    // HDD (?)
+    let hdd: BufferedDriveData | undefined;
+    if (MONITOR_MOUNTS.length && line.length > lastCol + 1) {
+      for (let i = 1; i <= MONITOR_MOUNTS.length; i++) {
+        if (lastCol + i > line.length - 1) break;
+
+        const data = line[lastCol + i];
+        const [mount, stats] = data.split(' ');
+        const [used, max] = stats.split('/');
+
+        if (!hdd) hdd = {};
+        hdd[mount] = {
+          used: Number(used),
+          max: Number(max)
+        };
+      }
+    }
+
     return {
       time,
       cpu,
       ram: {
         used: Number(used) * factor,
         max: Number(max) * factor
-      }
+      },
+      hdd
     };
   }
 
   private parseReducedData(line: string[]): ReducedData {
+    const lastCol = CSV_COLS.reduced.ram.max;
+
+    // HDD (?)
+    let hdd: ReducedDriveData | undefined;
+    if (MONITOR_MOUNTS.length && line.length > lastCol + 1) {
+      hdd = {};
+      for (let i = 1; lastCol + i + 3 < line.length; i += 4) {
+        hdd[line[lastCol + i]] = {
+          avg: Number(line[lastCol + i + 1]),
+          peak: Number(line[lastCol + i + 2]),
+          max: Number(line[lastCol + i + 3])
+        };
+      }
+    }
+
     return {
       time: moment(line[CSV_COLS.reduced.time], TIMESTAMP_FORMAT).toDate(),
       cpu: {
@@ -238,7 +316,8 @@ export class Collector {
         avg: Number(line[CSV_COLS.reduced.ram.avg]),
         peak: Number(line[CSV_COLS.reduced.ram.peak]),
         max: Number(line[CSV_COLS.reduced.ram.max])
-      }
+      },
+      hdd
     };
   }
 
@@ -262,7 +341,8 @@ export class Collector {
     return [
       moment(data.time).format(TIMESTAMP_FORMAT),
       data.cpu,
-      `${(data.ram.used / this.byteFactors['M']).toFixed(2)}/${(data.ram.max / this.byteFactors['M']).toFixed(2)} MiB`
+      `${(data.ram.used / this.byteFactors['M']).toFixed(2)}/${(data.ram.max / this.byteFactors['M']).toFixed(2)} MiB`,
+      ...(data.hdd ? Object.keys(data.hdd).map(mount => `${mount} ${data.hdd[mount].used}/${data.hdd[mount].max}`) : [])
     ].join(';');
   }
 
@@ -273,7 +353,13 @@ export class Collector {
       data.cpu.peak.toFixed(2),
       data.ram.avg.toFixed(2),
       data.ram.peak.toFixed(2),
-      data.ram.max.toFixed(2)
+      data.ram.max.toFixed(2),
+      ...(data.hdd
+        ? Object.keys(data.hdd).reduce((res, mount) => {
+            res.push(mount, data.hdd[mount].avg.toFixed(2), data.hdd[mount].peak.toFixed(2), data.hdd[mount].max.toFixed(2));
+            return res;
+          }, [] as string[])
+        : [])
     ].join(';');
   }
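
Serialized with toReducedString, the hypothetical ReducedData entry from above becomes a single CSV line:

2022-05-01T12:00:00.000+0000;10.50;25.00;536870912.00;805306368.00;1073741824.00;/;50026188800.00;50030000000.00;239519862784.00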
 

+ 2 - 3
daemon/src/index.ts

@@ -3,13 +3,12 @@ import dotenv from 'dotenv';
 import { Logger, LogLevel } from '../../common/util/logger.class';
 
 dotenv.config();
+const LOG_LEVEL: LogLevel = (process.env.LOG_LEVEL as LogLevel) || 'INFO';
+Logger.logLevel = LOG_LEVEL;
 
 import { Collector } from './collector.class';
 import { Webserver } from './webserver.class';
 
-const LOG_LEVEL: LogLevel = (process.env.LOG_LEVEL as LogLevel) || 'INFO';
-Logger.logLevel = LOG_LEVEL;
-
 process.on('SIGABRT', exitGracefully);
 process.on('SIGQUIT', exitGracefully);
 process.on('SIGTERM', exitGracefully);