
[BS5] rework network and disk io

FreddleSpl0it committed 3 years ago · parent commit 5d35af9d69

2 changed files with 74 additions and 46 deletions:
  1. data/Dockerfiles/dockerapi/dockerapi.py (+30 -15)
  2. data/web/js/site/debug.js (+44 -31)

data/Dockerfiles/dockerapi/dockerapi.py (+30 -15)

@@ -7,6 +7,7 @@ from flask import Response
 from flask import request
 from threading import Thread
 from datetime import datetime
+from concurrent.futures import ThreadPoolExecutor
 import docker
 import uuid
 import signal
@@ -333,17 +334,29 @@ class host_stats_get(Resource):
     try:
       system_time = datetime.now()

-      disk_io_before = psutil.disk_io_counters(perdisk=False)
-      net_io_before = psutil.net_io_counters(pernic=False)
-      time.sleep(1)
-      disk_io_after = psutil.disk_io_counters(perdisk=False)
-      net_io_after = psutil.net_io_counters(pernic=False)
-
-      disks_read_per_sec = disk_io_after.read_bytes - disk_io_before.read_bytes
-      disks_write_per_sec = disk_io_after.write_bytes - disk_io_before.write_bytes
-      net_recv_per_sec = net_io_after.bytes_recv - net_io_before.bytes_recv
-      net_sent_per_sec = net_io_after.bytes_sent - net_io_before.bytes_sent
-
+      # get docker stats multithreaded for faster results
+      containers = docker_client.containers.list()
+      workers = os.cpu_count() * 4
+      with ThreadPoolExecutor(workers) as pool:
+        stats = list(pool.map(lambda x: x.stats(decode=None, stream=False), containers))
+
+      bytes_recv_total = 0
+      bytes_sent_total = 0
+      bytes_read_total = 0
+      bytes_write_total = 0
+      for stat in stats:
+        if "networks" in stat:
+          for interface in stat["networks"]:
+            bytes_recv_total += stat["networks"][interface]["rx_bytes"]
+            bytes_sent_total += stat["networks"][interface]["tx_bytes"]
+        if "blkio_stats" in stat:
+          if "io_service_bytes_recursive" in stat["blkio_stats"]:
+            if hasattr(stat["blkio_stats"]["io_service_bytes_recursive"], "__len__"):
+              for blkio in stat["blkio_stats"]["io_service_bytes_recursive"]:
+                if blkio["op"] == "read":
+                  bytes_read_total += blkio["value"]
+                elif blkio["op"] == "write":
+                  bytes_write_total += blkio["value"]

       host_stats = {
         "cpu": {
@@ -356,13 +369,14 @@ class host_stats_get(Resource):
           "swap": psutil.swap_memory()
           "swap": psutil.swap_memory()
         },
         },
         "disk": {
         "disk": {
-          "read_bytes": disks_read_per_sec,
-          "write_bytes": disks_write_per_sec
+          "bytes_read_total": bytes_read_total,
+          "bytes_write_total": bytes_write_total
         },
         "network": {
-          "bytes_recv": net_recv_per_sec,
-          "bytes_sent": net_sent_per_sec
+          "bytes_recv_total": bytes_recv_total,
+          "bytes_sent_total": bytes_sent_total
         },
+        "container_stats": stats,
         "uptime": time.time() - psutil.boot_time(),
         "uptime": time.time() - psutil.boot_time(),
         "system_time": system_time.strftime("%d.%m.%Y %H:%M:%S")
         "system_time": system_time.strftime("%d.%m.%Y %H:%M:%S")
       }
       }
@@ -370,6 +384,7 @@ class host_stats_get(Resource):
     except Exception as e:
       return jsonify(type='danger', msg=str(e))

+
 def exec_cmd_container(container, cmd, user, timeout=2, shell_cmd="/bin/bash"):

   def recv_socket_data(c_socket, timeout):
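
Each container `stats()` call can block for a second or more, so the rework above fans them out over a thread pool ("multithreaded for faster results", per the inline comment) and then sums the cumulative network and block-I/O counters instead of sleeping on psutil deltas. A minimal standalone sketch of that aggregation pattern, assuming the Docker SDK (`pip install docker`) and a reachable Docker socket; the function name `collect_host_io` and the `.get()` defaults are illustrative, not part of the commit:

```python
# Sketch of the threaded stats aggregation used in host_stats_get above.
# Assumes the docker SDK and a reachable /var/run/docker.sock; names are illustrative.
import os
from concurrent.futures import ThreadPoolExecutor

import docker


def collect_host_io():
    client = docker.from_env()
    containers = client.containers.list()

    # one blocking stats() call per running container, fanned out over threads
    workers = (os.cpu_count() or 1) * 4
    with ThreadPoolExecutor(workers) as pool:
        stats = list(pool.map(lambda c: c.stats(decode=None, stream=False), containers))

    totals = {"rx": 0, "tx": 0, "read": 0, "write": 0}
    for stat in stats:
        # cumulative network counters since container start, summed over all interfaces
        for iface in stat.get("networks", {}).values():
            totals["rx"] += iface.get("rx_bytes", 0)
            totals["tx"] += iface.get("tx_bytes", 0)
        # cumulative block-I/O counters; the list can be None on some kernels
        for blkio in stat.get("blkio_stats", {}).get("io_service_bytes_recursive") or []:
            if blkio.get("op", "").lower() == "read":
                totals["read"] += blkio["value"]
            elif blkio.get("op", "").lower() == "write":
                totals["write"] += blkio["value"]
    return totals
```

Because these counters are totals since each container started, a consumer has to sample twice and divide by the elapsed time to get a rate, which is what the `prev_stats` handling in debug.js below does.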

data/web/js/site/debug.js (+44 -31)

@@ -1010,7 +1010,7 @@ jQuery(function($){


 // update system stats - every 5 seconds if system & container tab is active
-function update_stats(){
+function update_stats(prev_stats = null){
   if (!$('#tab-containers').hasClass('active')) {
     // tab not active - dont fetch stats - run again in n seconds
     setTimeout(update_stats, 5000);
@@ -1020,6 +1020,7 @@ function update_stats(){
   window.fetch("/api/v1/get/status/host", {method:'GET',cache:'no-cache'}).then(function(response) {
     return response.json();
   }).then(function(data) {
+    // display table data
     $("#host_date").text(data.system_time);
     $("#host_date").text(data.system_time);
     $("#host_uptime").text(formatUptime(data.uptime));
     $("#host_uptime").text(formatUptime(data.uptime));
     $("#host_cpu_cores").text(data.cpu.cores);
     $("#host_cpu_cores").text(data.cpu.cores);
@@ -1027,40 +1028,52 @@ function update_stats(){
     $("#host_memory_total").text((data.memory.total / (1024 ** 3)).toFixed(2).toString() + "GB");
     $("#host_memory_total").text((data.memory.total / (1024 ** 3)).toFixed(2).toString() + "GB");
     $("#host_memory_usage").text(parseInt(data.memory.usage).toString() + "%");
     $("#host_memory_usage").text(parseInt(data.memory.usage).toString() + "%");
 
 
-    var net_io_chart = Chart.getChart("net_io_chart");
-    var disk_io_chart = Chart.getChart("disk_io_chart");
-    
-    net_io_chart.data.labels.push(data.system_time.split(" ")[1]);
-    if (net_io_chart.data.labels.length > 20) {
-      net_io_chart.data.labels.shift();
-    }
-    net_io_chart.data.datasets[0].data.push((data.network.bytes_recv / 1024).toFixed(4));
-    net_io_chart.data.datasets[1].data.push((data.network.bytes_sent / 1024).toFixed(4));
-    if (net_io_chart.data.datasets[0].data.length > 20) {
-      net_io_chart.data.datasets[0].data.shift();
-    }
-    if (net_io_chart.data.datasets[1].data.length > 20) {
-      net_io_chart.data.datasets[1].data.shift();
-    }
-    disk_io_chart.data.labels.push(data.system_time.split(" ")[1]);
-    if (disk_io_chart.data.labels.length > 20) {
-      disk_io_chart.data.labels.shift();
-    }
-    disk_io_chart.data.datasets[0].data.push((data.disk.read_bytes / 1024).toFixed(4));
-    disk_io_chart.data.datasets[1].data.push((data.disk.write_bytes / 1024).toFixed(4));
-    if (disk_io_chart.data.datasets[0].data.length > 20) {
-      disk_io_chart.data.datasets[0].data.shift();
-    }
-    if (disk_io_chart.data.datasets[1].data.length > 20) {
-      disk_io_chart.data.datasets[1].data.shift();
-    }
+    // display network and disk i/o
+    if (prev_stats != null){
+      // get chart instances by elemId
+      var net_io_chart = Chart.getChart("net_io_chart");
+      var disk_io_chart = Chart.getChart("disk_io_chart");
+
+
+      // calc time diff
+      var time_diff = (new Date(data.system_time) - new Date(prev_stats.system_time)) / 1000;
+      // push time label for x-axis
+      net_io_chart.data.labels.push(data.system_time.split(" ")[1]);
+      // shift data if more than 20 entries exist
+      if (net_io_chart.data.labels.length > 20) net_io_chart.data.labels.shift();
-    net_io_chart.update();
-    disk_io_chart.update();
+      var diff_bytes_recv = (data.network.bytes_recv_total - prev_stats.network.bytes_recv_total) / time_diff;
+      var diff_bytes_sent = (data.network.bytes_sent_total - prev_stats.network.bytes_sent_total) / time_diff;
+      net_io_chart.data.datasets[0].data.push(diff_bytes_recv);
+      net_io_chart.data.datasets[1].data.push(diff_bytes_sent);
+      // shift data if more than 20 entries exist
+      if (net_io_chart.data.datasets[0].data.length > 20) net_io_chart.data.datasets[0].data.shift();
+      if (net_io_chart.data.datasets[1].data.length > 20) net_io_chart.data.datasets[1].data.shift();
+
+
+      // push time label for x-axis
+      disk_io_chart.data.labels.push(data.system_time.split(" ")[1]);
+      // shift data if more than 20 entries exist
+      if (disk_io_chart.data.labels.length > 20) disk_io_chart.data.labels.shift();
+
+      var diff_bytes_read = (data.disk.bytes_read_total - prev_stats.disk.bytes_read_total) / time_diff;
+      var diff_bytes_write = (data.disk.bytes_write_total - prev_stats.disk.bytes_write_total) / time_diff;
+      disk_io_chart.data.datasets[0].data.push(diff_bytes_read);
+      disk_io_chart.data.datasets[1].data.push(diff_bytes_write);
+      // shift data if more than 20 entries exist
+      if (disk_io_chart.data.datasets[0].data.length > 20) disk_io_chart.data.datasets[0].data.shift();
+      if (disk_io_chart.data.datasets[1].data.length > 20) disk_io_chart.data.datasets[1].data.shift();
+
+
+      // update charts
+      net_io_chart.update();
+      disk_io_chart.update();
+    }

     // run again in n seconds
-    setTimeout(update_stats, 5000);
+    prev_stats = data;
+    setTimeout(() => update_stats(prev_stats), 2500);
   });
 }
 // format hosts uptime seconds to readable string
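
The API now reports cumulative totals (plus the raw per-container stats under `container_stats`), so any consumer has to keep the previous sample and divide the deltas by the elapsed time, exactly as the reworked `update_stats()` does with `prev_stats`. A rough Python sketch of the same calculation as an external poller; the path `/api/v1/get/status/host` and the field names come from the diff, while `BASE_URL`, the `requests` library, and the plain unauthenticated GET are assumptions for illustration (the real call rides on the browser session):

```python
# Poll the host status endpoint twice and turn cumulative totals into per-second rates.
# BASE_URL and the unauthenticated GET are illustrative assumptions.
import time

import requests

BASE_URL = "https://mail.example.org"  # hypothetical mailcow host


def sample():
    r = requests.get(f"{BASE_URL}/api/v1/get/status/host", timeout=10)
    r.raise_for_status()
    return r.json()


def io_rates(interval=5):
    prev = sample()
    time.sleep(interval)
    cur = sample()
    return {
        "net_recv_bps": (cur["network"]["bytes_recv_total"] - prev["network"]["bytes_recv_total"]) / interval,
        "net_sent_bps": (cur["network"]["bytes_sent_total"] - prev["network"]["bytes_sent_total"]) / interval,
        "disk_read_bps": (cur["disk"]["bytes_read_total"] - prev["disk"]["bytes_read_total"]) / interval,
        "disk_write_bps": (cur["disk"]["bytes_write_total"] - prev["disk"]["bytes_write_total"]) / interval,
    }


if __name__ == "__main__":
    print(io_rates())
```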