Mirror of https://github.com/librenms/librenms-agent.git, synced 2024-05-09 09:54:52 +00:00
Cleanup some code (#355)
* Format with isort
* Format with Black
* Fix CRLF
* Format with shellcheck
* Fix some warnings
* Fix PHP style
* Don't modify check_mk files
* Fixes
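Most of the Python changes below are mechanical: isort groups and alphabetizes imports, and Black normalizes quoting and wraps expressions that exceed the line limit in parentheses. As a rough illustration of that style, here is a hypothetical snippet (not code from this repository) written the way the reformatted files are:

    # Hypothetical example of the isort/Black conventions applied in this commit:
    # sorted imports, double-quoted strings, long call chains wrapped in parentheses
    # the way agent-local/ceph is below.
    import json
    from subprocess import check_output


    def kernel_version():
        version = (
            check_output(["/bin/uname", "-r"])
            .decode("utf-8")
            .strip()
        )
        return json.dumps({"kernel": version})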
agent-local/ceph (109 changed lines)
@@ -14,80 +14,117 @@
 #
 # See http://www.gnu.org/licenses/gpl.txt for the full license

-from subprocess import check_output
 import json
+
+from subprocess import check_output


 def cephversion():
-    cephv = check_output(["/usr/bin/ceph", "version"]).decode("utf-8").replace('ceph version ', '')
-    major, minor = cephv.split('.')[0:2]
+    cephv = (
+        check_output(["/usr/bin/ceph", "version"])
+        .decode("utf-8")
+        .replace("ceph version ", "")
+    )
+    major, minor = cephv.split(".")[0:2]
     return [int(major), int(minor)]


 def cephdf():
-    cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).decode("utf-8").replace('-inf', '0')
+    cephdf = (
+        check_output(["/usr/bin/ceph", "-f", "json", "df"])
+        .decode("utf-8")
+        .replace("-inf", "0")
+    )

     s = json.loads(cephdf)
     try:
-        ts = s['stats']['total_bytes']
-    except:
-        ts = s['stats']['total_space']
+        ts = s["stats"]["total_bytes"]
+    except KeyError:
+        ts = s["stats"]["total_space"]
     try:
-        tu = s['stats']['total_used_bytes']
-    except:
-        tu = s['stats']['total_used']
+        tu = s["stats"]["total_used_bytes"]
+    except KeyError:
+        tu = s["stats"]["total_used"]
     try:
-        ta = s['stats']['total_avail_bytes']
-    except:
-        ta = s['stats']['total_avail']
+        ta = s["stats"]["total_avail_bytes"]
+    except KeyError:
+        ta = s["stats"]["total_avail"]

     print("c:%i:%i:%i" % (ts, tu, ta))

-    for p in s['pools']:
-        b = p['stats']['bytes_used']
-        a = p['stats']['max_avail']
-        o = p['stats']['objects']
-        print("%s:%i:%i:%i" % (p['name'], a, b, o))
+    for p in s["pools"]:
+        b = p["stats"]["bytes_used"]
+        a = p["stats"]["max_avail"]
+        o = p["stats"]["objects"]
+        print("%s:%i:%i:%i" % (p["name"], a, b, o))


 def osdperf():
     global major
-    osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).decode("utf-8").replace('-inf', '0')
+    osdperf = (
+        check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"])
+        .decode("utf-8")
+        .replace("-inf", "0")
+    )

     if major > 13:
-        for o in json.loads(osdperf)['osdstats']['osd_perf_infos']:
-            print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms']))
+        for o in json.loads(osdperf)["osdstats"]["osd_perf_infos"]:
+            print(
+                "osd.%s:%i:%i"
+                % (
+                    o["id"],
+                    o["perf_stats"]["apply_latency_ms"],
+                    o["perf_stats"]["commit_latency_ms"],
+                )
+            )
     else:
-        for o in json.loads(osdperf)['osd_perf_infos']:
-            print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms']))
+        for o in json.loads(osdperf)["osd_perf_infos"]:
+            print(
+                "osd.%s:%i:%i"
+                % (
+                    o["id"],
+                    o["perf_stats"]["apply_latency_ms"],
+                    o["perf_stats"]["commit_latency_ms"],
+                )
+            )


 def poolstats():
     global major
-    poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).decode("utf-8").replace('-inf', '0')
+    poolstats = (
+        check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"])
+        .decode("utf-8")
+        .replace("-inf", "0")
+    )

     for p in json.loads(poolstats):
         try:
-            r = p['client_io_rate']['read_bytes_sec']
-        except:
+            r = p["client_io_rate"]["read_bytes_sec"]
+        except KeyError:
             r = 0
         try:
-            w = p['client_io_rate']['write_bytes_sec']
-        except:
+            w = p["client_io_rate"]["write_bytes_sec"]
+        except KeyError:
             w = 0
         try:
             if major > 11:
-                o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec']
+                o = (
+                    p["client_io_rate"]["read_op_per_sec"]
+                    + p["client_io_rate"]["write_op_per_sec"]
+                )
             else:
-                o = p['client_io_rate']['op_per_sec']
-        except:
+                o = p["client_io_rate"]["op_per_sec"]
+        except KeyError:
             o = 0

-        print("%s:%i:%i:%i" % (p['pool_name'], o, w, r))
+        print("%s:%i:%i:%i" % (p["pool_name"], o, w, r))


 major, minor = cephversion()

-print ("<<<app-ceph>>>")
-print ("<poolstats>")
+print("<<<app-ceph>>>")
+print("<poolstats>")
 poolstats()
-print ("<osdperformance>")
+print("<osdperformance>")
 osdperf()
-print ("<df>")
+print("<df>")
 cephdf()
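One behavioral detail in the ceph hunk above: the bare except: handlers become except KeyError:, so only a missing dictionary key triggers the fallback field names and any other error still propagates. A minimal sketch of that pattern with made-up numbers (real values come from the ceph JSON output):

    # Hypothetical stats dict; the script gets this from `ceph -f json df`.
    stats = {"total_bytes": 53687091200}
    try:
        total = stats["total_bytes"]
    except KeyError:
        # Fallback field name used by older ceph releases.
        total = stats["total_space"]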
@@ -55,11 +55,11 @@ else
     SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p"
 fi

-for i in `$BIN_NC -w 1 $Hval $pval 2>&1 | $BIN_SED '/^<<<mrpe>>>/,/^<<</{//!b};d'` ; do
-    echo $i | $BIN_SED -n "$SED_CMD"
+for i in $($BIN_NC -w 1 "$Hval" "$pval" 2>&1 | $BIN_SED '/^<<<mrpe>>>/,/^<<</{//!b};d') ; do
+    echo "$i" | $BIN_SED -n "$SED_CMD"
     if [ "$aflag" ];
     then
-        STATUSCODE=$(echo $i | $BIN_SED -n "$SED_CMD_STATUS")
+        STATUSCODE=$(echo "$i" | $BIN_SED -n "$SED_CMD_STATUS")
         if [ "$STATUSCODE" ];
         then
             case $STATUSCODE in
@@ -13,10 +13,10 @@ then
     exit 0
 fi

-PEAK=`$FSCLI -x "status" | grep 'per Sec' | grep -Po 'last 5min \d+' | cut -d ' ' -f 3`
-CALLCOUNT=`$FSCLI -x "show calls count" | grep -Po '^\d+'`
-CHANNELCOUNT=`$FSCLI -x "show channels count" | grep -Po '^\d+'`
-GATEWAYS=`$FSCLI -x "sofia status gateway"`
+PEAK=$($FSCLI -x "status" | grep 'per Sec' | grep -Po 'last 5min \d+' | cut -d ' ' -f 3)
+CALLCOUNT=$($FSCLI -x "show calls count" | grep -Po '^\d+')
+CHANNELCOUNT=$($FSCLI -x "show channels count" | grep -Po '^\d+')
+GATEWAYS=$($FSCLI -x "sofia status gateway")
 if [[ $GATEWAYS =~ $inRe ]]; then
     INFAILED=${BASH_REMATCH[1]}
     INTOTAL=${BASH_REMATCH[2]}
@@ -17,25 +17,25 @@

 # Try to use lsblk if available. Otherwise, use find.
 if type lsblk >/dev/null 2>&1; then
-    disks=`lsblk -dnp|cut -d' ' -f1 | tr '\n' ' '`
+    disks=$(lsblk -dnp|cut -d' ' -f1 | tr '\n' ' ')
 else
-    disks=`find /dev -name '[sh]d[a-z]' -or -name '[sh]d[a-z][a-z]' | tr '\n' ' '`
+    disks=$(find /dev -name '[sh]d[a-z]' -or -name '[sh]d[a-z][a-z]' | tr '\n' ' ')
 fi

-hddtemp=`which hddtemp 2>/dev/null`
+hddtemp=$(which hddtemp 2>/dev/null)

 if [ "${hddtemp}" != "" ]; then
     if [ -x "${hddtemp}" ]; then
         if type parallel > /dev/null 2>&1; then
             # When available, use GNU parallel for a significant performance boost. hddtemp runs serially(!)
-            output=`parallel ${hddtemp} -w -q ::: ${disks} 2>/dev/null`
+            output=$(parallel "${hddtemp}" -w -q ::: "${disks}" 2>/dev/null)
         else
-            output=`${hddtemp} -w -q ${disks} 2>/dev/null`
+            output=$(${hddtemp} -w -q "${disks}" 2>/dev/null)
         fi
-        content=`echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'`
+        content=$(echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176')
         if [ "${content}" != "" ]; then
             echo '<<<hddtemp>>>'
-            echo ${content}
+            echo "${content}"
             echo
         else
             echo "no hddtemp compatible disks found" >&2
@@ -1,9 +1,9 @@
 #!/usr/bin/env python
-import urllib2
 import re

-data = urllib2.urlopen('http://127.0.0.1/nginx-status').read()
+import urllib2
+
+data = urllib2.urlopen("http://127.0.0.1/nginx-status").read()

 params = {}

@@ -11,28 +11,24 @@ for line in data.split("\n"):
     smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line)
     req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line)
     if smallstat:
         params["Reading"] = smallstat.group(1)
         params["Writing"] = smallstat.group(2)
         params["Waiting"] = smallstat.group(3)
     elif req:
         params["Requests"] = req.group(3)
     else:
         pass


-dataorder = [
-    "Active",
-    "Reading",
-    "Writing",
-    "Waiting",
-    "Requests"
-]
+dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"]

-print "<<<nginx>>>\n";
+print "<<<nginx>>>\n"

 for param in dataorder:
     if param == "Active":
-        Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
+        Active = (
+            int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
+        )
         print Active
     else:
         print params[param]
@@ -1,8 +1,8 @@
 #!/usr/bin/env python3
-from urllib.request import urlopen
 import re

-data = urlopen('http://127.0.0.1/nginx-status').read()
+from urllib.request import urlopen
+
+data = urlopen("http://127.0.0.1/nginx-status").read()

 params = {}
@@ -24,7 +24,9 @@ print("<<<nginx>>>\n")

 for param in dataorder:
     if param == "Active":
-        Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
+        Active = (
+            int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
+        )
         print(Active)
     else:
         print(params[param])
@@ -1,25 +1,40 @@
 #!/usr/bin/env python3
-from subprocess import Popen, PIPE
+from subprocess import PIPE, Popen

 kvars = [
-    'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup',
-    'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size',
-    'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers',
-    'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries',
-    'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers',
-    'udp4-queries', 'udp6-answers', 'udp6-queries'
+    "corrupt-packets",
+    "deferred-cache-inserts",
+    "deferred-cache-lookup",
+    "latency",
+    "packetcache-hit",
+    "packetcache-miss",
+    "packetcache-size",
+    "qsize-q",
+    "query-cache-hit",
+    "query-cache-miss",
+    "recursing-answers",
+    "recursing-questions",
+    "servfail-packets",
+    "tcp-answers",
+    "tcp-queries",
+    "timedout-packets",
+    "udp-answers",
+    "udp-queries",
+    "udp4-answers",
+    "udp4-queries",
+    "udp6-answers",
+    "udp6-queries",
 ]

 rvars = {}
-cmd = ['pdns_control', 'show', '*']
+cmd = ["pdns_control", "show", "*"]

-for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','):
-    v = l.split('=')
+for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(","):
+    v = l.split("=")
     if len(v) > 1:
         rvars[v[0]] = v[1]

 print("<<<app-powerdns>>>")

 for k in kvars:
     print(rvars[k])

@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
-import json, subprocess
-from subprocess import Popen, PIPE
+import json
+import subprocess
+from subprocess import PIPE, Popen

-input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0]
+input = Popen(["rec_control", "get-all"], stdout=PIPE).communicate()[0]
 data = []

 for line in input.splitlines():
     item = line.split()
-    data.append({'name': item[0].decode(), 'value': int(item[1].decode())})
+    data.append({"name": item[0].decode(), "value": int(item[1].decode())})

-print('<<<powerdns-recursor>>>')
+print("<<<powerdns-recursor>>>")
 print(json.dumps(data))
@@ -15,8 +15,8 @@ PENDING_JOBS=$($QSTAT -u "*" -s p | wc -l)
 SUSPEND_JOBS=$($QSTAT -u "*" -s s | wc -l)
 ZOMBIE_JOBS=$($QSTAT -u "*" -s z | wc -l)

-echo $RUNNING_JOBS;
-echo $PENDING_JOBS;
-echo $SUSPEND_JOBS;
-echo $ZOMBIE_JOBS;
+echo "$RUNNING_JOBS";
+echo "$PENDING_JOBS";
+echo "$SUSPEND_JOBS";
+echo "$ZOMBIE_JOBS";

@@ -1,13 +1,13 @@
 #!/usr/bin/env python
+import os
 import socket
 import sys
-import os

 # Unix socket
-server_address = '/var/run/rrdcached.sock'
+server_address = "/var/run/rrdcached.sock"

 # TCP socket
-#server_address = 'localhost:42217'
+# server_address = 'localhost:42217'

 sock = None
 try:
@@ -15,31 +15,31 @@ try:
         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     else:
         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        if ':' in server_address:
-            split = server_address.rsplit(':', 1)
-            server_address = (split[0],int(split[1]))
+        if ":" in server_address:
+            split = server_address.rsplit(":", 1)
+            server_address = (split[0], int(split[1]))
         else:
             server_address = (server_address, 42217)

     sock.connect(server_address)
 except socket.error as e:
-    sys.stderr.write(str(e) + ': ' + str(server_address) + '\n')
+    sys.stderr.write(str(e) + ": " + str(server_address) + "\n")
     sys.exit(1)

-buffer = ''
+buffer = ""
 max = -1
 try:
     sock.settimeout(5)
-    sock.sendall('STATS\n'.encode())
-    while max == -1 or len(buffer.split('\n')) < max:
+    sock.sendall("STATS\n".encode())
+    while max == -1 or len(buffer.split("\n")) < max:
         buffer += sock.recv(1024).decode()
         if max == -1:
             # the first line contains the number of following lines
-            max = int(buffer.split(' ')[0]) + 1
+            max = int(buffer.split(" ")[0]) + 1
 except socket.error as e:
-    sys.stderr.write(str(e) + '\n')
+    sys.stderr.write(str(e) + "\n")
     sys.exit(1)

 sock.close()
-print('<<<rrdcached>>>')
-print(buffer.rstrip('\n'))
+print("<<<rrdcached>>>")
+print(buffer.rstrip("\n"))
@@ -1,5 +1,5 @@
 #!/bin/bash
-unboundctl=`which unbound-control`
+unboundctl=$(which unbound-control)
 if [ "$?" != "0" ]; then
     #Unbound control executable doesn't exist
     exit
@@ -56,7 +56,7 @@ if [ ! -z "$s" ]; then
         exit 1
     fi

-    if [ `script_enabled $s` != "yes" ]; then
-        enable_script $s
+    if [ `script_enabled "$s"` != "yes" ]; then
+        enable_script "$s"
     fi
 fi
@@ -12,15 +12,15 @@ if [ $# -gt 1 ]; then
 fi

 # Get path to this script
-scriptdir=$(dirname $(readlink -f -- $0))
+scriptdir=$(dirname $(readlink -f -- "$0"))

 # Get hostname, interface list. Set target, which is name returned for interface
-hostname=`/bin/uname -n`
-if [ $1 ]; then
+hostname=$(/bin/uname -n)
+if [ "$1" ]; then
     interfaces=$1
     target=$1
 else
-    interfaces=`cat $scriptdir/wlInterfaces.txt | cut -f 1 -d","`
+    interfaces=$(cat "$scriptdir"/wlInterfaces.txt | cut -f 1 -d",")
     target=wlan
 fi

@@ -28,7 +28,7 @@ fi
 count=0
 for interface in $interfaces
 do
-    new=`/usr/sbin/iw dev $interface station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l`
+    new=$(/usr/sbin/iw dev "$interface" station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l)
     count=$(( $count + $new ))
 done

@@ -12,8 +12,8 @@ if [ $# -ne 1 ]; then
 fi

 # Get hostname, extract frequency
-hostname=`/bin/uname -n`
-frequency=`/usr/sbin/iw dev $1 info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" "`
+hostname=$(/bin/uname -n)
+frequency=$(/usr/sbin/iw dev "$1" info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ")

 # Return snmp result
-/bin/echo $frequency
+/bin/echo "$frequency"
@@ -13,8 +13,8 @@ fi

 # Get hostname, extract noise floor. Note, all associated stations have the same value, so just grab the first one
 # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut!
-hostname=`/bin/uname -n`
-noise=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1`
+hostname=$(/bin/uname -n)
+noise=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1)

 # Return snmp result
-/bin/echo $noise
+/bin/echo "$noise"
@@ -16,17 +16,17 @@ fi

 # Get hostname, calculate result. Sum just for debug, and have to return integer
 # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!)
-hostname=`/bin/uname -n`
-ratelist=`/usr/sbin/iw dev $1 station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" "`
+hostname=$(/bin/uname -n)
+ratelist=$(/usr/sbin/iw dev "$1" station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ")
 if [ "$3" == "sum" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}')
 elif [ "$3" == "avg" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}')
 elif [ "$3" == "min" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}')
 elif [ "$3" == "max" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}')
 fi

 # Return snmp result
-echo $result
+echo "$result"
@@ -14,17 +14,17 @@ if [ $# -ne 2 ]; then
 fi

 # Get hostname, calculate result. Sum just for debug, and return integer (safest / easiest)
-hostname=`/bin/uname -n`
-snrlist=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1`
+hostname=$(/bin/uname -n)
+snrlist=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1)
 if [ "$2" == "sum" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}')
 elif [ "$2" == "avg" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}')
 elif [ "$2" == "min" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}')
 elif [ "$2" == "max" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}')
 fi

 # Return snmp result
-echo $result
+echo "$result"
@@ -22,63 +22,67 @@ import time
 import urllib.request

 cachetime = 30
-cachefile = '/var/cache/librenms/apache-snmp'
+cachefile = "/var/cache/librenms/apache-snmp"

 # Check for a cache file newer than cachetime seconds ago

 if os.path.isfile(cachefile) and (time.time() - os.stat(cachefile)[8]) < cachetime:
     # Use cached data
-    f = open(cachefile, 'r')
+    f = open(cachefile, "r")
     data = f.read()
     f.close()
 else:
     # Grab the status URL (fresh data), needs package urllib3
-    data = urllib.request.urlopen("http://localhost/server-status?auto").read().decode('UTF-8')
+    data = (
+        urllib.request.urlopen("http://localhost/server-status?auto")
+        .read()
+        .decode("UTF-8")
+    )
     # Write file
-    f = open(cachefile+'.TMP.'+str(os.getpid()), 'w')
+    f = open(cachefile + ".TMP." + str(os.getpid()), "w")
     f.write(data)
     f.close()
-    os.rename(cachefile+'.TMP.'+str(os.getpid()), cachefile)
+    os.rename(cachefile + ".TMP." + str(os.getpid()), cachefile)


 # dice up the data
-scoreboardkey = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.']
+scoreboardkey = ["_", "S", "R", "W", "K", "D", "C", "L", "G", "I", "."]
 params = {}
 for line in data.splitlines():
-    fields = line.split(': ')
+    fields = line.split(": ")
     if len(fields) <= 1:
         continue  # "localhost" as first line causes out of index error
-    elif fields[0] == 'Scoreboard':
+    elif fields[0] == "Scoreboard":
         # count up the scoreboard into states
         states = {}
         for state in scoreboardkey:
             states[state] = 0
         for state in fields[1]:
             states[state] += 1
-    elif fields[0] == 'Total kBytes':
+    elif fields[0] == "Total kBytes":
         # turn into base(byte) value
-        params[fields[0]] = int(fields[1])*1024
+        params[fields[0]] = int(fields[1]) * 1024
     elif len(fields) > 1:
         # just store everything else
         params[fields[0]] = fields[1]

 # output the data in order(this is because some platforms don't have them all)
 dataorder = [
-    'Total Accesses',
-    'Total kBytes',
-    'CPULoad',
-    'Uptime',
-    'ReqPerSec',
-    'BytesPerSec',
-    'BytesPerReq',
-    'BusyWorkers',
-    'IdleWorkers'
+    "Total Accesses",
+    "Total kBytes",
+    "CPULoad",
+    "Uptime",
+    "ReqPerSec",
+    "BytesPerSec",
+    "BytesPerReq",
+    "BusyWorkers",
+    "IdleWorkers",
 ]
 for param in dataorder:
     try:
         print(params[param])
     except KeyError:  # not all Apache's have all stats
-        print('U')
+        print("U")

 # print the scoreboard
 for state in scoreboardkey:
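The apache-stats hunk above only reformats the existing caching logic. In isolation, the pattern it relies on, reusing a cache file that is newer than cachetime seconds and rewriting it atomically otherwise, looks roughly like this (hypothetical cache path and a simplified fetch callback):

    import os
    import time

    CACHEFILE = "/tmp/example-cache"  # hypothetical path
    CACHETIME = 30  # seconds


    def read_cached(fetch):
        # Reuse the cache while it is fresh; otherwise call fetch() and replace it atomically.
        if os.path.isfile(CACHEFILE) and time.time() - os.stat(CACHEFILE).st_mtime < CACHETIME:
            with open(CACHEFILE) as f:
                return f.read()
        data = fetch()
        tmp = CACHEFILE + ".TMP." + str(os.getpid())
        with open(tmp, "w") as f:
            f.write(data)
        os.rename(tmp, CACHEFILE)
        return data

The os.stat(cachefile)[8] index used in the script is the same st_mtime field accessed by name here.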
@@ -195,17 +195,17 @@ for (( c=0; c<${#Scoreboard}; c++ )); do
 done

 # scoreboard output order must be this ...
-echo ${Scoreboard_}
-echo ${ScoreboardS}
-echo ${ScoreboardR}
-echo ${ScoreboardW}
-echo ${ScoreboardK}
-echo ${ScoreboardD}
-echo ${ScoreboardC}
-echo ${ScoreboardL}
-echo ${ScoreboardG}
-echo ${ScoreboardI}
-echo ${ScoreboardDot}
+echo "${Scoreboard_}"
+echo "${ScoreboardS}"
+echo "${ScoreboardR}"
+echo "${ScoreboardW}"
+echo "${ScoreboardK}"
+echo "${ScoreboardD}"
+echo "${ScoreboardC}"
+echo "${ScoreboardL}"
+echo "${ScoreboardG}"
+echo "${ScoreboardI}"
+echo "${ScoreboardDot}"

 # clean up
 if [ -f ${Tmp_File} ]; then
@@ -1,39 +1,45 @@
 #!/usr/bin/env python3
 import io
-import re
-import os
 import json
+import os
+import re

 version = 1
 error = 0
-error_string = ''
+error_string = ""

-logfile = '/var/log/backupninja.log'
+logfile = "/var/log/backupninja.log"

 backupninja_datas = {
-    'last_actions': 0,
-    'last_fatal': 0,
-    'last_error': 0,
-    'last_warning': 0}
+    "last_actions": 0,
+    "last_fatal": 0,
+    "last_error": 0,
+    "last_warning": 0,
+}

 if not os.path.isfile(logfile):
-    error_string = 'file unavailable'
+    error_string = "file unavailable"
     error = 1
     break

-with io.open(logfile,'r') as f:
+with io.open(logfile, "r") as f:
     for line in reversed(list(f)):
-        match = re.search('^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$', line)
+        match = re.search(
+            "^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$",
+            line,
+        )
         if match:
-            backupninja_datas['last_actions'] = int(match.group(2))
-            backupninja_datas['last_fatal'] = int(match.group(3))
-            backupninja_datas['last_error'] = int(match.group(4))
-            backupninja_datas['last_warning'] = int(match.group(5))
+            backupninja_datas["last_actions"] = int(match.group(2))
+            backupninja_datas["last_fatal"] = int(match.group(3))
+            backupninja_datas["last_error"] = int(match.group(4))
+            backupninja_datas["last_warning"] = int(match.group(5))
             break

-output = {'version': version,
-          'error': error,
-          'errorString': error_string,
-          'data': backupninja_datas}
+output = {
+    "version": version,
+    "error": error,
+    "errorString": error_string,
+    "data": backupninja_datas,
+}

 print(json.dumps(output))
@@ -1,12 +1,11 @@
 #!/usr/bin/env python3

-import socket
-import ssl
 import datetime
 import json
+import socket
+import ssl

-CONFIGFILE='/etc/snmp/certificate.json'
+CONFIGFILE = "/etc/snmp/certificate.json"
 # {"domains": [
 #    {"fqdn": "www.mydomain.com"},
 #    {"fqdn": "www2.mydomain.com"}
@@ -34,55 +34,61 @@ def get_certificate_data(domain, port=443):
     # Manage expired certificates
     except ssl.SSLCertVerificationError as e:
         # Arbitrary start date
-        ssl_info['notBefore'] = "Jan 1 00:00:00 2020 GMT"
+        ssl_info["notBefore"] = "Jan 1 00:00:00 2020 GMT"
         # End date is now (we don't have the real one but the certificate is expired)
         one_minute_further = datetime.datetime.now() + datetime.timedelta(minutes=1)
-        ssl_info['notAfter'] = one_minute_further.strftime('%b %d %H:%M:%S %Y GMT')
+        ssl_info["notAfter"] = one_minute_further.strftime("%b %d %H:%M:%S %Y GMT")

     return ssl_info, error_msg


 output = {}
-output['error'] = 0
-output['errorString'] = ""
-output['version'] = 1
+output["error"] = 0
+output["errorString"] = ""
+output["version"] = 1

-with open(CONFIGFILE, 'r') as json_file:
+with open(CONFIGFILE, "r") as json_file:
     try:
         configfile = json.load(json_file)
     except json.decoder.JSONDecodeError as e:
-        output['error'] = 1
-        output['errorString'] = "Configfile Error: '%s'" % e
+        output["error"] = 1
+        output["errorString"] = "Configfile Error: '%s'" % e

-if not output['error']:
+if not output["error"]:
     output_data_list = []
-    for domain in configfile['domains']:
+    for domain in configfile["domains"]:
         output_data = {}

-        if 'port' not in domain.keys():
-            domain['port'] = 443
-        certificate_data, error_msg = get_certificate_data(domain['fqdn'], domain['port'])
+        if "port" not in domain.keys():
+            domain["port"] = 443
+        certificate_data, error_msg = get_certificate_data(
+            domain["fqdn"], domain["port"]
+        )

-        output_data['cert_name'] = domain['fqdn']
+        output_data["cert_name"] = domain["fqdn"]

         if not error_msg:
-            ssl_date_format = r'%b %d %H:%M:%S %Y %Z'
-            validity_end = datetime.datetime.strptime(certificate_data['notAfter'], ssl_date_format)
-            validity_start = datetime.datetime.strptime(certificate_data['notBefore'], ssl_date_format)
+            ssl_date_format = r"%b %d %H:%M:%S %Y %Z"
+            validity_end = datetime.datetime.strptime(
+                certificate_data["notAfter"], ssl_date_format
+            )
+            validity_start = datetime.datetime.strptime(
+                certificate_data["notBefore"], ssl_date_format
+            )
             cert_age = datetime.datetime.now() - validity_start
             cert_still_valid = validity_end - datetime.datetime.now()

-            output_data['age'] = cert_age.days
-            output_data['remaining_days'] = cert_still_valid.days
+            output_data["age"] = cert_age.days
+            output_data["remaining_days"] = cert_still_valid.days

         else:
-            output_data['age'] = None
-            output_data['remaining_days'] = None
-            output['error'] = 1
-            output['errorString'] = "%s: %s" % (domain['fqdn'], error_msg)
+            output_data["age"] = None
+            output_data["remaining_days"] = None
+            output["error"] = 1
+            output["errorString"] = "%s: %s" % (domain["fqdn"], error_msg)

         output_data_list.append(output_data)

-    output['data'] = output_data_list
+    output["data"] = output_data_list

 print(json.dumps(output))
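The certificate hunk above only reformats how notBefore and notAfter are parsed; the ssl_info dictionary itself comes from the TLS handshake. A sketch of how such a dictionary is typically obtained with the standard library (hypothetical host, and not necessarily how get_certificate_data is implemented elsewhere in this file):

    import socket
    import ssl


    def peer_certificate(host, port=443):
        # Returns the dict form of the peer certificate, including "notBefore"/"notAfter".
        ctx = ssl.create_default_context()
        with socket.create_connection((host, port), timeout=5) as sock:
            with ctx.wrap_socket(sock, server_hostname=host) as tls:
                return tls.getpeercert()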
snmp/chip.sh (54 changed lines)
@@ -18,13 +18,13 @@ BAT_D=0

 if [ $STATUS_ACIN == 1 ]; then
     # ACIN voltage
-    REG=`i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    ACIN=`echo "$REG*0.0017"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    ACIN=$(echo "$REG*0.0017"|bc)
     # ACIN Current
-    REG=`i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    ACIN_C=`echo "$REG*0.000625"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    ACIN_C=$(echo "$REG*0.000625"|bc)
 else
     ACIN=0
     ACIN_C=0
@@ -32,14 +32,14 @@ fi

 if [ $STATUS_VBUS == 1 ]; then
     # VBUS voltage
-    REG=`i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    VBUS=`echo "$REG*0.0017"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    VBUS=$(echo "$REG*0.0017"|bc)

     # VBUS Current
-    REG=`i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    VBUS_C=`echo "$REG*0.000375"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    VBUS_C=$(echo "$REG*0.000375"|bc)
 else
     VBUS=0
     VBUS_C=0
@@ -47,24 +47,24 @@ fi

 if [ $STATUS_BATCON == 1 ]; then
     # Battery Voltage
-    REG=`i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    VBAT=`echo "$REG*0.0011"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    VBAT=$(echo "$REG*0.0011"|bc)

     if [ $STATUS_CHG_DIR == 1 ]; then
         # Battery Charging Current
-        REG=`i2cget -y -f 0 0x34 0x7A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-        REG_C=`printf "%d" "$REG"`
-        BAT_C=`echo "scale=2;$REG_C*0.001"|bc`
+        REG=$(i2cget -y -f 0 0x34 0x7A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+        REG_C=$(printf "%d" "$REG")
+        BAT_C=$(echo "scale=2;$REG_C*0.001"|bc)
     else
         # Battery Discharge Current
-        REG=`i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-        REG_D=`printf "%d" "$REG"`
-        BAT_D=`echo "scale=2;$REG_D*0.001"|bc`
+        REG=$(i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+        REG_D=$(printf "%d" "$REG")
+        BAT_D=$(echo "scale=2;$REG_D*0.001"|bc)
     fi
     # Battery %
-    REG=`i2cget -y -f 0 0x34 0xB9`
-    BAT_PERCENT=`printf "%d" "$REG"`
+    REG=$(i2cget -y -f 0 0x34 0xB9)
+    BAT_PERCENT=$(printf "%d" "$REG")
 else
     VBAT=0
     BATT_CUR=0
@@ -72,11 +72,11 @@ else
 fi

 # Temperature
-REG=`i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-REG=`printf "%d" "$REG"`
-THERM=`echo "($REG*0.1)-144.7"|bc`
+REG=$(i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+REG=$(printf "%d" "$REG")
+THERM=$(echo "($REG*0.1)-144.7"|bc)

-echo $THERM
+echo "$THERM"
 echo $ACIN
 echo $ACIN_C
 echo $VBUS
snmp/dhcp.py (149 changed lines)
@@ -1,10 +1,10 @@
 #!/usr/bin/env python3

-import subprocess
 import json
+import subprocess
 from os.path import isfile

-CONFIGFILE = '/etc/snmp/dhcp.json'
+CONFIGFILE = "/etc/snmp/dhcp.json"

 # Configfile is needed /etc/snmp/dhcp.json
 #
@@ -13,10 +13,10 @@ CONFIGFILE = '/etc/snmp/dhcp.json'
 #

 error = 0
-error_string = ''
+error_string = ""
 version = 2

-with open(CONFIGFILE, 'r') as json_file:
+with open(CONFIGFILE, "r") as json_file:
     try:
         configfile = json.load(json_file)
     except json.decoder.JSONDecodeError as e:
@@ -25,75 +25,76 @@ with open(CONFIGFILE, 'r') as json_file:


 if not error:
-    leases = {'total': 0,
-              'active': 0,
-              'expired': 0,
-              'released': 0,
-              'abandoned': 0,
-              'reset': 0,
-              'bootp': 0,
-              'backup': 0,
-              'free': 0,
-              }
-    if not isfile(configfile['leasefile']):
+    leases = {
+        "total": 0,
+        "active": 0,
+        "expired": 0,
+        "released": 0,
+        "abandoned": 0,
+        "reset": 0,
+        "bootp": 0,
+        "backup": 0,
+        "free": 0,
+    }
+    if not isfile(configfile["leasefile"]):
         error = 1
-        error_string = 'Lease File not found'
+        error_string = "Lease File not found"
     else:
-        with open(configfile['leasefile']) as fp:
+        with open(configfile["leasefile"]) as fp:
             line = fp.readline()
             while line:
                 line = fp.readline()

-                if 'rewind' not in line:
-                    if line.startswith('lease'):
-                        leases['total'] += 1
-                    elif 'binding state active' in line:
-                        leases['active'] += 1
-                    elif 'binding state expired' in line:
-                        leases['expired'] += 1
-                    elif 'binding state released' in line:
-                        leases['released'] += 1
-                    elif 'binding state abandoned' in line:
-                        leases['abandoned'] += 1
-                    elif 'binding state reset' in line:
-                        leases['reset'] += 1
-                    elif 'binding state bootp' in line:
-                        leases['bootp'] += 1
-                    elif 'binding state backup' in line:
-                        leases['backup'] += 1
-                    elif 'binding state free' in line:
-                        leases['free'] += 1
+                if "rewind" not in line:
+                    if line.startswith("lease"):
+                        leases["total"] += 1
+                    elif "binding state active" in line:
+                        leases["active"] += 1
+                    elif "binding state expired" in line:
+                        leases["expired"] += 1
+                    elif "binding state released" in line:
+                        leases["released"] += 1
+                    elif "binding state abandoned" in line:
+                        leases["abandoned"] += 1
+                    elif "binding state reset" in line:
+                        leases["reset"] += 1
+                    elif "binding state bootp" in line:
+                        leases["bootp"] += 1
+                    elif "binding state backup" in line:
+                        leases["backup"] += 1
+                    elif "binding state free" in line:
+                        leases["free"] += 1

 shell_cmd = "dhcpd-pools -s i -A"
-pool_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n')
+pool_data = (
+    subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE)
+    .stdout.read()
+    .split(b"\n")
+)

-data = {'leases': leases,
-        'pools': [],
-        'networks': [],
-        'all_networks': []
-        }
+data = {"leases": leases, "pools": [], "networks": [], "all_networks": []}

 category = None
 jump_line = 0
 for p in pool_data:
-    line = p.decode('utf-8')
+    line = p.decode("utf-8")

     if jump_line:
         jump_line -= 1
         continue

-    if line.startswith('Ranges:'):
-        category = 'pools'
+    if line.startswith("Ranges:"):
+        category = "pools"
         jump_line = 1
         continue

-    if line.startswith('Shared networks:'):
-        category = 'networks'
+    if line.startswith("Shared networks:"):
+        category = "networks"
         jump_line = 1
         continue

-    if line.startswith('Sum of all ranges:'):
-        category = 'all_networks'
+    if line.startswith("Sum of all ranges:"):
+        category = "all_networks"
         jump_line = 1
         continue

@@ -102,34 +103,38 @@ for p in pool_data:

     p = line.split()

-    if category == 'pools':
-        data[category].append({'first_ip': p[1],
-                               'last_ip':p[3],
-                               'max': p[4],
-                               'cur': p[5],
-                               'percent': p[6],
-                               })
+    if category == "pools":
+        data[category].append(
+            {
+                "first_ip": p[1],
+                "last_ip": p[3],
+                "max": p[4],
+                "cur": p[5],
+                "percent": p[6],
+            }
+        )
         continue

-    if category == 'networks':
-        data[category].append({'network': p[0],
-                               'max': p[1],
-                               'cur': p[2],
-                               'percent': p[3],
-                               })
+    if category == "networks":
+        data[category].append(
+            {
+                "network": p[0],
+                "max": p[1],
+                "cur": p[2],
+                "percent": p[3],
+            }
+        )
         continue

-    if category == 'all_networks':
-        data[category] ={'max': p[2],
-                         'cur': p[3],
-                         'percent': p[4],
-                         }
+    if category == "all_networks":
+        data[category] = {
+            "max": p[2],
+            "cur": p[3],
+            "percent": p[4],
+        }
         continue


-output = {'version': version,
-          'error': error,
-          'errorString': error_string,
-          'data': data}
+output = {"version": version, "error": error, "errorString": error_string, "data": data}

-print (json.dumps(output))
+print(json.dumps(output))
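The dhcp.py diff above keeps the same two data sources: the dhcpd.leases file for lease-state counts and dhcpd-pools for pool usage. The lease counting reduces to a plain line scan; a stripped-down version of that loop (hypothetical lease file path) is:

    counts = {"total": 0, "active": 0, "expired": 0}
    with open("/var/lib/dhcp/dhcpd.leases") as fp:  # hypothetical location
        for line in fp:
            if "rewind" in line:
                continue
            if line.startswith("lease"):
                counts["total"] += 1
            elif "binding state active" in line:
                counts["active"] += 1
            elif "binding state expired" in line:
                counts["expired"] += 1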
@@ -19,17 +19,17 @@
 # ------------------------------------------------------------- #
 # restart snmpd and activate the app for desired host #
 #################################################################
-BIN_EXIM=`which exim`
-BIN_GREP=`which grep`
-BIN_WC=`which wc`
+BIN_EXIM=$(which exim)
+BIN_GREP=$(which grep)
+BIN_WC=$(which wc)
 CFG_EXIM_1='-bp'
 CFG_EXIM_2='-bpc'
 CFG_GREP='frozen'
 CFG_WC='-l'
 #################################################################

-FROZEN=`$BIN_EXIM $CFG_EXIM_1 | $BIN_GREP $CFG_GREP | $BIN_WC $CFG_WC`
-echo $FROZEN
+FROZEN=$($BIN_EXIM $CFG_EXIM_1 | $BIN_GREP $CFG_GREP | $BIN_WC $CFG_WC)
+echo "$FROZEN"

-QUEUE=`$BIN_EXIM $CFG_EXIM_2`
-echo $QUEUE
+QUEUE=$($BIN_EXIM $CFG_EXIM_2)
+echo "$QUEUE"
@@ -25,46 +25,46 @@ if [ $AGENT == 1 ]; then
     echo "<<<freeradius>>>"
 fi

-RESULT=`echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY`
+RESULT=$(echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY)

-echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*'
-echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*'
snmp/gpsd (90 changed lines)
@@ -1,45 +1,45 @@
 #!/usr/bin/env bash
 #
 # Copyright (c) 2019 Mike Centola <mcentola@appliedengdesign.com>
 #
 # Please make sure the paths below are correct.
 # Alternatively you can put them in $0.conf, meaning if you've named
 # this script gpsd.sh then it must go in gpsd.sh.conf .
 #
 #
 ################################################################
 # Don't change anything unless you know what are you doing #
 ################################################################

 BIN_GPIPE='/usr/bin/env gpspipe'
 BIN_GREP='/usr/bin/env grep'
 BIN_PYTHON='/usr/bin/env python'

 # Check for config file
 CONFIG=$0".conf"
-if [ -f $CONFIG ]; then
-    . $CONFIG
+if [ -f "$CONFIG" ]; then
+    . "$CONFIG"
 fi

 # Create Temp File
 TMPFILE=$(mktemp)
 trap "rm -f $TMPFILE" 0 2 3 15

 # Write GPSPIPE Data to Temp File
-$BIN_GPIPE -w -n 20 > $TMPFILE
+$BIN_GPIPE -w -n 20 > "$TMPFILE"

 # Parse Temp file for GPSD Data
-VERSION=`cat $TMPFILE | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]'`
-GPSDMODE=`cat $TMPFILE | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]'`
-HDOP=`cat $TMPFILE | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]'`
-VDOP=`cat $TMPFILE | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]'`
-LAT=`cat $TMPFILE | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]'`
-LONG=`cat $TMPFILE | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]'`
-ALT=`cat $TMPFILE | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]'`
-SATS=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])'`
-SATSUSED=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])'`
+VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]')
+GPSDMODE=$(cat "$TMPFILE" | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]')
+HDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]')
+VDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]')
+LAT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]')
+LONG=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]')
+ALT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]')
+SATS=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])')
+SATSUSED=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])')
|
||||||
|
|
||||||
# Output info for SNMP Extend
|
# Output info for SNMP Extend
|
||||||
echo '{"data":{"mode":"'$GPSDMODE'", "hdop":"'$HDOP'", "vdop":"'$VDOP'", "latitude":"'$LAT'", "longitude":"'$LONG'", "altitude":"'$ALT'", "satellites":"'$SATS'", "satellites_used":"'$SATSUSED'"}, "error":"0", "errorString":"", "version":"'$VERSION'"}'
|
echo '{"data":{"mode":"'"$GPSDMODE"'", "hdop":"'"$HDOP"'", "vdop":"'"$VDOP"'", "latitude":"'"$LAT"'", "longitude":"'"$LONG"'", "altitude":"'"$ALT"'", "satellites":"'"$SATS"'", "satellites_used":"'"$SATSUSED"'"}, "error":"0", "errorString":"", "version":"'"$VERSION"'"}'
|
||||||
|
|
||||||
rm $TMPFILE
|
rm "$TMPFILE"
|
||||||
|
@@ -5,10 +5,10 @@ used_memory=$(ps -U icecast -o rsz | awk 'FNR==2{print}')
|
|||||||
cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}')
|
cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}')
|
||||||
|
|
||||||
pid=$(pidof icecast)
|
pid=$(pidof icecast)
|
||||||
total_files=$(ls -l /proc/${pid}/fd | wc -l)
|
total_files=$(ls -l /proc/"${pid}"/fd | wc -l)
|
||||||
|
|
||||||
echo "Used Memory="$used_memory
|
echo "Used Memory=""$used_memory"
|
||||||
echo "CPU Load="$cpu_load
|
echo "CPU Load=""$cpu_load"
|
||||||
echo "Open files="$total_files
|
echo "Open files=""$total_files"
|
||||||
|
|
||||||
exit
|
exit
|
||||||
|
@@ -21,9 +21,9 @@
|
|||||||
# requirements: mailcow-dockerized and pflogsumm
|
# requirements: mailcow-dockerized and pflogsumm
|
||||||
#
|
#
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import re
|
|
||||||
import json
|
import json
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
|
||||||
# LibreNMS poller interval
|
# LibreNMS poller interval
|
||||||
librenms_poller_interval = 300
|
librenms_poller_interval = 300
|
||||||
@@ -34,37 +34,46 @@ def libre_to_mcd_postfix(libre_seconds):
|
|||||||
|
|
||||||
|
|
||||||
def cli_get_docker_container():
|
def cli_get_docker_container():
|
||||||
return subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True).decode('utf8').strip()
|
return (
|
||||||
|
subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True)
|
||||||
|
.decode("utf8")
|
||||||
|
.strip()
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def cli_command():
|
def cli_command():
|
||||||
cli_part = "docker logs --since " + libre_to_mcd_postfix(librenms_poller_interval) \
|
cli_part = (
|
||||||
+ "m " + cli_get_docker_container() + "| pflogsumm --smtpd-stats"
|
"docker logs --since "
|
||||||
|
+ libre_to_mcd_postfix(librenms_poller_interval)
|
||||||
|
+ "m "
|
||||||
|
+ cli_get_docker_container()
|
||||||
|
+ "| pflogsumm --smtpd-stats"
|
||||||
|
)
|
||||||
return cli_part
|
return cli_part
|
||||||
|
|
||||||
|
|
||||||
def get_output():
|
def get_output():
|
||||||
return subprocess.check_output(cli_command(), shell=True).decode('utf8')
|
return subprocess.check_output(cli_command(), shell=True).decode("utf8")
|
||||||
|
|
||||||
|
|
||||||
def output_cleaning(input):
|
def output_cleaning(input):
|
||||||
output = re.split('\n', input)
|
output = re.split("\n", input)
|
||||||
return list(filter(None, output))
|
return list(filter(None, output))
|
||||||
|
|
||||||
|
|
||||||
def entry_generator(input):
|
def entry_generator(input):
|
||||||
entry = re.sub(' +', ':', input.strip().lstrip())
|
entry = re.sub(" +", ":", input.strip().lstrip())
|
||||||
return entry.split(':')
|
return entry.split(":")
|
||||||
|
|
||||||
|
|
||||||
# limit our needed output
|
# limit our needed output
|
||||||
mcd_postfix_data = get_output().split('messages')
|
mcd_postfix_data = get_output().split("messages")
|
||||||
data = mcd_postfix_data[1].split('smtpd')
|
data = mcd_postfix_data[1].split("smtpd")
|
||||||
|
|
||||||
# postfix stats only
|
# postfix stats only
|
||||||
mcd_postfix_info = data[0]
|
mcd_postfix_info = data[0]
|
||||||
# smtpd stats only
|
# smtpd stats only
|
||||||
mcd_smtpd_info = data[1].split('Per-Hour Traffic Summary')[0]
|
mcd_smtpd_info = data[1].split("Per-Hour Traffic Summary")[0]
|
||||||
|
|
||||||
# postfix stats export
|
# postfix stats export
|
||||||
mcd_postfix = output_cleaning(mcd_postfix_info)
|
mcd_postfix = output_cleaning(mcd_postfix_info)
|
||||||
@@ -74,17 +83,16 @@ points_label = []
|
|||||||
for entry in mcd_postfix:
|
for entry in mcd_postfix:
|
||||||
data_labels = entry_generator(entry)
|
data_labels = entry_generator(entry)
|
||||||
|
|
||||||
if data_labels[0].find('k') == -1:
|
if data_labels[0].find("k") == -1:
|
||||||
points_data.append(data_labels[0])
|
points_data.append(data_labels[0])
|
||||||
else:
|
else:
|
||||||
data_point = data_labels[0].replace('k', '', 1)
|
data_point = data_labels[0].replace("k", "", 1)
|
||||||
data_point = int(data_point) * 1024
|
data_point = int(data_point) * 1024
|
||||||
points_data.append(data_point)
|
points_data.append(data_point)
|
||||||
|
|
||||||
points_label.append(re.sub('[^a-zA-Z]+', '', data_labels[1]))
|
points_label.append(re.sub("[^a-zA-Z]+", "", data_labels[1]))
|
||||||
|
|
||||||
entries = dict(zip(points_label, points_data))
|
entries = dict(zip(points_label, points_data))
|
||||||
export = {"data": entries, "error": "0", "errorString": "", "version": "1"}
|
export = {"data": entries, "error": "0", "errorString": "", "version": "1"}
|
||||||
data = re.sub(' ', '', json.dumps(export))
|
data = re.sub(" ", "", json.dumps(export))
|
||||||
print(data)
|
print(data)
|
||||||
|
|
||||||
|
@@ -17,60 +17,60 @@
|
|||||||
///
|
///
|
||||||
///////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
|
||||||
// START SETTINGS ///
|
// START SETTINGS ///
|
||||||
|
|
||||||
$mailstats = "/opt/librenms/scripts/watchmaillog/watchmaillog_counters";
|
$mailstats = '/opt/librenms/scripts/watchmaillog/watchmaillog_counters';
|
||||||
|
|
||||||
// END SETTINGS ///
|
// END SETTINGS ///
|
||||||
|
|
||||||
|
|
||||||
///
|
///
|
||||||
// DO NOT EDIT BENETH THIS LINE
|
// DO NOT EDIT BENETH THIS LINE
|
||||||
///
|
///
|
||||||
///////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
function doSNMPv2($vars) {
|
function doSNMPv2($vars)
|
||||||
$stats = array();
|
{
|
||||||
if (file_exists($vars)) {
|
$stats = [];
|
||||||
$data = file($vars);
|
if (file_exists($vars)) {
|
||||||
foreach ($data as $item=>$value) {
|
$data = file($vars);
|
||||||
if (!empty($value)) {
|
foreach ($data as $item=>$value) {
|
||||||
$temp = explode(':', trim($value));
|
if (!empty($value)) {
|
||||||
if (isset($temp[1])) {
|
$temp = explode(':', trim($value));
|
||||||
$stats[$temp[0]] = $temp[1];
|
if (isset($temp[1])) {
|
||||||
}
|
$stats[$temp[0]] = $temp[1];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
$var = array();
|
}
|
||||||
$var['mess_recv'] = (isset($stats['mess_recv']) ? $stats['mess_recv'] : "U");
|
$var = [];
|
||||||
$var['mess_rejected'] = (isset($stats['mess_rejected']) ? $stats['mess_rejected'] : "U");
|
$var['mess_recv'] = (isset($stats['mess_recv']) ? $stats['mess_recv'] : 'U');
|
||||||
$var['mess_relay'] = (isset($stats['mess_relay']) ? $stats['mess_relay'] : "U");
|
$var['mess_rejected'] = (isset($stats['mess_rejected']) ? $stats['mess_rejected'] : 'U');
|
||||||
$var['mess_sent'] = (isset($stats['mess_sent']) ? $stats['mess_sent'] : "U");
|
$var['mess_relay'] = (isset($stats['mess_relay']) ? $stats['mess_relay'] : 'U');
|
||||||
$var['mess_waiting'] = (isset($stats['mess_waiting']) ? $stats['mess_waiting'] : "U");
|
$var['mess_sent'] = (isset($stats['mess_sent']) ? $stats['mess_sent'] : 'U');
|
||||||
$var['spam'] = (isset($stats['spam']) ? $stats['spam'] : "U");
|
$var['mess_waiting'] = (isset($stats['mess_waiting']) ? $stats['mess_waiting'] : 'U');
|
||||||
$var['virus'] = (isset($stats['virus']) ? $stats['virus'] : "U");
|
$var['spam'] = (isset($stats['spam']) ? $stats['spam'] : 'U');
|
||||||
foreach ($var as $item=>$count) {
|
$var['virus'] = (isset($stats['virus']) ? $stats['virus'] : 'U');
|
||||||
echo $count."\n";
|
foreach ($var as $item=>$count) {
|
||||||
}
|
echo $count."\n";
|
||||||
}
|
}
|
||||||
|
}
|
||||||
function clearStats($mailstats) {
|
|
||||||
if (file_exists($mailstats)) {
|
|
||||||
$fp = fopen($mailstats, 'w');
|
|
||||||
fwrite($fp, "mess_recv:0\n");
|
|
||||||
fwrite($fp, "mess_rejected:0\n");
|
|
||||||
fwrite($fp, "mess_relay:0\n");
|
|
||||||
fwrite($fp, "mess_sent:0\n");
|
|
||||||
fwrite($fp, "mess_waiting:0\n");
|
|
||||||
fwrite($fp, "spam:0\n");
|
|
||||||
fwrite($fp, "virus:0\n");
|
|
||||||
fclose($fp);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
doSNMPv2($mailstats);
|
function clearStats($mailstats)
|
||||||
//clearStats($mailstats);
|
{
|
||||||
|
if (file_exists($mailstats)) {
|
||||||
|
$fp = fopen($mailstats, 'w');
|
||||||
|
fwrite($fp, "mess_recv:0\n");
|
||||||
|
fwrite($fp, "mess_rejected:0\n");
|
||||||
|
fwrite($fp, "mess_relay:0\n");
|
||||||
|
fwrite($fp, "mess_sent:0\n");
|
||||||
|
fwrite($fp, "mess_waiting:0\n");
|
||||||
|
fwrite($fp, "spam:0\n");
|
||||||
|
fwrite($fp, "virus:0\n");
|
||||||
|
fclose($fp);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
doSNMPv2($mailstats);
|
||||||
|
//clearStats($mailstats);
|
||||||
|
|
||||||
?>
|
?>
|
||||||
|
38
snmp/mdadm
@@ -19,10 +19,10 @@ OUTPUT_DATA='['
|
|||||||
# use 'ls' command to check if md blocks exist
|
# use 'ls' command to check if md blocks exist
|
||||||
if $LS /dev/md?* 1> /dev/null 2>&1 ; then
|
if $LS /dev/md?* 1> /dev/null 2>&1 ; then
|
||||||
for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do
|
for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do
|
||||||
RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE))
|
RAID="/sys/block/"$($BASENAME $($REALPATH "$ARRAY_BLOCKDEVICE"))
|
||||||
|
|
||||||
# ignore arrays with no slaves
|
# ignore arrays with no slaves
|
||||||
if [ -z "$($LS -1 $RAID/slaves 2> /dev/null)" ] ; then
|
if [ -z "$($LS -1 "$RAID"/slaves 2> /dev/null)" ] ; then
|
||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
# ignore "non existing" arrays
|
# ignore "non existing" arrays
|
||||||
@@ -30,27 +30,27 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then
|
|||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]] ; then
|
if [[ $($BASENAME "$ARRAY_BLOCKDEVICE") = [[:digit:]] ]] ; then
|
||||||
RAID_NAME=$($BASENAME $RAID)
|
RAID_NAME=$($BASENAME "$RAID")
|
||||||
else
|
else
|
||||||
RAID_NAME=$($BASENAME $ARRAY_BLOCKDEVICE)
|
RAID_NAME=$($BASENAME "$ARRAY_BLOCKDEVICE")
|
||||||
fi
|
fi
|
||||||
RAID_DEV_LIST=$($LS $RAID/slaves/)
|
RAID_DEV_LIST=$($LS "$RAID"/slaves/)
|
||||||
RAID_LEVEL=$($CAT $RAID/md/level)
|
RAID_LEVEL=$($CAT "$RAID"/md/level)
|
||||||
RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks| cut -d' ' -f1)
|
RAID_DISC_COUNT=$($CAT "$RAID"/md/raid_disks| cut -d' ' -f1)
|
||||||
RAID_STATE=$($CAT $RAID/md/array_state)
|
RAID_STATE=$($CAT "$RAID"/md/array_state)
|
||||||
RAID_ACTION=$($CAT $RAID/md/sync_action)
|
RAID_ACTION=$($CAT "$RAID"/md/sync_action)
|
||||||
RAID_DEGRADED=$($CAT $RAID/md/degraded)
|
RAID_DEGRADED=$($CAT "$RAID"/md/degraded)
|
||||||
|
|
||||||
if [ "$RAID_SYNC_SPEED" = "none" ] ; then
|
if [ "$RAID_SYNC_SPEED" = "none" ] ; then
|
||||||
RAID_SYNC_SPEED=0
|
RAID_SYNC_SPEED=0
|
||||||
else
|
else
|
||||||
let "RAID_SYNC_SPEED=$($CAT $RAID/md/sync_speed)*1024"
|
let "RAID_SYNC_SPEED=$($CAT "$RAID"/md/sync_speed)*1024"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$($CAT $RAID/md/sync_completed)" != "none" ] ; then
|
if [ "$($CAT "$RAID"/md/sync_completed)" != "none" ] ; then
|
||||||
let "RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)"
|
let "RAID_SYNC_COMPLETED=100*$($CAT "$RAID"/md/sync_completed)"
|
||||||
elif [ $RAID_DEGRADED -eq 1 ] ; then
|
elif [ "$RAID_DEGRADED" -eq 1 ] ; then
|
||||||
RAID_SYNC_COMPLETED=0
|
RAID_SYNC_COMPLETED=0
|
||||||
else
|
else
|
||||||
RAID_SYNC_COMPLETED=100
|
RAID_SYNC_COMPLETED=100
|
||||||
@@ -58,7 +58,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then
|
|||||||
|
|
||||||
# divide with 2 to size like in /proc/mdstat
|
# divide with 2 to size like in /proc/mdstat
|
||||||
# and multiply with 1024 to get size in bytes
|
# and multiply with 1024 to get size in bytes
|
||||||
let "RAID_SIZE=$($CAT $RAID/size)*1024/2"
|
let "RAID_SIZE=$($CAT "$RAID"/size)*1024/2"
|
||||||
|
|
||||||
RAID_DEVICE_LIST='['
|
RAID_DEVICE_LIST='['
|
||||||
ALL_DEVICE_COUNT=0
|
ALL_DEVICE_COUNT=0
|
||||||
@@ -73,7 +73,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then
|
|||||||
|
|
||||||
RAID_MISSING_DEVICES='['
|
RAID_MISSING_DEVICES='['
|
||||||
for D in $RAID_DEV_LIST ; do
|
for D in $RAID_DEV_LIST ; do
|
||||||
if [ -L $RAID/slaves/$D ] && [ -f $RAID/slaves/$D ] ; then
|
if [ -L "$RAID"/slaves/"$D" ] && [ -f "$RAID"/slaves/"$D" ] ; then
|
||||||
RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",'
|
RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",'
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
@@ -83,7 +83,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then
|
|||||||
RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']'
|
RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']'
|
||||||
|
|
||||||
let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT"
|
let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT"
|
||||||
if [ $RAID_HOTSPARE_COUNT -lt 0 ] ; then
|
if [ "$RAID_HOTSPARE_COUNT" -lt 0 ] ; then
|
||||||
RAID_HOTSPARE_COUNT=0
|
RAID_HOTSPARE_COUNT=0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -115,5 +115,5 @@ OUTPUT='{"data":'$OUTPUT_DATA\
|
|||||||
'","errorString":"'$ERROR_STRING\
|
'","errorString":"'$ERROR_STRING\
|
||||||
'","version":"'$VERSION'"}'
|
'","version":"'$VERSION'"}'
|
||||||
|
|
||||||
echo $OUTPUT
|
echo "$OUTPUT"
|
||||||
|
|
||||||
|
288
snmp/mysql-stats
@@ -1,52 +1,117 @@
|
|||||||
#!/usr/bin/env python2
|
#!/usr/bin/env python2
|
||||||
import warnings
|
|
||||||
import re
|
import re
|
||||||
warnings.filterwarnings(action="ignore", message='the sets module is deprecated')
|
import warnings
|
||||||
import sets
|
|
||||||
import MySQLdb
|
warnings.filterwarnings(action="ignore", message="the sets module is deprecated")
|
||||||
import base64
|
import base64
|
||||||
conn = MySQLdb.connect(host='',
|
|
||||||
user='',
|
|
||||||
passwd='',
|
|
||||||
db='')
|
|
||||||
|
|
||||||
cursor = conn.cursor ()
|
import MySQLdb
|
||||||
|
import sets
|
||||||
|
|
||||||
|
conn = MySQLdb.connect(host="", user="", passwd="", db="")
|
||||||
|
|
||||||
|
cursor = conn.cursor()
|
||||||
|
|
||||||
|
|
||||||
cursor.execute ("SHOW GLOBAL STATUS")
|
cursor.execute("SHOW GLOBAL STATUS")
|
||||||
rows = cursor.fetchall()
|
rows = cursor.fetchall()
|
||||||
|
|
||||||
datavariables = {
|
datavariables = {
|
||||||
'Command Counters': ['Com_delete','Com_insert','Com_insert_select','Com_load','Com_replace','Com_replace_select', 'Com_select', 'Com_update', 'Com_update_multi'],
|
"Command Counters": [
|
||||||
'Connections': ['max_connections', 'Max_used_connections', 'Aborted_clients', 'Aborted_connects','Threads_connected','Connections'],
|
"Com_delete",
|
||||||
'Files and Tables': ['table_open_cache','Open_files','Open_tables','Opened_tables'],
|
"Com_insert",
|
||||||
'InnoDB Buffer Pool': ['ib_bpool_size','ib_bpool_dbpages', 'ib_bpool_free','ib_bpool_modpages'],
|
"Com_insert_select",
|
||||||
'InnoDB Buffer Pool Activity': ['ib_bpool_read','ib_bpool_created', 'ib_bpool_written'],
|
"Com_load",
|
||||||
'InnoDB Insert Buffer': ['ib_ibuf_inserts','ib_ibuf_merged_rec', 'ib_ibuf_merges'],
|
"Com_replace",
|
||||||
'InnoDB IO': ['ib_io_read','ib_io_write','ib_io_log', 'ib_io_fsync'],
|
"Com_replace_select",
|
||||||
'InnoDB IO Pending': ['ib_iop_log','ib_iop_sync', 'ib_iop_flush_log', 'ib_iop_flush_bpool', 'ib_iop_ibuf_aio','ib_iop_aioread','ib_iop_aiowrite'],
|
"Com_select",
|
||||||
'InnoDB Log': ['innodb_log_buffer_size','ib_log_flush','ib_log_written'],
|
"Com_update",
|
||||||
'InnoDB Row Operations': ['Innodb_rows_deleted','Innodb_rows_inserted','Innodb_rows_read','Innodb_rows_updated'],
|
"Com_update_multi",
|
||||||
'InnoDB Semaphores': ['ib_spin_rounds','ib_spin_waits','ib_os_waits'],
|
],
|
||||||
'InnoDB Transactions': ['ib_tnx'],
|
"Connections": [
|
||||||
'MyISAM Indexes': ['Key_read_requests','Key_reads','Key_write_requests','Key_writes'],
|
"max_connections",
|
||||||
'Network Traffic': ['Bytes_received','Bytes_sent'],
|
"Max_used_connections",
|
||||||
'Query Cache': ['Qcache_queries_in_cache','Qcache_hits','Qcache_inserts','Qcache_not_cached','Qcache_lowmem_prunes'],
|
"Aborted_clients",
|
||||||
'Query Cache Memory': ['query_cache_size','Qcache_free_memory'],
|
"Aborted_connects",
|
||||||
'Select Types': ['Select_full_join','Select_full_range_join','Select_range','Select_range_check','Select_scan'],
|
"Threads_connected",
|
||||||
'Slow Queries': ['Slow_queries'],
|
"Connections",
|
||||||
'Sorts': ['Sort_rows','Sort_range','Sort_merge_passes','Sort_scan'],
|
],
|
||||||
'Table Locks': ['Table_locks_immediate','Table_locks_waited'],
|
"Files and Tables": [
|
||||||
'Temporary Objects': ['Created_tmp_disk_tables','Created_tmp_tables','Created_tmp_files']
|
"table_open_cache",
|
||||||
}
|
"Open_files",
|
||||||
|
"Open_tables",
|
||||||
|
"Opened_tables",
|
||||||
|
],
|
||||||
|
"InnoDB Buffer Pool": [
|
||||||
|
"ib_bpool_size",
|
||||||
|
"ib_bpool_dbpages",
|
||||||
|
"ib_bpool_free",
|
||||||
|
"ib_bpool_modpages",
|
||||||
|
],
|
||||||
|
"InnoDB Buffer Pool Activity": [
|
||||||
|
"ib_bpool_read",
|
||||||
|
"ib_bpool_created",
|
||||||
|
"ib_bpool_written",
|
||||||
|
],
|
||||||
|
"InnoDB Insert Buffer": ["ib_ibuf_inserts", "ib_ibuf_merged_rec", "ib_ibuf_merges"],
|
||||||
|
"InnoDB IO": ["ib_io_read", "ib_io_write", "ib_io_log", "ib_io_fsync"],
|
||||||
|
"InnoDB IO Pending": [
|
||||||
|
"ib_iop_log",
|
||||||
|
"ib_iop_sync",
|
||||||
|
"ib_iop_flush_log",
|
||||||
|
"ib_iop_flush_bpool",
|
||||||
|
"ib_iop_ibuf_aio",
|
||||||
|
"ib_iop_aioread",
|
||||||
|
"ib_iop_aiowrite",
|
||||||
|
],
|
||||||
|
"InnoDB Log": ["innodb_log_buffer_size", "ib_log_flush", "ib_log_written"],
|
||||||
|
"InnoDB Row Operations": [
|
||||||
|
"Innodb_rows_deleted",
|
||||||
|
"Innodb_rows_inserted",
|
||||||
|
"Innodb_rows_read",
|
||||||
|
"Innodb_rows_updated",
|
||||||
|
],
|
||||||
|
"InnoDB Semaphores": ["ib_spin_rounds", "ib_spin_waits", "ib_os_waits"],
|
||||||
|
"InnoDB Transactions": ["ib_tnx"],
|
||||||
|
"MyISAM Indexes": [
|
||||||
|
"Key_read_requests",
|
||||||
|
"Key_reads",
|
||||||
|
"Key_write_requests",
|
||||||
|
"Key_writes",
|
||||||
|
],
|
||||||
|
"Network Traffic": ["Bytes_received", "Bytes_sent"],
|
||||||
|
"Query Cache": [
|
||||||
|
"Qcache_queries_in_cache",
|
||||||
|
"Qcache_hits",
|
||||||
|
"Qcache_inserts",
|
||||||
|
"Qcache_not_cached",
|
||||||
|
"Qcache_lowmem_prunes",
|
||||||
|
],
|
||||||
|
"Query Cache Memory": ["query_cache_size", "Qcache_free_memory"],
|
||||||
|
"Select Types": [
|
||||||
|
"Select_full_join",
|
||||||
|
"Select_full_range_join",
|
||||||
|
"Select_range",
|
||||||
|
"Select_range_check",
|
||||||
|
"Select_scan",
|
||||||
|
],
|
||||||
|
"Slow Queries": ["Slow_queries"],
|
||||||
|
"Sorts": ["Sort_rows", "Sort_range", "Sort_merge_passes", "Sort_scan"],
|
||||||
|
"Table Locks": ["Table_locks_immediate", "Table_locks_waited"],
|
||||||
|
"Temporary Objects": [
|
||||||
|
"Created_tmp_disk_tables",
|
||||||
|
"Created_tmp_tables",
|
||||||
|
"Created_tmp_files",
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
data = {}
|
data = {}
|
||||||
for row in rows:
|
for row in rows:
|
||||||
data[row[0]] = row[1]
|
data[row[0]] = row[1]
|
||||||
|
|
||||||
cursor = ""
|
cursor = ""
|
||||||
cursor = conn.cursor ()
|
cursor = conn.cursor()
|
||||||
cursor.execute ("SHOW VARIABLES")
|
cursor.execute("SHOW VARIABLES")
|
||||||
rows = cursor.fetchall()
|
rows = cursor.fetchall()
|
||||||
|
|
||||||
for row in rows:
|
for row in rows:
|
||||||
@@ -59,73 +124,98 @@ rows = cursor.fetchall()
|
|||||||
|
|
||||||
for row in rows:
|
for row in rows:
|
||||||
for line in row[2].split("\n"):
|
for line in row[2].split("\n"):
|
||||||
ib_bpool_size = re.match(r"Buffer\spool\ssize\s+(\d+)", line)
|
ib_bpool_size = re.match(r"Buffer\spool\ssize\s+(\d+)", line)
|
||||||
ib_bpool_free = re.match(r"Free\sbuffers\s+(\d+)", line)
|
ib_bpool_free = re.match(r"Free\sbuffers\s+(\d+)", line)
|
||||||
ib_bpool_dbpages = re.match(r"Database\spages\s+(\d+)", line)
|
ib_bpool_dbpages = re.match(r"Database\spages\s+(\d+)", line)
|
||||||
ib_bpool_modpages = re.match(r"Modified\sdb\spages\s+(\d+)", line)
|
ib_bpool_modpages = re.match(r"Modified\sdb\spages\s+(\d+)", line)
|
||||||
ib_b_reg = re.match(r"Pages\sread\s(\d+),\screated\s(\d+),\swritten (\d+)", line)
|
ib_b_reg = re.match(
|
||||||
ib_insert_buffer = re.match(r"(\d+)\sinserts,\s(\d+)\smerged\srecs,\s(\d+)", line)
|
r"Pages\sread\s(\d+),\screated\s(\d+),\swritten (\d+)", line
|
||||||
ib_io = re.match(r"(\d+)\sOS\sfile\sreads,\s(\d+)\sOS\sfile\swrites,\s(\d+)\sOS\sfsyncs", line)
|
)
|
||||||
ib_io_log = re.match(r"(\d+)\slog\si\/o's\sdone.*", line)
|
ib_insert_buffer = re.match(
|
||||||
ib_io_p1 = re.match(r"Pending\snormal\saio\sreads:\s(\d+),\saio\swrites:\s(\d+),", line)
|
r"(\d+)\sinserts,\s(\d+)\smerged\srecs,\s(\d+)", line
|
||||||
ib_io_p2 = re.match(r"\s?ibuf\saio\sreads:\s(\d+),\slog\si\/o's:\s(\d+),\ssync\si\/o's:\s(\d+)", line)
|
)
|
||||||
ib_io_p3 = re.match(r"\s?Pending\sflushes\s\(fsync\)\slog:\s(\d+);\sbuffer\spool:\s(\d+)\s?", line)
|
ib_io = re.match(
|
||||||
ib_log_p1 = re.match(r"\s?Log\ssequence\snumber\s([[a-fA-F\d]+)(?: (\d+))?", line)
|
r"(\d+)\sOS\sfile\sreads,\s(\d+)\sOS\sfile\swrites,\s(\d+)\sOS\sfsyncs",
|
||||||
ib_log_p2 = re.match(r"\s?Log\sflushed\sup\sto\s+([[a-fA-F\d]+)(?: (\d+))?", line)
|
line,
|
||||||
ib_semaphore = re.match(r"\s?Mutex\sspin\swaits\s(\d+),\srounds\s(\d+),\sOS waits\s(\d+)", line)
|
)
|
||||||
ib_tnx = re.match(r"\s?Trx\sid\scounter\s([[a-fA-F\d]+)(?: (\d+))?", line)
|
ib_io_log = re.match(r"(\d+)\slog\si\/o's\sdone.*", line)
|
||||||
|
ib_io_p1 = re.match(
|
||||||
|
r"Pending\snormal\saio\sreads:\s(\d+),\saio\swrites:\s(\d+),", line
|
||||||
|
)
|
||||||
|
ib_io_p2 = re.match(
|
||||||
|
r"\s?ibuf\saio\sreads:\s(\d+),\slog\si\/o's:\s(\d+),\ssync\si\/o's:\s(\d+)",
|
||||||
|
line,
|
||||||
|
)
|
||||||
|
ib_io_p3 = re.match(
|
||||||
|
r"\s?Pending\sflushes\s\(fsync\)\slog:\s(\d+);\sbuffer\spool:\s(\d+)\s?",
|
||||||
|
line,
|
||||||
|
)
|
||||||
|
ib_log_p1 = re.match(
|
||||||
|
r"\s?Log\ssequence\snumber\s([[a-fA-F\d]+)(?: (\d+))?", line
|
||||||
|
)
|
||||||
|
ib_log_p2 = re.match(
|
||||||
|
r"\s?Log\sflushed\sup\sto\s+([[a-fA-F\d]+)(?: (\d+))?", line
|
||||||
|
)
|
||||||
|
ib_semaphore = re.match(
|
||||||
|
r"\s?Mutex\sspin\swaits\s(\d+),\srounds\s(\d+),\sOS waits\s(\d+)", line
|
||||||
|
)
|
||||||
|
ib_tnx = re.match(r"\s?Trx\sid\scounter\s([[a-fA-F\d]+)(?: (\d+))?", line)
|
||||||
|
|
||||||
if ib_bpool_size:
|
if ib_bpool_size:
|
||||||
data['ib_bpool_size'] = ib_bpool_size.group(1)
|
data["ib_bpool_size"] = ib_bpool_size.group(1)
|
||||||
elif ib_bpool_free:
|
elif ib_bpool_free:
|
||||||
data['ib_bpool_free'] = ib_bpool_free.group(1)
|
data["ib_bpool_free"] = ib_bpool_free.group(1)
|
||||||
elif ib_bpool_dbpages:
|
elif ib_bpool_dbpages:
|
||||||
data['ib_bpool_dbpages'] = ib_bpool_dbpages.group(1)
|
data["ib_bpool_dbpages"] = ib_bpool_dbpages.group(1)
|
||||||
elif ib_bpool_modpages:
|
elif ib_bpool_modpages:
|
||||||
data['ib_bpool_modpages'] = ib_bpool_modpages.group(1)
|
data["ib_bpool_modpages"] = ib_bpool_modpages.group(1)
|
||||||
elif ib_insert_buffer:
|
elif ib_insert_buffer:
|
||||||
data['ib_ibuf_inserts'] = ib_insert_buffer.group(1)
|
data["ib_ibuf_inserts"] = ib_insert_buffer.group(1)
|
||||||
data['ib_ibuf_merged_rec'] = ib_insert_buffer.group(2)
|
data["ib_ibuf_merged_rec"] = ib_insert_buffer.group(2)
|
||||||
data['ib_ibuf_merges'] = ib_insert_buffer.group(3)
|
data["ib_ibuf_merges"] = ib_insert_buffer.group(3)
|
||||||
elif ib_io:
|
elif ib_io:
|
||||||
data['ib_io_read'] = ib_io.group(1)
|
data["ib_io_read"] = ib_io.group(1)
|
||||||
data['ib_io_write'] = ib_io.group(2)
|
data["ib_io_write"] = ib_io.group(2)
|
||||||
data['ib_io_fsync'] = ib_io.group(3)
|
data["ib_io_fsync"] = ib_io.group(3)
|
||||||
elif ib_io_log:
|
elif ib_io_log:
|
||||||
data['ib_io_log'] = ib_io_log.group(1)
|
data["ib_io_log"] = ib_io_log.group(1)
|
||||||
elif ib_io_p1:
|
elif ib_io_p1:
|
||||||
data['ib_iop_aioread'] = ib_io_p1.group(1)
|
data["ib_iop_aioread"] = ib_io_p1.group(1)
|
||||||
data['ib_iop_aiowrite'] = ib_io_p1.group(2)
|
data["ib_iop_aiowrite"] = ib_io_p1.group(2)
|
||||||
elif ib_io_p2:
|
elif ib_io_p2:
|
||||||
data['ib_iop_ibuf_aio'] = ib_io_p2.group(1)
|
data["ib_iop_ibuf_aio"] = ib_io_p2.group(1)
|
||||||
data['ib_iop_log'] = ib_io_p2.group(2)
|
data["ib_iop_log"] = ib_io_p2.group(2)
|
||||||
data['ib_iop_sync'] = ib_io_p2.group(3)
|
data["ib_iop_sync"] = ib_io_p2.group(3)
|
||||||
elif ib_io_p3:
|
elif ib_io_p3:
|
||||||
data['ib_iop_flush_log'] = ib_io_p3.group(1)
|
data["ib_iop_flush_log"] = ib_io_p3.group(1)
|
||||||
data['ib_iop_flush_bpool'] = ib_io_p3.group(2)
|
data["ib_iop_flush_bpool"] = ib_io_p3.group(2)
|
||||||
elif ib_log_p1:
|
elif ib_log_p1:
|
||||||
data['ib_log_written'] = ib_log_p1.group(1)
|
data["ib_log_written"] = ib_log_p1.group(1)
|
||||||
if ib_log_p1.group(2):
|
if ib_log_p1.group(2):
|
||||||
data['ib_log_written'] = int(data['ib_log_written']) + int(ib_log_p1.group(2))
|
data["ib_log_written"] = int(data["ib_log_written"]) + int(
|
||||||
elif ib_log_p2:
|
ib_log_p1.group(2)
|
||||||
data['ib_log_flush'] = ib_log_p2.group(1)
|
)
|
||||||
if ib_log_p2.group(2):
|
elif ib_log_p2:
|
||||||
data['ib_log_flush'] = int(data['ib_log_flush']) + int(ib_log_p2.group(2))
|
data["ib_log_flush"] = ib_log_p2.group(1)
|
||||||
elif ib_semaphore:
|
if ib_log_p2.group(2):
|
||||||
data['ib_spin_waits'] = ib_semaphore.group(1)
|
data["ib_log_flush"] = int(data["ib_log_flush"]) + int(
|
||||||
data['ib_spin_rounds'] = ib_semaphore.group(2)
|
ib_log_p2.group(2)
|
||||||
data['ib_os_waits'] = ib_semaphore.group(3)
|
)
|
||||||
elif ib_tnx:
|
elif ib_semaphore:
|
||||||
data['ib_tnx'] = ib_tnx.group(1)
|
data["ib_spin_waits"] = ib_semaphore.group(1)
|
||||||
if ib_tnx.group(2):
|
data["ib_spin_rounds"] = ib_semaphore.group(2)
|
||||||
data['ib_tnx'] = int(data['ib_tnx']) + int(ib_tnx.group(2))
|
data["ib_os_waits"] = ib_semaphore.group(3)
|
||||||
elif ib_b_reg:
|
elif ib_tnx:
|
||||||
data['ib_bpool_read'] = ib_b_reg.group(1)
|
data["ib_tnx"] = ib_tnx.group(1)
|
||||||
data['ib_bpool_created'] = ib_b_reg.group(2)
|
if ib_tnx.group(2):
|
||||||
data['ib_bpool_written'] = ib_b_reg.group(3)
|
data["ib_tnx"] = int(data["ib_tnx"]) + int(ib_tnx.group(2))
|
||||||
|
elif ib_b_reg:
|
||||||
|
data["ib_bpool_read"] = ib_b_reg.group(1)
|
||||||
|
data["ib_bpool_created"] = ib_b_reg.group(2)
|
||||||
|
data["ib_bpool_written"] = ib_b_reg.group(3)
|
||||||
|
|
||||||
|
|
||||||
for category in datavariables:
|
for category in datavariables:
|
||||||
for variable in datavariables[category]:
|
for variable in datavariables[category]:
|
||||||
if variable in data:
|
if variable in data:
|
||||||
print data[variable]
|
print data[variable]
|
||||||
|
@@ -1,8 +1,8 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
from urllib.request import urlopen
|
|
||||||
import re
|
import re
|
||||||
|
from urllib.request import urlopen
|
||||||
|
|
||||||
data = urlopen('http://localhost/nginx-status').read()
|
data = urlopen("http://localhost/nginx-status").read()
|
||||||
|
|
||||||
params = {}
|
params = {}
|
||||||
|
|
||||||
@@ -22,7 +22,9 @@ dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"]
|
|||||||
|
|
||||||
for param in dataorder:
|
for param in dataorder:
|
||||||
if param == "Active":
|
if param == "Active":
|
||||||
Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
|
Active = (
|
||||||
|
int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
|
||||||
|
)
|
||||||
print(Active)
|
print(Active)
|
||||||
else:
|
else:
|
||||||
print(params[param])
|
print(params[param])
|
||||||
|
@@ -1,28 +1,31 @@
|
|||||||
#!/usr/bin/env python2
|
#!/usr/bin/env python2
|
||||||
import urllib2
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
data = urllib2.urlopen('http://localhost/nginx-status').read()
|
import urllib2
|
||||||
|
|
||||||
|
data = urllib2.urlopen("http://localhost/nginx-status").read()
|
||||||
|
|
||||||
params = {}
|
params = {}
|
||||||
|
|
||||||
for line in data.split("\n"):
|
for line in data.split("\n"):
|
||||||
smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line)
|
smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line)
|
||||||
req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line)
|
req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line)
|
||||||
if smallstat:
|
if smallstat:
|
||||||
params["Reading"] = smallstat.group(1)
|
params["Reading"] = smallstat.group(1)
|
||||||
params["Writing"] = smallstat.group(2)
|
params["Writing"] = smallstat.group(2)
|
||||||
params["Waiting"] = smallstat.group(3)
|
params["Waiting"] = smallstat.group(3)
|
||||||
elif req:
|
elif req:
|
||||||
params["Requests"] = req.group(3)
|
params["Requests"] = req.group(3)
|
||||||
else:
|
else:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"]
|
dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"]
|
||||||
|
|
||||||
for param in dataorder:
|
for param in dataorder:
|
||||||
if param == "Active":
|
if param == "Active":
|
||||||
Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
|
Active = (
|
||||||
print Active
|
int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
|
||||||
else:
|
)
|
||||||
print params[param]
|
print Active
|
||||||
|
else:
|
||||||
|
print params[param]
|
||||||
|
@@ -19,17 +19,17 @@ BIN_AWK='/usr/bin/env awk'
|
|||||||
BIN_HEAD='/usr/bin/env head'
|
BIN_HEAD='/usr/bin/env head'
|
||||||
|
|
||||||
CONFIG=$0".conf"
|
CONFIG=$0".conf"
|
||||||
if [ -f $CONFIG ]; then
|
if [ -f "$CONFIG" ]; then
|
||||||
. $CONFIG
|
. "$CONFIG"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
NTP_OFFSET=`$BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}'`
|
NTP_OFFSET=$($BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}')
|
||||||
NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}'`
|
NTP_FREQUENCY=$($BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}')
|
||||||
NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'`
|
NTP_SYS_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}')
|
||||||
NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'`
|
NTP_CLK_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}')
|
||||||
NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'`
|
NTP_WANDER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}')
|
||||||
NTP_VERSION=`$BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}'`
|
NTP_VERSION=$($BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}')
|
||||||
|
|
||||||
echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}'
|
echo '{"data":{"offset":"'"$NTP_OFFSET"'","frequency":"'"$NTP_FREQUENCY"'","sys_jitter":"'"$NTP_SYS_JITTER"'","clk_jitter":"'"$NTP_CLK_JITTER"'","clk_wander":"'"$NTP_WANDER"'"},"version":"'"$NTP_VERSION"'","error":"0","errorString":""}'
|
||||||
|
|
||||||
exit 0
|
exit 0
|
||||||
|
@@ -33,67 +33,67 @@ NTPQV="p11"
|
|||||||
# Don't change anything unless you know what are you doing #
|
# Don't change anything unless you know what are you doing #
|
||||||
################################################################
|
################################################################
|
||||||
CONFIG=$0".conf"
|
CONFIG=$0".conf"
|
||||||
if [ -f $CONFIG ]; then
|
if [ -f "$CONFIG" ]; then
|
||||||
. $CONFIG
|
. "$CONFIG"
|
||||||
fi
|
fi
|
||||||
VERSION=1
|
VERSION=1
|
||||||
|
|
||||||
STRATUM=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2`
|
STRATUM=$($BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2)
|
||||||
|
|
||||||
# parse the ntpq info that requires version specific info
|
# parse the ntpq info that requires version specific info
|
||||||
NTPQ_RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'`
|
NTPQ_RAW=$($BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g')
|
||||||
if [ $NTPQV = "p11" ]; then
|
if [ $NTPQV = "p11" ]; then
|
||||||
OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'`
|
OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}')
|
||||||
FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'`
|
FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}')
|
||||||
SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'`
|
SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}')
|
||||||
CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'`
|
CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}')
|
||||||
CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}'`
|
CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $7}')
|
||||||
fi
|
fi
|
||||||
if [ $NTPQV = "p1" ]; then
|
if [ $NTPQV = "p1" ]; then
|
||||||
OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $2}'`
|
OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $2}')
|
||||||
FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'`
|
FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}')
|
||||||
SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'`
|
SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}')
|
||||||
CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'`
|
CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}')
|
||||||
CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'`
|
CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
VER=`$BIN_NTPD --version`
|
VER=$($BIN_NTPD --version)
|
||||||
if [ "$VER" = '4.2.6p5' ]; then
|
if [ "$VER" = '4.2.6p5' ]; then
|
||||||
USECMD=`echo $BIN_NTPDC -c iostats`
|
USECMD=$(echo "$BIN_NTPDC" -c iostats)
|
||||||
else
|
else
|
||||||
USECMD=`echo $BIN_NTPQ -c iostats localhost`
|
USECMD=$(echo "$BIN_NTPQ" -c iostats localhost)
|
||||||
fi
|
fi
|
||||||
CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' '`
|
CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ')
|
||||||
|
|
||||||
TIMESINCERESET=`echo $CMD2 | $BIN_AWK -F ' ' '{print $1}'`
|
TIMESINCERESET=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $1}')
|
||||||
RECEIVEDBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $2}'`
|
RECEIVEDBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $2}')
|
||||||
FREERECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $3}'`
|
FREERECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $3}')
|
||||||
USEDRECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $4}'`
|
USEDRECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $4}')
|
||||||
LOWWATERREFILLS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $5}'`
|
LOWWATERREFILLS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $5}')
|
||||||
DROPPEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $6}'`
|
DROPPEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $6}')
|
||||||
IGNOREDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $7}'`
|
IGNOREDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $7}')
|
||||||
RECEIVEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $8}'`
|
RECEIVEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $8}')
|
||||||
PACKETSSENT=`echo $CMD2 | $BIN_AWK -F ' ' '{print $9}'`
|
PACKETSSENT=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $9}')
|
||||||
PACKETSENDFAILURES=`echo $CMD2 | $BIN_AWK -F ' ' '{print $10}'`
|
PACKETSENDFAILURES=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $10}')
|
||||||
INPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $11}'`
|
INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}')
|
||||||
USEFULINPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $12}'`
|
USEFULINPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $12}')
|
||||||
|
|
||||||
echo '{"data":{"offset":"'$OFFSET\
|
echo '{"data":{"offset":"'"$OFFSET"\
|
||||||
'","frequency":"'$FREQUENCY\
|
'","frequency":"'"$FREQUENCY"\
|
||||||
'","sys_jitter":"'$SYS_JITTER\
|
'","sys_jitter":"'"$SYS_JITTER"\
|
||||||
'","clk_jitter":"'$CLK_JITTER\
|
'","clk_jitter":"'"$CLK_JITTER"\
|
||||||
'","clk_wander":"'$CLK_WANDER\
|
'","clk_wander":"'"$CLK_WANDER"\
|
||||||
'","stratum":"'$STRATUM\
|
'","stratum":"'"$STRATUM"\
|
||||||
'","time_since_reset":"'$TIMESINCERESET\
|
'","time_since_reset":"'"$TIMESINCERESET"\
|
||||||
'","receive_buffers":"'$RECEIVEDBUFFERS\
|
'","receive_buffers":"'"$RECEIVEDBUFFERS"\
|
||||||
'","free_receive_buffers":"'$FREERECEIVEBUFFERS\
|
'","free_receive_buffers":"'"$FREERECEIVEBUFFERS"\
|
||||||
'","used_receive_buffers":"'$USEDRECEIVEBUFFERS\
|
'","used_receive_buffers":"'"$USEDRECEIVEBUFFERS"\
|
||||||
'","low_water_refills":"'$LOWWATERREFILLS\
|
'","low_water_refills":"'"$LOWWATERREFILLS"\
|
||||||
'","dropped_packets":"'$DROPPEDPACKETS\
|
'","dropped_packets":"'"$DROPPEDPACKETS"\
|
||||||
'","ignored_packets":"'$IGNOREDPACKETS\
|
'","ignored_packets":"'"$IGNOREDPACKETS"\
|
||||||
'","received_packets":"'$RECEIVEDPACKETS\
|
'","received_packets":"'"$RECEIVEDPACKETS"\
|
||||||
'","packets_sent":"'$PACKETSSENT\
|
'","packets_sent":"'"$PACKETSSENT"\
|
||||||
'","packet_send_failures":"'$PACKETSENDFAILURES\
|
'","packet_send_failures":"'"$PACKETSENDFAILURES"\
|
||||||
'","input_wakeups":"'$PACKETSENDFAILURES\
|
'","input_wakeups":"'"$PACKETSENDFAILURES"\
|
||||||
'","useful_input_wakeups":"'$USEFULINPUTWAKEUPS\
|
'","useful_input_wakeups":"'"$USEFULINPUTWAKEUPS"\
|
||||||
'"},"error":"0","errorString":"","version":"'$VERSION'"}'
|
'"},"error":"0","errorString":"","version":"'$VERSION'"}'
|
||||||
|
@@ -17,10 +17,10 @@ sed='/usr/bin/env sed'
|
|||||||
# 0 1 43 3 2 0 0 2700 862 0 0 462 4 - - 0 26 3
|
# 0 1 43 3 2 0 0 2700 862 0 0 462 4 - - 0 26 3
|
||||||
$nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g'
|
$nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g'
|
||||||
|
|
||||||
lines=`$nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l`
|
lines=$($nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l)
|
||||||
|
|
||||||
# if we are less than 5 then all GPUs were printed
|
# if we are less than 5 then all GPUs were printed
|
||||||
if [ $lines -lt 5 ]; then
|
if [ "$lines" -lt 5 ]; then
|
||||||
exit 0;
|
exit 0;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -35,5 +35,5 @@ do
|
|||||||
loop=0
|
loop=0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
gpu=`expr $gpu + 1`
|
gpu=$(expr $gpu + 1)
|
||||||
done
|
done
|
||||||
|
@@ -9,11 +9,11 @@ load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Averag
|
|||||||
total_files=$(lsof -c opensips | wc -l)
|
total_files=$(lsof -c opensips | wc -l)
|
||||||
|
|
||||||
|
|
||||||
echo $total_memory
|
echo "$total_memory"
|
||||||
echo $used_memory
|
echo "$used_memory"
|
||||||
echo $free_memory
|
echo "$free_memory"
|
||||||
echo $load_average
|
echo "$load_average"
|
||||||
echo "Open files="$total_files
|
echo "Open files=""$total_files"
|
||||||
|
|
||||||
exit
|
exit
|
||||||
|
|
||||||
|
@@ -9,10 +9,10 @@ load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Averag
|
|||||||
total_files=$(lsof -c opensips | wc -l)
|
total_files=$(lsof -c opensips | wc -l)
|
||||||
|
|
||||||
|
|
||||||
echo $total_memory
|
echo "$total_memory"
|
||||||
echo $used_memory
|
echo "$used_memory"
|
||||||
echo $free_memory
|
echo "$free_memory"
|
||||||
echo $load_average
|
echo "$load_average"
|
||||||
echo "Open files="$total_files
|
echo "Open files=""$total_files"
|
||||||
|
|
||||||
exit
|
exit
|
||||||
|
@@ -34,56 +34,56 @@ CMD_APK=' version'
|
|||||||
################################################################
|
################################################################
|
||||||
if command -v zypper &>/dev/null ; then
|
if command -v zypper &>/dev/null ; then
|
||||||
# OpenSUSE
|
# OpenSUSE
|
||||||
UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC`
|
UPDATES=$($BIN_ZYPPER "$CMD_ZYPPER" | $BIN_WC $CMD_WC)
|
||||||
if [ $UPDATES -ge 2 ]; then
|
if [ "$UPDATES" -ge 2 ]; then
|
||||||
echo $(($UPDATES-2));
|
echo $(($UPDATES-2));
|
||||||
else
|
else
|
||||||
echo "0";
|
echo "0";
|
||||||
fi
|
fi
|
||||||
elif command -v dnf &>/dev/null ; then
|
elif command -v dnf &>/dev/null ; then
|
||||||
# Fedora
|
# Fedora
|
||||||
UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC`
|
UPDATES=$($BIN_DNF "$CMD_DNF" | $BIN_WC $CMD_WC)
|
||||||
if [ $UPDATES -ge 1 ]; then
|
if [ "$UPDATES" -ge 1 ]; then
|
||||||
echo $(($UPDATES-1));
|
echo $(($UPDATES-1));
|
||||||
else
|
else
|
||||||
echo "0";
|
echo "0";
|
||||||
fi
|
fi
|
||||||
elif command -v pacman &>/dev/null ; then
|
elif command -v pacman &>/dev/null ; then
|
||||||
# Arch
|
# Arch
|
||||||
UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC`
|
UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC)
|
||||||
if [ $UPDATES -ge 1 ]; then
|
if [ "$UPDATES" -ge 1 ]; then
|
||||||
echo $(($UPDATES-1));
|
echo $(($UPDATES-1));
|
||||||
else
|
else
|
||||||
echo "0";
|
echo "0";
|
||||||
fi
|
fi
|
||||||
elif command -v yum &>/dev/null ; then
|
elif command -v yum &>/dev/null ; then
|
||||||
# CentOS / Redhat
|
# CentOS / Redhat
|
||||||
UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC`
|
UPDATES=$($BIN_YUM "$CMD_YUM" | $BIN_WC $CMD_WC)
|
||||||
if [ $UPDATES -ge 1 ]; then
|
if [ "$UPDATES" -ge 1 ]; then
|
||||||
echo $(($UPDATES-1));
|
echo $(($UPDATES-1));
|
||||||
else
|
else
|
||||||
echo "0";
|
echo "0";
|
||||||
fi
|
fi
|
||||||
elif command -v apt-get &>/dev/null ; then
|
elif command -v apt-get &>/dev/null ; then
|
||||||
# Debian / Devuan / Ubuntu
|
# Debian / Devuan / Ubuntu
|
||||||
UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'`
|
UPDATES=$($BIN_APT "$CMD_APT" | $BIN_GREP $CMD_GREP 'Inst')
|
||||||
if [ $UPDATES -ge 1 ]; then
|
if [ "$UPDATES" -ge 1 ]; then
|
||||||
echo $UPDATES;
|
echo "$UPDATES";
|
||||||
else
|
else
|
||||||
echo "0";
|
echo "0";
|
||||||
fi
|
fi
|
||||||
elif command -v pkg &>/dev/null ; then
|
elif command -v pkg &>/dev/null ; then
|
||||||
# FreeBSD
|
# FreeBSD
|
||||||
UPDATES=`$BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC`
|
UPDATES=$($BIN_PKG "$CMD_PKG" | $BIN_WC $CMD_WC)
|
||||||
if [ $UPDATES -ge 1 ]; then
|
if [ "$UPDATES" -ge 1 ]; then
|
||||||
echo $UPDATES;
|
echo "$UPDATES";
|
||||||
else
|
else
|
||||||
echo "0";
|
echo "0";
|
||||||
fi
|
fi
|
||||||
elif command -v apk &>/dev/null ; then
|
elif command -v apk &>/dev/null ; then
|
||||||
# Alpine
|
# Alpine
|
||||||
UPDATES=`$BIN_APK $CMD_APK | $BIN_WC $CMD_WC`
|
UPDATES=$($BIN_APK "$CMD_APK" | $BIN_WC $CMD_WC)
|
||||||
if [ $UPDATES -ge 2 ]; then
|
if [ "$UPDATES" -ge 2 ]; then
|
||||||
echo $(($UPDATES-1));
|
echo $(($UPDATES-1));
|
||||||
else
|
else
|
||||||
echo "0";
|
echo "0";
|
||||||
|
@@ -78,7 +78,7 @@ phpfpm_slow_requests=0
|
|||||||
|
|
||||||
# local opts="${1}" url="${2}"
|
# local opts="${1}" url="${2}"
|
||||||
|
|
||||||
phpfpm_response=($(curl -Ss ${opts} "${url}"))
|
phpfpm_response=($(curl -Ss "${opts}" "${url}"))
|
||||||
[ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1
|
[ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1
|
||||||
|
|
||||||
if [[ "${phpfpm_response[0]}" != "pool:" \
|
if [[ "${phpfpm_response[0]}" != "pool:" \
|
||||||
@@ -131,16 +131,16 @@ phpfpm_slow_requests=0
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo $phpfpm_pool
|
echo "$phpfpm_pool"
|
||||||
echo $phpfpm_start_time
|
echo "$phpfpm_start_time"
|
||||||
echo $phpfpm_start_since
|
echo "$phpfpm_start_since"
|
||||||
echo $phpfpm_accepted_conn
|
echo "$phpfpm_accepted_conn"
|
||||||
echo $phpfpm_listen_queue
|
echo "$phpfpm_listen_queue"
|
||||||
echo $phpfpm_max_listen_queue
|
echo "$phpfpm_max_listen_queue"
|
||||||
echo $phpfpm_listen_queue_len
|
echo "$phpfpm_listen_queue_len"
|
||||||
echo $phpfpm_idle_processes
|
echo "$phpfpm_idle_processes"
|
||||||
echo $phpfpm_active_processes
|
echo "$phpfpm_active_processes"
|
||||||
echo $phpfpm_total_processes
|
echo "$phpfpm_total_processes"
|
||||||
echo $phpfpm_max_active_processes
|
echo "$phpfpm_max_active_processes"
|
||||||
echo $phpfpm_max_children_reached
|
echo "$phpfpm_max_children_reached"
|
||||||
echo $phpfpm_slow_requests
|
echo $phpfpm_slow_requests
|
||||||
|
20
snmp/pi-hole
@@ -55,19 +55,19 @@ debug() {
|
|||||||
echo '[ok] API_URL is set'
|
echo '[ok] API_URL is set'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z $API_AUTH_KEY ]; then
|
if [ -z "$API_AUTH_KEY" ]; then
|
||||||
echo '[warning] API_AUTH_KEY is not set, some values will not be available'
|
echo '[warning] API_AUTH_KEY is not set, some values will not be available'
|
||||||
else
|
else
|
||||||
echo '[ok] API_AUTH_KEY is set'
|
echo '[ok] API_AUTH_KEY is set'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z ${URL_READ_ONLY} ]; then
|
if [ -z "${URL_READ_ONLY}" ]; then
|
||||||
echo '[error] URL_READ_ONLY is not set'
|
echo '[error] URL_READ_ONLY is not set'
|
||||||
else
|
else
|
||||||
echo '[ok] URL_READ_ONLY is set'
|
echo '[ok] URL_READ_ONLY is set'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z ${URL_QUERY_TYPE} ]; then
|
if [ -z "${URL_QUERY_TYPE}" ]; then
|
||||||
echo '[error] URL_QUERY_TYPE is not set'
|
echo '[error] URL_QUERY_TYPE is not set'
|
||||||
else
|
else
|
||||||
echo '[ok] URL_QUERY_TYPE not set'
|
echo '[ok] URL_QUERY_TYPE not set'
|
||||||
@@ -87,20 +87,20 @@ debug() {
|
|||||||
exportdata() {
|
exportdata() {
|
||||||
# domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today
|
# domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today
|
||||||
# unique_domains / queries_forwarded / queries_cached
|
# unique_domains / queries_forwarded / queries_cached
|
||||||
GET_STATS=$(curl -s $API_URL$URL_READ_ONLY | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached')
|
GET_STATS=$(curl -s $API_URL"$URL_READ_ONLY" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached')
|
||||||
echo $GET_STATS | tr " " "\n"
|
echo "$GET_STATS" | tr " " "\n"
|
||||||
# A / AAAA / PTR / SRV
|
# A / AAAA / PTR / SRV
|
||||||
GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]')
|
GET_QUERY_TYPE=$(curl -s $API_URL"$URL_QUERY_TYPE""$API_AUTH_KEY" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]')
|
||||||
echo $GET_QUERY_TYPE | tr " " "\n"
|
echo "$GET_QUERY_TYPE" | tr " " "\n"
|
||||||
|
|
||||||
# Find number of DHCP address in scope and current lease count
|
# Find number of DHCP address in scope and current lease count
|
||||||
# case-insensitive compare, just in case :)
|
# case-insensitive compare, just in case :)
|
||||||
if [ "${DHCP_ACTIVE,,}" = "true" ]; then
|
if [ "${DHCP_ACTIVE,,}" = "true" ]; then
|
||||||
# Max IP addresses in scope
|
# Max IP addresses in scope
|
||||||
# Convert IPs to decimal and subtract
|
# Convert IPs to decimal and subtract
|
||||||
IFS="." read -r -a array <<< $DHCP_START
|
IFS="." read -r -a array <<< "$DHCP_START"
|
||||||
DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} ))
|
DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} ))
|
||||||
IFS="." read -r -a array <<< $DHCP_END
|
IFS="." read -r -a array <<< "$DHCP_END"
|
||||||
DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} ))
|
DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} ))
|
||||||
expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL
|
expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL
|
||||||
# Current lease count
|
# Current lease count
|
||||||
@@ -111,7 +111,7 @@ exportdata() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
if [ -z $* ]; then
|
if [ -z "$*" ]; then
|
||||||
exportdata
|
exportdata
|
||||||
fi
|
fi
|
||||||
expr "$*" : ".*--help" > /dev/null && usage
|
expr "$*" : ".*--help" > /dev/null && usage
|
||||||
|
@@ -8,6 +8,6 @@
|
|||||||
QUEUES="incoming active deferred hold"
|
QUEUES="incoming active deferred hold"
|
||||||
|
|
||||||
 for i in $QUEUES; do
-    COUNT=`qshape $i | grep TOTAL | awk '{print $2}'`
+    COUNT=$(qshape "$i" | grep TOTAL | awk '{print $2}')
     printf "$COUNT\n"
 done
@@ -6,7 +6,7 @@ API_AUTH_USER="admin"
 API_AUTH_PASS=""
 API_URL=""
 API_STATS="jsonstat?command=stats"
-TMP_FILE=`/usr/bin/mktemp`
+TMP_FILE=$(/usr/bin/mktemp)

 #/ Description: BASH script to get PowerDNS dnsdist stats
 #/ Examples: ./powerdns-dnsdist
@@ -65,100 +65,100 @@ debug() {

 exportdata() {
     # get current data
-    curl -s -u$API_AUTH_USER:$API_AUTH_PASS $API_URL$API_STATS | jq '.' > $TMP_FILE
+    curl -s -u$API_AUTH_USER:"$API_AUTH_PASS" "$API_URL""$API_STATS" | jq '.' > "$TMP_FILE"

     # generate export values
-    JSON_VALUES=$(cat $TMP_FILE)
+    JSON_VALUES=$(cat "$TMP_FILE")

-    STAT_CACHE_HIT=$(echo $JSON_VALUES | jq '."cache-hits"')
+    STAT_CACHE_HIT=$(echo "$JSON_VALUES" | jq '."cache-hits"')
-    echo $STAT_CACHE_HIT
+    echo "$STAT_CACHE_HIT"

-    STAT_CACHE_MISS=$(echo $JSON_VALUES | jq '."cache-misses"')
+    STAT_CACHE_MISS=$(echo "$JSON_VALUES" | jq '."cache-misses"')
-    echo $STAT_CACHE_MISS
+    echo "$STAT_CACHE_MISS"

-    STAT_DOWNSTREAM_ERR=$(echo $JSON_VALUES | jq '."downstream-send-errors"')
+    STAT_DOWNSTREAM_ERR=$(echo "$JSON_VALUES" | jq '."downstream-send-errors"')
-    echo $STAT_DOWNSTREAM_ERR
+    echo "$STAT_DOWNSTREAM_ERR"

-    STAT_DOWNSTREAM_TIMEOUT=$(echo $JSON_VALUES | jq '."downstream-timeouts"')
+    STAT_DOWNSTREAM_TIMEOUT=$(echo "$JSON_VALUES" | jq '."downstream-timeouts"')
-    echo $STAT_DOWNSTREAM_TIMEOUT
+    echo "$STAT_DOWNSTREAM_TIMEOUT"

-    STAT_DYNAMIC_BLOCK_SIZE=$(echo $JSON_VALUES | jq '."dyn-block-nmg-size"')
+    STAT_DYNAMIC_BLOCK_SIZE=$(echo "$JSON_VALUES" | jq '."dyn-block-nmg-size"')
-    echo $STAT_DYNAMIC_BLOCK_SIZE
+    echo "$STAT_DYNAMIC_BLOCK_SIZE"

-    STAT_DYNAMIC_BLOCK=$(echo $JSON_VALUES | jq '."dyn-blocked"')
+    STAT_DYNAMIC_BLOCK=$(echo "$JSON_VALUES" | jq '."dyn-blocked"')
-    echo $STAT_DYNAMIC_BLOCK
+    echo "$STAT_DYNAMIC_BLOCK"

-    STAT_QUERIES_COUNT=$(echo $JSON_VALUES | jq '.queries')
+    STAT_QUERIES_COUNT=$(echo "$JSON_VALUES" | jq '.queries')
-    echo $STAT_QUERIES_COUNT
+    echo "$STAT_QUERIES_COUNT"

-    STAT_QUERIES_RECURSIVE=$(echo $JSON_VALUES | jq '.rdqueries')
+    STAT_QUERIES_RECURSIVE=$(echo "$JSON_VALUES" | jq '.rdqueries')
-    echo $STAT_QUERIES_RECURSIVE
+    echo "$STAT_QUERIES_RECURSIVE"

-    STAT_QUERIES_EMPTY=$(echo $JSON_VALUES | jq '."empty-queries"')
+    STAT_QUERIES_EMPTY=$(echo "$JSON_VALUES" | jq '."empty-queries"')
-    echo $STAT_QUERIES_EMPTY
+    echo "$STAT_QUERIES_EMPTY"

-    STAT_QUERIES_DROP_NO_POLICY=$(echo $JSON_VALUES | jq '."no-policy"')
+    STAT_QUERIES_DROP_NO_POLICY=$(echo "$JSON_VALUES" | jq '."no-policy"')
-    echo $STAT_QUERIES_DROP_NO_POLICY
+    echo "$STAT_QUERIES_DROP_NO_POLICY"

-    STAT_QUERIES_DROP_NC=$(echo $JSON_VALUES | jq '."noncompliant-queries"')
+    STAT_QUERIES_DROP_NC=$(echo "$JSON_VALUES" | jq '."noncompliant-queries"')
-    echo $STAT_QUERIES_DROP_NC
+    echo "$STAT_QUERIES_DROP_NC"

-    STAT_QUERIES_DROP_NC_ANSWER=$(echo $JSON_VALUES | jq '."noncompliant-responses"')
+    STAT_QUERIES_DROP_NC_ANSWER=$(echo "$JSON_VALUES" | jq '."noncompliant-responses"')
-    echo $STAT_QUERIES_DROP_NC_ANSWER
+    echo "$STAT_QUERIES_DROP_NC_ANSWER"

-    STAT_QUERIES_SELF_ANSWER=$(echo $JSON_VALUES | jq '."self-answered"')
+    STAT_QUERIES_SELF_ANSWER=$(echo "$JSON_VALUES" | jq '."self-answered"')
-    echo $STAT_QUERIES_SELF_ANSWER
+    echo "$STAT_QUERIES_SELF_ANSWER"

-    STAT_QUERIES_SERVFAIL=$(echo $JSON_VALUES | jq '."servfail-responses"')
+    STAT_QUERIES_SERVFAIL=$(echo "$JSON_VALUES" | jq '."servfail-responses"')
-    echo $STAT_QUERIES_SERVFAIL
+    echo "$STAT_QUERIES_SERVFAIL"

-    STAT_QUERIES_FAILURE=$(echo $JSON_VALUES | jq '."trunc-failures"')
+    STAT_QUERIES_FAILURE=$(echo "$JSON_VALUES" | jq '."trunc-failures"')
-    echo $STAT_QUERIES_FAILURE
+    echo "$STAT_QUERIES_FAILURE"

-    STAT_QUERIES_ACL_DROPS=$(echo $JSON_VALUES | jq '."acl-drops"')
+    STAT_QUERIES_ACL_DROPS=$(echo "$JSON_VALUES" | jq '."acl-drops"')
-    echo $STAT_QUERIES_ACL_DROPS
+    echo "$STAT_QUERIES_ACL_DROPS"

-    STAT_RULE_DROP=$(echo $JSON_VALUES | jq '."rule-drop"')
+    STAT_RULE_DROP=$(echo "$JSON_VALUES" | jq '."rule-drop"')
-    echo $STAT_RULE_DROP
+    echo "$STAT_RULE_DROP"

-    STAT_RULE_NXDOMAIN=$(echo $JSON_VALUES | jq '."rule-nxdomain"')
+    STAT_RULE_NXDOMAIN=$(echo "$JSON_VALUES" | jq '."rule-nxdomain"')
-    echo $STAT_RULE_NXDOMAIN
+    echo "$STAT_RULE_NXDOMAIN"

-    STAT_RULE_REFUSED=$(echo $JSON_VALUES | jq '."rule-refused"')
+    STAT_RULE_REFUSED=$(echo "$JSON_VALUES" | jq '."rule-refused"')
-    echo $STAT_RULE_REFUSED
+    echo "$STAT_RULE_REFUSED"

-    STAT_LATENCY_AVG_100=$(echo $JSON_VALUES | jq '."latency-avg100"')
+    STAT_LATENCY_AVG_100=$(echo "$JSON_VALUES" | jq '."latency-avg100"')
-    echo $STAT_LATENCY_AVG_100
+    echo "$STAT_LATENCY_AVG_100"

-    STAT_LATENCY_AVG_1000=$(echo $JSON_VALUES | jq '."latency-avg1000"')
+    STAT_LATENCY_AVG_1000=$(echo "$JSON_VALUES" | jq '."latency-avg1000"')
-    echo $STAT_LATENCY_AVG_1000
+    echo "$STAT_LATENCY_AVG_1000"

-    STAT_LATENCY_AVG_10000=$(echo $JSON_VALUES | jq '."latency-avg10000"')
+    STAT_LATENCY_AVG_10000=$(echo "$JSON_VALUES" | jq '."latency-avg10000"')
-    echo $STAT_LATENCY_AVG_10000
+    echo "$STAT_LATENCY_AVG_10000"

-    STAT_LATENCY_AVG_1000000=$(echo $JSON_VALUES | jq '."latency-avg1000000"')
+    STAT_LATENCY_AVG_1000000=$(echo "$JSON_VALUES" | jq '."latency-avg1000000"')
-    echo $STAT_LATENCY_AVG_1000000
+    echo "$STAT_LATENCY_AVG_1000000"

-    STAT_LATENCY_SLOW=$(echo $JSON_VALUES | jq '."latency-slow"')
+    STAT_LATENCY_SLOW=$(echo "$JSON_VALUES" | jq '."latency-slow"')
-    echo $STAT_LATENCY_SLOW
+    echo "$STAT_LATENCY_SLOW"

-    STAT_LATENCY_0_1=$(echo $JSON_VALUES | jq '."latency0-1"')
+    STAT_LATENCY_0_1=$(echo "$JSON_VALUES" | jq '."latency0-1"')
-    echo $STAT_LATENCY_0_1
+    echo "$STAT_LATENCY_0_1"

-    STAT_LATENCY_1_10=$(echo $JSON_VALUES | jq '."latency1-10"')
+    STAT_LATENCY_1_10=$(echo "$JSON_VALUES" | jq '."latency1-10"')
-    echo $STAT_LATENCY_1_10
+    echo "$STAT_LATENCY_1_10"

-    STAT_LATENCY_10_50=$(echo $JSON_VALUES | jq '."latency10-50"')
+    STAT_LATENCY_10_50=$(echo "$JSON_VALUES" | jq '."latency10-50"')
-    echo $STAT_LATENCY_10_50
+    echo "$STAT_LATENCY_10_50"

-    STAT_LATENCY_50_100=$(echo $JSON_VALUES | jq '."latency50-100"')
+    STAT_LATENCY_50_100=$(echo "$JSON_VALUES" | jq '."latency50-100"')
-    echo $STAT_LATENCY_50_100
+    echo "$STAT_LATENCY_50_100"

-    STAT_LATENCY_100_1000=$(echo $JSON_VALUES | jq '."latency100-1000"')
+    STAT_LATENCY_100_1000=$(echo "$JSON_VALUES" | jq '."latency100-1000"')
-    echo $STAT_LATENCY_100_1000
+    echo "$STAT_LATENCY_100_1000"
 }

-if [ -z $* ]; then
+if [ -z "$*" ]; then
     exportdata
 fi
 expr "$*" : ".*--help" > /dev/null && usage
@@ -1,12 +1,13 @@
 #!/usr/bin/python
-import json, subprocess
-from subprocess import Popen, PIPE
+import json
+import subprocess
+from subprocess import PIPE, Popen

-input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0]
+input = Popen(["rec_control", "get-all"], stdout=PIPE).communicate()[0]
 data = []

 for line in input.splitlines():
     item = line.split()
-    data.append({'name': item[0].decode(), 'value': int(item[1].decode())})
+    data.append({"name": item[0].decode(), "value": int(item[1].decode())})

 print(json.dumps(data))
@@ -3,24 +3,26 @@
 import json
 import subprocess

-pdnscontrol = '/usr/bin/pdns_control'
+pdnscontrol = "/usr/bin/pdns_control"

-process = subprocess.Popen([pdnscontrol, 'show', '*'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+process = subprocess.Popen(
+    [pdnscontrol, "show", "*"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+)
 input = process.communicate()
 stdout = input[0].decode()
 stderr = input[1].decode()

 data = {}
-for var in stdout.split(','):
+for var in stdout.split(","):
-    if '=' in var:
+    if "=" in var:
-        key, value = var.split('=')
+        key, value = var.split("=")
         data[key] = value

 output = {
-    'version': 1,
+    "version": 1,
-    'error': process.returncode,
+    "error": process.returncode,
-    'errorString': stderr,
+    "errorString": stderr,
-    'data': data
+    "data": data,
 }

 print(json.dumps(output))
@@ -67,22 +67,22 @@ version = 1.4

 ### Libraries

-import os
-import sys
 import getopt
 import json
+import os
 import re
 import shutil
 import subprocess
+import sys

 ### Option defaults

 method = "" # must be one of methods array
 verbose = False
 warnings = False
 librenms = True # Return results in a JSON format suitable for Librenms
 # Set to false to return JSON data only
 pretty = False # Pretty printing

 ### Globals

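Several of the Python extend scripts touched by this commit (pdns_control, puppet, pureftpd, redis and others below) print the same JSON envelope of version, error, errorString and data keys. A minimal sketch of that output contract, assuming only the standard library and using a placeholder payload:

import json

# Minimal sketch of the envelope the LibreNMS extend scripts in this commit print.
# The "data" value here is a placeholder; the real scripts fill it with collected metrics.
output = {
    "version": 1,       # format version of the script output
    "error": 0,         # non-zero when data collection failed
    "errorString": "",  # human-readable reason when error != 0
    "data": {"example": 42},
}

print(json.dumps(output))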
||||||
@@ -90,40 +90,51 @@ error = 0
|
|||||||
errorString = ""
|
errorString = ""
|
||||||
data = {}
|
data = {}
|
||||||
result = {}
|
result = {}
|
||||||
usage = "USAGE: " + os.path.basename(__file__) + " [-h|--help] |" \
|
usage = (
|
||||||
+ " [-m|--method <method>] [-N|--no-librenms] [-p|--pretty]" \
|
"USAGE: "
|
||||||
+ " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help"
|
+ os.path.basename(__file__)
|
||||||
|
+ " [-h|--help] |"
|
||||||
|
+ " [-m|--method <method>] [-N|--no-librenms] [-p|--pretty]"
|
||||||
|
+ " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help"
|
||||||
|
)
|
||||||
methods = ["sensors", "hpasmcli"]
|
methods = ["sensors", "hpasmcli"]
|
||||||
#costPerkWh = 0.15 # <<<< UNCOMMENT
|
# costPerkWh = 0.15 # <<<< UNCOMMENT
|
||||||
|
|
||||||
### General functions
|
### General functions
|
||||||
|
|
||||||
|
|
||||||
def errorMsg(message):
|
def errorMsg(message):
|
||||||
sys.stderr.write("ERROR: " + message + "\n")
|
sys.stderr.write("ERROR: " + message + "\n")
|
||||||
|
|
||||||
|
|
||||||
def usageError(message="Invalid argument"):
|
def usageError(message="Invalid argument"):
|
||||||
errorMsg(message)
|
errorMsg(message)
|
||||||
sys.stderr.write(usage + "\n")
|
sys.stderr.write(usage + "\n")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
def warningMsg(message):
|
def warningMsg(message):
|
||||||
if verbose or warnings:
|
if verbose or warnings:
|
||||||
sys.stderr.write("WARN: " + message + "\n")
|
sys.stderr.write("WARN: " + message + "\n")
|
||||||
|
|
||||||
|
|
||||||
def verboseMsg(message):
|
def verboseMsg(message):
|
||||||
if verbose:
|
if verbose:
|
||||||
sys.stderr.write("INFO: " + message + "\n")
|
sys.stderr.write("INFO: " + message + "\n")
|
||||||
|
|
||||||
|
|
||||||
def listMethods():
|
def listMethods():
|
||||||
global verbose
|
global verbose
|
||||||
verbose = True
|
verbose = True
|
||||||
verboseMsg("Available methods are: " + str(methods).strip('[]'))
|
verboseMsg("Available methods are: " + str(methods).strip("[]"))
|
||||||
|
|
||||||
|
|
||||||
### Data functions
|
### Data functions
|
||||||
|
|
||||||
|
|
||||||
def getData(method):
|
def getData(method):
|
||||||
if method == "sensors":
|
if method == "sensors":
|
||||||
data = getSensorData()
|
data = getSensorData()
|
||||||
|
|
||||||
elif method == "hpasmcli":
|
elif method == "hpasmcli":
|
||||||
data = getHPASMData()
|
data = getHPASMData()
|
||||||
@@ -132,6 +143,7 @@ def getData(method):
|
|||||||
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def getSensorData():
|
def getSensorData():
|
||||||
global error, errorString
|
global error, errorString
|
||||||
error = 2
|
error = 2
|
||||||
@@ -139,6 +151,7 @@ def getSensorData():
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
import sensors
|
import sensors
|
||||||
|
|
||||||
sensors.init()
|
sensors.init()
|
||||||
|
|
||||||
except ModuleNotFoundError as e:
|
except ModuleNotFoundError as e:
|
||||||
@@ -173,7 +186,7 @@ def getSensorData():
|
|||||||
error = 0
|
error = 0
|
||||||
errorString = ""
|
errorString = ""
|
||||||
|
|
||||||
junk, meter_id = chip_name.split('acpi-', 1)
|
junk, meter_id = chip_name.split("acpi-", 1)
|
||||||
sdata["meter"][meter_id] = {}
|
sdata["meter"][meter_id] = {}
|
||||||
|
|
||||||
for feature in chip:
|
for feature in chip:
|
||||||
@@ -192,91 +205,105 @@ def getSensorData():
|
|||||||
sdata[chip_name][feature_label] = feature.get_value()
|
sdata[chip_name][feature_label] = feature.get_value()
|
||||||
|
|
||||||
except:
|
except:
|
||||||
es = sys.exc_info()
|
es = sys.exc_info()
|
||||||
error = 1
|
error = 1
|
||||||
errorString = "Unable to get data: General exception: " + str(es)
|
errorString = "Unable to get data: General exception: " + str(es)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
sensors.cleanup()
|
sensors.cleanup()
|
||||||
return sdata
|
return sdata
|
||||||
|
|
||||||
|
|
||||||
def getHPASMData():
|
def getHPASMData():
|
||||||
global error, errorString
|
global error, errorString
|
||||||
|
|
||||||
exe = shutil.which('hpasmcli')
|
exe = shutil.which("hpasmcli")
|
||||||
#if not os.access(candidate, os.W_OK):
|
# if not os.access(candidate, os.W_OK):
|
||||||
cmd = [exe, '-s', 'show powermeter; show powersupply']
|
cmd = [exe, "-s", "show powermeter; show powersupply"]
|
||||||
warningMsg("hpasmcli only runs as root")
|
warningMsg("hpasmcli only runs as root")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
output = subprocess.run(cmd, capture_output=True, check=True, text=True, timeout=2)
|
output = subprocess.run(
|
||||||
|
cmd, capture_output=True, check=True, text=True, timeout=2
|
||||||
|
)
|
||||||
|
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
errorMsg(str(e) + ": " + str(e.stdout).strip('\n'))
|
errorMsg(str(e) + ": " + str(e.stdout).strip("\n"))
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
rawdata = str(output.stdout).replace('\t', ' ').replace('\n ', '\n').split('\n')
|
rawdata = str(output.stdout).replace("\t", " ").replace("\n ", "\n").split("\n")
|
||||||
|
|
||||||
hdata = {}
|
hdata = {}
|
||||||
hdata["meter"] = {}
|
hdata["meter"] = {}
|
||||||
hdata["psu"] = {}
|
hdata["psu"] = {}
|
||||||
|
|
||||||
re_meter = "^Power Meter #([0-9]+)"
|
re_meter = "^Power Meter #([0-9]+)"
|
||||||
re_meter_reading = "^Power Reading :"
|
re_meter_reading = "^Power Reading :"
|
||||||
re_psu = "^Power supply #[0-9]+"
|
re_psu = "^Power supply #[0-9]+"
|
||||||
re_psu_present = "^Present :"
|
re_psu_present = "^Present :"
|
||||||
re_psu_redundant = "^Redundant:"
|
re_psu_redundant = "^Redundant:"
|
||||||
re_psu_condition = "^Condition:"
|
re_psu_condition = "^Condition:"
|
||||||
re_psu_hotplug = "^Hotplug :"
|
re_psu_hotplug = "^Hotplug :"
|
||||||
re_psu_reading = "^Power :"
|
re_psu_reading = "^Power :"
|
||||||
|
|
||||||
for line in rawdata:
|
for line in rawdata:
|
||||||
if re.match(re_meter, line):
|
if re.match(re_meter, line):
|
||||||
verboseMsg("found power meter: " + line)
|
verboseMsg("found power meter: " + line)
|
||||||
junk, meter_id = line.split('#', 1)
|
junk, meter_id = line.split("#", 1)
|
||||||
hdata["meter"][meter_id] = {}
|
hdata["meter"][meter_id] = {}
|
||||||
|
|
||||||
elif re.match(re_meter_reading, line):
|
elif re.match(re_meter_reading, line):
|
||||||
verboseMsg("found power meter reading: " + line)
|
verboseMsg("found power meter reading: " + line)
|
||||||
junk, meter_reading = line.split(':', 1)
|
junk, meter_reading = line.split(":", 1)
|
||||||
hdata["meter"][meter_id]["reading"] = meter_reading.strip()
|
hdata["meter"][meter_id]["reading"] = meter_reading.strip()
|
||||||
|
|
||||||
elif re.match(re_psu, line):
|
elif re.match(re_psu, line):
|
||||||
verboseMsg("found power supply: " + line)
|
verboseMsg("found power supply: " + line)
|
||||||
junk, psu_id = line.split('#', 1)
|
junk, psu_id = line.split("#", 1)
|
||||||
hdata["psu"][psu_id] = {}
|
hdata["psu"][psu_id] = {}
|
||||||
|
|
||||||
elif re.match(re_psu_present, line):
|
elif re.match(re_psu_present, line):
|
||||||
verboseMsg("found power supply present: " + line)
|
verboseMsg("found power supply present: " + line)
|
||||||
junk, psu_present = line.split(':', 1)
|
junk, psu_present = line.split(":", 1)
|
||||||
hdata["psu"][psu_id]["present"] = psu_present.strip()
|
hdata["psu"][psu_id]["present"] = psu_present.strip()
|
||||||
|
|
||||||
elif re.match(re_psu_redundant, line):
|
elif re.match(re_psu_redundant, line):
|
||||||
verboseMsg("found power supply redundant: " + line)
|
verboseMsg("found power supply redundant: " + line)
|
||||||
junk, psu_redundant = line.split(':', 1)
|
junk, psu_redundant = line.split(":", 1)
|
||||||
hdata["psu"][psu_id]["redundant"] = psu_redundant.strip()
|
hdata["psu"][psu_id]["redundant"] = psu_redundant.strip()
|
||||||
|
|
||||||
elif re.match(re_psu_condition, line):
|
elif re.match(re_psu_condition, line):
|
||||||
verboseMsg("found power supply condition: " + line)
|
verboseMsg("found power supply condition: " + line)
|
||||||
junk, psu_condition = line.split(':', 1)
|
junk, psu_condition = line.split(":", 1)
|
||||||
hdata["psu"][psu_id]["condition"] = psu_condition.strip()
|
hdata["psu"][psu_id]["condition"] = psu_condition.strip()
|
||||||
|
|
||||||
elif re.match(re_psu_hotplug, line):
|
elif re.match(re_psu_hotplug, line):
|
||||||
verboseMsg("found power supply hotplug: " + line)
|
verboseMsg("found power supply hotplug: " + line)
|
||||||
junk, psu_hotplug = line.split(':', 1)
|
junk, psu_hotplug = line.split(":", 1)
|
||||||
hdata["psu"][psu_id]["hotplug"] = psu_hotplug.strip()
|
hdata["psu"][psu_id]["hotplug"] = psu_hotplug.strip()
|
||||||
|
|
||||||
elif re.match(re_psu_reading, line):
|
elif re.match(re_psu_reading, line):
|
||||||
verboseMsg("found power supply reading: " + line)
|
verboseMsg("found power supply reading: " + line)
|
||||||
junk, psu_reading = line.split(':', 1)
|
junk, psu_reading = line.split(":", 1)
|
||||||
hdata["psu"][psu_id]["reading"] = psu_reading.replace('Watts', '').strip()
|
hdata["psu"][psu_id]["reading"] = psu_reading.replace("Watts", "").strip()
|
||||||
|
|
||||||
return hdata
|
return hdata
|
||||||
|
|
||||||
|
|
||||||
# Argument Parsing
|
# Argument Parsing
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.gnu_getopt(
|
opts, args = getopt.gnu_getopt(
|
||||||
sys.argv[1:], 'm:hlNpvw', ['method', 'help', 'list-methods', 'no-librenms', 'pretty', 'verbose', 'warnings']
|
sys.argv[1:],
|
||||||
|
"m:hlNpvw",
|
||||||
|
[
|
||||||
|
"method",
|
||||||
|
"help",
|
||||||
|
"list-methods",
|
||||||
|
"no-librenms",
|
||||||
|
"pretty",
|
||||||
|
"verbose",
|
||||||
|
"warnings",
|
||||||
|
],
|
||||||
)
|
)
|
||||||
if len(args) != 0:
|
if len(args) != 0:
|
||||||
usageError("Unknown argument")
|
usageError("Unknown argument")
|
||||||
@@ -336,8 +363,8 @@ try:
|
|||||||
data["reading"] = data["meter"]["1"]["reading"]
|
data["reading"] = data["meter"]["1"]["reading"]
|
||||||
|
|
||||||
# Example 2 - sum the two power supplies and apply a power factor
|
# Example 2 - sum the two power supplies and apply a power factor
|
||||||
#pf = 0.95
|
# pf = 0.95
|
||||||
#data["reading"] = str( float(data["psu"]["1"]["reading"]) \
|
# data["reading"] = str( float(data["psu"]["1"]["reading"]) \
|
||||||
# + float(data["psu"]["2"]["reading"]) / pf )
|
# + float(data["psu"]["2"]["reading"]) / pf )
|
||||||
|
|
||||||
except:
|
except:
|
||||||
@@ -345,13 +372,13 @@ except:
|
|||||||
|
|
||||||
# Build result
|
# Build result
|
||||||
if librenms:
|
if librenms:
|
||||||
result['version']=version
|
result["version"] = version
|
||||||
result['error']=error
|
result["error"] = error
|
||||||
result['errorString']=errorString
|
result["errorString"] = errorString
|
||||||
result['data']=data
|
result["data"] = data
|
||||||
|
|
||||||
else:
|
else:
|
||||||
result=data
|
result = data
|
||||||
|
|
||||||
# Print result
|
# Print result
|
||||||
if pretty:
|
if pretty:
|
||||||
@@ -359,4 +386,3 @@ if pretty:
|
|||||||
|
|
||||||
else:
|
else:
|
||||||
print(json.dumps(result))
|
print(json.dumps(result))
|
||||||
|
|
||||||
|
@@ -1,17 +1,17 @@
 #!/usr/bin/env python3

 import json
-import yaml
 from os.path import isfile
 from time import time

+import yaml

 output = {}
-output['error'] = 0
+output["error"] = 0
-output['errorString'] = ""
+output["errorString"] = ""
-output['version'] = 1
+output["version"] = 1

-CONFIGFILE = '/etc/snmp/puppet.json'
+CONFIGFILE = "/etc/snmp/puppet.json"
 # optional config file
 # {
 # "agent": {
@@ -20,13 +20,15 @@ CONFIGFILE = '/etc/snmp/puppet.json'
 # }


-summary_files = ['/var/cache/puppet/state/last_run_summary.yaml',
-                 '/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml']
+summary_files = [
+    "/var/cache/puppet/state/last_run_summary.yaml",
+    "/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml",
+]


 def parse_yaml_file(filename):
     try:
-        yaml_data = yaml.load(open(filename, 'r'))
+        yaml_data = yaml.load(open(filename, "r"))
         msg = None
     except yaml.scanner.ScannerError as e:
         yaml_data = []
@@ -42,7 +44,7 @@ def time_processing(data):
     new_data = {}

     for k in data.keys():
-        if k == 'last_run':
+        if k == "last_run":
             # generate difference to last run (seconds)
             new_data[k] = round(time() - data[k])
             continue
@@ -53,36 +55,36 @@ def time_processing(data):

 def processing(data):
     new_data = {}
-    for k in ['changes', 'events', 'resources', 'version']:
+    for k in ["changes", "events", "resources", "version"]:
         new_data[k] = data[k]

-    new_data['time'] = time_processing(data['time'])
+    new_data["time"] = time_processing(data["time"])

     return new_data


 # extend last_run_summary_file list with optional custom file
 if isfile(CONFIGFILE):
-    with open(CONFIGFILE, 'r') as json_file:
+    with open(CONFIGFILE, "r") as json_file:
         try:
             configfile = json.load(json_file)
         except json.decoder.JSONDecodeError as e:
-            output['error'] = 1
+            output["error"] = 1
-            output['errorString'] = "Configfile Error: '%s'" % e
+            output["errorString"] = "Configfile Error: '%s'" % e
 else:
     configfile = None

-if not output['error'] and configfile:
+if not output["error"] and configfile:
     try:
-        if 'agent' in configfile.keys():
+        if "agent" in configfile.keys():
-            custom_summary_file = configfile['agent']['summary_file']
+            custom_summary_file = configfile["agent"]["summary_file"]
             summary_files.insert(0, custom_summary_file)
-    except KeyError:
+    except KeyError as e:
-        output['error'] = 1
+        output["error"] = 1
-        output['errorString'] = "Configfile Error: '%s'" % e
+        output["errorString"] = "Configfile Error: '%s'" % e

 # search existing summary file from list
-if not output['error']:
+if not output["error"]:
     summary_file = None
     for sum_file in summary_files:
         if isfile(sum_file):
@@ -90,17 +92,17 @@ if not output['error']:
             break

     if not summary_file:
-        output['error'] = 1
+        output["error"] = 1
-        output['errorString'] = "no puppet agent run summary file found"
+        output["errorString"] = "no puppet agent run summary file found"

 # open summary file
-if not output['error']:
+if not output["error"]:
     msg, data = parse_yaml_file(summary_file)

     if msg:
-        output['error'] = 1
+        output["error"] = 1
-        output['errorString'] = msg
+        output["errorString"] = msg

-    output['data'] = processing(data)
+    output["data"] = processing(data)

-print (json.dumps(output))
+print(json.dumps(output))
@@ -1,48 +1,61 @@
 #!/usr/bin/env python3

-import os
 import json
+import os

-CONFIGFILE = '/etc/snmp/pureftpd.json'
+CONFIGFILE = "/etc/snmp/pureftpd.json"

-pureftpwho_cmd = '/usr/sbin/pure-ftpwho'
+pureftpwho_cmd = "/usr/sbin/pure-ftpwho"
-pureftpwho_args = '-v -s -n'
+pureftpwho_args = "-v -s -n"


 output_data = {}
-output_data['version'] = 1
+output_data["version"] = 1
-output_data['errorString'] = ""
+output_data["errorString"] = ""
-output_data['error'] = 0
+output_data["error"] = 0


 if os.path.isfile(CONFIGFILE):
-    with open(CONFIGFILE, 'r') as json_file:
+    with open(CONFIGFILE, "r") as json_file:
         try:
             configfile = json.load(json_file)
         except json.decoder.JSONDecodeError as e:
-            output_data['error'] = 1
+            output_data["error"] = 1
-            output_data['errorString'] = "Configfile Error: '%s'" % e
+            output_data["errorString"] = "Configfile Error: '%s'" % e
 else:
     configfile = None

-if not output_data['error'] and configfile:
+if not output_data["error"] and configfile:
     try:
-        if 'pureftpwho_cmd' in configfile.keys():
+        if "pureftpwho_cmd" in configfile.keys():
-            pureftpwho_cmd = configfile['pureftpwho_cmd']
+            pureftpwho_cmd = configfile["pureftpwho_cmd"]
-    except KeyError:
+    except KeyError as e:
-        output_data['error'] = 1
+        output_data["error"] = 1
-        output_data['errorString'] = "Configfile Error: '%s'" % e
+        output_data["errorString"] = "Configfile Error: '%s'" % e


-output = os.popen(pureftpwho_cmd + ' ' + pureftpwho_args).read()
+output = os.popen(pureftpwho_cmd + " " + pureftpwho_args).read()

 data = {}

-for line in output.split('\n'):
+for line in output.split("\n"):
     if not len(line):
         continue

-    pid, acct, time, state, file, peer, local, port, transfered, total, percent, bandwidth = line.split('|')
+    (
+        pid,
+        acct,
+        time,
+        state,
+        file,
+        peer,
+        local,
+        port,
+        transfered,
+        total,
+        percent,
+        bandwidth,
+    ) = line.split("|")

     if "IDLE" in state:
         state = "IDLE"
@@ -54,13 +67,11 @@ for line in output.split('\n'):
     if acct not in data.keys():
         data[acct] = {}
     if state not in data[acct]:
-        data[acct][state] = {'bitrate': 0,
-                             'connections': 0
-                             }
+        data[acct][state] = {"bitrate": 0, "connections": 0}
     bandwidth_bit = int(bandwidth) * 1024 * 8
-    data[acct][state]['bitrate'] += bandwidth_bit
+    data[acct][state]["bitrate"] += bandwidth_bit
-    data[acct][state]['connections'] += 1
+    data[acct][state]["connections"] += 1

-output_data['data'] = data
+output_data["data"] = data

-print (json.dumps(output_data))
+print(json.dumps(output_data))
@@ -20,27 +20,27 @@ getStatusMJPG='codec_enabled MJPG'
 getStatusWMV9='codec_enabled WMV9'

 $picmd $getTemp | $pised 's|[^0-9.]||g'
-$picmd $getVoltsCore | $pised 's|[^0-9.]||g'
+$picmd "$getVoltsCore" | $pised 's|[^0-9.]||g'
-$picmd $getVoltsRamC | $pised 's|[^0-9.]||g'
+$picmd "$getVoltsRamC" | $pised 's|[^0-9.]||g'
-$picmd $getVoltsRamI | $pised 's|[^0-9.]||g'
+$picmd "$getVoltsRamI" | $pised 's|[^0-9.]||g'
-$picmd $getVoltsRamP | $pised 's|[^0-9.]||g'
+$picmd "$getVoltsRamP" | $pised 's|[^0-9.]||g'
-$picmd $getFreqArm | $pised 's/frequency([0-9]*)=//g'
+$picmd "$getFreqArm" | $pised 's/frequency([0-9]*)=//g'
-$picmd $getFreqCore | $pised 's/frequency([0-9]*)=//g'
+$picmd "$getFreqCore" | $pised 's/frequency([0-9]*)=//g'
-$picmd $getStatusH264 | $pised 's/H264=//g'
+$picmd "$getStatusH264" | $pised 's/H264=//g'
-$picmd $getStatusMPG2 | $pised 's/MPG2=//g'
+$picmd "$getStatusMPG2" | $pised 's/MPG2=//g'
-$picmd $getStatusWVC1 | $pised 's/WVC1=//g'
+$picmd "$getStatusWVC1" | $pised 's/WVC1=//g'
-$picmd $getStatusMPG4 | $pised 's/MPG4=//g'
+$picmd "$getStatusMPG4" | $pised 's/MPG4=//g'
-$picmd $getStatusMJPG | $pised 's/MJPG=//g'
+$picmd "$getStatusMJPG" | $pised 's/MJPG=//g'
-$picmd $getStatusWMV9 | $pised 's/WMV9=//g'
+$picmd "$getStatusWMV9" | $pised 's/WMV9=//g'
-$picmd $getStatusH264 | $pised 's/enabled/2/g'
+$picmd "$getStatusH264" | $pised 's/enabled/2/g'
-$picmd $getStatusMPG2 | $pised 's/enabled/2/g'
+$picmd "$getStatusMPG2" | $pised 's/enabled/2/g'
-$picmd $getStatusWVC1 | $pised 's/enabled/2/g'
+$picmd "$getStatusWVC1" | $pised 's/enabled/2/g'
-$picmd $getStatusMPG4 | $pised 's/enabled/2/g'
+$picmd "$getStatusMPG4" | $pised 's/enabled/2/g'
-$picmd $getStatusMJPG | $pised 's/enabled/2/g'
+$picmd "$getStatusMJPG" | $pised 's/enabled/2/g'
-$picmd $getStatusWMV9 | $pised 's/enabled/2/g'
+$picmd "$getStatusWMV9" | $pised 's/enabled/2/g'
-$picmd $getStatusH264 | $pised 's/disabled/1/g'
+$picmd "$getStatusH264" | $pised 's/disabled/1/g'
-$picmd $getStatusMPG2 | $pised 's/disabled/1/g'
+$picmd "$getStatusMPG2" | $pised 's/disabled/1/g'
-$picmd $getStatusWVC1 | $pised 's/disabled/1/g'
+$picmd "$getStatusWVC1" | $pised 's/disabled/1/g'
-$picmd $getStatusMPG4 | $pised 's/disabled/1/g'
+$picmd "$getStatusMPG4" | $pised 's/disabled/1/g'
-$picmd $getStatusMJPG | $pised 's/disabled/1/g'
+$picmd "$getStatusMJPG" | $pised 's/disabled/1/g'
-$picmd $getStatusWMV9 | $pised 's/disabled/1/g'
+$picmd "$getStatusWMV9" | $pised 's/disabled/1/g'
@@ -1,10 +1,14 @@
 #!/usr/bin/env python3

-import subprocess
 import json
+import subprocess

 shell_cmd = "redis-cli info"
-all_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n')
+all_data = (
+    subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE)
+    .stdout.read()
+    .split(b"\n")
+)

 version = 1
 error = 0
@@ -13,24 +17,24 @@ redis_data = {}

 # stdout list to json
 try:
-    category = ''
+    category = ""
     for d in all_data:
-        d = d.replace(b'\r', b'')
+        d = d.replace(b"\r", b"")

-        if d in [b'']:
+        if d in [b""]:
             continue

-        if d.startswith(b'#'):
+        if d.startswith(b"#"):
-            category = d.replace(b'# ', b'').decode("utf-8")
+            category = d.replace(b"# ", b"").decode("utf-8")
             redis_data[category] = {}
             continue

         if not len(category):
             error = 2
-            error_string = 'category not defined'
+            error_string = "category not defined"
             break

-        k, v = d.split(b':')
+        k, v = d.split(b":")
         k = k.decode("utf-8")
         v = v.decode("utf-8")

@@ -38,11 +42,13 @@ try:

 except:
     error = 1
-    error_string = 'data extracting error'
+    error_string = "data extracting error"

-output = {'version': version,
-          'error': error,
-          'errorString': error_string,
-          'data': redis_data}
+output = {
+    "version": version,
+    "error": error,
+    "errorString": error_string,
+    "data": redis_data,
+}

-print (json.dumps(output))
+print(json.dumps(output))
@@ -18,9 +18,9 @@
 #
 ##################################################################

-SDFSCLI_BIN=`which sdfscli`
+SDFSCLI_BIN=$(which sdfscli)
 SDFSCLI_CMD=' --volume-info'
-GREP_BIN=`which grep`
+GREP_BIN=$(which grep)
 GREP_CMD=' -o -E '
-DATAPOINTS=`$SDFSCLI_BIN $SDFSCLI_CMD | $GREP_BIN $GREP_CMD "(([0-9]+)\.?([0-9]+)?)"`
+DATAPOINTS=$($SDFSCLI_BIN "$SDFSCLI_CMD" | $GREP_BIN "$GREP_CMD" "(([0-9]+)\.?([0-9]+)?)")
-echo $DATAPOINTS
+echo "$DATAPOINTS"
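The config-file handling above (puppet and pureftpd hunks) also changes "except KeyError:" to "except KeyError as e:"; the message formatting in the handler references e, which would otherwise be unbound when that branch runs. A short sketch of the corrected pattern, with a stand-in config dict:

# Sketch of the corrected exception handling from the hunks above: bind the
# exception so it can be used in the error message. "configfile" is a stand-in.
configfile = {}
output_data = {"error": 0, "errorString": ""}

try:
    pureftpwho_cmd = configfile["pureftpwho_cmd"]
except KeyError as e:  # without "as e", the line below would hit an unbound name
    output_data["error"] = 1
    output_data["errorString"] = "Configfile Error: '%s'" % e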
132 snmp/seafile.py
@@ -14,9 +14,10 @@
|
|||||||
# Clients -> plattform (count)
|
# Clients -> plattform (count)
|
||||||
# Clients -> version (count)
|
# Clients -> version (count)
|
||||||
|
|
||||||
import requests
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
# Configfile content example:
|
# Configfile content example:
|
||||||
# {"url": "https://seafile.mydomain.org",
|
# {"url": "https://seafile.mydomain.org",
|
||||||
# "username": "some_admin_login@mail.address",
|
# "username": "some_admin_login@mail.address",
|
||||||
@@ -25,65 +26,65 @@ import json
|
|||||||
# "hide_monitoring_account": true
|
# "hide_monitoring_account": true
|
||||||
# }
|
# }
|
||||||
|
|
||||||
CONFIGFILE='/etc/snmp/seafile.json'
|
CONFIGFILE = "/etc/snmp/seafile.json"
|
||||||
error = 0
|
error = 0
|
||||||
error_string = ''
|
error_string = ""
|
||||||
version = 1
|
version = 1
|
||||||
|
|
||||||
|
|
||||||
def get_data(url_path, data=None, token=None):
|
def get_data(url_path, data=None, token=None):
|
||||||
complete_url = "%s/%s" % (url, url_path)
|
complete_url = "%s/%s" % (url, url_path)
|
||||||
headers = {'Accept': 'application/json'}
|
headers = {"Accept": "application/json"}
|
||||||
if token:
|
if token:
|
||||||
headers['Authorization'] = "Token %s" % token
|
headers["Authorization"] = "Token %s" % token
|
||||||
|
|
||||||
|
try:
|
||||||
|
if token:
|
||||||
|
r = requests.get(complete_url, data=data, headers=headers)
|
||||||
|
else:
|
||||||
|
r = requests.post(complete_url, data=data, headers=headers)
|
||||||
try:
|
try:
|
||||||
if token:
|
return r.json()
|
||||||
r = requests.get(complete_url, data=data, headers=headers)
|
except json.decoder.JSONDecodeError:
|
||||||
else:
|
return "no valid json returned - url correct?"
|
||||||
r = requests.post(complete_url, data=data, headers=headers)
|
except requests.exceptions.RequestException as err:
|
||||||
try:
|
return str(err)
|
||||||
return r.json()
|
|
||||||
except json.decoder.JSONDecodeError:
|
|
||||||
return 'no valid json returned - url correct?'
|
|
||||||
except requests.exceptions.RequestException as err:
|
|
||||||
return str(err)
|
|
||||||
|
|
||||||
|
|
||||||
def get_devices():
|
def get_devices():
|
||||||
# get all devices
|
# get all devices
|
||||||
url_path = 'api/v2.1/admin/devices/'
|
url_path = "api/v2.1/admin/devices/"
|
||||||
return get_data(url_path, token=token)
|
return get_data(url_path, token=token)
|
||||||
|
|
||||||
|
|
||||||
def get_groups():
|
def get_groups():
|
||||||
# get all groups
|
# get all groups
|
||||||
url_path = 'api/v2.1/admin/groups/'
|
url_path = "api/v2.1/admin/groups/"
|
||||||
return get_data(url_path, token=token)
|
return get_data(url_path, token=token)
|
||||||
|
|
||||||
|
|
||||||
def get_sysinfo():
|
def get_sysinfo():
|
||||||
# get all groups
|
# get all groups
|
||||||
url_path = 'api/v2.1/admin/sysinfo/'
|
url_path = "api/v2.1/admin/sysinfo/"
|
||||||
return get_data(url_path, token=token)
|
return get_data(url_path, token=token)
|
||||||
|
|
||||||
|
|
||||||
def get_account_information():
|
def get_account_information():
|
||||||
# get all accounts withs details
|
# get all accounts withs details
|
||||||
account_list = []
|
account_list = []
|
||||||
for account in get_data('api2/accounts/', token=token):
|
for account in get_data("api2/accounts/", token=token):
|
||||||
|
|
||||||
# get account details
|
# get account details
|
||||||
url_path = 'api2/accounts/%s/' % account['email']
|
url_path = "api2/accounts/%s/" % account["email"]
|
||||||
account_data = get_data(url_path, token=token)
|
account_data = get_data(url_path, token=token)
|
||||||
|
|
||||||
# get libraries by owner
|
# get libraries by owner
|
||||||
url_path = 'api/v2.1/admin/libraries/?owner=%s' % account['email']
|
url_path = "api/v2.1/admin/libraries/?owner=%s" % account["email"]
|
||||||
account_data['repos'] = get_data(url_path, token=token)['repos']
|
account_data["repos"] = get_data(url_path, token=token)["repos"]
|
||||||
|
|
||||||
# get deleted libraries by owner
|
# get deleted libraries by owner
|
||||||
url_path = 'api/v2.1/admin/trash-libraries/?owner=%s' % account['email']
|
url_path = "api/v2.1/admin/trash-libraries/?owner=%s" % account["email"]
|
||||||
account_data['trash_repos'] = get_data(url_path, token=token)['repos']
|
account_data["trash_repos"] = get_data(url_path, token=token)["repos"]
|
||||||
|
|
||||||
account_list.append(account_data)
|
account_list.append(account_data)
|
||||||
return account_list
|
return account_list
|
||||||
@@ -96,55 +97,53 @@ def resort_devices(device_list):
|
|||||||
for device in device_list:
|
for device in device_list:
|
||||||
# don't list information assigned to monitor account
|
# don't list information assigned to monitor account
|
||||||
if hide_monitoring_account:
|
if hide_monitoring_account:
|
||||||
if device['user'] == configfile['username']:
|
if device["user"] == configfile["username"]:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if device['platform'] not in platform.keys():
|
if device["platform"] not in platform.keys():
|
||||||
platform[device['platform']] = 1
|
platform[device["platform"]] = 1
|
||||||
else:
|
else:
|
||||||
platform[device['platform']] += 1
|
platform[device["platform"]] += 1
|
||||||
|
|
||||||
if device['client_version'] not in client_version.keys():
|
if device["client_version"] not in client_version.keys():
|
||||||
client_version[device['client_version']] = 1
|
client_version[device["client_version"]] = 1
|
||||||
else:
|
else:
|
||||||
client_version[device['client_version']] += 1
|
client_version[device["client_version"]] += 1
|
||||||
|
|
||||||
data['platform'] = []
|
data["platform"] = []
|
||||||
for k, v in platform.items():
|
for k, v in platform.items():
|
||||||
data['platform'].append({'os_name': k,
|
data["platform"].append({"os_name": k, "clients": v})
|
||||||
'clients':v})
|
data["client_version"] = []
|
||||||
data['client_version'] = []
|
|
||||||
for k, v in client_version.items():
|
for k, v in client_version.items():
|
||||||
data['client_version'].append({'client_version': k,
|
data["client_version"].append({"client_version": k, "clients": v})
|
||||||
'clients':v})
|
|
||||||
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def resort_groups(group_list):
|
def resort_groups(group_list):
|
||||||
data = {'count': len(group_list)}
|
data = {"count": len(group_list)}
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def resort_accounts(account_list):
|
def resort_accounts(account_list):
|
||||||
if account_identifier in ['name', 'email']:
|
if account_identifier in ["name", "email"]:
|
||||||
identifier = account_identifier
|
identifier = account_identifier
|
||||||
else:
|
else:
|
||||||
identifier = 'name'
|
identifier = "name"
|
||||||
|
|
||||||
accepted_key_list = ['is_active', 'usage']
|
accepted_key_list = ["is_active", "usage"]
|
||||||
|
|
||||||
data = []
|
data = []
|
||||||
for user_account in account_list:
|
for user_account in account_list:
|
||||||
# don't list information assigned to monitor account
|
# don't list information assigned to monitor account
|
||||||
if hide_monitoring_account:
|
if hide_monitoring_account:
|
||||||
if user_account['email'] == configfile['username']:
|
if user_account["email"] == configfile["username"]:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
new_account = {}
|
new_account = {}
|
||||||
new_account['owner'] = user_account[identifier]
|
new_account["owner"] = user_account[identifier]
|
||||||
new_account['repos'] = len(user_account['repos'])
|
new_account["repos"] = len(user_account["repos"])
|
||||||
new_account['trash_repos'] = len(user_account['trash_repos'])
|
new_account["trash_repos"] = len(user_account["trash_repos"])
|
||||||
|
|
||||||
for k in user_account.keys():
|
for k in user_account.keys():
|
||||||
if k not in accepted_key_list:
|
if k not in accepted_key_list:
|
||||||
@@ -152,11 +151,11 @@ def resort_accounts(account_list):
|
|||||||
new_account[k] = user_account[k]
|
new_account[k] = user_account[k]
|
||||||
data.append(new_account)
|
data.append(new_account)
|
||||||
|
|
||||||
return sorted(data, key=lambda k: k['owner'].lower())
|
return sorted(data, key=lambda k: k["owner"].lower())
|
||||||
|
|
||||||
|
|
||||||
# ------------------------ MAIN --------------------------------------------------------
|
# ------------------------ MAIN --------------------------------------------------------
|
||||||
with open(CONFIGFILE, 'r') as json_file:
|
with open(CONFIGFILE, "r") as json_file:
|
||||||
try:
|
try:
|
||||||
configfile = json.load(json_file)
|
configfile = json.load(json_file)
|
||||||
except json.decoder.JSONDecodeError as e:
|
except json.decoder.JSONDecodeError as e:
|
||||||
@@ -164,24 +163,24 @@ with open(CONFIGFILE, 'r') as json_file:
|
|||||||
error_string = "Configfile Error: '%s'" % e
|
error_string = "Configfile Error: '%s'" % e
|
||||||
|
|
||||||
if not error:
|
if not error:
|
||||||
url = configfile['url']
|
url = configfile["url"]
|
||||||
username = configfile['username']
|
username = configfile["username"]
|
||||||
password = configfile['password']
|
password = configfile["password"]
|
||||||
try:
|
try:
|
||||||
account_identifier = configfile['account_identifier']
|
account_identifier = configfile["account_identifier"]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
account_identifier = None
|
account_identifier = None
|
||||||
try:
|
try:
|
||||||
hide_monitoring_account = configfile['hide_monitoring_account']
|
hide_monitoring_account = configfile["hide_monitoring_account"]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
hide_monitoring_account = False
|
hide_monitoring_account = False
|
||||||
|
|
||||||
# get token
|
# get token
|
||||||
login_data = {'username': username, 'password': password}
|
login_data = {"username": username, "password": password}
|
||||||
ret = get_data('api2/auth-token/', data=login_data)
|
ret = get_data("api2/auth-token/", data=login_data)
|
||||||
if type(ret) != str:
|
if type(ret) != str:
|
||||||
if 'token' in ret.keys():
|
if "token" in ret.keys():
|
||||||
token = ret['token']
|
token = ret["token"]
|
||||||
else:
|
else:
|
||||||
error = 1
|
error = 1
|
||||||
try:
|
try:
|
||||||
@@ -194,18 +193,13 @@ if not error:
|
|||||||
|
|
||||||
data = {}
|
data = {}
|
||||||
if not error:
|
if not error:
|
||||||
ret= get_account_information()
|
ret = get_account_information()
|
||||||
if not error:
|
if not error:
|
||||||
data['accounts'] = resort_accounts(ret)
|
data["accounts"] = resort_accounts(ret)
|
||||||
data['devices'] = resort_devices(get_devices()['devices'])
|
data["devices"] = resort_devices(get_devices()["devices"])
|
||||||
data['groups'] = resort_groups(get_groups()['groups'])
|
data["groups"] = resort_groups(get_groups()["groups"])
|
||||||
data['sysinfo'] = get_sysinfo()
|
data["sysinfo"] = get_sysinfo()
|
||||||
|
|
||||||
output = {'error': error,
|
output = {"error": error, "errorString": error_string, "version": version, "data": data}
|
||||||
'errorString': error_string,
|
|
||||||
'version': version,
|
|
||||||
'data': data
|
|
||||||
}
|
|
||||||
|
|
||||||
print(json.dumps(output))
|
print(json.dumps(output))
|
||||||
|
|
||||||
|
@@ -18,105 +18,113 @@
|
|||||||
///
|
///
|
||||||
///////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
|
||||||
// START SETTINGS ///
|
// START SETTINGS ///
|
||||||
|
|
||||||
$config = "/opt/librenms/scripts/shoutcast.conf";
|
$config = '/opt/librenms/scripts/shoutcast.conf';
|
||||||
$cache = "/opt/librenms/scripts/shoutcast.cache";
|
$cache = '/opt/librenms/scripts/shoutcast.cache';
|
||||||
|
|
||||||
// END SETTINGS ///
|
// END SETTINGS ///
|
||||||
|
|
||||||
|
|
||||||
///
|
///
|
||||||
// DO NOT EDIT BENETH THIS LINE
|
// DO NOT EDIT BENETH THIS LINE
|
||||||
///
|
///
|
||||||
///////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
/* Do NOT run this script through a web browser */
|
/* Do NOT run this script through a web browser */
|
||||||
if (!isset($_SERVER["argv"][0]) || isset($_SERVER['REQUEST_METHOD']) || isset($_SERVER['REMOTE_ADDR'])) {
|
if (!isset($_SERVER['argv'][0]) || isset($_SERVER['REQUEST_METHOD']) || isset($_SERVER['REMOTE_ADDR'])) {
|
||||||
die('<span style="color: #880000; text-weight: bold; font-size: 1.3em;">This script is only meant to run at the command line.</span>');
|
exit('<span style="color: #880000; text-weight: bold; font-size: 1.3em;">This script is only meant to run at the command line.</span>');
|
||||||
}
|
}
|
||||||
|
|
||||||
$cmd = (isset($_SERVER['argv'][1]) ? $_SERVER['argv'][1] : "");
|
|
||||||
|
|
||||||
function get_data($host, $port) {
|
$cmd = (isset($_SERVER['argv'][1]) ? $_SERVER['argv'][1] : '');
|
||||||
$fp = @fsockopen($host, $port, $errno, $errstr, 5);
|
|
||||||
if(!$fp) { $connect = 0; }
|
|
||||||
if (!isset($connect)) {
|
|
||||||
fputs($fp, "GET /7.html HTTP/1.0\r\n"
|
|
||||||
. "User-Agent: All In One - SHOUTcast Stats Parser"
|
|
||||||
. " (Mozilla Compatible)\r\n\r\n");
|
|
||||||
while (!feof($fp)) {
|
|
||||||
$rawdata = fgets($fp, 1024);
|
|
||||||
}
|
|
||||||
fclose($fp);
|
|
||||||
}
|
|
||||||
preg_match('/body>(.*)<\/body/', $rawdata, $matches);
|
|
||||||
$res = explode(',', $matches[1], 7);
|
|
||||||
$res[7] = $host;
|
|
||||||
$res[8] = $port;
|
|
||||||
return $res;
|
|
||||||
}
|
|
||||||
|
|
||||||
function get_list($config) {
|
|
||||||
if (file_exists($config)) {
|
|
||||||
$servers = file($config);
|
|
||||||
$data = array();
|
|
||||||
foreach ($servers as $item=>$server) {
|
|
||||||
list($host, $port) = explode(":", $server, 2);
|
|
||||||
array_push($data, get_data(trim($host), trim($port)));
|
|
||||||
}
|
|
||||||
return $data;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function doSNMPv2($vars) {
|
|
||||||
$res = array();
|
|
||||||
foreach ($vars as $items=>$server) {
|
|
||||||
$var = array();
|
|
||||||
$var['bitrate'] = (isset($server['5']) ? (($server['5'] / 8) * 1000) : "0");
|
|
||||||
//$var['bitrate'] = (isset($server['5']) ? ($server['5'] * 1024) : "0");
|
|
||||||
$var['traf_in'] = (isset($server['1']) ? ($var['bitrate'] * $server['1']) : "0");
|
|
||||||
$var['traf_out'] = (isset($server['0']) ? ($var['bitrate'] * $server['0']) : "0");
|
|
||||||
$var['current'] = (isset($server['0']) ? $server['0'] : "0");
|
|
||||||
$var['status'] = (isset($server['1']) ? $server['1'] : "0");
|
|
||||||
$var['peak'] = (isset($server['2']) ? $server['2'] : "0");
|
|
||||||
$var['max'] = (isset($server['3']) ? $server['3'] : "0");
|
|
||||||
$var['unique'] = (isset($server['4']) ? $server['4'] : "0");
|
|
||||||
$host = (isset($server['7']) ? $server['7'] : "unknown");
|
|
||||||
$port = (isset($server['8']) ? $server['8'] : "unknown");
|
|
||||||
$tmp = $host.":".$port;
|
|
||||||
foreach ($var as $item=>$value) {
|
|
||||||
$tmp .= ";".$value;
|
|
||||||
}
|
|
||||||
array_push($res, $tmp);
|
|
||||||
}
|
|
||||||
return $res;
|
|
||||||
}
|
|
||||||
|
|
||||||
function makeCacheFile($data, $cache) {
|
function get_data($host, $port)
|
||||||
$fp = fopen($cache, 'w');
|
{
|
||||||
foreach ($data as $item=>$value) {
|
$fp = @fsockopen($host, $port, $errno, $errstr, 5);
|
||||||
fwrite($fp, $value."\n");
|
if (!$fp) {
|
||||||
}
|
$connect = 0;
|
||||||
fclose($fp);
|
}
|
||||||
}
|
if (!isset($connect)) {
|
||||||
|
fputs($fp, "GET /7.html HTTP/1.0\r\n"
|
||||||
function readCacheFile($cache) {
|
.'User-Agent: All In One - SHOUTcast Stats Parser'
|
||||||
if (file_exists($cache)) {
|
." (Mozilla Compatible)\r\n\r\n");
|
||||||
$data = file($cache);
|
while (!feof($fp)) {
|
||||||
foreach ($data as $item=>$value) {
|
$rawdata = fgets($fp, 1024);
|
||||||
echo trim($value)."\n";
|
}
|
||||||
}
|
fclose($fp);
|
||||||
}
|
}
|
||||||
}
|
preg_match('/body>(.*)<\/body/', $rawdata, $matches);
|
||||||
|
$res = explode(',', $matches[1], 7);
|
||||||
|
$res[7] = $host;
|
||||||
|
$res[8] = $port;
|
||||||
|
|
||||||
if ($cmd == "makeCache") {
|
return $res;
|
||||||
$servers = get_list($config);
|
}
|
||||||
$data = doSNMPv2($servers);
|
|
||||||
makeCacheFile($data, $cache);
|
function get_list($config)
|
||||||
} else {
|
{
|
||||||
readCacheFile($cache);
|
if (file_exists($config)) {
|
||||||
}
|
$servers = file($config);
|
||||||
|
$data = [];
|
||||||
|
foreach ($servers as $item=>$server) {
|
||||||
|
list($host, $port) = explode(':', $server, 2);
|
||||||
|
array_push($data, get_data(trim($host), trim($port)));
|
||||||
|
}
|
||||||
|
|
||||||
|
return $data;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function doSNMPv2($vars)
|
||||||
|
{
|
||||||
|
$res = [];
|
||||||
|
foreach ($vars as $items=>$server) {
|
||||||
|
$var = [];
|
||||||
|
$var['bitrate'] = (isset($server['5']) ? (($server['5'] / 8) * 1000) : '0');
|
||||||
|
//$var['bitrate'] = (isset($server['5']) ? ($server['5'] * 1024) : "0");
|
||||||
|
$var['traf_in'] = (isset($server['1']) ? ($var['bitrate'] * $server['1']) : '0');
|
||||||
|
$var['traf_out'] = (isset($server['0']) ? ($var['bitrate'] * $server['0']) : '0');
|
||||||
|
$var['current'] = (isset($server['0']) ? $server['0'] : '0');
|
||||||
|
$var['status'] = (isset($server['1']) ? $server['1'] : '0');
|
||||||
|
$var['peak'] = (isset($server['2']) ? $server['2'] : '0');
|
||||||
|
$var['max'] = (isset($server['3']) ? $server['3'] : '0');
|
||||||
|
$var['unique'] = (isset($server['4']) ? $server['4'] : '0');
|
||||||
|
$host = (isset($server['7']) ? $server['7'] : 'unknown');
|
||||||
|
$port = (isset($server['8']) ? $server['8'] : 'unknown');
|
||||||
|
$tmp = $host.':'.$port;
|
||||||
|
foreach ($var as $item=>$value) {
|
||||||
|
$tmp .= ';'.$value;
|
||||||
|
}
|
||||||
|
array_push($res, $tmp);
|
||||||
|
}
|
||||||
|
|
||||||
|
return $res;
|
||||||
|
}
|
||||||
|
|
||||||
|
function makeCacheFile($data, $cache)
|
||||||
|
{
|
||||||
|
$fp = fopen($cache, 'w');
|
||||||
|
foreach ($data as $item=> $value) {
|
||||||
|
fwrite($fp, $value."\n");
|
||||||
|
}
|
||||||
|
fclose($fp);
|
||||||
|
}
|
||||||
|
|
||||||
|
function readCacheFile($cache)
|
||||||
|
{
|
||||||
|
if (file_exists($cache)) {
|
||||||
|
$data = file($cache);
|
||||||
|
foreach ($data as $item=>$value) {
|
||||||
|
echo trim($value)."\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($cmd == 'makeCache') {
|
||||||
|
$servers = get_list($config);
|
||||||
|
$data = doSNMPv2($servers);
|
||||||
|
makeCacheFile($data, $cache);
|
||||||
|
} else {
|
||||||
|
readCacheFile($cache);
|
||||||
|
}
|
||||||
|
|
||||||
?>
|
?>
|
||||||
|
@@ -17,13 +17,13 @@ BIN_GREP='/usr/bin/grep'
 ################################################################
 # Don't change anything unless you know what are you doing #
 ################################################################
-TMP=`$BIN_APCS 2>/dev/null`
+TMP=$($BIN_APCS 2>/dev/null)

 for value in "LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+"
 do
-    OUT=`echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo $value | $BIN_CUT -d ":" -f 2`
+    OUT=$(echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo "$value" | $BIN_CUT -d ":" -f 2)
     if [ -n "$OUT" ]; then
-        echo $OUT
+        echo "$OUT"
     else
         echo "Unknown"
     fi
@@ -16,9 +16,9 @@ TMP=$(upsc $UPS_NAME 2>/dev/null)

 for value in "battery\.charge: [0-9.]+" "battery\.(runtime\.)?low: [0-9]+" "battery\.runtime: [0-9]+" "battery\.voltage: [0-9.]+" "battery\.voltage\.nominal: [0-9]+" "input\.voltage\.nominal: [0-9.]+" "input\.voltage: [0-9.]+" "ups\.load: [0-9.]+"
 do
-    OUT=$(echo $TMP | grep -Eo "$value" | awk '{print $2}' | LANG=C sort | head -n 1)
+    OUT=$(echo "$TMP" | grep -Eo "$value" | awk '{print $2}' | LANG=C sort | head -n 1)
     if [ -n "$OUT" ]; then
-        echo $OUT
+        echo "$OUT"
     else
         echo "Unknown"
     fi
@@ -26,11 +26,11 @@ done

 for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD"
 do
-    UNKNOWN=$(echo $TMP | grep -Eo "ups\.status:")
+    UNKNOWN=$(echo "$TMP" | grep -Eo "ups\.status:")
     if [ -z "$UNKNOWN" ]; then
         echo "Unknown"
     else
-        OUT=$(echo $TMP | grep -Eo "$value")
+        OUT=$(echo "$TMP" | grep -Eo "$value")
         if [ -n "$OUT" ]; then
             echo "1"
         else
@@ -5,9 +5,9 @@ used_memory=$(ps -C voipmonitor -o rsz | awk 'FNR==2 {print}')
 cpu_load=$(ps -C voipmonitor -o %cpu | awk 'FNR==2 {print}')

 pid=$(pidof voipmonitor)
-total_files=$(ls -l /proc/${pid}/fd | wc -l)
+total_files=$(ls -l /proc/"${pid}"/fd | wc -l)

-echo "Used Memory="$used_memory
+echo "Used Memory=""$used_memory"
-echo "CPU Load="$cpu_load
+echo "CPU Load=""$cpu_load"
-echo "Open files="$total_files
+echo "Open files=""$total_files"
 exit
||||||
|
@@ -5,124 +5,187 @@
 import json
 import subprocess

-SYSCTL = '/sbin/sysctl'
-ZPOOL = '/usr/local/sbin/zpool'
+SYSCTL = "/sbin/sysctl"
+ZPOOL = "/usr/local/sbin/zpool"

+
 def percent(numerator, denominator, default=0):
     try:
         return numerator / denominator * 100
     except ZeroDivisionError:
         return default

+
 def main(args):
-    p = subprocess.run([SYSCTL, '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True)
+    p = subprocess.run(
+        [SYSCTL, "-q", "kstat.zfs", "vfs.zfs"],
+        stdout=subprocess.PIPE,
+        universal_newlines=True,
+    )

     if p.returncode != 0:
         return p.returncode

     def chomp(line):
-        bits = [b.strip() for b in line.split(':')]
+        bits = [b.strip() for b in line.split(":")]
         try:
             return bits[0], int(bits[1])
         except ValueError:
             return bits[0], bits[1]

-    stats = dict(chomp(l) for l in p.stdout.splitlines() if l)
-    if 'kstat.zfs.misc.arcstats.recycle_miss' not in stats:
-        stats['kstat.zfs.misc.arcstats.recycle_miss'] = 0
+    stats = dict(chomp(line) for line in p.stdout.splitlines() if line)
+    if "kstat.zfs.misc.arcstats.recycle_miss" not in stats:
+        stats["kstat.zfs.misc.arcstats.recycle_miss"] = 0

     output = dict()

     # ARC misc
-    output['deleted'] = stats['kstat.zfs.misc.arcstats.deleted']
-    output['evict_skip'] = stats['kstat.zfs.misc.arcstats.evict_skip']
-    output['mutex_skip'] = stats['kstat.zfs.misc.arcstats.mutex_miss']
-    output['recycle_miss'] = stats['kstat.zfs.misc.arcstats.recycle_miss']
+    output["deleted"] = stats["kstat.zfs.misc.arcstats.deleted"]
+    output["evict_skip"] = stats["kstat.zfs.misc.arcstats.evict_skip"]
+    output["mutex_skip"] = stats["kstat.zfs.misc.arcstats.mutex_miss"]
+    output["recycle_miss"] = stats["kstat.zfs.misc.arcstats.recycle_miss"]

     # ARC size
-    output['target_size_per'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100
-    output['arc_size_per'] = stats['kstat.zfs.misc.arcstats.size'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100
-    output['target_size_arat'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max']
-    output['min_size_per'] = stats['kstat.zfs.misc.arcstats.c_min'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100
+    output["target_size_per"] = (
+        stats["kstat.zfs.misc.arcstats.c"]
+        / stats["kstat.zfs.misc.arcstats.c_max"]
+        * 100
+    )
+    output["arc_size_per"] = (
+        stats["kstat.zfs.misc.arcstats.size"]
+        / stats["kstat.zfs.misc.arcstats.c_max"]
+        * 100
+    )
+    output["target_size_arat"] = (
+        stats["kstat.zfs.misc.arcstats.c"] / stats["kstat.zfs.misc.arcstats.c_max"]
+    )
+    output["min_size_per"] = (
+        stats["kstat.zfs.misc.arcstats.c_min"]
+        / stats["kstat.zfs.misc.arcstats.c_max"]
+        * 100
+    )

-    output['arc_size'] = stats['kstat.zfs.misc.arcstats.size']
-    output['target_size_max'] = stats['kstat.zfs.misc.arcstats.c_max']
-    output['target_size_min'] = stats['kstat.zfs.misc.arcstats.c_min']
-    output['target_size'] = stats['kstat.zfs.misc.arcstats.c']
+    output["arc_size"] = stats["kstat.zfs.misc.arcstats.size"]
+    output["target_size_max"] = stats["kstat.zfs.misc.arcstats.c_max"]
+    output["target_size_min"] = stats["kstat.zfs.misc.arcstats.c_min"]
+    output["target_size"] = stats["kstat.zfs.misc.arcstats.c"]

     # ARC size breakdown
-    output['mfu_size'] = stats['kstat.zfs.misc.arcstats.size'] - stats['kstat.zfs.misc.arcstats.p']
-    output['p'] = stats['kstat.zfs.misc.arcstats.p']
-    output['rec_used_per'] = stats['kstat.zfs.misc.arcstats.p'] / stats['kstat.zfs.misc.arcstats.size'] * 100
-    output['freq_used_per'] = output['mfu_size'] / stats['kstat.zfs.misc.arcstats.size'] * 100
+    output["mfu_size"] = (
+        stats["kstat.zfs.misc.arcstats.size"] - stats["kstat.zfs.misc.arcstats.p"]
+    )
+    output["p"] = stats["kstat.zfs.misc.arcstats.p"]
+    output["rec_used_per"] = (
+        stats["kstat.zfs.misc.arcstats.p"] / stats["kstat.zfs.misc.arcstats.size"] * 100
+    )
+    output["freq_used_per"] = (
+        output["mfu_size"] / stats["kstat.zfs.misc.arcstats.size"] * 100
+    )

     # ARC misc efficiency stats
-    output['arc_hits'] = stats['kstat.zfs.misc.arcstats.hits']
-    output['arc_misses'] = stats['kstat.zfs.misc.arcstats.misses']
-    output['demand_data_hits'] = stats['kstat.zfs.misc.arcstats.demand_data_hits']
-    output['demand_data_misses'] = stats['kstat.zfs.misc.arcstats.demand_data_misses']
-    output['demand_meta_hits'] = stats['kstat.zfs.misc.arcstats.demand_metadata_hits']
-    output['demand_meta_misses'] = stats['kstat.zfs.misc.arcstats.demand_metadata_misses']
-    output['mfu_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mfu_ghost_hits']
-    output['mfu_hits'] = stats['kstat.zfs.misc.arcstats.mfu_hits']
-    output['mru_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mru_ghost_hits']
-    output['mru_hits'] = stats['kstat.zfs.misc.arcstats.mru_hits']
-    output['pre_data_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_data_hits']
-    output['pre_data_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_data_misses']
-    output['pre_meta_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_hits']
-    output['pre_meta_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_misses']
+    output["arc_hits"] = stats["kstat.zfs.misc.arcstats.hits"]
+    output["arc_misses"] = stats["kstat.zfs.misc.arcstats.misses"]
+    output["demand_data_hits"] = stats["kstat.zfs.misc.arcstats.demand_data_hits"]
+    output["demand_data_misses"] = stats["kstat.zfs.misc.arcstats.demand_data_misses"]
+    output["demand_meta_hits"] = stats["kstat.zfs.misc.arcstats.demand_metadata_hits"]
+    output["demand_meta_misses"] = stats[
+        "kstat.zfs.misc.arcstats.demand_metadata_misses"
+    ]
+    output["mfu_ghost_hits"] = stats["kstat.zfs.misc.arcstats.mfu_ghost_hits"]
+    output["mfu_hits"] = stats["kstat.zfs.misc.arcstats.mfu_hits"]
+    output["mru_ghost_hits"] = stats["kstat.zfs.misc.arcstats.mru_ghost_hits"]
+    output["mru_hits"] = stats["kstat.zfs.misc.arcstats.mru_hits"]
+    output["pre_data_hits"] = stats["kstat.zfs.misc.arcstats.prefetch_data_hits"]
+    output["pre_data_misses"] = stats["kstat.zfs.misc.arcstats.prefetch_data_misses"]
+    output["pre_meta_hits"] = stats["kstat.zfs.misc.arcstats.prefetch_metadata_hits"]
+    output["pre_meta_misses"] = stats[
+        "kstat.zfs.misc.arcstats.prefetch_metadata_misses"
+    ]

-    output['anon_hits'] = output['arc_hits'] - (output['mfu_hits'] + output['mru_hits'] + output['mfu_ghost_hits'] + output['mru_ghost_hits'])
-    output['arc_accesses_total'] = output['arc_hits'] + output['arc_misses']
-    output['demand_data_total'] = output['demand_data_hits'] + output['demand_data_misses']
-    output['pre_data_total'] = output['pre_data_hits'] + output['pre_data_misses']
-    output['real_hits'] = output['mfu_hits'] + output['mru_hits']
+    output["anon_hits"] = output["arc_hits"] - (
+        output["mfu_hits"]
+        + output["mru_hits"]
+        + output["mfu_ghost_hits"]
+        + output["mru_ghost_hits"]
+    )
+    output["arc_accesses_total"] = output["arc_hits"] + output["arc_misses"]
+    output["demand_data_total"] = (
+        output["demand_data_hits"] + output["demand_data_misses"]
+    )
+    output["pre_data_total"] = output["pre_data_hits"] + output["pre_data_misses"]
+    output["real_hits"] = output["mfu_hits"] + output["mru_hits"]

     # ARC efficiency percents
-    output['cache_hits_per'] = percent(output['arc_hits'], output['arc_accesses_total'])
-    output['cache_miss_per'] = percent(output['arc_misses'], output['arc_accesses_total'])
-    output['actual_hit_per'] = percent(output['real_hits'], output['arc_accesses_total'])
-    output['data_demand_per'] = percent(output['demand_data_hits'], output['demand_data_total'])
-    output['data_pre_per'] = percent(output['pre_data_hits'], output['pre_data_total'])
-    output['anon_hits_per'] = percent(output['anon_hits'], output['arc_hits'])
-    output['mru_per'] = percent(output['mru_hits'], output['arc_hits'])
-    output['mfu_per'] = percent(output['mfu_hits'], output['arc_hits'])
-    output['mru_ghost_per'] = percent(output['mru_ghost_hits'], output['arc_hits'])
-    output['mfu_ghost_per'] = percent(output['mfu_ghost_hits'], output['arc_hits'])
-    output['demand_hits_per'] = percent(output['demand_data_hits'], output['arc_hits'])
-    output['pre_hits_per'] = percent(output['pre_data_hits'], output['arc_hits'])
-    output['meta_hits_per'] = percent(output['demand_meta_hits'], output['arc_hits'])
-    output['pre_meta_hits_per'] = percent(output['pre_meta_hits'], output['arc_hits'])
-    output['demand_misses_per'] = percent(output['demand_data_misses'], output['arc_misses'])
-    output['pre_misses_per'] = percent(output['pre_data_misses'], output['arc_misses'])
-    output['meta_misses_per'] = percent(output['demand_meta_misses'], output['arc_misses'])
-    output['pre_meta_misses_per'] = percent(output['pre_meta_misses'], output['arc_misses'])
+    output["cache_hits_per"] = percent(output["arc_hits"], output["arc_accesses_total"])
+    output["cache_miss_per"] = percent(
+        output["arc_misses"], output["arc_accesses_total"]
+    )
+    output["actual_hit_per"] = percent(
+        output["real_hits"], output["arc_accesses_total"]
+    )
+    output["data_demand_per"] = percent(
+        output["demand_data_hits"], output["demand_data_total"]
+    )
+    output["data_pre_per"] = percent(output["pre_data_hits"], output["pre_data_total"])
+    output["anon_hits_per"] = percent(output["anon_hits"], output["arc_hits"])
+    output["mru_per"] = percent(output["mru_hits"], output["arc_hits"])
+    output["mfu_per"] = percent(output["mfu_hits"], output["arc_hits"])
+    output["mru_ghost_per"] = percent(output["mru_ghost_hits"], output["arc_hits"])
+    output["mfu_ghost_per"] = percent(output["mfu_ghost_hits"], output["arc_hits"])
+    output["demand_hits_per"] = percent(output["demand_data_hits"], output["arc_hits"])
+    output["pre_hits_per"] = percent(output["pre_data_hits"], output["arc_hits"])
+    output["meta_hits_per"] = percent(output["demand_meta_hits"], output["arc_hits"])
+    output["pre_meta_hits_per"] = percent(output["pre_meta_hits"], output["arc_hits"])
+    output["demand_misses_per"] = percent(
+        output["demand_data_misses"], output["arc_misses"]
+    )
+    output["pre_misses_per"] = percent(output["pre_data_misses"], output["arc_misses"])
+    output["meta_misses_per"] = percent(
+        output["demand_meta_misses"], output["arc_misses"]
+    )
+    output["pre_meta_misses_per"] = percent(
+        output["pre_meta_misses"], output["arc_misses"]
+    )

     # pools
-    p = subprocess.run([ZPOOL, 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True)
-    if p.returncode != 0:
-        return p.returncode
-    output['pools'] = []
-    fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup']
-    for l in p.stdout.splitlines():
-        p = dict(zip(fields, l.split('\t')))
-        if p['ckpoint'] == '-':
-            p['ckpoint'] = 0
-        if p['expandsz'] == '-':
-            p['expandsz'] = 0
-        p['frag'] = p['frag'].rstrip('%')
-        if p['frag'] == '-':
-            p['frag'] = 0
-        p['cap'] = p['cap'].rstrip('%')
-        if p['cap'] == '-':
-            p['cap'] = 0
-        p['dedup'] = p['dedup'].rstrip('x')
-        output['pools'].append(p)
+    p = subprocess.run(
+        [ZPOOL, "list", "-pH"], stdout=subprocess.PIPE, universal_newlines=True
+    )
+    if p.returncode != 0:
+        return p.returncode
+    output["pools"] = []
+    fields = [
+        "name",
+        "size",
+        "alloc",
+        "free",
+        "ckpoint",
+        "expandsz",
+        "frag",
+        "cap",
+        "dedup",
+    ]
+    for l in p.stdout.splitlines():
+        p = dict(zip(fields, l.split("\t")))
+        if p["ckpoint"] == "-":
+            p["ckpoint"] = 0
+        if p["expandsz"] == "-":
+            p["expandsz"] = 0
+        p["frag"] = p["frag"].rstrip("%")
+        if p["frag"] == "-":
+            p["frag"] = 0
+        p["cap"] = p["cap"].rstrip("%")
+        if p["cap"] == "-":
+            p["cap"] = 0
+        p["dedup"] = p["dedup"].rstrip("x")
+        output["pools"].append(p)

     print(json.dumps(output))

     return 0

-if __name__ == '__main__':
-    import sys
-    sys.exit(main(sys.argv[1:]))
+
+if __name__ == "__main__":
+    import sys
+
+    sys.exit(main(sys.argv[1:]))
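The FreeBSD collector above boils down to three steps: dump every kstat.zfs / vfs.zfs counter with sysctl, split each "key: value" line into a dict, and derive ratios through the percent() helper so an idle ARC (zero accesses) cannot raise ZeroDivisionError. A minimal standalone sketch of that flow, using invented counter values in place of a live sysctl call:

def percent(numerator, denominator, default=0):
    # Same guard as the helper above: an idle ARC must not crash the poller.
    try:
        return numerator / denominator * 100
    except ZeroDivisionError:
        return default


# Invented sample; real input comes from `sysctl -q kstat.zfs vfs.zfs`.
SAMPLE = """kstat.zfs.misc.arcstats.hits: 9000
kstat.zfs.misc.arcstats.misses: 1000"""

stats = {}
for line in SAMPLE.splitlines():
    key, _, value = (part.strip() for part in line.partition(":"))
    stats[key] = int(value)

hits = stats["kstat.zfs.misc.arcstats.hits"]
misses = stats["kstat.zfs.misc.arcstats.misses"]
print(percent(hits, hits + misses))  # 90.0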
snmp/zfs-linux
@@ -2,64 +2,70 @@
 import json
 import subprocess

+
 def proc_err(cmd, proc):
     # output process error and first line of error code
     return "{}{}".format(
         subprocess.CalledProcessError(proc.returncode, cmd, proc.stderr),
-        " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else ""
+        " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "",
     )

+
 def main(args):
-    LINUX = '/proc/spl/kstat/zfs/arcstats'
-    BSD1 = 'sysctl'
-    BSD2 = 'kstat.zfs.misc.arcstats'
-    ILLUMOS = 'kstat -n arcstats'
+    LINUX = "/proc/spl/kstat/zfs/arcstats"
+    BSD1 = "sysctl"
+    BSD2 = "kstat.zfs.misc.arcstats"
+    ILLUMOS = "kstat -n arcstats"
     COLUMN = 1
     SPLIT = None
     res = {}

     try:
-        LINES = open(LINUX, 'r').readlines()
+        LINES = open(LINUX, "r").readlines()
         COLUMN = 2

     except IOError as e1:
         try:
-            proc = subprocess.run([BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True)
+            proc = subprocess.run(
+                [BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True
+            )
             LINES = proc.stdout.splitlines()
-            LINES = [x[len(BSD2)+1:] for x in LINES]
-            SPLIT = ':'
+            LINES = [x[len(BSD2) + 1 :] for x in LINES]
+            SPLIT = ":"
         except FileNotFoundError as e2:
             try:
-                proc = subprocess.run(ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True)
+                proc = subprocess.run(
+                    ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True
+                )
                 LINES = proc.stdout.splitlines()
             except FileNotFoundError as e3:
-                print('Linux :', e1)
-                print('BSD :', e2)
-                print('Illumos:', e3)
+                print("Linux :", e1)
+                print("BSD :", e2)
+                print("Illumos:", e3)
                 return 1

     LINES = [x.strip() for x in LINES]

     STATS = {}
     for line in LINES[2:]:
         splitline = line.split(SPLIT)
         try:
             STATS[splitline[0]] = int(splitline[COLUMN])
         # Skip non int value like Illumos crtime, empty line at the end
         except:
             continue

     # ARC misc
-    DELETED = STATS['deleted']
-    EVICT_SKIP = STATS['evict_skip']
-    MUTEX_SKIP = STATS['mutex_miss']
-    RECYCLE_MISS = STATS['recycle_miss'] if 'recycle_miss' in STATS else 0
+    DELETED = STATS["deleted"]
+    EVICT_SKIP = STATS["evict_skip"]
+    MUTEX_SKIP = STATS["mutex_miss"]
+    RECYCLE_MISS = STATS["recycle_miss"] if "recycle_miss" in STATS else 0

     # ARC size
-    ARC_SIZE = STATS['size']
-    TARGET_SIZE_MAX = STATS['c_max']
-    TARGET_SIZE_MIN = STATS['c_min']
-    TARGET_SIZE = STATS['c']
+    ARC_SIZE = STATS["size"]
+    TARGET_SIZE_MAX = STATS["c_max"]
+    TARGET_SIZE_MIN = STATS["c_min"]
+    TARGET_SIZE = STATS["c"]

     TARGET_SIZE_PERCENT = TARGET_SIZE / TARGET_SIZE_MAX * 100
     ARC_SIZE_PERCENT = ARC_SIZE / TARGET_SIZE_MAX * 100
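This first zfs-linux hunk is where the script picks its data source: it tries the Linux procfs file first, falls back to `sysctl kstat.zfs.misc.arcstats` on FreeBSD, and finally to `kstat -n arcstats` on Illumos, remembering which column of each line carries the value and what to split on. A compressed sketch of that probe order (the paths and commands are the ones in the script; the read_arcstats_lines wrapper and the trimmed error handling are not):

import subprocess

LINUX = "/proc/spl/kstat/zfs/arcstats"
BSD = ["sysctl", "kstat.zfs.misc.arcstats"]
ILLUMOS = ["kstat", "-n", "arcstats"]


def read_arcstats_lines():
    """Return (lines, value_column, split_char) from the first source that works."""
    try:
        # Linux: procfs table with "name type data" columns, value in column 2.
        with open(LINUX) as fh:
            return fh.readlines(), 2, None
    except IOError:
        pass
    try:
        # FreeBSD: "kstat.zfs.misc.arcstats.<name>: <value>" lines.
        proc = subprocess.run(BSD, stdout=subprocess.PIPE, universal_newlines=True)
        return proc.stdout.splitlines(), 1, ":"
    except FileNotFoundError:
        # Illumos: "kstat -n arcstats" prints whitespace-separated name/value pairs.
        proc = subprocess.run(ILLUMOS, stdout=subprocess.PIPE, universal_newlines=True)
        return proc.stdout.splitlines(), 1, None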
@@ -70,7 +76,7 @@ def main(args):
     MFU_SIZE = 0
     RECENTLY_USED_PERCENT = 0
     FREQUENTLY_USED_PERCENT = 0
-    P = STATS['p']
+    P = STATS["p"]

     if ARC_SIZE >= TARGET_SIZE:
         MFU_SIZE = ARC_SIZE - P
@@ -81,22 +87,21 @@ def main(args):
         RECENTLY_USED_PERCENT = P / TARGET_SIZE * 100
         FREQUENTLY_USED_PERCENT = MFU_SIZE / TARGET_SIZE * 100

-
     # ARC misc. efficient stats
-    ARC_HITS = STATS['hits']
-    ARC_MISSES = STATS['misses']
-    DEMAND_DATA_HITS = STATS['demand_data_hits']
-    DEMAND_DATA_MISSES = STATS['demand_data_misses']
-    DEMAND_METADATA_HITS = STATS['demand_metadata_hits']
-    DEMAND_METADATA_MISSES = STATS['demand_metadata_misses']
-    MFU_GHOST_HITS = STATS['mfu_ghost_hits']
-    MFU_HITS = STATS['mfu_hits']
-    MRU_GHOST_HITS = STATS['mru_ghost_hits']
-    MRU_HITS = STATS['mru_hits']
-    PREFETCH_DATA_HITS = STATS['prefetch_data_hits']
-    PREFETCH_DATA_MISSES = STATS['prefetch_data_misses']
-    PREFETCH_METADATA_HITS = STATS['prefetch_metadata_hits']
-    PREFETCH_METADATA_MISSES = STATS['prefetch_metadata_misses']
+    ARC_HITS = STATS["hits"]
+    ARC_MISSES = STATS["misses"]
+    DEMAND_DATA_HITS = STATS["demand_data_hits"]
+    DEMAND_DATA_MISSES = STATS["demand_data_misses"]
+    DEMAND_METADATA_HITS = STATS["demand_metadata_hits"]
+    DEMAND_METADATA_MISSES = STATS["demand_metadata_misses"]
+    MFU_GHOST_HITS = STATS["mfu_ghost_hits"]
+    MFU_HITS = STATS["mfu_hits"]
+    MRU_GHOST_HITS = STATS["mru_ghost_hits"]
+    MRU_HITS = STATS["mru_hits"]
+    PREFETCH_DATA_HITS = STATS["prefetch_data_hits"]
+    PREFETCH_DATA_MISSES = STATS["prefetch_data_misses"]
+    PREFETCH_METADATA_HITS = STATS["prefetch_metadata_hits"]
+    PREFETCH_METADATA_MISSES = STATS["prefetch_metadata_misses"]

     ANON_HITS = ARC_HITS - (MFU_HITS + MRU_HITS + MFU_GHOST_HITS + MRU_GHOST_HITS)
     ARC_ACCESSES_TOTAL = ARC_HITS + ARC_MISSES
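The raw counters read above feed a handful of derived totals before any percentage is computed; ANON_HITS is simply whatever share of the hits is not attributable to the MRU/MFU lists or their ghosts. A tiny worked example with invented numbers (REAL_HITS uses the same definition the FreeBSD script prints, mfu_hits + mru_hits):

# Invented counters, only to illustrate the arithmetic used above.
ARC_HITS, ARC_MISSES = 1000, 200
MFU_HITS, MRU_HITS = 600, 300
MFU_GHOST_HITS, MRU_GHOST_HITS = 40, 20

ANON_HITS = ARC_HITS - (MFU_HITS + MRU_HITS + MFU_GHOST_HITS + MRU_GHOST_HITS)
REAL_HITS = MFU_HITS + MRU_HITS
ARC_ACCESSES_TOTAL = ARC_HITS + ARC_MISSES

print(ANON_HITS, REAL_HITS, ARC_ACCESSES_TOTAL)  # 40 900 1200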
@@ -108,9 +113,15 @@ def main(args):
     CACHE_HIT_PERCENT = ARC_HITS / ARC_ACCESSES_TOTAL * 100
     CACHE_MISS_PERCENT = ARC_MISSES / ARC_ACCESSES_TOTAL * 100
     ACTUAL_HIT_PERCENT = REAL_HITS / ARC_ACCESSES_TOTAL * 100
-    DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0
+    DATA_DEMAND_PERCENT = (
+        DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0
+    )

-    DATA_PREFETCH_PERCENT = PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 if PREFETCH_DATA_TOTAL != 0 else 0
+    DATA_PREFETCH_PERCENT = (
+        PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100
+        if PREFETCH_DATA_TOTAL != 0
+        else 0
+    )

     ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0

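Unlike the FreeBSD script, zfs-linux guards each ratio with an inline conditional expression instead of a helper, so a cache that has seen no demand or prefetch traffic reports 0 rather than dividing by zero. The same guard in isolation (sample values invented):

# Invented values, e.g. a freshly booted host with no demand reads yet.
DEMAND_DATA_HITS, DEMAND_DATA_TOTAL = 0, 0

DATA_DEMAND_PERCENT = (
    DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0
)
print(DATA_DEMAND_PERCENT)  # 0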
@@ -121,125 +132,157 @@ def main(args):

     DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
     PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
-    METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
-    PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    METADATA_HITS_PERCENT = (
+        DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    )
+    PREFETCH_METADATA_HITS_PERCENT = (
+        DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    )

-    DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
-    PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
-    METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
-    PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    DEMAND_MISSES_PERCENT = (
+        DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    )
+    PREFETCH_MISSES_PERCENT = (
+        PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    )
+    METADATA_MISSES_PERCENT = (
+        DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    )
+    PREFETCH_METADATA_MISSES_PERCENT = (
+        PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    )

     # pools
     exact_size = True
-    zpool_cmd = ['/sbin/zpool']
-    zpool_cmd_list = zpool_cmd + ['list', '-p', '-H']
-    std = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'universal_newlines': True}
+    zpool_cmd = ["/sbin/zpool"]
+    zpool_cmd_list = zpool_cmd + ["list", "-p", "-H"]
+    std = {
+        "stdout": subprocess.PIPE,
+        "stderr": subprocess.PIPE,
+        "universal_newlines": True,
+    }

     ## account for variations between ZoL zfs versions
     proc = subprocess.run(zpool_cmd_list, **std)
-    if (proc.returncode == 2):
+    if proc.returncode == 2:
         # -p option is not present in older versions
         # edit snmpd.conf zfs extend section to the following:
         # extend zfs /usr/bin/sudo /etc/snmp/zfs-linux
         # make sure to edit your sudo users (usually visudo) and add at the bottom:
         # snmp ALL=(ALL) NOPASSWD: /etc/snmp/zfs-linux
-        del zpool_cmd_list[zpool_cmd_list.index('-p')] # try removing -p to fix the issue
+        del zpool_cmd_list[
+            zpool_cmd_list.index("-p")
+        ]  # try removing -p to fix the issue
         proc = subprocess.run(zpool_cmd_list, **std)
         exact_size = False
-    if (proc.returncode != 0):
+    if proc.returncode != 0:
         return proc_err(zpool_cmd_list, proc)

     pools = []
-    FIELDS = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup', 'health', 'altroot']
-    if len(proc.stdout.splitlines()[0].split('\t')) == 10:
-        FIELDS.remove('ckpoint')
-    for line in proc.stdout.splitlines():
-        info = dict(zip(FIELDS, line.split('\t')))
-
-        info['expandsz'] = 0 if info['expandsz'] == '-' else info['expandsz']
-        info['frag'] = info['frag'].rstrip('%')
-        info['frag'] = 0 if info['frag'] == '-' else info['frag']
-        info['dedup'] = info['dedup'].rstrip('x')
-        info['cap'] = info['cap'].rstrip('%')
-        if 'ckpoint' in info:
-            info['ckpoint'] = 0 if info['ckpoint'] == '-' else info['ckpoint']
+    FIELDS = [
+        "name",
+        "size",
+        "alloc",
+        "free",
+        "ckpoint",
+        "expandsz",
+        "frag",
+        "cap",
+        "dedup",
+        "health",
+        "altroot",
+    ]
+    if len(proc.stdout.splitlines()[0].split("\t")) == 10:
+        FIELDS.remove("ckpoint")
+
+    for line in proc.stdout.splitlines():
+        info = dict(zip(FIELDS, line.split("\t")))
+
+        info["expandsz"] = 0 if info["expandsz"] == "-" else info["expandsz"]
+        info["frag"] = info["frag"].rstrip("%")
+        info["frag"] = 0 if info["frag"] == "-" else info["frag"]
+        info["dedup"] = info["dedup"].rstrip("x")
+        info["cap"] = info["cap"].rstrip("%")
+        if "ckpoint" in info:
+            info["ckpoint"] = 0 if info["ckpoint"] == "-" else info["ckpoint"]

         # zfs-06.5.11 fix
         if not exact_size:
-            zpool_cmd_get = zpool_cmd + ['get', '-pH', 'size,alloc,free', info['name']]
+            zpool_cmd_get = zpool_cmd + ["get", "-pH", "size,alloc,free", info["name"]]
             proc2 = subprocess.run(zpool_cmd_get, **std)
-            if (proc2.returncode != 0):
+            if proc2.returncode != 0:
                 return proc_err(zpool_cmd_get, proc2)

-            info2 = dict([tuple(s.split('\t')[1:3]) for s in proc2.stdout.splitlines()])
-            info['size'] = info2['size']
-            info['alloc'] = info2['allocated']
-            info['free'] = info2['free']
+            info2 = dict([tuple(s.split("\t")[1:3]) for s in proc2.stdout.splitlines()])
+            info["size"] = info2["size"]
+            info["alloc"] = info2["allocated"]
+            info["free"] = info2["free"]

         pools.append(info)

     res = {
-        'deleted': DELETED, # ARC misc
-        'evict_skip': EVICT_SKIP,
-        'mutex_skip': MUTEX_SKIP,
-        'recycle_miss': RECYCLE_MISS,
-        'arc_size': ARC_SIZE, # ARC size
-        'target_size_max': TARGET_SIZE_MAX,
-        'target_size_min': TARGET_SIZE_MIN,
-        'target_size': TARGET_SIZE,
-        'target_size_per': TARGET_SIZE_PERCENT,
-        'arc_size_per': ARC_SIZE_PERCENT,
-        'target_size_arat': TARGET_SIZE_ADAPTIVE_RATIO,
-        'min_size_per': MIN_SIZE_PERCENT,
-        'mfu_size': MFU_SIZE, # ARC size breakdown
-        'p': P,
-        'rec_used_per': RECENTLY_USED_PERCENT,
-        'freq_used_per': FREQUENTLY_USED_PERCENT,
-        'arc_hits': ARC_HITS, # ARC efficiency
-        'arc_misses': ARC_MISSES,
-        'demand_data_hits': DEMAND_DATA_HITS,
-        'demand_data_misses': DEMAND_DATA_MISSES,
-        'demand_meta_hits': DEMAND_METADATA_HITS,
-        'demand_meta_misses': DEMAND_METADATA_MISSES,
-        'mfu_ghost_hits': MFU_GHOST_HITS,
-        'mfu_hits': MFU_HITS,
-        'mru_ghost_hits': MRU_GHOST_HITS,
-        'mru_hits': MRU_HITS,
-        'pre_data_hits': PREFETCH_DATA_HITS,
-        'pre_data_misses': PREFETCH_DATA_MISSES,
-        'pre_meta_hits': PREFETCH_METADATA_HITS,
-        'pre_meta_misses': PREFETCH_METADATA_HITS,
-        'anon_hits': ANON_HITS,
-        'arc_accesses_total': ARC_ACCESSES_TOTAL,
-        'demand_data_total': DEMAND_DATA_TOTAL,
-        'pre_data_total': PREFETCH_DATA_TOTAL,
-        'real_hits': REAL_HITS,
-        'cache_hits_per': CACHE_HIT_PERCENT, # ARC efficiency percentages
-        'cache_miss_per': CACHE_MISS_PERCENT,
-        'actual_hit_per': ACTUAL_HIT_PERCENT,
-        'data_demand_per': DATA_DEMAND_PERCENT,
-        'data_pre_per': DATA_PREFETCH_PERCENT,
-        'anon_hits_per': ANON_HITS_PERCENT,
-        'mru_per': MRU_PERCENT,
-        'mfu_per': MFU_PERCENT,
-        'mru_ghost_per': MRU_GHOST_PERCENT,
-        'mfu_ghost_per': MFU_GHOST_PERCENT,
-        'demand_hits_per': DEMAND_HITS_PERCENT,
-        'pre_hits_per': PREFETCH_HITS_PERCENT,
-        'meta_hits_per': METADATA_HITS_PERCENT,
-        'pre_meta_hits_per': PREFETCH_METADATA_HITS_PERCENT,
-        'demand_misses_per': DEMAND_MISSES_PERCENT,
-        'pre_misses_per': PREFETCH_MISSES_PERCENT,
-        'meta_misses_per': METADATA_MISSES_PERCENT,
-        'pre_meta_misses_per': PREFETCH_METADATA_MISSES_PERCENT,
-        'pools': pools
+        "deleted": DELETED,  # ARC misc
+        "evict_skip": EVICT_SKIP,
+        "mutex_skip": MUTEX_SKIP,
+        "recycle_miss": RECYCLE_MISS,
+        "arc_size": ARC_SIZE,  # ARC size
+        "target_size_max": TARGET_SIZE_MAX,
+        "target_size_min": TARGET_SIZE_MIN,
+        "target_size": TARGET_SIZE,
+        "target_size_per": TARGET_SIZE_PERCENT,
+        "arc_size_per": ARC_SIZE_PERCENT,
+        "target_size_arat": TARGET_SIZE_ADAPTIVE_RATIO,
+        "min_size_per": MIN_SIZE_PERCENT,
+        "mfu_size": MFU_SIZE,  # ARC size breakdown
+        "p": P,
+        "rec_used_per": RECENTLY_USED_PERCENT,
+        "freq_used_per": FREQUENTLY_USED_PERCENT,
+        "arc_hits": ARC_HITS,  # ARC efficiency
+        "arc_misses": ARC_MISSES,
+        "demand_data_hits": DEMAND_DATA_HITS,
+        "demand_data_misses": DEMAND_DATA_MISSES,
+        "demand_meta_hits": DEMAND_METADATA_HITS,
+        "demand_meta_misses": DEMAND_METADATA_MISSES,
+        "mfu_ghost_hits": MFU_GHOST_HITS,
+        "mfu_hits": MFU_HITS,
+        "mru_ghost_hits": MRU_GHOST_HITS,
+        "mru_hits": MRU_HITS,
+        "pre_data_hits": PREFETCH_DATA_HITS,
+        "pre_data_misses": PREFETCH_DATA_MISSES,
+        "pre_meta_hits": PREFETCH_METADATA_HITS,
+        "pre_meta_misses": PREFETCH_METADATA_HITS,
+        "anon_hits": ANON_HITS,
+        "arc_accesses_total": ARC_ACCESSES_TOTAL,
+        "demand_data_total": DEMAND_DATA_TOTAL,
+        "pre_data_total": PREFETCH_DATA_TOTAL,
+        "real_hits": REAL_HITS,
+        "cache_hits_per": CACHE_HIT_PERCENT,  # ARC efficiency percentages
+        "cache_miss_per": CACHE_MISS_PERCENT,
+        "actual_hit_per": ACTUAL_HIT_PERCENT,
+        "data_demand_per": DATA_DEMAND_PERCENT,
+        "data_pre_per": DATA_PREFETCH_PERCENT,
+        "anon_hits_per": ANON_HITS_PERCENT,
+        "mru_per": MRU_PERCENT,
+        "mfu_per": MFU_PERCENT,
+        "mru_ghost_per": MRU_GHOST_PERCENT,
+        "mfu_ghost_per": MFU_GHOST_PERCENT,
+        "demand_hits_per": DEMAND_HITS_PERCENT,
+        "pre_hits_per": PREFETCH_HITS_PERCENT,
+        "meta_hits_per": METADATA_HITS_PERCENT,
+        "pre_meta_hits_per": PREFETCH_METADATA_HITS_PERCENT,
+        "demand_misses_per": DEMAND_MISSES_PERCENT,
+        "pre_misses_per": PREFETCH_MISSES_PERCENT,
+        "meta_misses_per": METADATA_MISSES_PERCENT,
+        "pre_meta_misses_per": PREFETCH_METADATA_MISSES_PERCENT,
+        "pools": pools,
     }

     print(json.dumps(res))

     return 0

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     import sys

     sys.exit(main(sys.argv[1:]))
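The pool loop at the end of the diff is a thin parser around `zpool list -p -H`: each tab-separated row is zipped onto FIELDS, `-` placeholders become 0, and the `%` / `x` suffixes are stripped so the output is purely numeric. A trimmed sketch of that normalization against a canned row (the sample values are invented and parse_zpool_line is not a function in the script):

FIELDS = ["name", "size", "alloc", "free", "ckpoint", "expandsz",
          "frag", "cap", "dedup", "health", "altroot"]


def parse_zpool_line(line):
    # Mirrors the per-line normalization done in the loop above.
    info = dict(zip(FIELDS, line.split("\t")))
    info["expandsz"] = 0 if info["expandsz"] == "-" else info["expandsz"]
    info["frag"] = info["frag"].rstrip("%")
    info["frag"] = 0 if info["frag"] == "-" else info["frag"]
    info["dedup"] = info["dedup"].rstrip("x")
    info["cap"] = info["cap"].rstrip("%")
    info["ckpoint"] = 0 if info["ckpoint"] == "-" else info["ckpoint"]
    return info


# Invented example row in `zpool list -p -H` column order.
ROW = "tank\t1000204886016\t500102443008\t500102443008\t-\t-\t12%\t50%\t1.00x\tONLINE\t-"
print(parse_zpool_line(ROW)["frag"], parse_zpool_line(ROW)["dedup"])  # 12 1.00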