mirror of https://github.com/librenms/librenms-agent.git synced 2024-05-09 09:54:52 +00:00

Merge pull request #1 from librenms/master

Resync upstream
This commit is contained in:
fbourqui 2020-01-22 11:18:23 +01:00 (committed by GitHub)
10 changed files with 409 additions and 19 deletions


@@ -4,7 +4,9 @@ install:
 	mkdir -p $(PREFIX)/usr/lib/check_mk_agent/plugins
 	mkdir -p $(PREFIX)/usr/lib/check_mk_agent/repo
 	mkdir -p $(PREFIX)/usr/lib/check_mk_agent/local
-	cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/
+	mkdir -p $(PREFIX)/usr/share/librenms-agent/snmp
+	cp -rL agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/
+	cp -rL snmp/* $(PREFIX)/usr/share/librenms-agent/snmp
 	rm $(PREFIX)/usr/lib/check_mk_agent/repo/README
 	mkdir -p $(PREFIX)/usr/bin
 	install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent


@@ -17,6 +17,11 @@
 from subprocess import check_output
 import json
 
+def cephversion():
+    cephv = check_output(["/usr/bin/ceph", "version"]).replace('ceph version ', '')
+    major, minor = cephv.split('.')[0:2]
+    return [int(major), int(minor)]
+
 def cephdf():
     cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).replace('-inf', '0')

@@ -44,12 +49,18 @@ def cephdf():
 def osdperf():
+    global major
     osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).replace('-inf', '0')
-    for o in json.loads(osdperf)['osd_perf_infos']:
-        print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms']))
+    if major > 13:
+        for o in json.loads(osdperf)['osdstats']['osd_perf_infos']:
+            print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms']))
+    else:
+        for o in json.loads(osdperf)['osd_perf_infos']:
+            print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms']))
 
 def poolstats():
+    global major
     poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).replace('-inf', '0')
     for p in json.loads(poolstats):

@@ -62,12 +73,17 @@ def poolstats():
         except:
             w = 0
         try:
-            o = p['client_io_rate']['op_per_sec']
+            if major > 11:
+                o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec']
+            else:
+                o = p['client_io_rate']['op_per_sec']
         except:
             o = 0
 
         print("%s:%i:%i:%i" % (p['pool_name'], o, w, r))
 
+major, minor = cephversion()
+
 print "<<<app-ceph>>>"
 print "<poolstats>"
 poolstats()

@@ -75,4 +91,3 @@ print "<osdperformance>"
 osdperf()
 print "<df>"
 cephdf()
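
The branches above gate on the running Ceph release: cephversion() parses the major and minor numbers out of the `ceph version` banner, and osdperf()/poolstats() then pick the JSON layout matching that major version. A minimal sketch of the parsing, assuming a fabricated banner string instead of calling /usr/bin/ceph:

    # Sketch only: the same split cephversion() performs, on a made-up banner.
    cephv = 'ceph version 14.2.0 (0000000) nautilus (stable)'.replace('ceph version ', '')
    major, minor = cephv.split('.')[0:2]
    print([int(major), int(minor)])  # -> [14, 2]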

debian/changelog vendored

@@ -1,3 +1,12 @@
+librenms-agent (1.1.0) stable; urgency=low
+
+  - New upstream versions
+  - Include SNMP scripts
+  - Fix Ceph scripts
+  - Fix nginx scripts
+
+ -- Mark Schouten <mark@tuxis.nl>  Wed, 03 Jul 2019 12:06:00 +0200
+
 librenms-agent (1.0.7) stable; urgency=low
 
   - New upstream versions

snmp/certificate.py Executable file

@@ -0,0 +1,79 @@
#!/usr/bin/env python3
import socket
import ssl
import datetime
import json

CONFIGFILE = '/etc/snmp/certificate.json'

# {"domains": [
#     {"fqdn": "www.mydomain.com"},
#     {"fqdn": "www2.mydomain.com"}
#     ]
# }


def get_certificate_data(domain, port=443):
    context = ssl.create_default_context()
    conn = context.wrap_socket(
        socket.socket(socket.AF_INET),
        server_hostname=domain,
    )
    # 3 second timeout because Lambda has runtime limitations
    conn.settimeout(3.0)
    try:
        conn.connect((domain, port))
        error_msg = None
    except ConnectionRefusedError as e:
        error_msg = e
    ssl_info = conn.getpeercert()
    return ssl_info, error_msg


output = {}
output['error'] = 0
output['errorString'] = ""
output['version'] = 1

with open(CONFIGFILE, 'r') as json_file:
    try:
        configfile = json.load(json_file)
    except json.decoder.JSONDecodeError as e:
        output['error'] = 1
        output['errorString'] = "Configfile Error: '%s'" % e

if not output['error']:
    output_data_list = []
    for domain in configfile['domains']:
        output_data = {}
        if 'port' not in domain.keys():
            domain['port'] = 443
        certificate_data, error_msg = get_certificate_data(domain['fqdn'], domain['port'])
        output_data['cert_name'] = domain['fqdn']
        if not error_msg:
            ssl_date_format = r'%b %d %H:%M:%S %Y %Z'
            validity_end = datetime.datetime.strptime(certificate_data['notAfter'], ssl_date_format)
            validity_start = datetime.datetime.strptime(certificate_data['notBefore'], ssl_date_format)
            cert_age = datetime.datetime.now() - validity_start
            cert_still_valid = validity_end - datetime.datetime.now()
            output_data['age'] = cert_age.days
            output_data['remaining_days'] = cert_still_valid.days
        else:
            output_data['age'] = None
            output_data['remaining_days'] = None
            output['error'] = 1
            output['errorString'] = "%s: %s" % (domain['fqdn'], error_msg)
        output_data_list.append(output_data)
    output['data'] = output_data_list

print(json.dumps(output))
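
The expiry math above works on the notBefore/notAfter strings that ssl's getpeercert() returns. A small sketch of just that arithmetic, on a fabricated certificate dict (the dates are illustrative only):

    # Sketch only: the script's date arithmetic, on made-up cert data.
    import datetime

    certificate_data = {
        'notBefore': 'Jun 15 00:00:00 2019 GMT',
        'notAfter': 'Sep 13 00:00:00 2019 GMT',
    }
    ssl_date_format = r'%b %d %H:%M:%S %Y %Z'
    validity_end = datetime.datetime.strptime(certificate_data['notAfter'], ssl_date_format)
    validity_start = datetime.datetime.strptime(certificate_data['notBefore'], ssl_date_format)
    print((datetime.datetime.now() - validity_start).days)  # age in days
    print((validity_end - datetime.datetime.now()).days)    # days remaining, negative once expired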


@@ -80,6 +80,9 @@ if [ -d /dev/md ] ; then
         RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']'
         let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT"
+        if [ $RAID_HOTSPARE_COUNT -lt 0 ] ; then
+            RAID_HOTSPARE_COUNT=0
+        fi
         ARRAY_DATA='{'\
             '"name":"'$RAID_NAME\


@@ -28,7 +28,7 @@ NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '
 NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'`
 NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'`
 NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'`
-NTP_VERSION=`$BIN_NTPD --version | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_HEAD -c 1`
+NTP_VERSION=`$BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}'`
 echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}'
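
The replaced line stops invoking `ntpd --version` and instead pulls the major version out of the same `ntpq -c rv` output the other variables already parse. What the new awk pipeline extracts, sketched in Python against a fabricated rv fragment:

    # Sketch only: the field the new pipeline isolates, on a made-up rv line.
    rv = 'version="ntpd 4.2.8p10@1.3728-o", processor="x86_64"'
    ntp_version = rv.split('ntpd ')[1].split('.')[0]
    print(ntp_version)  # '4'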

snmp/pureftpd.py Executable file

@@ -0,0 +1,66 @@
#!/usr/bin/env python3
import os
import json

CONFIGFILE = '/etc/snmp/pureftpd.json'

pureftpwho_cmd = '/usr/sbin/pure-ftpwho'
pureftpwho_args = '-v -s -n'

output_data = {}
output_data['version'] = 1
output_data['errorString'] = ""
output_data['error'] = 0

if os.path.isfile(CONFIGFILE):
    with open(CONFIGFILE, 'r') as json_file:
        try:
            configfile = json.load(json_file)
        except json.decoder.JSONDecodeError as e:
            output_data['error'] = 1
            output_data['errorString'] = "Configfile Error: '%s'" % e
else:
    configfile = None

if not output_data['error'] and configfile:
    try:
        if 'pureftpwho_cmd' in configfile.keys():
            pureftpwho_cmd = configfile['pureftpwho_cmd']
    except KeyError:
        output_data['error'] = 1
        output_data['errorString'] = "Configfile Error: '%s'" % e

output = os.popen('sudo ' + pureftpwho_cmd + ' ' + pureftpwho_args).read()

data = {}

for line in output.split('\n'):
    if not len(line):
        continue
    pid, acct, time, state, file, peer, local, port, transfered, total, percent, bandwidth = line.split('|')
    if "IDLE" in state:
        state = "IDLE"
    elif "DL" in state:
        state = "DL"
    elif "UL" in state:
        state = "UL"
    if acct not in data.keys():
        data[acct] = {}
    if state not in data[acct]:
        data[acct][state] = {'bitrate': 0,
                             'connections': 0
                             }
    bandwidth_bit = int(bandwidth) * 1024
    data[acct][state]['bitrate'] += bandwidth_bit
    data[acct][state]['connections'] += 1

output_data['data'] = data

print(json.dumps(output_data))
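
Each non-empty pure-ftpwho line is split on '|' into twelve fields, the state is normalised to IDLE/DL/UL, and the bandwidth column is scaled by 1024 and accumulated per account and state. One line's worth of that bookkeeping, sketched on a fabricated record:

    # Sketch only: parse and aggregate one made-up pure-ftpwho record.
    line = '1234|ftpuser|00:05|DL|/srv/ftp/file.iso|203.0.113.9|198.51.100.2|21|512000|1024000|50|128'
    (pid, acct, time, state, file, peer, local, port,
     transfered, total, percent, bandwidth) = line.split('|')
    data = {acct: {state: {'bitrate': int(bandwidth) * 1024, 'connections': 1}}}
    print(data)  # {'ftpuser': {'DL': {'bitrate': 131072, 'connections': 1}}}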

snmp/seafile.py Executable file

@@ -0,0 +1,211 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# https://download.seafile.com/published/web-api/v2.1-admin
# user -> libraries (count)
# user -> trash-libraries (count)
# user -> space consumption (count)
# user -> is activated (bool)
# connected_devices (count)
# groups (count)
# Clients -> plattform (count)
# Clients -> version (count)

import requests
import json

# Configfile content example:
# {"url": "https://seafile.mydomain.org",
#  "username": "some_admin_login@mail.address",
#  "password": "password",
#  "account_identifier": "name",
#  "hide_monitoring_account": true
# }
CONFIGFILE = '/etc/snmp/seafile.json'

error = 0
error_string = ''
version = 1


def get_data(url_path, data=None, token=None):
    complete_url = "%s/%s" % (url, url_path)
    headers = {'Accept': 'application/json'}
    if token:
        headers['Authorization'] = "Token %s" % token
    try:
        if token:
            r = requests.get(complete_url, data=data, headers=headers)
        else:
            r = requests.post(complete_url, data=data, headers=headers)
        try:
            return r.json()
        except json.decoder.JSONDecodeError:
            return 'no valid json returned - url correct?'
    except requests.exceptions.RequestException as err:
        return str(err)


def get_devices():
    # get all devices
    url_path = 'api/v2.1/admin/devices/'
    return get_data(url_path, token=token)


def get_groups():
    # get all groups
    url_path = 'api/v2.1/admin/groups/'
    return get_data(url_path, token=token)


def get_sysinfo():
    # get all groups
    url_path = 'api/v2.1/admin/sysinfo/'
    return get_data(url_path, token=token)


def get_account_information():
    # get all accounts withs details
    account_list = []
    for account in get_data('api2/accounts/', token=token):
        # get account details
        url_path = 'api2/accounts/%s/' % account['email']
        account_data = get_data(url_path, token=token)
        # get libraries by owner
        url_path = 'api/v2.1/admin/libraries/?owner=%s' % account['email']
        account_data['repos'] = get_data(url_path, token=token)['repos']
        # get deleted libraries by owner
        url_path = 'api/v2.1/admin/trash-libraries/?owner=%s' % account['email']
        account_data['trash_repos'] = get_data(url_path, token=token)['repos']
        account_list.append(account_data)
    return account_list


def resort_devices(device_list):
    data = {}
    platform = {}
    client_version = {}
    for device in device_list:
        # don't list information assigned to monitor account
        if hide_monitoring_account:
            if device['user'] == configfile['username']:
                continue
        if device['platform'] not in platform.keys():
            platform[device['platform']] = 1
        else:
            platform[device['platform']] += 1
        if device['client_version'] not in client_version.keys():
            client_version[device['client_version']] = 1
        else:
            client_version[device['client_version']] += 1
    data['platform'] = []
    for k, v in platform.items():
        data['platform'].append({'os_name': k,
                                 'clients': v})
    data['client_version'] = []
    for k, v in client_version.items():
        data['client_version'].append({'client_version': k,
                                       'clients': v})
    return data


def resort_groups(group_list):
    data = {'count': len(group_list)}
    return data


def resort_accounts(account_list):
    if account_identifier in ['name', 'email']:
        identifier = account_identifier
    else:
        identifier = 'name'
    accepted_key_list = ['is_active', 'usage']
    data = []
    for user_account in account_list:
        # don't list information assigned to monitor account
        if hide_monitoring_account:
            if user_account['email'] == configfile['username']:
                continue
        new_account = {}
        new_account['owner'] = user_account[identifier]
        new_account['repos'] = len(user_account['repos'])
        new_account['trash_repos'] = len(user_account['trash_repos'])
        for k in user_account.keys():
            if k not in accepted_key_list:
                continue
            new_account[k] = user_account[k]
        data.append(new_account)
    return sorted(data, key=lambda k: k['owner'].lower())


# ------------------------ MAIN --------------------------------------------------------
with open(CONFIGFILE, 'r') as json_file:
    try:
        configfile = json.load(json_file)
    except json.decoder.JSONDecodeError as e:
        error = 1
        error_string = "Configfile Error: '%s'" % e

if not error:
    url = configfile['url']
    username = configfile['username']
    password = configfile['password']
    try:
        account_identifier = configfile['account_identifier']
    except KeyError:
        account_identifier = None
    try:
        hide_monitoring_account = configfile['hide_monitoring_account']
    except KeyError:
        hide_monitoring_account = False

    # get token
    login_data = {'username': username, 'password': password}
    ret = get_data('api2/auth-token/', data=login_data)
    if type(ret) != str:
        if 'token' in ret.keys():
            token = ret['token']
        else:
            error = 1
            try:
                error_string = json.dumps(ret)
            except:
                error_string = ret
    else:
        error = 1
        error_string = ret

data = {}
if not error:
    ret = get_account_information()

if not error:
    data['accounts'] = resort_accounts(ret)
    data['devices'] = resort_devices(get_devices()['devices'])
    data['groups'] = resort_groups(get_groups()['groups'])
    data['sysinfo'] = get_sysinfo()

output = {'error': error,
          'errorString': error_string,
          'version': version,
          'data': data
          }
print(json.dumps(output))
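
Most of the script is plumbing around the Seafile admin API; the resort_* helpers reduce the raw listings to counters LibreNMS can graph. The per-platform counting that resort_devices() performs, sketched with the standard library's Counter on a fabricated device list:

    # Sketch only: the tallying resort_devices() does, on made-up devices.
    from collections import Counter

    device_list = [
        {'user': 'alice@example.org', 'platform': 'windows', 'client_version': '7.0.2'},
        {'user': 'bob@example.org', 'platform': 'linux', 'client_version': '7.0.2'},
        {'user': 'carol@example.org', 'platform': 'windows', 'client_version': '6.2.11'},
    ]
    platform = Counter(d['platform'] for d in device_list)
    print([{'os_name': k, 'clients': v} for k, v in platform.items()])
    # [{'os_name': 'windows', 'clients': 2}, {'os_name': 'linux', 'clients': 1}]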


@@ -96,14 +96,19 @@ def main(args):
     if p.returncode != 0:
         return p.returncode
     output['pools'] = []
-    fields = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup']
+    fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup']
     for l in p.stdout.splitlines():
         p = dict(zip(fields, l.split('\t')))
+        if p['ckpoint'] == '-':
+            p['ckpoint'] = 0
         if p['expandsz'] == '-':
             p['expandsz'] = 0
         p['frag'] = p['frag'].rstrip('%')
+        if p['frag'] == '-':
+            p['frag'] = 0
         p['cap'] = p['cap'].rstrip('%')
+        if p['cap'] == '-':
+            p['cap'] = 0
         p['dedup'] = p['dedup'].rstrip('x')
         output['pools'].append(p)
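
The new 'ckpoint' field appears to track the checkpoint column that newer `zpool list` versions print, and the '-' placeholders zpool emits for empty values are normalised to 0 so the output stays numeric. One fabricated `zpool list -pH` row run through that mapping:

    # Sketch only: a made-up tab-separated `zpool list -pH` row.
    fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup']
    line = 'tank\t996432412672\t121620664320\t874811748352\t-\t-\t7%\t12%\t1.00x'
    p = dict(zip(fields, line.split('\t')))
    for key in ('ckpoint', 'expandsz'):
        if p[key] == '-':
            p[key] = 0
    p['frag'] = p['frag'].rstrip('%')
    p['cap'] = p['cap'].rstrip('%')
    p['dedup'] = p['dedup'].rstrip('x')
    print(p['name'], p['ckpoint'], p['frag'], p['cap'], p['dedup'])  # tank 0 7 12 1.00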


@@ -107,20 +107,20 @@ def main(args):
     ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0
-    MRU_PERCENT = MRU_HITS / ARC_HITS * 100
-    MFU_PERCENT = MFU_HITS / ARC_HITS * 100
-    MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100
-    MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100
+    MRU_PERCENT = MRU_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    MFU_PERCENT = MFU_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
 
-    DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100
-    PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100
-    METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100
-    PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100
+    DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
+    PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
 
-    DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100
-    PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100
-    METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100
-    PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100
+    DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
+    PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0
 
     # pools
     proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True)
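
Every replaced line gets the same treatment: the percentage is only computed when the denominator (ARC_HITS or ARC_MISSES) is non-zero, which avoids a ZeroDivisionError on a cold or idle ARC. The pattern in isolation, with fabricated counters:

    # Sketch only: the guarded division introduced above, with made-up counters.
    ARC_HITS = 0
    MRU_HITS = 0
    MRU_PERCENT = MRU_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0
    print(MRU_PERCENT)  # 0 rather than a ZeroDivisionError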