Migrate Python scripts to Python 3 (#10759)

* Migrate to python3

* Migrate to python3

* Migrate to python3

* Migrate to python3

* Code refactoring and python 2 compat

* Code refactoring and python 2 compat

* Code refactoring and python 2 compat

* Code refactoring and python 2 compat

* Added shared code for wrappers

* Fix python version check

* Allow pure python MySQL library

* move library.py
remove python2 support bits
remove duplicate code

* fix log location

* whitespace?

* fix pre-existing bug

* fix bug when no devices/services exist

* fix pylint issues

* update imports to match

Co-authored-by: Tony Murray <murraytony@gmail.com>
This commit is contained in:
Orsiris de Jong
2020-05-15 07:37:34 +02:00
committed by GitHub
parent 60c03baf76
commit 1dd0d46edd
6 changed files with 967 additions and 1097 deletions

View File

@@ -51,6 +51,6 @@ script:
- test -z "$BROWSER_TEST" || php artisan config:clear
- test -z "$BROWSER_TEST" || php artisan dusk
- bash -n daily.sh
- pylint -E poller-wrapper.py discovery-wrapper.py
- pylint -E poller-wrapper.py discovery-wrapper.py services-wrapper.py
- bash scripts/deploy-docs.sh
- set +e

249
LibreNMS/library.py Normal file
View File

@@ -0,0 +1,249 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import logging
import tempfile
import subprocess
import threading
import time
from logging.handlers import RotatingFileHandler
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
try:
import MySQLdb
except ImportError as exc:
print('ERROR: missing the mysql python module please run:')
print('pip install -r requirements.txt')
print('ERROR: %s' % exc)
sys.exit(2)
logger = logging.getLogger(__name__)

# Logging functions ########################################################

FORMATTER = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')


def logger_get_console_handler():
    """Build a console StreamHandler, preferring stdout and falling back to stderr.

    Returns the configured handler, or False when neither stream is usable.
    """
    # Try stdout first; only on failure fall back to stderr.
    for stream, failure_msg in ((sys.stdout, 'Cannot log to stdout, trying stderr. Message %s'),
                                (sys.stderr, 'Cannot log to stderr neither. Message %s')):
        try:
            handler = logging.StreamHandler(stream)
        except OSError as exc:
            print(failure_msg % exc)
            continue
        handler.setFormatter(FORMATTER)
        return handler
    return False
def logger_get_file_handler(log_file):
    """Create a rotating file handler for log_file.

    Falls back to a log file in the system temp directory when log_file
    cannot be opened. Always returns a 2-tuple (handler, err_output):
    handler is a RotatingFileHandler on success or False on total failure,
    err_output describes any fallback that happened (None when clean).

    Bug fix: the original returned a bare False on total failure, which
    crashed callers that unpack the expected 2-tuple.
    """
    err_output = None
    try:
        file_handler = RotatingFileHandler(log_file, mode='a', encoding='utf-8', maxBytes=1024000, backupCount=3)
    except OSError as exc:
        try:
            print('Cannot create logfile. Trying to obtain temporary log file.\nMessage: %s' % exc)
            err_output = str(exc)
            temp_log_file = tempfile.gettempdir() + os.sep + __name__ + '.log'
            print('Trying temporary log file in ' + temp_log_file)
            file_handler = RotatingFileHandler(temp_log_file, mode='a', encoding='utf-8', maxBytes=1000000,
                                               backupCount=1)
            file_handler.setFormatter(FORMATTER)
            err_output += '\nUsing [%s]' % temp_log_file
            return file_handler, err_output
        except OSError as exc:
            print('Cannot create temporary log file either. Will not log to file. Message: %s' % exc)
            # Keep the tuple shape so callers can always unpack safely.
            return False, err_output
    else:
        file_handler.setFormatter(FORMATTER)
        return file_handler, err_output
def logger_get_logger(log_file=None, temp_log_file=None, debug=False):
    """Configure and return the root logger.

    Attaches a console handler and, when given, rotating file handlers for
    log_file and/or temp_log_file. An existing temp_log_file is removed
    first so each run starts with a fresh temporary log. debug=True selects
    DEBUG level, otherwise INFO.
    """
    # If a name is given to getLogger, then modules can't log to the root logger
    _logger = logging.getLogger()
    if debug is True:
        _logger.setLevel(logging.DEBUG)
    else:
        _logger.setLevel(logging.INFO)
    console_handler = logger_get_console_handler()
    if console_handler:
        _logger.addHandler(console_handler)
    if log_file is not None:
        file_handler, err_output = logger_get_file_handler(log_file)
        if file_handler:
            _logger.addHandler(file_handler)
            _logger.propagate = False
            if err_output is not None:
                print(err_output)
                _logger.warning('Failed to use log file [%s], %s.', log_file, err_output)
    if temp_log_file is not None:
        if os.path.isfile(temp_log_file):
            try:
                os.remove(temp_log_file)
            except OSError:
                logger.warning('Cannot remove temp log file [%s].' % temp_log_file)
        file_handler, err_output = logger_get_file_handler(temp_log_file)
        if file_handler:
            _logger.addHandler(file_handler)
            _logger.propagate = False
            if err_output is not None:
                print(err_output)
                # Bug fix: report the file actually involved (was log_file).
                _logger.warning('Failed to use log file [%s], %s.', temp_log_file, err_output)
    return _logger
# Generic functions ########################################################
def check_for_file(file):
    """Exit with status 2 (after logging) unless *file* can be opened for reading."""
    try:
        open(file).close()
    except IOError as exc:
        logger.error('Oh dear... %s does not seem readable' % file)
        logger.debug('ERROR:', exc_info=True)
        sys.exit(2)
def command_runner(command, valid_exit_codes=None, timeout=30, shell=False, decoder='utf-8'):
    """
    command_runner 2019103101

    Run a system command and return (exit_code, output).

    Whenever we can, we need to avoid shell=True in order to preserve better security.
    Runs system command, returns exit code and stdout/stderr output, and logs output on error.
    valid_exit_codes is a list of codes that don't trigger an error (defaults to [0]).
    Returns (None, message) when the command cannot be executed at all or times out.
    """
    try:
        # universal_newlines=True makes netstat command fail under windows
        # decoder may be unicode_escape for dos commands or utf-8 for powershell
        # (python2 fallback branch removed: this library is python3-only)
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell,
                                         timeout=timeout, universal_newlines=False)
        output = output.decode(decoder, errors='ignore')
    except subprocess.CalledProcessError as exc:
        exit_code = exc.returncode
        try:
            output = exc.output
            try:
                output = output.decode(decoder, errors='ignore')
            except Exception as subexc:
                logger.debug(subexc, exc_info=True)
                logger.debug('Cannot properly decode error. Text is %s' % output)
        except Exception:
            output = "command_runner: Could not obtain output from command."
        # Bug fix: the original `exit_code in valid_exit_codes if ... else [0]`
        # parsed as `(exit_code in valid_exit_codes) if ... else [0]`, so with
        # the default valid_exit_codes=None the condition was the truthy [0]
        # and every failing command was treated as a valid exit.
        if exit_code in (valid_exit_codes if valid_exit_codes is not None else [0]):
            logger.debug('Command [%s] returned with exit code [%s]. Command output was:' % (command, exit_code))
            if output:
                logger.debug(output)
            return exc.returncode, output
        else:
            logger.error('Command [%s] failed with exit code [%s]. Command output was:' %
                         (command, exc.returncode))
            logger.error(output)
            return exc.returncode, output
    # OSError if not a valid executable
    except OSError as exc:
        logger.error('Command [%s] returned:\n%s.' % (command, exc))
        return None, exc
    except subprocess.TimeoutExpired:
        logger.error('Timeout [%s seconds] expired for command [%s] execution.' % (timeout, command))
        return None, 'Timeout of %s seconds expired.' % timeout
    else:
        logger.debug('Command [%s] returned with exit code [0]. Command output was:' % command)
        #if output:
        #    logger.debug(output)
        return 0, output
# Config functions #########################################################
def get_config_data(install_dir):
    """Return the LibreNMS configuration dump produced by config_to_json.php."""
    # Only the command output matters here; the exit code is ignored.
    return command_runner(['/usr/bin/env', 'php', '%s/config_to_json.php' % install_dir])[1]
# Database functions #######################################################
def db_open(db_socket, db_server, db_port, db_username, db_password, db_dbname):
    """Open and return a MySQLdb connection; print the error and exit(2) on failure."""
    # A non-empty db_socket selects a unix-socket connection, otherwise TCP host:port.
    conn_args = dict(host=db_server, user=db_username, passwd=db_password, db=db_dbname)
    if db_socket:
        conn_args['unix_socket'] = db_socket
    else:
        conn_args['port'] = db_port
    try:
        return MySQLdb.connect(**conn_args)
    except Exception as dbexc:
        print('ERROR: Could not connect to MySQL database!')
        print('ERROR: %s' % dbexc)
        sys.exit(2)
class DB:
    """MySQL connection wrapper shared between worker threads.

    A single lock serialises access to the underlying connection, and both
    connect() and query() retry forever on MySQL operational errors so that
    callers never observe transient connectivity failures.
    """

    # Class-level default; rebound to a live connection by connect().
    conn = None

    def __init__(self, db_socket, db_server, db_port, db_username, db_password, db_dbname):
        # Connection parameters are stored so connect() can re-dial at any time.
        # NOTE(review): db_socket is stored but never used by connect() — confirm intended.
        self.db_socket = db_socket
        self.db_server = db_server
        self.db_port = db_port
        self.db_username = db_username
        self.db_password = db_password
        self.db_dbname = db_dbname
        # Guards self.conn: only one thread may use the connection at a time.
        self.in_use = threading.Lock()
        self.connect()

    def connect(self):
        """(Re)establish the MySQL connection, retrying every 0.5s until it succeeds."""
        self.in_use.acquire(True)
        while True:
            try:
                # Best effort: drop any half-dead previous connection first.
                self.conn.close()
            except:
                pass
            try:
                # A port of 0 means connect without an explicit TCP port,
                # otherwise connect via host:port.
                if self.db_port == 0:
                    self.conn = MySQLdb.connect(host=self.db_server, user=self.db_username, passwd=self.db_password, db=self.db_dbname)
                else:
                    self.conn = MySQLdb.connect(host=self.db_server, port=self.db_port, user=self.db_username, passwd=self.db_password,
                                                db=self.db_dbname)
                break
            except (AttributeError, MySQLdb.OperationalError):
                logger.warning('WARNING: MySQL Error, reconnecting.')
                time.sleep(.5)
        # Auto-commit each statement and enable automatic ping/reconnect.
        self.conn.autocommit(True)
        self.conn.ping(True)
        self.in_use.release()

    def query(self, sql):
        """Execute sql and return all rows, reconnecting and retrying on MySQL errors."""
        self.in_use.acquire(True)
        while True:
            try:
                cursor = self.conn.cursor()
                cursor.execute(sql)
                ret = cursor.fetchall()
                cursor.close()
                self.in_use.release()
                return ret
            except (AttributeError, MySQLdb.OperationalError):
                # Connection-level failure: release the lock (connect() re-acquires
                # it) and retry the same query on a fresh connection.
                logger.warning('WARNING: MySQL Operational Error during query, reconnecting.')
                self.in_use.release()
                self.connect()
            except (AttributeError, MySQLdb.ProgrammingError):
                # NOTE(review): this retries the identical SQL in a loop — a真
                # permanent programming error would spin forever; confirm intended.
                logger.warning('WARNING: MySQL Programming Error during query, attempting query again.')
                cursor.close()

View File

@@ -1,4 +1,4 @@
#! /usr/bin/env python2
#! /usr/bin/env python3
"""
discovery-wrapper A small tool which wraps around discovery and tries to
guide the discovery process with a more modern approach with a
@@ -7,7 +7,8 @@
Based on the original version of poller-wrapper.py by Job Snijders
Author: Neil Lathwood <neil@librenms.org>
Date: Sep 2016
Orsiris de Jong <contact@netpower.fr>
Date: Oct 2019
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
@@ -15,6 +16,10 @@
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -31,95 +36,34 @@
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
import LibreNMS.library as LNMS
try:
import json
import os
import Queue
import queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except:
print "ERROR: missing one or more of the following python modules:"
print "threading, Queue, sys, subprocess, time, os, json"
except ImportError as exc:
print('ERROR: missing one or more of the following python modules:')
print('threading, queue, sys, subprocess, time, os, json')
print('ERROR: %s' % exc)
sys.exit(2)
try:
import MySQLdb
except:
print "ERROR: missing the mysql python module:"
print "On ubuntu: apt-get install python-mysqldb"
print "On FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean"
sys.exit(2)
"""
Fetch configuration details from the config_to_json.php script
"""
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
def get_config_data():
config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % install_dir]
try:
proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except:
print "ERROR: Could not execute: %s" % config_cmd
sys.exit(2)
return proc.communicate()[0]
try:
with open(config_file) as f:
pass
except IOError as e:
print "ERROR: Oh dear... %s does not seem readable" % config_file
sys.exit(2)
try:
config = json.loads(get_config_data())
except:
print "ERROR: Could not load or parse configuration, are PATHs correct?"
sys.exit(2)
discovery_path = config['install_dir'] + '/discovery.php'
log_dir = config['log_dir']
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
if config['db_socket']:
db_server = config['db_host']
db_socket = config['db_socket']
else:
db_server = config['db_host']
db_socket = None
db_dbname = config['db_name']
def db_open():
try:
if db_socket:
db = MySQLdb.connect(host=db_server, unix_socket=db_socket, user=db_username, passwd=db_password, db=db_dbname)
else:
db = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
return db
except:
print "ERROR: Could not connect to MySQL database!"
sys.exit(2)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
discovery_group = str(config['distributed_poller_group'])
else:
discovery_group = False
APP_NAME = "discovery_wrapper"
LOG_FILE = "logs/" + APP_NAME + ".log"
_DEBUG = False
distdisco = False
real_duration = 0
discovered_devices = 0
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC0
def memc_alive():
try:
global memc
@@ -142,98 +86,8 @@ def memc_touch(key, time):
except:
pass
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get("discovery.master")) == config['distributed_poller_name']:
print "This system is already joined as the discovery master."
sys.exit(2)
if memc_alive():
if memc.get("discovery.master") is None:
print "Registered as Master"
memc.set("discovery.master", config['distributed_poller_name'], 30)
memc.set("discovery.nodes", 0, 3600)
IsNode = False
else:
print "Registered as Node joining Master %s" % memc.get("discovery.master")
IsNode = True
memc.incr("discovery.nodes")
distdisco = True
else:
print "Could not connect to memcached, disabling distributed discovery."
distdisco = False
IsNode = False
except SystemExit:
raise
except ImportError:
print "ERROR: missing memcache python module:"
print "On deb systems: apt-get install python-memcache"
print "On other systems: easy_install python-memcached"
print "Disabling distributed discovery."
distdisco = False
else:
distdisco = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
discovered_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 1
"""
usage = "usage: %prog [options] <workers> (Default: 1 Do not set too high)"
description = "Spawn multiple discovery.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 1
devices_list = []
"""
This query specificly orders the results depending on the last_discovered_timetaken variable
Because this way, we put the devices likely to be slow, in the top of the queue
thus greatening our chances of completing _all_ the work in exactly the time it takes to
discover the slowest device! cool stuff he
"""
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
if discovery_group is not False:
query = "select device_id from devices where poller_group IN(" + discovery_group + ") and disabled = 0 order by last_polled_timetaken desc"
else:
query = "select device_id from devices where disabled = 0 order by last_polled_timetaken desc"
# EOC2
db = db_open()
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
if distdisco and not IsNode:
query = "select max(device_id),min(device_id) from devices"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0]
minlocks = devices[0][1]
# EOC3
db.close()
# EOC0
"""
A separate queue and a single worker for printing information to the screen prevents
@@ -247,7 +101,7 @@ db.close()
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
global IsNode
global distdisco
if distdisco:
@@ -255,11 +109,11 @@ def printworker():
memc_touch('discovery.master', 30)
nodes = memc.get('discovery.nodes')
if nodes is None and not memc_alive():
print "WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly."
print("WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly.")
distdisco = False
nodes = nodeso
if nodes is not nodeso:
print "INFO: %s Node(s) Total" % (nodes)
print("INFO: %s Node(s) Total" % (nodes))
nodeso = nodes
else:
memc_touch('discovery.nodes', 30)
@@ -274,7 +128,7 @@ def printworker():
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
# EOC4
global real_duration
global per_device_duration
global discovered_devices
@@ -282,11 +136,12 @@ def printworker():
per_device_duration[device_id] = elapsed_time
discovered_devices += 1
if elapsed_time < 300:
print "INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print("INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
else:
print "WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print("WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
print_queue.task_done()
"""
This class will fork off single instances of the discovery.php process, record
how long it takes, and push the resulting reports to the printer queue
@@ -296,24 +151,25 @@ def printworker():
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
if not distdisco or memc.get('discovery.device.' + str(device_id)) is None:
if distdisco:
result = memc.add('discovery.device.' + str(device_id), config['distributed_poller_name'], 300)
if not result:
print "This device (%s) appears to be being discovered by another discovery node" % (device_id)
print("This device (%s) appears to be being discovered by another discovery node" % (device_id))
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print "Lost Memcached, Not discovering Device %s as Node. Master will discover it." % device_id
print("Lost Memcached, Not discovering Device %s as Node. Master will discover it." % device_id)
poll_queue.task_done()
continue
# EOC5
# EOC5
try:
start_time = time.time()
output = "-d >> %s/discover_device_%s.log" % (log_dir, device_id) if debug else ">> /dev/null"
command = "/usr/bin/env php %s -h %s %s 2>&1" % (discovery_path, device_id, output)
# TODO: Replace with command_runner
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
@@ -324,73 +180,208 @@ def poll_worker():
pass
poll_queue.task_done()
poll_queue = Queue.Queue()
print_queue = Queue.Queue()
print "INFO: starting the discovery at %s with %s threads, slowest devices first" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers)
if __name__ == '__main__':
logger = LNMS.logger_get_logger(LOG_FILE, debug=_DEBUG)
for device_id in devices_list:
poll_queue.put(device_id)
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
LNMS.check_for_file(config_file)
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
conf = LNMS.get_config_data(install_dir)
config = json.loads(conf)
except:
print("ERROR: Could not load or parse configuration, are PATHs correct?")
sys.exit(2)
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
discovery_path = config['install_dir'] + '/discovery.php'
log_dir = config['log_dir']
total_time = int(time.time() - s_time)
# TODO: Use LibreNMS.DB
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
print "INFO: discovery-wrapper polled %s devices in %s seconds with %s workers" % (discovered_devices, total_time, amount_of_workers)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
if distdisco or memc_alive():
master = memc.get("discovery.master")
if master == config['distributed_poller_name'] and not IsNode:
print "Wait for all discovery-nodes to finish"
nodes = memc.get("discovery.nodes")
while nodes > 0 and nodes is not None:
try:
time.sleep(1)
nodes = memc.get("discovery.nodes")
except:
pass
print "Clearing Locks"
x = minlocks
while x <= maxlocks:
memc.delete('discovery.device.' + str(x))
x = x + 1
print "%s Locks Cleared" % x
print "Clearing Nodes"
memc.delete("discovery.master")
memc.delete("discovery.nodes")
if config['db_socket']:
db_server = config['db_host']
db_socket = config['db_socket']
else:
memc.decr("discovery.nodes")
print "Finished %s." % time.time()
# EOC6
db_server = config['db_host']
db_socket = None
show_stopper = False
db_dbname = config['db_name']
if total_time > 21600:
print "WARNING: the process took more than 6 hours to finish, you need faster hardware or more threads"
print "INFO: in sequential style discovery the elapsed time would have been: %s seconds" % real_duration
for device in per_device_duration:
if per_device_duration[device] > 3600:
print "WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device])
show_stopper = True
if show_stopper:
print "ERROR: Some devices are taking more than 3600 seconds, the script cannot recommend you what to do."
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
discovery_group = str(config['distributed_poller_group'])
else:
recommend = int(total_time / 300.0 * amount_of_workers + 1)
print "WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend
discovery_group = False
sys.exit(2)
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get("discovery.master")) == config['distributed_poller_name']:
print("This system is already joined as the discovery master.")
sys.exit(2)
if memc_alive():
if memc.get("discovery.master") is None:
print("Registered as Master")
memc.set("discovery.master", config['distributed_poller_name'], 30)
memc.set("discovery.nodes", 0, 3600)
IsNode = False
else:
print("Registered as Node joining Master %s" % memc.get("discovery.master"))
IsNode = True
memc.incr("discovery.nodes")
distdisco = True
else:
print("Could not connect to memcached, disabling distributed discovery.")
distdisco = False
IsNode = False
except SystemExit:
raise
except ImportError:
print("ERROR: missing memcache python module:")
print("On deb systems: apt-get install python-memcache")
print("On other systems: easy_install python-memcached")
print("Disabling distributed discovery.")
distdisco = False
else:
distdisco = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
discovered_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 1
"""
usage = "usage: %prog [options] <workers> (Default: 1 Do not set too high)"
description = "Spawn multiple discovery.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 1
devices_list = []
"""
This query specifically orders the results depending on the last_discovered_timetaken variable.
This way, we put the devices likely to be slow at the top of the queue,
thus improving our chances of completing _all_ the work in exactly the time it takes to
discover the slowest device — cool stuff, huh?
"""
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
if discovery_group is not False:
query = "select device_id from devices where poller_group IN(" + discovery_group + ") and disabled = 0 order by last_polled_timetaken desc"
else:
query = "select device_id from devices where disabled = 0 order by last_polled_timetaken desc"
# EOC2
db = LNMS.db_open(db_socket, db_server, db_port, db_username, db_password, db_dbname)
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
if distdisco and not IsNode:
query = "select max(device_id),min(device_id) from devices"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0] or 0
minlocks = devices[0][1] or 0
# EOC3
db.close()
poll_queue = queue.Queue()
print_queue = queue.Queue()
print("INFO: starting the discovery at %s with %s threads, slowest devices first" % (
time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers))
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print("INFO: discovery-wrapper polled %s devices in %s seconds with %s workers" % (
discovered_devices, total_time, amount_of_workers))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
if distdisco or memc_alive():
master = memc.get("discovery.master")
if master == config['distributed_poller_name'] and not IsNode:
print("Wait for all discovery-nodes to finish")
nodes = memc.get("discovery.nodes")
while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = memc.get("discovery.nodes")
except:
pass
print("Clearing Locks")
x = minlocks
while x <= maxlocks:
memc.delete('discovery.device.' + str(x))
x = x + 1
print("%s Locks Cleared" % x)
print("Clearing Nodes")
memc.delete("discovery.master")
memc.delete("discovery.nodes")
else:
memc.decr("discovery.nodes")
print("Finished %s." % time.time())
# EOC6
show_stopper = False
if total_time > 21600:
print("WARNING: the process took more than 6 hours to finish, you need faster hardware or more threads")
print("INFO: in sequential style discovery the elapsed time would have been: %s seconds" % real_duration)
for device in per_device_duration:
if per_device_duration[device] > 3600:
print("WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device]))
show_stopper = True
if show_stopper:
print("ERROR: Some devices are taking more than 3600 seconds, the script cannot recommend you what to do.")
else:
recommend = int(total_time / 300.0 * amount_of_workers + 1)
print(
"WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend)
sys.exit(2)

View File

@@ -1,360 +0,0 @@
#! /usr/bin/env python2
"""
poller-service A service to wrap SNMP polling. It will poll up to $threads devices at a time, and will not re-poll
devices that have been polled within the last $poll_frequency seconds. It will prioritize devices based
on the last time polled. If resources are sufficient, this service should poll every device every
$poll_frequency seconds, but should gracefully degrade if resources are inefficient, polling devices as
frequently as possible. This service is based on Job Snijders' poller-wrapper.py.
Author: Clint Armstrong <clint@clintarmstrong.net>
Date: July 2015
License: BSD 2-Clause
Copyright (c) 2015, Clint Armstrong
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
import os
import subprocess
import sys
import threading
import time
import MySQLdb
import logging
import logging.handlers
from datetime import datetime, timedelta
from collections import namedtuple
log = logging.getLogger('poller-service')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter('poller-service: %(message)s')
handler = logging.handlers.SysLogHandler(address='/dev/log')
handler.setFormatter(formatter)
log.addHandler(handler)
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
log.info('INFO: Starting poller-service')
class DB:
conn = None
def __init__(self):
self.in_use = threading.Lock()
self.connect()
def connect(self):
self.in_use.acquire(True)
while True:
try:
self.conn.close()
except:
pass
try:
if db_port == 0:
self.conn = MySQLdb.connect(host=db_server, user=db_username, passwd=db_password, db=db_dbname)
else:
self.conn = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
break
except (AttributeError, MySQLdb.OperationalError):
log.warning('WARNING: MySQL Error, reconnecting.')
time.sleep(.5)
self.conn.autocommit(True)
self.conn.ping(True)
self.in_use.release()
def query(self, sql):
self.in_use.acquire(True)
while True:
try:
cursor = self.conn.cursor()
cursor.execute(sql)
ret = cursor.fetchall()
cursor.close()
self.in_use.release()
return ret
except (AttributeError, MySQLdb.OperationalError):
log.warning('WARNING: MySQL Operational Error during query, reconnecting.')
self.in_use.release()
self.connect()
except (AttributeError, MySQLdb.ProgrammingError):
log.warning('WARNING: MySQL Programming Error during query, attempting query again.')
cursor.close()
def get_config_data():
    """Dump the LibreNMS configuration as a JSON string.

    Runs config_to_json.php through PHP and returns its stdout decoded to
    str.  Exits the process (status 2) if PHP cannot be executed.
    """
    config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % install_dir]
    try:
        proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    except Exception as exc:
        # Was a bare except:, which would also swallow KeyboardInterrupt /
        # SystemExit and hide the real cause.  Catch Exception and log why
        # the spawn failed before giving up.
        log.critical("ERROR: Could not execute: %s" % config_cmd)
        log.critical("ERROR: %s" % exc)
        sys.exit(2)
    return proc.communicate()[0].decode()
# Sanity check: make sure config.php exists and is readable before asking
# PHP to translate it to JSON (the open is only a probe, f is unused).
try:
    with open(config_file) as f:
        pass
except IOError as e:
    log.critical("ERROR: Oh dear... %s does not seem readable" % config_file)
    sys.exit(2)
# Load the full LibreNMS configuration via config_to_json.php.
try:
    config = json.loads(get_config_data())
except:
    # Any failure here (PHP missing, malformed JSON, ...) is fatal.
    log.critical("ERROR: Could not load or parse configuration, are PATHs correct?")
    sys.exit(2)
# Resolve the service log level.  The config value may be numeric (e.g. 10)
# or a level name (e.g. "DEBUG"); default is INFO (20) when unset.
try:
    loglevel = int(config['poller_service_loglevel'])
except KeyError:
    loglevel = 20
except ValueError:
    # Not numeric: interpret it as a level name via getLevelName().
    loglevel = logging.getLevelName(config['poller_service_loglevel'])
try:
    log.setLevel(loglevel)
except ValueError:
    # getLevelName() returned "Level <name>" for an unknown name, which
    # setLevel() rejects; fall back to INFO.
    log.warning('ERROR: {0} is not a valid log level. If using python 3.4.0-3.4.1 you must specify loglevel by number'.format(str(loglevel)))
    log.setLevel(20)
# Paths to the PHP entry points this service drives.
poller_path = config['install_dir'] + '/poller.php'
discover_path = config['install_dir'] + '/discovery.php'
# Database connection settings; db_port == 0 tells DB.connect() to omit
# the port argument (socket-style connection).
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
if config['db_host'][:5].lower() == 'unix:':
    # Host given as a "unix:/path" socket URI.
    db_server = config['db_host']
    db_port = 0
elif config['db_socket']:
    # Explicit socket path configured; use it instead of host/port.
    db_server = config['db_socket']
    db_port = 0
else:
    db_server = config['db_host']
db_dbname = config['db_name']
def _tunable(key, fallback):
    """Return int(config[key]); use fallback when the key is absent or 0."""
    try:
        value = int(config[key])
    except KeyError:
        return fallback
    return value if value != 0 else fallback


# Worker-thread count and scheduling intervals, all overridable in config;
# zero or missing values fall back to the built-in defaults.
amount_of_workers = _tunable('poller_service_workers', 16)
poll_frequency = _tunable('poller_service_poll_frequency', 300)
discover_frequency = _tunable('poller_service_discover_frequency', 21600)
down_retry = _tunable('poller_service_down_retry', 60)
retry_query = _tunable('poller_service_retry_query', 1)

# Whether all worker threads share one DB connection instead of one each.
single_connection = (bool(config['poller_service_single_connection'])
                     if 'poller_service_single_connection' in config
                     else False)
# Shared connection used by the MySQL advisory-lock helpers below (and by
# the workers when single_connection is enabled).
db = DB()


def lockFree(lock, db=db):
    """Return True if the named MySQL advisory lock is held by nobody."""
    query = "SELECT IS_FREE_LOCK('{0}')".format(lock)
    return db.query(query)[0][0] == 1


def getLock(lock, db=db):
    """Try to acquire the named MySQL advisory lock without blocking."""
    query = "SELECT GET_LOCK('{0}', 0)".format(lock)
    return db.query(query)[0][0] == 1


def releaseLock(lock, db=db):
    """Release the named MySQL advisory lock; True if it was released.

    Bug fix: the query used to be executed twice (once into a discarded
    cursor, then again for the return value).  The second RELEASE_LOCK
    call always returned 0 because the first had already released the
    lock, so this function always reported failure.  Execute it once.
    """
    query = "SELECT RELEASE_LOCK('{0}')".format(lock)
    return db.query(query)[0][0] == 1
def sleep_until(timestamp):
    """Block until the given datetime; return immediately if it is past.

    Bug fix: the original used ``(timestamp - now).seconds``, which only
    counts the within-day remainder of the delta, so a target more than a
    day ahead slept far too little.  Use total_seconds() instead.
    """
    remaining = (timestamp - datetime.now()).total_seconds()
    if remaining > 0:
        time.sleep(remaining)
poller_group = ('and poller_group IN({0}) '
.format(str(config['distributed_poller_group'])) if 'distributed_poller_group' in config else '')
# Add last_polled and last_polled_timetaken so we can sort by the time the last poll started, with the goal
# of having each device complete a poll within the given time range.
dev_query = ('SELECT device_id, status, '
'CAST( '
' DATE_ADD( '
' DATE_SUB( '
' last_polled, '
' INTERVAL last_polled_timetaken SECOND '
' ), '
' INTERVAL {0} SECOND) '
' AS DATETIME '
') AS next_poll, '
'CAST( '
' DATE_ADD( '
' DATE_SUB( '
' last_discovered, '
' INTERVAL last_discovered_timetaken SECOND '
' ), '
' INTERVAL {1} SECOND) '
' AS DATETIME '
') as next_discovery '
'FROM devices WHERE '
'disabled = 0 '
'AND IS_FREE_LOCK(CONCAT("poll.", device_id)) '
'AND IS_FREE_LOCK(CONCAT("discovery.", device_id)) '
'AND IS_FREE_LOCK(CONCAT("queue.", device_id)) '
'AND ( last_poll_attempted < DATE_SUB(NOW(), INTERVAL {2} SECOND ) '
' OR last_poll_attempted IS NULL ) '
'{3} '
'ORDER BY next_poll asc '
'LIMIT 1 ').format(poll_frequency,
discover_frequency,
down_retry,
poller_group)
next_update = datetime.now() + timedelta(minutes=1)
devices_scanned = 0
dont_query_until = datetime.fromtimestamp(0)
def poll_worker():
    """Worker-thread body: repeatedly claim one device and poll/discover it.

    Runs forever.  Devices are claimed with MySQL advisory locks
    (queue.<id> while deciding, then poll.<id> / discovery.<id> while the
    PHP script runs) so several service instances can cooperate.
    """
    global dev_query
    global devices_scanned
    global dont_query_until
    global single_connection
    thread_id = threading.current_thread().name
    if single_connection:
        # Share the module-level DB connection (serialised by its lock).
        global db
    else:
        # One private connection per worker thread.
        db = DB()
    while True:
        if datetime.now() < dont_query_until:
            # Back off: a recent query found no eligible devices.
            time.sleep(1)
            continue
        dev_row = db.query(dev_query)
        if len(dev_row) < 1:
            # Nothing due; tell all workers to wait before re-querying.
            dont_query_until = datetime.now() + timedelta(seconds=retry_query)
            time.sleep(1)
            continue
        device_id, status, next_poll, next_discovery = dev_row[0]
        if not getLock('queue.{0}'.format(device_id), db):
            # Another worker claimed this device between query and lock.
            releaseLock('queue.{0}'.format(device_id), db)
            continue
        if next_poll and next_poll > datetime.now():
            # Claimed early; hold the queue lock and wait until it is due.
            log.debug('DEBUG: Thread {0} Sleeping until {1} before polling {2}'.format(thread_id, next_poll, device_id))
            sleep_until(next_poll)
        action = 'poll'
        if (not next_discovery or next_discovery < datetime.now()) and status == 1:
            # Discovery is due and the device is up: discover instead.
            action = 'discovery'
        log.debug('DEBUG: Thread {0} Starting {1} of device {2}'.format(thread_id, action, device_id))
        devices_scanned += 1
        db.query('UPDATE devices SET last_poll_attempted = NOW() WHERE device_id = {0}'.format(device_id))
        if not getLock('{0}.{1}'.format(action, device_id), db):
            # Lost the race for the action lock; give everything back.
            releaseLock('{0}.{1}'.format(action, device_id), db)
            releaseLock('queue.{0}'.format(device_id), db)
            continue
        # Action lock held: the queue lock is no longer needed.
        releaseLock('queue.{0}'.format(device_id), db)
        try:
            start_time = time.time()
            path = poller_path
            if action == 'discovery':
                path = discover_path
            command = "/usr/bin/env php %s -h %s >> /dev/null 2>&1" % (path, device_id)
            subprocess.check_call(command, shell=True)
            elapsed_time = int(time.time() - start_time)
            if elapsed_time < 300:
                log.debug("DEBUG: Thread {0} finished {1} of device {2} in {3} seconds".format(thread_id, action, device_id, elapsed_time))
            else:
                log.warning("WARNING: Thread {0} finished {1} of device {2} in {3} seconds".format(thread_id, action, device_id, elapsed_time))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # NOTE(review): a failed poller/discovery run is deliberately
            # ignored; the device will be retried after down_retry seconds.
            pass
        finally:
            # Always release the action lock, even if the PHP call failed.
            releaseLock('{0}.{1}'.format(action, device_id), db)
# Spawn the worker pool; daemon threads die with the main thread.
for i in range(0, amount_of_workers):
    t = threading.Thread(target=poll_worker)
    t.name = i
    t.daemon = True
    t.start()
# Main thread: once a minute, record poller throughput in the pollers table.
while True:
    sleep_until(next_update)
    seconds_taken = (datetime.now() - (next_update - timedelta(minutes=1))).seconds
    # NOTE(review): poller_name is interpolated straight into the SQL; a
    # name containing a quote would break (or inject into) this query.
    update_query = ('INSERT INTO pollers(poller_name, '
                    '                    last_polled, '
                    '                    devices, '
                    '                    time_taken) '
                    '  values("{0}", NOW(), "{1}", "{2}") '
                    'ON DUPLICATE KEY UPDATE '
                    '  last_polled=values(last_polled), '
                    '  devices=values(devices), '
                    '  time_taken=values(time_taken) ').format(config['distributed_poller_name'].strip(),
                                                               devices_scanned,
                                                               seconds_taken)
    try:
        db.query(update_query)
    except:
        # Most likely an outdated schema (missing pollers table/columns).
        log.critical('ERROR: MySQL query error. Is your schema up to date?')
        sys.exit(2)
    log.info('INFO: {0} devices scanned in the last minute'.format(devices_scanned))
    # Reset the per-minute counter and schedule the next stats flush.
    devices_scanned = 0
    next_update = datetime.now() + timedelta(minutes=1)

View File

@@ -1,11 +1,12 @@
#! /usr/bin/env python2
#! /usr/bin/env python3
"""
poller-wrapper A small tool which wraps around the poller and tries to
guide the polling process with a more modern approach with a
Queue and workers
Author: Job Snijders <job.snijders@atrato.com>
Date: Jan 2013
Authors: Job Snijders <job.snijders@atrato.com>
Orsiris de Jong <contact@netpower.fr>
Date: Oct 2019
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
@@ -13,103 +14,48 @@
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 2.7.3 / PHP 5.3.10-1ubuntu3.4 / Ubuntu 12.04 LTS
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8.0
License: To the extent possible under law, Job Snijders has waived all
copyright and related or neighboring rights to this script.
This script has been put into the Public Domain. This work is
published from: The Netherlands.
"""
import LibreNMS.library as LNMS
try:
import json
import os
import Queue
import queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except:
print "ERROR: missing one or more of the following python modules:"
print "threading, Queue, sys, subprocess, time, os, json"
except ImportError as exc:
print('ERROR: missing one or more of the following python modules:')
print('threading, queue, sys, subprocess, time, os, json')
print('ERROR: %s' % exc)
sys.exit(2)
try:
import MySQLdb
except:
print "ERROR: missing the mysql python module:"
print "On ubuntu: apt-get install python-mysqldb"
print "On FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean"
sys.exit(2)
APP_NAME = "poller_wrapper"
LOG_FILE = "logs/" + APP_NAME + ".log"
_DEBUG = False
distpoll = False
real_duration = 0
polled_devices = 0
"""
Fetch configuration details from the config_to_json.php script
Threading helper functions
"""
ob_install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = ob_install_dir + '/config.php'
def get_config_data():
config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % ob_install_dir]
try:
proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except:
print "ERROR: Could not execute: %s" % config_cmd
sys.exit(2)
return proc.communicate()[0]
try:
with open(config_file) as f:
pass
except IOError as e:
print "ERROR: Oh dear... %s does not seem readable" % config_file
sys.exit(2)
try:
config = json.loads(get_config_data())
except:
print "ERROR: Could not load or parse configuration, are PATHs correct?"
sys.exit(2)
poller_path = config['install_dir'] + '/poller.php'
log_dir = config['log_dir']
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
if config['db_socket']:
db_server = config['db_host']
db_socket = config['db_socket']
else:
db_server = config['db_host']
db_socket = None
db_dbname = config['db_name']
def db_open():
try:
if db_socket:
db = MySQLdb.connect(host=db_server, unix_socket=db_socket, user=db_username, passwd=db_password, db=db_dbname)
else:
db = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
return db
except:
print "ERROR: Could not connect to MySQL database!"
sys.exit(2)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
poller_group = str(config['distributed_poller_group'])
else:
poller_group = False
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC0
def memc_alive():
try:
global memc
@@ -136,110 +82,7 @@ def memc_touch(key, time):
def get_time_tag(step):
ts = int(time.time())
return ts - ts % step
if 'rrd' in config and 'step' in config['rrd']:
step = config['rrd']['step']
else:
step = 300
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
time_tag = str(get_time_tag(step))
master_tag = "poller.master." + time_tag
nodes_tag = "poller.nodes." + time_tag
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if memc_alive():
distpoll = True
memc.add(nodes_tag, 0, step)
if memc.add(master_tag, config['distributed_poller_name'], 10):
print "Registered as Master"
IsNode = False
else:
if str(memc.get(master_tag)) == config['distributed_poller_name']:
print "This system is already joined as the poller master."
sys.exit(2)
else:
print "Registered as Node joining Master %s" % memc.get(master_tag)
memc.incr(nodes_tag)
IsNode = True
else:
print "Could not connect to memcached, disabling distributed poller."
distpoll = False
IsNode = False
except SystemExit:
raise
except ImportError:
print "ERROR: missing memcache python module:"
print "On deb systems: apt-get install python-memcache"
print "On other systems: easy_install python-memcached"
print "Disabling distributed poller."
distpoll = False
else:
distpoll = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
polled_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 16
"""
usage = "usage: %prog [options] <workers> (Default: 16 (Do not set too high)"
description = "Spawn multiple poller.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 16
devices_list = []
"""
This query specificly orders the results depending on the last_polled_timetaken variable
Because this way, we put the devices likely to be slow, in the top of the queue
thus greatening our chances of completing _all_ the work in exactly the time it takes to
poll the slowest device! cool stuff he
"""
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
if poller_group is not False:
query = "select device_id from devices where poller_group IN(" + poller_group + ") and disabled = 0 order by last_polled_timetaken desc"
else:
query = "select device_id from devices where disabled = 0 order by last_polled_timetaken desc"
# EOC2
db = db_open()
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
if distpoll and not IsNode:
query = "select max(device_id),min(device_id) from devices"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0]
minlocks = devices[0][1]
# EOC3
db.close()
#EOC0
"""
A seperate queue and a single worker for printing information to the screen prevents
@@ -248,12 +91,10 @@ db.close()
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
global IsNode
global distpoll
if distpoll:
@@ -261,11 +102,11 @@ def printworker():
memc_touch(master_tag, 10)
nodes = memc.get(nodes_tag)
if nodes is None and not memc_alive():
print "WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly."
print("WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly.")
distpoll = False
nodes = nodeso
if nodes is not nodeso:
print "INFO: %s Node(s) Total" % (nodes)
print("INFO: %s Node(s) Total" % (nodes))
nodeso = nodes
else:
memc_touch(nodes_tag, 10)
@@ -280,7 +121,7 @@ def printworker():
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
# EOC4
global real_duration
global per_device_duration
global polled_devices
@@ -288,38 +129,39 @@ def printworker():
per_device_duration[device_id] = elapsed_time
polled_devices += 1
if elapsed_time < step:
print "INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print("INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
else:
print "WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print("WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
print_queue.task_done()
"""
This class will fork off single instances of the poller.php process, record
how long it takes, and push the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
if not distpoll or memc.get('poller.device.%s.%s'% (device_id, time_tag)) is None:
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
if not distpoll or memc.get('poller.device.%s.%s' % (device_id, time_tag)) is None:
if distpoll:
result = memc.add('poller.device.%s.%s'% (device_id, time_tag), config['distributed_poller_name'], step)
result = memc.add('poller.device.%s.%s' % (device_id, time_tag), config['distributed_poller_name'],
step)
if not result:
print "This device (%s) appears to be being polled by another poller" % (device_id)
print("This device (%s) appears to be being polled by another poller" % (device_id))
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print "Lost Memcached, Not polling Device %s as Node. Master will poll it." % device_id
print("Lost Memcached, Not polling Device %s as Node. Master will poll it." % device_id)
poll_queue.task_done()
continue
# EOC5
# EOC5
try:
start_time = time.time()
output = "-d >> %s/poll_device_%s.log" % (log_dir, device_id) if debug else ">> /dev/null"
command = "/usr/bin/env php %s -h %s %s 2>&1" % (poller_path, device_id, output)
# TODO: replace with command_runner
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
@@ -330,88 +172,238 @@ def poll_worker():
pass
poll_queue.task_done()
poll_queue = Queue.Queue()
print_queue = Queue.Queue()
print "INFO: starting the poller at %s with %s threads, slowest devices first" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers)
if __name__ == '__main__':
logger = LNMS.logger_get_logger(LOG_FILE, debug=_DEBUG)
for device_id in devices_list:
poll_queue.put(device_id)
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
LNMS.check_for_file(config_file)
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
conf = LNMS.get_config_data(install_dir)
config = json.loads(conf)
except:
print("ERROR: Could not load or parse configuration, are PATHs correct?")
sys.exit(2)
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
poller_path = config['install_dir'] + '/poller.php'
log_dir = config['log_dir']
total_time = int(time.time() - s_time)
# TODO: Use LibreNMS.DB
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
print "INFO: poller-wrapper polled %s devices in %s seconds with %s workers" % (polled_devices, total_time, amount_of_workers)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
if distpoll or memc_alive():
master = memc.get(master_tag)
if master == config['distributed_poller_name'] and not IsNode:
print "Wait for all poller-nodes to finish"
nodes = memc.get(nodes_tag)
while nodes > 0 and nodes is not None:
try:
time.sleep(1)
nodes = memc.get(nodes_tag)
except:
pass
print "Clearing Locks for %s" % time_tag
x = minlocks
while x <= maxlocks:
res = memc.delete('poller.device.%s.%s' % (x, time_tag))
x += 1
print "%s Locks Cleared" % x
print "Clearing Nodes"
memc.delete(master_tag)
memc.delete(nodes_tag)
if config['db_socket']:
db_server = config['db_host']
db_socket = config['db_socket']
else:
memc.decr(nodes_tag)
print "Finished %.3fs after interval start." % (time.time() - int(time_tag))
# EOC6
db_server = config['db_host']
db_socket = None
show_stopper = False
db_dbname = config['db_name']
db = db_open()
cursor = db.cursor()
query = "update pollers set last_polled=NOW(), devices='%d', time_taken='%d' where poller_name='%s'" % (polled_devices,
total_time, config['distributed_poller_name'])
response = cursor.execute(query)
if response == 1:
db.commit()
else:
query = "insert into pollers set poller_name='%s', last_polled=NOW(), devices='%d', time_taken='%d'" % (
config['distributed_poller_name'], polled_devices, total_time)
if 'rrd' in config and 'step' in config['rrd']:
step = config['rrd']['step']
else:
step = 300
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
poller_group = str(config['distributed_poller_group'])
else:
poller_group = False
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
time_tag = str(get_time_tag(step))
master_tag = "poller.master." + time_tag
nodes_tag = "poller.nodes." + time_tag
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get(master_tag)) == config['distributed_poller_name']:
print("This system is already joined as the poller master.")
sys.exit(2)
if memc_alive():
if memc.get(master_tag) is None:
print("Registered as Master")
memc.set(master_tag, config['distributed_poller_name'], 10)
memc.set(nodes_tag, 0, step)
IsNode = False
else:
print("Registered as Node joining Master %s" % memc.get(master_tag))
IsNode = True
memc.incr(nodes_tag)
distpoll = True
else:
print("Could not connect to memcached, disabling distributed poller.")
distpoll = False
IsNode = False
except SystemExit:
raise
except ImportError:
print("ERROR: missing memcache python module:")
print("On deb systems: apt-get install python-memcache")
print("On other systems: easy_install python-memcached")
print("Disabling distributed poller.")
distpoll = False
else:
distpoll = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
polled_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 16
"""
usage = "usage: %prog [options] <workers> (Default: 16 (Do not set too high)"
description = "Spawn multiple poller.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 16
devices_list = []
"""
This query specificly orders the results depending on the last_polled_timetaken variable
Because this way, we put the devices likely to be slow, in the top of the queue
thus greatening our chances of completing _all_ the work in exactly the time it takes to
poll the slowest device! cool stuff he
"""
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
if poller_group is not False:
query = 'select device_id from devices where poller_group IN(' + poller_group + \
') and disabled = 0 order by last_polled_timetaken desc'
else:
query = 'select device_id from devices where disabled = 0 order by last_polled_timetaken desc'
# EOC2
db = LNMS.db_open(db_socket, db_server, db_port, db_username, db_password, db_dbname)
cursor = db.cursor()
cursor.execute(query)
db.commit()
db.close()
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
if distpoll and not IsNode:
query = "select max(device_id),min(device_id) from devices"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0] or 0
minlocks = devices[0][1] or 0
# EOC3
db.close()
poll_queue = queue.Queue()
print_queue = queue.Queue()
if total_time > step:
print "WARNING: the process took more than %s seconds to finish, you need faster hardware or more threads" % step
print "INFO: in sequential style polling the elapsed time would have been: %s seconds" % real_duration
for device in per_device_duration:
if per_device_duration[device] > step:
print "WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device])
show_stopper = True
if show_stopper:
print "ERROR: Some devices are taking more than %s seconds, the script cannot recommend you what to do." % step
print(
"INFO: starting the poller at %s with %s threads, slowest devices first" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers))
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print("INFO: poller-wrapper polled %s devices in %s seconds with %s workers" % (
polled_devices, total_time, amount_of_workers))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
if distpoll or memc_alive():
master = memc.get(master_tag)
if master == config['distributed_poller_name'] and not IsNode:
print("Wait for all poller-nodes to finish")
nodes = memc.get(nodes_tag)
while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = memc.get(nodes_tag)
except:
pass
print("Clearing Locks for %s" % time_tag)
x = minlocks
while x <= maxlocks:
res = memc.delete('poller.device.%s.%s' % (x, time_tag))
x += 1
print("%s Locks Cleared" % x)
print("Clearing Nodes")
memc.delete(master_tag)
memc.delete(nodes_tag)
else:
memc.decr(nodes_tag)
print("Finished %.3fs after interval start." % (time.time() - int(time_tag)))
# EOC6
show_stopper = False
db = LNMS.db_open(db_socket, db_server, db_port, db_username, db_password, db_dbname)
cursor = db.cursor()
query = "update pollers set last_polled=NOW(), devices='%d', time_taken='%d' where poller_name='%s'" % (
polled_devices,
total_time,
config['distributed_poller_name'])
response = cursor.execute(query)
if response == 1:
db.commit()
else:
recommend = int(total_time / step * amount_of_workers + 1)
print "WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend
query = "insert into pollers set poller_name='%s', last_polled=NOW(), devices='%d', time_taken='%d'" % (
config['distributed_poller_name'], polled_devices, total_time)
cursor.execute(query)
db.commit()
db.close()
sys.exit(2)
if total_time > step:
print(
"WARNING: the process took more than %s seconds to finish, you need faster hardware or more threads" % step)
print("INFO: in sequential style polling the elapsed time would have been: %s seconds" % real_duration)
for device in per_device_duration:
if per_device_duration[device] > step:
print("WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device]))
show_stopper = True
if show_stopper:
print(
"ERROR: Some devices are taking more than %s seconds, the script cannot recommend you what to do." % step)
else:
recommend = int(total_time / step * amount_of_workers + 1)
print(
"WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend)
sys.exit(2)

View File

@@ -1,4 +1,4 @@
#! /usr/bin/env python2
#! /usr/bin/env python3
"""
services-wrapper A small tool which wraps around check-services.php and tries to
guide the services process with a more modern approach with a
@@ -7,7 +7,8 @@
Based on the original version of poller-wrapper.py by Job Snijders
Author: Neil Lathwood <neil@librenms.org>
Date: Oct 2016
Orsiris de Jong <contact@netpower.fr>
Date: Oct 2019
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
@@ -15,6 +16,10 @@
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -31,102 +36,45 @@
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
import LibreNMS.library as LNMS
try:
import json
import os
import Queue
import queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except:
print "ERROR: missing one or more of the following python modules:"
print "threading, Queue, sys, subprocess, time, os, json"
except ImportError as exc:
print('ERROR: missing one or more of the following python modules:')
print('threading, queue, sys, subprocess, time, os, json')
print('ERROR: %s' % exc)
sys.exit(2)
try:
import MySQLdb
except:
print "ERROR: missing the mysql python module:"
print "On ubuntu: apt-get install python-mysqldb"
print "On FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean"
sys.exit(2)
APP_NAME = "services_wrapper"
LOG_FILE = "logs/" + APP_NAME + ".log"
_DEBUG = False
servicedisco = False
real_duration = 0
service_devices = 0
"""
Fetch configuration details from the config_to_json.php script
Threading helper functions
"""
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
def get_config_data():
config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % install_dir]
try:
proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except:
print "ERROR: Could not execute: %s" % config_cmd
sys.exit(2)
return proc.communicate()[0]
try:
with open(config_file) as f:
pass
except IOError as e:
print "ERROR: Oh dear... %s does not seem readable" % config_file
sys.exit(2)
try:
config = json.loads(get_config_data())
except:
print "ERROR: Could not load or parse configuration, are PATHs correct?"
sys.exit(2)
service_path = config['install_dir'] + '/check-services.php'
log_dir = config['log_dir']
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
if config['db_socket']:
db_server = config['db_host']
db_socket = config['db_socket']
else:
db_server = config['db_host']
db_socket = None
db_dbname = config['db_name']
def db_open():
try:
if db_socket:
db = MySQLdb.connect(host=db_server, unix_socket=db_socket, user=db_username, passwd=db_password, db=db_dbname)
else:
db = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
return db
except:
print "ERROR: Could not connect to MySQL database!"
sys.exit(2)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
service_group = str(config['distributed_poller_group'])
else:
service_group = False
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC0
def memc_alive():
try:
global memc
key = str(uuid.uuid4())
memc.set('service.ping.' + key, key, 60)
if memc.get('service.ping.' + key) == key:
memc.delete('service.ping.' + key)
memc.set('poller.ping.' + key, key, 60)
if memc.get('poller.ping.' + key) == key:
memc.delete('poller.ping.' + key)
return True
else:
return False
@@ -142,92 +90,12 @@ def memc_touch(key, time):
except:
pass
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get("service.master")) == config['distributed_poller_name']:
print "This system is already joined as the service master."
sys.exit(2)
if memc_alive():
if memc.get("service.master") is None:
print "Registered as Master"
memc.set("service.master", config['distributed_poller_name'], 10)
memc.set("service.nodes", 0, 300)
IsNode = False
else:
print "Registered as Node joining Master %s" % memc.get("service.master")
IsNode = True
memc.incr("service.nodes")
servicedisco = True
else:
print "Could not connect to memcached, disabling distributed service checks."
servicedisco = False
IsNode = False
except SystemExit:
raise
except ImportError:
print "ERROR: missing memcache python module:"
print "On deb systems: apt-get install python-memcache"
print "On other systems: easy_install python-memcached"
print "Disabling distributed discovery."
servicedisco = False
else:
servicedisco = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
service_devices = 0
def get_time_tag(step):
    """Return the current UNIX timestamp rounded down to a multiple of *step* seconds."""
    return int(time.time()) // step * step
#EOC0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 1
"""
usage = "usage: %prog [options] <workers> (Default: 1 (Do not set too high)"
description = "Spawn multiple check-services.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
                  help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
# First positional argument is the worker count; anything missing or
# non-numeric falls back to a single worker.
try:
    amount_of_workers = int(args[0])
except (IndexError, ValueError):
    amount_of_workers = 1
devices_list = []
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
# Build the device-selection query; restrict to the configured poller
# group(s) when distributed polling is on.
# NOTE(review): service_group is concatenated straight into the SQL; it
# comes from config.php rather than user input, but parameterizing it
# would still be safer.
if service_group is not False:
    query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`poller_group` IN(" + service_group + ") AND `devices`.`disabled` = 0"
else:
    query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`disabled` = 0"
# EOC2
db = db_open()
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
    devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
# The master records the device-id range so it can clear per-device locks
# at the end of the run.
if servicedisco and not IsNode:
    query = "SELECT MAX(`device_id`), MIN(`device_id`) FROM `services`"
    cursor.execute(query)
    devices = cursor.fetchall()
    # NOTE(review): MAX/MIN return NULL when `services` is empty, leaving
    # maxlocks/minlocks as None -- TODO confirm the lock-clearing loop copes.
    maxlocks = devices[0][0]
    minlocks = devices[0][1]
# EOC3
db.close()
"""
A seperate queue and a single worker for printing information to the screen prevents
@@ -241,7 +109,7 @@ db.close()
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
global IsNode
global servicedisco
if servicedisco:
@@ -249,11 +117,11 @@ def printworker():
memc_touch('service.master', 10)
nodes = memc.get('service.nodes')
if nodes is None and not memc_alive():
print "WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly."
print("WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly.")
servicedisco = False
nodes = nodeso
if nodes is not nodeso:
print "INFO: %s Node(s) Total" % (nodes)
print("INFO: %s Node(s) Total" % (nodes))
nodeso = nodes
else:
memc_touch('service.nodes', 10)
@@ -268,7 +136,7 @@ def printworker():
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
# EOC4
global real_duration
global per_device_duration
global service_devices
@@ -276,9 +144,9 @@ def printworker():
per_device_duration[device_id] = elapsed_time
service_devices += 1
if elapsed_time < 300:
print "INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print("INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
else:
print "WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print("WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
print_queue.task_done()
"""
@@ -290,23 +158,24 @@ def printworker():
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
if not servicedisco or memc.get('service.device.' + str(device_id)) is None:
if servicedisco:
result = memc.add('service.device.' + str(device_id), config['distributed_poller_name'], 300)
if not result:
print "This device (%s) appears to be being service checked by another service node" % (device_id)
print("This device (%s) appears to be being service checked by another service node" % (device_id))
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print "Lost Memcached, Not service checking Device %s as Node. Master will check it." % device_id
print("Lost Memcached, Not service checking Device %s as Node. Master will check it." % device_id)
poll_queue.task_done()
continue
# EOC5
# EOC5
try:
start_time = time.time()
output = "-d >> %s/services_device_%s.log" % (log_dir, device_id) if debug else ">> /dev/null"
# TODO replace with command_runner
command = "/usr/bin/env php %s -h %s %s 2>&1" % (service_path, device_id, output)
subprocess.check_call(command, shell=True)
@@ -318,73 +187,202 @@ def poll_worker():
pass
poll_queue.task_done()
# NOTE(review): from here to the second "<<<EOC1" marker the removed
# Python 2 main body and the added Python 3 bootstrap are interleaved
# line-by-line by the diff, so statements from two different program
# versions alternate and this region is not valid Python as-is.
# Indentation below is best-effort reconstruction.
poll_queue = Queue.Queue()
print_queue = Queue.Queue()
print "INFO: starting the service check at %s with %s threads" % (time.strftime("%Y-%m-%d %H:%M:%S"),
                                                                  amount_of_workers)
if __name__ == '__main__':
    logger = LNMS.logger_get_logger(LOG_FILE, debug=_DEBUG)
for device_id in devices_list:
    poll_queue.put(device_id)
    install_dir = os.path.dirname(os.path.realpath(__file__))
    config_file = install_dir + '/config.php'
for i in range(amount_of_workers):
    t = threading.Thread(target=poll_worker)
    t.setDaemon(True)
    t.start()
    LNMS.check_for_file(config_file)
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
    conf = LNMS.get_config_data(install_dir)
    config = json.loads(conf)
except:
    print("ERROR: Could not load or parse configuration, are PATHs correct?")
    sys.exit(2)
try:
    poll_queue.join()
    print_queue.join()
except (KeyboardInterrupt, SystemExit):
    raise
service_path = config['install_dir'] + '/check-services.php'
log_dir = config['log_dir']
total_time = int(time.time() - s_time)
# TODO: Use LibreNMS.DB
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
print "INFO: services-wrapper checked %s devices in %s seconds with %s workers" % (service_devices, total_time, amount_of_workers)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
if servicedisco or memc_alive():
    master = memc.get("service.master")
    if master == config['distributed_poller_name'] and not IsNode:
        print "Wait for all service-nodes to finish"
        nodes = memc.get("service.nodes")
        # NOTE(review): pre-existing bug in this legacy copy -- "nodes > 0"
        # is evaluated before the None check, so a None from memcached
        # raises TypeError under Python 3; the replacement code later in the
        # file swaps the operands.
        while nodes > 0 and nodes is not None:
            try:
                time.sleep(1)
                nodes = memc.get("service.nodes")
            except:
                pass
        print "Clearing Locks"
        x = minlocks
        while x <= maxlocks:
            memc.delete('service.device.' + str(x))
            x = x + 1
        print "%s Locks Cleared" % x
        print "Clearing Nodes"
        memc.delete("service.master")
        memc.delete("service.nodes")
if config['db_socket']:
    db_server = config['db_host']
    db_socket = config['db_socket']
    # NOTE(review): the "else" below belongs to the legacy "if master ..."
    # branch above (node path), not to the db_socket check -- diff
    # interleaving has spliced the two versions together here.
    else:
        memc.decr("service.nodes")
        print "Finished %s." % time.time()
# EOC6
    db_server = config['db_host']
    db_socket = None
show_stopper = False
db_dbname = config['db_name']
if total_time > 300:
    print "WARNING: the process took more than 5 minutes to finish, you need faster hardware or more threads"
    print "INFO: in sequential style service checks the elapsed time would have been: %s seconds" % real_duration
    for device in per_device_duration:
        if per_device_duration[device] > 300:
            print "WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device])
            show_stopper = True
    if show_stopper:
        print "ERROR: Some devices are taking more than 300 seconds, the script cannot recommend you what to do."
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
    service_group = str(config['distributed_poller_group'])
else:
    recommend = int(total_time / 300.0 * amount_of_workers + 1)
    print "WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend
    service_group = False
    sys.exit(2)
# Join (or become) the distributed service-check cluster via memcached.
# Sets three globals consumed below: memc, IsNode and servicedisco.
if ('distributed_poller' in config and
        'distributed_poller_memcached_host' in config and
        'distributed_poller_memcached_port' in config and
        config['distributed_poller']):
    try:
        import memcache
        import uuid
        memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
                                str(config['distributed_poller_memcached_port'])])
        if str(memc.get("service.master")) == config['distributed_poller_name']:
            # Refuse to run twice on the host that already holds mastership.
            print("This system is already joined as the service master.")
            sys.exit(2)
        if memc_alive():
            if memc.get("service.master") is None:
                # First node up claims the master role (10s TTL keepalive).
                print("Registered as Master")
                memc.set("service.master", config['distributed_poller_name'], 10)
                memc.set("service.nodes", 0, 300)
                IsNode = False
            else:
                print("Registered as Node joining Master %s" % memc.get("service.master"))
                IsNode = True
                memc.incr("service.nodes")
            servicedisco = True
        else:
            print("Could not connect to memcached, disabling distributed service checks.")
            servicedisco = False
            IsNode = False
    except SystemExit:
        raise
    except ImportError:
        print("ERROR: missing memcache python module:")
        print("On deb systems: apt-get install python-memcache")
        print("On other systems: easy_install python-memcached")
        print("Disabling distributed discovery.")
        servicedisco = False
else:
    servicedisco = False
# EOC1
# Wall-clock bookkeeping for the summary printed at the end of the run.
s_time = time.time()
real_duration = 0
per_device_duration = {}
service_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 1
"""
usage = "usage: %prog [options] <workers> (Default: 1 (Do not set too high)"
description = "Spawn multiple check-services.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
                  help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
# First positional argument is the worker count; anything missing or
# non-numeric falls back to a single worker.
try:
    amount_of_workers = int(args[0])
except (IndexError, ValueError):
    amount_of_workers = 1
devices_list = []
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
# NOTE(review): service_group is concatenated straight into the SQL; it
# comes from config.php rather than user input, but parameterizing it
# would still be safer.
if service_group is not False:
    query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`poller_group` IN(" + service_group + ") AND `devices`.`disabled` = 0"
else:
    query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`disabled` = 0"
# EOC2
db = LNMS.db_open(db_socket, db_server, db_port, db_username, db_password, db_dbname)
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
    devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
# The master records the device-id range so it can clear per-device locks
# at the end; "or 0" guards against NULL MAX/MIN when `services` is empty.
if servicedisco and not IsNode:
    query = "SELECT MAX(`device_id`), MIN(`device_id`) FROM `services`"
    cursor.execute(query)
    devices = cursor.fetchall()
    maxlocks = devices[0][0] or 0
    minlocks = devices[0][1] or 0
# EOC3
db.close()
# Queues: poll_queue feeds device ids to the workers; print_queue funnels
# their progress messages through the single printer thread.
poll_queue = queue.Queue()
print_queue = queue.Queue()
print("INFO: starting the service check at %s with %s threads" % (time.strftime("%Y-%m-%d %H:%M:%S"),
                                                                  amount_of_workers))
for device_id in devices_list:
    poll_queue.put(device_id)
for i in range(amount_of_workers):
    t = threading.Thread(target=poll_worker)
    # FIX: Thread.setDaemon() is deprecated since Python 3.10; assign the
    # daemon attribute instead.  Daemon threads let the process exit even
    # if a worker hangs.
    t.daemon = True
    t.start()
p = threading.Thread(target=printworker)
p.daemon = True
p.start()
try:
    # Block until every queued device has been checked and reported.
    poll_queue.join()
    print_queue.join()
except (KeyboardInterrupt, SystemExit):
    raise
total_time = int(time.time() - s_time)
print("INFO: services-wrapper checked %s devices in %s seconds with %s workers" % (service_devices, total_time, amount_of_workers))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
# Distributed cleanup: the master waits for every node to finish, then
# clears the per-device locks and the cluster bookkeeping keys; a node just
# decrements the active-node counter.
if servicedisco or memc_alive():
    master = memc.get("service.master")
    if master == config['distributed_poller_name'] and not IsNode:
        print("Wait for all service-nodes to finish")
        nodes = memc.get("service.nodes")
        # None is tested before the comparison here, so a None from
        # memcached ends the wait instead of raising TypeError.
        while nodes is not None and nodes > 0:
            try:
                time.sleep(1)
                nodes = memc.get("service.nodes")
            except:
                # Deliberate best-effort: keep polling through memcached hiccups.
                pass
        print("Clearing Locks")
        x = minlocks
        while x <= maxlocks:
            memc.delete('service.device.' + str(x))
            x = x + 1
        print("%s Locks Cleared" % x)
        print("Clearing Nodes")
        memc.delete("service.master")
        memc.delete("service.nodes")
    else:
        memc.decr("service.nodes")
    print("Finished %s." % time.time())
# EOC6
show_stopper = False
# 300s total runtime is the "you need more workers" threshold.
if total_time > 300:
    print("WARNING: the process took more than 5 minutes to finish, you need faster hardware or more threads")
    print("INFO: in sequential style service checks the elapsed time would have been: %s seconds" % real_duration)
    for device in per_device_duration:
        if per_device_duration[device] > 300:
            print("WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device]))
            show_stopper = True
    if show_stopper:
        print("ERROR: Some devices are taking more than 300 seconds, the script cannot recommend you what to do.")
    else:
        recommend = int(total_time / 300.0 * amount_of_workers + 1)
        print(
            "WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend)
    sys.exit(2)