#!/usr/bin/env perl

#Copyright (c) 2023, Zane C. Bowers-Hadley
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification,
#are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
#THE POSSIBILITY OF SUCH DAMAGE.

=for comment

Add this to snmpd.conf as below and restart snmpd.

    extend cape /etc/snmp/extends/cape

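Once that is in place, it can be sanity checked by running the script directly and
making sure it prints a single line of JSON, or by querying it through snmpd. The
snmpget example below is only an illustration; it assumes the extend name 'cape'
from above, that the NET-SNMP MIBs are loaded, and placeholder community/host values.

    /etc/snmp/extends/cape
    snmpget -v2c -c <community> <host> 'NET-SNMP-EXTEND-MIB::nsExtendOutputFull."cape"'
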
Supported command line options are as below.

    -i <ini>     Config INI file.
                 Default: /usr/local/etc/cape_extend.ini

The dependencies can be installed via...

    apt-get install libfile-readbackwards-perl libjson-perl libconfig-tiny-perl libdbi-perl libfile-slurp-perl libstatistics-lite-perl libdbd-pg-perl

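If the Debian packages are not an option, the same modules can likely be pulled in
from CPAN instead. The cpanm invocation below is an untested equivalent, listing the
modules this script uses plus DBD::Pg for the default PostgreSQL DSN.

    cpanm File::ReadBackwards JSON Config::Tiny DBI DBD::Pg File::Slurp Statistics::Lite
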
The default settings are...

    # DBI connection DSN
    dsn=dbi:Pg:dbname=cape

    # DB user
    user=cape

    # DB PW
    pass=

    # CAPEv2 cuckoo log file
    clog=/opt/CAPEv2/log/cuckoo.log

    # CAPEv2 process log file
    plog=/opt/CAPEv2/log/process.log

    # storage location
    storage=/opt/CAPEv2/storage

    # 0/1 for if it is okay for the process log to not exist
    # this enables it to work with cuckoo as well as CAPEv2
    mplogok=1

    # list of ignores
    ignores=/usr/local/etc/cape_extend.ignores

    # send errors along for inclusion in the event log
    sendErrors=1

    # send criticals along for inclusion in the event log
    sendCriticals=1

    # send warnings along for inclusion in the event log
    sendWarnings=1

    # don't use analysis_started_on, analysis_finished_on, processing_started_on,
    # processing_finished_on, signatures_started_on, signatures_finished_on,
    # reporting_started_on, or reporting_finished_on with the SQL statement
    #
    # This is specifically for supporting ancient cuckoo instances.
    cuckoosql=0

The ignores file will only be used if it exists. The format is as below.

    <ignore level> <pattern>

The ignore level will be lowercased. The separator between the level and
the regexp pattern is /[\ \t]+/. So if you want to ignore the two warnings
generated when VM traffic is dropped, you would use two lines such as the ones below.

    WARNING PCAP file does not exist at path
    WARNING Unable to Run Suricata: Pcap file

In 'conf/reporting.conf' for CAPE, 'litereport' will need to be enabled and
'keys_to_copy' should include 'signatures' and 'detections'.

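A minimal example of that section is below; the option names are taken from a stock
CAPEv2 reporting.conf and should be checked against the one shipped with your install.

    [litereport]
    enabled = yes
    keys_to_copy = signatures detections
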
=cut

#    # location of the IP cache to use
#    ip_cache=/var/cache/cape_extend_ip

#    # subnets not to count for IP accounting
#    ip_ignore=/usr/local/etc/cape_ip_ignore

use strict;
use warnings;
use Getopt::Long;
use File::ReadBackwards;
use JSON;
use Config::Tiny;
use DBI;
use Time::Piece;
use File::Slurp      qw(read_file);
use Statistics::Lite qw(:all);

sub version {
	print "cape v. 0.0.1\n";
}

sub help {
	&version;

	print '

-i <ini>     Config INI file.
             Default: /usr/local/etc/cape_extend.ini
';
}

# get the commandline options
my $help     = 0;
my $version  = 0;
my $ini_file = '/usr/local/etc/cape_extend.ini';
Getopt::Long::Configure('no_ignore_case');
Getopt::Long::Configure('bundling');
GetOptions(
	'version' => \$version,
	'v'       => \$version,
	'help'    => \$help,
	'h'       => \$help,
	'i=s'     => \$ini_file,
);

# print version or help if requested
if ($help) {
	&help;
	exit 42;
}
if ($version) {
	&version;
	exit 42;
}

# time window... only items from the last 300 seconds (5 minutes) are considered
my $current_time = time;
my $target_time  = $current_time - 300;

# the hash to return, printed as JSON at the end in the LibreNMS extend
# style of data plus error, errorString, and version
my $return_json = {
	data => {
		error                  => 0,
		errors                 => [],
		info                   => 0,
		debug                  => 0,
		warning                => 0,
		warnings               => [],
		critical               => 0,
		criticals              => [],
		banned                 => 0,
		pending                => 0,
		running                => 0,
		completed              => 0,
		distributed            => 0,
		reported               => 0,
		recovered              => 0,
		failed_analysis        => 0,
		failed_processing      => 0,
		failed_reporting       => 0,
		dropped_files          => 0,
		running_processes      => 0,
		api_calls              => 0,
		domains                => 0,
		signatures_total       => 0,
		signatures_alert       => 0,
		files_written          => 0,
		registry_keys_modified => 0,
		crash_issues           => 0,
		anti_issues            => 0,
		timedout               => 0,
		pkg_stats              => {},
		total_tasks            => 0,
		wrong_pkg              => 0,
		detections_stats       => {},
	},
	error       => 0,
	errorString => '',
	version     => 1,
};

# holds a list of reported tasks
my $reported = {};

my @stats_for = (
	'dropped_files',    'running_processes', 'api_calls',     'domains',
	'signatures_total', 'signatures_alert',  'files_written', 'registry_keys_modified',
	'crash_issues',     'anti_issues',       'malscore',      'severity',
	'confidence',       'weight'
);

my $ag_stats = {
	dropped_files          => [],
	running_processes      => [],
	api_calls              => [],
	domains                => [],
	signatures_total       => [],
	signatures_alert       => [],
	files_written          => [],
	registry_keys_modified => [],
	crash_issues           => [],
	anti_issues            => [],
	malscore               => [],
	severity               => [],
	confidence             => [],
	weight                 => [],
};

my $pkg_stats = {};

# used for checking if the level value is something we understand
my $level_check = { info => 1, debug => 1, error => 1, warning => 1, critical => 1 };

# read the config and put together the defaults
my $defaults = {
	dsn           => 'dbi:Pg:dbname=cape',
	user          => 'cape',
	pass          => '',
	clog          => '/opt/CAPEv2/log/cuckoo.log',
	plog          => '/opt/CAPEv2/log/process.log',
	storage       => '/opt/CAPEv2/storage',
	mplogok       => 1,
	ignores       => '/usr/local/etc/cape_extend.ignores',
	ip_cache      => '/var/cache/cape_extend_ip',
	ip_ignore     => '/usr/local/etc/cape_ip_ignore',
	sendErrors    => 1,
	sendCriticals => 1,
	sendWarnings  => 1,
	cuckoosql     => 0,
};
my $config = Config::Tiny->read( $ini_file, 'utf8' );
if ( !defined($config) ) {
	$config = $defaults;
}
else {
	$config = $config->{_};

	# reel in the defaults
	foreach my $default_key ( keys( %{$defaults} ) ) {
		if ( !defined( $config->{$default_key} ) ) {
			$config->{$default_key} = $defaults->{$default_key};
		}
	}
}

# read in the ignore file
my $ignores = { info => [], debug => [], error => [], warning => [], critical => [] };
if ( -f $config->{ignores} ) {
	my $ignore_raw   = read_file( $config->{ignores} );
	my @ignore_split = grep( !/^[\ \t]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $ignore_raw ) ) );
	foreach my $to_ignore (@ignore_split) {
		my ( $ignore_level, $pattern ) = split( /[\ \t]+/, $to_ignore, 2 );
		if ( defined($ignore_level) and defined($pattern) ) {
			$ignore_level = lc($ignore_level);
			push( @{ $ignores->{$ignore_level} }, $pattern );
		}
	}
}

# # process the IP ignore file
# my @ip_ignores;
# if ( -f $config->{ip_ignore} ) {
# 	my $ip_ignore_raw = read_file( $config->{ip_ignores} );
# 	@ip_ignores = grep( !/^[\ \t]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $ip_ignore_raw ) ) );
# }

# # process the IP cache file
# my %ip_cache;
# if ( -f $config->{ip_ignore} ) {
# 	my $ip_cache_raw   = read_file( $config->{ignores} );
# 	# IP,count,time
# 	# Time is unix time.
# 	my @ip_cache_split = grep( !/^[0-9a-fA-F\:\.]+\,[0-9]+\,[0-9]+$/, split( /\n/, $ip_cache_raw ) );
# 	foreach my $line (@ip_cache_split) {
# 		my ( $ip, $ip_count, $ip_time ) = split( /\,/ . $line );
# 		$ip_cache{$ip} = { count => $ip_count, time => $ip_time };
# 	}
# }

# put together the list of logs to read
my @logs;
if ( !-f $config->{clog} ) {
	$return_json->{error}       = 1;
	$return_json->{errorString} = '"' . $config->{clog} . '" does not exist';
}
else {
	push( @logs, $config->{clog} );
}
if ( !-f $config->{plog} && !$config->{mplogok} ) {
	$return_json->{error}       = 1;
	$return_json->{errorString} = '"' . $config->{plog} . '" does not exist';
}
else {
	push( @logs, $config->{plog} );
}

#
# process all the log lines, counting them
#

my $process_loop = 0;
my $process_logs = 1;
while ( $process_logs && defined( $logs[$process_loop] ) ) {
	my $log = $logs[$process_loop];

	my $bw;
	eval { $bw = File::ReadBackwards->new($log); };

	# Entries are read newest first and may span multiple lines, so lines are
	# prepended to $current_entry until it starts with a timestamp. The two
	# regexps below match entries without and with a leading [task id] field.
	my $continue      = 1;
	my $current_entry = '';
	while ( defined($bw) && defined( my $log_line = $bw->readline ) && $continue ) {
		$current_entry = $log_line . $current_entry;
		if (
			(
				$current_entry
				=~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/
			)
			|| ( $current_entry
				=~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/
			)
			)
		{
			my ( $date, $time, $log_task_id, $lib, $level, $entry );

			# parse it and blank it for when we get to the next one.
			if ( $current_entry
				=~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/
				)
			{
				( $date, $time, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 5 );
			}
			else {
				( $date, $time, $log_task_id, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 6 );
				$entry = $log_task_id . ': ' . $entry;
			}
			$current_entry = '';

			# chomp off the seconds place after the ,
			$time =~ s/\,.*//;
			my $t = Time::Piece->strptime( $date . 'T' . $time, '%Y-%m-%dT%H:%M:%S' );

			if ( $t->epoch <= $target_time ) {
				$continue = 0;
			}
			else {
				$level = lc($level);
				$level =~ s/\://;
				if ( defined( $level_check->{$level} ) ) {
					my $add_it     = 1;
					my $ignore_int = 0;
					foreach ( @{ $ignores->{$level} } ) {
						my $test = $_;
						if ( $entry =~ /$test/ ) {
							$add_it = 0;
						}
						$ignore_int++;
					}
					if ($add_it) {
						$return_json->{data}->{$level}++;
						if ( $level eq 'error' and $config->{sendErrors} ) {
							push( @{ $return_json->{data}->{errors} }, $entry );
						}
						elsif ( $level eq 'warning' and $config->{sendWarnings} ) {
							push( @{ $return_json->{data}->{warnings} }, $entry );
						}
						elsif ( $level eq 'critical' and $config->{sendCriticals} ) {
							push( @{ $return_json->{data}->{criticals} }, $entry );
						}
					}
					if ( $level eq 'warning' && $entry =~ /submitted\ the\ job\ with\ wrong\ package/ ) {
						$return_json->{data}->{wrong_pkg}++;
					}
				}
			}
		}
	}

	$process_loop++;
}

#
# put together query for getting the current tasks
#
my $query;
if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) {
	$query
		= "select id,status,package from tasks where ( status != 'pending' ) and "
		. "( added_on > FROM_UNIXTIME('"
		. $target_time
		. "')) or "
		. "( started_on > FROM_UNIXTIME('"
		. $target_time
		. "')) or "
		. "( completed_on > FROM_UNIXTIME('"
		. $target_time . "'));";
}
else {
	$query
		= "select id,status,package,dropped_files,running_processes,api_calls,domains,signatures_total,signatures_alert,files_written,registry_keys_modified,crash_issues,anti_issues,timedout from tasks where"
		. " (status != 'pending') and "
		. "  ( added_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
		. "( started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
		. "( completed_on > CURRENT_TIMESTAMP - interval '5 minutes' ) ";
	if ( !$config->{cuckoosql} ) {
		$query
			= $query
			. " or ( analysis_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
			. "(analysis_finished_on  > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
			. "( processing_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
			. "( processing_finished_on  > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
			. "( signatures_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
			. "( signatures_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
			. "( reporting_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or "
			. "( reporting_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' );";
	}
	else {
		$query = $query . ';';
	}
}

eval {
	my $dbh = DBI->connect( $config->{dsn}, $config->{user}, $config->{pass} ) || die($DBI::errstr);

	eval {
		my $sth_pending = $dbh->prepare("select * from tasks where status = 'pending'");
		$sth_pending->execute;
		$return_json->{data}{pending} = $sth_pending->rows;
	};

	my $sth = $dbh->prepare($query);
	$sth->execute;
	my $task_status;
	my $task_package;
	my $dropped_files;
	my $running_processes;
	my $api_calls;
	my $domains;
	my $signatures_total;
	my $signatures_alert;
	my $files_written;
	my $registry_keys_modified;
	my $crash_issues;
	my $anti_issues;
	my $timedout;
	my $task_id;
	#
	# MySQL is basically for old Cuckoo support.
	# CAPEv2 does not really play nice with it because of column issues
	#
	if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) {
		$sth->bind_columns( undef, \$task_id, \$task_status, \$task_package );
		while ( $sth->fetch ) {
			if ( defined( $return_json->{data}->{$task_status} ) ) {
				$return_json->{data}->{$task_status}++;
				$return_json->{data}->{total_tasks}++;
			}
		}
	}
	else {
		$sth->bind_columns(
			undef,              \$task_id,           \$task_status,   \$task_package,
			\$dropped_files,    \$running_processes, \$api_calls,     \$domains,
			\$signatures_total, \$signatures_alert,  \$files_written, \$registry_keys_modified,
			\$crash_issues,     \$anti_issues,       \$timedout
		);
		while ( $sth->fetch ) {
			if ( defined( $return_json->{data}->{$task_status} ) ) {
				$return_json->{data}->{$task_status}++;
				$return_json->{data}->{total_tasks}++;
			}

			if ( $task_status eq 'reported' ) {
				$reported->{$task_id} = {
					package                => $task_package,
					dropped_files          => $dropped_files,
					running_processes      => $running_processes,
					domains                => $domains,
					api_calls              => $api_calls,
					signatures_total       => $signatures_total,
					signatures_alert       => $signatures_alert,
					files_written          => $files_written,
					registry_keys_modified => $registry_keys_modified,
					crash_issue            => $crash_issues,
					anti_issues            => $anti_issues,
					timedout               => $timedout,
				};
			}

			if ( !defined($task_package) || $task_package eq '' ) {
				$task_package = 'generic';
			}

			if ( !defined($running_processes) ) {
				$running_processes = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{running_processes} += $running_processes;
				push( @{ $ag_stats->{running_processes} }, $running_processes );
			}

			if ( !defined($api_calls) ) {
				$api_calls = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{api_calls} += $api_calls;
				push( @{ $ag_stats->{api_calls} }, $api_calls );
			}

			if ( !defined($domains) ) {
				$domains = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{domains} += $domains;
				push( @{ $ag_stats->{domains} }, $domains );
			}

			if ( !defined($signatures_alert) ) {
				$signatures_alert = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{signatures_alert} += $signatures_alert;
				push( @{ $ag_stats->{signatures_alert} }, $signatures_alert );
			}

			if ( !defined($signatures_total) ) {
				$signatures_total = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{signatures_total} += $signatures_total;
				push( @{ $ag_stats->{signatures_total} }, $signatures_total );
			}

			if ( !defined($files_written) ) {
				$files_written = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{files_written} += $files_written;
				push( @{ $ag_stats->{files_written} }, $files_written );
			}

			if ( !defined($registry_keys_modified) ) {
				$registry_keys_modified = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{registry_keys_modified} += $registry_keys_modified;
				push( @{ $ag_stats->{registry_keys_modified} }, $registry_keys_modified );
			}

			if ( !defined($crash_issues) ) {
				$crash_issues = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{crash_issues} += $crash_issues;
				push( @{ $ag_stats->{crash_issues} }, $crash_issues );
			}

			if ( !defined($anti_issues) ) {
				$anti_issues = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{anti_issues} += $anti_issues;
				push( @{ $ag_stats->{anti_issues} }, $anti_issues );
			}

			if ( !defined($dropped_files) ) {
				$dropped_files = 0;
			}
			if ( $task_status eq 'reported' ) {
				$return_json->{data}->{dropped_files} += $dropped_files;
				push( @{ $ag_stats->{dropped_files} }, $dropped_files );
			}

			# put per package stats together
			if ( !defined( $return_json->{data}->{pkg_stats}->{$task_package} ) ) {
				$return_json->{data}->{pkg_stats}->{$task_package} = {
					dropped_files          => $dropped_files,
					running_processes      => $running_processes,
					api_calls              => $api_calls,
					domains                => $domains,
					signatures_total       => $signatures_total,
					signatures_alert       => $signatures_alert,
					files_written          => $files_written,
					registry_keys_modified => $registry_keys_modified,
					crash_issues           => $crash_issues,
					anti_issues            => $anti_issues,
					banned                 => 0,
					pending                => 0,
					running                => 0,
					completed              => 0,
					distributed            => 0,
					reported               => 0,
					recovered              => 0,
					failed_analysis        => 0,
					failed_processing      => 0,
					failed_reporting       => 0,
					tasks                  => 1,
				};
				$pkg_stats->{$task_package} = {
					dropped_files          => [$dropped_files],
					running_processes      => [$running_processes],
					api_calls              => [$api_calls],
					domains                => [$domains],
					signatures_total       => [$signatures_total],
					signatures_alert       => [$signatures_alert],
					files_written          => [$files_written],
					registry_keys_modified => [$registry_keys_modified],
					crash_issues           => [$crash_issues],
					anti_issues            => [$anti_issues],
					malscore               => [],
					confidence             => [],
					severity               => [],
				};
			}
			else {
				$return_json->{data}->{pkg_stats}->{$task_package}->{tasks}++;
				$return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files}     += $dropped_files;
				$return_json->{data}->{pkg_stats}->{$task_package}->{running_processes} += $running_processes;
				$return_json->{data}->{pkg_stats}->{$task_package}->{api_calls}         += $api_calls;
				$return_json->{data}->{pkg_stats}->{$task_package}->{domains}           += $domains;
				$return_json->{data}->{pkg_stats}->{$task_package}->{signatures_total}  += $signatures_total;
				$return_json->{data}->{pkg_stats}->{$task_package}->{signatures_alert}  += $signatures_alert;
				$return_json->{data}->{pkg_stats}->{$task_package}->{files_written}     += $files_written;
				$return_json->{data}->{pkg_stats}->{$task_package}->{registry_keys_modified}
					+= $registry_keys_modified;
				$return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues;
				$return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues}  += $anti_issues;

				push( @{ $pkg_stats->{$task_package}->{dropped_files} },          $dropped_files );
				push( @{ $pkg_stats->{$task_package}->{running_processes} },      $running_processes );
				push( @{ $pkg_stats->{$task_package}->{api_calls} },              $api_calls );
				push( @{ $pkg_stats->{$task_package}->{domains} },                $domains );
				push( @{ $pkg_stats->{$task_package}->{signatures_total} },       $signatures_total );
				push( @{ $pkg_stats->{$task_package}->{signatures_alert} },       $signatures_alert );
				push( @{ $pkg_stats->{$task_package}->{files_written} },          $files_written );
				push( @{ $pkg_stats->{$task_package}->{registry_keys_modified} }, $registry_keys_modified );
				push( @{ $pkg_stats->{$task_package}->{crash_issues} },           $crash_issues );
				push( @{ $pkg_stats->{$task_package}->{anti_issues} },            $anti_issues );
			}
			$return_json->{data}->{pkg_stats}->{$task_package}->{$task_status}++;

			# timedout is a Pg boolean, which DBD::Pg may hand back as 1/0 or t/f
			# rather than a Perl boolean, so match on the text of the value
			if ( defined($timedout) && $timedout =~ /^[Tt1]/ ) {
				$return_json->{data}->{timedout}++;
			}
		}
	}
};
if ($@) {
	$return_json->{error}       = 2;
	$return_json->{errorString} = $return_json->{errorString} . ' SQL error: ' . $@;
}

#
# put together the stats for the reported items
#
foreach my $task_id ( keys( %{$reported} ) ) {
	eval {
		my $report  = decode_json( read_file( $config->{storage} . '/analyses/' . $task_id . '/reports/lite.json' ) );
		my $package = $report->{info}{package};
		if ( defined( $report->{malscore} ) ) {
			push( @{ $ag_stats->{malscore} },            $report->{malscore} );
			push( @{ $pkg_stats->{$package}{malscore} }, $report->{malscore} );
		}

		my $sig_int = 0;
		while ( defined( $report->{signatures}[$sig_int] ) ) {
			if ( defined( $report->{signatures}[$sig_int]{confidence} ) ) {
				push( @{ $ag_stats->{confidence} },            $report->{signatures}[$sig_int]{confidence} );
				push( @{ $pkg_stats->{$package}{confidence} }, $report->{signatures}[$sig_int]{confidence} );
			}

			if ( defined( $report->{signatures}[$sig_int]{severity} ) ) {
				push( @{ $ag_stats->{severity} },            $report->{signatures}[$sig_int]{severity} );
				push( @{ $pkg_stats->{$package}{severity} }, $report->{signatures}[$sig_int]{severity} );
			}

			if ( defined( $report->{signatures}[$sig_int]{weight} ) ) {
				push( @{ $ag_stats->{weight} },            $report->{signatures}[$sig_int]{weight} );
				push( @{ $pkg_stats->{$package}{weight} }, $report->{signatures}[$sig_int]{weight} );
			}

			$sig_int++;
		}
	};
}

#
# compute the aggregate stats
#
foreach my $current_entry (@stats_for) {
	if ( $#{ $ag_stats->{$current_entry} } > 0 ) {
		$return_json->{data}{ 'min.' . $current_entry }    = min( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'max.' . $current_entry }    = max( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'range.' . $current_entry }  = range( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'mean.' . $current_entry }   = mean( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'median.' . $current_entry } = median( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'mode.' . $current_entry }   = mode( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'v.' . $current_entry }      = variance( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'sd.' . $current_entry }     = stddev( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'vp.' . $current_entry }     = variancep( @{ $ag_stats->{$current_entry} } );
		$return_json->{data}{ 'sdp.' . $current_entry }    = stddevp( @{ $ag_stats->{$current_entry} } );
	}
	else {
		$return_json->{data}{ 'min.' . $current_entry }    = 0;
		$return_json->{data}{ 'max.' . $current_entry }    = 0;
		$return_json->{data}{ 'range.' . $current_entry }  = 0;
		$return_json->{data}{ 'mean.' . $current_entry }   = 0;
		$return_json->{data}{ 'median.' . $current_entry } = 0;
		$return_json->{data}{ 'mode.' . $current_entry }   = 0;
		$return_json->{data}{ 'v.' . $current_entry }      = 0;
		$return_json->{data}{ 'sd.' . $current_entry }     = 0;
		$return_json->{data}{ 'vp.' . $current_entry }     = 0;
		$return_json->{data}{ 'sdp.' . $current_entry }    = 0;
	}

}

#
# compute the stats for each package
#
foreach my $current_pkg ( keys( %{$pkg_stats} ) ) {
	foreach my $current_entry (@stats_for) {
		if ( $#{ $pkg_stats->{$current_pkg}{$current_entry} } > 0 ) {
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'min.' . $current_entry }
				= min( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'max.' . $current_entry }
				= max( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'range.' . $current_entry }
				= range( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'mean.' . $current_entry }
				= mean( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'median.' . $current_entry }
				= median( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'mode.' . $current_entry }
				= mode( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'v.' . $current_entry }
				= variance( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'sd.' . $current_entry }
				= stddev( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'vp.' . $current_entry }
				= variancep( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'sdp.' . $current_entry }
				= stddevp( @{ $pkg_stats->{$current_pkg}{$current_entry} } );
		}
		else {
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'min.' . $current_entry }    = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'max.' . $current_entry }    = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'range.' . $current_entry }  = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'mean.' . $current_entry }   = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'median.' . $current_entry } = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'mode.' . $current_entry }   = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'v.' . $current_entry }      = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'sd.' . $current_entry }     = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'vp.' . $current_entry }     = 0;
			$return_json->{data}{pkg_stats}{$current_pkg}{ 'sdp.' . $current_entry }    = 0;
		}
	}
}

print encode_json($return_json) . "\n";