#!/usr/bin/env perl

=head1 DESCRIPTION

This is a SNMP extend for ZFS for use with LibreNMS.

For more information, see L<https://docs.librenms.org/#Extensions/Applications/#zfs>.
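
The output is the standard LibreNMS JSON extend envelope, with the gathered
stats under the data key. A shortened, purely illustrative example of the
general shape is below; the values are made up and the real data hash also
carries the pool list and the ARC/L2ARC stats.

    {
       "data" : { "arc_size" : 1234567, "online" : 1, "pools" : [ ... ] },
       "error" : 0,
       "errorString" : "",
       "version" : 3
    }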

=head1 SWITCHES

=head2 -p

Pretty print the JSON. If used with -b, this switch will be ignored.

=head2 -b

Gzip the output and convert it to Base64.

=head2 -s

Include the full output of `zpool status $pool` for each pool in the return.
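
Assuming the script is installed as /etc/snmp/zfs (as in the snmpd example
below), it can be run by hand to check the output, for example...

    /etc/snmp/zfs -p
    /etc/snmp/zfs -b -s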

=head1 SNMPD SETUP EXAMPLES

    extend zfs /etc/snmp/zfs
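
After updating snmpd.conf, snmpd needs to be restarted. The extend can then be
checked from the polling side with net-snmp, using something along the lines of
the following (host and community are placeholders)...

    snmpwalk -v2c -c public localhost NET-SNMP-EXTEND-MIB::nsExtendOutputFull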

=head1 REQUIREMENTS

The requirements may be installed via CPAN like below on Linux.

    apt-get install cpanminus zlib1g-dev
    cpanm JSON File::Slurp MIME::Base64 Gzip::Faster

Or on FreeBSD via pkg...

    pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 p5-Gzip-Faster

=cut
#Copyright (c) 2023, Zane C. Bowers-Hadley
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification,
#are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
#THE POSSIBILITY OF SUCH DAMAGE.
# Many thanks to Ben Rockwood, Jason J. Hellenthal, and Martin Matuska
# for zfs-stats and figuring out the math for all the stats
use strict;
use warnings;
use JSON;
use Getopt::Std;
use File::Slurp;
use MIME::Base64;
use Gzip::Faster;
$Getopt::Std::STANDARD_HELP_VERSION = 1;
sub main::VERSION_MESSAGE {
    print "FreeBSD ZFS v3 stats extend 0.0.1\n";
}

sub main::HELP_MESSAGE {
}
#this will be dumped to json at the end
my %tojson;
#gets the options
my %opts = ();
getopts( 'pbs', \%opts );
#process each pool and shove them into JSON
my $zpool_output = `/sbin/zpool list -pH`;
my @pools = split( /\n/, $zpool_output );
my $pools_int = 0;
$tojson{online} = 0;
$tojson{degraded} = 0;
$tojson{offline} = 0;
$tojson{faulted} = 0;
$tojson{health} = 1;
$tojson{unavail} = 0;
$tojson{removed} = 0;
$tojson{unknown} = 0;
my @toShoveIntoJSON;
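# Per-pool health is reported as a numeric code (0=ONLINE, 1=DEGRADED, 2=OFFLINE,
# 3=FAULTED, 4=UNAVAIL, 5=REMOVED, 6=unknown), while the counters above track how
# many pools are in each state.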
while ( defined( $pools[$pools_int] ) ) {
    my %newPool;
    my $pool = $pools[$pools_int];
    chomp($pool);

    # turn the whitespace-separated `zpool list` line into CSV, zeroing '-' fields
    # and stripping the % and x suffixes so everything is a plain number
    $pool =~ s/[\t\ ]+/,/g;
    $pool =~ s/\,\-\,/\,0\,/g;
    $pool =~ s/\%//g;
    $pool =~ s/\,([0-9\.]*)x\,/,$1,/;

    (
        $newPool{name},    $newPool{size},     $newPool{alloc}, $newPool{free},
        $newPool{ckpoint}, $newPool{expandsz}, $newPool{frag},  $newPool{cap},
        $newPool{dedup},   $newPool{health},   $newPool{altroot}
    ) = split( /\,/, $pool );
    if ( $opts{s} ) {
        $newPool{status} = `zpool status $newPool{name}`;
    }

    if ( $newPool{health} eq 'ONLINE' ) {
        $newPool{health} = 0;
        $tojson{online}++;
    }
    elsif ( $newPool{health} eq 'DEGRADED' ) {
        $newPool{health} = 1;
        $tojson{health} = 0;
        $tojson{degraded}++;
    }
    elsif ( $newPool{health} eq 'OFFLINE' ) {
        $newPool{health} = 2;
        $tojson{offline}++;
    }
    elsif ( $newPool{health} eq 'FAULTED' ) {
        $newPool{health} = 3;
        $tojson{health} = 0;
        $tojson{faulted}++;
    }
    elsif ( $newPool{health} eq 'UNAVAIL' ) {
        $newPool{health} = 4;
        $tojson{health} = 0;
        $tojson{unavail}++;
    }
    elsif ( $newPool{health} eq 'REMOVED' ) {
        $newPool{health} = 5;
        $tojson{health} = 0;
        $tojson{removed}++;
    }
    else {
        $newPool{health} = 6;
        $tojson{health} = 0;
        $tojson{unknown}++;
    }

    if ( $newPool{expandsz} eq '-' ) {
        $newPool{expandsz} = 0;
    }
    # grab the per-pool latency and queue depth stats and munge them into CSV
    # the same way as the `zpool list` output above
    my $iostat = `zpool iostat -l -q -p -H $newPool{name}`;
    chomp($iostat);
    $iostat =~ s/\t/,/g;
    $iostat =~ s/\,\-\,\-\,/\,0\,0\,/g;
    $iostat =~ s/\%//g;
    $iostat =~ s/\,([0-9\.]*)x\,/,$1,/;
    chomp($iostat);

    # the leading name, alloc, and free fields are not needed here
    my $parsed;
    (
        $parsed,                  $parsed,                 $parsed,                 $newPool{operations_r},
        $newPool{operations_w},   $newPool{bandwidth_r},   $newPool{bandwidth_w},   $newPool{total_wait_r},
        $newPool{total_wait_w},   $newPool{disk_wait_r},   $newPool{disk_wait_w},   $newPool{syncq_wait_r},
        $newPool{syncq_wait_w},   $newPool{asyncq_wait_r}, $newPool{asyncq_wait_w}, $newPool{scrub_wait},
        $newPool{trim_wait},      $newPool{syncq_read_p},  $newPool{syncq_read_a},  $newPool{syncq_write_p},
        $newPool{syncq_write_a},  $newPool{asyncq_read_p}, $newPool{asyncq_read_a}, $newPool{asyncq_write_p},
        $newPool{asyncq_write_a}, $newPool{scrubq_read_p}, $newPool{scrubq_read_a}, $newPool{trimq_write_p},
        $newPool{trimq_write_a},
    ) = split( /\,/, $iostat );
    # any remaining '-' values, other than altroot, mean not applicable, so zero them
    my @pool_keys = keys(%newPool);
    foreach my $item (@pool_keys) {
        if ( $item ne 'altroot' && $newPool{$item} eq '-' ) {
            $newPool{$item} = 0;
        }
    }

    push( @toShoveIntoJSON, \%newPool );
    $pools_int++;
}
$tojson{pools} = \@toShoveIntoJSON;
#
# OS specific bits
#
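# On FreeBSD the ARC stats are pulled via sysctl (kstat.zfs and vfs.zfs); on
# Linux they are read from /proc/spl/kstat/zfs/arcstats. Either way the values
# land in $stats_stuff, with arcstats entries keyed by their bare stat name.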
my $stats_stuff = {};
if ( $^O eq 'freebsd' ) {
    my @to_pull      = ( 'kstat.zfs', 'vfs.zfs', );
    my @sysctls_pull = `/sbin/sysctl -q @to_pull`;
    foreach my $stat (@sysctls_pull) {
        chomp($stat);
        my ( $var, $val ) = split( /:/, $stat, 2 );

        # If $val is empty, skip it. Likely a var with a newline before
        # the data so it is trying to "split" the data.
        if ( length $val ) {
            $val =~ s/^ //;
            $var =~ s/^.*\.arcstats\.//;
            $stats_stuff->{$var} = $val;
        }
    }
}
elsif ( $^O eq 'linux' ) {
    my @arcstats_lines = read_file('/proc/spl/kstat/zfs/arcstats');
    foreach my $line (@arcstats_lines) {
        chomp($line);
        my ( $stat, $int, $value ) = split( /[\t\ ]+/, $line, 3 );
        $stats_stuff->{$stat} = $value;
    }
}
# does not seem to exist for me, but some of these don't seem to be created till needed
if ( !defined( $stats_stuff->{"recycle_miss"} ) ) {
    $stats_stuff->{"recycle_miss"} = 0;
}
##
## ARC misc
##
$tojson{deleted} = $stats_stuff->{"deleted"};
$tojson{evict_skip} = $stats_stuff->{"evict_skip"};
$tojson{mutex_skip} = $stats_stuff->{'mutex_miss'};
$tojson{recycle_miss} = $stats_stuff->{"recycle_miss"};
##
## ARC size
##
my $target_size_percent = $stats_stuff->{"c"} / $stats_stuff->{"c_max"} * 100;
my $arc_size_percent = $stats_stuff->{"size"} / $stats_stuff->{"c_max"} * 100;
my $target_size_adaptive_ratio = $stats_stuff->{"c"} / $stats_stuff->{"c_max"};
my $min_size_percent = $stats_stuff->{"c_min"} / $stats_stuff->{"c_max"} * 100;
$tojson{arc_size} = $stats_stuff->{"size"};
$tojson{target_size_max} = $stats_stuff->{"c_max"};
$tojson{target_size_min} = $stats_stuff->{"c_min"};
$tojson{target_size} = $stats_stuff->{"c"};
$tojson{target_size_per} = $target_size_percent;
$tojson{arc_size_per} = $arc_size_percent;
$tojson{target_size_arat} = $target_size_adaptive_ratio;
$tojson{min_size_per} = $min_size_percent;
##
## ARC size breakdown
##
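# The breakdown below follows the zfs-stats math: "p" is the ARC's target size
# for the MRU portion, so the MFU portion is the ARC size (or target size) minus p.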
my $mfu_size;
my $recently_used_percent;
my $frequently_used_percent;
if ( $stats_stuff->{"size"} >= $stats_stuff->{"c"} ) {
$mfu_size = $stats_stuff->{"size"} - $stats_stuff->{"p"};
$recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"size"} * 100;
$frequently_used_percent = $mfu_size / $stats_stuff->{"size"} * 100;
}
else {
$mfu_size = $stats_stuff->{"c"} - $stats_stuff->{"p"};
$recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"c"} * 100;
$frequently_used_percent = $mfu_size / $stats_stuff->{"c"} * 100;
}
$tojson{p} = $stats_stuff->{"p"};
##
## ARC efficiency
##
my $arc_hits = $stats_stuff->{"hits"};
my $arc_misses = $stats_stuff->{"misses"};
my $demand_data_hits = $stats_stuff->{"demand_data_hits"};
my $demand_data_misses = $stats_stuff->{"demand_data_misses"};
my $demand_metadata_hits = $stats_stuff->{"demand_metadata_hits"};
my $demand_metadata_misses = $stats_stuff->{"demand_metadata_misses"};
my $mfu_ghost_hits = $stats_stuff->{"mfu_ghost_hits"};
my $mfu_hits = $stats_stuff->{"mfu_hits"};
my $mru_ghost_hits = $stats_stuff->{"mru_ghost_hits"};
my $mru_hits = $stats_stuff->{"mru_hits"};
my $prefetch_data_hits = $stats_stuff->{"prefetch_data_hits"};
my $prefetch_data_misses = $stats_stuff->{"prefetch_data_misses"};
my $prefetch_metadata_hits = $stats_stuff->{"prefetch_metadata_hits"};
my $prefetch_metadata_misses = $stats_stuff->{"prefetch_metadata_misses"};
##
## ARC efficiency, common
##
my $anon_hits = $arc_hits - ( $mfu_hits + $mru_hits + $mfu_ghost_hits + $mru_ghost_hits );
my $arc_accesses_total = $arc_hits + $arc_misses;
my $demand_data_total = $demand_data_hits + $demand_data_misses;
my $prefetch_data_total = $prefetch_data_hits + $prefetch_data_misses;
my $real_hits = $mfu_hits + $mru_hits;
my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100;
my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100;
my $actual_hit_percent = $real_hits / $arc_accesses_total * 100;
my $data_demand_percent = 0;
if ( $demand_data_total != 0 ) {
    $data_demand_percent = $demand_data_hits / $demand_data_total * 100;
}

my $data_prefetch_percent = 0;
if ( $prefetch_data_total != 0 ) {
    $data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100;
}

my $anon_hits_percent;
if ( $anon_hits != 0 ) {
    $anon_hits_percent = $anon_hits / $arc_hits * 100;
}
else {
    $anon_hits_percent = 0;
}
my $mru_percent = $mru_hits / $arc_hits * 100;
my $mfu_percent = $mfu_hits / $arc_hits * 100;
my $mru_ghost_percent = $mru_ghost_hits / $arc_hits * 100;
my $mfu_ghost_percent = $mfu_ghost_hits / $arc_hits * 100;
my $demand_hits_percent = $demand_data_hits / $arc_hits * 100;
my $prefetch_hits_percent = $prefetch_data_hits / $arc_hits * 100;
my $metadata_hits_percent = $demand_metadata_hits / $arc_hits * 100;
my $prefetch_metadata_hits_percent = $prefetch_metadata_hits / $arc_hits * 100;
my $demand_misses_percent = $demand_data_misses / $arc_misses * 100;
my $prefetch_misses_percent = $prefetch_data_misses / $arc_misses * 100;
my $metadata_misses_percent = $demand_metadata_misses / $arc_misses * 100;
my $prefetch_metadata_misses_percent = $prefetch_metadata_misses / $arc_misses * 100;
# ARC misc. efficiency stats
$tojson{arc_hits} = $arc_hits;
$tojson{arc_misses} = $arc_misses;
$tojson{demand_data_hits} = $demand_data_hits;
$tojson{demand_data_misses} = $demand_data_misses;
$tojson{demand_meta_hits} = $demand_metadata_hits;
$tojson{demand_meta_misses} = $demand_metadata_misses;
$tojson{mfu_ghost_hits} = $mfu_ghost_hits;
$tojson{mfu_hits} = $mfu_hits;
$tojson{mru_ghost_hits} = $mru_ghost_hits;
$tojson{mru_hits} = $mru_hits;
$tojson{pre_data_hits} = $prefetch_data_hits;
$tojson{pre_data_misses} = $prefetch_data_misses;
$tojson{pre_meta_hits} = $prefetch_metadata_hits;
$tojson{pre_meta_misses} = $prefetch_metadata_misses;
$tojson{anon_hits} = $anon_hits;
$tojson{arc_accesses_total} = $arc_accesses_total;
$tojson{demand_data_total} = $demand_data_total;
$tojson{pre_data_total} = $prefetch_data_total;
$tojson{real_hits} = $real_hits;
# ARC efficiency percents
$tojson{cache_hits_per} = $cache_hit_percent;
$tojson{cache_miss_per} = $cache_miss_percent;
$tojson{actual_hit_per} = $actual_hit_percent;
$tojson{data_demand_per} = $data_demand_percent;
$tojson{data_pre_per} = $data_prefetch_percent;
$tojson{anon_hits_per} = $anon_hits_percent;
$tojson{mru_per} = $mru_percent;
$tojson{mfu_per} = $mfu_percent;
$tojson{mru_ghost_per} = $mru_ghost_percent;
$tojson{mfu_ghost_per} = $mfu_ghost_percent;
$tojson{demand_hits_per} = $demand_hits_percent;
$tojson{pre_hits_per} = $prefetch_hits_percent;
$tojson{meta_hits_per} = $metadata_hits_percent;
$tojson{pre_meta_hits_per} = $prefetch_metadata_hits_percent;
$tojson{demand_misses_per} = $demand_misses_percent;
$tojson{pre_misses_per} = $prefetch_misses_percent;
$tojson{meta_misses_per} = $metadata_misses_percent;
$tojson{pre_meta_misses_per} = $prefetch_metadata_misses_percent;
$tojson{mfu_size} = $mfu_size;
$tojson{rec_used_per} = $recently_used_percent;
$tojson{freq_used_per} = $frequently_used_percent;
##
## pull in the l2 stats
##
my @l2_keys = grep( /l2\_/, keys( %{$stats_stuff} ) );
foreach my $item (@l2_keys) {
    $tojson{$item} = $stats_stuff->{$item};
}
$tojson{l2_errors} = $tojson{l2_writes_error} + $tojson{l2_cksum_bad} + $tojson{l2_io_error};
$tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses};
##
## print the results
##
my %head_hash;
$head_hash{'data'} = \%tojson;
$head_hash{'version'} = 3;
$head_hash{'error'} = 0;
$head_hash{'errorString'} = '';
my $j = JSON->new;
if ( $opts{p} && !$opts{b} ) {
    $j->pretty(1);
}

my $return_string = $j->encode( \%head_hash );

if ( !$opts{p} && !$opts{b} ) {
    print $return_string . "\n";
    exit 0;
}
elsif ( !$opts{b} ) {
    print $return_string;
    exit 0;
}
my $compressed = encode_base64( gzip($return_string) );
$compressed =~ s/\n//g;
$compressed = $compressed . "\n";
if ( length($compressed) > length($return_string) ) {
    print $return_string . "\n";
}
else {
    print $compressed;
}
exit 0;