#!/usr/bin/env perl

=head1 NAME

zfs - LibreNMS JSON SNMP extend for gathering ZFS stats

=head1 VERSION

0.1.0

=head1 DESCRIPTION

For more information, see L<https://docs.librenms.org/Extensions/Applications/#zfs>.

=head1 SWITCHES

=head2 -p

Pretty print the JSON. If used with -b, this switch will be ignored.

=head2 -b

Gzip the output and convert it to Base64.

=head2 -s

Include the full output of `zpool status $pool` for each pool in the return.

=head1 SNMPD SETUP EXAMPLES

    extend zfs /etc/snmp/zfs

=head1 REQUIREMENTS

On Linux, the requirements may be installed via cpanminus like below.

    apt-get install cpanminus
    cpanm File::Slurp MIME::Base64 JSON

Or on FreeBSD via pkg...

    pkg install p5-JSON p5-File-Slurp p5-MIME-Base64

=cut

#Copyright (c) 2023, Zane C. Bowers-Hadley
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification,
#are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
#THE POSSIBILITY OF SUCH DAMAGE.

# Many thanks to Ben Rockwood, Jason J. Hellenthal, and Martin Matuska
# for zfs-stats and figuring out the math for all the stats
#
# Thanks to dlangille for pointing out the issues on FreeBSD 14 and to
# Bobzikwick for figuring out the fix in issues/501

use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Slurp;
use MIME::Base64;
use IO::Compress::Gzip qw(gzip $GzipError);
use Pod::Usage;

#$Getopt::Std::STANDARD_HELP_VERSION = 1;

sub main::VERSION_MESSAGE {
	pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, );
}

sub main::HELP_MESSAGE {
	pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, );
}

#this will be dumped to json at the end
my %tojson;

#gets the options
my %opts;
my $opts_p;
my $opts_b;
my $opts_s;
my $version;
my $help;
#getopts( 'pbs', \%opts );
GetOptions(
	p       => \$opts_p,
	b       => \$opts_b,
	s       => \$opts_s,
	v       => \$version,
	version => \$version,
	h       => \$help,
	help    => \$help,
);
$opts{p} = $opts_p;
$opts{b} = $opts_b;
$opts{s} = $opts_s;

if ($version) {
	pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, );
}

if ($help) {
	pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, );
}

#process each pool and shove them into JSON
my $zpool_output = `/sbin/zpool list -pH`;
my @pools = split( /\n/, $zpool_output );
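
# With -p, `zpool list` prints exact (parseable) numbers rather than
# human-readable ones, and -H drops the header and tab-separates the columns,
# so each pool arrives as one line like the following (hypothetical values):
#   tank	994662584320	366917324800	627745259520	-	-	4	36	1.00	ONLINE	-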

my $pools_int = 0;
$tojson{online}   = 0;
$tojson{degraded} = 0;
$tojson{offline}  = 0;
$tojson{faulted}  = 0;
$tojson{health}   = 1;
$tojson{unavail}  = 0;
$tojson{removed}  = 0;
$tojson{unknown}  = 0;
my @toShoveIntoJSON;

while ( defined( $pools[$pools_int] ) ) {
	my %newPool;
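
	# Normalize the tab-separated `zpool list` line into simple CSV for the
	# split below: runs of whitespace become commas, lone dashes become zeros,
	# the percent sign is stripped from the capacity column, and the trailing
	# "x" is stripped from the dedup ratio (e.g. "1.00x" becomes "1.00").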
	my $pool = $pools[$pools_int];
	chomp($pool);
	$pool =~ s/[\t\ ]+/,/g;
	$pool =~ s/\,\-\,/\,0\,/g;
	$pool =~ s/\%//g;
	$pool =~ s/\,([0-1\.]*)x\,/,$1,/;

	(
		$newPool{name},    $newPool{size},     $newPool{alloc}, $newPool{free},
		$newPool{ckpoint}, $newPool{expandsz}, $newPool{frag},  $newPool{cap},
		$newPool{dedup},   $newPool{health},   $newPool{altroot}
	) = split( /\,/, $pool );

	if ( $opts{s} ) {
		$newPool{status} = `zpool status $newPool{name}`;
	}
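
	# Map the textual pool health to an integer for easy graphing/alerting:
	# 0 = ONLINE, 1 = DEGRADED, 2 = OFFLINE, 3 = FAULTED, 4 = UNAVAIL,
	# 5 = REMOVED, 6 = unknown. Any state other than ONLINE or OFFLINE also
	# zeros the overall $tojson{health} flag.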
	if ( $newPool{health} eq 'ONLINE' ) {
		$newPool{health} = 0;
		$tojson{online}++;
	} elsif ( $newPool{health} eq 'DEGRADED' ) {
		$newPool{health} = 1;
		$tojson{health} = 0;
		$tojson{degraded}++;
	} elsif ( $newPool{health} eq 'OFFLINE' ) {
		$newPool{health} = 2;
		$tojson{offline}++;
	} elsif ( $newPool{health} eq 'FAULTED' ) {
		$newPool{health} = 3;
		$tojson{health} = 0;
		$tojson{faulted}++;
	} elsif ( $newPool{health} eq 'UNAVAIL' ) {
		$newPool{health} = 4;
		$tojson{health} = 0;
		$tojson{unavail}++;
	} elsif ( $newPool{health} eq 'REMOVED' ) {
		$newPool{health} = 5;
		$tojson{health} = 0;
		$tojson{removed}++;
	} else {
		$newPool{health} = 6;
		$tojson{health} = 0;
		$tojson{unknown}++;
	}

	if ( $newPool{expandsz} eq '-' ) {
		$newPool{expandsz} = 0;
	}
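
	# `zpool iostat -l -q -p -H` adds latency (-l) and queue depth (-q)
	# columns, prints exact parseable numbers (-p), and drops the header (-H),
	# yielding one tab-separated line that is normalized the same way as the
	# `zpool list` line above.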
	my $iostat = `zpool iostat -l -q -p -H $newPool{name}`;
	chomp($iostat);
	$iostat =~ s/\t/,/g;
	$iostat =~ s/\,\-\,\-\,/\,0\,0\,/g;
	$iostat =~ s/\%//g;
	$iostat =~ s/\,([0-1\.]*)x\,/,$1,/;
	chomp($iostat);
	my $parsed;

	# The first three columns (pool name, alloc, and free) duplicate what
	# `zpool list` already provided, so they are discarded via $parsed.
	(
		$parsed,                  $parsed,                 $parsed,                 $newPool{operations_r},
		$newPool{operations_w},   $newPool{bandwidth_r},   $newPool{bandwidth_w},   $newPool{total_wait_r},
		$newPool{total_wait_w},   $newPool{disk_wait_r},   $newPool{disk_wait_w},   $newPool{syncq_wait_r},
		$newPool{syncq_wait_w},   $newPool{asyncq_wait_r}, $newPool{asyncq_wait_w}, $newPool{scrub_wait},
		$newPool{trim_wait},      $newPool{syncq_read_p},  $newPool{syncq_read_a},  $newPool{syncq_write_p},
		$newPool{syncq_write_a},  $newPool{asyncq_read_p}, $newPool{asyncq_read_a}, $newPool{asyncq_write_p},
		$newPool{asyncq_write_a}, $newPool{scrubq_read_p}, $newPool{scrubq_read_a}, $newPool{trimq_write_p},
		$newPool{trimq_write_a},
	) = split( /\,/, $iostat );

	my @pool_keys = keys(%newPool);
	foreach my $item (@pool_keys) {
		if ( $item ne 'altroot' && $newPool{$item} eq '-' ) {
			$newPool{$item} = 0;
		}
	}

	push( @toShoveIntoJSON, \%newPool );

	$pools_int++;
} ## end while ( defined( $pools[$pools_int] ) )

$tojson{pools} = \@toShoveIntoJSON;

#
# OS specific bits
#
my $stats_stuff = {};
if ( $^O eq 'freebsd' ) {
	my @to_pull      = ( 'kstat.zfs', 'vfs.zfs', );
	my @sysctls_pull = `/sbin/sysctl -q @to_pull`;
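
	# Each sysctl line looks something like (hypothetical value):
	#   kstat.zfs.misc.arcstats.hits: 1234567
	# The leading path through ".arcstats." is stripped below so the keys
	# match the bare names used on Linux.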
	foreach my $stat (@sysctls_pull) {
		chomp($stat);
		my ( $var, $val ) = split( /:/, $stat, 2 );

		# If $val is empty, skip it. Likely a var with a newline before
		# the data so it is trying to "split" the data.
		if ( length $val ) {
			$val =~ s/^ //;
			$var =~ s/^.*\.arcstats\.//;
			$stats_stuff->{$var} = $val;
		}
	} ## end foreach my $stat (@sysctls_pull)

} elsif ( $^O eq 'linux' ) {
	my @arcstats_lines = read_file('/proc/spl/kstat/zfs/arcstats');
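
	# arcstats lines are whitespace-separated "name type data" triples, e.g.
	# hypothetically "hits    4    1234567"; the type column is ignored here.
	# The two kstat header lines just become junk keys that are never used.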
	foreach my $line (@arcstats_lines) {
		chomp($line);
		my ( $stat, $int, $value ) = split( /[\t\ ]+/, $line, 3 );
		$stats_stuff->{$stat} = $value;
	}
}

# does not seem to exist for me, but some of these don't seem to be created till needed
if ( !defined( $stats_stuff->{recycle_miss} ) ) {
	$stats_stuff->{recycle_miss} = 0;
}

##
## ARC misc
##
$tojson{deleted}      = $stats_stuff->{deleted};
$tojson{evict_skip}   = $stats_stuff->{evict_skip};
$tojson{mutex_skip}   = $stats_stuff->{mutex_miss};
$tojson{recycle_miss} = $stats_stuff->{recycle_miss};

##
## ARC size
##
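# In arcstats, "size" is the current ARC size in bytes, "c" is the target
# size the ARC is growing or shrinking toward, and "c_min"/"c_max" bound that
# target, so the percentages below are all taken relative to c_max.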
my $target_size_percent        = $stats_stuff->{c} / $stats_stuff->{c_max} * 100;
my $arc_size_percent           = $stats_stuff->{size} / $stats_stuff->{c_max} * 100;
my $target_size_adaptive_ratio = $stats_stuff->{c} / $stats_stuff->{c_max};
my $min_size_percent           = $stats_stuff->{c_min} / $stats_stuff->{c_max} * 100;

$tojson{arc_size}        = $stats_stuff->{size};
$tojson{target_size_max} = $stats_stuff->{c_max};
$tojson{target_size_min} = $stats_stuff->{c_min};
$tojson{target_size}     = $stats_stuff->{c};
$tojson{target_size_per}  = $target_size_percent;
$tojson{arc_size_per}     = $arc_size_percent;
$tojson{target_size_arat} = $target_size_adaptive_ratio;
$tojson{min_size_per}     = $min_size_percent;

##
## ARC size breakdown
##
my $mfu_size;
if ( defined( $stats_stuff->{mfu_size} ) ) {
	$mfu_size = $stats_stuff->{mfu_size};
}
my $recently_used_percent;
my $frequently_used_percent;
if ( !defined( $stats_stuff->{p} ) && defined( $stats_stuff->{mfu_size} ) ) {
	$stats_stuff->{p} = $stats_stuff->{size} - $stats_stuff->{mfu_size};
}
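
# "p" is the ARC's target size for the most-recently-used list. Whichever of
# p or mfu_size the kernel does not expose gets derived from the other, and
# the MRU/MFU percentages are taken against the actual ARC size when at or
# over target, otherwise against the target size.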
if ( $stats_stuff->{size} >= $stats_stuff->{c} ) {
	if ( !defined($mfu_size) ) {
		$mfu_size = $stats_stuff->{size} - $stats_stuff->{p};
	}
	$recently_used_percent   = $stats_stuff->{p} / $stats_stuff->{size} * 100;
	$frequently_used_percent = $mfu_size / $stats_stuff->{size} * 100;
} else {
	if ( !defined($mfu_size) ) {
		$mfu_size = $stats_stuff->{c} - $stats_stuff->{p};
	}
	$recently_used_percent   = $stats_stuff->{p} / $stats_stuff->{c} * 100;
	$frequently_used_percent = $mfu_size / $stats_stuff->{c} * 100;
}

$tojson{p} = $stats_stuff->{p};

##
## ARC efficiency
##
my $arc_hits                 = $stats_stuff->{hits};
my $arc_misses               = $stats_stuff->{misses};
my $demand_data_hits         = $stats_stuff->{demand_data_hits};
my $demand_data_misses       = $stats_stuff->{demand_data_misses};
my $demand_metadata_hits     = $stats_stuff->{demand_metadata_hits};
my $demand_metadata_misses   = $stats_stuff->{demand_metadata_misses};
my $mfu_ghost_hits           = $stats_stuff->{mfu_ghost_hits};
my $mfu_hits                 = $stats_stuff->{mfu_hits};
my $mru_ghost_hits           = $stats_stuff->{mru_ghost_hits};
my $mru_hits                 = $stats_stuff->{mru_hits};
my $prefetch_data_hits       = $stats_stuff->{prefetch_data_hits};
my $prefetch_data_misses     = $stats_stuff->{prefetch_data_misses};
my $prefetch_metadata_hits   = $stats_stuff->{prefetch_metadata_hits};
my $prefetch_metadata_misses = $stats_stuff->{prefetch_metadata_misses};
##
## ARC efficiency, common
##
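
# anon hits are hits that cannot be attributed to the MRU, MFU, or ghost
# lists, i.e. buffers still on their first pass through the ARC, while
# real_hits counts only MRU and MFU hits.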
my $anon_hits           = $arc_hits - ( $mfu_hits + $mru_hits + $mfu_ghost_hits + $mru_ghost_hits );
my $arc_accesses_total  = $arc_hits + $arc_misses;
my $demand_data_total   = $demand_data_hits + $demand_data_misses;
my $prefetch_data_total = $prefetch_data_hits + $prefetch_data_misses;
my $real_hits           = $mfu_hits + $mru_hits;

my $cache_hit_percent  = $arc_hits / $arc_accesses_total * 100;
my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100;
my $actual_hit_percent = $real_hits / $arc_accesses_total * 100;

my $data_demand_percent = 0;
if ( $demand_data_total != 0 ) {
	$data_demand_percent = $demand_data_hits / $demand_data_total * 100;
}

my $data_prefetch_percent = 0;
if ( $prefetch_data_total != 0 ) {
	$data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100;
}

my $anon_hits_percent;
if ( $anon_hits != 0 ) {
	$anon_hits_percent = $anon_hits / $arc_hits * 100;
} else {
	$anon_hits_percent = 0;
}

my $mru_percent       = $mru_hits / $arc_hits * 100;
my $mfu_percent       = $mfu_hits / $arc_hits * 100;
my $mru_ghost_percent = $mru_ghost_hits / $arc_hits * 100;
my $mfu_ghost_percent = $mfu_ghost_hits / $arc_hits * 100;

my $demand_hits_percent            = $demand_data_hits / $arc_hits * 100;
my $prefetch_hits_percent          = $prefetch_data_hits / $arc_hits * 100;
my $metadata_hits_percent          = $demand_metadata_hits / $arc_hits * 100;
my $prefetch_metadata_hits_percent = $prefetch_metadata_hits / $arc_hits * 100;

my $demand_misses_percent            = $demand_data_misses / $arc_misses * 100;
my $prefetch_misses_percent          = $prefetch_data_misses / $arc_misses * 100;
my $metadata_misses_percent          = $demand_metadata_misses / $arc_misses * 100;
my $prefetch_metadata_misses_percent = $prefetch_metadata_misses / $arc_misses * 100;

# ARC misc. efficiency stats
$tojson{arc_hits}           = $arc_hits;
$tojson{arc_misses}         = $arc_misses;
$tojson{demand_data_hits}   = $demand_data_hits;
$tojson{demand_data_misses} = $demand_data_misses;
$tojson{demand_meta_hits}   = $demand_metadata_hits;
$tojson{demand_meta_misses} = $demand_metadata_misses;
$tojson{mfu_ghost_hits}     = $mfu_ghost_hits;
$tojson{mfu_hits}           = $mfu_hits;
$tojson{mru_ghost_hits}     = $mru_ghost_hits;
$tojson{mru_hits}           = $mru_hits;
$tojson{pre_data_hits}      = $prefetch_data_hits;
$tojson{pre_data_misses}    = $prefetch_data_misses;
$tojson{pre_meta_hits}      = $prefetch_metadata_hits;
$tojson{pre_meta_misses}    = $prefetch_metadata_misses;
$tojson{anon_hits}          = $anon_hits;
$tojson{arc_accesses_total} = $arc_accesses_total;
$tojson{demand_data_total}  = $demand_data_total;
$tojson{pre_data_total}     = $prefetch_data_total;
$tojson{real_hits}          = $real_hits;

# ARC efficiency percents
$tojson{cache_hits_per}      = $cache_hit_percent;
$tojson{cache_miss_per}      = $cache_miss_percent;
$tojson{actual_hit_per}      = $actual_hit_percent;
$tojson{data_demand_per}     = $data_demand_percent;
$tojson{data_pre_per}        = $data_prefetch_percent;
$tojson{anon_hits_per}       = $anon_hits_percent;
$tojson{mru_per}             = $mru_percent;
$tojson{mfu_per}             = $mfu_percent;
$tojson{mru_ghost_per}       = $mru_ghost_percent;
$tojson{mfu_ghost_per}       = $mfu_ghost_percent;
$tojson{demand_hits_per}     = $demand_hits_percent;
$tojson{pre_hits_per}        = $prefetch_hits_percent;
$tojson{meta_hits_per}       = $metadata_hits_percent;
$tojson{pre_meta_hits_per}   = $prefetch_metadata_hits_percent;
$tojson{demand_misses_per}   = $demand_misses_percent;
$tojson{pre_misses_per}      = $prefetch_misses_percent;
$tojson{meta_misses_per}     = $metadata_misses_percent;
$tojson{pre_meta_misses_per} = $prefetch_metadata_misses_percent;

$tojson{mfu_size}      = $mfu_size;
$tojson{rec_used_per}  = $recently_used_percent;
$tojson{freq_used_per} = $frequently_used_percent;

##
## pull in the l2 stats
##
my @l2_keys = grep( /l2\_/, keys( %{$stats_stuff} ) );
foreach my $item (@l2_keys) {
	$tojson{$item} = $stats_stuff->{$item};
}
$tojson{l2_errors}       = $tojson{l2_writes_error} + $tojson{l2_cksum_bad} + $tojson{l2_io_error};
$tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses};

##
## print the results
##

my %head_hash;
$head_hash{data}        = \%tojson;
$head_hash{version}     = 3;
$head_hash{error}       = 0;
$head_hash{errorString} = '';
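
# This is the standard LibreNMS JSON extend envelope: the gathered stats go
# under "data", with version, error, and errorString for the poller.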

my $j = JSON->new;

if ( $opts{p} && !$opts{b} ) {
	$j->pretty(1);
}

my $return_string = $j->encode( \%head_hash );

if ( !$opts{p} && !$opts{b} ) {
	print $return_string. "\n";
	exit 0;
} elsif ( !$opts{b} ) {
	print $return_string;
	exit 0;
}
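
# -b was specified: gzip the JSON and Base64 encode it onto a single line so
# it can be returned compactly via SNMP.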
my $compressed_string;
gzip \$return_string => \$compressed_string;
my $compressed = encode_base64($compressed_string);
$compressed =~ s/\n//g;
$compressed = $compressed . "\n";

# If gzip+Base64 did not actually shrink the output, fall back to printing
# the raw JSON.
if ( length($compressed) > length($return_string) ) {
	print $return_string. "\n";
} else {
	print $compressed;
}

exit 0;