mirror of
https://github.com/librenms/librenms.git
synced 2024-10-07 16:52:45 +00:00
* initial work on add the ability to save/fetch app data
* update to use get_app_data for ZFS
* update the poller for the new app_data stuff
* ZFS now logs changes to pools
* add schema update for app_data stuff
* small formatting fix
* add a missing \
* now adds a column
* sql-schema is no longer used, so remove the file that was added here
* misc cleanups
* rename the method in database/migrations/2022_07_03_1947_add_app_data.php
* hopefully fix the migration bit
* add the column to misc/db_schema.yaml
* more misc small DB fixes
* update the test as the json column uses collat of utf8mb4_bin
* revert the last change and try manually setting it to what is expected
* remove a extra ;
* update suricata as well
* correct the instance -> instances in one location to prevent the old instance list from being stomped
* remove a extra ;
* update fail2ban to use it as well
* remove two unused functions as suricata and fail2ban no longer use components
* style cleanup
* postgres poller updated to use it
* update html side of the postgres bits
* chronyd now uses app data bits now as well
* portactivity now uses it as well
* style fix
* sort the returned arrays from app_data
* correct log message for port activity
* collation change
* try re-ordering it
* add in the new data column to the tests
* remove a extra ,
* hmm... ->collate('utf8mb4_unicode_ci') is not usable as apparently collate does not exist
* change the column type from json to longtext
* mv chronyd stuff while I sort out the rest of the tests... damn thing is always buggy
* hmm... fix a missing line then likely move stuff back
* style fix
* add fillable
* add the expected data for fail2ban json
* escape a " I missed
* add data for portactivity
* add suricata app data
* add app data to zfs legacy test
* put the moved tests back into place and update zfs-v1 test
* add app data for chronyd test
* add app data for fail2ban legacy test
* update zfs v1 app data
* add some notes on application dev work
* add Developing/Application-Notes.md to mkdocs.yml
* add data column to it
* added various suggestions from bennet-esyoil
* convert from isset to sizeof
* type fix
* fully remove the old save app data function and move it into a helper function... the other still needs cleaned up prior to removal
* update docs
* get_app_data is fully removed now as well
* a few style fixes
* add $casts
* update chronyd test
* attempt to fix the data
* more doc cleanup and try changing the cast
* style fix
* revert the changes to the chronyd test
* apply a few of murrant's suggestions
* document working with ->data as json and non-json
* remove two no-longer used in this PR exceptions
* ->data now operates transparently
* style fix
* update data tests
* fix json
* test fix
* update the app notes to reflect how app data now works
* app test fix
* app data fix for linux_lsi
* json fix
* minor doc cleanup
* remove duplicate query and use json_decode instead
* style fix
* modelize the app poller
* use a anon func instead of foreach
* test update
* style cleanup
* style cleanup
* another test cleanup
* more test cleanup
* reverse the test changes and add in some more glue code
* revert one of the test changes
* another small test fix
* Make things use models
Left some array access, but those will still work just fine.
* missed chronyd and portactivity
* rename poll to avoid any confusion
* Remove extra save and fix timestamp
* save any changes made to app->data
* nope, that was not it
* What are magic methods and how do they work?
* fix two typos
* update linux_lsi test
* change quote type
Co-authored-by: Tony Murray <murraytony@gmail.com>
192 lines
7.4 KiB
PHP
192 lines
7.4 KiB
PHP
<?php

use LibreNMS\Exceptions\JsonAppException;
use LibreNMS\Exceptions\JsonAppMissingKeysException;
use LibreNMS\RRD\RrdDefinition;

$name = 'zfs';

// NOTE(review): this flag is assigned here but never read or updated anywhere
// else in this file; the comment about it being "set to false later" looks
// like a leftover from older legacy-detection code — confirm before removing.
$not_legacy = 1;

try {
    // Current agent output wraps the payload under a 'data' key.
    $zfs = json_app_get($device, $name, 1)['data'];
} catch (JsonAppMissingKeysException $e) {
    // Older agent versions return the payload without the 'data' key;
    // fall back to the raw parsed JSON.
    $zfs = $e->getParsedJson();
} catch (JsonAppException $e) {
    // Any other JSON/app error: report it and record empty metrics.
    $error_message = $e->getCode() . ':' . $e->getMessage();
    echo PHP_EOL . $name . ':' . $error_message . PHP_EOL;
    update_application($app, $error_message, []); // Set empty metrics and error message

    return;
}
|
|
|
|
$rrd_name = ['app', $name, $app->app_id];

// ARC statistic datasets, in the exact order the RRD file expects:
// DERIVE for event counters, GAUGE for sizes and percentages.
$arc_datasets = [
    ['deleted', 'DERIVE'],
    ['evict_skip', 'DERIVE'],
    ['mutex_skip', 'DERIVE'],
    ['recycle_miss', 'DERIVE'],
    ['arc_size', 'GAUGE'],
    ['target_size_max', 'GAUGE'],
    ['target_size_min', 'GAUGE'],
    ['target_size', 'GAUGE'],
    ['target_size_per', 'GAUGE'],
    ['arc_size_per', 'GAUGE'],
    ['target_size_arat', 'GAUGE'],
    ['min_size_per', 'GAUGE'],
    ['mfu_size', 'GAUGE'],
    ['p', 'GAUGE'],
    ['rec_used_per', 'GAUGE'],
    ['freq_used_per', 'GAUGE'],
    ['arc_hits', 'DERIVE'],
    ['arc_misses', 'DERIVE'],
    ['demand_data_hits', 'DERIVE'],
    ['demand_data_misses', 'DERIVE'],
    ['demand_meta_hits', 'DERIVE'],
    ['demand_meta_misses', 'DERIVE'],
    ['mfu_ghost_hits', 'DERIVE'],
    ['mfu_hits', 'DERIVE'],
    ['mru_ghost_hits', 'DERIVE'],
    ['mru_hits', 'DERIVE'],
    ['pre_data_hits', 'DERIVE'],
    ['pre_data_misses', 'DERIVE'],
    ['pre_meta_hits', 'DERIVE'],
    ['pre_meta_misses', 'DERIVE'],
    ['anon_hits', 'DERIVE'],
    ['arc_accesses_total', 'DERIVE'],
    ['demand_data_total', 'DERIVE'],
    ['pre_data_total', 'DERIVE'],
    ['real_hits', 'DERIVE'],
    ['cache_hits_per', 'GAUGE'],
    ['cache_miss_per', 'GAUGE'],
    ['actual_hit_per', 'GAUGE'],
    ['data_demand_per', 'GAUGE'],
    ['data_pre_per', 'GAUGE'],
    ['anon_hits_per', 'GAUGE'],
    ['mru_per', 'GAUGE'],
    ['mfu_per', 'GAUGE'],
    ['mru_ghost_per', 'GAUGE'],
    ['mfu_ghost_per', 'GAUGE'],
    ['demand_hits_per', 'GAUGE'],
    ['pre_hits_per', 'GAUGE'],
    ['meta_hits_per', 'GAUGE'],
    ['pre_meta_hits_per', 'GAUGE'],
    ['demand_misses_per', 'GAUGE'],
    ['pre_misses_per', 'GAUGE'],
    ['meta_misses_per', 'GAUGE'],
    ['pre_meta_misses_per', 'GAUGE'],
];

// Build the definition data-driven instead of one long fluent chain;
// re-assigning the return value keeps this equivalent to chaining.
$rrd_def = RrdDefinition::make();
foreach ($arc_datasets as [$ds_name, $ds_type]) {
    $rrd_def = $rrd_def->addDataset($ds_name, $ds_type, 0);
}
|
|
|
|
// Copy the ARC statistics into the RRD field set; the key order below
// matches the dataset order of $rrd_def exactly.
$arc_stat_keys = [
    'deleted', 'evict_skip', 'mutex_skip', 'recycle_miss', 'arc_size',
    'target_size_max', 'target_size_min', 'target_size', 'target_size_per',
    'arc_size_per', 'target_size_arat', 'min_size_per', 'mfu_size', 'p',
    'rec_used_per', 'freq_used_per', 'arc_hits', 'arc_misses',
    'demand_data_hits', 'demand_data_misses', 'demand_meta_hits',
    'demand_meta_misses', 'mfu_ghost_hits', 'mfu_hits', 'mru_ghost_hits',
    'mru_hits', 'pre_data_hits', 'pre_data_misses', 'pre_meta_hits',
    'pre_meta_misses', 'anon_hits', 'arc_accesses_total', 'demand_data_total',
    'pre_data_total', 'real_hits', 'cache_hits_per', 'cache_miss_per',
    'actual_hit_per', 'data_demand_per', 'data_pre_per', 'anon_hits_per',
    'mru_per', 'mfu_per', 'mru_ghost_per', 'mfu_ghost_per', 'demand_hits_per',
    'pre_hits_per', 'meta_hits_per', 'pre_meta_hits_per', 'demand_misses_per',
    'pre_misses_per', 'meta_misses_per', 'pre_meta_misses_per',
];

$fields = [];
foreach ($arc_stat_keys as $stat) {
    $fields[$stat] = $zfs[$stat];
}

$tags = ['name' => $name, 'app_id' => $app->app_id, 'rrd_def' => $rrd_def, 'rrd_name' => $rrd_name];
data_update($device, 'app', $tags, $fields);
|
|
|
|
//
// process additional info returned
//

$pools = [];

// Per-pool RRD layout: every value is a point-in-time gauge.
$pool_rrd_def = RrdDefinition::make();
foreach (['size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] as $pool_ds) {
    $pool_rrd_def = $pool_rrd_def->addDataset($pool_ds, 'GAUGE', 0);
}

// Start the reported metrics from the full ARC stats. The 'pools' entry is
// an array, so drop it here; its contents are re-added flattened below.
$metrics = $zfs;
unset($metrics['pools']);
|
|
|
|
// One RRD update per pool, keyed by pool name.
foreach ($zfs['pools'] as $pool) {
    $pool_name = $pool['name'];
    $pools[] = $pool_name;
    $rrd_name = ['app', $name, $app->app_id, $pool_name];

    $fields = [
        'alloc' => $pool['alloc'],
        'size' => $pool['size'],
        'free' => $pool['free'],
        'expandsz' => $pool['expandsz'],
        // set_numeric() guards against a non-numeric frag value, falling back to -1
        'frag' => set_numeric($pool['frag'], -1),
        'cap' => $pool['cap'],
        'dedup' => $pool['dedup'],
    ];

    $tags = ['name' => $name, 'app_id' => $app->app_id, 'rrd_def' => $pool_rrd_def, 'rrd_name' => $rrd_name];
    data_update($device, 'app', $tags, $fields);

    // Flatten each pool value into the metrics array as pool_<name>_<field>.
    foreach ($fields as $field => $value) {
        $metrics['pool_' . $pool_name . '_' . $field] = $value;
    }
}
|
|
|
|
// Compare the current pool list against the list saved from the previous
// poll (in $app->data) to detect pools that were created or destroyed.
$old_pools = $app->data['pools'] ?? [];
$added_pools = array_diff($pools, $old_pools);
$removed_pools = array_diff($old_pools, $pools);

// If the pool list changed, persist the new list and log an event.
if (count($added_pools) > 0 || count($removed_pools) > 0) {
    $app->data = ['pools' => $pools];
    $log_message = 'ZFS Pool Change:';
    $log_message .= count($added_pools) > 0 ? ' Added ' . implode(',', $added_pools) : '';
    // BUG FIX: this previously imploded $added_pools, so removed pools were
    // reported with the added pools' names (or as an empty list).
    $log_message .= count($removed_pools) > 0 ? ' Removed ' . implode(',', $removed_pools) : '';
    log_event($log_message, $device, 'application');
}

update_application($app, 'OK', $metrics);