Changed qos to collect less data and use smaller rrds

Updated ntfs, collectd, samba and mpd
This commit is contained in:
Maniacikarus
2008-08-29 07:16:45 +02:00
parent dae27b6b91
commit e19fc87f1d
14 changed files with 27 additions and 110 deletions

View File

@@ -33,14 +33,6 @@ my $heartbeat=$STEP*2;
my @rrd_data_sources =
("-s", $STEP,
"DS:bytes:COUNTER:$heartbeat:0:U",
"DS:bits:COUNTER:$heartbeat:0:U",
"DS:pkts:COUNTER:$heartbeat:0:U",
"DS:dropped:COUNTER:$heartbeat:0:U",
"DS:overlimits:COUNTER:$heartbeat:0:U",
"DS:lended:COUNTER:$heartbeat:0:U",
"DS:borrowed:COUNTER:$heartbeat:0:U",
"DS:giants:COUNTER:$heartbeat:0:U",
"DS:backlog:GAUGE:$heartbeat:0:U",
"RRA:AVERAGE:0.5:1:43200",
"RRA:AVERAGE:0.5:7:8640",
"RRA:AVERAGE:0.5:31:8640",

View File

@@ -25,19 +25,6 @@
##
##########################################
our $event_file_all = "${event_datadir}changes.evt";
# Build the path of the per-class event file for the given class/device
# identifier, rooted at the file-level $event_datadir directory.
# Takes one argument (the "class_device" string, interpolated verbatim
# into the filename) and returns "${event_datadir}class_<id>.evt".
sub get_filename_event($) {
my $class_device = "$_[0]";
my $filename = "${event_datadir}class_${class_device}.evt";
return $filename;
}
# Build the path of the per-class bandwidth-info event file for the given
# class/device identifier, rooted at the file-level $event_datadir directory.
# Takes one argument (the "class_device" string) and returns
# "${event_datadir}class_<id>_bandwidth.evt".
sub get_filename_bandwidth_info($) {
my $class_device = "$_[0]";
my $filename = "${event_datadir}class_${class_device}_bandwidth.evt";
return $filename;
}
sub update_event_file($$$) {
my $filename = $_[0];
my $information = $_[1];

View File

@@ -210,27 +210,6 @@ sub parse_class($) {
$classes_data{$hash}{last_update} = $timestamp;
update_counter( $hash, $timestamp, "bytes" , $bytes);
#(yes I know its bad/redundant, but it makes in easier elsewhere)
update_counter( $hash, $timestamp, "bits" , $bytes*8);
update_counter( $hash, $timestamp, "pkts" , $pkts);
update_counter( $hash, $timestamp, "dropped" , $dropped);
update_counter( $hash, $timestamp, "overlimits", $overlimits);
update_counter( $hash, $timestamp, "lended" , $lended);
update_counter( $hash, $timestamp, "borrowed" , $borrowed);
update_counter( $hash, $timestamp, "giants" , $giants);
# Not a counter value...
$classes_data{$hash}{backlog} = $backlog;
# Update the info data
# (remember to update the "type" first)
update_info( $hash, $timestamp, "type" , $type);
update_info( $hash, $timestamp, "parent", $parent);
update_info( $hash, $timestamp, "leaf" , $leaf);
update_info( $hash, $timestamp, "prio" , $prio);
update_info( $hash, $timestamp, "rate" , $rate);
update_info( $hash, $timestamp, "ceil" , $ceil);
update_info( $hash, $timestamp, "burst" , $burst);
update_info( $hash, $timestamp, "cburst", $cburst);
#print "\n";
}
@@ -259,15 +238,11 @@ sub parse_class($) {
my $upperlimit_m2 = $18;
#print "\nType: $type\n";
my ($bytes, $pkts, $dropped, $overlimits);
my $bytes;
if ($tc_output[$i + 1] =~ m/Sent (\d+) bytes (\d+) pkts \(dropped (\d+), overlimits (\d+)\)/ ) {
$bytes = $1;
$pkts = $2;
$dropped = $3;
$overlimits = $4;
#print "bytes: $bytes\n"."pkts: $pkts\n";
#print "dropped: $dropped\n"."overlimits: $overlimits\n";
} else {
} else {
print "$timestamp: ERROR(+1) - Unable to parse (class ${class}_$device): ";
print "\"$tc_output[$i + 1]\"\n";
$return_val="";
@@ -316,40 +291,6 @@ sub parse_class($) {
# (need a function call for error checking)
$classes_data{$hash}{last_update} = $timestamp;
update_counter( $hash, $timestamp, "bytes" , $bytes);
#(yes I know its bad/redundant, but it makes in easier elsewhere)
update_counter( $hash, $timestamp, "bits" , $bytes*8);
update_counter( $hash, $timestamp, "pkts" , $pkts);
update_counter( $hash, $timestamp, "dropped" , $dropped);
update_counter( $hash, $timestamp, "overlimits", $overlimits);
# Not a counter value...
$classes_data{$hash}{backlog} = $backlog;
#
# Extra HFSC counters
$classes_data{$hash}{hfsc_period} = $period;
update_counter( $hash, $timestamp, "hfsc_work" , $work);
update_counter( $hash, $timestamp, "hfsc_rtwork" , $rtwork);
# HFSC - Update the info data
# (remember to update the "type" first)
update_info( $hash, $timestamp, "type" , $type);
update_info( $hash, $timestamp, "parent", $parent);
update_info( $hash, $timestamp, "leaf" , $leaf);
#
# Extra HFSC information
update_info( $hash, $timestamp, "level" , $level);
update_info( $hash, $timestamp, "realtime_m1", $realtime_m1);
update_info( $hash, $timestamp, "realtime_d" , $realtime_d);
update_info( $hash, $timestamp, "realtime_m2", $realtime_m2);
update_info( $hash, $timestamp, "linkshare_m1", $linkshare_m1);
update_info( $hash, $timestamp, "linkshare_d" , $linkshare_d);
update_info( $hash, $timestamp, "linkshare_m2", $linkshare_m2);
update_info( $hash, $timestamp, "upperlimit_m1", $upperlimit_m1);
update_info( $hash, $timestamp, "upperlimit_d" , $upperlimit_d);
update_info( $hash, $timestamp, "upperlimit_m2", $upperlimit_m2);
}

View File

@@ -2,8 +2,8 @@ bin/ntfs-3g
#lib/libntfs-3g.a
#lib/libntfs-3g.la
lib/libntfs-3g.so
lib/libntfs-3g.so.34
lib/libntfs-3g.so.34.0.0
lib/libntfs-3g.so.36
lib/libntfs-3g.so.36.0.0
sbin/mount.ntfs-3g
#usr/include/ntfs-3g
#usr/include/ntfs-3g/attrib.h

View File

@@ -3,10 +3,6 @@ etc/ppp/ip-up
var/ipfire/outgoing/bin/outgoingfw.pl
var/ipfire/urlfilter/autoupdate/autoupdate.urls
usr/sbin/redirect_wrapper
bin/ntfs-3g
lib/libntfs-3g.so
lib/libntfs-3g.so.34
lib/libntfs-3g.so.34.0.0
srv/web/ipfire/cgi-bin/index.cgi
srv/web/ipfire/cgi-bin/backup.cgi
srv/web/ipfire/cgi-bin/outgoingfw.cgi

View File

@@ -3,3 +3,7 @@ etc/init.d/connectd
usr/lib/squid
usr/sbin/squid
srv/web/ipfire/cgi-bin/updatexlrator.cgi
bin/ntfs-3g
lib/libntfs-3g.so
lib/libntfs-3g.so.36
lib/libntfs-3g.so.36.0.0

View File

@@ -3,10 +3,6 @@ etc/ppp/ip-up
var/ipfire/outgoing/bin/outgoingfw.pl
var/ipfire/urlfilter/autoupdate/autoupdate.urls
usr/sbin/redirect_wrapper
bin/ntfs-3g
lib/libntfs-3g.so
lib/libntfs-3g.so.34
lib/libntfs-3g.so.34.0.0
srv/web/ipfire/cgi-bin/index.cgi
srv/web/ipfire/cgi-bin/backup.cgi
srv/web/ipfire/cgi-bin/outgoingfw.cgi

View File

@@ -1,2 +1,6 @@
srv/web/ipfire/cgi-bin/updatexlrator.cgi
etc/rc.d/init.d/connectd
bin/ntfs-3g
lib/libntfs-3g.so
lib/libntfs-3g.so.36
lib/libntfs-3g.so.36.0.0