I have a Cacti server running 0.8.7g, and I just created a graph to monitor the I/Os of a direct-attached disk shelf on an HP ProLiant server.
Cacti writes the RRD file, and the contents of the RRD look just fine:
Code: Select all
rrd_version = "0003"
step = 300
last_update = 1292923350
ds[commands].type = "COUNTER"
ds[commands].minimal_heartbeat = 600
ds[commands].min = 0,0000000000e+00
ds[commands].max = NaN
ds[commands].last_ds = "500"
ds[commands].value = 5,4546627565e+09
ds[commands].unknown_sec = 0
ds[latency].type = "COUNTER"
ds[latency].minimal_heartbeat = 600
ds[latency].min = 0,0000000000e+00
ds[latency].max = NaN
ds[latency].last_ds = "U"
ds[latency].value = NaN
ds[latency].unknown_sec = 150
rra[0].cf = "AVERAGE"
rra[0].rows = 500
rra[0].cur_row = 202
rra[0].pdp_per_row = 1
rra[0].xff = 5,0000000000e-01
rra[0].cdp_prep[0].value = NaN
rra[0].cdp_prep[0].unknown_datapoints = 0
rra[0].cdp_prep[1].value = NaN
rra[0].cdp_prep[1].unknown_datapoints = 0
rra[1].cf = "AVERAGE"
rra[1].rows = 600
rra[1].cur_row = 584
rra[1].pdp_per_row = 1
rra[1].xff = 5,0000000000e-01
rra[1].cdp_prep[0].value = NaN
rra[1].cdp_prep[0].unknown_datapoints = 0
rra[1].cdp_prep[1].value = NaN
rra[1].cdp_prep[1].unknown_datapoints = 0
rra[2].cf = "AVERAGE"
rra[2].rows = 700
rra[2].cur_row = 3
rra[2].pdp_per_row = 6
rra[2].xff = 5,0000000000e-01
rra[2].cdp_prep[0].value = 1,4122845728e+07
rra[2].cdp_prep[0].unknown_datapoints = 3
rra[2].cdp_prep[1].value = 0,0000000000e+00
rra[2].cdp_prep[1].unknown_datapoints = 3
rra[3].cf = "AVERAGE"
rra[3].rows = 775
rra[3].cur_row = 381
rra[3].pdp_per_row = 24
rra[3].xff = 5,0000000000e-01
rra[3].cdp_prep[0].value = 1,4122845728e+07
rra[3].cdp_prep[0].unknown_datapoints = 15
rra[3].cdp_prep[1].value = 0,0000000000e+00
rra[3].cdp_prep[1].unknown_datapoints = 15
rra[4].cf = "AVERAGE"
rra[4].rows = 797
rra[4].cur_row = 772
rra[4].pdp_per_row = 288
rra[4].xff = 5,0000000000e-01
rra[4].cdp_prep[0].value = 1,4122845728e+07
rra[4].cdp_prep[0].unknown_datapoints = 111
rra[4].cdp_prep[1].value = 0,0000000000e+00
rra[4].cdp_prep[1].unknown_datapoints = 111
rra[5].cf = "MAX"
rra[5].rows = 500
rra[5].cur_row = 311
rra[5].pdp_per_row = 1
rra[5].xff = 5,0000000000e-01
rra[5].cdp_prep[0].value = NaN
rra[5].cdp_prep[0].unknown_datapoints = 0
rra[5].cdp_prep[1].value = NaN
rra[5].cdp_prep[1].unknown_datapoints = 0
rra[6].cf = "MAX"
rra[6].rows = 600
rra[6].cur_row = 470
rra[6].pdp_per_row = 1
rra[6].xff = 5,0000000000e-01
rra[6].cdp_prep[0].value = NaN
rra[6].cdp_prep[0].unknown_datapoints = 0
rra[6].cdp_prep[1].value = NaN
rra[6].cdp_prep[1].unknown_datapoints = 0
rra[7].cf = "MAX"
rra[7].rows = 700
rra[7].cur_row = 406
rra[7].pdp_per_row = 6
rra[7].xff = 5,0000000000e-01
rra[7].cdp_prep[0].value = 1,4122845728e+07
rra[7].cdp_prep[0].unknown_datapoints = 3
rra[7].cdp_prep[1].value = 0,0000000000e+00
rra[7].cdp_prep[1].unknown_datapoints = 3
rra[8].cf = "MAX"
rra[8].rows = 775
rra[8].cur_row = 19
rra[8].pdp_per_row = 24
rra[8].xff = 5,0000000000e-01
rra[8].cdp_prep[0].value = 1,4122845728e+07
rra[8].cdp_prep[0].unknown_datapoints = 15
rra[8].cdp_prep[1].value = 0,0000000000e+00
rra[8].cdp_prep[1].unknown_datapoints = 15
rra[9].cf = "MAX"
rra[9].rows = 797
rra[9].cur_row = 317
rra[9].pdp_per_row = 288
rra[9].xff = 5,0000000000e-01
rra[9].cdp_prep[0].value = 1,4122845728e+07
rra[9].cdp_prep[0].unknown_datapoints = 111
rra[9].cdp_prep[1].value = 0,0000000000e+00
rra[9].cdp_prep[1].unknown_datapoints = 111
But the graph it creates shows me something like a Current value of 3.6M and a Maximum value of 25.1M — see the attached picture.
What did I do wrong?