[netperf-dev] netperf2 commit notice r584 - trunk/doc/examples
raj at netperf.org
Mon May 21 18:17:54 PDT 2012
Author: raj
Date: 2012-05-21 18:17:54 -0700 (Mon, 21 May 2012)
New Revision: 584
Modified:
trunk/doc/examples/bloat.sh
trunk/doc/examples/runemomniaggdemo.sh
Log:
try to straighten a few kinks in the bloat.sh script, and add a comment about socket buffer size to the runemomniaggdemo.sh script to help keep folks' bidirectional tests from hanging
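For reference, bloat.sh expects the target netperf server's hostname as its sole argument (it becomes the -H value in the netperf invocations below) and bails out if none is given, so a run against a hypothetical host looks like:

    # 'testhost' is a placeholder; netserver must already be running on it
    ./bloat.sh testhost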
Modified: trunk/doc/examples/bloat.sh
===================================================================
--- trunk/doc/examples/bloat.sh 2012-05-17 17:34:05 UTC (rev 583)
+++ trunk/doc/examples/bloat.sh 2012-05-22 01:17:54 UTC (rev 584)
@@ -10,24 +10,31 @@
exit -1
fi
+CHUNK=30
+
# first, start the TCP_RR test
RR_START=`date +%s`
echo "Starting netperf TCP_RR at $RR_START"
-netperf -H $1 -l 7200 -t TCP_RR -D 1 -v 2 -- -r 1 2>&1 > netperf_rr.out &
+# a negative value for the demo interval (-D) will cause netperf to
+# make gettimeofday() calls after every transaction. this will result
+# in more accurate demo intervals once the STREAM test kicks in, but a
+# somewhat lower transaction rate. not unlike enabling histogram
+# mode.
+netperf -H $1 -l 7200 -t TCP_RR -D -0.5 -v 2 -- -r 1 2>&1 > netperf_rr.out &
-# sleep 30 seconds
-sleep 30
+# sleep CHUNK seconds
+sleep $CHUNK
# now run the TCP_STREAM test
STREAM_START=`date +%s`
echo "Starting netperf TCP_STREAM test at $STREAM_START"
-netperf -H $1 -l 90 -t TCP_STREAM -D 1 -v 2 -- -m 64K 2>&1 > netperf_stream.out
+netperf -H $1 -l `expr $CHUNK \* 2` -t TCP_STREAM -D 0.25 -v 2 -- -m 1K 2>&1 > netperf_stream.out
STREAM_STOP=`date +%s`
echo "Netperf TCP_STREAM test stopped at $STREAM_STOP"
-# sleep another 30 seconds
-sleep 30
+# sleep another CHUNK seconds
+sleep $CHUNK
pkill -ALRM netperf
RR_STOP=`date +%s`
@@ -76,18 +83,26 @@
SIZE="-w $WIDTH -h 400"
-# we want to find the scaling factor for the throughput
+# we want to find the scaling factor for the throughput, with the goal
+# being that latency can go to the top of the charts and throughput
+# will go half-way up
-SCALE=`$RRDTOOL graph /dev/null \
+MAXLATMAXBPS=`$RRDTOOL graph /dev/null \
--start $MIN_TIMESTAMP --end $MAX_TIMESTAMP \
DEF:trans=netperf_rr.rrd:tps:AVERAGE \
CDEF:latency=1.0,trans,/ \
+ VDEF:maxlatency=latency,MAXIMUM \
DEF:mbps=netperf_stream.rrd:mbps:AVERAGE \
- CDEF:bps=mbps,1000000,\* \
- CDEF:scale=bps,latency,/ \
- VDEF:maxscale=scale,MAXIMUM \
- PRINT:maxscale:"%.20lf" | sed 1d
-`
+ CDEF:bps=mbps,2000000,\* \
+ VDEF:maxbps=bps,MAXIMUM \
+ PRINT:maxlatency:"%.20lf" \
+ PRINT:maxbps:"%.20lf" | sed 1d`
+
+# should I check the completion status of the previous command?
+# probably :)
+
+SCALE=`echo $MAXLATMAXBPS | awk '{print $2/$1}'`
+
$RRDTOOL graph bloat.png --imgformat PNG \
$SIZE \
--lower-limit 0 \
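An aside on the new scaling logic: since bps is defined as mbps times 2000000 (twice the actual bit rate), dividing maxbps by maxlatency yields a SCALE that should let the plotted throughput curve peak at half the height of the latency curve, matching the goal stated in the comment. As for the commit's own question about checking the completion status of the rrdtool command, a minimal guard in the script's existing backtick/awk style (illustrative only, not part of r584) might be:

    # hypothetical guard, not in r584: make sure rrdtool actually
    # emitted two numeric values (max latency, then max bps) before
    # dividing one by the other
    if [ `echo $MAXLATMAXBPS | wc -w` -ne 2 ]; then
        echo "failed to extract max latency and max bps from rrdtool" >&2
        exit 1
    fi
    SCALE=`echo $MAXLATMAXBPS | awk '{print $2/$1}'`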
Modified: trunk/doc/examples/runemomniaggdemo.sh
===================================================================
--- trunk/doc/examples/runemomniaggdemo.sh 2012-05-17 17:34:05 UTC (rev 583)
+++ trunk/doc/examples/runemomniaggdemo.sh 2012-05-22 01:17:54 UTC (rev 584)
@@ -1,4 +1,4 @@
-# this is a quick and dirty migration of runemomniagg2.sh to the
+ this is a quick and dirty migration of runemomniagg2.sh to the
# --enable-demo mode of aggregate testing
function kill_netperfs {
@@ -101,6 +101,10 @@
DO_STREAM=0;
DO_MAERTS=0;
+# NOTE! The Bidir test depends on being able to set a socket buffer
+# size greater than 13 * 64 KB (832 KB), or there is a risk of the
+# test hanging. If you are running Linux, make certain that
+# net.core.[r|w]mem_max are sufficiently large.
DO_BIDIR=0;
DO_RRAGG=1;
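Apropos of the new NOTE: on Linux the relevant ceilings can be inspected and, with root privileges, raised via sysctl before setting DO_BIDIR=1. A minimal sketch, where 851968 is 13 * 64 KB (832 KB) expressed in bytes:

    # sketch: ensure the kernel will allow the ~832 KB socket buffers
    # the bidirectional test needs; requires root to raise the limits
    NEEDED=851968
    for KNOB in net.core.rmem_max net.core.wmem_max; do
        CUR=`sysctl -n $KNOB`
        if [ $CUR -lt $NEEDED ]; then
            echo "raising $KNOB from $CUR to $NEEDED"
            sysctl -w $KNOB=$NEEDED
        fi
    done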