[netperf-dev] netperf4 commit notice r57 - trunk/src

burger at netperf.org
Fri Feb 3 13:38:13 PST 2006


Author: burger
Date: 2006-02-03 13:38:09 -0800 (Fri, 03 Feb 2006)
New Revision: 57

Modified:
   trunk/src/netconfidence.c
   trunk/src/netconfidence.h
   trunk/src/netlib_hpux.c
   trunk/src/netlib_linux.c
   trunk/src/netperf.c
   trunk/src/netperf.h
   trunk/src/nettest_bsd.c
   trunk/src/nettest_bsd.h
   trunk/src/nettest_dns.c
   trunk/src/nettest_dns.h
   trunk/src/nettest_vst.c
   trunk/src/nettest_vst.h
Log:
Fixed confidence level generation and loops for the vst, bsd, and dns tests.
Fixed service demand generation for the vst, bsd, and dns tests.
Fixed the memset call so it no longer passes sizeof(size_t) as the length
when initializing the report-dependent data structure for the vst, bsd,
and dns tests.
        Stephen Burger
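
The memset fix above addresses a classic C pitfall: sizeof applied to an
arithmetic expression yields the size of the expression's type (size_t
here), not the byte count the expression computes.  A minimal,
self-contained illustration (not taken from the netperf sources):

    #include <stdlib.h>
    #include <string.h>

    typedef struct { int max_count; double results_start; } results_t;

    void allocate_report_data(int max_count)
    {
      size_t len = sizeof(results_t) + 7 * max_count * sizeof(double);
      results_t *rd = malloc(len);
      if (rd) {
        /* WRONG: taking sizeof of the whole length expression yields
           sizeof(size_t), so only the first 4 or 8 bytes get zeroed:
           memset(rd, 0, sizeof(sizeof(results_t) + ...));             */
        /* RIGHT: pass the computed allocation length itself           */
        memset(rd, 0, len);
      }
      free(rd);
    }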



Modified: trunk/src/netconfidence.c
===================================================================
--- trunk/src/netconfidence.c	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/netconfidence.c	2006-02-03 21:38:09 UTC (rev 57)
@@ -123,8 +123,15 @@
     }
   }
   if (level == 0) {
-    fprintf(where,"Invalid confidence_level of %s", desired_level);
-    fprintf(where," was specified! Using default confidence_level of 99%%\n");
+    if (desired_level == NULL) {
+      fprintf(where,"No confidence_level specified!  ");
+    }
+    else {
+      fprintf(where,
+              "Invalid confidence_level of '%s' was specified.\n",
+              desired_level);
+    }
+    fprintf(where, "Using default confidence_level of 99%%\n");
     fflush(where);
     level = 7;
   }
@@ -135,15 +142,30 @@
 double
 set_confidence_interval(char *desired_interval)
 {
-  int interval;
+  double interval;
+  double requested;
 
-  interval = atof(desired_interval);
-  if ((interval > 0.5) || (interval <= 0.0)) {
-    fprintf(where,"Using default confidence_interval of 0.05 instead of %s\n",
-            desired_interval);
-    fflush(where);
+  requested = 0.0;
+  if (desired_interval != NULL) {
+    requested = atof(desired_interval);
+  }
+  if ((requested <= 0.0) || (requested > 0.5)) {
     interval = 0.05;
   }
+  else {
+    interval = requested;
+  }
+  if (interval != requested) {
+    fprintf(where,"Using default confidence_interval of %.3f",interval);
+    if (desired_interval != NULL) {
+      fprintf(where," instead of %s", desired_interval);
+    }
+  }
+  else {
+    fprintf(where,"Using specified confidence_interval of %.3f",interval);
+  }
+  fprintf(where,"\n");
+  fflush(where);
   return(interval);
 }
 
@@ -211,53 +233,39 @@
   return(sqrt(variance));
 }
 
+
+/* get_confidence calculates the measured mean and confidence interval  */
+/* for the given samples.  The measured mean is written to the address  */
+/* contained in avg; the computed confidence interval is written to the */
+/* address in intvl.  get_confidence returns the calculated confidence  */
+/* range as a fraction (percent/100) of the mean.  sgb  2/2/2006        */
+
 double
-get_confidence(double *values, confidence_t *confidence, double *avg)
+get_confidence(double       *values,
+               confidence_t *conf_globals,
+               double       *avg,
+               double       *intvl)
 {
   double mean;
   double sigma;
   double interval;
-  int    count = confidence->count;
-  double delta = -1.0/0.0;
+  int    count = conf_globals->count;
+  double percent = 0.0;
 
   /* _                          */
   /* X +/- t[a/2] * (s/sqrt(n)) */
   /*                            */
 
   sigma    = sample_stddev(values, count, &mean);
-  if (confidence->count > 1) {
-    interval = tdist(confidence->level, count-1) * sigma / sqrt(count);
-    delta    = (mean * confidence->interval) - (2.0 * interval);
+  interval = 0.0;
+  if (count > 1) {
+    interval = tdist(conf_globals->level, count-1) * sigma / sqrt(count);
+    percent  = (2.0 * interval) / mean;
   }
+
   *avg     = mean;
+  *intvl   = interval;
 
-  return(delta);
+  return(percent);
 }
 
- /* display_confidence() is called when we could not achieve the */
- /* desired confidence in the results. it will print the achieved */
- /* confidence to "where" raj 11/94 */
-void
-display_confidence(double confidence_result, double confidence_local_cpu)
-
-{
-  fprintf(where,
-          "!!! WARNING\n");
-  fprintf(where,
-          "!!! Desired confidence was not achieved within ");
-  fprintf(where,
-          "the specified iterations.\n");
-  fprintf(where,
-          "!!! This implies that there was variability in ");
-  fprintf(where,
-          "the test environment that\n");
-  fprintf(where,
-          "!!! must be investigated before going further.\n");
-  fprintf(where,
-          "!!! Confidence intervals: Throughput      : %4.1f%%\n",
-           confidence_result *100.0);
-  fprintf(where,
-          "!!!                       Local CPU util  : %4.1f%%\n",
-           confidence_local_cpu *100.0);
-  /* remote cpu is not supported for netperf4 */
-}
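
For reference, the quantity computed above is the two-sided Student-t
confidence interval, mean +/- t[a/2] * s/sqrt(n); get_confidence() now
returns the full interval width as a fraction of the mean.  A hedged
usage sketch (assumes the declarations in netconfidence.h; the sample
numbers are invented):

    #include <stdio.h>
    #include "netconfidence.h"

    static void confidence_sketch(confidence_t *conf)
    {
      /* three made-up throughput samples from three test runs */
      double samples[3] = { 941.2, 938.7, 944.1 };
      double mean, half_width, spread;

      conf->count = 3;
      spread = get_confidence(samples, conf, &mean, &half_width);
      /* spread == 2 * half_width / mean; the run loop iterates until
         spread no longer exceeds the desired confidence interval     */
      printf("mean %f +/- %f (%.2f%% of mean)\n",
             mean, half_width, 100.0 * spread);
    }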

Modified: trunk/src/netconfidence.h
===================================================================
--- trunk/src/netconfidence.h	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/netconfidence.h	2006-02-03 21:38:09 UTC (rev 57)
@@ -45,7 +45,10 @@
   double interval;
 } confidence_t;
 
-extern double get_confidence( double *values, confidence_t *conf, double *mean );
+extern double get_confidence( double       *values,
+                              confidence_t *conf,
+                              double       *mean,
+                              double       *interval );
 
 double set_confidence_interval( char *desired_interval );
 

Modified: trunk/src/netlib_hpux.c
===================================================================
--- trunk/src/netlib_hpux.c	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/netlib_hpux.c	2006-02-03 21:38:09 UTC (rev 57)
@@ -1,5 +1,5 @@
 static char netlib_specific_id[]="\
-@(#)(c) Copyright 2006 Hewlett-Packard Company, $Id: netlib_hpux.c 21 2006-1-19 01:07:54Z burger $";
+@(#)(c) Copyright 2006 Hewlett-Packard Company, $Id$";
 
 /*
 

Modified: trunk/src/netlib_linux.c
===================================================================
--- trunk/src/netlib_linux.c	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/netlib_linux.c	2006-02-03 21:38:09 UTC (rev 57)
@@ -1,5 +1,5 @@
 static char netlib_specific_id[]="\
-@(#)(c) Copyright 2006, Hewlett-Packard Company, $Id: netlib_hpux.c 21 2006-1-19 01:07:54Z burger $";
+@(#)(c) Copyright 2006, Hewlett-Packard Company, $Id$";
 
 /*
 

Modified: trunk/src/netperf.c
===================================================================
--- trunk/src/netperf.c	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/netperf.c	2006-02-03 21:38:09 UTC (rev 57)
@@ -1423,7 +1423,11 @@
        of any report_data structures that are required across the multiple
        invocations that occur during the loop.  */
 
+    fflush(where);
     if (min_count > 1) {
+      fprintf(where, 
+              "!!! TEST RUN USING CONFIDENCE  max_count = %d  min_count = %d\n",
+              max_count, min_count);
       /* initialize confidence information */
       desired_level = (char*)(xmlGetProp(my_cmd,(const xmlChar *)"confidence"));
       dsrd_interval = (char*)(xmlGetProp(my_cmd,(const xmlChar *)"interval"));
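
Worth noting for the two xmlGetProp() calls above: libxml2 returns NULL
when the requested attribute is absent, which is exactly the case the
new NULL guards in set_confidence_level() and set_confidence_interval()
handle.  A hedged sketch of the intended flow (the xmlFree() calls are
my addition, not shown in this hunk; xmlGetProp allocates its result):

    char *lvl = (char *)xmlGetProp(my_cmd, (const xmlChar *)"confidence");
    char *ivl = (char *)xmlGetProp(my_cmd, (const xmlChar *)"interval");
    level     = set_confidence_level(lvl);       /* NULL -> default 99%  */
    interval  = set_confidence_interval(ivl);    /* NULL -> default 0.05 */
    if (lvl) xmlFree((xmlChar *)lvl);
    if (ivl) xmlFree((xmlChar *)ivl);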

Modified: trunk/src/netperf.h
===================================================================
--- trunk/src/netperf.h	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/netperf.h	2006-02-03 21:38:09 UTC (rev 57)
@@ -50,7 +50,7 @@
 #define HIST  void*
 #endif
 
-#ifndef WIN32
+#ifdef WIN32
 #define NETPERF_DEBUG_LOG_DIR "c:\\temp\\"
 #define NETPERF_DEBUG_LOG_PREFIX  "netperf"
 #define NETPERF_DEBUG_LOG_SUFFIX  ".log"
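
The flipped conditional above is the whole fix: the Windows-style debug
log paths were previously compiled in everywhere except WIN32.  The
non-Windows branch is outside this hunk; the presumed overall shape is
sketched below (the /tmp/ default is an assumption, not from the source):

    #ifdef WIN32
    #define NETPERF_DEBUG_LOG_DIR "c:\\temp\\"
    #else
    #define NETPERF_DEBUG_LOG_DIR "/tmp/"   /* hypothetical unix default */
    #endif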

Modified: trunk/src/nettest_bsd.c
===================================================================
--- trunk/src/nettest_bsd.c	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/nettest_bsd.c	2006-02-03 21:38:09 UTC (rev 57)
@@ -2396,6 +2396,7 @@
   bsd_results_t *rd;
   FILE          *outfd;
   int            max_count;
+  size_t         malloc_size;
 
   rd        = test_set->report_data;
   max_count = test_set->confidence.max_count;
@@ -2417,10 +2418,14 @@
     outfd = stdout;
   }
   /* allocate and initialize report data */
-  rd = malloc(sizeof(bsd_results_t) + 7 * max_count * sizeof(double));
+  malloc_size = sizeof(bsd_results_t) + 7 * max_count * sizeof(double);
+  rd = malloc(malloc_size);
   if (rd) {
-    memset(rd, 0,
-           sizeof(sizeof(bsd_results_t) + 7 * max_count * sizeof(double)));
+
+    /* The original code took sizeof of an arithmetic expression, so     */
+    /* memset only zeroed the first sizeof(size_t) bytes.  sgb 20060203  */
+
+    memset(rd, 0, malloc_size);
     rd->max_count      = max_count;
     rd->results        = &(rd->results_start);
     rd->xmit_results   = &(rd->results[max_count]);
@@ -2528,9 +2533,9 @@
               xmlGetProp(stats, (const xmlChar *)cntr_name[i]));
     }
   }
-  elapsed_seconds = test_cntr[TST_E_SEC] + test_cntr[TST_E_USEC]/1000000;
-  xmit_rate       = test_cntr[TST_X_BYTES]*8/(elapsed_seconds*1000000);
-  recv_rate       = test_cntr[TST_R_BYTES]*8/(elapsed_seconds*1000000);
+  elapsed_seconds = test_cntr[TST_E_SEC] + test_cntr[TST_E_USEC]/1000000.0;
+  xmit_rate       = test_cntr[TST_X_BYTES]*8/(elapsed_seconds*1000000.0);
+  recv_rate       = test_cntr[TST_R_BYTES]*8/(elapsed_seconds*1000000.0);
   xmit_trans_rate = test_cntr[TST_X_TRANS]/elapsed_seconds;
   recv_trans_rate = test_cntr[TST_R_TRANS]/elapsed_seconds;
   if (test_set->debug) {
@@ -2590,7 +2595,7 @@
   /* end of printing bsd per test instance results */
 }
 
-static void
+static double
 process_sys_stats(tset_t *test_set, xmlNodePtr stats, xmlChar *tid)
 {
   int            i;
@@ -2686,6 +2691,7 @@
     fflush(outfd);
   }
   /* end of printing sys stats instance results */
+  return(local_cpus);
 }
 
 static void
@@ -2700,6 +2706,7 @@
   int            count; 
   int            index; 
   int            num_of_tests;
+  double         num_of_cpus;
  
   rd        = test_set->report_data;
   proc_name = "process_stats_for_run";
@@ -2727,6 +2734,7 @@
   rd->run_time[index]      =  0.0;
 
   num_of_tests  = 0;
+  num_of_cpus   = 0.0;
   while (set_elt != NULL) {
     int stats_for_test;
     test    = set_elt->test;
@@ -2752,7 +2760,7 @@
       }
       if(!xmlStrcmp(stats->name,(const xmlChar *)"sys_stats")) {
         /* process system statistics */
-        process_sys_stats(test_set, stats, test->id);
+        num_of_cpus = process_sys_stats(test_set, stats, test->id);
         stats_for_test++;
         num_of_tests++;
       }
@@ -2784,6 +2792,7 @@
     }
     set_elt = set_elt->next;
   }
+
   if (rd->result_minimum > rd->results[index]) {
     rd->result_minimum = rd->results[index];
   }
@@ -2791,6 +2800,19 @@
     rd->result_maximum = rd->results[index];
   }
   rd->run_time[index] = rd->run_time[index] / (double)num_of_tests;
+
+  /* now calculate service demand for this test run. Remember the cpu */
+  /* utilization is in the range 0.0 to 1.0 so we need to multiply by */
+  /* the number of cpus and 1,000,000.0 to get to microseconds of cpu */
+  /* time per unit of work.  The result is in transactions per second */
+  /* or in million bits per second so the sd_denominator is factored  */
+  /* in to convert service demand into usec/trans or usec/Kbytes.     */
+
+  if ((rd->results[index] != 0.0) && (num_of_cpus != 0.0)) {
+    rd->servdemand[index] = rd->utilization[index] * num_of_cpus * 1000000.0 /
+                            (rd->results[index] * rd->sd_denominator);
+  }
+  NETPERF_DEBUG_EXIT(test_set->debug,test_set->where);
 }
 
 static void
@@ -2798,36 +2820,72 @@
 {
   bsd_results_t *rd;
   double         confidence;
+  double         temp;
   
   rd        = test_set->report_data;
 
+  NETPERF_DEBUG_ENTRY(test_set->debug,test_set->where);
+
     /* calculate confidence and summary result values */
   confidence                    = get_confidence(rd->run_time,
                                       &(test_set->confidence),
-                                      &(rd->ave_time));
+                                      &(rd->ave_time),
+                                      &(temp));
   rd->result_confidence         = get_confidence(rd->results,
                                       &(test_set->confidence),
-                                      &(rd->result_measured_mean));
+                                      &(rd->result_measured_mean),
+                                      &(rd->result_interval));
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\tresults      conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->result_confidence,
+            rd->result_measured_mean, rd->result_interval);
+    fflush(test_set->where);
+  }
   rd->cpu_util_confidence       = get_confidence(rd->utilization,
                                       &(test_set->confidence),
-                                      &(rd->cpu_util_measured_mean));
+                                      &(rd->cpu_util_measured_mean),
+                                      &(rd->cpu_util_interval));
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\tcpu_util     conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->cpu_util_confidence,
+            rd->cpu_util_measured_mean, rd->cpu_util_interval);
+    fflush(test_set->where);
+  }
   rd->service_demand_confidence = get_confidence(rd->servdemand,
                                       &(test_set->confidence),
-                                      &(rd->service_demand_measured_mean));
-  if (rd->result_confidence > rd->cpu_util_confidence) {
-    if (rd->cpu_util_confidence > rd->service_demand_confidence) {
-      confidence  = rd->service_demand_confidence;
-    } else {
-      confidence  = rd->cpu_util_confidence;
-    }
-  } else {
-    if (rd->result_confidence > rd->service_demand_confidence) {
-      confidence  = rd->service_demand_confidence;
-    } else {
-      confidence  = rd->result_confidence;
-    }
+                                      &(rd->service_demand_measured_mean),
+                                      &(rd->service_demand_interval));
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\tserv_demand  conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->service_demand_confidence,
+            rd->service_demand_measured_mean, rd->service_demand_interval);
+    fflush(test_set->where);
   }
-  test_set->confidence.value = confidence;
+
+  if (rd->result_confidence >  rd->cpu_util_confidence) {
+    confidence = rd->result_confidence;
+  }
+  else {
+    confidence = rd->cpu_util_confidence;
+  }
+  if (rd->service_demand_confidence > confidence) {
+    confidence = rd->service_demand_confidence;
+  }
+
+  if (test_set->confidence.min_count > 1) {
+    test_set->confidence.value = test_set->confidence.interval - confidence;
+  }
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\t%3drun confidence = %.2f%%\tcheck value = %f\n",
+            test_set->confidence.count,
+            100.0 * confidence, test_set->confidence.value);
+    fflush(test_set->where);
+  }
+  NETPERF_DEBUG_EXIT(test_set->debug,test_set->where);
 }
 
 static void
@@ -2906,7 +2964,43 @@
   fflush(outfd);
 }
 
+
 static void
+print_did_not_meet_confidence(tset_t *test_set)
+{
+  bsd_results_t *rd;
+  FILE          *outfd;
+
+  rd    = test_set->report_data;
+  outfd = rd->outfd;
+
+
+  /* print the confidence failed line */
+  fprintf(outfd,"\n");
+  fprintf(outfd,"!!! WARNING\n");
+  fprintf(outfd,"!!! Desired confidence was not achieved within ");
+  fprintf(outfd,"the specified iterations. (%d)\n",
+          test_set->confidence.max_count);
+  fprintf(outfd,
+          "!!! This implies that there was variability in ");
+  fprintf(outfd,
+          "the test environment that\n");
+  fprintf(outfd,
+          "!!! must be investigated before going further.\n");
+  fprintf(outfd,
+          "!!! Confidence intervals: RESULT     : %6.2f%%\n",
+          100.0 * rd->result_confidence);
+  fprintf(outfd,
+          "!!!                       CPU util   : %6.2f%%\n",
+          100.0 * rd->cpu_util_confidence);
+  fprintf(outfd,
+          "!!!                       ServDemand : %6.2f%%\n",
+          100.0 * rd->service_demand_confidence);
+  fflush(outfd);
+}
+
+
+static void
 print_results_summary(tset_t *test_set)
 {
   bsd_results_t *rd;
@@ -2995,14 +3089,14 @@
     fprintf(outfd,"%7.2f ",rd->result_maximum);                   /* 43,8 */
   } else {
     fprintf(outfd,"%7.0f ",rd->result_measured_mean);             /* 19,8 */
-    fprintf(outfd,"%7.2f ",rd->result_confidence);                /* 27,8 */
+    fprintf(outfd,"%7.2f ",rd->result_interval);                  /* 27,8 */
     fprintf(outfd,"%7.0f ",rd->result_minimum);                   /* 35,8 */
     fprintf(outfd,"%7.0f ",rd->result_maximum);                   /* 43,8 */
   }
   fprintf(outfd,"%6.4f ",rd->cpu_util_measured_mean);             /* 51,7 */
-  fprintf(outfd,"%6.4f ",rd->cpu_util_confidence);                /* 58,7 */
+  fprintf(outfd,"%6.4f ",rd->cpu_util_interval);                  /* 58,7 */
   fprintf(outfd,"%6.3f ",rd->service_demand_measured_mean);       /* 65,7 */
-  fprintf(outfd,"%6.3f ",rd->service_demand_confidence);          /* 72,7 */
+  fprintf(outfd,"%6.3f ",rd->service_demand_interval);            /* 72,7 */
   fprintf(outfd,"\n");                                            /* 79,1 */
   fflush(outfd);
 }
@@ -3024,6 +3118,7 @@
     
   /* process statistics for this run */
   process_stats_for_run(test_set);
+
   /* calculate confidence and summary result values */
   update_results_and_confidence(test_set);
 
@@ -3035,12 +3130,12 @@
   max_count    = test_set->confidence.max_count;
   min_count    = test_set->confidence.min_count;
   /* always print summary results at end of last call through loop */
-  if (count == max_count) {
-
-/*  if ((count == max_count) || 
-      ((rd->confidence >= 0) && (count >= min_count))) */
-
+  if ((count >= max_count) || 
+      ((test_set->confidence.value >= 0) && (count >= min_count))) {
     print_results_summary(test_set);
+    if (test_set->confidence.value < 0) {
+      print_did_not_meet_confidence(test_set);
+    }
   }
 } /* end of report_bsd_test_results */
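
The service demand calculation added in this file (and mirrored in the
dns and vst tests) reduces to one expression; a minimal sketch with
invented numbers, the function name being illustrative only:

    /* usec of CPU per unit of work; the unit is set by sd_denominator
       (transactions or Kbytes)                                        */
    double service_demand(double utilization, double num_cpus,
                          double result, double sd_denominator)
    {
      return utilization * num_cpus * 1000000.0 /
             (result * sd_denominator);
    }

    /* e.g. 0.25 utilization on 2 cpus at 800 units/sec with a
       denominator of 1.0 -> 0.25 * 2 * 1e6 / 800 = 625 usec/unit */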
 

Modified: trunk/src/nettest_bsd.h
===================================================================
--- trunk/src/nettest_bsd.h	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/nettest_bsd.h	2006-02-03 21:38:09 UTC (rev 57)
@@ -137,12 +137,15 @@
   double *run_time;
   double ave_time;
   double result_measured_mean;
+  double result_interval;
   double result_confidence;
   double result_minimum;
   double result_maximum;
   double cpu_util_measured_mean;
+  double cpu_util_interval;
   double cpu_util_confidence;
   double service_demand_measured_mean;
+  double service_demand_interval;
   double service_demand_confidence;
   double confidence;
   double sd_denominator;

Modified: trunk/src/nettest_dns.c
===================================================================
--- trunk/src/nettest_dns.c	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/nettest_dns.c	2006-02-03 21:38:09 UTC (rev 57)
@@ -1825,6 +1825,7 @@
   dns_results_t *rd;
   FILE          *outfd;
   int            max_count;
+  size_t         malloc_size;
 
   rd        = test_set->report_data;
   max_count = test_set->confidence.max_count;
@@ -1846,10 +1847,14 @@
     outfd = stdout;
   }
   /* allocate and initialize report data */
-  rd = malloc(sizeof(dns_results_t) + 7 * max_count * sizeof(double));
+  malloc_size = sizeof(dns_results_t) + 7 * max_count * sizeof(double);
+  rd = malloc(malloc_size);
   if (rd) {
-    memset(rd, 0,
-           sizeof(sizeof(dns_results_t) + 7 * max_count * sizeof(double)));
+
+    /* The original code took sizeof of an arithmetic expression, so     */
+    /* memset only zeroed the first sizeof(size_t) bytes.  sgb 20060203  */
+
+    memset(rd, 0, malloc_size);
     rd->max_count      = max_count;
     rd->results        = &(rd->results_start);
     rd->xmit_results   = &(rd->results[max_count]);
@@ -1958,9 +1963,9 @@
               xmlGetProp(stats, (const xmlChar *)cntr_name[i]));
     }
   }
-  elapsed_seconds = test_cntr[TST_E_SEC] + test_cntr[TST_E_USEC]/1000000;
-  xmit_rate       = test_cntr[TST_X_BYTES]*8/(elapsed_seconds*1000000);
-  recv_rate       = test_cntr[TST_R_BYTES]*8/(elapsed_seconds*1000000);
+  elapsed_seconds = test_cntr[TST_E_SEC] + test_cntr[TST_E_USEC]/1000000.0;
+  xmit_rate       = test_cntr[TST_X_BYTES]*8/(elapsed_seconds*1000000.0);
+  recv_rate       = test_cntr[TST_R_BYTES]*8/(elapsed_seconds*1000000.0);
   xmit_trans_rate = test_cntr[TST_X_TRANS]/elapsed_seconds;
   recv_trans_rate = test_cntr[TST_R_TRANS]/elapsed_seconds;
   if (test_set->debug) {
@@ -2020,7 +2025,7 @@
   /* end of printing dns per test instance results */
 }
 
-void
+double
 process_sys_stats(tset_t *test_set, xmlNodePtr stats, xmlChar *tid)
 {
   int            i;
@@ -2116,6 +2121,7 @@
     fflush(outfd);
   }
   /* end of printing sys stats instance results */
+  return(local_cpus);
 }
 
 void
@@ -2128,6 +2134,7 @@
   xmlNodePtr     prev_stats;
   int            count; 
   int            index; 
+  double         num_of_cpus;
  
   rd        = test_set->report_data;
   set_elt   = test_set->tests;
@@ -2153,6 +2160,7 @@
   rd->servdemand[index]    =  0.0;
   rd->run_time[index]      =  0.0;
 
+  num_of_cpus  = 0.0;
   while (set_elt != NULL) {
     int stats_for_test;
     test    = set_elt->test;
@@ -2178,7 +2186,7 @@
       }
       if(!xmlStrcmp(stats->name,(const xmlChar *)"sys_stats")) {
         /* process system statistics */
-        process_sys_stats(test_set, stats, test->id);
+        num_of_cpus = process_sys_stats(test_set, stats, test->id);
         stats_for_test++;
       }
       if(!xmlStrcmp(stats->name,(const xmlChar *)"test_stats")) {
@@ -2209,12 +2217,26 @@
     }
     set_elt = set_elt->next;
   }
+
   if (rd->result_minimum > rd->results[index]) {
     rd->result_minimum = rd->results[index];
   }
   if (rd->result_maximum < rd->results[index]) {
     rd->result_maximum = rd->results[index];
   }
+
+  /* now calculate service demand for this test run. Remember the cpu */
+  /* utilization is in the range 0.0 to 1.0 so we need to multiply by */
+  /* the number of cpus and 1,000,000.0 to get to microseconds of cpu */
+  /* time per unit of work.  The result is in transactions per second */
+  /* or in million bits per second so the sd_denominator is factored  */
+  /* in to convert service demand into usec/trans or usec/Kbytes.     */
+
+  if ((rd->results[index] != 0.0) && (num_of_cpus != 0.0)) {
+    rd->servdemand[index] = rd->utilization[index] * num_of_cpus * 1000000.0 /
+                            (rd->results[index] * rd->sd_denominator);
+  }
+  NETPERF_DEBUG_EXIT(test_set->debug,test_set->where);
 }
 
 void
@@ -2222,36 +2244,72 @@
 {
   dns_results_t *rd;
   double         confidence;
+  double         temp;
   
   rd        = test_set->report_data;
 
-    /* calculate confidence and summary result values */
+  NETPERF_DEBUG_ENTRY(test_set->debug,test_set->where);
+
+  /* calculate confidence and summary result values */
   confidence                    = get_confidence(rd->run_time,
                                       &(test_set->confidence),
-                                      &(rd->ave_time));
+                                      &(rd->ave_time),
+                                      &(temp));
   rd->result_confidence         = get_confidence(rd->results,
                                       &(test_set->confidence),
-                                      &(rd->result_measured_mean));
+                                      &(rd->result_measured_mean),
+                                      &(rd->result_interval));
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\tresults      conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->result_confidence,
+            rd->result_measured_mean, rd->result_interval);
+    fflush(test_set->where);
+  }
   rd->cpu_util_confidence       = get_confidence(rd->utilization,
                                       &(test_set->confidence),
-                                      &(rd->cpu_util_measured_mean));
+                                      &(rd->cpu_util_measured_mean),
+                                      &(rd->cpu_util_interval));
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\tcpu_util     conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->cpu_util_confidence,
+            rd->cpu_util_measured_mean, rd->cpu_util_interval);
+    fflush(test_set->where);
+  }
   rd->service_demand_confidence = get_confidence(rd->servdemand,
                                       &(test_set->confidence),
-                                      &(rd->service_demand_measured_mean));
-  if (rd->result_confidence > rd->cpu_util_confidence) {
-    if (rd->cpu_util_confidence > rd->service_demand_confidence) {
-      confidence  = rd->service_demand_confidence;
-    } else {
-      confidence  = rd->cpu_util_confidence;
-    }
-  } else {
-    if (rd->result_confidence > rd->service_demand_confidence) {
-      confidence  = rd->service_demand_confidence;
-    } else {
-      confidence  = rd->result_confidence;
-    }
+                                      &(rd->service_demand_measured_mean),
+                                      &(rd->service_demand_interval));
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\tserv_demand  conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->service_demand_confidence,
+            rd->service_demand_measured_mean, rd->service_demand_interval);
+    fflush(test_set->where);
   }
-  test_set->confidence.value = confidence;
+
+  if (rd->result_confidence >  rd->cpu_util_confidence) {
+    confidence = rd->result_confidence;
+  }
+  else {
+    confidence = rd->cpu_util_confidence;
+  }
+  if (rd->service_demand_confidence > confidence) {
+    confidence = rd->service_demand_confidence;
+  }
+
+  if (test_set->confidence.min_count > 1) {
+    test_set->confidence.value = test_set->confidence.interval - confidence;
+  }
+  if (test_set->debug) {
+    fprintf(test_set->where,
+            "\t%3drun confidence = %.2f%%\tcheck value = %f\n",
+            test_set->confidence.count,
+            100.0 * confidence, test_set->confidence.value);
+    fflush(test_set->where);
+  }
+  NETPERF_DEBUG_EXIT(test_set->debug,test_set->where);
 }
 
 void
@@ -2330,6 +2388,42 @@
   fflush(outfd);
 }
 
+
+static void
+print_did_not_meet_confidence(tset_t *test_set)
+{
+  dns_results_t *rd;
+  FILE          *outfd;
+
+  rd    = test_set->report_data;
+  outfd = rd->outfd;
+
+
+  /* print the confidence failed line */
+  fprintf(outfd,"\n");
+  fprintf(outfd,"!!! WARNING\n");
+  fprintf(outfd,"!!! Desired confidence was not achieved within ");
+  fprintf(outfd,"the specified iterations. (%d)\n",
+          test_set->confidence.max_count);
+  fprintf(outfd,
+          "!!! This implies that there was variability in ");
+  fprintf(outfd,
+          "the test environment that\n");
+  fprintf(outfd,
+          "!!! must be investigated before going further.\n");
+  fprintf(outfd,
+          "!!! Confidence intervals: RESULT     : %6.2f%%\n",
+          100.0 * rd->result_confidence);
+  fprintf(outfd,
+          "!!!                       CPU util   : %6.2f%%\n",
+          100.0 * rd->cpu_util_confidence);
+  fprintf(outfd,
+          "!!!                       ServDemand : %6.2f%%\n",
+          100.0 * rd->service_demand_confidence);
+  fflush(outfd);
+}
+
+
 void
 print_results_summary(tset_t *test_set)
 {
@@ -2414,19 +2508,19 @@
   fprintf(outfd,"%-6.2f ",rd->ave_time);                          /* 12,7 */
   if (rd->sd_denominator != 1.0) {
     fprintf(outfd,"%7.2f ",rd->result_measured_mean);             /* 19,8 */
-    fprintf(outfd,"%7.3f ",rd->result_confidence);                /* 27,8 */
+    fprintf(outfd,"%7.3f ",rd->result_interval);                /* 27,8 */
     fprintf(outfd,"%7.2f ",rd->result_minimum);                   /* 35,8 */
     fprintf(outfd,"%7.2f ",rd->result_maximum);                   /* 43,8 */
   } else {
     fprintf(outfd,"%7.0f ",rd->result_measured_mean);             /* 19,8 */
-    fprintf(outfd,"%7.2f ",rd->result_confidence);                /* 27,8 */
+    fprintf(outfd,"%7.2f ",rd->result_interval);                  /* 27,8 */
     fprintf(outfd,"%7.0f ",rd->result_minimum);                   /* 35,8 */
     fprintf(outfd,"%7.0f ",rd->result_maximum);                   /* 43,8 */
   }
   fprintf(outfd,"%6.4f ",rd->cpu_util_measured_mean);             /* 51,7 */
-  fprintf(outfd,"%6.4f ",rd->cpu_util_confidence);                /* 58,7 */
+  fprintf(outfd,"%6.4f ",rd->cpu_util_interval);                  /* 58,7 */
   fprintf(outfd,"%6.3f ",rd->service_demand_measured_mean);       /* 65,7 */
-  fprintf(outfd,"%6.3f ",rd->service_demand_confidence);          /* 72,7 */
+  fprintf(outfd,"%6.3f ",rd->service_demand_interval);            /* 72,7 */
   fprintf(outfd,"\n");                                            /* 79,1 */
   fflush(outfd);
 }
@@ -2437,6 +2531,7 @@
   dns_results_t *rd;
   int count;
   int max_count;
+  int min_count;
 
   rd  = test_set->report_data;
 
@@ -2447,6 +2542,7 @@
     
   /* process statistics for this run */
   process_stats_for_run(test_set);
+
   /* calculate confidence and summary result values */
   update_results_and_confidence(test_set);
 
@@ -2456,10 +2552,15 @@
 
   count        = test_set->confidence.count;
   max_count    = test_set->confidence.max_count;
+  min_count    = test_set->confidence.min_count;
 
   /* always print summary results at end of last call through loop */
-  if (count == max_count) {
+  if ((count >= max_count) ||
+      ((test_set->confidence.value >= 0) && (count >= min_count))) {
     print_results_summary(test_set);
+    if (test_set->confidence.value < 0) {
+      print_did_not_meet_confidence(test_set);
+    }
   }
 } /* end of report_dns_test_results */
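
The new loop-exit test used in report_dns_test_results() (and in the
bsd and vst reports) can be read in isolation; a hedged standalone
form, with the function name invented and the logic taken from the
diff:

    /* check_value = desired interval - widest measured confidence
       spread; it goes non-negative once every tracked metric fits
       inside the desired interval                                  */
    int test_run_is_done(int count, int min_count, int max_count,
                         double check_value)
    {
      return (count >= max_count) ||
             ((check_value >= 0.0) && (count >= min_count));
    }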
 

Modified: trunk/src/nettest_dns.h
===================================================================
--- trunk/src/nettest_dns.h	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/nettest_dns.h	2006-02-03 21:38:09 UTC (rev 57)
@@ -127,12 +127,15 @@
   double *run_time;
   double ave_time;
   double result_measured_mean;
+  double result_interval;
   double result_confidence;
   double result_minimum;
   double result_maximum;
   double cpu_util_measured_mean;
+  double cpu_util_interval;
   double cpu_util_confidence;
   double service_demand_measured_mean;
+  double service_demand_interval;
   double service_demand_confidence;
   double confidence;
   double sd_denominator;

Modified: trunk/src/nettest_vst.c
===================================================================
--- trunk/src/nettest_vst.c	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/nettest_vst.c	2006-02-03 21:38:09 UTC (rev 57)
@@ -2331,6 +2331,7 @@
   vst_results_t *rd;
   FILE          *outfd;
   int            max_count;
+  size_t         malloc_size;
 
   rd        = test_set->report_data;
   max_count = test_set->confidence.max_count;
@@ -2354,10 +2355,14 @@
     outfd = stdout;
   }
   /* allocate and initialize report data */
-  rd = malloc(sizeof(vst_results_t) + 7 * max_count * sizeof(double));
+  malloc_size = sizeof(vst_results_t) + 7 * max_count * sizeof(double);
+  rd = malloc(malloc_size);
   if (rd) {
-    memset(rd, 0,
-           sizeof(sizeof(vst_results_t) + 7 * max_count * sizeof(double)));
+
+    /* The original code took sizeof of an arithmetic expression, so     */
+    /* memset only zeroed the first sizeof(size_t) bytes.  sgb 20060203  */
+
+    memset(rd, 0, malloc_size);
     rd->max_count      = max_count;
     rd->results        = &(rd->results_start);
     rd->xmit_results   = &(rd->results[max_count]);
@@ -2467,9 +2472,9 @@
               xmlGetProp(stats, (const xmlChar *)cntr_name[i]));
     }
   }
-  elapsed_seconds = test_cntr[TST_E_SEC] + test_cntr[TST_E_USEC]/1000000;
-  xmit_rate       = test_cntr[TST_X_BYTES]*8/(elapsed_seconds*1000000);
-  recv_rate       = test_cntr[TST_R_BYTES]*8/(elapsed_seconds*1000000);
+  elapsed_seconds = test_cntr[TST_E_SEC] + test_cntr[TST_E_USEC]/1000000.0;
+  xmit_rate       = test_cntr[TST_X_BYTES]*8/(elapsed_seconds*1000000.0);
+  recv_rate       = test_cntr[TST_R_BYTES]*8/(elapsed_seconds*1000000.0);
   xmit_trans_rate = test_cntr[TST_X_TRANS]/elapsed_seconds;
   recv_trans_rate = test_cntr[TST_R_TRANS]/elapsed_seconds;
   if (test_set->debug) {
@@ -2527,7 +2532,7 @@
   /* end of printing vst per test instance results */
 }
 
-static void
+static double
 process_sys_stats(tset_t *test_set, xmlNodePtr stats, xmlChar *tid)
 {
   int            i;
@@ -2621,6 +2626,7 @@
     fflush(outfd);
   }
   /* end of printing sys stats instance results */
+  return(local_cpus);
 }
 
 static void
@@ -2634,6 +2640,7 @@
   int            count; 
   int            index;
   int            num_of_tests;
+  double         num_of_cpus;
  
 
   rd        = test_set->report_data;
@@ -2661,6 +2668,7 @@
   rd->run_time[index]      =  0.0;
 
   num_of_tests  = 0;
+  num_of_cpus   = 0.0;
   while (set_elt != NULL) {
     int stats_for_test;
     test    = set_elt->test;
@@ -2687,7 +2695,7 @@
       }
       if(!xmlStrcmp(stats->name,(const xmlChar *)"sys_stats")) {
         /* process system statistics */
-        process_sys_stats(test_set, stats, test->id);
+        num_of_cpus = process_sys_stats(test_set, stats, test->id);
         stats_for_test++;
       }
       if(!xmlStrcmp(stats->name,(const xmlChar *)"test_stats")) {
@@ -2719,7 +2727,7 @@
     }
     set_elt = set_elt->next;
   }
-  
+
   if (rd->result_minimum > rd->results[index]) {
     rd->result_minimum = rd->results[index];
   }
@@ -2727,6 +2735,19 @@
     rd->result_maximum = rd->results[index];
   }
   rd->run_time[index] = rd->run_time[index] / (double)num_of_tests;
+
+  /* now calculate service demand for this test run. Remember the cpu */
+  /* utilization is in the range 0.0 to 1.0 so we need to multiply by */
+  /* the number of cpus and 1,000,000.0 to get to microseconds of cpu */
+  /* time per unit of work.  The result is in transactions per second */
+  /* or in million bits per second so the sd_denominator is factored  */
+  /* in to convert service demand into usec/trans or usec/Kbytes.     */
+
+  if ((rd->results[index] != 0.0) && (num_of_cpus != 0.0)) {
+    rd->servdemand[index] = rd->utilization[index] * num_of_cpus * 1000000.0 /
+                            (rd->results[index] * rd->sd_denominator);
+  }
+  NETPERF_DEBUG_EXIT(test_set->debug,test_set->where);
 }
 
 static void
@@ -2734,45 +2755,93 @@
 {
   vst_results_t *rd;
   double         confidence;
+  double         temp;
+  int            loc_debug = 0;
   
   rd        = test_set->report_data;
 
-    /* calculate confidence and summary result values */
+  NETPERF_DEBUG_ENTRY(test_set->debug,test_set->where);
+
+  /* calculate confidence and summary result values */
   confidence                    = get_confidence(rd->xmit_results,
                                       &(test_set->confidence),
-                                      &(rd->xmit_measured_mean));
+                                      &(rd->xmit_measured_mean),
+                                      &(rd->xmit_interval));
+  if (test_set->debug || loc_debug) {
+    fprintf(test_set->where, 
+            "\txmit_results conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * confidence, rd->xmit_measured_mean, rd->xmit_interval);
+    fflush(test_set->where);
+  }
   confidence                    = get_confidence(rd->recv_results,
                                       &(test_set->confidence),
-                                      &(rd->recv_measured_mean));
+                                      &(rd->recv_measured_mean),
+                                      &(rd->recv_interval));
+  if (test_set->debug || loc_debug) {
+    fprintf(test_set->where, 
+            "\trecv_results conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * confidence, rd->recv_measured_mean, rd->recv_interval);
+    fflush(test_set->where);
+  }
   confidence                    = get_confidence(rd->run_time,
                                       &(test_set->confidence),
-                                      &(rd->ave_time));
+                                      &(rd->ave_time),
+                                      &(temp));
   rd->result_confidence         = get_confidence(rd->results,
                                       &(test_set->confidence),
-                                      &(rd->result_measured_mean));
+                                      &(rd->result_measured_mean),
+                                      &(rd->result_interval));
+  if (test_set->debug || loc_debug) {
+    fprintf(test_set->where, 
+            "\tresults      conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->result_confidence, 
+            rd->result_measured_mean, rd->result_interval);
+    fflush(test_set->where);
+  }
   rd->cpu_util_confidence       = get_confidence(rd->utilization,
                                       &(test_set->confidence),
-                                      &(rd->cpu_util_measured_mean));
+                                      &(rd->cpu_util_measured_mean),
+                                      &(rd->cpu_util_interval));
+  if (test_set->debug || loc_debug) {
+    fprintf(test_set->where, 
+            "\tcpu_util     conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->cpu_util_confidence, 
+            rd->cpu_util_measured_mean, rd->cpu_util_interval);
+    fflush(test_set->where);
+  }
   rd->service_demand_confidence = get_confidence(rd->servdemand,
                                       &(test_set->confidence),
-                                      &(rd->service_demand_measured_mean));
-  if (rd->result_confidence > rd->cpu_util_confidence) {
-    if (rd->cpu_util_confidence > rd->service_demand_confidence) {
-      confidence  = rd->service_demand_confidence;
-    }
-    else {
-      confidence  = rd->cpu_util_confidence;
-    }
+                                      &(rd->service_demand_measured_mean),
+                                      &(rd->service_demand_interval));
+  if (test_set->debug || loc_debug) {
+    fprintf(test_set->where, 
+            "\tserv_demand  conf = %.2f%%\tmean = %10f +/- %8f\n",
+            100.0 * rd->service_demand_confidence, 
+            rd->service_demand_measured_mean, rd->service_demand_interval);
+    fflush(test_set->where);
   }
+
+  if (rd->result_confidence >  rd->cpu_util_confidence) {
+    confidence = rd->result_confidence;
+  }
   else {
-    if (rd->result_confidence > rd->service_demand_confidence) {
-      confidence  = rd->service_demand_confidence;
-    }
-    else {
-      confidence  = rd->result_confidence;
-    }
+    confidence = rd->cpu_util_confidence;
   }
-  test_set->confidence.value = confidence;
+  if (rd->service_demand_confidence > confidence) {
+    confidence = rd->service_demand_confidence;
+  }
+
+  if (test_set->confidence.min_count > 1) {
+    test_set->confidence.value = test_set->confidence.interval - confidence;
+  }
+  if (test_set->debug || loc_debug) {
+    fprintf(test_set->where, 
+            "\t%3drun confidence = %.2f%%\tcheck value = %f\n",
+            test_set->confidence.count,
+            100.0 * confidence, test_set->confidence.value);
+    fflush(test_set->where);
+  }
+  NETPERF_DEBUG_EXIT(test_set->debug,test_set->where);
 }
 
 static void
@@ -2842,7 +2911,43 @@
   fflush(outfd);
 }
 
+
 static void
+print_did_not_meet_confidence(tset_t *test_set)
+{
+  vst_results_t *rd;
+  FILE          *outfd;
+
+  rd    = test_set->report_data;
+  outfd = rd->outfd;
+
+  
+  /* print the confidence failed line */
+  fprintf(outfd,"\n");
+  fprintf(outfd,"!!! WARNING\n");
+  fprintf(outfd,"!!! Desired confidence was not achieved within ");
+  fprintf(outfd,"the specified iterations. (%d)\n",
+          test_set->confidence.max_count);
+  fprintf(outfd,
+          "!!! This implies that there was variability in ");
+  fprintf(outfd,
+          "the test environment that\n");
+  fprintf(outfd,
+          "!!! must be investigated before going further.\n");
+  fprintf(outfd,
+          "!!! Confidence intervals: RESULT     : %6.2f%%\n",
+          100.0 * rd->result_confidence);
+  fprintf(outfd,
+          "!!!                       CPU util   : %6.2f%%\n",
+          100.0 * rd->cpu_util_confidence);
+  fprintf(outfd,
+          "!!!                       ServDemand : %6.2f%%\n", 
+          100.0 * rd->service_demand_confidence);
+  fflush(outfd);
+}
+
+
+static void
 print_results_summary(tset_t *test_set)
 {
   vst_results_t *rd;
@@ -2888,14 +2993,14 @@
   fprintf(outfd,"A%-3d ",test_set->confidence.count);             /*  0,5 */
   fprintf(outfd,"%-6s ",test_set->id);                            /*  5,7 */
   fprintf(outfd,"%-6.2f ",rd->ave_time);                          /* 12,7 */
-    fprintf(outfd,"%7.2f ",rd->result_measured_mean);             /* 19,8 */
-    fprintf(outfd,"%7.3f ",rd->result_confidence);                /* 27,8 */
-    fprintf(outfd,"%7.2f ",rd->xmit_measured_mean);               /* 35,8 */
-    fprintf(outfd,"%7.2f ",rd->recv_measured_mean);               /* 43,8 */
+  fprintf(outfd,"%7.2f ",rd->result_measured_mean);               /* 19,8 */
+  fprintf(outfd,"%7.3f ",rd->result_interval);                    /* 27,8 */
+  fprintf(outfd,"%7.2f ",rd->xmit_measured_mean);                 /* 35,8 */
+  fprintf(outfd,"%7.2f ",rd->recv_measured_mean);                 /* 43,8 */
   fprintf(outfd,"%6.4f ",rd->cpu_util_measured_mean);             /* 51,7 */
-  fprintf(outfd,"%6.4f ",rd->cpu_util_confidence);                /* 58,7 */
+  fprintf(outfd,"%6.4f ",rd->cpu_util_interval);                  /* 58,7 */
   fprintf(outfd,"%6.3f ",rd->service_demand_measured_mean);       /* 65,7 */
-  fprintf(outfd,"%6.3f ",rd->service_demand_confidence);          /* 72,7 */
+  fprintf(outfd,"%6.3f ",rd->service_demand_interval);            /* 72,7 */
   fprintf(outfd,"\n");                                            /* 79,1 */
   fflush(outfd);
 }
@@ -2917,6 +3022,7 @@
     
   /* process statistics for this run */
   process_stats_for_run(test_set);
+
   /* calculate confidence and summary result values */
   update_results_and_confidence(test_set);
   
@@ -2927,12 +3033,14 @@
   count        = test_set->confidence.count;
   max_count    = test_set->confidence.max_count;
   min_count    = test_set->confidence.min_count;
+
   /* always print summary results at end of last call through loop */
-  if (count == max_count) {
-
-/*  if ((count == max_count) || 
-      ((rd->confidence >= 0) && (count >= min_count))) */
+  if ((count >= max_count) || 
+      ((test_set->confidence.value >= 0) && (count >= min_count))) {
     print_results_summary(test_set);
+    if (test_set->confidence.value < 0) {
+      print_did_not_meet_confidence(test_set);
+    }
   }
 } /* end of report_vst_test_results */
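
The 1000000 -> 1000000.0 changes made in all three tests guard against
C integer division; assuming the test_cntr[] entries hold integral
counter values (which the fix implies), a self-contained illustration:

    #include <stdio.h>

    int main(void)
    {
      long long e_usec = 750000;            /* elapsed microseconds   */
      double wrong = e_usec / 1000000;      /* integer division -> 0.0 */
      double right = e_usec / 1000000.0;    /* promotes to double -> 0.75 */
      printf("%f vs %f\n", wrong, right);
      return 0;
    }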
 

Modified: trunk/src/nettest_vst.h
===================================================================
--- trunk/src/nettest_vst.h	2006-01-30 19:40:36 UTC (rev 56)
+++ trunk/src/nettest_vst.h	2006-02-03 21:38:09 UTC (rev 57)
@@ -125,14 +125,19 @@
   double *run_time;
   double ave_time;
   double result_measured_mean;
+  double result_interval;
   double result_confidence;
   double result_minimum;
   double result_maximum;
   double xmit_measured_mean;
+  double xmit_interval;
   double recv_measured_mean;
+  double recv_interval;
   double cpu_util_measured_mean;
+  double cpu_util_interval;
   double cpu_util_confidence;
   double service_demand_measured_mean;
+  double service_demand_interval;
   double service_demand_confidence;
   double confidence;
   double sd_denominator;


