[netperf-dev] netperf2 commit notice r70 - trunk/src
raj at netperf.org
Mon Apr 17 13:03:44 PDT 2006
Author: raj
Date: 2006-04-17 13:03:42 -0700 (Mon, 17 Apr 2006)
New Revision: 70
Modified:
trunk/src/nettest_bsd.c
trunk/src/nettest_xti.c
trunk/src/nettest_xti.h
Log:
Migrate the XTI tests to the new "DIRTY" routine and do some gratuitous
cleanups in nettest_bsd.c
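
For readers skimming the diff below: the "DIRTY" routine referred to in the log
is access_buffer(), which the XTI call sites now invoke in place of their
hand-rolled dirty/clean loops. What follows is only a sketch of what such a
routine does, reconstructed from the inline loops this commit removes; the
actual access_buffer() lives elsewhere in the netperf tree and may differ in
detail. In particular, using the length argument as a bound on the walk is an
assumption here, as is the (void) sum trick for keeping the reads alive.

  /* SKETCH ONLY -- not the actual netperf implementation.  This mirrors
     the inline loops removed below: write ("dirty") some number of ints
     at the front of the buffer, then read ("clean") some number more, so
     the corresponding cache lines are brought in dirty or clean before
     the buffer is handed to the transport. */
  #include <stdlib.h>

  static void
  access_buffer(char *buffer_ptr,  /* start of the send/recv buffer */
                int   length,      /* buffer size in bytes          */
                int   dirty_count, /* ints to write                 */
                int   clean_count) /* ints to read                  */
  {
    int *word_ptr = (int *) buffer_ptr;
    int *limit    = (int *) (buffer_ptr + length);
    int  sum      = 0;
    int  i;

    for (i = 0; (i < dirty_count) && (word_ptr < limit); i++) {
      *word_ptr = rand();    /* the write dirties the cache line */
      word_ptr++;
    }
    for (i = 0; (i < clean_count) && (word_ptr < limit); i++) {
      sum += *word_ptr;      /* the read pulls the line in clean */
      word_ptr++;
    }
    (void) sum;              /* keep the reads from being optimized away */
  }
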
Modified: trunk/src/nettest_bsd.c
===================================================================
--- trunk/src/nettest_bsd.c 2006-04-14 23:17:13 UTC (rev 69)
+++ trunk/src/nettest_bsd.c 2006-04-17 20:03:42 UTC (rev 70)
@@ -5068,15 +5068,15 @@
(requests_outstanding < first_burst_size)) {
if (debug) {
fprintf(where,
- "injecting, requests_outstanding %d request_cwnd %d burst %d\n",
+ "injecting, req_outstndng %d req_cwnd %d burst %d\n",
requests_outstanding,
request_cwnd,
first_burst_size);
}
- if((len=send(send_socket,
- send_ring->buffer_ptr,
- req_size,
- 0)) != req_size) {
+ if ((len = send(send_socket,
+ send_ring->buffer_ptr,
+ req_size,
+ 0)) != req_size) {
/* we should never hit the end of the test in the first burst */
perror("send_tcp_rr: initial burst data send error");
exit(-1);
@@ -5092,10 +5092,10 @@
HIST_timestamp(&time_one);
#endif /* WANT_HISTOGRAM */
- if((len=send(send_socket,
- send_ring->buffer_ptr,
- req_size,
- 0)) != req_size) {
+ if ((len = send(send_socket,
+ send_ring->buffer_ptr,
+ req_size,
+ 0)) != req_size) {
if (SOCKET_EINTR(len) || (errno == 0)) {
/* we hit the end of a */
/* timed test. */
@@ -5141,7 +5141,7 @@
request_cwnd += 1;
if (debug) {
fprintf(where,
- "increased request_cwnd to %d with first_burst_size %d requests_outstanding %d\n",
+ "incr req_cwnd to %d first_burst %d reqs_outstndng %d\n",
request_cwnd,
first_burst_size,
requests_outstanding);
@@ -5182,23 +5182,23 @@
}
}
- /* At this point we used to call shutdown on the data socket to be */
- /* sure all the data was delivered, but this was not germane in a */
- /* request/response test, and it was causing the tests to "hang" when */
- /* they were being controlled by time. So, I have replaced this */
- /* shutdown call with a call to close that can be found later in the */
- /* procedure. */
+ /* At this point we used to call shutdown on the data socket to be
+ sure all the data was delivered, but this was not germane in a
+ request/response test, and it was causing the tests to "hang"
+ when they were being controlled by time. So, I have replaced
+ this shutdown call with a call to close that can be found later
+ in the procedure. */
- /* this call will always give us the elapsed time for the test, and */
- /* will also store-away the necessaries for cpu utilization */
+ /* this call will always give us the elapsed time for the test,
+ and will also store-away the necessaries for cpu utilization */
cpu_stop(local_cpu_usage,&elapsed_time); /* was cpu being */
/* measured? how long */
/* did we really run? */
- /* Get the statistics from the remote end. The remote will have */
- /* calculated CPU utilization. If it wasn't supposed to care, it */
- /* will return obvious values. */
+ /* Get the statistics from the remote end. The remote will have
+ calculated CPU utilization. If it wasn't supposed to care, it
+ will return obvious values. */
recv_response();
if (!netperf_response.content.serv_errno) {
@@ -5220,16 +5220,16 @@
thruput = nummessages/elapsed_time;
if (local_cpu_usage || remote_cpu_usage) {
- /* We must now do a little math for service demand and cpu */
- /* utilization for the system(s) */
- /* Of course, some of the information might be bogus because */
- /* there was no idle counter in the kernel(s). We need to make */
- /* a note of this for the user's benefit...*/
+ /* We must now do a little math for service demand and cpu
+ utilization for the system(s) Of course, some of the
+ information might be bogus because there was no idle counter in
+ the kernel(s). We need to make a note of this for the user's
+ benefit... */
if (local_cpu_usage) {
local_cpu_utilization = calc_cpu_util(0.0);
- /* since calc_service demand is doing ms/Kunit we will */
- /* multiply the number of transaction by 1024 to get */
- /* "good" numbers */
+ /* since calc_service demand is doing ms/Kunit we will
+ multiply the number of transaction by 1024 to get "good"
+ numbers */
local_service_demand = calc_service_demand((double) nummessages*1024,
0.0,
0.0,
@@ -5242,9 +5242,9 @@
if (remote_cpu_usage) {
remote_cpu_utilization = tcp_rr_result->cpu_util;
- /* since calc_service demand is doing ms/Kunit we will */
- /* multiply the number of transaction by 1024 to get */
- /* "good" numbers */
+ /* since calc_service demand is doing ms/Kunit we will
+ multiply the number of transaction by 1024 to get "good"
+ numbers */
remote_service_demand = calc_service_demand((double) nummessages*1024,
0.0,
remote_cpu_utilization,
@@ -5265,9 +5265,9 @@
remote_service_demand = (float) -1.0;
}
- /* at this point, we want to calculate the confidence information. */
- /* if debugging is on, calculate_confidence will print-out the */
- /* parameters we pass it */
+ /* at this point, we want to calculate the confidence information.
+ if debugging is on, calculate_confidence will print-out the
+ parameters we pass it */
calculate_confidence(confidence_iteration,
elapsed_time,
@@ -5292,14 +5292,14 @@
&local_service_demand,
&remote_service_demand);
- /* We are now ready to print all the information. If the user */
- /* has specified zero-level verbosity, we will just print the */
- /* local service demand, or the remote service demand. If the */
- /* user has requested verbosity level 1, he will get the basic */
- /* "streamperf" numbers. If the user has specified a verbosity */
- /* of greater than 1, we will display a veritable plethora of */
- /* background information from outside of this block as it it */
- /* not cpu_measurement specific... */
+ /* We are now ready to print all the information. If the user has
+ specified zero-level verbosity, we will just print the local
+ service demand, or the remote service demand. If the user has
+ requested verbosity level 1, he will get the basic "streamperf"
+ numbers. If the user has specified a verbosity of greater than 1,
+ we will display a veritable plethora of background information
+ from outside of this block as it it not cpu_measurement
+ specific... */
if (confidence < 0) {
/* we did not hit confidence, but were we asked to look for it? */
Modified: trunk/src/nettest_xti.c
===================================================================
--- trunk/src/nettest_xti.c 2006-04-14 23:17:13 UTC (rev 69)
+++ trunk/src/nettest_xti.c 2006-04-17 20:03:42 UTC (rev 70)
@@ -524,10 +524,6 @@
double bytes_sent;
-#ifdef DIRTY
- int i;
-#endif /* DIRTY */
-
float local_cpu_utilization;
float local_service_demand;
float remote_cpu_utilization;
@@ -887,12 +883,6 @@
}
#endif /* WANT_INTERVALS */
-#ifdef DIRTY
- /* initialize the random number generator for putting dirty stuff */
- /* into the send buffer. raj */
- srand((int) getpid());
-#endif
-
/* before we start, initialize a few variables */
/* We use an "OR" to control test execution. When the test is */
@@ -910,15 +900,10 @@
/* ones into the cache. at some point, we might want to replace */
/* the rand() call with something from a table to reduce our call */
/* overhead during the test, but it is not a high priority item. */
- message_int_ptr = (int *)(send_ring->buffer_ptr);
- for (i = 0; i < loc_dirty_count; i++) {
- *message_int_ptr = rand();
- message_int_ptr++;
- }
- for (i = 0; i < loc_clean_count; i++) {
- loc_dirty_count = *message_int_ptr;
- message_int_ptr++;
- }
+ access_buffer(send_ring->buffer_ptr,
+ send_size,
+ loc_dirty_count,
+ loc_clean_count);
#endif /* DIRTY */
#ifdef WANT_HISTOGRAM
@@ -1253,7 +1238,7 @@
/* implemented as one routine. I could break things-out somewhat, but */
/* didn't feel it was necessary. */
-int
+void
recv_xti_tcp_stream()
{
@@ -1271,8 +1256,6 @@
struct ring_elt *recv_ring;
int *message_int_ptr;
- int dirty_count;
- int clean_count;
int i;
struct xti_tcp_stream_request_struct *xti_tcp_stream_request;
@@ -1564,17 +1547,11 @@
/* them cleanly into the cache. The clean ones will follow any dirty */
/* ones into the cache. */
- dirty_count = xti_tcp_stream_request->dirty_count;
- clean_count = xti_tcp_stream_request->clean_count;
- message_int_ptr = (int *)recv_ring->buffer_ptr;
- for (i = 0; i < dirty_count; i++) {
- *message_int_ptr = rand();
- message_int_ptr++;
- }
- for (i = 0; i < clean_count; i++) {
- dirty_count = *message_int_ptr;
- message_int_ptr++;
- }
+ access_buffer(recv_ring->buffer_ptr,
+ recv_size,
+ xti_tcp_stream_request->dirty_count,
+ xti_tcp_stream_request->clean_count);
+
#endif /* DIRTY */
bytes_received = 0;
@@ -1591,15 +1568,12 @@
recv_ring = recv_ring->next;
#ifdef DIRTY
- message_int_ptr = (int *)(recv_ring->buffer_ptr);
- for (i = 0; i < dirty_count; i++) {
- *message_int_ptr = rand();
- message_int_ptr++;
- }
- for (i = 0; i < clean_count; i++) {
- dirty_count = *message_int_ptr;
- message_int_ptr++;
- }
+
+ access_buffer(recv_ring->buffer_ptr,
+ recv_size,
+ xti_tcp_stream_request->dirty_count,
+ xti_tcp_stream_request->clean_count);
+
#endif /* DIRTY */
}
@@ -1701,7 +1675,7 @@
/* this routine implements the sending (netperf) side of the XTI_TCP_RR */
/* test. */
-int
+void
send_xti_tcp_rr(char remote_host[])
{
@@ -2490,9 +2464,6 @@
int interval_count;
sigset_t signal_set;
#endif /* WANT_INTERVALS */
-#ifdef DIRTY
- int i;
-#endif /* DIRTY */
struct hostent *hp;
struct sockaddr_in server;
@@ -2796,15 +2767,12 @@
/* we are about to send. we may also want to bring some number of */
/* them cleanly into the cache. The clean ones will follow any dirty */
/* ones into the cache. */
- message_int_ptr = (int *)(send_ring->buffer_ptr);
- for (i = 0; i < loc_dirty_count; i++) {
- *message_int_ptr = 4;
- message_int_ptr++;
- }
- for (i = 0; i < loc_clean_count; i++) {
- loc_dirty_count = *message_int_ptr;
- message_int_ptr++;
- }
+
+ access_buffer(send_ring->buffer_ptr,
+ send_size,
+ loc_dirty_count,
+ loc_clean_count);
+
#endif /* DIRTY */
#ifdef WANT_HISTOGRAM
@@ -3090,7 +3058,7 @@
/* this routine implements the receive side (netserver) of the */
/* XTI_UDP_STREAM performance test. */
-int
+void
recv_xti_udp_stream()
{
struct ring_elt *recv_ring;
@@ -3425,7 +3393,7 @@
}
-int send_xti_udp_rr(char remote_host[])
+void send_xti_udp_rr(char remote_host[])
{
char *tput_title = "\
@@ -4171,7 +4139,7 @@
/* this routine implements the receive side (netserver) of a XTI_UDP_RR */
/* test. */
-int
+void
recv_xti_udp_rr()
{
@@ -4557,7 +4525,7 @@
/* this routine implements the receive (netserver) side of a XTI_TCP_RR */
/* test */
-int
+void
recv_xti_tcp_rr()
{
@@ -4988,7 +4956,7 @@
/* it will also look (can look) much like the communication pattern */
/* of http for www access. */
-int
+void
send_xti_tcp_conn_rr(char remote_host[])
{
@@ -5572,7 +5540,7 @@
}
-int
+void
recv_xti_tcp_conn_rr()
{
Modified: trunk/src/nettest_xti.h
===================================================================
--- trunk/src/nettest_xti.h 2006-04-14 23:17:13 UTC (rev 69)
+++ trunk/src/nettest_xti.h 2006-04-17 20:03:42 UTC (rev 70)
@@ -233,23 +233,23 @@
extern void send_xti_tcp_stream(char remote_host[]);
-extern int recv_xti_tcp_stream();
+extern void recv_xti_tcp_stream();
-extern int send_xti_tcp_rr(char remote_host[]);
+extern void send_xti_tcp_rr(char remote_host[]);
extern void send_xti_udp_stream(char remote_host[]);
-extern int recv_xti_udp_stream();
+extern void recv_xti_udp_stream();
-extern int send_xti_udp_rr(char remote_host[]);
+extern void send_xti_udp_rr(char remote_host[]);
-extern int recv_xti_udp_rr();
+extern void recv_xti_udp_rr();
-extern int recv_xti_tcp_rr();
+extern void recv_xti_tcp_rr();
-extern int send_xti_tcp_conn_rr(char remote_host[]);
+extern void send_xti_tcp_conn_rr(char remote_host[]);
-extern int recv_xti_tcp_conn_rr();
+extern void recv_xti_tcp_conn_rr();
extern void scan_xti_args(int argc, char *argv[]);
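
A footnote on the int -> void prototype changes above: the netserver side
calls these recv_* routines purely for their side effects, so nothing ever
consumed the old int return values. The fragment below is a hypothetical
illustration of that dispatch shape only; the request-type names, their
values, and the wrapper function are assumptions, not copied from
netserver.c.

  /* Hypothetical sketch -- names and values assumed for illustration. */
  enum { DO_XTI_TCP_STREAM = 1, DO_XTI_TCP_RR };  /* assumed constants */

  extern void recv_xti_tcp_stream(void);
  extern void recv_xti_tcp_rr(void);

  static void
  dispatch_request(int request_type)
  {
    switch (request_type) {
    case DO_XTI_TCP_STREAM:
      recv_xti_tcp_stream();   /* no return value is examined */
      break;
    case DO_XTI_TCP_RR:
      recv_xti_tcp_rr();
      break;
    default:
      break;
    }
  }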