[netperf-dev] netperf2 commit notice r505 - trunk/src
raj at netperf.org
raj at netperf.org
Mon Nov 28 16:19:36 PST 2011
Author: raj
Date: 2011-11-28 16:19:36 -0800 (Mon, 28 Nov 2011)
New Revision: 505
Modified:
trunk/src/netlib.c
trunk/src/netlib.h
trunk/src/nettest_bsd.h
trunk/src/nettest_omni.c
Log:
enable setting and retrieval of the congestion control algorithm on the remote side, though there may be a bug in either netperf or Linux when a listen socket is involved
Modified: trunk/src/netlib.c
===================================================================
--- trunk/src/netlib.c 2011-11-28 23:09:36 UTC (rev 504)
+++ trunk/src/netlib.c 2011-11-29 00:19:36 UTC (rev 505)
@@ -2514,6 +2514,20 @@
}
+/* go back and "undo" the ntohl that recv_request() did, starting with
+ the specified point and going to the end of the request array */
+void
+fixup_request_n(int n)
+{
+ int i;
+ int limit;
+
+ limit = sizeof(netperf_request) / 4;
+ for (i = n; i < limit; i++) {
+ request_array[i] = htonl(request_array[i]);
+ }
+}
+
/* receive a request, only converting the first n ints-worth of the
test-specific data via htonl() before sending on the
connection. the first two ints, which are before the test-specific
Modified: trunk/src/netlib.h
===================================================================
--- trunk/src/netlib.h 2011-11-28 23:09:36 UTC (rev 504)
+++ trunk/src/netlib.h 2011-11-29 00:19:36 UTC (rev 505)
@@ -502,6 +502,7 @@
extern void recv_response_n(int n); /* of the test-specific data via */
extern void send_response_n(int n); /* htonl/ntohl as required */
extern int recv_request_n(int n);
+extern void fixup_request_n(int n);
extern void dump_request();
extern void dump_addrinfo(FILE *dumploc, struct addrinfo *info,
char *host, char *port, int family);
Modified: trunk/src/nettest_bsd.h
===================================================================
--- trunk/src/nettest_bsd.h 2011-11-28 23:09:36 UTC (rev 504)
+++ trunk/src/nettest_bsd.h 2011-11-29 00:19:36 UTC (rev 505)
@@ -84,6 +84,10 @@
uint32_t netserver_ip[4]; /* when netperf tells netserver his IP */
int32_t socket_prio; /* what netserver should use for socket prio */
int32_t socket_tos; /* what netserver should use for socket tos */
+ /* there are 38 "ints" above here, add another and you will need to
+ adjust the define below */
+#define OMNI_REQUEST_CONV_CUTOFF 38
+ char cong_control[16]; /* the requested congestion control alg */
};
struct omni_response_struct {
@@ -125,6 +129,7 @@
name that long - and still didn't
include the 9NNN model number! */
char security_string[16];
+ char cong_control[16]; /* what the congestion control alg was */
};
struct omni_results_struct {
Modified: trunk/src/nettest_omni.c
===================================================================
--- trunk/src/nettest_omni.c 2011-11-28 23:09:36 UTC (rev 504)
+++ trunk/src/nettest_omni.c 2011-11-29 00:19:36 UTC (rev 505)
@@ -2779,7 +2779,7 @@
we use send. we ass-u-me blocking operations always, so no need
to check for eagain or the like. */
- if (debug > 1) {
+ if (debug > 2) {
fprintf(where,
"send_data sock %d, ring elt %p, bytes %d, dest %p, len %d\n",
data_socket,
@@ -3424,7 +3424,7 @@
if (remote_mask_len)
random_ip_address(remote_res, remote_mask_len);
- data_socket = create_data_socket(local_res);
+ data_socket = omni_create_data_socket(local_res);
if (data_socket == INVALID_SOCKET) {
perror("netperf: send_omni: unable to create data socket");
@@ -3577,6 +3577,14 @@
if (desired_output_groups & OMNI_WANT_REM_CONG)
omni_request->flags |= OMNI_WANT_REM_CONG;
+ /* perhaps this should be made conditional on
+ remote_cong_control_req[0] not being NUL ('\0')? */
+ strncpy(omni_request->cong_control,
+ remote_cong_control_req,
+ sizeof(omni_request->cong_control));
+ omni_request->cong_control[sizeof(omni_request->cong_control) - 1] =
+ '\0';
+
if (want_keepalive)
omni_request->flags |= OMNI_WANT_KEEPALIVE;
@@ -3634,7 +3642,7 @@
fprintf(where,"netperf: send_omni: requesting OMNI test\n");
}
- send_request();
+ send_request_n(OMNI_REQUEST_CONV_CUTOFF);
/* the response from the remote should contain all the relevant
@@ -3690,6 +3698,13 @@
remote_security_type = nsec_type_to_str(remote_security_type_id);
remote_security_enabled =
nsec_enabled_to_str(remote_security_enabled_num);
+ /* what was the congestion control? */
+ if (desired_output_groups & OMNI_WANT_REM_CONG) {
+ strncpy(remote_cong_control,
+ omni_response->cong_control,
+ sizeof(remote_cong_control));
+ remote_cong_control[sizeof(remote_cong_control) - 1] = '\0';
+ }
}
else {
Set_errno(netperf_response.content.serv_errno);
@@ -3800,7 +3815,7 @@
pick_next_port_number(local_res,remote_res);
- data_socket = create_data_socket(local_res);
+ data_socket = omni_create_data_socket(local_res);
if (data_socket == INVALID_SOCKET) {
perror("netperf: send_omni: unable to create data socket");
@@ -4641,7 +4656,17 @@
fprintf(where,"netserver: recv_omni: entered...\n");
fflush(where);
}
-
+
+ /* netserver has no good way of knowing where the conversion cutoff
+ point is, so we have to fix it after the fact */
+ fixup_request_n(OMNI_REQUEST_CONV_CUTOFF);
+
+ /* thus fixed-up, we can extract the requested congestion control
+ algorithm */
+ strncpy(local_cong_control_req,
+ omni_request->cong_control,
+ sizeof(local_cong_control_req));
+
/* based on what we have been told by the remote netperf, we want to
setup our endpoint for the "data connection" and let the remote
netperf know the situation. */
@@ -4714,7 +4739,7 @@
omni_request->protocol,
0);
- s_listen = create_data_socket(local_res);
+ s_listen = omni_create_data_socket(local_res);
if (s_listen == INVALID_SOCKET) {
netperf_response.content.serv_errno = errno;
@@ -4927,6 +4952,13 @@
sizeof(omni_response->security_string));
omni_response->security_string[sizeof(omni_response->security_string)-1] = 0;
+ if (omni_request->flags & OMNI_WANT_REM_CONG) {
+ get_transport_cong_control(s_listen,
+ local_res->ai_protocol,
+ omni_response->cong_control,
+ sizeof(omni_response->cong_control));
+ }
+
send_response_n(OMNI_RESPONSE_CONV_CUTOFF); /* brittle, but functional */
local_send_calls = 0;
More information about the netperf-dev
mailing list