[netperf-dev] netperf4 commit notice r71 - trunk/src

raj at netperf.org
Thu Mar 2 12:10:13 PST 2006


Author: raj
Date: 2006-03-02 12:10:11 -0800 (Thu, 02 Mar 2006)
New Revision: 71

Modified:
   trunk/src/netlib.c
   trunk/src/netmsg.c
   trunk/src/netperf.c
   trunk/src/netperf.h
   trunk/src/netserver.c
Log:
A rather large step towards supporting either pthreads or glib for
threading, and thus Windows. Lots of abstraction where possible and
less where not. Known to compile and run the default tests under Linux.

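Note for anyone skimming the diff: the core of the change is that call sites stop invoking pthread_* routines directly and go through NETPERF_* macros defined in netperf.h. The pthread mappings in the sketch below are taken from the netperf.h hunk in this commit; the WITH_GLIB mappings are only a hypothetical illustration of the intent and are not part of this revision.

/* Sketch of the pattern introduced here, not a drop-in file. */
#ifdef WITH_GLIB
#include <glib.h>
#define NETPERF_MUTEX_T      GStaticMutex           /* assumed, not in r71 */
#define NETPERF_MUTEX_LOCK   g_static_mutex_lock    /* assumed, not in r71 */
#define NETPERF_MUTEX_UNLOCK g_static_mutex_unlock  /* assumed, not in r71 */
#elif defined(HAVE_PTHREAD_H)
#include <pthread.h>
#define NETPERF_MUTEX_T      pthread_mutex_t
#define NETPERF_MUTEX_LOCK   pthread_mutex_lock
#define NETPERF_MUTEX_UNLOCK pthread_mutex_unlock
#endif

/* A call site then reads the same under either backend: */
void touch_shared_state(NETPERF_MUTEX_T *lock)
{
  NETPERF_MUTEX_LOCK(lock);
  /* ... walk a hash chain, update test state, etc. ... */
  NETPERF_MUTEX_UNLOCK(lock);
}
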

Modified: trunk/src/netlib.c
===================================================================
--- trunk/src/netlib.c	2006-03-02 01:37:25 UTC (rev 70)
+++ trunk/src/netlib.c	2006-03-02 20:10:11 UTC (rev 71)
@@ -401,9 +401,9 @@
           "srvr","tst","test_name","CURR","TEST","RQST");
   for (i = 0; i < TEST_HASH_BUCKETS; i ++) {
     h = &test_hash[i];
-    ret = pthread_mutex_lock(&h->hash_lock);
+    ret = NETPERF_MUTEX_LOCK(&h->hash_lock);
     if (ret) {
-      fprintf(where,"__func__ pthread_mutex_lock returned %d\n",ret);
+      fprintf(where,"%s thread mutex lock returned %d\n",__func__,ret);
       fflush(where);
     }
     test = h->test;
@@ -414,9 +414,9 @@
       }
       test = test->next;
     }
-    ret = pthread_mutex_unlock(&h->hash_lock);
+    ret = NETPERF_MUTEX_UNLOCK(&h->hash_lock);
     if (ret) {
-      fprintf(where,"__func__ pthread_mutex_unlock returned %d\n",ret);
+      fprintf(where,"%s thread mutex unlock returned %d\n",__func__,ret);
       fflush(where);
     }
   }
@@ -613,12 +613,12 @@
   hash_value = TEST_HASH_VALUE(new_test->id);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_hash[hash_value].hash_lock));
 
   new_test->next = test_hash[hash_value].test;
   test_hash[hash_value].test = new_test;
 
-  pthread_mutex_unlock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_hash[hash_value].hash_lock));
 
   return(NPE_SUCCESS);
 }
@@ -639,7 +639,7 @@
   hash_value = TEST_HASH_VALUE(id);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_hash[hash_value].hash_lock));
 
   prev_test    = &(test_hash[hash_value].test);
   test_pointer = test_hash[hash_value].test;
@@ -654,7 +654,7 @@
     test_pointer = test_pointer->next;
   }
 
-  pthread_mutex_unlock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_hash[hash_value].hash_lock));
 }
 
 test_t *
@@ -670,7 +670,7 @@
   hash_value = TEST_HASH_VALUE(id);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_hash[hash_value].hash_lock));
 
   test_pointer = test_hash[hash_value].test;
   while (test_pointer != NULL) {
@@ -681,7 +681,7 @@
     test_pointer = test_pointer->next;
   }
 
-  pthread_mutex_unlock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_hash[hash_value].hash_lock));
   return(test_pointer);
 }
 
@@ -1082,12 +1082,25 @@
 
 
 int
-launch_thread(pthread_t *tid, void *(*start_routine)(void *), void *data)
+launch_thread(NETPERF_THREAD_T *tid, void *(*start_routine)(void *), void *data)
 {
   int rc;
-  pthread_t temp_tid;
 
+#ifdef WITH_GLIB
+  NETPERF_THREAD_T *temp_tid;
 
+  temp_tid = g_thread_create(start_routine,data,FALSE,NULL);
+
+  if (NULL != temp_tid) {
+    rc = 0;
+    *tid = temp_tid;
+  }
+  else {
+    rc = -1;
+  }
+#else
+
+  NETPERF_THREAD_T temp_tid;
   rc = pthread_create(&temp_tid, (pthread_attr_t *)NULL, start_routine, data);
   if (rc != 0) {
     if (debug) {
@@ -1117,6 +1130,7 @@
       rc = NPE_SUCCESS;
     }
   }
+#endif
   return(rc);
 }
 

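The launch_thread() hunk above is the one spot where the two backends differ in shape rather than just in name: g_thread_create() hands back a GThread pointer (NULL on failure), while pthread_create() fills in a pthread_t and returns an error code. Call sites stay backend-neutral. The following is a minimal usage sketch with a hypothetical worker function (real callers in this commit pass things like initialize_test); it assumes the netperf4 headers that define NETPERF_THREAD_T are on the include path.

/* declaration as launch_thread() now appears in netlib.c (r71) */
extern int launch_thread(NETPERF_THREAD_T *tid,
                         void *(*start_routine)(void *),
                         void *data);

/* hypothetical worker, just to show the calling convention */
static void *example_worker(void *arg)
{
  /* a real worker would run a test or a netserver message loop */
  return arg;
}

static int start_example_worker(NETPERF_THREAD_T *tid)
{
  /* same call whether NETPERF_THREAD_T is a pthread_t or a glib handle;
     callers in this commit compare the result against NPE_SUCCESS */
  return launch_thread(tid, example_worker, NULL);
}
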
Modified: trunk/src/netmsg.c
===================================================================
--- trunk/src/netmsg.c	2006-03-02 01:37:25 UTC (rev 70)
+++ trunk/src/netmsg.c	2006-03-02 20:10:11 UTC (rev 71)
@@ -576,7 +576,7 @@
     fprintf(where,"np_idle_message: waiting for mutex\n");
     fflush(where);
   }
-  pthread_mutex_lock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_hash[hash_value].hash_lock));
 
   test = test_hash[hash_value].test;
   while (test != NULL) {
@@ -593,7 +593,7 @@
       fflush(where);
     }
     test->state = TEST_IDLE;
-    rc = pthread_cond_broadcast(&(test_hash[hash_value].condition));
+    rc = NETPERF_COND_BROADCAST(test_hash[hash_value].condition);
     if (debug) {
       fprintf(where," new_state = %d\n",test->state);
       fflush(where);
@@ -603,7 +603,7 @@
     fprintf(where,"np_idle_message: unlocking mutex\n");
     fflush(where);
   }
-  pthread_mutex_unlock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_hash[hash_value].hash_lock));
   return(rc);
 }
 
@@ -724,7 +724,7 @@
   hash_value = TEST_HASH_VALUE(testid);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_hash[hash_value].hash_lock));
 
   test = test_hash[hash_value].test;
   while (test != NULL) {
@@ -751,14 +751,14 @@
     } else {
       test->state = TEST_INIT;
     }
-    rc = pthread_cond_broadcast(&(test_hash[hash_value].condition));
+    rc = NETPERF_COND_BROADCAST(test_hash[hash_value].condition);
       
     if (debug) {
       fprintf(where," new_state = %d rc = %d\n",test->state,rc);
       fflush(where);
     }
   }
-  pthread_mutex_unlock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_hash[hash_value].hash_lock));
   return(rc);
 }
 

Modified: trunk/src/netperf.c
===================================================================
--- trunk/src/netperf.c	2006-03-02 01:37:25 UTC (rev 70)
+++ trunk/src/netperf.c	2006-03-02 20:10:11 UTC (rev 71)
@@ -359,34 +359,75 @@
 
   for (i = 0; i < SERVER_HASH_BUCKETS; i++) {
     server_hash[i].server = NULL;
+#ifdef WITH_GLIB
+    server_hash[i].hash_lock = G_STATIC_MUTEX_INIT;
+    server_hash[i].condition = g_cond_new();
+    if (NULL == server_hash[i].condition) {
+      /* not sure we will even get here */
+      fprintf(where, "netperf_init: g_cond_new error \n");
+      fflush(where);
+      exit(-2);
+    }
+#else
     rc = pthread_mutex_init(&(server_hash[i].hash_lock), NULL);
     if (rc) {
-      fprintf(where, "netperf_init: pthread_mutex_init error %d\n",rc);
+      fprintf(where, "%s: pthread_mutex_init error %d\n",__func__,rc);
       fflush(where);
       exit(-2);
     }
-    rc = pthread_cond_init(&(server_hash[i].condition), NULL);
+    server_hash[i].condition = 
+      (pthread_cond_t *)malloc(sizeof(pthread_cond_t));
+
+    if (NULL == server_hash[i].condition) {
+      fprintf(where, "%s: unable to malloc a pthread_cond_t \n",__func__);
+      fflush(where);
+      exit(-2);
+    }
+
+    rc = pthread_cond_init((server_hash[i].condition), NULL);
     if (rc) {
       fprintf(where, "netperf_init: pthread_cond_init error %d\n",rc);
       fflush(where);
       exit(-2);
     }
+#endif
   }
 
   for (i = 0; i < TEST_HASH_BUCKETS; i ++) {
     test_hash[i].test = NULL;
+#ifdef WITH_GLIB
+    test_hash[i].hash_lock = G_STATIC_MUTEX_INIT;
+    test_hash[i].condition = g_cond_new();
+    if (NULL == test_hash[i].condition) {
+      /* not sure we will even get here */
+      fprintf(where, "netperf_init: g_cond_new error \n");
+      fflush(where);
+      exit(-2);
+    }
+#else
     rc = pthread_mutex_init(&(test_hash[i].hash_lock), NULL);
     if (rc) {
       fprintf(where, "netperf_init: pthread_mutex_init error %d\n",rc);
       fflush(where);
       exit(-2);
     }
-    rc = pthread_cond_init(&(test_hash[i].condition), NULL);
+
+    test_hash[i].condition = 
+      (pthread_cond_t *)malloc(sizeof(pthread_cond_t));
+
+    if (NULL == test_hash[i].condition) {
+      fprintf(where, "netperf_init: unable to malloc a pthread_cond_t \n");
+      fflush(where);
+      exit(-2);
+    }
+
+    rc = pthread_cond_init((test_hash[i].condition), NULL);
     if (rc) {
       fprintf(where, "netperf_init: pthread_cond_init error %d\n",rc);
       fflush(where);
       exit(-2);
     }
+#endif
   }
 
   netlib_init();
@@ -419,13 +460,13 @@
   hash_value = ((atoi((char *)new_server->id + 1)) % SERVER_HASH_BUCKETS);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(server_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(server_hash[hash_value].hash_lock));
 
   new_server->next = server_hash[hash_value].server;
   new_server->lock = &(server_hash[hash_value].hash_lock);
   server_hash[hash_value].server = new_server;
 
-  pthread_mutex_unlock(&(server_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(server_hash[hash_value].hash_lock));
 
   return(NPE_SUCCESS);
 }
@@ -444,7 +485,7 @@
   hash_value = ((atoi((char *)id + 1)) % SERVER_HASH_BUCKETS);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(server_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(server_hash[hash_value].hash_lock));
 
   prev_server    = &(server_hash[hash_value].server);
   server_pointer = server_hash[hash_value].server;
@@ -459,7 +500,8 @@
     server_pointer = server_pointer->next;
   }
 
-  pthread_mutex_unlock(&(server_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(server_hash[hash_value].hash_lock));
+
 }
 
 
@@ -475,7 +517,7 @@
   hash_value = ((atoi((char *)id + 1)) % SERVER_HASH_BUCKETS);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(server_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(server_hash[hash_value].hash_lock));
 
   server_pointer = server_hash[hash_value].server;
   while (server_pointer != NULL) {
@@ -486,7 +528,7 @@
     server_pointer = server_pointer->next;
   }
 
-  pthread_mutex_unlock(&(server_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(server_hash[hash_value].hash_lock));
 
   return(server_pointer);
 }
@@ -500,13 +542,13 @@
   hash_value = TEST_SET_HASH_VALUE(new_test_set->id);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(test_set_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_set_hash[hash_value].hash_lock));
 
   new_test_set->next = test_set_hash[hash_value].test_set;
 
   test_set_hash[hash_value].test_set = new_test_set;
 
-  pthread_mutex_unlock(&(test_set_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_set_hash[hash_value].hash_lock));
 
   return(NPE_SUCCESS);
 }
@@ -524,7 +566,7 @@
   hash_value = TEST_SET_HASH_VALUE(id);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(test_set_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_set_hash[hash_value].hash_lock));
 
   test_set_ptr = test_set_hash[hash_value].test_set;
   while (test_set_ptr != NULL) {
@@ -543,7 +585,9 @@
       test_set_hash[hash_value].test_set = test_set_ptr->next;
     }
   }
-  pthread_mutex_unlock(&(test_set_hash[hash_value].hash_lock));
+
+  NETPERF_MUTEX_UNLOCK(&(test_set_hash[hash_value].hash_lock));
+
   return(NPE_SUCCESS);
 }
 
@@ -559,7 +603,7 @@
   hash_value = TEST_SET_HASH_VALUE(id);
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(test_set_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(test_set_hash[hash_value].hash_lock));
 
   test_set_ptr = test_set_hash[hash_value].test_set;
   while (test_set_ptr != NULL) {
@@ -570,7 +614,7 @@
     test_set_ptr = test_set_ptr->next;
   }
 
-  pthread_mutex_unlock(&(test_set_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_set_hash[hash_value].hash_lock));
   return(test_set_ptr);
 }
 
@@ -698,6 +742,9 @@
       new_server->id        = netserverid;
       if (add_server_to_hash(new_server) == NPE_SUCCESS) {
         new_server->node      = this_netserver;
+#ifdef WITH_GLIB
+	new_server->rwlock = G_STATIC_RW_LOCK_INIT;
+#else
         rc = pthread_rwlock_init(&new_server->rwlock, NULL);
         if (rc) {
           fprintf(where, "instaniate_netservers: ");
@@ -705,6 +752,7 @@
           fflush(where);
           rc = NPE_PTHREAD_RWLOCK_INIT_FAILED;
         }
+#endif
         if (rc == NPE_SUCCESS) {
           rc = instantiate_tests(this_netserver, new_server);
         }
@@ -758,12 +806,12 @@
     fprintf(where,"entering wait_for_version_response\n");
     fflush(where);
   }
-  pthread_mutex_lock(server->lock);
+  NETPERF_MUTEX_LOCK(server->lock);
   while (server->state == NSRV_VERS) {
     fds.fd      = server->sock;
     fds.events  = POLLIN;
     fds.revents = 0;
-    pthread_mutex_unlock(server->lock);
+    NETPERF_MUTEX_UNLOCK(server->lock);
     if (poll(&fds,1,5000) > 0) {
       if (debug) {
         fprintf(where,"wait_for_version_response ");
@@ -779,7 +827,7 @@
         }
         rc = process_message(server, message);
       } else {
-        pthread_mutex_lock(server->lock);
+        NETPERF_MUTEX_LOCK(server->lock);
         server->state  = NSRV_ERROR;
         server->err_fn = (char *)__func__;
         if (rc == 0) {
@@ -787,7 +835,7 @@
         } else {
           server->err_rc = rc;
         }
-        pthread_mutex_unlock(server->lock);
+        NETPERF_MUTEX_UNLOCK(server->lock);
       }
     } else {
       if (debug) {
@@ -795,7 +843,7 @@
         fflush(where);
       }
     }
-    pthread_mutex_lock(server->lock);
+    NETPERF_MUTEX_LOCK(server->lock);
     if (rc == NPE_SUCCESS) {
       /* accepted version string move netserver to NSRV_INIT state */
       server->state     = server->state_req;
@@ -810,7 +858,7 @@
   if (rc != NPE_SUCCESS) {
     report_server_error(server);
   }
-  pthread_mutex_unlock(server->lock);
+  NETPERF_MUTEX_UNLOCK(server->lock);
 
   NETPERF_DEBUG_EXIT(debug,where);
 
@@ -827,11 +875,10 @@
   int              hash_value;
   test_t          *test;
   test_hash_t     *h;
-  struct timespec  delta_time;
-  struct timespec  abstime;
+  NETPERF_ABS_TIMESPEC  delta_time;
+  NETPERF_ABS_TIMESPEC  abstime;
 
-  delta_time.tv_sec  = 1;
-  delta_time.tv_nsec = 0;
+  NETPERF_ABS_TIMESET(delta_time,1,0);
   
   *data = NULL;
 
@@ -839,7 +886,7 @@
   /* find the dependency in the hash list */
   hash_value = TEST_HASH_VALUE(id);
   h = &(test_hash[hash_value]);
-  pthread_mutex_lock(&h->hash_lock);
+  NETPERF_MUTEX_LOCK(&h->hash_lock);
   test = h->test;
   while (test != NULL) {
     if (!xmlStrcmp(test->id,id)) {
@@ -849,11 +896,11 @@
       if (test->state_req == TEST_PREINIT) {
         /* test is not yet initialized initialize it */
         test->state_req = TEST_IDLE;
-        pthread_mutex_unlock(&h->hash_lock);
+        NETPERF_MUTEX_UNLOCK(&h->hash_lock);
 
         rc = launch_thread(&test->tid, initialize_test, test);
 
-        pthread_mutex_lock(&h->hash_lock);
+        NETPERF_MUTEX_LOCK(&h->hash_lock);
         if (rc != NPE_SUCCESS) {
           test->state  = TEST_ERROR;
           test->err_rc = rc;
@@ -865,7 +912,7 @@
 #endif
       /* wait for test to initialize */
       while (test->state == TEST_PREINIT) {
-        pthread_mutex_unlock(&h->hash_lock);
+        NETPERF_MUTEX_UNLOCK(&h->hash_lock);
         if (debug) {
           fprintf(where,
                   "resolve_dependency: waiting on test %s thread %d\n",
@@ -873,9 +920,15 @@
                   test->tid);
           fflush(where);
         }
-        pthread_mutex_lock(&h->hash_lock);
+
+        NETPERF_MUTEX_LOCK(&h->hash_lock);
+
+#ifdef WITH_GLIB
+#else
         get_expiration_time(&delta_time,&abstime);
-        rc = pthread_cond_timedwait(&h->condition, &h->hash_lock, &abstime);
+#endif
+
+        rc = NETPERF_COND_TIMEDWAIT(h->condition, &h->hash_lock, &abstime);
         if (debug) {
             fprintf(where,
                     "resolve_dependency: pthread_cond_wait exited %d\n",rc);
@@ -887,14 +940,14 @@
       if (test->state != TEST_ERROR) {
         if (test->dependent_data != NULL) {
           *data = test->dependent_data;
-          pthread_mutex_unlock(&h->hash_lock);
+          NETPERF_MUTEX_UNLOCK(&h->hash_lock);
           rc = NPE_SUCCESS;
           if (debug) {
             fprintf(where,"resolve_dependency: successful for %s\n",
                     (char *)id);
             fflush(where);
           }
-          pthread_mutex_lock(&h->hash_lock);
+          NETPERF_MUTEX_LOCK(&h->hash_lock);
         } else {
           rc = NPE_DEPENDENCY_NOT_PRESENT;
         }
@@ -906,7 +959,7 @@
 
     test = test->next;
   }
-  pthread_mutex_unlock(&(test_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(test_hash[hash_value].hash_lock));
   return(rc);
 }
 
@@ -964,12 +1017,12 @@
         }  
       }
       /* is the lock around the send required? */
-      pthread_rwlock_wrlock(&server->rwlock);
+      NETPERF_RWLOCK_WRLOCK(&server->rwlock);
       rc = send_control_message(server->sock,
                                 msg,
                                 server->id,
                                 my_nid);
-      pthread_rwlock_unlock(&server->rwlock);
+      NETPERF_RWLOCK_UNLOCK(&server->rwlock);
     } else {
       if (debug) {
         fprintf(where,
@@ -1011,16 +1064,16 @@
 
   for (i = 0; i < TEST_HASH_BUCKETS; i ++) {
     h = &test_hash[i];
-    pthread_mutex_lock(&h->hash_lock);
+    NETPERF_MUTEX_LOCK(&h->hash_lock);
     test = h->test;
     while (test != NULL) {
       if (test->state_req == TEST_PREINIT) {
         if (!xmlStrcmp(server->id, test->server_id)) {
           /* test is not initialized and belongs to this netserver init it */
           test->state_req = TEST_IDLE;
-          pthread_mutex_unlock(&h->hash_lock);
+          NETPERF_MUTEX_UNLOCK(&h->hash_lock);
           rc = launch_thread(&test->tid, initialize_test, test);
-          pthread_mutex_lock(&h->hash_lock);
+          NETPERF_MUTEX_LOCK(&h->hash_lock);
           if (rc != NPE_SUCCESS) {
             test->state = TEST_ERROR;
             test->err_rc = rc;
@@ -1036,7 +1089,7 @@
         test = test->next;
       }
     }
-    pthread_mutex_unlock(&h->hash_lock);
+    NETPERF_MUTEX_UNLOCK(&h->hash_lock);
   }
 
   NETPERF_DEBUG_EXIT(debug,where);
@@ -1056,13 +1109,13 @@
   if (rc == NPE_SUCCESS) {
     initialize_tests(server);
   }
-  pthread_mutex_lock(server->lock);
+  NETPERF_MUTEX_LOCK(server->lock);
 
   while (server->state != NSRV_ERROR) {
     fds.fd      = server->sock;
     fds.events  = POLLIN;
     fds.revents = 0;
-    pthread_mutex_unlock(server->lock);
+    NETPERF_MUTEX_UNLOCK(server->lock);
     if (poll(&fds,1,5000) > 0) {
       rc = recv_control_message(server->sock, &message);
       if (rc > 0) {
@@ -1082,7 +1135,7 @@
         report_servers_test_status(server);
       }
     }
-    pthread_mutex_lock(server->lock);
+    NETPERF_MUTEX_LOCK(server->lock);
     if (rc != NPE_SUCCESS) {
       server->state  = NSRV_ERROR;
       server->err_rc = rc;
@@ -1093,7 +1146,7 @@
   if (rc != NPE_SUCCESS) {
     report_server_error(server);
   }
-  pthread_mutex_unlock(server->lock);
+  NETPERF_MUTEX_UNLOCK(server->lock);
 
   return(server);
 }
@@ -1114,12 +1167,12 @@
   }
   for (i = 0; i < SERVER_HASH_BUCKETS; i ++) {
     h = &server_hash[i];
-    pthread_mutex_lock(&h->hash_lock);
+    NETPERF_MUTEX_LOCK(&h->hash_lock);
     server = h->server;
     while (server != NULL) {
       if (server->state_req == NSRV_INIT) {
         /* netserver worker thread needs to be started */
-        pthread_mutex_unlock(&h->hash_lock);
+        NETPERF_MUTEX_UNLOCK(&h->hash_lock);
         if (debug) {
           fprintf(where,"launching thread for netserver %s\n",server->id);
           fflush(where);
@@ -1131,7 +1184,7 @@
                   server->tid,server->id);
           fflush(where);
         }
-        pthread_mutex_lock(&h->hash_lock);
+        NETPERF_MUTEX_LOCK(&h->hash_lock);
         if (rc != NPE_SUCCESS) {
           server->state = NSRV_ERROR;
           server->err_rc = rc;
@@ -1140,7 +1193,7 @@
       }
       server = server->next;
     }
-    pthread_mutex_unlock(&h->hash_lock);
+    NETPERF_MUTEX_UNLOCK(&h->hash_lock);
   }
 
   NETPERF_DEBUG_EXIT(debug,where);
@@ -1157,11 +1210,10 @@
   server_t        *server;
   test_t          *test;
   test_hash_t     *h;
-  struct timespec  delta_time;
-  struct timespec  abstime;
+  NETPERF_ABS_TIMESPEC delta_time;
+  NETPERF_ABS_TIMESPEC  abstime;
 
-  delta_time.tv_sec  = 1;
-  delta_time.tv_nsec = 0;
+  NETPERF_ABS_TIMESET(delta_time,1,0);
 
   if (debug) {
     fprintf(where,"entering wait_for_tests_to_initialize\n");
@@ -1169,7 +1221,10 @@
   }
   for (i = 0; i < TEST_HASH_BUCKETS; i ++) {
     h = &test_hash[i];
-    pthread_mutex_lock(&h->hash_lock);
+#ifdef WITH_GLIB
+#else
+    NETPERF_MUTEX_LOCK(&h->hash_lock);
+#endif
     test = h->test;
     while (test != NULL) {
       while (test->state != TEST_IDLE) {
@@ -1184,28 +1239,39 @@
           break;
         }
         /* test is not yet initialized wait for it */
+#ifdef WITH_GLIB
+#else
         get_expiration_time(&delta_time,&abstime);
-        prc = pthread_cond_timedwait(&h->condition, &h->hash_lock, &abstime);
+#endif
+        prc = NETPERF_COND_TIMEDWAIT(h->condition, &h->hash_lock, &abstime);
         if (prc != 0) {
           fprintf(where,
-            "wait_for_tests_to_initialize: pthread_cond_wait failed %d\n",prc);
+            "wait_for_tests_to_initialize: thread conditional wait returned %d\n",prc);
           fflush(where);
         }
-        /* since the mutex was unlocked during pthread_cond_wait should the
-           hash chain be restarted incase a new test was inserted ? */
+
+        /* since the mutex was unlocked during the conditional wait
+           should the hash chain be restarted in case a new test was
+           inserted ? */
         /* test = h->test;  for now no */
       } 
       test = test->next;
     }
-    pthread_mutex_unlock(&h->hash_lock);
+#ifdef WITH_GLIB
+#else
+    NETPERF_MUTEX_UNLOCK(&h->hash_lock);
+#endif
   }
   for (i=0;i < SERVER_HASH_BUCKETS; i++) {
     server = server_hash[i].server;
     while (server) {
       /* set the netserver state to NSRV_WORK now are ready to run tests */
-      pthread_mutex_lock(server->lock);
+      NETPERF_MUTEX_LOCK(server->lock);
+
       server->state = server->state_req;
-      pthread_mutex_unlock(server->lock);
+
+      NETPERF_MUTEX_UNLOCK(server->lock);
+
       server = server->next;
     }
   }

Modified: trunk/src/netperf.h
===================================================================
--- trunk/src/netperf.h	2006-03-02 01:37:25 UTC (rev 70)
+++ trunk/src/netperf.h	2006-03-02 20:10:11 UTC (rev 71)
@@ -40,8 +40,24 @@
 #include <libxml/parser.h>
 #include <libxml/tree.h>
 
-#ifdef HAVE_PTHREAD_H
+#ifdef WITH_GLIB
+#include <glib.h>
+#elif defined(HAVE_PTHREAD_H)
 #include <pthread.h>
+#define NETPERF_MUTEX_T pthread_mutex_t
+#define NETPERF_RWLOCK_T pthread_rwlock_t
+#define NETPERF_THREAD_T pthread_t
+#define NETPERF_COND_T pthread_cond_t
+#define NETPERF_ABS_TIMESPEC struct timespec
+#define NETPERF_ABS_TIMESET(base,a,b) base.tv_sec = a;base.tv_nsec=b;
+#define NETPERF_MUTEX_LOCK pthread_mutex_lock
+#define NETPERF_MUTEX_UNLOCK pthread_mutex_unlock
+#define NETPERF_COND_TIMEDWAIT pthread_cond_timedwait
+#define NETPERF_COND_BROADCAST pthread_cond_broadcast
+#define NETPERF_RWLOCK_WRLOCK pthread_rwlock_wrlock
+#define NETPERF_RWLOCK_UNLOCK pthread_rwlock_unlock
+#else
+#error Netperf4 requires either glib or pthreads
 #endif
 
 /* we want to get the definition of uint32_t et al */
@@ -143,10 +159,10 @@
   xmlChar          *id;          /* the id of the server instance. used
                                     in searches and as sanity checks  */
 
-  pthread_rwlock_t rwlock;       /* the mutex used to ensure exclusive
+  NETPERF_RWLOCK_T rwlock;       /* the mutex used to ensure exclusive
                                     access to this servers resources */
 
-  pthread_mutex_t  *lock;        /* the mutex used to ensure exclusive
+  NETPERF_MUTEX_T  *lock;        /* the mutex used to ensure exclusive
                                     access to this servers resources */
 
   xmlNodePtr       node;         /* the xml document node containing the
@@ -167,7 +183,7 @@
   char            *err_fn;       /* procedure which placed this server into
                                     the NSRV_ERROR state. */
 
-  pthread_t        tid;          /* the posix thread-id of the server
+  NETPERF_THREAD_T tid;          /* the posix thread-id of the server
                                     instance within netperf.
                                     Will only be stored in the netperf
                                     process not the netserver process. 
@@ -184,8 +200,8 @@
 #define SERVER_HASH_BUCKETS 4
 
 typedef struct server_hash_elt {
-  pthread_mutex_t  hash_lock;
-  pthread_cond_t   condition;
+  NETPERF_MUTEX_T  hash_lock;
+  NETPERF_COND_T   *condition;
   server_t        *server;
 } server_hash_t;
 
@@ -257,7 +273,7 @@
 
   int        err_no;           /* The errno returned by the failing syscall */
   
-  pthread_t  tid;              /* the posix thread id of the test
+  NETPERF_THREAD_T  tid;       /* the posix thread id of the test
                                   instance within the netserver.
                                   Will only be stored in the netserver
                                   process(es) not the netperf
@@ -321,8 +337,8 @@
 
 
 typedef struct test_hash_elt {
-  pthread_mutex_t  hash_lock;
-  pthread_cond_t   condition;
+  NETPERF_MUTEX_T  hash_lock;
+  NETPERF_COND_T   *condition;
   test_t          *test;
 } test_hash_t;
 
@@ -379,8 +395,8 @@
 
 
 typedef struct test_set_hash_elt {
-  pthread_mutex_t  hash_lock;
-  pthread_cond_t   condition;
+  NETPERF_MUTEX_T  hash_lock;
+  NETPERF_COND_T   *condition;
   tset_t          *test_set;
 } tset_hash_t;
 

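The netperf.h hunk above defines the NETPERF_* layer only for the pthread case; for WITH_GLIB it just pulls in glib.h, so the glib-side definitions are either elsewhere or still to come. Judging from the G_STATIC_MUTEX_INIT, G_STATIC_RW_LOCK_INIT, g_cond_new() and g_thread_create() calls in the .c hunks, they would presumably look roughly like the following. This is a speculative sketch, not something r71 contains.

/* Hypothetical WITH_GLIB counterparts to the pthread #defines above;
   none of these appear in this commit. */
#ifdef WITH_GLIB
#include <glib.h>
#define NETPERF_MUTEX_T        GStaticMutex
#define NETPERF_RWLOCK_T       GStaticRWLock
#define NETPERF_THREAD_T       GThread *   /* the netlib.c hunk is ambiguous here */
#define NETPERF_COND_T         GCond
#define NETPERF_MUTEX_LOCK     g_static_mutex_lock
#define NETPERF_MUTEX_UNLOCK   g_static_mutex_unlock
#define NETPERF_COND_BROADCAST g_cond_broadcast
#define NETPERF_RWLOCK_WRLOCK  g_static_rw_lock_writer_lock
#define NETPERF_RWLOCK_UNLOCK  g_static_rw_lock_writer_unlock
/* g_cond_timed_wait() takes a GMutex*, not a GStaticMutex*, so a
   NETPERF_COND_TIMEDWAIT mapping would need g_static_mutex_get_mutex()
   or a different scheme; the empty #ifdef WITH_GLIB stubs around the
   timedwait calls in netperf.c suggest this was still being sorted out. */
#endif
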
Modified: trunk/src/netserver.c
===================================================================
--- trunk/src/netserver.c	2006-03-02 01:37:25 UTC (rev 70)
+++ trunk/src/netserver.c	2006-03-02 20:10:11 UTC (rev 71)
@@ -271,13 +271,13 @@
 
   hash_value = 0;
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(netperf_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(netperf_hash[hash_value].hash_lock));
 
   new_netperf->next = netperf_hash[hash_value].server;
   new_netperf->lock = &(netperf_hash[hash_value].hash_lock);
   netperf_hash[hash_value].server = new_netperf;
 
-  pthread_mutex_unlock(&(netperf_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(netperf_hash[hash_value].hash_lock));
 
   return(NPE_SUCCESS);
 }
@@ -297,7 +297,7 @@
   hash_value = 0;
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(netperf_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(netperf_hash[hash_value].hash_lock));
 
   prev_server    = &(netperf_hash[hash_value].server);
   server_pointer = netperf_hash[hash_value].server;
@@ -312,7 +312,7 @@
     server_pointer = server_pointer->next;
   }
 
-  pthread_mutex_unlock(&(netperf_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(netperf_hash[hash_value].hash_lock));
 }
 
 
@@ -329,7 +329,7 @@
   hash_value = 0;
 
   /* don't forget to add error checking one day */
-  pthread_mutex_lock(&(netperf_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_LOCK(&(netperf_hash[hash_value].hash_lock));
 
   server_pointer = netperf_hash[hash_value].server;
   while (server_pointer != NULL) {
@@ -340,7 +340,7 @@
     server_pointer = server_pointer->next;
   }
 
-  pthread_mutex_unlock(&(netperf_hash[hash_value].hash_lock));
+  NETPERF_MUTEX_UNLOCK(&(netperf_hash[hash_value].hash_lock));
 
   return(server_pointer);
 }
@@ -911,34 +911,75 @@
 
   for (i = 0; i < NETPERF_HASH_BUCKETS; i++) {
     netperf_hash[i].server = NULL;
+#ifdef WITH_GLIB
+    netperf_hash[i].hash_lock = G_STATIC_MUTEX_INIT;
+    netperf_hash[i].condition = g_cond_new();
+    if (NULL == netperf_hash[i].condition) {
+      /* not sure we will even get here */
+      fprintf(where, "%s: g_cond_new error \n",__func__);
+      fflush(where);
+      exit(-2);
+    }
+#else
     rc = pthread_mutex_init(&(netperf_hash[i].hash_lock), NULL);
     if (rc) {
       fprintf(where, "%s: server pthread_mutex_init error %d\n", __func__, rc);
       fflush(where);
       exit(rc);
     }
-    rc = pthread_cond_init(&(netperf_hash[i].condition), NULL);
+
+    netperf_hash[i].condition = 
+      (pthread_cond_t *)malloc(sizeof(pthread_cond_t));
+
+    if (NULL == netperf_hash[i].condition) {
+      fprintf(where, "%s: unable to malloc a pthread_cond_t \n",__func__);
+      fflush(where);
+      exit(-2);
+    }
+      
+    rc = pthread_cond_init(netperf_hash[i].condition, NULL);
     if (rc) {
       fprintf(where, "%s: server pthread_cond_init error %d\n", __func__, rc);
       fflush(where);
       exit(rc);
     }
+#endif
   }
  
   for (i = 0; i < TEST_HASH_BUCKETS; i ++) {
     test_hash[i].test = NULL;
+#ifdef WITH_GLIB
+    test_hash[i].hash_lock = G_STATIC_MUTEX_INIT;
+    test_hash[i].condition = g_cond_new();
+    if (NULL == test_hash[i].condition) {
+      /* not sure we will even get here */
+      fprintf(where, "%s: g_cond_new error \n",__func__);
+      fflush(where);
+      exit(-2);
+    }
+#else
     rc = pthread_mutex_init(&(test_hash[i].hash_lock), NULL);
     if (rc) {
       fprintf(where, "%s: test pthread_mutex_init error %d\n", __func__, rc);
       fflush(where);
       exit(rc);
     }
-    rc = pthread_cond_init(&(test_hash[i].condition), NULL);
+    test_hash[i].condition = 
+      (pthread_cond_t *)malloc(sizeof(pthread_cond_t));
+
+    if (NULL == test_hash[i].condition) {
+      fprintf(where, "%s: unable to malloc a pthread_cond_t \n",__func__);
+      fflush(where);
+      exit(-2);
+    }
+
+    rc = pthread_cond_init(test_hash[i].condition, NULL);
     if (rc) {
       fprintf(where, "%s: test pthread_cond_init error %d\n", __func__, rc);
       fflush(where);
       exit(rc);
     }
+#endif
   }
 
   netlib_init();


