/*
 * rwlock8.c
 *
 * Hammer on a bunch of rwlocks to test robustness and fairness.
 * Printed stats should be roughly even for each thread.
 *
 * Yield during each access to exercise lock contention code paths
 * more than rwlock7.c does (particularly on uni-processor systems).
 */

#include "test.h"

#ifdef __GNUC__
#include <stdlib.h>
#endif

#define THREADS    5
#define DATASIZE   7
#define ITERATIONS 10000

/*
 * Keep statistics for each thread.
 */
typedef struct thread_tag
{
    int thread_num;
    pthread_t thread_id;
    int updates;
    int reads;
    int changed;
    unsigned int seed;      /* set at creation; this variant calls rand() directly */
} thread_t;

/*
 * Read-write lock and shared data.
 */
typedef struct data_tag
{
    pthread_rwlock_t lock;
    int data;
    int updates;
} data_t;

static thread_t threads[THREADS];
static data_t data[DATASIZE];

/*
 * Thread start routine that uses read-write locks.
 */
static void *thread_routine (void *arg)
{
    thread_t *self = (thread_t *) arg;
    int iteration;
    int element = 0;
    int interval = 1 + rand () % 71;

    self->changed = 0;

    for (iteration = 0; iteration < ITERATIONS; iteration++)
    {
        /*
        if (iteration % (ITERATIONS / 10) == 0)
        {
            putchar ('.');
            fflush (stdout);
        }
        */
        /*
         * Every "interval" iterations, perform an update operation
         * (write lock instead of read lock).
         */
        if ((iteration % interval) == 0)
        {
            assert (pthread_rwlock_wrlock (&data[element].lock) == 0);
            data[element].data = self->thread_num;
            data[element].updates++;
            self->updates++;
            interval = 1 + rand () % 71;
            sched_yield ();
            assert (pthread_rwlock_unlock (&data[element].lock) == 0);
        }
        else
        {
            /*
             * Look at the current data element to see whether the
             * current thread last updated it. Count the times, to
             * report later.
             */
            assert (pthread_rwlock_rdlock (&data[element].lock) == 0);
            self->reads++;
            if (data[element].data != self->thread_num)
            {
                self->changed++;
                interval = 1 + self->changed % 71;
            }
            sched_yield ();
            assert (pthread_rwlock_unlock (&data[element].lock) == 0);
        }

        element = (element + 1) % DATASIZE;
    }

    return NULL;
}

int pthread_test_rwlock8()
{
    int count;
    int data_count;
    int thread_updates = 0;
    int data_updates = 0;
    struct _timeb currSysTime1;
    struct _timeb currSysTime2;

    /*
     * Initialize the shared data.
     */
    for (data_count = 0; data_count < DATASIZE; data_count++)
    {
        data[data_count].data = 0;
        data[data_count].updates = 0;
        assert (pthread_rwlock_init (&data[data_count].lock, NULL) == 0);
    }

    /* Timestamp the start of the run (the elapsed time is captured but
     * not reported in this variant). */
    _ftime (&currSysTime1);

    /*
     * Create THREADS threads to access shared data.
     */
    for (count = 0; count < THREADS; count++)
    {
        threads[count].thread_num = count;
        threads[count].updates = 0;
        threads[count].reads = 0;
        threads[count].seed = 1 + rand () % 71;
        assert (pthread_create (&threads[count].thread_id,
                                NULL, thread_routine, (void *) &threads[count]) == 0);
    }

    /*
     * Wait for all threads to complete, and collect statistics.
     */
    for (count = 0; count < THREADS; count++)
    {
        assert (pthread_join (threads[count].thread_id, NULL) == 0);
    }

    for (count = 0; count < THREADS; count++)
    {
        thread_updates += threads[count].updates;
    }

    /*
    putchar ('\n');
    fflush (stdout);
    */
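
    /*
     * The header comment promises per-thread stats, but this variant only
     * accumulates them. A minimal reporting sketch (assuming printf and a
     * usable stdout on the target) can be enabled here:
     */
#if 0
    for (count = 0; count < THREADS; count++)
    {
        printf ("%02d: updates %d, reads %d, changed %d\n",
                count, threads[count].updates,
                threads[count].reads, threads[count].changed);
    }
#endif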

    /*
     * Collect statistics for the data.
     */
    for (data_count = 0; data_count < DATASIZE; data_count++)
    {
        data_updates += data[data_count].updates;
        assert (pthread_rwlock_destroy (&data[data_count].lock) == 0);
    }
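
    /*
     * Each write-locked update increments both a per-thread counter and a
     * per-element counter, so the two totals should agree. An optional
     * sanity check (a sketch, not part of the original pass criteria):
     */
#if 0
    assert (thread_updates == data_updates);
#endif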

    /* Timestamp the end of the run. */
    _ftime (&currSysTime2);

    return 0;
}
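
/*
 * The pthread_test_rwlock8() entry point suggests this file is driven by a
 * separate test harness. A minimal standalone driver for a hosted build
 * might look like this (the STANDALONE guard is purely illustrative):
 */
#ifdef STANDALONE
int main (void)
{
    return pthread_test_rwlock8 ();
}
#endif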