Ruby 2.5.0dev (2017-10-22 revision 60238)
thread_sync.c
/* included by thread.c */
#include "ccan/list/list.h"

static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
static VALUE rb_eClosedQueueError;

/* sync_waiter is always on-stack */
struct sync_waiter {
    rb_thread_t *th;
    struct list_node node;
};

#define MUTEX_ALLOW_TRAP FL_USER1

static int
wakeup_one(struct list_head *head)
{
    struct sync_waiter *cur = 0, *next = 0;

    list_for_each_safe(head, cur, next, node) {
        list_del_init(&cur->node);
        if (cur->th->status != THREAD_KILLED) {
            rb_threadptr_interrupt(cur->th);
            cur->th->status = THREAD_RUNNABLE;
            return TRUE;
        }
    }
    return FALSE;
}

static void
wakeup_all(struct list_head *head)
{
    struct sync_waiter *cur = 0, *next = 0;

    list_for_each_safe(head, cur, next, node) {
        list_del_init(&cur->node);
        if (cur->th->status != THREAD_KILLED) {
            rb_threadptr_interrupt(cur->th);
            cur->th->status = THREAD_RUNNABLE;
        }
    }
}

/* Mutex */

typedef struct rb_mutex_struct {
    struct rb_thread_struct volatile *th;
    struct rb_mutex_struct *next_mutex;
    struct list_head waitq; /* protected by GVL */
} rb_mutex_t;

#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
#endif
static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);

/*
 * Document-class: Mutex
 *
 * Mutex implements a simple semaphore that can be used to coordinate access to
 * shared data from multiple concurrent threads.
 *
 * Example:
 *
 *   semaphore = Mutex.new
 *
 *   a = Thread.new {
 *     semaphore.synchronize {
 *       # access shared resource
 *     }
 *   }
 *
 *   b = Thread.new {
 *     semaphore.synchronize {
 *       # access shared resource
 *     }
 *   }
 *
 */

#define GetMutexPtr(obj, tobj) \
    TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))

#define mutex_mark NULL

static size_t
rb_mutex_num_waiting(rb_mutex_t *mutex)
{
    struct sync_waiter *w = 0;
    size_t n = 0;

    list_for_each(&mutex->waitq, w, node) {
        n++;
    }

    return n;
}

static void
mutex_free(void *ptr)
{
    rb_mutex_t *mutex = ptr;
    if (mutex->th) {
        /* rb_warn("free locked mutex"); */
        const char *err = rb_mutex_unlock_th(mutex, mutex->th);
        if (err) rb_bug("%s", err);
    }
    ruby_xfree(ptr);
}

static size_t
mutex_memsize(const void *ptr)
{
    return sizeof(rb_mutex_t);
}

static const rb_data_type_t mutex_data_type = {
    "mutex",
    {mutex_mark, mutex_free, mutex_memsize,},
    0, 0, RUBY_TYPED_WB_PROTECTED,
};

VALUE
rb_obj_is_mutex(VALUE obj)
{
    if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
        return Qtrue;
    }
    else {
        return Qfalse;
    }
}

static VALUE
mutex_alloc(VALUE klass)
{
    VALUE obj;
    rb_mutex_t *mutex;

    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
    list_head_init(&mutex->waitq);
    return obj;
}

/*
 * call-seq:
 *    Mutex.new   -> mutex
 *
 * Creates a new Mutex
 */
static VALUE
mutex_initialize(VALUE self)
{
    return self;
}

VALUE
rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}

/*
 * call-seq:
 *    mutex.locked?  -> true or false
 *
 * Returns +true+ if this lock is currently held by some thread.
 */
VALUE
rb_mutex_locked_p(VALUE self)
{
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);
    return mutex->th ? Qtrue : Qfalse;
}

static void
mutex_locked(rb_thread_t *th, VALUE self)
{
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    if (th->keeping_mutexes) {
        mutex->next_mutex = th->keeping_mutexes;
    }
    th->keeping_mutexes = mutex;
}

/*
 * call-seq:
 *    mutex.try_lock  -> true or false
 *
 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
 * lock was granted.
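 *
 * A minimal illustrative example:
 *
 *    m = Mutex.new
 *    m.try_lock   #=> true
 *    m.try_lock   #=> false (the lock is already held; no blocking)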
 */
VALUE
rb_mutex_trylock(VALUE self)
{
    rb_mutex_t *mutex;
    VALUE locked = Qfalse;
    GetMutexPtr(self, mutex);

    if (mutex->th == 0) {
        rb_thread_t *th = GET_THREAD();
        mutex->th = th;
        locked = Qtrue;

        mutex_locked(th, self);
    }

    return locked;
}

/*
 * At most one thread can use cond_timedwait and watch for deadlock
 * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
 * introduce new race conditions. [Bug #6278] [ruby-core:44275]
 */
static const rb_thread_t *patrol_thread = NULL;

/*
 * call-seq:
 *    mutex.lock  -> self
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
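 *
 * A minimal illustrative example:
 *
 *    m = Mutex.new
 *    m.lock      #=> m
 *    m.locked?   #=> true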
 */
VALUE
rb_mutex_lock(VALUE self)
{
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    /* When running trap handler */
    if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
        th->interrupt_mask & TRAP_INTERRUPT_MASK) {
        rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (rb_mutex_trylock(self) == Qfalse) {
        struct sync_waiter w;

        if (mutex->th == th) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        w.th = th;

        while (mutex->th != th) {
            enum rb_thread_status prev_status = th->status;
            struct timeval *timeout = 0;
            struct timeval tv = { 0, 100000 }; /* 100ms */

            th->status = THREAD_STOPPED_FOREVER;
            th->locking_mutex = self;
            th->vm->sleeper++;
            /*
             * Careful! While some contended threads are in native_sleep(),
             * vm->sleeper is an unstable value. We have to avoid both
             * deadlock and busy loop.
             */
            if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
                !patrol_thread) {
                timeout = &tv;
                patrol_thread = th;
            }

            list_add_tail(&mutex->waitq, &w.node);
            native_sleep(th, timeout); /* release GVL */
            list_del(&w.node);
            if (!mutex->th) {
                mutex->th = th;
            }

            if (patrol_thread == th)
                patrol_thread = NULL;

            th->locking_mutex = Qfalse;
            if (mutex->th && timeout && !RUBY_VM_INTERRUPTED(th)) {
                rb_check_deadlock(th->vm);
            }
            if (th->status == THREAD_STOPPED_FOREVER) {
                th->status = prev_status;
            }
            th->vm->sleeper--;

            if (mutex->th == th) mutex_locked(th, self);

            RUBY_VM_CHECK_INTS_BLOCKING(th);
        }
    }
    return self;
}

/*
 * call-seq:
 *    mutex.owned?  -> true or false
 *
 * Returns +true+ if this lock is currently held by the current thread.
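 *
 * A minimal illustrative example:
 *
 *    m = Mutex.new
 *    m.owned?                      #=> false
 *    m.synchronize { m.owned? }    #=> true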
 */
VALUE
rb_mutex_owned_p(VALUE self)
{
    VALUE owned = Qfalse;
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;

    GetMutexPtr(self, mutex);

    if (mutex->th == th)
        owned = Qtrue;

    return owned;
}

static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
{
    const char *err = NULL;

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        struct sync_waiter *cur = 0, *next = 0;
        rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;

        mutex->th = 0;
        list_for_each_safe(&mutex->waitq, cur, next, node) {
            list_del_init(&cur->node);
            switch (cur->th->status) {
              case THREAD_RUNNABLE: /* from someone else calling Thread#run */
              case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
                rb_threadptr_interrupt(cur->th);
                goto found;
              case THREAD_STOPPED: /* probably impossible */
                rb_bug("unexpected THREAD_STOPPED");
              case THREAD_KILLED:
                /* not sure about this, possible in exit GC? */
                rb_bug("unexpected THREAD_KILLED");
                continue;
            }
        }
      found:
        while (*th_mutex != mutex) {
            th_mutex = &(*th_mutex)->next_mutex;
        }
        *th_mutex = mutex->next_mutex;
        mutex->next_mutex = NULL;
    }

    return err;
}

/*
 * call-seq:
 *    mutex.unlock  -> self
 *
 * Releases the lock.
 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
 */
VALUE
rb_mutex_unlock(VALUE self)
{
    const char *err;
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    err = rb_mutex_unlock_th(mutex, GET_THREAD());
    if (err) rb_raise(rb_eThreadError, "%s", err);

    return self;
}

#if defined(HAVE_WORKING_FORK)
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
    if (th->keeping_mutexes) {
        rb_mutex_abandon_all(th->keeping_mutexes);
    }
    th->keeping_mutexes = NULL;
}

static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    rb_mutex_t *mutex;

    if (!th->locking_mutex) return;

    GetMutexPtr(th->locking_mutex, mutex);
    if (mutex->th == th)
        rb_mutex_abandon_all(mutex);
    th->locking_mutex = Qfalse;
}

static void
rb_mutex_abandon_all(rb_mutex_t *mutexes)
{
    rb_mutex_t *mutex;

    while (mutexes) {
        mutex = mutexes;
        mutexes = mutex->next_mutex;
        mutex->th = 0;
        mutex->next_mutex = 0;
        list_head_init(&mutex->waitq);
    }
}
#endif

static VALUE
rb_mutex_sleep_forever(VALUE time)
{
    rb_thread_sleep_deadly_allow_spurious_wakeup();
    return Qnil;
}

static VALUE
rb_mutex_wait_for(VALUE time)
{
    struct timeval *t = (struct timeval *)time;
    sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
    return Qnil;
}

VALUE
rb_mutex_sleep(VALUE self, VALUE timeout)
{
    time_t beg, end;
    struct timeval t;

    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    }
    rb_mutex_unlock(self);
    beg = time(0);
    if (NIL_P(timeout)) {
        rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
    }
    else {
        rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
    }
    end = time(0) - beg;
    return INT2FIX(end);
}

/*
 * call-seq:
 *    mutex.sleep(timeout = nil)  -> number
 *
 * Releases the lock and sleeps for +timeout+ seconds if it is given and
 * non-nil, or forever otherwise. Raises +ThreadError+ if +mutex+ wasn't
 * locked by the current thread.
 *
 * When the thread is next woken up, it will attempt to reacquire
 * the lock.
 *
 * Note that this method can wake up without an explicit Thread#wakeup call;
 * for example, the thread may be woken by receiving a signal.
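 *
 * A minimal illustrative example:
 *
 *    m = Mutex.new
 *    m.synchronize do
 *      m.sleep(0.5)   # releases m, sleeps ~0.5s, then reacquires m
 *    end              #=> number of seconds actually slept (an Integer)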
 */
static VALUE
mutex_sleep(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout;

    rb_scan_args(argc, argv, "01", &timeout);
    return rb_mutex_sleep(self, timeout);
}

/*
 * call-seq:
 *    mutex.synchronize { ... }  -> result of the block
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes. See the example under +Mutex+.
 */

VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}
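
/*
 * Illustrative C-level usage of rb_mutex_synchronize (a sketch; the callback
 * and arguments here are hypothetical, not part of this file):
 *
 *    static VALUE
 *    update_counter(VALUE arg)
 *    {
 *        return rb_funcall(arg, rb_intern("update"), 0);
 *    }
 *
 *    result = rb_mutex_synchronize(mutex, update_counter, counter);
 */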

/*
 * call-seq:
 *    mutex.synchronize { ... }  -> result of the block
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes. See the example under +Mutex+.
 */
static VALUE
rb_mutex_synchronize_m(VALUE self, VALUE args)
{
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }

    return rb_mutex_synchronize(self, rb_yield, Qundef);
}

void
rb_mutex_allow_trap(VALUE self, int val)
{
    Check_TypedStruct(self, &mutex_data_type);

    if (val)
        FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
    else
        FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
}

/* Queue */

#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
PACKED_STRUCT_UNALIGNED(struct rb_queue {
    struct list_head waitq;
    const VALUE que;
    int num_waiting;
});

#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
    struct rb_queue q;
    int num_waiting_push;
    struct list_head pushq;
    long max;
});

static void
queue_mark(void *ptr)
{
    struct rb_queue *q = ptr;

    /* no need to mark threads in waitq, they are on stack */
    rb_gc_mark(q->que);
}

static size_t
queue_memsize(const void *ptr)
{
    return sizeof(struct rb_queue);
}

static const rb_data_type_t queue_data_type = {
    "queue",
    {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED,
};

static VALUE
queue_alloc(VALUE klass)
{
    VALUE obj;
    struct rb_queue *q;

    obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
    list_head_init(queue_waitq(q));
    return obj;
}

static struct rb_queue *
queue_ptr(VALUE obj)
{
    struct rb_queue *q;

    TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
    return q;
}

#define QUEUE_CLOSED FL_USER5

static void
szqueue_mark(void *ptr)
{
    struct rb_szqueue *sq = ptr;

    queue_mark(&sq->q);
}

static size_t
szqueue_memsize(const void *ptr)
{
    return sizeof(struct rb_szqueue);
}

static const rb_data_type_t szqueue_data_type = {
    "sized_queue",
    {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED,
};

static VALUE
szqueue_alloc(VALUE klass)
{
    struct rb_szqueue *sq;
    VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
                                      &szqueue_data_type, sq);
    list_head_init(szqueue_waitq(sq));
    list_head_init(szqueue_pushq(sq));
    return obj;
}

static struct rb_szqueue *
szqueue_ptr(VALUE obj)
{
    struct rb_szqueue *sq;

    TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
    return sq;
}

static VALUE
ary_buf_new(void)
{
    return rb_ary_tmp_new(1);
}

static VALUE
check_array(VALUE obj, VALUE ary)
{
    if (!RB_TYPE_P(ary, T_ARRAY)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
    }
    return ary;
}

static long
queue_length(VALUE self, struct rb_queue *q)
{
    return RARRAY_LEN(check_array(self, q->que));
}

static int
queue_closed_p(VALUE self)
{
    return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
}

static void
raise_closed_queue_error(VALUE self)
{
    rb_raise(rb_eClosedQueueError, "queue closed");
}

static VALUE
queue_closed_result(VALUE self, struct rb_queue *q)
{
    assert(queue_length(self, q) == 0);
    return Qnil;
}

/*
 * Document-class: Queue
 *
 * The Queue class implements multi-producer, multi-consumer queues.
 * It is especially useful in threaded programming when information
 * must be exchanged safely between multiple threads. The Queue class
 * implements all the required locking semantics.
 *
 * The class implements a FIFO (first in, first out) type of queue.
 * In a FIFO queue, the first tasks added are the first retrieved.
 *
 * Example:
 *
 *   queue = Queue.new
 *
 *   producer = Thread.new do
 *     5.times do |i|
 *       sleep rand(i) # simulate expense
 *       queue << i
 *       puts "#{i} produced"
 *     end
 *   end
 *
 *   consumer = Thread.new do
 *     5.times do |i|
 *       value = queue.pop
 *       sleep rand(i/2) # simulate expense
 *       puts "consumed #{value}"
 *     end
 *   end
 *
 */

/*
 * Document-method: Queue::new
 *
 * Creates a new queue instance.
 */

static VALUE
rb_queue_initialize(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);
    RB_OBJ_WRITE(self, &q->que, ary_buf_new());
    list_head_init(queue_waitq(q));
    return self;
}

static VALUE
queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
{
    if (queue_closed_p(self)) {
        raise_closed_queue_error(self);
    }
    rb_ary_push(check_array(self, q->que), obj);
    wakeup_one(queue_waitq(q));
    return self;
}

/*
 * Document-method: Queue#close
 * call-seq:
 *    close
 *
 * Closes the queue. A closed queue cannot be re-opened.
 *
 * After the call to close completes, the following are true:
 *
 * - +closed?+ will return true
 *
 * - +close+ will be ignored.
 *
 * - calling enq/push/<< will raise an exception.
 *
 * - when +empty?+ is false, calling deq/pop/shift will return an object
 *   from the queue as usual.
 *
 * ClosedQueueError inherits from StopIteration, so you can break out of
 * a loop block.
 *
 * Example:
 *
 *    q = Queue.new
 *    Thread.new{
 *      while e = q.deq # wait for nil to break loop
 *        # ...
 *      end
 *    }
 *    q.close
 */

static VALUE
rb_queue_close(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    if (!queue_closed_p(self)) {
        FL_SET(self, QUEUE_CLOSED);

        wakeup_all(queue_waitq(q));
    }

    return self;
}

/*
 * Document-method: Queue#closed?
 * call-seq: closed?
 *
 * Returns +true+ if the queue is closed.
 */

static VALUE
rb_queue_closed_p(VALUE self)
{
    return queue_closed_p(self) ? Qtrue : Qfalse;
}

/*
 * Document-method: Queue#push
 * call-seq:
 *    push(object)
 *    enq(object)
 *    <<(object)
 *
 * Pushes the given +object+ to the queue.
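 *
 * A minimal illustrative example:
 *
 *    q = Queue.new
 *    q.push(:job)
 *    q << :another_job   # << and enq are aliases of push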
 */

static VALUE
rb_queue_push(VALUE self, VALUE obj)
{
    return queue_do_push(self, queue_ptr(self), obj);
}

static VALUE
queue_sleep(VALUE arg)
{
    rb_thread_sleep_deadly_allow_spurious_wakeup();
    return Qnil;
}

struct queue_waiter {
    struct sync_waiter w;
    union {
        struct rb_queue *q;
        struct rb_szqueue *sq;
    } as;
};

static VALUE
queue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    list_del(&qw->w.node);
    qw->as.q->num_waiting--;

    return Qfalse;
}

static VALUE
szqueue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    list_del(&qw->w.node);
    qw->as.sq->num_waiting_push--;

    return Qfalse;
}

static VALUE
queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
{
    check_array(self, q->que);

    while (RARRAY_LEN(q->que) == 0) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue empty");
        }
        else if (queue_closed_p(self)) {
            return queue_closed_result(self, q);
        }
        else {
            struct queue_waiter qw;

            assert(RARRAY_LEN(q->que) == 0);
            assert(queue_closed_p(self) == 0);

            qw.w.th = GET_THREAD();
            qw.as.q = q;
            list_add_tail(&qw.as.q->waitq, &qw.w.node);
            qw.as.q->num_waiting++;

            rb_ensure(queue_sleep, Qfalse, queue_sleep_done, (VALUE)&qw);
        }
    }

    return rb_ary_shift(q->que);
}

static int
queue_pop_should_block(int argc, const VALUE *argv)
{
    int should_block = 1;
    rb_check_arity(argc, 0, 1);
    if (argc > 0) {
        should_block = !RTEST(argv[0]);
    }
    return should_block;
}

/*
 * Document-method: Queue#pop
 * call-seq:
 *    pop(non_block=false)
 *    deq(non_block=false)
 *    shift(non_block=false)
 *
 * Retrieves data from the queue.
 *
 * If the queue is empty, the calling thread is suspended until data is pushed
 * onto the queue. If +non_block+ is true, the thread isn't suspended, and
 * +ThreadError+ is raised.
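 *
 * A minimal illustrative example:
 *
 *    q = Queue.new
 *    q << 1
 *    q.pop         #=> 1
 *    q.pop(true)   # queue is now empty, so ThreadError is raised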
 */

static VALUE
rb_queue_pop(int argc, VALUE *argv, VALUE self)
{
    int should_block = queue_pop_should_block(argc, argv);
    return queue_do_pop(self, queue_ptr(self), should_block);
}

/*
 * Document-method: Queue#empty?
 * call-seq: empty?
 *
 * Returns +true+ if the queue is empty.
 */

static VALUE
rb_queue_empty_p(VALUE self)
{
    return queue_length(self, queue_ptr(self)) == 0 ? Qtrue : Qfalse;
}

/*
 * Document-method: Queue#clear
 *
 * Removes all objects from the queue.
 */

static VALUE
rb_queue_clear(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    rb_ary_clear(check_array(self, q->que));
    return self;
}

/*
 * Document-method: Queue#length
 * call-seq:
 *    length
 *    size
 *
 * Returns the length of the queue.
 */

static VALUE
rb_queue_length(VALUE self)
{
    return LONG2NUM(queue_length(self, queue_ptr(self)));
}

/*
 * Document-method: Queue#num_waiting
 *
 * Returns the number of threads waiting on the queue.
 */

static VALUE
rb_queue_num_waiting(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    return INT2NUM(q->num_waiting);
}

/*
 * Document-class: SizedQueue
 *
 * This class represents queues of a specified size capacity. The push
 * operation may block if the capacity is full.
 *
 * See Queue for an example of how a SizedQueue works.
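 *
 * A minimal illustrative example:
 *
 *   sq = SizedQueue.new(2)
 *   sq << 1
 *   sq << 2
 *   # a third push would now block until a consumer pops an element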
 */

/*
 * Document-method: SizedQueue::new
 * call-seq: new(max)
 *
 * Creates a fixed-length queue with a maximum size of +max+.
 */

static VALUE
rb_szqueue_initialize(VALUE self, VALUE vmax)
{
    long max;
    struct rb_szqueue *sq = szqueue_ptr(self);

    max = NUM2LONG(vmax);
    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }

    RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
    list_head_init(szqueue_waitq(sq));
    list_head_init(szqueue_pushq(sq));
    sq->max = max;

    return self;
}

/*
 * Document-method: SizedQueue#close
 * call-seq:
 *    close
 *
 * Similar to Queue#close.
 *
 * The difference is the behavior with respect to waiting enqueuing threads:
 * if there are any, they are interrupted by raising
 * ClosedQueueError('queue closed').
 */
static VALUE
rb_szqueue_close(VALUE self)
{
    if (!queue_closed_p(self)) {
        struct rb_szqueue *sq = szqueue_ptr(self);

        FL_SET(self, QUEUE_CLOSED);
        wakeup_all(szqueue_waitq(sq));
        wakeup_all(szqueue_pushq(sq));
    }
    return self;
}

/*
 * Document-method: SizedQueue#max
 *
 * Returns the maximum size of the queue.
 */

static VALUE
rb_szqueue_max_get(VALUE self)
{
    return LONG2NUM(szqueue_ptr(self)->max);
}

/*
 * Document-method: SizedQueue#max=
 * call-seq: max=(number)
 *
 * Sets the maximum size of the queue to the given +number+.
 */

static VALUE
rb_szqueue_max_set(VALUE self, VALUE vmax)
{
    long max = NUM2LONG(vmax);
    long diff = 0;
    struct rb_szqueue *sq = szqueue_ptr(self);

    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }
    if (max > sq->max) {
        diff = max - sq->max;
    }
    sq->max = max;
    while (diff-- > 0 && wakeup_one(szqueue_pushq(sq))) {
        /* keep waking more up */
    }
    return vmax;
}

static int
szqueue_push_should_block(int argc, const VALUE *argv)
{
    int should_block = 1;
    rb_check_arity(argc, 1, 2);
    if (argc > 1) {
        should_block = !RTEST(argv[1]);
    }
    return should_block;
}

/*
 * Document-method: SizedQueue#push
 * call-seq:
 *    push(object, non_block=false)
 *    enq(object, non_block=false)
 *    <<(object)
 *
 * Pushes +object+ to the queue.
 *
 * If there is no space left in the queue, waits until space becomes
 * available, unless +non_block+ is true. If +non_block+ is true, the
 * thread isn't suspended, and +ThreadError+ is raised.
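 *
 * A minimal illustrative example:
 *
 *    sq = SizedQueue.new(1)
 *    sq.push(:a)         # succeeds
 *    sq.push(:b, true)   # queue is full, so ThreadError is raised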
 */

static VALUE
rb_szqueue_push(int argc, VALUE *argv, VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    int should_block = szqueue_push_should_block(argc, argv);

    while (queue_length(self, &sq->q) >= sq->max) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue full");
        }
        else if (queue_closed_p(self)) {
            goto closed;
        }
        else {
            struct queue_waiter qw;
            struct list_head *pushq = szqueue_pushq(sq);

            qw.w.th = GET_THREAD();
            qw.as.sq = sq;
            list_add_tail(pushq, &qw.w.node);
            sq->num_waiting_push++;

            rb_ensure(queue_sleep, Qfalse, szqueue_sleep_done, (VALUE)&qw);
        }
    }

    if (queue_closed_p(self)) {
      closed:
        raise_closed_queue_error(self);
    }

    return queue_do_push(self, &sq->q, argv[0]);
}

static VALUE
szqueue_do_pop(VALUE self, int should_block)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    VALUE retval = queue_do_pop(self, &sq->q, should_block);

    if (queue_length(self, &sq->q) < sq->max) {
        wakeup_one(szqueue_pushq(sq));
    }

    return retval;
}

/*
 * Document-method: SizedQueue#pop
 * call-seq:
 *    pop(non_block=false)
 *    deq(non_block=false)
 *    shift(non_block=false)
 *
 * Retrieves data from the queue.
 *
 * If the queue is empty, the calling thread is suspended until data is pushed
 * onto the queue. If +non_block+ is true, the thread isn't suspended, and
 * +ThreadError+ is raised.
 */

static VALUE
rb_szqueue_pop(int argc, VALUE *argv, VALUE self)
{
    int should_block = queue_pop_should_block(argc, argv);
    return szqueue_do_pop(self, should_block);
}

/*
 * Document-method: SizedQueue#clear
 *
 * Removes all objects from the queue.
 */

static VALUE
rb_szqueue_clear(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    rb_ary_clear(check_array(self, sq->q.que));
    wakeup_all(szqueue_pushq(sq));
    return self;
}

static VALUE
rb_szqueue_length(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return LONG2NUM(queue_length(self, &sq->q));
}

/*
 * Document-method: SizedQueue#num_waiting
 *
 * Returns the number of threads waiting on the queue.
 */

static VALUE
rb_szqueue_num_waiting(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
}

/*
 * Document-method: SizedQueue#empty?
 * call-seq: empty?
 *
 * Returns +true+ if the queue is empty.
 */

static VALUE
rb_szqueue_empty_p(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return queue_length(self, &sq->q) == 0 ? Qtrue : Qfalse;
}


/* ConditionVariable */
/* TODO: maybe this can be IMEMO */
struct rb_condvar {
    struct list_head waitq;
};

/*
 * Document-class: ConditionVariable
 *
 * ConditionVariable objects augment class Mutex. Using condition variables,
 * it is possible to suspend while in the middle of a critical section until a
 * resource becomes available.
 *
 * Example:
 *
 *   mutex = Mutex.new
 *   resource = ConditionVariable.new
 *
 *   a = Thread.new {
 *     mutex.synchronize {
 *       # Thread 'a' now needs the resource
 *       resource.wait(mutex)
 *       # 'a' can now have the resource
 *     }
 *   }
 *
 *   b = Thread.new {
 *     mutex.synchronize {
 *       # Thread 'b' has finished using the resource
 *       resource.signal
 *     }
 *   }
 */

static size_t
condvar_memsize(const void *ptr)
{
    return sizeof(struct rb_condvar);
}

static const rb_data_type_t cv_data_type = {
    "condvar",
    {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED,
};

static struct rb_condvar *
condvar_ptr(VALUE self)
{
    struct rb_condvar *cv;

    TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);

    return cv;
}

static VALUE
condvar_alloc(VALUE klass)
{
    struct rb_condvar *cv;
    VALUE obj;

    obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
    list_head_init(&cv->waitq);

    return obj;
}

/*
 * Document-method: ConditionVariable::new
 *
 * Creates a new condition variable instance.
 */

static VALUE
rb_condvar_initialize(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    list_head_init(&cv->waitq);
    return self;
}

struct sleep_call {
    VALUE mutex;
    VALUE timeout;
};

static ID id_sleep;

static VALUE
do_sleep(VALUE args)
{
    struct sleep_call *p = (struct sleep_call *)args;
    return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
}

static VALUE
delete_from_waitq(struct sync_waiter *w)
{
    list_del(&w->node);

    return Qnil;
}

/*
 * Document-method: ConditionVariable#wait
 * call-seq: wait(mutex, timeout=nil)
 *
 * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
 *
 * If +timeout+ is given, this method returns after +timeout+ seconds have
 * passed, even if no other thread has signaled.
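 *
 * An illustrative example, reusing +mutex+ and +resource+ from the class
 * documentation above:
 *
 *    mutex.synchronize {
 *      resource.wait(mutex, 1.5)   # gives up waiting after ~1.5 seconds
 *    }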
 */

static VALUE
rb_condvar_wait(int argc, VALUE *argv, VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    VALUE mutex, timeout;
    struct sleep_call args;
    struct sync_waiter w;

    rb_scan_args(argc, argv, "11", &mutex, &timeout);

    args.mutex = mutex;
    args.timeout = timeout;
    w.th = GET_THREAD();
    list_add_tail(&cv->waitq, &w.node);
    rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&w);

    return self;
}

/*
 * Document-method: ConditionVariable#signal
 *
 * Wakes up the first thread in line waiting for this lock.
 */

static VALUE
rb_condvar_signal(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_one(&cv->waitq);
    return self;
}

/*
 * Document-method: ConditionVariable#broadcast
 *
 * Wakes up all threads waiting for this lock.
 */

static VALUE
rb_condvar_broadcast(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_all(&cv->waitq);
    return self;
}

/* :nodoc: */
static VALUE
undumpable(VALUE obj)
{
    rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
    UNREACHABLE;
}

static void
alias_global_const(const char *name, VALUE klass)
{
    rb_define_const(rb_cObject, name, klass);
}

static void
Init_thread_sync(void)
{
#if 0
    rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject); /* teach rdoc ConditionVariable */
    rb_cQueue = rb_define_class("Queue", rb_cObject); /* teach rdoc Queue */
    rb_cSizedQueue = rb_define_class("SizedQueue", rb_cObject); /* teach rdoc SizedQueue */
#endif

    /* Mutex */
    rb_cMutex = rb_define_class_under(rb_cThread, "Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
    rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
    rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);

    /* Queue */
    rb_cQueue = rb_define_class_under(rb_cThread, "Queue", rb_cObject);
    rb_define_alloc_func(rb_cQueue, queue_alloc);

    rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);

    rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, 0);
    rb_undef_method(rb_cQueue, "initialize_copy");
    rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
    rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
    rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
    rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
    rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
    rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
    rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
    rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);

    rb_define_alias(rb_cQueue, "enq", "push");
    rb_define_alias(rb_cQueue, "<<", "push");
    rb_define_alias(rb_cQueue, "deq", "pop");
    rb_define_alias(rb_cQueue, "shift", "pop");
    rb_define_alias(rb_cQueue, "size", "length");

    rb_cSizedQueue = rb_define_class_under(rb_cThread, "SizedQueue", rb_cQueue);
    rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);

    rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
    rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
    rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
    rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
    rb_define_method(rb_cSizedQueue, "push", rb_szqueue_push, -1);
    rb_define_method(rb_cSizedQueue, "pop", rb_szqueue_pop, -1);
    rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
    rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
    rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);

    rb_define_alias(rb_cSizedQueue, "enq", "push");
    rb_define_alias(rb_cSizedQueue, "<<", "push");
    rb_define_alias(rb_cSizedQueue, "deq", "pop");
    rb_define_alias(rb_cSizedQueue, "shift", "pop");
    rb_define_alias(rb_cSizedQueue, "size", "length");

    /* CVar */
    rb_cConditionVariable = rb_define_class_under(rb_cThread,
                                                  "ConditionVariable", rb_cObject);
    rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);

    id_sleep = rb_intern("sleep");

    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    rb_undef_method(rb_cConditionVariable, "initialize_copy");
    rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
    rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);

#define ALIAS_GLOBAL_CONST(name) \
    alias_global_const(#name, rb_c##name)

    ALIAS_GLOBAL_CONST(Mutex);
    ALIAS_GLOBAL_CONST(Queue);
    ALIAS_GLOBAL_CONST(SizedQueue);
    ALIAS_GLOBAL_CONST(ConditionVariable);
    rb_provide("thread.rb");
}