/[eiffelstudio]/branches/eth/eve/Src/C/run-time/eif_threads.c
ViewVC logotype

Contents of /branches/eth/eve/Src/C/run-time/eif_threads.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 92811 - (show annotations)
Fri Jul 26 04:35:53 2013 UTC (6 years, 2 months ago) by jasonw
File MIME type: text/plain
File size: 65274 byte(s)
<<Merged from trunk#92810.>>
1 /*
2 description: "Thread management routines."
3 date: "$Date$"
4 revision: "$Revision$"
5 copyright: "Copyright (c) 1985-2012, Eiffel Software."
6 license: "GPL version 2 see http://www.eiffel.com/licensing/gpl.txt)"
7 licensing_options: "Commercial license is available at http://www.eiffel.com/licensing"
8 copying: "[
9 This file is part of Eiffel Software's Runtime.
10
11 Eiffel Software's Runtime is free software; you can
12 redistribute it and/or modify it under the terms of the
13 GNU General Public License as published by the Free
14 Software Foundation, version 2 of the License
15 (available at the URL listed under "license" above).
16
17 Eiffel Software's Runtime is distributed in the hope
18 that it will be useful, but WITHOUT ANY WARRANTY;
19 without even the implied warranty of MERCHANTABILITY
20 or FITNESS FOR A PARTICULAR PURPOSE.
21 See the GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public
24 License along with Eiffel Software's Runtime; if not,
25 write to the Free Software Foundation, Inc.,
26 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 ]"
28 source: "[
29 Eiffel Software
30 5949 Hollister Ave., Goleta, CA 93117 USA
31 Telephone 805-685-1006, Fax 805-685-6869
32 Website http://www.eiffel.com
33 Customer support http://support.eiffel.com
34 ]"
35 */
36
37 /*
38 doc:<file name="eif_thread.c" header="eif_thread.h" version="$Id$" summary="Thread management routines">
39 */
40
41 #include "eif_portable.h"
42 #include "eif_eiffel.h"
43 #include "rt_threads.h"
44 #include "eif_posix_threads.h"
45 #include "rt_lmalloc.h"
46 #include "rt_globals.h"
47 #include "rt_err_msg.h"
48 #include "eif_sig.h"
49 #include "rt_garcol.h"
50 #include "rt_malloc.h"
51 #include "rt_macros.h"
52 #include "rt_types.h"
53 #include "rt_interp.h"
54 #include "rt_assert.h"
55 #include "rt_retrieve.h"
56 #include "rt_gen_conf.h"
57 #include "rt_hector.h"
58 #include "rt_run_idr.h"
59 #include "rt_store.h"
60 #include "rt_except.h"
61 #include "rt_memory.h"
62 #include "rt_option.h"
63 #include "rt_traverse.h"
64 #include "rt_object_id.h"
65 #include "rt_cecil.h"
66 #include "rt_debug.h"
67 #include "rt_main.h"
68 #include "eif_error.h"
69 #ifdef BOEHM_GC
70 #include "rt_boehm.h"
71 #endif
72
73 #include <string.h>
74
75
76 #ifdef EIF_THREADS
77
78 /*---------------------------------------*/
79 /*--- In multi-threaded environment ---*/
80 /*---------------------------------------*/
81
/* Public thread API implemented in this file. */
rt_public void eif_thr_panic(char *);
rt_public void eif_thr_init_root(void);
rt_public void eif_thr_register(int is_external);
rt_public int eif_thr_is_initialized(void);

/* Thread creation and termination. */
rt_public void eif_thr_create_with_attr(EIF_OBJECT, EIF_PROCEDURE, EIF_THR_ATTR_TYPE *);
rt_public void eif_thr_create_with_attr_new(EIF_OBJECT, EIF_PROCEDURE, EIF_INTEGER_32, EIF_BOOLEAN, EIF_THR_ATTR_TYPE *);
rt_public void eif_thr_exit(void);

/* Mutex operations exported to Eiffel code. */
rt_public EIF_POINTER eif_thr_mutex_create(void);
rt_public void eif_thr_mutex_lock(EIF_POINTER);
rt_public void eif_thr_mutex_unlock(EIF_POINTER);
rt_public EIF_BOOLEAN eif_thr_mutex_trylock(EIF_POINTER);
rt_public void eif_thr_mutex_destroy(EIF_POINTER);

/* Private helpers managing the per-thread runtime context. */
rt_private rt_global_context_t *eif_new_context (void);
rt_private void eif_free_context (rt_global_context_t *);
rt_private void eif_thr_entry(void *);
rt_private EIF_THR_TYPE eif_thr_current_thread(void);

/* To update GC with thread specific data. */
rt_private void eif_remove_gc_stacks(rt_global_context_t *);
rt_private void eif_init_gc_stacks(rt_global_context_t *);
rt_private void eif_free_gc_stacks (void);
rt_private void load_stack_in_gc (struct stack_list *, void *);
rt_private void remove_data_from_gc (struct stack_list *, void *);
rt_private void eif_stack_free (void *stack);
#ifdef EIF_TLS_WRAP
/*
doc:	<function name="eif_global_key" return_type="EIF_TSD_TYPE" export="private">
doc:		<summary>Key used to access per thread data.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None</synchronization>
doc:	</function>
*/
rt_private EIF_TLS_DECL EIF_TSD_TYPE eif_global_key;
/*
doc:	<routine name="eif_global_key_get" return_type="EIF_TSD_TYPE" export="public">
doc:		<summary>Accessor for `eif_global_key' when thread-local storage must be reached through a function call (EIF_TLS_WRAP) instead of a plain TLS variable.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None</synchronization>
doc:	</routine>
*/
rt_public EIF_TSD_TYPE eif_global_key_get (void)
{
	return eif_global_key;
}
#else
/*
doc:	<attribute name="eif_global_key" return_type="EIF_TSD_TYPE" export="public/private">
doc:		<summary>Key used to access per thread data.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None</synchronization>
doc:	</attribute>
*/
rt_public EIF_TLS EIF_TSD_TYPE eif_global_key;
#endif
140
#ifdef EIF_TLS_WRAP
/*
doc:	<attribute name="rt_global_key" return_type="RT_TSD_TYPE" export="private">
doc:		<summary>Key used to access private per thread data.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None</synchronization>
doc:	</attribute>
*/
rt_private EIF_TLS_DECL RT_TSD_TYPE rt_global_key;
/*
doc:	<routine name="rt_global_key_get" return_type="RT_TSD_TYPE" export="shared">
doc:		<summary>Accessor for `rt_global_key' when thread-local storage must be reached through a function call (EIF_TLS_WRAP) instead of a plain TLS variable.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None</synchronization>
doc:	</routine>
*/
rt_shared RT_TSD_TYPE rt_global_key_get (void)
{
	return rt_global_key;
}
#else
/*
doc:	<attribute name="rt_global_key" return_type="RT_TSD_TYPE" export="shared">
doc:		<summary>Key used to access private per thread data.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None</synchronization>
doc:	</attribute>
*/
rt_shared EIF_TLS RT_TSD_TYPE rt_global_key;
#endif
171
/*
doc:	<attribute name="eif_thread_launch_mutex" return_type="EIF_CS_TYPE *" export="private">
doc:		<summary>Mutex used to protect launching of a thread.</summary>
doc:		<thread_safety>Safe, initialized once in `eif_thr_root_init'.</thread_safety>
doc:		<synchronization>None</synchronization>
doc:		<fixme>Mutex is not freed. NOTE(review): `eif_thread_cleanup' does destroy it — this fixme may be stale, confirm.</fixme>
doc:	</attribute>
*/
rt_private EIF_CS_TYPE *eif_thread_launch_mutex = NULL;

/*
doc:	<attribute name="eif_is_gc_collecting" return_type="int" export="public">
doc:		<summary>Is GC currently performing a collection? Possible values are EIF_GC_NOT_RUNNING, EIF_GC_STARTING, EIF_GC_STARTED_WITH_SINGLE_RUNNING_THREAD and EIF_GC_STARTED_WITH_MULTIPLE_RUNNING_THREADS.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>eif_gc_mutex</synchronization>
doc:	</attribute>
*/
rt_public int volatile eif_is_gc_collecting = EIF_GC_NOT_RUNNING;

/*
doc:	<attribute name="rt_globals_list" return_type="struct stack_list" export="shared">
doc:		<summary>Used to store all per thread data of all running threads.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>eif_gc_mutex</synchronization>
doc:	</attribute>
*/
rt_shared struct stack_list rt_globals_list = {
	(int) 0,	/* count */
	(int) 0,	/* capacity */
	{NULL}		/* threads: the `rt_global_context_t *' entries (see `rt_globals_list.threads.data' usage below) */
};
203
204
205 /* Debugger usage */
206 #ifdef WORKBENCH
207 /*
208 doc: <routine name="get_thread_index" export="private">
209 doc: <summary>This is used to get the index of thread `th_id' in rt_globals_list.</summary>
210 doc: <thread_safety>Safe</thread_safety>
211 doc: <synchronization>To be done while already pocessing the `eif_gc_mutex' lock. (i.e: encapsulated in eif_synchronize_gc and eif_unsynchronize_gc</synchronization>
212 doc: </routine>
213 */
214 rt_private int get_thread_index(EIF_THR_TYPE th_id)
215 {
216 #ifdef EIF_THREADS
217 int count;
218 int i;
219 rt_global_context_t ** lst;
220
221 REQUIRE("eif GC synchronized", eif_is_synchronized());
222 count = rt_globals_list.count;
223 lst = (rt_global_context_t **) rt_globals_list.threads.data;
224 for (i = 0; i < count; i++) {
225 if (th_id == (((rt_global_context_t*)lst[i])->eif_thr_context_cx->thread_id)) {
226 break;
227 }
228 }
229 return i;
230 #else
231 return 0;
232 #endif
233 }
234
/*
doc:	<routine name="dbg_switch_to_thread" export="shared">
doc:		<summary>This is used to put `th_id''s thread context into current thread's context. Should be temporary, and we should replace original context right after. Return current thread id (to be used for restoring current context).</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>To be done while already possessing the `eif_gc_mutex' lock (i.e. encapsulated in eif_synchronize_gc and eif_unsynchronize_gc).</synchronization>
doc:	</routine>
*/

rt_shared EIF_THR_TYPE dbg_switch_to_thread (EIF_THR_TYPE th_id) {
	RT_GET_CONTEXT
	rt_global_context_t ** p_rtglob;
	eif_global_context_t ** p_eifglob;
	EIF_THR_TYPE thid;
		/* Remember the current thread id so the caller can switch back. */
	thid = rt_globals->eif_thr_context_cx->thread_id;

		/* Local helper: context of thread `th_id' in the global list. */
#define thread_rt_globals(th_id) ( ((rt_global_context_t **) rt_globals_list.threads.data)[get_thread_index(th_id)] )

	if (th_id != 0) {
		if (thid != th_id) {
#ifdef DEBUG
			printf ("Switch from %d to %d \n\n", thid, th_id);
#endif
			CHECK("eif GC synchronized", eif_is_synchronized());
				/* Rebind both TSD keys so that the current OS thread now
				 * sees `th_id''s private and public runtime contexts. */
			p_rtglob = &thread_rt_globals (th_id);
			p_eifglob = &((*p_rtglob)->eif_globals);
			EIF_TSD_SET(rt_global_key, *p_rtglob, "switch to thread id (rt).");
			EIF_TSD_SET(eif_global_key, *p_eifglob, "switch to thread id (eif).");
		}
	}
#undef thread_rt_globals
	return thid;
}
267 #endif
268
/* Serialize thread launches: the child holds this lock implicitly via the
 * parent until registration completes (see `eif_thr_entry'). */
#define LAUNCH_MUTEX_LOCK RT_TRACE(eif_pthread_cs_lock(eif_thread_launch_mutex))
#define LAUNCH_MUTEX_UNLOCK RT_TRACE(eif_pthread_cs_unlock(eif_thread_launch_mutex))

/* Create all global critical sections used by the runtime (GC, memory,
 * exceptions, tracing, CECIL, ...). Called once from `eif_thr_init_root'.
 * NOTE(review): the second argument to `eif_pthread_cs_create' is presumably
 * a spin-count hint for contended sections — confirm against its definition. */
rt_private void eif_thr_init_global_mutexes (void)
{
#ifdef ISE_GC
	RT_TRACE(eif_pthread_cs_create(&eif_gc_mutex, 0));
	RT_TRACE(eif_pthread_cs_create(&eif_gc_set_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&eif_gc_gsz_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&eif_type_set_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&eif_free_list_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&eiffel_usage_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&trigger_gc_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&eif_rt_g_data_mutex, 100));
#endif
	RT_TRACE(eif_pthread_cs_create(&eif_thread_launch_mutex, 0));
	RT_TRACE(eif_pthread_cs_create(&eif_except_lock, 0));
	RT_TRACE(eif_pthread_cs_create(&eif_memory_mutex, 0));
	RT_TRACE(eif_pthread_cs_create(&eif_trace_mutex, 0));
	RT_TRACE(eif_pthread_cs_create(&eif_eo_store_mutex, 0));
	RT_TRACE(eif_pthread_cs_create(&eif_global_once_set_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&eif_object_id_stack_mutex, 4000));
	RT_TRACE(eif_pthread_cs_create(&eif_gen_mutex, 100));
	RT_TRACE(eif_pthread_cs_create(&eif_hec_saved_mutex, 100));
	RT_TRACE(eif_pthread_cs_create(&eif_cecil_mutex, 0));
#ifdef EIF_WINDOWS
	RT_TRACE(eif_pthread_cs_create(&eif_console_mutex, 0));
#endif
}
298
rt_public void eif_thr_init_root(void)
{
	/*
	 * This function must be called once and only once at the very beginning
	 * of an Eiffel program (typically from main()) or the first time a thread
	 * initializes the Eiffel run-time if it is part of a Cecil system.
	 * The global key for Thread Specific Data is initialized: this variable
	 * is shared by all the threads, but it allows them to fetch a pointer
	 * to their own context (eif_globals structure).
	 */

	EIF_TSD_CREATE(eif_global_key,"Couldn't create global key for root thread");
	EIF_TSD_CREATE(rt_global_key,"Couldn't create private global key for root thread");
	eif_thr_init_global_mutexes();
		/* Register the calling thread as the root thread (is_external = 0). */
	eif_thr_register(0);
#ifdef ISE_GC
	create_scavenge_zones();
#endif
}
318
/*
doc:	<routine name="eif_thread_cleanup" export="shared">
doc:		<summary>To cleanup thread runtime data such as mutexes used for synchronization of CECIL, memory and GC. Must run in the root thread (see precondition).</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None required</synchronization>
doc:	</routine>
*/

rt_shared void eif_thread_cleanup (void)
{
	RT_GET_CONTEXT
	int destroy_mutex = 0;
	EIF_MUTEX_TYPE *l_children_mutex;

	REQUIRE("is_root", eif_thr_context->is_root);

	/* Free per thread data which is not freed because the root thread
	 * does not go through `eif_thr_exit'. See `eif_thr_exit' for explanation. */
	l_children_mutex = eif_thr_context->children_mutex;
	if (l_children_mutex) {
		EIF_ASYNC_SAFE_MUTEX_LOCK(l_children_mutex);
		/* Find out if there are still some children running; only tear down
		 * the join structures when none are left. */
		destroy_mutex = eif_thr_context->n_children == 0;
		EIF_ASYNC_SAFE_MUTEX_UNLOCK(l_children_mutex);
	} else {
		/* No child thread was ever launched: nothing to wait for. */
		destroy_mutex = 1;
	}
	if (destroy_mutex) {
		if (l_children_mutex) {
			RT_TRACE(eif_pthread_mutex_destroy(l_children_mutex));
			eif_thr_context->children_mutex = NULL;
			RT_TRACE(eif_pthread_cond_destroy(eif_thr_context->children_cond));
			eif_thr_context->children_cond = NULL;
		}
		/* Context data if any */
		eif_thr_context->thread_id = (EIF_THR_TYPE) 0;
		eif_free (eif_thr_context); /* Thread context passed by parent */
		eif_thr_context = NULL;
	}

	/* Free rt_globals context */
	eif_free_context (rt_globals);

	/* Free GC allocated stacks. */
	eif_free_gc_stacks ();

#ifdef ISE_GC
	/* Note for the reader. We do not destroy `eif_gc_mutex' because when
	 * we reach this stage, we are called via `reclaim' which has this mutex
	 * locked. Therefore trying to destroy it, would simply not work. */
	RT_TRACE(eif_pthread_cs_destroy(eif_gc_set_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_gc_gsz_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_type_set_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_free_list_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eiffel_usage_mutex));
	RT_TRACE(eif_pthread_cs_destroy(trigger_gc_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_rt_g_data_mutex));
#endif
	RT_TRACE(eif_pthread_cs_destroy(eif_thread_launch_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_except_lock));
	RT_TRACE(eif_pthread_cs_destroy(eif_memory_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_trace_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_eo_store_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_global_once_set_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_object_id_stack_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_gen_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_hec_saved_mutex));
	RT_TRACE(eif_pthread_cs_destroy(eif_cecil_mutex));
#ifdef EIF_WINDOWS
	RT_TRACE(eif_pthread_cs_destroy(eif_console_mutex));
#endif

	/* Finally release the two TSD keys created in `eif_thr_init_root'. */
	EIF_TSD_DESTROY(eif_global_key, "Could not free key");
	EIF_TSD_DESTROY(rt_global_key, "Could not free key");
}
394
395 rt_public void eif_thr_register(int is_external)
396 {
397 /*
398 * Allocates memory for the rt_globals structure, initializes it
399 * and makes it part of the Thread Specific Data (TSD).
400 * Allocates memory for onces (for non-root threads)
401 */
402
403 static int not_root_thread = 0; /* For initial eif_thread, we don't know how
404 * many once values we have to allocate */
405
406 rt_global_context_t *rt_globals = eif_new_context();
407
408 {
409 EIF_GET_CONTEXT
410
411 if (not_root_thread) {
412
413 /* Allocate room for once manifest strings array. */
414 ALLOC_OMS (EIF_oms);
415
416 if (EIF_once_count == 0) {
417 EIF_once_values = (EIF_once_value_t *) 0;
418 } else {
419 /*
420 * Allocate room for once values for all threads but the initial
421 * because we do not have the number of onces yet
422 * Also set value root thread id.
423 */
424 EIF_once_values = (EIF_once_value_t *) eif_realloc (EIF_once_values, EIF_once_count * sizeof *EIF_once_values);
425 /* needs malloc; crashes otherwise on some pure C-ansi compiler (SGI)*/
426 if (EIF_once_values == (EIF_once_value_t *) 0) {
427 /* Out of memory */
428 enomem();
429 }
430 memset ((EIF_REFERENCE) EIF_once_values, 0, EIF_once_count * sizeof *EIF_once_values);
431 }
432 } else {
433 not_root_thread = 1;
434
435 eif_thr_context = (rt_thr_context *) eif_malloc (sizeof (rt_thr_context));
436 if (eif_thr_context == NULL) {
437 eif_panic ("Couldn't allocate thread context");
438 } else {
439 memset (eif_thr_context, 0, sizeof (rt_thr_context));
440 eif_thr_context->is_alive = 1;
441 eif_thr_context->is_root = 1;
442 eif_thr_context->thread_id = eif_thr_current_thread();
443 #if defined(EIF_ASSERTIONS) && defined(EIF_WINDOWS)
444 eif_thr_context->win_thread_id = eif_pthread_current_id();
445 #endif
446 eif_thr_context->is_processor = eif_thr_context->is_processor;
447 eif_thr_context->logical_id = eif_thr_context->logical_id;
448 #ifdef WORKBENCH
449 dnotify_create_thread(eif_thr_context->thread_id);
450 #endif
451 }
452 }
453 /* Is current thread created by the EiffelThread library or by a third party library */
454 eif_globals->is_external_cx = is_external;
455 }
456 }
457
458 /*
459 doc: <routine name="eif_set_thr_context" export="public">
460 doc: <summary>Initialize thread context for non Eiffel threads. There is not much to initialize, but this is necessary so that `eif_thr_is_root' can distinguish the root thread from the others.</summary>
461 doc: <thread_safety>Safe</thread_safety>
462 doc: <synchronization>None required</synchronization>
463 doc: </routine>
464 */
465
466 rt_public void eif_set_thr_context (void) {
467 /* Initialize thread context for non Eiffel Threads.
468 * There is not much to initialize, but this is necessary
469 * so that `eif_thr_is_root ()' can distinguish the root thread
470 * from the others. */
471 RT_GET_CONTEXT
472 if (rt_globals && !eif_thr_context) {
473 eif_thr_context = (rt_thr_context *) eif_malloc (sizeof (rt_thr_context));
474 if (eif_thr_context == NULL) {
475 eif_panic ("Couldn't allocate thread context");
476 } else {
477 memset (eif_thr_context, 0, sizeof (rt_thr_context));
478 eif_thr_context->is_alive = 1;
479 eif_thr_context->is_root = 0;
480 eif_thr_context->logical_id = 0;
481 eif_thr_context->is_processor = EIF_FALSE;
482 eif_thr_context->thread_id = eif_thr_current_thread();
483 #if defined(EIF_ASSERTIONS) && defined(EIF_WINDOWS)
484 eif_thr_context->win_thread_id = eif_pthread_current_id();
485 #endif
486 }
487 }
488 }
489
490 /*
491 doc: <routine name="eif_thr_current_thread" return_type="EIF_THR_TYPE" export="private">
492 doc: <summary>Helper routine to be called only once per thread to avoid leaks (HANDLE leak under Windows). Which will give the associated thread identifier for either the main thread or a thread that was not started by the Eiffel runtime.</summary>
493 doc: <thread_safety>Safe</thread_safety>
494 doc: <synchronization>None required</synchronization>
495 doc: </routine>
496 */
497 rt_private EIF_THR_TYPE eif_thr_current_thread(void)
498 {
499 EIF_THR_TYPE Result;
500
501 #ifdef EIF_WINDOWS
502 /* On Windows, `eif_pthread_self' returns a pseudo handle to the Current thread, so we have to do
503 * something special on Windows. */
504 Result = OpenThread(THREAD_ALL_ACCESS, FALSE, GetCurrentThreadId());
505 CHECK("Cannot be null", Result);
506 #else
507 Result = eif_pthread_self();
508 #endif
509 return Result;
510 }
511
512 /*
513 doc: <routine name="eif_thr_root_object" return_type="EIF_REFERENCE" export="public">
514 doc: <summary>Return the root object associated with the Current thread. It can be Void in case the thread was not started by the Eiffel runtime.</summary>
515 doc: <thread_safety>Safe</thread_safety>
516 doc: <synchronization>None required</synchronization>
517 doc: </routine>
518 */
519 rt_public EIF_REFERENCE eif_thr_root_object(void)
520 {
521 RT_GET_CONTEXT
522 if (rt_globals && !(eif_thr_context->is_root)) {
523 return eif_access(eif_thr_context->current);
524 } else {
525 return root_obj;
526 }
527 }
528
529 /*
530 doc: <routine name="eif_thr_is_root" return_type="EIF_BOOLEAN" export="public">
531 doc: <summary>True if the calling thread is the Eiffel root thread. False otherwise.</summary>
532 doc: <thread_safety>Safe</thread_safety>
533 doc: <synchronization>None required</synchronization>
534 doc: </routine>
535 */
536 rt_public EIF_BOOLEAN eif_thr_is_root(void)
537 {
538 /* Returns True is the calling thread is the Eiffel root thread,
539 * False otherwise. */
540 RT_GET_CONTEXT
541 if (rt_globals) {
542 return EIF_TEST(eif_thr_context->is_root);
543 } else {
544 return EIF_FALSE;
545 }
546 }
547
548 /* Returns a non-zero value if the calling thread is initialized for Eiffel, zero otherwise. */
549 rt_public int eif_thr_is_initialized(void)
550 {
551 EIF_GET_CONTEXT
552 return (eif_globals != NULL);
553 }
554
555 rt_private rt_global_context_t *eif_new_context (void)
556 {
557 /*
558 * Create rt_globals and eif_globals structure and initializes some of their fields
559 * fields.
560 */
561 rt_global_context_t *rt_globals;
562 eif_global_context_t *eif_globals;
563
564 /* Create and initialize private context */
565 #ifdef BOEHM_GC
566 /* Because Boehm GC is not able to access per thread data for marking data that might
567 * be references from there, we use the Boehm special allocator `GC_malloc_uncollectable'
568 * which will make `rt_globals' one of the root for the GC mark and sweep. */
569 rt_globals = (rt_global_context_t *) GC_malloc_uncollectable (sizeof(rt_global_context_t));
570 #else
571 rt_globals = (rt_global_context_t *) eif_malloc(sizeof(rt_global_context_t));
572 #endif
573 if (!rt_globals) {
574 eif_thr_panic("No more memory for thread context");
575 }
576 memset (rt_globals, 0, sizeof(rt_global_context_t));
577 EIF_TSD_SET(rt_global_key, rt_globals, "Couldn't bind private context to TSD.");
578
579 /* Create and initialize public context */
580 eif_globals = (eif_global_context_t *) eif_malloc(sizeof(eif_global_context_t));
581 if (!eif_globals) {
582 eif_free(rt_globals);
583 eif_thr_panic("No more memory for thread context");
584 }
585 memset (eif_globals, 0, sizeof(eif_global_context_t));
586 EIF_TSD_SET(eif_global_key, eif_globals, "Couldn't bind public context to TSD.");
587
588 /* Private context has always a reference to public one to avoid
589 * calls to get thread specific data. */
590 rt_globals->eif_globals = eif_globals;
591
592 /* Initialize per thread data. It is done in the module which uses them */
593
594 /* except.c */
595 eif_except_thread_init ();
596
597 /* gen_conf.c */
598 eif_gen_conf_thread_init ();
599
600 /* retrieve.c */
601 eif_retrieve_thread_init ();
602
603 /* run_idr.c */
604 eif_run_idr_thread_init ();
605
606 /* store.c */
607 eif_store_thread_init ();
608
609 /* eif_threads.c */
610 #ifdef ISE_GC
611 /* By default current thread is allowed to launch a GC cycle */
612 thread_can_launch_gc = 1;
613 #endif
614
615 /* eif_type_id.c */
616 eif_pre_ecma_mapping_status = 1;
617
618 eif_init_gc_stacks(rt_globals);
619
620 return rt_globals;
621 }
622
/*
doc:	<routine name="eif_free_context" export="private">
doc:		<summary>To cleanup per thread data: release module-specific data, all per-thread stacks, then both context structures, and finally reset the TSD bindings to NULL.</summary>
doc:		<thread_safety>Safe</thread_safety>
doc:		<synchronization>None required</synchronization>
doc:	</routine>
*/

rt_private void eif_free_context (rt_global_context_t *rt_globals)
{
	eif_global_context_t *eif_globals;

	REQUIRE ("rt_globals not null", rt_globals);

	eif_globals = rt_globals->eif_globals;

	/* gen_conf.c */
	eif_gen_conf_thread_cleanup ();

	/* sig.c: release the alternate signal stack if one was installed. */
#ifdef HAS_SIGALTSTACK
	if (c_sig_stk) {
		eif_rt_xfree(c_sig_stk->ss_sp);
		eif_rt_xfree(c_sig_stk);
		c_sig_stk = NULL;
	}
#endif

	/* First free content of `eif_globals'. */
#ifdef EIF_WINDOWS
	/* WEL data if any */
	if (eif_globals->wel_per_thread_data) {
		eif_free (eif_globals->wel_per_thread_data);
	}
#endif

	/* Free array of once manifest strings */
	FREE_OMS (EIF_oms);

	if (EIF_once_values != NULL) {
		/* Free once values. */
		eif_free (EIF_once_values);
		EIF_once_values = NULL;
	}

	/* Free allocated stacks in eif_globals. */
#ifdef ISE_GC
	st_reset (&loc_stack);
	st_reset (&loc_set);
	st_reset (&hec_stack);
#endif
	st_reset (&once_set);
	st_reset (&oms_set);
	st_reset (&sep_stack);
	if (prof_stack) {
		st_reset (prof_stack);
	}
	xstack_reset (&eif_stack);
#ifdef WORKBENCH
	c_opstack_reset (&cop_stack);
#endif

	/* Free public per thread data */
	eif_free (eif_globals);
	rt_globals->eif_globals = NULL;

	/* Free allocated stacks in rt_globals. */
	xstack_reset (&eif_trace);

	/* Free allocated structure for trace printing. */
	ex_string.used = 0;
	ex_string.size = 0;
	if (ex_string.area) {
		eif_rt_xfree (ex_string.area);
		ex_string.area = NULL;
	}

#ifdef EIF_WINDOWS
	/* Free allocated structure for trace printing buffer. */
	ex_buffer_1.used = 0;
	ex_buffer_1.size = 0;
	if (ex_buffer_1.area) {
		eif_rt_xfree (ex_buffer_1.area);
		ex_buffer_1.area = NULL;
	}

	ex_buffer_2.used = 0;
	ex_buffer_2.size = 0;
	if (ex_buffer_2.area) {
		eif_rt_xfree (ex_buffer_2.area);
		ex_buffer_2.area = NULL;
	}
#endif

	/* Free allocated structure for invariant monitoring. */
	if (inv_mark_tablep) {
		eif_rt_xfree(inv_mark_tablep);
		inv_mark_tablep = NULL;
	}

#ifdef WORKBENCH
	/* Interpreter stacks and registers. */
	opstack_reset (&op_stack);
	dbstack_reset (&db_stack);
	if (iregs) {
		eif_rt_xfree (iregs);
	}
#endif

	/* Free private per thread data */
	eif_free (rt_globals);

	/* Reset the per thread data so stale pointers cannot be fetched via TSD. */
	EIF_TSD_SET(rt_global_key, NULL, "Couldn't bind private context to TSD.");
	EIF_TSD_SET(eif_global_key, NULL, "Couldn't bind private context to TSD.");
}
738
/* Create a new Eiffel thread running `init_func' on a clone of `thr_root_obj'
 * with attributes `attr'. Convenience wrapper around
 * `eif_thr_create_with_attr_new', passing RTS_PID of the root object as the
 * logical id (presumably its SCOOP processor id — confirm) and requesting a
 * plain thread (is_processor = EIF_FALSE). */
rt_public void eif_thr_create_with_attr (EIF_OBJECT thr_root_obj,
										 EIF_PROCEDURE init_func,
										 EIF_THR_ATTR_TYPE *attr)
{
	eif_thr_create_with_attr_new (thr_root_obj, init_func, RTS_PID(eif_access(thr_root_obj)), EIF_FALSE, attr);
}
745
rt_public void eif_thr_create_with_attr_new (EIF_OBJECT thr_root_obj,
											 EIF_PROCEDURE init_func,
											 EIF_INTEGER_32 thr_logical_id,
											 EIF_BOOLEAN is_processor,
											 EIF_THR_ATTR_TYPE *attr)
{
	/*
	 * Creates a new Eiffel thread. This function is only called from
	 * Eiffel and is given five arguments:
	 * - the object (whose class inherits from THREAD) a clone of which
	 *   will become the root object of the new thread
	 * - the Eiffel routine it will execute
	 * - the priority, the stack size.
	 *
	 * These arguments are part of the routine context that will be
	 * passed to the new thread via the low-level platform-dependant
	 * thread-creation function.
	 *
	 * This context also contains a pointer to the thread-id of the new
	 * thread, a pointer to the parent's children-counter `n_children', a
	 * mutex and a condition variable that are used by eif_thr_join_all()
	 * and eif_thr_exit().
	 */

	RT_GET_CONTEXT

	rt_thr_context *routine_ctxt;
	int res;

	routine_ctxt = (rt_thr_context *) eif_malloc(sizeof(rt_thr_context));
	if (!routine_ctxt) {
		eif_thr_panic("No more memory to launch new thread\n");
	} else {
			/* Fill in the context handed over to `eif_thr_entry'. */
		memset(routine_ctxt, 0, sizeof(rt_thr_context));
		routine_ctxt->current = eif_adopt (thr_root_obj);
		routine_ctxt->routine = init_func;
		routine_ctxt->thread_id = (EIF_THR_TYPE) 0;
#if defined(EIF_ASSERTIONS) && defined(EIF_WINDOWS)
		routine_ctxt->win_thread_id = (DWORD) 0;
#endif
		routine_ctxt->logical_id = thr_logical_id;
		routine_ctxt->is_processor = is_processor;
		routine_ctxt->parent_context = eif_thr_context;
		routine_ctxt->is_alive = 1;

		if (!eif_thr_context->children_mutex) {
			/* It is the first time this thread creates a subthread (hopefully!), so
			 * we create a mutex and a condition variable for join and join_all */
			RT_TRACE_KEEP(res, eif_pthread_mutex_create(&eif_thr_context->children_mutex));
			if (res != T_OK) {
				eif_thr_panic ("Couldn't create join mutex");
			} else {
				RT_TRACE_KEEP(res, eif_pthread_cond_create (&eif_thr_context->children_cond));
				if (res != T_OK) {
					/* Free previously allocated mutex. */
					RT_TRACE(eif_pthread_mutex_destroy(eif_thr_context->children_mutex));
					eif_thr_panic ("Cannot create children condition variable");
				}
			}
		}
			/* Account for the new child before it starts. */
		EIF_ASYNC_SAFE_MUTEX_LOCK(eif_thr_context->children_mutex);
		eif_thr_context->n_children++;
		EIF_ASYNC_SAFE_MUTEX_UNLOCK(eif_thr_context->children_mutex);

		SIGBLOCK;
		LAUNCH_MUTEX_LOCK;

		/* Actual creation of the thread in the next 3 lines. */
		RT_TRACE_KEEP(res, eif_pthread_create (&routine_ctxt->thread_id, attr, eif_thr_entry, routine_ctxt));
		last_child = routine_ctxt->thread_id;
		LAUNCH_MUTEX_UNLOCK;
		SIGRESUME;
		if (res == T_CANNOT_CREATE_THREAD) {
				/* NOTE(review): on this failure path `routine_ctxt' is leaked
				 * and `n_children' remains incremented — verify whether this
				 * is intended before changing. */
			eraise("Cannot create thread", EN_EXT);
		}
	}
}
823
rt_private void eif_thr_entry (void *arg)
{
	/*
	 * This function is a wrapper to the Eiffel routine that will be
	 * executed by the new thread. It is directly called upon creation
	 * of the thread, and initializes the Eiffel run-time.
	 * `arg' is the `rt_thr_context' built by `eif_thr_create_with_attr_new'.
	 */

	rt_thr_context *routine_ctxt = (rt_thr_context *) arg;
	/* If we are starting a thread while under a GC synchronization point
	 * we should wait until this is completed before continuing.
	 * We cannot use the async safe locking routine since the runtime
	 * is not yet initialized in this thread. We can block as
	 * long as needed since for the GC, this thread does not even exist. */
	if (eif_is_gc_collecting) {
		RT_TRACE(eif_pthread_cs_lock(eif_gc_mutex));
		RT_TRACE(eif_pthread_cs_unlock(eif_gc_mutex));
	}

	/* To prevent current thread to return too soon after call
	 * to eif_pthread_create.
	 * That way `thread' is properly initialized and can be freed
	 * safely later on */
	LAUNCH_MUTEX_LOCK;
	LAUNCH_MUTEX_UNLOCK;
		/* Build this thread's runtime context (non-external thread). */
	eif_thr_register(0);
	{
		RT_GET_CONTEXT
		EIF_GET_CONTEXT

		struct ex_vect *exvect;
		jmp_buf exenv;

		eif_thr_context = routine_ctxt;
		initsig();
		initstk();
		if (egc_prof_enabled) {
			initprf();
		}
			/* Install the root exception vector; `failure' is reached via
			 * longjmp when an exception is not caught. */
		exvect = new_exset((char *) 0, 0, (char *) 0, 0, 0, 0);
		exvect->ex_jbuf = &exenv;

#ifdef _CRAY
		if (setjmp(exenv)) {
			failure();
		}
#else
		if ((echval = setjmp(exenv))) {
			failure();
		}
#endif

#ifdef WORKBENCH
		xinitint();
		/* Call the `execute' routine of the thread */
		dnotify_create_thread(eif_thr_context->thread_id);
		if (eif_thr_context->is_processor) {
			dnotify_register_scoop_processor (eif_thr_context->thread_id, eif_thr_context->logical_id);
		}
#endif
		init_emnger(); /* Initialize objects hold by exception manager */
		if (eif_thr_context->logical_id != -1) {
			/* A logical ID has been set so pass it to the Eiffel thread init callback. */
			(FUNCTION_CAST(void,(EIF_REFERENCE, EIF_INTEGER_32)) eif_thr_context->routine)(eif_access(routine_ctxt->current), eif_thr_context->logical_id);
		} else {
			(FUNCTION_CAST(void,(EIF_REFERENCE)) eif_thr_context->routine)(eif_access(routine_ctxt->current));
		}

		exok();
	}
		/* Normal termination path: release per-thread data and signal parent. */
	eif_thr_exit ();
	return;
}
897
rt_public void eif_thr_exit(void)
{
	/*
	 * Terminate the calling thread. Called for threads launched by Eiffel with
	 * eif_thr_create_with_attr() and for externally created threads registered
	 * with the runtime. All the memory allocated with eif_malloc() for the
	 * thread context is freed. This function must be called from the thread
	 * itself (not the parent). Guarded by `thread_exiting' so a recursive or
	 * repeated call is a no-op.
	 */

	RT_GET_CONTEXT

	if (!thread_exiting) {
		int destroy_mutex; /* If non null, we'll destroy the 'join' mutex */
		int l_has_parent_thread, l_is_external_thread;
		int ret;	/* Return Status of "eifaddr_offset". */
		EIF_INTEGER offset;	/* Location of `terminated' in `eif_thr_context->current' */
		EIF_MUTEX_TYPE *l_children_mutex, *l_parent_children_mutex;
		/* Saved now because `eif_thr_context' may be freed below, while the
		 * ID is still needed for the final `eif_pthread_exit' call. */
		EIF_THR_TYPE l_thread_id = eif_thr_context->thread_id;

		thread_exiting = 1;

		REQUIRE("has_context", eif_thr_context);

		l_is_external_thread = rt_globals->eif_globals->is_external_cx;
		l_has_parent_thread = (eif_thr_context->current) && (eif_thr_context->parent_context);
#ifdef WORKBENCH
		if (l_has_parent_thread) {
			dnotify_exit_thread(eif_thr_context->thread_id);
		}
#endif
		exitprf();

		if (l_has_parent_thread) {

			if (eif_thr_context->is_processor == EIF_FALSE) {
				/* Set {THREAD}.terminated to True, not applicable to SCOOP Processors. */
				offset = eifaddr_offset (eif_access(eif_thr_context->current), "terminated", &ret);
				if (ret == EIF_CECIL_OK) {
					/* Set the `terminated' field of the thread object to True so that
					 * it knows the thread is terminated */
					*(EIF_BOOLEAN *) (eif_access(eif_thr_context->current) + offset) = EIF_TRUE;
				}
			}

			/* Remove current object from hector and reset entry to NULL. */
			eif_wean(eif_thr_context->current);
			eif_thr_context->current = NULL;

			/* Prevent other threads from waiting for the current thread in case
			 * one of the following calls is blocking. */
			EIF_ENTER_C;
			l_parent_children_mutex = eif_thr_context->parent_context->children_mutex;
			EIF_ASYNC_SAFE_MUTEX_LOCK(l_parent_children_mutex);
			/* Decrement the number of child threads of the parent */
			eif_thr_context->parent_context->n_children -= 1;
			/* Check if no children are alive and parent is dead. */
			destroy_mutex = (!eif_thr_context->parent_context->is_alive) && (eif_thr_context->parent_context->n_children == 0);
			/* Wake up a parent blocked in `eif_thr_join_all' or `eif_thr_wait'. */
			RT_TRACE(eif_pthread_cond_broadcast(eif_thr_context->parent_context->children_cond));
			EIF_ASYNC_SAFE_MUTEX_UNLOCK(l_parent_children_mutex);

			/* If we are the last running child of our parent and our parent does not exist
			 * anymore, we have to clean the resources. */
			if (destroy_mutex) {
				RT_TRACE(eif_pthread_mutex_destroy(l_parent_children_mutex));
				RT_TRACE(eif_pthread_cond_destroy(eif_thr_context->parent_context->children_cond));
				eif_thr_context->parent_context->children_cond = NULL;
				eif_free (eif_thr_context->parent_context);
				eif_thr_context->parent_context = NULL;
			}
		} else {
			EIF_ENTER_C;
		}

		/* Every thread that has created a child thread with
		 * eif_thr_create_with_attr() has created a mutex and a condition
		 * variable to be able to do a join_all (or a join). If no children are
		 * still alive, we destroy eif_children_mutex and eif_children_cond,
		 * otherwise we will let the last child alive do the cleaning.
		 * NOTE: `destroy_mutex' is reused here for the current thread's own
		 * children resources; its previous value (parent cleanup) is done with. */
		l_children_mutex = eif_thr_context->children_mutex;
		if (l_children_mutex) {
			EIF_ASYNC_SAFE_MUTEX_LOCK(l_children_mutex);
			/* Find out if there are still some children running. */
			destroy_mutex = eif_thr_context->n_children == 0;
			if (!destroy_mutex) {
				/* We cannot destroy ourself because we still have some running children
				 * threads, we therefore needs to mark ourself dead. */
				eif_thr_context->is_alive = 0;
			}
			EIF_ASYNC_SAFE_MUTEX_UNLOCK(l_children_mutex);
		} else {
			destroy_mutex = 1;
		}
		if (destroy_mutex) {
			if (l_children_mutex) {
				RT_TRACE(eif_pthread_mutex_destroy(l_children_mutex));
				eif_thr_context->children_mutex = NULL;
				RT_TRACE(eif_pthread_cond_destroy(eif_thr_context->children_cond));
				eif_thr_context->children_cond = NULL;
			}
		}
		EIF_EXIT_C;

#ifdef ISE_GC
		/* Destroy GC data associated with the current thread. Since we also
		 * destroy the data used for signal handling, we cannot handle signals
		 * anymore. So we are just going to prevent the signal from being
		 * processed alltogether by a call to SIGBLOCK without corresponding
		 * SIGRESUME. */
		SIGBLOCK;
		eif_synchronize_gc (rt_globals);
		eif_remove_gc_stacks (rt_globals);
#endif

#ifdef LMALLOC_CHECK
		if (eif_thr_context->is_root)	{	/* Is this the root thread */
			eif_lm_display ();
			eif_lm_free ();
		}
#endif	/* LMALLOC_CHECK */

		if (destroy_mutex) {
			/* Now that we are synchronized, we can reset the data that might be used
			 * within `eif_synchronize_gc'. */
			eif_thr_context->thread_id = (EIF_THR_TYPE) 0;
			eif_free (eif_thr_context);	/* Thread context passed by parent */
			eif_thr_context = NULL;
		}

		/* Clean per thread data. */
		eif_free_context (rt_globals);
		rt_globals = NULL;

#ifdef ISE_GC
		/* We cannot use `eif_unsynchronize_gc' because `rt_globals' has been completely freed so
		 * we have to do things manually.
		 * We first signal that we are not collecting anymore and then we unlock `eif_gc_mutex'. */
		eif_is_gc_collecting = EIF_GC_NOT_RUNNING;

		RT_TRACE(eif_pthread_cs_unlock(eif_gc_mutex));
#endif

#ifdef VXWORKS
		/* The TSD is managed in a different way under VxWorks: each thread
		 * must call taskVarAdd upon initialization and taskVarDelete upon
		 * termination. It was impossible to call taskVarDelete using the same
		 * model as on other platforms unless creating a new macro that would
		 * be useful only for VxWorks. It is easier to do the following:
		 */

		if (taskVarDelete(0,(int *)&(eif_global_key))) {
			eif_thr_panic("Problem with taskVarDelete\n");
		}
		if (taskVarDelete(0,(int *)&(rt_global_key))) {
			eif_thr_panic("Problem with taskVarDelete\n");
		}
#endif	/* VXWORKS */

		/* Only call the platform specific exit when thread was created by the Eiffel runtime. */
		if (!l_is_external_thread) {
			eif_pthread_exit(l_thread_id);
		}
	}
}	/* eif_thr_exit ().*/
1062
1063
1064 /**************************************************************************/
1065 /* NAME: eif_init_gc_stacks */
1066 /* ARGS: rt_globals: References to thread specific data */
1067 /*------------------------------------------------------------------------*/
1068 /* Initialize shared global stacks with thread specific stack. That way */
1069 /* the GC holds references to Eiffel objects in each thread */
1070 /**************************************************************************/
1071
rt_private void eif_init_gc_stacks(rt_global_context_t *rt_globals)
{
#ifdef ISE_GC
	/* Register every per-thread stack of `rt_globals' in the corresponding
	 * global list, under full GC synchronization, so the collector can see
	 * this thread's roots. Mirrored by `eif_remove_gc_stacks'. */
	/* NOTE(review): `eif_globals' looks unused here; the stack names below are
	 * presumably macros expanding to fields of it — confirm before removing. */
	eif_global_context_t *eif_globals = rt_globals->eif_globals;
	eif_synchronize_gc(rt_globals);
	load_stack_in_gc (&rt_globals_list, rt_globals);
	load_stack_in_gc (&loc_stack_list, &loc_stack);
	load_stack_in_gc (&loc_set_list, &loc_set);
	load_stack_in_gc (&once_set_list, &once_set);
	load_stack_in_gc (&oms_set_list, &oms_set);
	load_stack_in_gc (&hec_stack_list, &hec_stack);
	load_stack_in_gc (&sep_stack_list, &sep_stack);
	load_stack_in_gc (&eif_stack_list, &eif_stack);
	load_stack_in_gc (&eif_trace_list, &eif_trace);
#ifdef WORKBENCH
	load_stack_in_gc (&opstack_list, &op_stack);
#endif
	eif_unsynchronize_gc(rt_globals);
#endif
}
1092
1093 /**************************************************************************/
1094 /* NAME: eif_free_gc_stacks */
1095 /*------------------------------------------------------------------------*/
1096 /* Free stacks allocated to hold all running threads. */
1097 /**************************************************************************/
1098
rt_private void eif_free_gc_stacks(void)
{
#ifdef ISE_GC
	/* Free the backing arrays of the global per-thread stack lists (the
	 * `threads.data' buffers allocated by `load_stack_in_gc'). This frees
	 * only the list bookkeeping, not the per-thread stacks themselves.
	 * No synchronization here: caller is responsible for ensuring no other
	 * thread is running — TODO confirm at call sites. */
	eif_free (rt_globals_list.threads.data);
	eif_free (loc_stack_list.threads.data);
	eif_free (loc_set_list.threads.data);
	eif_free (once_set_list.threads.data);
	eif_free (oms_set_list.threads.data);
	eif_free (hec_stack_list.threads.data);
	eif_free (sep_stack_list.threads.data);
	eif_free (eif_stack_list.threads.data);
	eif_free (eif_trace_list.threads.data);
#ifdef WORKBENCH
	eif_free (opstack_list.threads.data);
#endif
#endif
}
1116
1117 /**************************************************************************/
1118 /* NAME: eif_remove_gc_stacks */
1119 /* ARGS: rt_globals: References to thread specific data */
1120 /*------------------------------------------------------------------------*/
1121 /* Destroy thread specific stacks and remove them from GC global stack */
1122 /**************************************************************************/
1123
rt_private void eif_remove_gc_stacks(rt_global_context_t *rt_globals)
{
#ifdef ISE_GC
	/* Unregister every per-thread stack of `rt_globals' from the global GC
	 * lists, then free the memory held by each stack. Reverse of
	 * `eif_init_gc_stacks'. Caller must hold the GC synchronization — this
	 * is called from `eif_thr_exit' after `eif_synchronize_gc' and from
	 * `eif_synchronize_gc' itself for dead threads. */
	/* NOTE(review): as in `eif_init_gc_stacks', `eif_globals' is presumably
	 * required by the stack macros below — confirm before removing. */
	eif_global_context_t *eif_globals = rt_globals->eif_globals;
	remove_data_from_gc (&rt_globals_list, rt_globals);
	remove_data_from_gc (&loc_stack_list, &loc_stack);
	remove_data_from_gc (&loc_set_list, &loc_set);
	remove_data_from_gc (&once_set_list, &once_set);
	remove_data_from_gc (&oms_set_list, &oms_set);
	remove_data_from_gc (&hec_stack_list, &hec_stack);
	remove_data_from_gc (&sep_stack_list, &sep_stack);
	remove_data_from_gc (&eif_stack_list, &eif_stack);
	remove_data_from_gc (&eif_trace_list, &eif_trace);
#ifdef WORKBENCH
	remove_data_from_gc (&opstack_list, &op_stack);
#endif
	eif_stack_free (&loc_stack);
	eif_stack_free (&loc_set);
	eif_stack_free (&once_set);
	eif_stack_free (&oms_set);
	eif_stack_free (&hec_stack);
	eif_stack_free (&sep_stack);
	/* The two stacks below are not properly cleaned up with `eif_stack_free'
	 * as they have one more attribute than the `struct stack' structure, thus
	 * the extra attribute is not reset. */
	eif_stack_free (&eif_stack);
	eif_stack_free (&eif_trace);
#ifdef WORKBENCH
	/* Although the stack below is made of 5 pointers like `struct stack' it
	 * is not exactly the same structure, but the call to `eif_stack_free'
	 * should do the job properly. */
	eif_stack_free (&op_stack);
#endif
#endif
}
1159
1160
1161 /**************************************************************************/
1162 /* NAME: load_stack_in_gc */
1163 /* ARGS: st_list: Global GC stack */
1164 /* st: thread specific stack that we are putting in `st_list'. */
1165 /*------------------------------------------------------------------------*/
1166 /* Insert `st' in `st_list->threads_stack' and update `st_list'. */
1167 /**************************************************************************/
1168
1169 rt_private void load_stack_in_gc (struct stack_list *st_list, void *st)
1170 {
1171 int count = st_list->count + 1;
1172 st_list->count = count;
1173 if (st_list->capacity < count) {
1174 void **stack;
1175 stack = (void **) eif_realloc (st_list->threads.data,
1176 count * sizeof(struct stack **));
1177 if (!stack) {
1178 enomem();
1179 } else {
1180 st_list->threads.data = stack;
1181 st_list->capacity = count;
1182 }
1183 }
1184 st_list->threads.data[count - 1] = st;
1185 }
1186
1187
1188 /**************************************************************************/
1189 /* NAME: remove_data_from_gc */
1190 /* ARGS: st_list: Global GC stack */
1191 /* st: thread specific data that should be in `st_list'. */
1192 /*------------------------------------------------------------------------*/
1193 /* Remove `st' from `st_list->threads_stack' and update `st_list' */
1194 /* accordingly. */
1195 /**************************************************************************/
1196
1197 rt_private void remove_data_from_gc (struct stack_list *st_list, void *st)
1198 {
1199 int count = st_list->count;
1200 int i = 0;
1201 void **stack = st_list->threads.data;
1202
1203 REQUIRE("Stack not empty", count > 0);
1204
1205 /* Linear search to find `st' in `threads_stack' */
1206 while (i < count) {
1207 if (stack[i] == st) {
1208 break;
1209 }
1210 i = i + 1;
1211 }
1212
1213 CHECK("Is found", i < count); /* We must have found entry that holds reference to `st' */
1214
1215 /* Remove one element */
1216 st_list->count = count - 1;
1217 /* Move last elements of the list to location of removed element. */
1218 stack [i] = stack [count -1];
1219 /* Reset last element to NULL. */
1220 stack [count - 1] = NULL;
1221 }
1222
1223 /**************************************************************************/
1224 /* NAME: eif_stack_free */
1225 /* ARGS: st: thread specific stack. */
1226 /*------------------------------------------------------------------------*/
1227 /* Free memory used by `st'. */
1228 /**************************************************************************/
1229
1230 rt_private void eif_stack_free (void *stack){
1231 struct stack *st = (struct stack *) stack;
1232 struct stchunk *c, *cn;
1233
1234 for (c = st->st_hd; c != (struct stchunk *) 0; c = cn) {
1235 cn = c->sk_next;
1236 eif_rt_xfree ((EIF_REFERENCE) c);
1237 }
1238
1239 st->st_hd = NULL;
1240 st->st_tl = NULL;
1241 st->st_cur = NULL;
1242 st->st_top = NULL;
1243 st->st_end = NULL;
1244 }
1245
1246 #ifdef ISE_GC
rt_public void eif_synchronize_for_gc (void)
	/* Synchronize current thread for a GC cycle */
{
	RT_GET_CONTEXT

	/* Simple synchronization, if a GC cycle was performed, then
	 * we will lock on `gc_mutex' only if current thread is not the
	 * one performing the GC cycle, otherwise we could cause dead-lock.
	 * This is needed when a GC cycle triggers calls to `dispose' routines.
	 */
	if (gc_thread_status != EIF_THREAD_GC_RUNNING) {
		/* Mark ourselves blocked, then briefly acquire and release the GC
		 * mutex: if a collection is in progress the lock blocks us right
		 * here until the collector is done; otherwise this is a cheap
		 * lock/unlock pair. */
		gc_thread_status = EIF_THREAD_BLOCKED;
		EIF_GC_MUTEX_LOCK;
		gc_thread_status = EIF_THREAD_RUNNING;
		EIF_GC_MUTEX_UNLOCK;
	}
}
1264
1265 rt_public int eif_is_in_eiffel_code (void)
1266 {
1267 RT_GET_CONTEXT
1268 return ((gc_thread_status == EIF_THREAD_RUNNING) ? 1 : 0);
1269 }
1270
rt_public void eif_enter_eiffel_code(void)
	/* Synchronize current thread as we enter some Eiffel code */
{
	RT_GET_CONTEXT
	if (rt_globals) {
		/* Do not change current thread status if we are currently running a
		 * GC cycle, the status will be reset in `eif_unsynchronize_gc'. */
		if (gc_thread_status != EIF_THREAD_GC_RUNNING) {
			/* Check if GC requested a synchronization before resetting our status. */
			gc_thread_status = EIF_THREAD_RUNNING;
		}
	} else {
		/* We are reentering Eiffel code but we have no more context, it means that the
		 * `rt_global_key' was destroyed in a call to `reclaim'. In other words, we are
		 * in the process of stopping the program. We cannot therefore continue safely.
		 * We will lock ourself on the `eif_gc_mutex' which should be locked already
		 * by `reclaim'.
		 * See eweasel test#scoop029 to reproduce the problem. */
		CHECK("is collecting", eif_is_gc_collecting);
		/* We cannot use EIF_GC_MUTEX_LOCK because it blocks signals and at
		 * this point the per thread data for signals is gone since `rt_globals' is NULL. */
		RT_TRACE(eif_pthread_cs_lock(eif_gc_mutex));
	}
}
1295
1296 rt_public void eif_exit_eiffel_code(void)
1297 /* Synchronize current thread as we exit some Eiffel code */
1298 {
1299 RT_GET_CONTEXT
1300 /* Do not change current thread status if we are currently running a
1301 * GC cycle, the status will be reset in `eif_unsynchronize_gc'. */
1302 if (gc_thread_status != EIF_THREAD_GC_RUNNING) {
1303 gc_thread_status = EIF_THREAD_BLOCKED;
1304 }
1305 }
1306
#ifdef DEBUG
/* Index of the current GC collection cycle; printed by `eif_synchronize_gc'
 * and incremented by `eif_unsynchronize_gc' (debug builds only). */
rt_private int counter = 0;
#endif
1310
1311 #ifdef EIF_ASSERTIONS
1312 /*
1313 doc: <routine name="eif_is_synchronized" return_type="int" export="shared">
1314 doc: <summary>Check if all threads are in a paused state, so that GC can safely be performed.</summary>
1315 doc: <return>1 when synchronized, 0 otherwise</return>
1316 doc: <thread_safety>Safe</thread_safety>
doc:		<synchronization>To be done while already possessing the `eif_gc_mutex' lock.</synchronization>
1318 doc: </routine>
1319 */
1320
1321 rt_shared int eif_is_synchronized (void)
1322 {
1323 int i;
1324
1325 for (i = 0; i < rt_globals_list.count; i ++) {
1326 if (((rt_global_context_t *) (rt_globals_list.threads.data [i]))->gc_thread_status_cx == EIF_THREAD_RUNNING) {
1327 return 0;
1328 #ifdef DEBUG
1329 } else {
1330 printf ("Status is %d\n", ((rt_global_context_t *) (rt_globals_list.threads.data [i]))->gc_thread_status_cx);
1331 #endif
1332 }
1333 }
1334 return 1;
1335 }
1336 #endif
1337
/* Preallocated snapshot buffer used by `eif_synchronize_gc' so it does not
 * have to call `eif_malloc' inside the critical section (malloc may not be
 * async-safe when a signal interrupted the program); the heap is used only
 * when more than NB_THREADS_DATA threads are registered. */
#define NB_THREADS_DATA 20
rt_private void * rt_threads_data [NB_THREADS_DATA];
1340
rt_shared void eif_synchronize_gc (rt_global_context_t *rt_globals)
	/* Synchronize all threads under GC control */
{
	/* Bring every thread under GC control to a safe point. On return the
	 * caller holds `eif_gc_mutex' and no other thread is marked
	 * EIF_THREAD_RUNNING. Calls may be nested: recursion depth is tracked in
	 * `gc_thread_collection_count' and undone by `eif_unsynchronize_gc'. */
	if (gc_thread_status != EIF_THREAD_GC_RUNNING) {
		struct stack_list all_thread_list;
		struct stack_list running_thread_list = {0, 0, { NULL }};
		rt_global_context_t *thread_globals;
		rt_thr_context *ctxt;
		int status, i;

		/* We are marking ourself to show that we are requesting a safe access
		 * to GC data. */
		gc_thread_status = EIF_THREAD_GC_REQUESTED;
		EIF_GC_MUTEX_LOCK;
#ifdef DEBUG
		printf ("Starting Collection number %d ...", counter);
#endif
		eif_is_gc_collecting = EIF_GC_STARTING;
		gc_thread_collection_count = 1;
		gc_thread_status = EIF_THREAD_GC_RUNNING;

		/* It is only useful to iterate over the threads when there are more than one. */
		if (rt_globals_list.count > 1) {
			/* We have acquired the lock, now, process all running threads and wait until
			 * they are all not marked `EIF_THREAD_RUNNING'. */
			memcpy(&all_thread_list, &rt_globals_list, sizeof(struct stack_list));
			/* Optimization to avoid calling `eif_malloc' in this critical section.
			 * It is also a way to circumvent a case where `eif_malloc' will deadlock
			 * when we arrive here after a signal was sent to the application and that
			 * the platform implementation of malloc is not async-safe. */
			if (rt_globals_list.count > NB_THREADS_DATA) {
				all_thread_list.threads.data = eif_malloc (rt_globals_list.count * sizeof(void *));
				if (!all_thread_list.threads.data) {
					/* Release the GC lock before raising, so other threads are
					 * not left blocked forever. */
					EIF_GC_MUTEX_UNLOCK;
					enomem();
				}
			} else {
				all_thread_list.threads.data = rt_threads_data;
			}

			memcpy(all_thread_list.threads.data, rt_globals_list.threads.data,
				rt_globals_list.count * sizeof(void *));

			CHECK("data not null", all_thread_list.threads.data);

			/* Repeatedly scan the snapshot: threads still running are collected
			 * into `running_thread_list' which becomes the next snapshot, until
			 * no thread remains marked EIF_THREAD_RUNNING. */
			while (all_thread_list.count != 0) {
				for (i = 0; i < all_thread_list.count; i++) {
					thread_globals = (rt_global_context_t *) all_thread_list.threads.data[i];
					if (thread_globals != rt_globals) {
						status = thread_globals->gc_thread_status_cx;
						if (status == EIF_THREAD_RUNNING) {
							ctxt = thread_globals->eif_thr_context_cx;
							if (ctxt && ctxt->is_alive && eif_pthread_is_alive (ctxt->thread_id) != T_OK) {
								/* Thread has died, we have to remove it from our internal list. */
								eif_remove_gc_stacks (thread_globals);
							} else {
								load_stack_in_gc (&running_thread_list, thread_globals);
							}
						}
					}
				}
				/* Free the previous snapshot (unless it is the static buffer)
				 * before adopting `running_thread_list' as the new one. */
				if (all_thread_list.threads.data != rt_threads_data) {
					eif_free (all_thread_list.threads.data);
				}
				memcpy(&all_thread_list, &running_thread_list, sizeof(struct stack_list));
				memset(&running_thread_list, 0, sizeof(struct stack_list));

				CHECK("data not null if not empty", (all_thread_list.count == 0) || (all_thread_list.threads.data));

				/* For performance reasons on systems with a poor scheduling policy,
				 * we switch context to one of the remaining running threads. Not doing
				 * so on a uniprocessor WinXP system, the execution was about 1000 times
				 * slower than on a bi-processor WinXP system. */
				if (all_thread_list.count != 0) {
					/* Yield to other ready to run threads if available. */
					RT_TRACE(eif_pthread_yield());
				}
			}


#ifdef DEBUG
			printf ("Synchronized...");
#endif
			eif_is_gc_collecting = EIF_GC_STARTED_WITH_MULTIPLE_RUNNING_THREADS;
		} else {
			eif_is_gc_collecting = EIF_GC_STARTED_WITH_SINGLE_RUNNING_THREAD;
		}
	} else {
		/* A recursive demand was made, we simply increment the blocking counter.
		 * No synchronization is required as we are still under the protection
		 * of `eif_gc_mutex'. */
		gc_thread_collection_count++;
#ifdef DEBUG
		printf ("+");
#endif
	}

	/* The postcondition cannot hold in most cases. For example, when launching quickly
	 * new threads, the above code will assume N threads, but now we might have N + 1 threads
	 * and the newly launched thread has not yet reached a synchronization point although we are
	 * sure at 100% that no Eiffel code will be executed. */
	/* ENSURE("Synchronized", eif_is_synchronized()); */
}
1444
rt_shared void eif_unsynchronize_gc (rt_global_context_t *rt_globals)
	/* Free all threads under GC control from GC control */
{
	/* Undo one level of `eif_synchronize_gc' nesting; only the outermost
	 * call actually releases the other threads. */
	gc_thread_collection_count--;

	if (gc_thread_collection_count == 0) {
#ifdef DEBUG
		printf ("... finishing %d\n", counter);
		counter++;
#endif
		/* Here we still have the lock of `gc_mutex'. So it is safe to update
		 * `eif_is_gc_collecting'. */
		eif_is_gc_collecting = EIF_GC_NOT_RUNNING;

		/* Let's mark ourself as a running thread. */
		gc_thread_status = EIF_THREAD_RUNNING;

		/* Because recursive calls can be made to `eif_synchronize_gc' we
		 * have to unlock the `eif_gc_mutex' mutex only at the last call
		 * to `eif_unsynchronize_gc'. */
		EIF_GC_MUTEX_UNLOCK;
	} else {
#ifdef DEBUG
		printf ("-");
#endif
	}
}
1472
1473 #endif /* ISE_GC */
1474
1475 #if !defined(EIF_WINDOWS) && !defined(VXWORKS)
1476 /*
1477 doc: <routine name="eif_thread_fork" return_type="pid_t" export="shared">
1478 doc: <summary>Call system fork and make sure that the GC is correctly updated in newly forked process. Made especially for EMC.</summary>
1479 doc: <return>On success, the PID of the child process is returned in the parent's thread of execution, and a 0 is returned in the child's thread of execution. On failure, a -1 will be returned in the parent's context, no child process will be created, and errno will be set appropriately.</return>
1480 doc: <thread_safety>Safe</thread_safety>
1481 doc: <synchronization>Global synchronization.</synchronization>
1482 doc: </routine>
1483 */
1484
1485 extern pid_t waitpid (pid_t pid, int *status, int options);
1486
1487 rt_shared pid_t eif_thread_fork(void) {
1488 RT_GET_CONTEXT
1489 EIF_GET_CONTEXT
1490
1491 pid_t result = (pid_t) 0;
1492
1493 /* Synchronize GC, so that only current thread is the one allowed to perform a fork. */
1494 #ifdef ISE_GC
1495 eif_synchronize_gc (rt_globals);
1496 #endif
1497
1498 /* EMC using as far as we know only Linux and Solaris, that's the two fork we are taking
1499 * care of. Not that for Solaris, we us `fork1()' which only forks the current thread, not
1500 * all thread, so that it matches the Linux behavior of `fork'. */
1501 #ifdef SOLARIS_THREAD
1502 result = fork1();
1503 #else
1504 result = fork();
1505 #endif
1506
1507 if (result == 0) {
1508 /* We are now in the child process. */
1509 /* First we need to reinitialize all our mutexes as the
1510 * one we have maybe own by the parent process. */
1511 eif_thr_init_global_mutexes();
1512
1513 /* Because we have recreated all our mutex, we need to lock the `eif_gc_mutex'
1514 * that was locked in the parent thread. */
1515 EIF_GC_MUTEX_LOCK;
1516
1517 /* Reinitialize our global lists to let the GC think that there
1518 * is only one running thread. */
1519 memset (&rt_globals_list, 0, sizeof (struct stack_list));
1520 #ifdef ISE_GC
1521 memset (&loc_stack_list, 0, sizeof (struct stack_list));
1522 memset (&loc_set_list, 0, sizeof (struct stack_list));
1523 memset (&once_set_list, 0, sizeof (struct stack_list));
1524 memset (&hec_stack_list, 0, sizeof (struct stack_list));
1525 memset (&sep_stack_list, 0, sizeof (struct stack_list));
1526 memset (&eif_stack_list, 0, sizeof (struct stack_list));
1527 memset (&eif_trace_list, 0, sizeof (struct stack_list));
1528 #ifdef WORKBENCH
1529 memset (&opstack_list, 0, sizeof (struct stack_list));
1530 #endif
1531
1532 load_stack_in_gc (&rt_globals_list, rt_globals);
1533 load_stack_in_gc (&loc_stack_list, &loc_stack);
1534 load_stack_in_gc (&loc_set_list, &loc_set);
1535 load_stack_in_gc (&once_set_list, &once_set);
1536 load_stack_in_gc (&hec_stack_list, &hec_stack);
1537 load_stack_in_gc (&sep_stack_list, &sep_stack);
1538 load_stack_in_gc (&eif_stack_list, &eif_stack);
1539 load_stack_in_gc (&eif_trace_list, &eif_trace);
1540 #ifdef WORKBENCH
1541 load_stack_in_gc (&opstack_list, &op_stack);
1542 #endif
1543 #endif
1544
1545 CHECK("Only one thread", rt_globals_list.count == 1);
1546 }
1547
1548 #ifdef ISE_GC
1549 eif_unsynchronize_gc (rt_globals);
1550 #endif
1551 return result;
1552 }
1553
1554 #endif
1555
rt_public void eif_thr_yield(void)
{
	/*
	 * Yields execution to other threads. Platform dependent, sometimes
	 * undefined. A failure of the underlying yield is traced, not raised.
	 */

	RT_TRACE(eif_pthread_yield());
}
1565
1566
rt_public void eif_thr_join_all(void)
{
	/* Our implementation of join_all: the parent thread keeps a record of the
	 * number of threads it has launched, and the children have a pointer to
	 * the parent context. So they decrement it upon termination. This
	 * variable is protected by the mutex children_mutex.
	 * This function waits until the value of n_children is equal to zero;
	 * exiting children broadcast `children_cond' (see eif_thr_exit), which
	 * wakes us up to re-test the counter. */

	RT_GET_CONTEXT

	/* If no thread has been launched, the mutex isn't initialized */
	if (eif_thr_context->children_mutex) {
		EIF_ASYNC_SAFE_MUTEX_LOCK(eif_thr_context->children_mutex);
		while (eif_thr_context->n_children != 0) {
			/* NOTE(review): presumably wraps pthread_cond_wait, which
			 * atomically releases `children_mutex' while waiting — confirm
			 * in eif_posix_threads.h. */
			RT_TRACE(eif_pthread_cond_wait(eif_thr_context->children_cond, eif_thr_context->children_mutex));
		}
		EIF_ASYNC_SAFE_MUTEX_UNLOCK(eif_thr_context->children_mutex);
	}
}
1590
rt_public void eif_thr_wait (EIF_OBJECT Current)
{
	/*
	 * Waits until a thread sets `terminated' from `Current' to True, which means it
	 * is terminated. This function is called by `join'. The calling
	 * thread must be the direct parent of the thread, or the function
	 * might loop indefinitely --PCV
	 */

	RT_GET_CONTEXT
	EIF_GET_CONTEXT

	int ret;	/* Return Status of "eifaddr". */
	EIF_INTEGER offset;	/* location of `terminated' in Current */
	EIF_REFERENCE thread_object = NULL;

	/* We need to protect thread_object, because the protected
	 * reference `eif_access (Current)' will be cleaned when
	 * Current thread exits. */
	RT_GC_PROTECT(thread_object);
	thread_object = eif_access(Current);
	/* Locate the `terminated' boolean attribute of the THREAD object via
	 * Cecil, so we can poll it directly below. */
	offset = eifaddr_offset (thread_object, "terminated", &ret);
	CHECK("terminated attribute exists", ret == EIF_CECIL_OK);

	/* If no thread has been launched, the mutex isn't initialized */
	if (eif_thr_context->children_mutex) {
		EIF_ENTER_C;
		EIF_ASYNC_SAFE_MUTEX_LOCK(eif_thr_context->children_mutex);
		EIF_EXIT_C;
		RTGC;
		/* Exiting children broadcast `children_cond' (see eif_thr_exit), so
		 * each wake-up re-tests the `terminated' flag. */
		while (*(EIF_BOOLEAN *) (thread_object + offset) == EIF_FALSE) {
			EIF_ENTER_C;
			RT_TRACE(eif_pthread_cond_wait(eif_thr_context->children_cond, eif_thr_context->children_mutex));
			EIF_EXIT_C;
			RTGC;
		}
		EIF_ASYNC_SAFE_MUTEX_UNLOCK(eif_thr_context->children_mutex);
	}
	RT_GC_WEAN(thread_object);
}
1631
rt_public EIF_BOOLEAN eif_thr_wait_with_timeout (EIF_OBJECT Current, EIF_NATURAL_64 a_timeout_ms)
{
	/*
	 * Waits until a thread sets `terminated' from `Current' to True or `a_timeout_ms'
	 * (milliseconds) elapses. Returns EIF_FALSE on timeout, EIF_TRUE otherwise.
	 * This function is called by `join_with_timeout'. The calling thread must be
	 * the direct parent of the thread, or the function might loop indefinitely --PCV */

	RT_GET_CONTEXT
	EIF_GET_CONTEXT

	int res;
	EIF_INTEGER offset;	/* location of `terminated' in Current */
	EIF_REFERENCE thread_object = NULL;

	/* We need to protect thread_object, because the protected
	 * reference `eif_access (Current)' will be cleaned when
	 * Current thread exits. */
	RT_GC_PROTECT(thread_object);
	thread_object = eif_access(Current);
	/* Locate the `terminated' boolean attribute of the THREAD object via Cecil. */
	offset = eifaddr_offset (thread_object, "terminated", &res);
	CHECK("terminated attribute exists", res == EIF_CECIL_OK);

	/* If no thread has been launched, the mutex isn't initialized */
	res = T_OK;
	if (eif_thr_context->children_mutex) {
		EIF_ENTER_C;
		EIF_ASYNC_SAFE_MUTEX_LOCK(eif_thr_context->children_mutex);
		EIF_EXIT_C;
		RTGC;
		/* Loop until the child terminated or the wait timed out. */
		while ((*(EIF_BOOLEAN *) (thread_object + offset) == EIF_FALSE) && (res == T_OK)) {
			EIF_ENTER_C;
			/* We do not use `RT_TRACE_KEEP' because we might get T_TIMEDOUT.
			 * We will trace the error after. */
			res = eif_pthread_cond_wait_with_timeout(eif_thr_context->children_cond, eif_thr_context->children_mutex, a_timeout_ms);
#ifdef EIF_ASSERTIONS
			if ((res != T_OK) && (res != T_TIMEDOUT)) {
				RT_TRACE(res);
			}
#endif
			EIF_EXIT_C;
			RTGC;
		}
		EIF_ASYNC_SAFE_MUTEX_UNLOCK(eif_thr_context->children_mutex);
	}
	RT_GC_WEAN(thread_object);

	return (res == T_TIMEDOUT ? EIF_FALSE : EIF_TRUE);
}
1680
1681
1682 rt_public void eif_thr_join (EIF_POINTER tid)
1683 {
1684 /*
1685 * Invokes thr_join, pthread_join, etc.. depending on the platform.
1686 * No such routine exists on VxWorks or Windows, so the Eiffel version
1687 * should be used (ie. `join' <-> eif_thr_wait)
1688 */
1689
1690 if (tid != (EIF_POINTER) 0) {
1691 RT_TRACE(eif_pthread_join((EIF_THR_TYPE) tid));
1692 } else {
1693 eraise ("Trying to join a thread whose ID is NULL", EN_EXT);
1694 }
1695 }
1696
1697
1698 /*
1699 * These three functions are used from Eiffel: they return the default,
1700 * minimum and maximum priority values for the current platform --PCV
1701 */
1702
rt_public EIF_INTEGER eif_thr_default_priority(void) {
	/* Default thread priority value for the current platform. */
	return EIF_DEFAULT_THR_PRIORITY;
}
1706
rt_public EIF_INTEGER eif_thr_min_priority(void) {
	/* Minimum thread priority value for the current platform. */
	return EIF_MIN_THR_PRIORITY;
}
1710
rt_public EIF_INTEGER eif_thr_max_priority(void) {
	/* Maximum thread priority value for the current platform. */
	return EIF_MAX_THR_PRIORITY;
}
1714
1715 /*
1716 * These two functions each return a pointer to respectively the thread-id
1717 * of the current thread and the thread-id of the last created thread.
1718 * They are used from the class THREAD.--PCV
1719 */
1720
rt_public EIF_POINTER eif_thr_thread_id(void) {
	/* Thread identifier of the calling thread, taken from its runtime context. */
	RT_GET_CONTEXT
	return (EIF_POINTER) eif_thr_context->thread_id;
}
1725
rt_public EIF_POINTER eif_thr_last_thread(void) {
	/* Thread identifier of the last thread created by the calling thread.
	 * NOTE(review): `last_child' is presumably per-thread data reached via
	 * RT_GET_CONTEXT — confirm in rt_threads.h. */
	RT_GET_CONTEXT
	return (EIF_POINTER) last_child;
}
1730
1731
1732 /*
1733 * Functions for mutex management:
1734 * - creation, locking, unlocking, non-blocking locking and destruction
1735 */
1736
1737 rt_public EIF_POINTER eif_thr_mutex_create(void) {
1738 EIF_MUTEX_TYPE *a_mutex_pointer;
1739 int res;
1740 RT_TRACE_KEEP(res, eif_pthread_mutex_create(&a_mutex_pointer));
1741 if (res != T_OK) {
1742 eraise ("Cannot create mutex", EN_EXT);
1743 }
1744 return a_mutex_pointer;
1745 }
1746
1747 rt_public void eif_thr_mutex_lock(EIF_POINTER mutex_pointer) {
1748 EIF_MUTEX_TYPE *a_mutex_pointer = (EIF_MUTEX_TYPE *) mutex_pointer;
1749 int res;
1750 RT_TRACE_KEEP(res, eif_pthread_mutex_lock(a_mutex_pointer));
1751 if (res != T_OK) {
1752 eraise ("Cannot lock mutex", EN_EXT);
1753 }
1754 }
1755
1756 rt_public void eif_thr_mutex_unlock(EIF_POINTER mutex_pointer) {
1757 EIF_MUTEX_TYPE *a_mutex_pointer = (EIF_MUTEX_TYPE *) mutex_pointer;
1758 int res;
1759 RT_TRACE_KEEP(res, eif_pthread_mutex_unlock(a_mutex_pointer));
1760 if (res != T_OK) {
1761 eraise ("Cannot unlock mutex", EN_EXT);
1762 }
1763 }
1764
1765 rt_public EIF_BOOLEAN eif_thr_mutex_trylock(EIF_POINTER mutex_pointer) {
1766 EIF_MUTEX_TYPE *a_mutex_pointer = (EIF_MUTEX_TYPE *) mutex_pointer;
1767 int res;
1768 /* We do not use `RT_TRACE_KEEP' because we might get T_BUSY. We will trace the error after. */
1769 res = eif_pthread_mutex_trylock(a_mutex_pointer);
1770 if (res == T_OK) {
1771 return EIF_TRUE;
1772 } else if (res == T_BUSY) {
1773 return EIF_FALSE;
1774 } else {
1775 RT_TRACE(res);
1776 eraise ("Cannot lock mutex", EN_EXT);
1777 return EIF_FALSE;
1778 }
1779 }
1780
/* Dispose of `mutex_pointer'. Failures are only traced, not raised,
 * since destruction is typically performed during cleanup. */
rt_public void eif_thr_mutex_destroy(EIF_POINTER mutex_pointer) {
	RT_TRACE(eif_pthread_mutex_destroy((EIF_MUTEX_TYPE *) mutex_pointer));
}
1784
1785
1786 /*
1787 * class SEMAPHORE externals
1788 */
1789
1790 rt_public EIF_POINTER eif_thr_sem_create (EIF_INTEGER count)
1791 {
1792 EIF_SEM_TYPE *a_sem_pointer;
1793 int res;
1794 RT_TRACE_KEEP(res, eif_pthread_sem_create(&a_sem_pointer, 0, (unsigned int) count));
1795 if (res != T_OK) {
1796 eraise ("Cannot create semaphore", EN_EXT);
1797 }
1798 return (EIF_POINTER) a_sem_pointer;
1799 }
1800
1801 rt_public void eif_thr_sem_wait (EIF_POINTER sem)
1802 {
1803 EIF_SEM_TYPE *a_sem_pointer = (EIF_SEM_TYPE *) sem;
1804 int res;
1805 RT_TRACE_KEEP(res, eif_pthread_sem_wait(a_sem_pointer));
1806 if (res != T_OK) {
1807 eraise ("Cannot wait on semaphore", EN_EXT);
1808 }
1809 }
1810
1811 rt_public void eif_thr_sem_post (EIF_POINTER sem)
1812 {
1813 EIF_SEM_TYPE *a_sem_pointer = (EIF_SEM_TYPE *) sem;
1814 int res;
1815 RT_TRACE_KEEP(res, eif_pthread_sem_post(a_sem_pointer));
1816 if (res != T_OK) {
1817 eraise ("Cannot post on semaphore", EN_EXT);
1818 }
1819 }
1820
1821 rt_public EIF_BOOLEAN eif_thr_sem_trywait (EIF_POINTER sem)
1822 {
1823 EIF_SEM_TYPE *a_sem_pointer = (EIF_SEM_TYPE *) sem;
1824 int res;
1825 /* We do not use `RT_TRACE_KEEP' because we might get T_BUSY. We will trace the error after. */
1826 res = eif_pthread_sem_trywait(a_sem_pointer);
1827 if (res == T_OK) {
1828 return EIF_TRUE;
1829 } else if (res == T_BUSY) {
1830 return EIF_FALSE;
1831 } else {
1832 RT_TRACE(res);
1833 eraise ("Cannot trywait on semaphore", EN_EXT);
1834 return EIF_FALSE;
1835 }
1836 }
1837
/* Dispose of semaphore `sem'. Failures are only traced, not raised,
 * since destruction is typically performed during cleanup. */
rt_public void eif_thr_sem_destroy (EIF_POINTER sem)
{
	RT_TRACE(eif_pthread_sem_destroy((EIF_SEM_TYPE *) sem));
}
1842
1843 /*
1844 * class CONDITION_VARIABLE externals
1845 */
1846
1847 rt_public EIF_POINTER eif_thr_cond_create (void)
1848 {
1849 EIF_COND_TYPE *cond;
1850 int res;
1851 RT_TRACE_KEEP(res, eif_pthread_cond_create (&cond));
1852 if (res != T_OK) {
1853 eraise ("Cannot create cond. variable", EN_EXT);
1854 }
1855 return cond;
1856 }
1857
1858 rt_public void eif_thr_cond_broadcast (EIF_POINTER cond_ptr)
1859 {
1860 EIF_COND_TYPE *cond = (EIF_COND_TYPE *) cond_ptr;
1861 int res;
1862 RT_TRACE_KEEP(res, eif_pthread_cond_broadcast(cond));
1863 if (res != T_OK) {
1864 eraise ("Cannot broadcast on condition variable", EN_EXT);
1865 }
1866 }
1867
1868 rt_public void eif_thr_cond_signal (EIF_POINTER cond_ptr)
1869 {
1870 EIF_COND_TYPE *cond = (EIF_COND_TYPE *) cond_ptr;
1871 int res;
1872 RT_TRACE_KEEP(res, eif_pthread_cond_signal(cond));
1873 if (res != T_OK) {
1874 eraise ("Cannot signal on condition variable", EN_EXT);
1875 }
1876 }
1877
1878 rt_public void eif_thr_cond_wait (EIF_POINTER cond_ptr, EIF_POINTER mutex_ptr)
1879 {
1880 EIF_COND_TYPE *cond = (EIF_COND_TYPE *) cond_ptr;
1881 EIF_MUTEX_TYPE *mutex = (EIF_MUTEX_TYPE *) mutex_ptr;
1882 int res;
1883 RT_TRACE_KEEP(res, eif_pthread_cond_wait(cond, mutex));
1884 if (res != T_OK) {
1885 eraise ("Cannot wait on condition variable", EN_EXT);
1886 }
1887 }
1888
1889 rt_public EIF_INTEGER eif_thr_cond_wait_with_timeout (EIF_POINTER cond_ptr, EIF_POINTER mutex_ptr, EIF_INTEGER a_timeout)
1890 {
1891 EIF_COND_TYPE *cond = (EIF_COND_TYPE *) cond_ptr;
1892 EIF_MUTEX_TYPE *mutex = (EIF_MUTEX_TYPE *) mutex_ptr;
1893 int res;
1894 /* We do not use `RT_TRACE_KEEP' because we might get T_TIMEDOUT. We will trace the error after. */
1895 res = eif_pthread_cond_wait_with_timeout(cond, mutex, (rt_uint_ptr) a_timeout);
1896 if (res != T_OK) {
1897 if (res == T_TIMEDOUT) {
1898 return 0;
1899 } else {
1900 RT_TRACE(res);
1901 eraise ("Cannot wait with timeout on condition variable", EN_EXT);
1902 /* Not reachable. */
1903 return -1;
1904 }
1905 } else {
1906 return 1;
1907 }
1908 }
1909
/* Dispose of condition variable `cond_ptr'. Failures are only traced,
 * not raised, since destruction is typically performed during cleanup. */
rt_public void eif_thr_cond_destroy (EIF_POINTER cond_ptr)
{
	RT_TRACE(eif_pthread_cond_destroy((EIF_COND_TYPE *) cond_ptr));
}
1914
1915 /*
1916 * class READ_WRITE_LOCK externals
1917 */
1918
1919 rt_public EIF_POINTER eif_thr_rwl_create (void)
1920 {
1921 EIF_RWL_TYPE *rwlp;
1922 int res;
1923 RT_TRACE_KEEP(res, eif_pthread_rwlock_create(&rwlp));
1924 if (res != T_OK) {
1925 eraise ("Cannot create rwl variable", EN_EXT);
1926 }
1927 return rwlp;
1928 }
1929
1930 rt_public void eif_thr_rwl_rdlock (EIF_POINTER rwlp_ptr)
1931 {
1932 EIF_RWL_TYPE *rwlp = (EIF_RWL_TYPE *) rwlp_ptr;
1933 int res;
1934 RT_TRACE_KEEP(res,eif_pthread_rwlock_rdlock(rwlp));
1935 if (res != T_OK) {
1936 eraise ("Cannot read lock", EN_EXT);
1937 }
1938 }
1939
1940 rt_public void eif_thr_rwl_wrlock (EIF_POINTER rwlp_ptr)
1941 {
1942 EIF_RWL_TYPE *rwlp = (EIF_RWL_TYPE *) rwlp_ptr;
1943 int res;
1944 RT_TRACE_KEEP(res,eif_pthread_rwlock_wrlock(rwlp));
1945 if (res != T_OK) {
1946 eraise ("Cannot write lock", EN_EXT);
1947 }
1948 }
1949
1950
1951 rt_public void eif_thr_rwl_unlock (EIF_POINTER rwlp_ptr)
1952 {
1953 EIF_RWL_TYPE *rwlp = (EIF_RWL_TYPE *) rwlp_ptr;
1954 int res;
1955 RT_TRACE_KEEP(res,eif_pthread_rwlock_unlock(rwlp));
1956 if (res != T_OK) {
1957 eraise ("Cannot unlock read/write lock", EN_EXT);
1958 }
1959 }
1960
/* Dispose of read/write lock `rwlp_ptr'. Failures are only traced,
 * not raised, since destruction is typically performed during cleanup. */
rt_public void eif_thr_rwl_destroy (EIF_POINTER rwlp_ptr)
{
	RT_TRACE(eif_pthread_rwlock_destroy((EIF_RWL_TYPE *) rwlp_ptr));
}
1965
1966
/* Report a thread-level panic: print a banner on stderr, then delegate
 * to the runtime panic routine `eif_panic' with `msg'. */
rt_public void eif_thr_panic(char *msg)
{
	print_err_msg (stderr, "*** Thread panic! ***\n");
	eif_panic(msg);
}
1972
1973 #endif /* EIF_THREADS */
1974 /*
1975 doc:</file>
1976 */

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.23