/[eiffelstudio]/branches/eth/eve/Src/C/run-time/eif_macros.h
ViewVC logotype

Contents of /branches/eth/eve/Src/C/run-time/eif_macros.h

Parent Directory Parent Directory | Revision Log Revision Log


Revision 94104 - (show annotations)
Mon Jan 27 20:22:39 2014 UTC (5 years, 8 months ago) by wests
File MIME type: text/plain
File size: 78564 byte(s)
Makes it easier to compile with EVEQs
1 /*
2 description: "Macros used by C code at run time."
3 date: "$Date$"
4 revision: "$Revision$"
5 copyright: "Copyright (c) 1985-2013, Eiffel Software."
6 license: "GPL version 2 (see http://www.eiffel.com/licensing/gpl.txt)"
7 licensing_options: "Commercial license is available at http://www.eiffel.com/licensing"
8 copying: "[
9 This file is part of Eiffel Software's Runtime.
10
11 Eiffel Software's Runtime is free software; you can
12 redistribute it and/or modify it under the terms of the
13 GNU General Public License as published by the Free
14 Software Foundation, version 2 of the License
15 (available at the URL listed under "license" above).
16
17 Eiffel Software's Runtime is distributed in the hope
18 that it will be useful, but WITHOUT ANY WARRANTY;
19 without even the implied warranty of MERCHANTABILITY
20 or FITNESS FOR A PARTICULAR PURPOSE.
21 See the GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public
24 License along with Eiffel Software's Runtime; if not,
25 write to the Free Software Foundation, Inc.,
26 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 ]"
28 source: "[
29 Eiffel Software
30 5949 Hollister Ave., Goleta, CA 93117 USA
31 Telephone 805-685-1006, Fax 805-685-6869
32 Website http://www.eiffel.com
33 Customer support http://support.eiffel.com
34 ]"
35 */
36
37 #ifndef _eif_macros_h_
38 #define _eif_macros_h_
39 #if defined(_MSC_VER) && (_MSC_VER >= 1020)
40 #pragma once
41 #endif
42
43 #include "eif_portable.h"
44 #include "eif_equal.h"
45 #include "eif_globals.h"
46 #include "eif_project.h"
47 #include "eif_malloc.h"
48 #include "eif_lmalloc.h"
49 #include "eif_garcol.h"
50 #include "eif_except.h"
51 #include "eif_local.h"
52 #include "eif_copy.h"
53 #include "eif_plug.h" /* For struct bit declaration */
54 #include "eif_hector.h"
55 #include "eif_size.h"
56 #include "eif_gen_conf.h"
57 #include "eif_rout_obj.h"
58 #include "eif_option.h"
59 #include "eif_scoop.h"
60
61 #ifdef SCOOPQS
62 #include "eveqs.h"
63 #endif
64
65 #ifdef WORKBENCH
66 #include "eif_wbench.h"
67 #endif
68
69 #ifdef __cplusplus
70 extern "C" {
71 #endif
72
73 RT_LNK void * eif_pointer_identity (void *);
74
75 /* On a BSD system, we should use _setjmp and _longjmp if they are available,
76 * so that no system call is made to preserve the signal mask flag. It should
77 * be taken care of by the signal handler routine.
78 */
79 #ifdef USE_BSDJMP
80 #ifndef setjmp
81 #define setjmp _setjmp
82 #endif
83 #ifndef longjmp
84 #define longjmp _longjmp
85 #endif
86 #endif
87
88
89
90 /* Convenience macros:
91 * EIF_TRUE representation of True in Eiffel (shouldn't be used in tests, always compare to False)
92 * EIF_FALSE representation of False in Eiffel
93 * EIF_TEST boolean test on an expression
94 */
95 #define EIF_TRUE (EIF_BOOLEAN) '\01'
96 #define EIF_FALSE (EIF_BOOLEAN) '\0'
97 #define EIF_TEST(x) ((EIF_BOOLEAN)((x) ? EIF_TRUE : EIF_FALSE))
98 #define EIF_VOLATILE
99
100 #ifdef WORKBENCH
101 #define EIF_IS_WORKBENCH EIF_TRUE
102 #else
103 #define EIF_IS_WORKBENCH EIF_FALSE
104 #endif
105
106 /* Function pointer call to make sure all arguments are correctly pushed onto stack.
107 * FUNCTION_CAST is for standard C calls.
108 * FUNCTION_CAST_TYPE is for non-standard C calls.
109 */
110 #define FUNCTION_CAST(r_type, arg_types) (r_type (*) arg_types)
111 #define FUNCTION_CAST_TYPE(r_type, call_type, arg_types) (r_type (call_type *) arg_types)
112
113
114 /*
115 * Macro for GC synchronization
116 */
117 #if defined(ISE_GC) && defined(EIF_THREADS)
118 RT_LNK int volatile eif_is_gc_collecting;
119 RT_LNK void eif_synchronize_for_gc(void);
120 RT_LNK int eif_is_in_eiffel_code(void);
121 RT_LNK void eif_enter_eiffel_code(void);
122 RT_LNK void eif_exit_eiffel_code(void);
123 #define RTGC if (eif_is_gc_collecting) eif_synchronize_for_gc()
124 #define EIF_ENTER_EIFFEL eif_enter_eiffel_code()
125 #define EIF_EXIT_EIFFEL eif_exit_eiffel_code()
126 #define EIF_ENTER_C EIF_EXIT_EIFFEL
127 #define EIF_EXIT_C EIF_ENTER_EIFFEL
128 #define EIF_IS_IN_EIFFEL_CODE eif_is_in_eiffel_code()
129 #else
130 #define RTGC
131 #define EIF_ENTER_EIFFEL
132 #define EIF_EXIT_EIFFEL
133 #define EIF_ENTER_C
134 #define EIF_EXIT_C
135 #define EIF_IS_IN_EIFFEL_CODE 1
136 #endif
137
138 /* Function pointer call from C to Eiffel which makes sure that all arguments are correctly
139 * pushed onto stack. It takes care of the synchronization needed in a multithreaded application.
140 * EIFFEL_CALL will call Eiffel procedures `proc_ptr' with `arg_values' using prototype given by
141 * `arg_types'.
142 * EIFFEL_FUNCTION_CALL will call Eiffel functions `fn_ptr' with `arg_values' using prototype given
143 * by `r_type' and `arg_types'.
144 */
145 #if defined(ISE_GC) && defined(EIF_THREADS)
146 #define EIFFEL_CALL(arg_types, proc_ptr, arg_values) \
147 { \
148 EIF_ENTER_EIFFEL; \
149 RTGC; \
150 (FUNCTION_CAST(void, arg_types) proc_ptr) arg_values; \
151 EIF_EXIT_EIFFEL; \
152 }
153 #define EIFFEL_FUNCTION_CALL(r_type, arg_types, target, fn_ptr, arg_values) \
154 {\
155 EIF_ENTER_EIFFEL; \
156 RTGC; \
157 target = (FUNCTION_CAST(r_type, arg_types) fn_ptr) arg_values; \
158 EIF_EXIT_EIFFEL; \
159 }
160 #else
161 #define EIFFEL_CALL(arg_types, proc_ptr, arg_values) \
162 (FUNCTION_CAST(void, arg_types) proc_ptr) arg_values
163
164 #define EIFFEL_FUNCTION_CALL(r_type, arg_types, target, fn_ptr, arg_values) \
165 target = (FUNCTION_CAST(r_type, arg_types) fn_ptr) arg_values
166 #endif
167
168
169 /* Macro used for allocation:
170 * RTLN(x) allocates a new object of dftype 'x'
171 * RTLNS(x,y,z) allocates a new routine object of dftype 'x', dtype 'y' and size 'z'
172 * RTLNT(x) allocates a new tuple object of dftype 'x'
173 * RTLNTS(x,n,a) allocates a new tuple object of dftype 'x', with 'n' elements and is_atomic 'a'
174 * RTLNTY(x) allocates a new TYPE [like x] instance of dftype 'x'
175 * RTLNSMART(x) allocates a new object of dftype 'x'
176 * RTLNR(x,y,a,o,c) allocates a new routine object of type 'x' and
177 * initializes it with the routine pointer 'y', the true routine pointer 'z',
178 * argument tuple 'a', open map 'o' and closed map 'c'
179 * RTLNC(x) creates a new non-initialized instance of 'x'.
180 * RTLNSP(t,n,e,b) allocates a new special array
181 * RTLX(x) allocates an expanded object (with a possible invocation
182 * of the creation routine) of type `x'
183 * RTBU(x) box a basic value stored in EIF_TYPED_VALUE and return EIF_REFERENCE
184 */
185 #define RTLN(x) emalloc(x)
186 #define RTLNS(x,y,z) emalloc_size(x,y,z)
187 #define RTLNT(x) tuple_malloc(x)
188 #define RTLNTS(x,n,a) tuple_malloc_specific(x,n,a)
189 #define RTLNTY(x) eif_type_malloc(x)
190 #define RTLNSMART(x) smart_emalloc(x)
191 #define RTLNRW(a,b,c,d,e,f,g,h,i,j,k,l,m) rout_obj_create_wb((a),(b),(c),(d),(e),(f),(g),(h),(i),(j),(k),(l),(m))
192 #define RTLNRF(a,b,c,d,e,f,g) rout_obj_create_fl((a),(b),(c),(d),(e),(f), (g))
193 #define RTLNC(x) eclone(x)
194 #define RTLNSP2(t,f,n,e,b) special_malloc(f,t,n,e,b)
195 #define RTLNSP(t,f,n,e,b) special_malloc(t | f,n,e,b)
196 #define RTLX(x) cr_exp(x)
197 #define RTBU(x) eif_box(x)
198 #ifdef WORKBENCH
199 #define RTLXI(x) init_exp(x)
200 #else
201 #define RTLXI(x) \
202 { \
203 void *(*cp)(EIF_REFERENCE) = (void *(*) (EIF_REFERENCE)) egc_exp_create [Dtype(x)]; \
204 if (cp) cp(x); \
205 }
206 #endif
207
208 /* Macro used for object cloning:
209 * RTCL(x) clones 'x' and return a pointer to the cloned object
210 * RTCB(x) clones bit `x'
211 * RTRCL(x) same as RTCL, but uses a user-defined version of `copy'
212 * RTCCL(c) same as RTRCL, but first checks if `x' is expanded
213 */
214 #define RTCL(x) rtclone(x)
215 #define RTCCL(x) ((x && eif_is_expanded(HEADER(x)->ov_flags))? RTRCL(x): (x))
216 #ifdef WORKBENCH
217 # define RTRCL(x) ((egc_twin(x)).it_r)
218 #else
219 # define RTRCL(x) egc_twin(x)
220 #endif
221
222
223 /* Macro used for object creation to get the proper creation type:
224 * RTCA(x,y) returns the dynamic type of 'x' if not void, otherwise 'y'
225 */
226 #define RTCA(x,y) ((x) == (EIF_REFERENCE) 0 ? (y) : Dftype(x))
227
228
229
230 /* Macros used for assignments:
231 * RTAG(x) is true if 'x' is an old object not remembered
232 * RTAN(x) is true if 'x' is a new object (i.e. not old)
233 * RTAM(x) memorizes 'x'
234 * RTAR(parent,source) remembers 'parent' if it is old and not remembered and 'source' is new
235 */
236 #ifdef ISE_GC
237 #define RTAG(x) ((HEADER(x)->ov_flags & (EO_OLD | EO_REM)) == EO_OLD)
238 #define RTAN(x) (!(HEADER(x)->ov_flags & EO_OLD))
239 #define RTAM(x) eremb(x)
240 #define RTAR(parent,source) check_gc_tracking(parent,source)
241 #else
242 #define RTAG(x) EIF_FALSE
243 #define RTAN(x) EIF_FALSE
244 #define RTAM(x) (x)
245 #define RTAR(parent,source)
246 #endif
247
248
249
250 /* Macros used by reverse assignments:
251 * RTRC(x,y) is true if type 'y' conforms to type 'x'
252 * RTRA(x,y) calls RTRC(x, Dftype(y)) if 'y' is not void
253 * RTRB(x,y,z,t) assigns value of basic type 't' of 'y' to 'z' if 'y' conforms to type 'x'
254 * RTRE(x,y,z) copies 'y' to 'z' if 'y' conforms to type 'x'
255 * RTRV(x,y) returns 'y' if it conforms to type 'x', void otherwise
256 * RTOB(t,x,y,z,v) assigns value of basic type 't' of 'y' to 'z' if 'y' conforms to type 'x' and sets boolean value to `v' accordingly
257 * RTOE(x,y,z,v) copies 'y' to 'z' if 'y' conforms to type 'x' and sets boolean value to `v' accordingly
258 */
259 #define RTRC(x,y) eif_gen_conf ((y), (x))
260 #define RTRA(x,y) ((y) == (EIF_REFERENCE) 0 ? 0 : RTRC((x),Dftype(y)))
261 #define RTRB(x,y,z,t) { if (RTRA((x),(y))) { z = t (y); } }
262 #define RTRE(x,y,z) { if (RTRA((x),(y))) { RTXA ((y), z); } }
263 #define RTRV(x,y) (RTRA((x),(y)) ? (y) : (EIF_REFERENCE) 0)
264 #define RTOB(t,x,y,z,v) { if (RTRA((x),(y))) { z = t (y); v = EIF_TRUE; } else { v = EIF_FALSE; } }
265 #define RTOE(x,y,z,v) { if (RTRA((x),(y))) { RTXA ((y), z); v = EIF_TRUE; } else { v = EIF_FALSE; } }
266
267
268 /* Macros used for variable initialization:
269 * RTAT(x) true if type 'x' is attached
270 */
271 #define RTAT(x) (eif_is_attached_type(x))
272
273 /* Macros used for local variable management:
274 * RTLI(x) makes room on the stack for 'x' addresses
275 * RTLE restore the previous stack context
276 * RTLD declares the variable used by RTLI/RTLE
277 * RTLR(x,y) register `y' on the stack at position `x'
278 * RTLRC(x) clear stack at position `x'
279 * RTXI(x) makes room on the stack for 'x' addresses when feature has rescue
280 * RTXE restores the current chunk, in case an exception was raised
281 * RTXL saves the current local chunk in case exception is raised
282 * RTYL same as RTXL except that RTXI cannot be applied
283 * RTLXE restores the current local variables chunk, in case an exception was raised
284 * RTLXL saves the current local variables chunk in case exception is raised
285 * RTLXD declares the current local variables chunk in case exception is raised
286 */
287
288 #ifdef ISE_GC
289 #define RTLI(x) \
290 { \
291 if (l >= loc_set.st_top && l + (x) <= loc_set.st_end) \
292 loc_set.st_top += (x); \
293 else { \
294 ol = l; \
295 l = eget(x); \
296 } \
297 }
298 #define RTLE \
299 { \
300 if (ol == (EIF_REFERENCE *) 0) \
301 loc_set.st_top = l; \
302 else { \
303 eback(ol); \
304 } \
305 }
306 #define RTLD \
307 EIF_REFERENCE *l = loc_set.st_top; \
308 EIF_REFERENCE *ol = (EIF_REFERENCE *) 0
309
310 #define RTLR(x,y) l[(x)] = (EIF_REFERENCE) &y
311 #define RTLRC(x) l[(x)] = (EIF_REFERENCE) 0;
312
313 #define RTXI(x) \
314 { \
315 if (l >= loc_set.st_top && l + (x) <= loc_set.st_end) \
316 loc_set.st_top += (x); \
317 else \
318 l = eget(x); \
319 }
320 #define RTXE \
321 loc_set.st_cur = lc; \
322 if (lc) loc_set.st_end = lc->sk_end; \
323 loc_set.st_top = lt
324 #define RTYL \
325 EIF_REFERENCE * volatile lt = loc_set.st_top; \
326 struct stchunk * volatile lc = loc_set.st_cur
327 #define RTXL \
328 EIF_REFERENCE * volatile l = loc_set.st_top; \
329 RTYL
330 #define RTLXD \
331 EIF_TYPED_ADDRESS * EIF_VOLATILE lv
332 #define RTLXL \
333 lv = cop_stack.st_top
334 #define RTLXE \
335 while (cop_stack.st_top != lv) { \
336 RTLO(1); \
337 }
338
339 #else
340 #define RTLI(x)
341 #define RTLE
342 #define RTLD \
343 EIF_REFERENCE ol
344 #define RTLR(x,y)
345 #define RTXI(x)
346 #define RTXE
347 #define RTYL EIF_REFERENCE lt
348 #define RTXL EIF_REFERENCE l
349 #define RTLXD
350 #define RTLXL
351 #define RTLXE
352 #endif
353
354
355
356 /* Macro used to record once functions:
357 * RTOC calls onceset to record the address of Result (static variable)
358 */
359 #define RTOC(x) onceset()
360 #define RTOC_NEW(x) new_onceset((EIF_REFERENCE) &x);
361 #define RTOC_GLOBAL(x) globalonceset((EIF_REFERENCE) &x);
362
363 /* Macros for optimized access to once feature:
364 * RTO_CP - access to once procedure
365 * RTO_CF - access to once function
366 */
367 #define RTO_CP(succeeded,c,a) if (succeeded) { (void) a; } else { c a; }
368 #define RTO_CF(succeeded,result,c,a) ((succeeded)? ((void) a, result) : c a)
369
370 /* Service macros for once routines:
371 * RTO_TRY - try to execute routine body
372 * RTO_EXCEPT - start processing exception
373 * RTO_END_EXCEPT - stop processing exception
374 */
375
376 #define RTO_TRY \
377 { \
378 /* Declare variables for exception handling. */ \
379 struct ex_vect * exvecto; \
380 jmp_buf exenvo; \
381 /* Save stack contexts. */ \
382 RTYD; \
383 /* Record execution vector to catch exception. */ \
384 exvecto = extre (); \
385 if (!setjmp(exenvo)) { \
386 /* Set catch address. */ \
387 exvect->ex_jbuf = &exenvo; \
388 /* Update routine exception vector. */ \
389 exvect = exvecto;
390
391 #define RTO_EXCEPT \
392 /* Remove execution vector to restore */ \
393 /* previous exception catch point. */ \
394 exvect = extrl(); \
395 } else { \
396 /* Exception occurred. */
397
398 #define RTO_END_EXCEPT \
399 /* Restore stack contexts. */ \
400 RTXSC; \
401 } \
402 }
403
404 #ifdef WORKBENCH
405
406 /* Macros for once routine indexes:
407 * RTOIN - name of a variable that keeps index of a once result
408 * RTOID - declaration of index into once routine result table
409 */
410 #define RTOIN(name) CAT2(name,_index)
411
412 #define RTOID(name) static ONCE_INDEX RTOIN(name);
413
414 #else
415
416 /* Macros for once routine fields:
417 * RTOFN - name of a field for once feature with the given code index
418 */
419
420 #define RTOFN(code_index,field_name) CAT3(o,code_index,field_name)
421
422 #endif /* WORKBENCH */
423
424 /* Macros for single-threaded once routines:
425 * RTOSR - result field name (for once functions)
426 * RTOSHP - declaration of variables for procedure
427 * RTOSHF - declaration of variables for function
428 * RTOSDP - definition of variables for procedure
429 * RTOSDF - definition of variables for function
430 * RTOSC - implementation of a constant attribute body
431 * RTOSCP - optimized direct call to procedure
432 * RTOSCF - optimized direct call to function
433 * RTOSP - prologue for a single-threaded once routine
434 * RTOSE - epilogue for a single-threaded once routine
435 */
436
437 #define RTOSR(code_index) \
438 RTOFN(code_index,_result)
439
440 #define RTOSHP(code_index) \
441 extern EIF_BOOLEAN RTOFN(code_index,_done); \
442 extern EIF_OBJECT RTOFN(code_index,_failed); \
443 extern EIF_BOOLEAN RTOFN(code_index,_succeeded);
444
445 #define RTOSHF(type, code_index) \
446 RTOSHP(code_index) \
447 extern type RTOSR(code_index);
448
449 #define RTOSDP(code_index) \
450 EIF_BOOLEAN RTOFN(code_index,_done) = EIF_FALSE; \
451 EIF_OBJECT RTOFN(code_index,_failed) = NULL; \
452 EIF_BOOLEAN RTOFN(code_index,_succeeded) = EIF_FALSE;
453
454 #define RTOSDF(type, code_index) \
455 RTOSDP(code_index) \
456 type RTOSR(code_index) = (type) 0;
457
458 #define RTOSCP(n,c,a) RTO_CP(RTOFN(n,_succeeded),c,a)
459 #define RTOSCF(n,c,a) RTO_CF(RTOFN(n,_succeeded),RTOFN(n,_result),c,a)
460
461 #define RTOSP(code_index) \
462 /* Check if evaluation has succeeded. */ \
463 /* If yes, skip any other calculations. */ \
464 if (!RTOFN(code_index,_succeeded)) { \
465 /* Check if evaluation is started earlier. */ \
466 /* If yes, evaluation is completed. */ \
467 if (!RTOFN(code_index,_done)) { \
468 /* Evaluation has not been started yet. */ \
469 /* Start it now. */ \
470 RTOFN(code_index,_done) = EIF_TRUE; \
471 /* Try to execute routine body. */ \
472 RTO_TRY
473
474 #define RTOSE(code_index) \
475 /* Record successful execution result. */ \
476 RTOFN(code_index,_succeeded) = EIF_TRUE; \
477 /* Catch exception. */ \
478 RTO_EXCEPT \
479 /* Handle exception. */ \
480 /* Record exception for future use. */ \
481 RTOFN(code_index,_failed) = eif_protect(RTLA); \
482 RTO_END_EXCEPT \
483 /* Propagate the exception if any. */ \
484 if (RTOFN(code_index,_failed)) { \
485 if (eif_access(RTOFN(code_index,_failed))) { \
486 ereturn (); \
487 } \
488 } \
489 } else { \
490 /* Raise the saved exception if any. */ \
491 if (RTOFN(code_index,_failed)) { \
492 if (eif_access(RTOFN(code_index,_failed))) { \
493 oraise (eif_access(RTOFN(code_index,_failed))); \
494 } \
495 } \
496 } \
497 }
498
499 #define RTOSC(code_index, value) \
500 if (!RTOFN (code_index,_succeeded)) { \
501 RTOC_NEW (RTOSR (code_index)); \
502 RTOSR (code_index) = (value); \
503 RTOFN (code_index,_succeeded) = EIF_TRUE; \
504 } \
505 return RTOSR (code_index);
506
507 /* Macros for thread-relative once routines:
508 * RTOTS - stores index of a once routine into index variable given its code index
509 * RTOTDB, RTOUDB - declaration and initialization of variables for once function returning basic type
510 * RTOTDR, RTOUDR - declaration and initialization of variables for once function returning reference
511 * RTOTDV, RTOUDV - declaration and initialization of variables for once procedure
512 * RTOTW - stores in a list the body id of the just called once routine
513 * RTOTRB - declaration of a result macro for once function returning basic type
514 * RTOTRR - declaration of a result macro for once function returning reference
515 * RTOUC - implementation of a constant attribute
516 * RTOUCP - optimized direct call to procedure
517 * RTOUCB - optimized direct call to function returning basic type
518 * RTOUCR - optimized direct call to function returning reference type
519 * RTOTP - prologue for a thread-relative once routine
520 * RTOTE - epilogue for a thread-relative once routine
521 */
522
523 #ifdef WORKBENCH
524
525 #define RTOTS(code_index, name) \
526 RTOIN(name) = once_index (code_index);
527
528 #define RTOTDV(name) \
529 MTOT OResult = (MTOT) MTOI(RTOIN(name));
530
531 #define RTOTDB(type, name) \
532 RTOTDV(name) \
533 if (!MTOD(OResult)) { \
534 MTOP(type, OResult, (type) 0); \
535 }
536
537 #define RTOTDR(name) \
538 RTOTDV(name) \
539 if (!MTOD(OResult)) { \
540 MTOP(EIF_REFERENCE, OResult, RTOC(0)); \
541 MTOE(OResult, RTOC(0)); \
542 }
543
544 #define RTOTW(body_id)
545
546 #define RTOTC(name, body_id, v) \
547 RTOTDV(name) \
548 EIF_REFERENCE * PResult = MTOR(EIF_REFERENCE,OResult); \
549 EIF_TYPED_VALUE r; \
550 r.type = SK_REF; \
551 if (PResult) { \
552 r.it_r = *PResult; \
553 return r; \
554 } \
555 MTOP(EIF_REFERENCE, OResult, RTOC(0)); \
556 MTOE(OResult, RTOC(0)); \
557 MTOM(OResult); \
558 r.it_r = RTOTRR = v; \
559 return r;
560
561 #define RTOTOK
562
563 #elif defined EIF_THREADS
564
565 #define RTOTOK OResult->succeeded = EIF_TRUE;
566
567 #define RTOUCP(once_index,c,a) RTO_CP(MTOI(once_index)->succeeded,c,a)
568 #define RTOUCB(type,once_index,c,a) RTO_CF(MTOI(once_index)->succeeded,MTOR(type,MTOI(once_index)),c,a)
569 #define RTOUCR(once_index,c,a) RTO_CF(MTOI(once_index)->succeeded,*MTOR(EIF_REFERENCE,MTOI(once_index)),c,a)
570
571 #define RTOUDV(once_index) \
572 MTOT OResult = (MTOT) MTOI(once_index);
573
574 #define RTOUDB(type, once_index) \
575 RTOUDV(once_index) \
576 if (!MTOD(OResult)) { \
577 MTOP(type, OResult, (type) 0); \
578 }
579
580 #define RTOUDR(once_index) \
581 RTOUDV(once_index) \
582 if (!MTOD(OResult)) { \
583 MTOP(EIF_REFERENCE, OResult, RTOC(0)); \
584 MTOE(OResult, RTOC(0)); \
585 }
586
587 #define RTOUC(once_index, value) \
588 RTOUDV(once_index) \
589 EIF_REFERENCE Result; \
590 EIF_REFERENCE * PResult = MTOR(EIF_REFERENCE,OResult); \
591 if (PResult) { \
592 Result = *PResult; \
593 } \
594 else { \
595 PResult = RTOC(0); \
596 MTOP(EIF_REFERENCE,OResult,PResult); \
597 Result = (value); \
598 *PResult = Result; \
599 MTOM(OResult); \
600 RTOTOK \
601 } \
602 return Result;
603
604 #endif /* WORKBENCH */
605
606 #define RTOTRB(type) MTOR(type,OResult)
607 #define RTOTRR (*MTOR(EIF_REFERENCE,OResult))
608
609 #define RTOTP \
610 /* Check if evaluation is started earlier. */ \
611 /* If yes, evaluation is completed. */ \
612 if (!MTOD(OResult)) { \
613 /* Evaluation has not been started yet. */ \
614 /* Start it now. */ \
615 MTOM(OResult); \
616 /* Try to execute routine body. */ \
617 RTO_TRY
618
619 #define RTOTE \
620 /* Record that execution completed successfully */ \
621 RTOTOK \
622 /* Catch exception. */ \
623 RTO_EXCEPT \
624 /* Handle exception. */ \
625 /* Record exception for future use. */ \
626 MTOE(OResult, RTOC(0)); \
627 MTOEV(OResult, RTLA); \
628 RTO_END_EXCEPT \
629 /* Propagate the exception if any. */ \
630 if (MTOF(OResult)) { \
631 if (*MTOF(OResult)) { \
632 ereturn (); \
633 } \
634 } \
635 } else { \
636 /* Raise the saved exception if any.*/ \
637 if (MTOF(OResult)) { \
638 if (*MTOF(OResult)) { \
639 oraise (*MTOF(OResult)); \
640 } \
641 } \
642 }
643
644 #ifdef EIF_THREADS
645
646 /* Service macros for process-relative once routines:
647 * RTOPL - lock a mutex of a once routine
648 * RTOPLT - try to lock a mutex of a once routine
649 * RTOPLU - unlock a mutex of a once routine
650 * RTOPMBW - write memory barrier when available
651 * RTOPW - let thread that started once evaluation to complete
652 * RTOPLP - once prologue that is executed with locked mutex
653 * RTOPLE - once epilogue that is executed with locked mutex
654 * RTOPRE - raise previously recorded exception
655 */
656
657 #ifdef EIF_HAS_MEMORY_BARRIER
658 # define RTOPMBW EIF_MEMORY_WRITE_BARRIER
659 #else
660 # define RTOPMBW
661 #endif
662
663 #define RTOPL(mutex) \
664 EIF_ENTER_C; \
665 eif_thr_mutex_lock (mutex); \
666 EIF_EXIT_C; \
667 RTGC
668
669 #define RTOPLT(mutex) \
670 eif_thr_mutex_trylock (mutex)
671
672 #define RTOPLU(mutex) \
673 eif_thr_mutex_unlock (mutex)
674
675 #define RTOPW(mutex, thread_id) \
676 /* Once routine evaluation has been started. */ \
677 /* To wait until evaluation is completed, */ \
678 /* it's enough to lock and unlock a mutex, */ \
679 /* then the mutex should be recursive. */ \
680 /* Recursive mutexes are not in POSIX standard, */ \
681 /* so they should be emulated by checking thread id. */ \
682 /* Check what thread performs evaluation. */ \
683 if (thread_id != eif_thr_thread_id()) { \
684 /* Evaluation is performed by a different thread. */ \
685 /* Wait until it completes evaluation. */ \
686 RTOPL (mutex); \
687 RTOPLU (mutex); \
688 }
689
690 #define RTOPLP(started, thread_id) \
691 /* Check if some thread started evaluation earlier. */ \
692 /* If yes, evaluation is completed. */ \
693 if (!started) { \
694 /* Evaluation has not been started yet. */ \
695 /* Record thread id and start evaluation. */ \
696 thread_id = eif_thr_thread_id(); \
697 started = EIF_TRUE; \
698 /* Try to execute routine body. */ \
699 RTO_TRY
700
701 #define RTOPLE(completed,failed,thread_id) \
702 /* Catch exception. */ \
703 RTO_EXCEPT \
704 /* Handle exception. */ \
705 /* Record exception for future use. */ \
706 RTOC_GLOBAL(failed); \
707 failed = RTLA; \
708 RTO_END_EXCEPT \
709 /* Clear field that holds locking thread id. */ \
710 thread_id = NULL; \
711 /* Ensure memory is flushed (if required). */ \
712 RTOPMBW; \
713 /* Mark evaluation as completed. */ \
714 completed = EIF_TRUE; \
715 }
716
717 #define RTOPRE(failed) \
718 if (failed) { \
719 oraise (failed); \
720 }
721
722 #ifdef EIF_HAS_MEMORY_BARRIER
723
724 # define RTOFP(started, completed, mutex, thread_id) \
725 EIF_MEMORY_READ_BARRIER; \
726 if (!completed) { \
727 /* Once evaluation is not completed yet. */ \
728 /* Check whether once evaluation has been started. */ \
729 if (started) { \
730 /* Evaluation has been started. */ \
731 /* Let it complete. */ \
732 RTOPW(mutex, thread_id); \
733 } \
734 else { \
735 /* Current thread has not started evaluation. */ \
736 /* It's safe to lock a mutex. */ \
737 RTOPL (mutex); \
738 /* Use thread-safe prologue. */ \
739 RTOPLP (started, thread_id);
740
741 # define RTOFE(completed, failed, mutex, thread_id) \
742 /* Use thread-safe epilogue. */ \
743 RTOPLE (completed, failed, thread_id); \
744 /* Unlock mutex. */ \
745 RTOPLU (mutex); \
746 /* Propagate the exception if any. */ \
747 if (failed) { \
748 ereturn (); \
749 } \
750 } \
751 } else { \
752 /* Raise the saved exception if any. */ \
753 RTOPRE(failed); \
754 }
755
756 #else /* !defined(EIF_HAS_MEMORY_BARRIER) */
757
758 # define RTOFP(started, completed, mutex, thread_id) \
759 /* Try to lock a mutex. */ \
760 if (RTOPLT (mutex)) { \
761 /* Mutex has been locked. */ \
762 /* Check if once evaluation has been completed. */ \
763 if (!completed) { \
764 /* Evaluation is not completed. */ \
765 /* Use thread-safe prologue. */ \
766 RTOPLP (started, thread_id);
767
768 # define RTOFE(completed, failed, mutex, thread_id) \
769 /* Use thread-safe epilogue. */ \
770 RTOPLE (completed, failed, thread_id); \
771 /* Unlock mutex. */ \
772 RTOPLU (mutex); \
773 /* Propagate the exception if any.*/ \
774 if (failed) { \
775 ereturn (); \
776 } \
777 } else { \
778 /* Unlock mutex. */ \
779 RTOPLU (mutex); \
780 /* Raise the saved exception if any.*/ \
781 RTOPRE(failed); \
782 } \
783 } \
784 else { \
785 /* Mutex cannot be locked. */ \
786 /* Evaluation has been started. */ \
787 /* Let it complete. */ \
788 RTOPW (mutex, thread_id); \
789 } \
790
791 #endif /* EIF_HAS_MEMORY_BARRIER */
792
793 #ifdef WORKBENCH
794 /* Main macros for process-relative once routines in workbench mode:
795 * RTOQS - stores index of a once routine into index variable given its code index
796 */
797
798 #define RTOQS(code_index, name) \
799 RTOIN(name) = process_once_index (code_index);
800
801 #define RTOQDV(name) \
802 EIF_process_once_value_t * POResult = \
803 EIF_process_once_values + RTOIN(name); \
804 MTOT OResult = &(POResult -> value); \
805 MTOE(OResult, &(POResult -> exception));
806
807 #define RTOQDB(type, name) \
808 RTOQDV(name)
809
810 #define RTOQDR(name) \
811 RTOQDV(name) \
812 MTOP(EIF_REFERENCE, OResult, &(POResult -> reference)); \
813
814 #define RTOQRB(type) MTOR(type,OResult)
815 #define RTOQRR (*MTOR(EIF_REFERENCE,OResult))
816
817 #define RTOQP \
818 RTOFP ( \
819 POResult -> value.done, \
820 POResult -> completed, \
821 POResult -> mutex, \
822 POResult -> thread_id \
823 )
824
825 #define RTOQE \
826 RTOFE ( \
827 POResult -> completed, \
828 *(POResult -> value.exception), \
829 POResult -> mutex, \
830 POResult -> thread_id \
831 )
832
833 #else
834
835 /* Main macros for process-relative once routines in finalized mode:
836 * RTOPH - declaration of variables (that is used to refer to the variables)
837 * RTOPD - definition of variables
838 * RTOPI - initialization of variables
839 * RTOPP - prologue
840 * RTOPE - epilogue */
841
842 #define RTOPR(code_index) \
843 RTOFN(code_index,_result)
844
845 #define RTOPHP(code_index) \
846 extern EIF_BOOLEAN RTOFN(code_index,_started); \
847 extern EIF_BOOLEAN RTOFN(code_index,_completed); \
848 extern EIF_REFERENCE RTOFN(code_index,_failed); \
849 extern EIF_MUTEX_TYPE * RTOFN(code_index,_mutex); \
850 extern EIF_POINTER RTOFN(code_index,_thread_id);
851
852 #define RTOPHF(type, code_index) \
853 RTOPHP(code_index) \
854 extern type RTOPR(code_index);
855
856 #define RTOPDP(code_index) \
857 volatile EIF_BOOLEAN RTOFN(code_index,_started) = EIF_FALSE; \
858 volatile EIF_BOOLEAN RTOFN(code_index,_completed) = EIF_FALSE; \
859 volatile EIF_REFERENCE RTOFN(code_index,_failed) = NULL; \
860 volatile EIF_MUTEX_TYPE * RTOFN(code_index,_mutex) = NULL; \
861 volatile EIF_POINTER RTOFN(code_index,_thread_id) = NULL;
862
863 #define RTOPDF(type, code_index) \
864 RTOPDP(code_index) \
865 volatile type RTOPR(code_index) = (type) 0;
866
867 #define RTOPI(code_index) \
868 RTOFN(code_index,_mutex) = eif_thr_mutex_create ();
869
870 #define RTOPP(code_index) \
871 RTOFP ( \
872 RTOFN(code_index,_started), \
873 RTOFN(code_index,_completed), \
874 RTOFN(code_index,_mutex), \
875 RTOFN(code_index,_thread_id) \
876 )
877
878 #define RTOPE(code_index) \
879 RTOFE ( \
880 RTOFN(code_index,_completed), \
881 RTOFN(code_index,_failed), \
882 RTOFN(code_index,_mutex), \
883 RTOFN(code_index,_thread_id) \
884 )
885
886 #ifdef EIF_HAS_MEMORY_BARRIER
887
888 #define RTOPCP(code_index,c,a) \
889 EIF_MEMORY_READ_BARRIER; \
890 RTO_CP( \
891 RTOFN(code_index,_completed) && (!RTOFN(code_index,_failed)), \
892 c, \
893 a \
894 )
895
896 #define RTOPCF(code_index,c,a) \
897 ((void) EIF_MEMORY_READ_BARRIER, \
898 RTO_CF( \
899 RTOFN(code_index,_completed) && (!RTOFN(code_index,_failed)), \
900 RTOPR(code_index), \
901 c, \
902 a \
903 ))
904
905 #else
906
907 #define RTOPCP(code_index,c,a) {c a;}
908 #define RTOPCF(code_index,c,a) (c a)
909
910 #endif /* EIF_HAS_MEMORY_BARRIER */
911
912 #endif /* WORKBENCH */
913
914 #endif /* EIF_THREADS */
915
916
917 /* Macro used for object information:
918 * Dtype: Dynamic type of object. The name is not RTDT for historical reasons.
919 * Dftype: Full dynamic type of object - for generic conformance
920 * RT_DFS(x,y): Set dynamic type and full dynamic type `y' to overhead `x'.
921 * To_dtype: Convert a Full dynamic type to a dynamic type
922 * RTCDT: Compute `dtype' of Current
923 * RTCDD: Declare `dtype' for later computation of Dtype of Current.
924 */
925
926 #define Dftype(x) (HEADER(x)->ov_dftype)
927 #define Dtype(x) (HEADER(x)->ov_dtype)
928 #define To_dtype(t) (eif_cid_map[t])
929
930 #define RT_DFS(x,y) ((x)->ov_dftype = y, (x)->ov_dtype = To_dtype(y))
931 #define RTCDT EIF_TYPE_INDEX EIF_VOLATILE dtype = Dtype(Current)
932 #define RTCDD EIF_TYPE_INDEX EIF_VOLATILE dtype
933 #define RTCFDT EIF_TYPE_INDEX EIF_VOLATILE dftype = Dftype(Current)
934 #define RTCFDD EIF_TYPE_INDEX EIF_VOLATILE dftype
935
936 /* If call on void target are detected, we use RTCV to perform the check. Unlike the workbench
937 * mode, we won't know the message of the call as it would require too much data to be generated. */
938 #if defined(WORKBENCH) || !defined(EIF_NO_RTCV)
939 #define RTCV(x) eif_check_call_on_void_target(x)
940 #else
941 #define RTCV(x) (x)
942 #endif
943
944 /* Detect catcall at runtime for argument 'o' at position 'i' for feature 'f' in dtype 'd'
945 * and expected dftype 't'. */
946 #define RTCC(o,d,f,i,t) eif_check_catcall_at_runtime(o,d,f,i,t)
947
948
/* Macros for assertion checking:
 * RTCT(t,x) next assertion has tag 't' and is of type 'x'
 * RTCK signals successful end of last assertion check
 * RTJB goto body
 * RTCF signals failure during last assertion check
 * RTTE tests assertion
 * RTIT(t,x) next invariant assertion has tag 't' and is of type 'x'
 * RTVR(x,y) check if call of feature 'y' on 'x' is done on a void reference
 */
#define RTCT0(t,x) exasrt(t, x)
#define RTIT0(t,x) exinv(t, x)
#define RTCK0 expop(&eif_stack)
#define RTCF0 eviol()

/* The non-0 variants additionally maintain `in_assertion': ~0 (all bits set)
 * while an assertion is being evaluated, 0 once it completed or failed. */
#define RTCT(t,x) RTCT0(t,x); in_assertion = ~0
#define RTIT(t,x) RTIT0(t,x); in_assertion = ~0
#define RTCK in_assertion = 0; RTCK0
#define RTCF in_assertion = 0; RTCF0
#define RTTE(x,y) if (!(x)) goto y
#define RTJB goto body
#ifdef WORKBENCH
#define RTVR(x,y) if ((x) == (EIF_REFERENCE) 0) eraise(y, EN_VOID)
#endif

/* Obsolete for backward compatibility */
#define RTCS(x) RTCT(NULL,x)
#define RTIS(x) RTIT(NULL,x)
976
977
978
/* Macros for exception handling:
 * RTEX declares the exception vector variable for current routine
 * RTED declares the setjmp buffer, saved_assertion and saved_except
 * RTES issues the setjmp call for remote control transfer via longjmp
 * RTEJ sets the exception handling mechanism (must appear only once)
 * RTEA(x,y,z) signal entry in routine 'x', origin 'y', object ID 'z'
 * RTEAA(x,y,z,i,j,b) signal entry in routine 'x', origin 'y', object ID 'z', locnum 'i', argnum 'j', body id 'b'
 * RTEAINV(x,y,z,i,b) signal entry in _invariant routine 'x', origin 'y', object ID 'z', locnum 'i', body id 'b'
 * RTEV signals entry in routine (simply gets an execution vector)
 * RTET(t,x) raises an exception tagged 't' whose code is 'x'
 * RTEC(x) raises an exception whose code is 'x'
 * RTEE exits the routine by removing the execution vector from stack
 * RTER retries the routine
 * RTEU enters in rescue clause
 * RTEF ends the rescue clause
 * RTXD declares the variables used to save the run-time stacks context
 * RTYD same as RTXD except that RTXI cannot be used
 * RTXSC resynchronizes the run-time stacks in a pseudo rescue clause in C
 * RTXS(x) resynchronizes the run-time stacks in a rescue clause
 * RTEOK ends a routine with a rescue clause by cleaning the trace stack
 * RTMD(x) Stops monitoring (profile or tracing) for routine context 'y' (i.e. normal vs external)
 * RTLA Last exception from EXCEPTION_MANAGER
 * RTCO(x) Check if x is NULL, if not raise an OLD_VIOLATION
 * RTE_T start try block (for body)
 * RTE_E start except block (for rescue)
 * RTE_EE end except block
 * RTE_OT start try block of old expression evaluation
 * RTE_O end of try block of old expression evaluation
 * RTE_OE local rescue, recording possible exception for later use
 * RTE_OTD Declare old stack vector and push it.
 * RTE_OP Pop old stack vector.
 * RTDBGE,RTDBGL are declared in eif_debug.h
 */
#define RTED jmp_buf exenv; int EIF_VOLATILE saved_assertion = in_assertion
#define RTES if (setjmp(exenv)) goto rescue
#define RTEA(x,y,z) exvect = new_exset(MTC x, y, z, 0, 0, 0)
#define RTEV exvect = exft()
#define RTET(t,x) eraise(t,x)
#define RTEC(x) RTET((EIF_REFERENCE) 0,x)
#define RTSO check_options_stop(0)
#define RTMD(x) check_options_stop(x)
#define RTLA last_exception()
#define RTCO(x) chk_old(x)

#ifdef WORKBENCH
	/* Workbench versions also maintain the debugger call stack depth
	 * (`db_cstack' / `d_data.db_callstack_depth') and the profiler stack. */
#define RTEX struct ex_vect * EIF_VOLATILE exvect; uint32 EIF_VOLATILE db_cstack
#define RTEAA(x,y,z,i,j,b) exvect = new_exset(x, y, z,i,j,b); db_cstack = ++d_data.db_callstack_depth;
#define RTDBGEAA(y,z,b) RTDBGE(y,b,z,db_cstack);
#define RTDBGLE RTDBGL(exvect->ex_orig,exvect->ex_bodyid,exvect->ex_id,db_cstack);

#define RTEE d_data.db_callstack_depth = --db_cstack; expop(&eif_stack)
#define RTEOK d_data.db_callstack_depth = --db_cstack; exok ()

#define RTEJ current_call_level = trace_call_level; \
	if (prof_stack) saved_prof_top = prof_stack->st_top; \
	start: exvect->ex_jbuf = &exenv; RTES

#define RTEU d_data.db_callstack_depth = db_cstack; exresc(MTC exvect); \
	RTDBGR(exvect->ex_orig,exvect->ex_bodyid,exvect->ex_id,db_cstack);

	/* RTE_T opens a brace that RTE_E's `} else {' and RTE_EE's `}' close:
	 * the three macros must always be used together in the generated code. */
#define RTE_T \
	current_call_level = trace_call_level; \
	if (prof_stack) saved_prof_top = prof_stack->st_top; \
	saved_except = RTLA; \
	start: exvect->ex_jbuf = &exenv; \
	if (!setjmp(exenv)) {

#else
#define RTEX struct ex_vect * EIF_VOLATILE exvect
#define RTEAA(x,y,z,i,j,b) exvect = new_exset(x, y, z, 0, 0, 0)
#define RTDBGEAA(y,z,b)
#define RTEE expop(&eif_stack)
#define RTEOK exok ()
#define RTEJ start: exvect->ex_jbuf = &exenv; RTES
#define RTEU exresc(MTC exvect)

#define RTE_T \
	saved_except = RTLA; \
	start: exvect->ex_jbuf = &exenv; \
	if (!setjmp(exenv)) {

#endif

#define RTEAINV(x,y,z,i,b) RTEAA(x,y,z,i,0,b); exvect->ex_is_invariant = 1; /* argnum = 0 for _invariant */

	/* Retry: restore the assertion flag saved by RTED and jump back to the
	 * `start' label planted by RTEJ / RTE_T. */
#define RTER in_assertion = saved_assertion; \
	exvect = exret(exvect); goto start
#define RTEF exfail()
#define RTXS(x) RTXSC; RTXI(x)
#define RTXSC RTXE; RTHS; RTLS
#define RTXD RTXL; RTXH; RTXLS
#define RTYD RTYL; RTXH; RTXLS

#define RTE_E \
	} else { \
	RTEU;
#define RTE_EE \
	RTEF; \
	} \
	set_last_exception (saved_except);
1079
1080
/* New debugger hooks: breakpoint/stepping support in workbench mode,
 * plain line-number bookkeeping in the execution vector otherwise. */
#ifdef WORKBENCH
#define RTLU(x,y) insert_local_var (x, (void *) y)
#define RTLO(n) clean_local_vars (n)
#define RTHOOK(n) dstop (exvect, n);
#define RTNHOOK(n,m) dstop_nested (exvect, n, m);
#else
	/* No debugger in finalized mode: local-variable tracking is a no-op. */
#define RTLU(x,y)
#define RTLO(n)
#define RTHOOK(n) exvect->ex_linenum = n; exvect->ex_bpnested = 0;
#define RTNHOOK(n,m) exvect->ex_bpnested = m;
#endif

/* Old expression evaluation: RTE_OT/RTE_O/RTE_OE form one brace-matched
 * construct around the evaluation of an `old' expression, with a local
 * setjmp so a failing evaluation can be recorded instead of propagating. */
#define RTE_OT { \
	RTE_OTD; \
	if (!setjmp(exenv_o)) {
#define RTE_O \
	RTE_OP; \
	} else {
#define RTE_OE \
	} \
	}

#define RTE_OTD \
	jmp_buf exenv_o; \
	struct ex_vect * EIF_VOLATILE exvect_o; \
	exvect_o = exold (); \
	exvect_o->ex_jbuf = &exenv_o
#define RTE_OP \
	expop(&eif_stack)
1112
/* Hector protection for external calls:
 * RTHP(x) protects 'x' returns Hector indirection pointer
 * RTHF(x) removes 'x' topmost entries from Hector stack
 * RTXH saves hector's stack context in case an exception occurs
 * RTHS resynchronizes the hector stack by restoring saved context
 */
#ifdef ISE_GC
#define RTHP(x) hrecord(x)
#define RTHF(x) epop(&hec_stack, x)
	/* `volatile' locals: they must survive a longjmp back into this frame. */
#define RTXH \
	EIF_REFERENCE * volatile ht = hec_stack.st_top; \
	struct stchunk * volatile hc = hec_stack.st_cur
	/* Restore saved top/chunk; if nothing was saved (ht null) but the stack
	 * is non-empty, rewind it to the first chunk. */
#define RTHS \
	if (ht){ \
		hec_stack.st_cur = hc; \
		if (hc) hec_stack.st_end = hc->sk_end; \
		hec_stack.st_top = ht; \
	}else if (hec_stack.st_top) { \
		hec_stack.st_cur = hec_stack.st_hd; \
		hec_stack.st_top = hec_stack.st_cur->sk_arena; \
		hec_stack.st_end = hec_stack.st_cur->sk_end; \
	}
#else
	/* Without the ISE garbage collector there is nothing to protect. */
#define RTHP(x) (x)
#define RTHF(x)
#define RTXH \
	EIF_REFERENCE ht
#define RTHS
#endif

/* Loc stack protection:
 * RTXLS saves loc_stack context in case an exception occurs
 * RTLS resynchronizes the loc_stack by restoring saved context
 * Same save/restore scheme as RTXH/RTHS above, applied to `loc_stack'.
 */
#ifdef ISE_GC
#define RTXLS \
	EIF_REFERENCE * volatile lst = loc_stack.st_top; \
	struct stchunk * volatile lsc = loc_stack.st_cur
#define RTLS \
	if (lst){ \
		loc_stack.st_cur = lsc; \
		if (lsc) loc_stack.st_end = lsc->sk_end; \
		loc_stack.st_top = lst; \
	}else if (loc_stack.st_top) { \
		loc_stack.st_cur = loc_stack.st_hd; \
		loc_stack.st_top = loc_stack.st_cur->sk_arena; \
		loc_stack.st_end = loc_stack.st_cur->sk_end; \
	}
#else
#define RTXLS \
	EIF_REFERENCE lst
#define RTLS
#endif
1166
1167
1168
/* Other macros used to handle specific needs:
 * RTMS(s) creates an Eiffel string from a C manifest string s.
 * RTMS_EX(s,c) creates an Eiffel string from a C manifest string s of length c.
 * RTMS_EX_H(s,c,h) creates an Eiffel string from a C manifest string s of length c and hash-code h.
 * RTMS32_EX_H(s,c,h) creates an STRING_32 from a C manifest string s of length c and hash-code h.
 * RTMS_EX_O(s,c,h) creates an Eiffel string in heap for old objects from a C manifest string s of length c and hash-code h.
 * RTOMS(b,n) a value of a once manifest string object for routine body index `b' and number `n'.
 * RTDOMS(b,m) declares a field to store once manifest string objects for routine body index `b' and number `m' of such objects.
 * RTEOMS(b,m) "extern" reference to the field declared by RTDOMS.
 * RTAOMS(b,m) allocates memory to store at least `m' once manifest strings for routine body index `b'.
 * RTPOMS(b,n,s,c,h) stores a new once manifest string object of value `s', length `c' and hash-code `h' for body index `b' and number `n' if such object is not already created.
 * RTCOMS(r,b,n,s,c,h) does the same as RTPOMS, but also puts the corresponding object into `r'.
 * RTPOF(p,o) returns the C pointer of the address p + o where p represents a C pointer.
 * RTST(c,d,i,n) creates an Eiffel ARRAY[ANY] (for strip).
 * RTXA(x,y) copies 'x' into expanded 'y' with exception if 'x' is void.
 * RTEQ(x,y) returns true if 'x' = 'y'
 * RTOF(x) returns the offset of expanded 'x' within enclosing object
 * RTEO(x) returns the address of the enclosing object for expanded 'x'
 */
#define RTMS(s) makestr_with_hash(s,strlen(s),0)
#define RTMS_EX(s,c) makestr_with_hash(s,c,0)
#define RTMS_EX_H(s,c,h) makestr_with_hash(s,c,h)
#define RTMS_EX_O(s,c,h) makestr_with_hash_as_old(s,c,h)

#define RTMS32(s) makestr32_with_hash(s,strlen(s),0)
#define RTMS32_EX(s,c) makestr32_with_hash(s,c,0)
#define RTMS32_EX_H(s,c,h) makestr32_with_hash(s,c,h)
#define RTMS32_EX_O(s,c,h) makestr32_with_hash_as_old(s,c,h)

#if defined(WORKBENCH) || defined(EIF_THREADS)
	/* Once manifest strings live in a per-body table `EIF_oms'. */
#define RTOMS(b,n) (EIF_oms[(b)][(n)])
	/* Lazily allocate the per-body slot array; aborts via enomem() on
	 * allocation failure.
	 * NOTE(review): element size is `sizeof (EIF_REFERENCE *)' although the
	 * array holds EIF_REFERENCE values - harmless on common platforms where
	 * both pointer sizes match, but confirm the intent. */
#define RTAOMS(b,m) \
	{ \
		EIF_REFERENCE ** p; \
		p = &(EIF_oms[(b)]); \
		if (!(*p)) { \
			EIF_REFERENCE * pm; \
			pm = (EIF_REFERENCE *) eif_calloc (m, sizeof (EIF_REFERENCE *)); \
			if (!pm) { \
				enomem(); \
			} \
			*p = pm; \
		} \
	}
#define RTCOMS(r,b,n,s,c,h) \
	{ \
		EIF_REFERENCE * rsp; \
		EIF_REFERENCE rs; \
		rsp = &RTOMS(b,n); \
		rs = *rsp; \
		if (!rs) { \
			register_oms (rsp); \
			rs = RTMS_EX_O(s,c,h); \
			*rsp = rs; \
		} \
		r = rs; \
	}
#define RTCOMS32(r,b,n,s,c,h) \
	{ \
		EIF_REFERENCE * rsp; \
		EIF_REFERENCE rs; \
		rsp = &RTOMS(b,n); \
		rs = *rsp; \
		if (!rs) { \
			register_oms (rsp); \
			rs = RTMS32_EX_O(s,c,h); \
			*rsp = rs; \
		} \
		r = rs; \
	}
#else
	/* Single-threaded finalized mode: once manifest strings are stored in
	 * per-body global variables named EIF_oms_<b>. */
#define RTOMS(b,n) CAT2(EIF_oms_,b) [n]
#define RTDOMS(b,m) EIF_REFERENCE RTOMS(b,m)
#define RTEOMS(b,m) extern RTDOMS(b,m)
#define RTPOMS(b,n,s,c,h) \
	{ \
		EIF_REFERENCE * rsp; \
		rsp = &RTOMS(b,n); \
		if (!(*rsp)) { \
			register_oms (rsp); \
			*rsp = RTMS_EX_O(s,c,h); \
		} \
	}
#define RTPOMS32(b,n,s,c,h) \
	{ \
		EIF_REFERENCE * rsp; \
		rsp = &RTOMS(b,n); \
		if (!(*rsp)) { \
			register_oms (rsp); \
			*rsp = RTMS32_EX_O(s,c,h); \
		} \
	}
#endif

#define RTPOF(p,o) (EIF_POINTER)((EIF_POINTER *)(((char *)(p))+(o)))
#define RTST(c,d,i,n) striparr(c,d,i,n);
#define RTXA(x,y) eif_xcopy(x, y)
#define RTEQ(x,y) eif_xequal((x),(y))
	/* Equality that compares boxed expanded objects field-wise (when both
	 * sides are boxed expanded of conforming types), reference-wise otherwise. */
#define RTCEQ(x,y) (((x) && eif_is_boxed_expanded(HEADER(x)->ov_flags) && (y) && eif_is_boxed_expanded(HEADER(y)->ov_flags) && eif_gen_conf(Dftype(x), Dftype(y)))? eif_xequal((x),(y)): (x)==(y))
#define RTOF(x) (HEADER(x)->ov_size & B_SIZE)
#define RTEO(x) ((x) - RTOF(x))
1270
1271
/* Macros for invariant check.
 * RTSN saves global variable 'nstcall' within C stack
 * RTIV(x,y) checks invariant before call on object 'x' if good flags 'y'
 * RTVI(x,y) checks invariant after call on object 'x' if good flags 'y'
 * RTCI(x) checks invariant after creation call on object 'x'
 */
#define RTSN int EIF_VOLATILE is_nested = nstcall
#define RTIV(x,y) if ((is_nested > 0) && ((y) & CK_INVARIANT)) chkinv(MTC x,0)
#define RTVI(x,y) if ((is_nested != 0) && ((y) & CK_INVARIANT)) chkinv(MTC x,1)

/* To be removed */
#define RTIV2(x,y) if (is_nested && ((y) & CK_INVARIANT)) chkinv(MTC x,0)
#define RTVI2(x,y) if (is_nested && ((y) & CK_INVARIANT)) chkinv(MTC x,1)
#define RTCI2(x) chkcinv(MTC x)

#ifndef EIF_THREADS
RT_LNK EIF_NATURAL_32 caller_assertion_level; /*Saves information about the assertion level of the caller*/
#endif


/* Macros to cache assertion level in generated C routine.
 * RTDA declares integer used to save the assertion level
 * RTAL is the access to the saved assertion level variable.
 * RTAC Checks the assertion level of the caller.
 * RTSC saves assertion level of the current feature.
 * RTRS restores the caller_assertion_level.
 * WASC(x) Assertion level.
 * RTSA(x) gets the option settings for dynamic type 'x'
 * RTME(x,y) Starts monitoring (profile or tracing) for dynamic type 'x' for routine context 'y' (i.e. normal vs external)
 */

#define RTDA \
	struct eif_opt * EIF_VOLATILE opt; \
	EIF_NATURAL_32 saved_caller_assertion_level = caller_assertion_level
	/* `~in_assertion' masks the level to 0 while an assertion is already
	 * being evaluated, preventing recursive assertion checking. */
#define RTAL (~in_assertion & opt->assert_level)
#define RTAC (~in_assertion & saved_caller_assertion_level)
#define RTSC caller_assertion_level = RTAL & CK_SUP_REQUIRE
#define RTRS caller_assertion_level = saved_caller_assertion_level
#define WASC(x) eoption[x].assert_level
#define RTSA(x) opt = eoption + x;
#ifdef WORKBENCH
#define RTME(x,y) check_options_start(opt, x, y)
#endif
1315
1316 /*
1317 * Macros for SCOOP
1318 */
1319
1320 // Define RTS_SCP_CAPABLE for use by eplug to determine whether SCOOP can be initialized.
1321 #ifndef RTS_SCP_CAPABLE
1322 #ifdef EIF_THREADS
1323 #define RTS_SCP_CAPABLE 1
1324 #else
1325 #define RTS_SCP_CAPABLE 0
1326 #endif
1327 #endif
1328
1329
1330 #define RTS_PROCESSOR_SPIN_LOOP(x) EIF_ENTER_C; {volatile int i; for (i = 0; i < x; i++) {}}; EIF_EXIT_C; RTGC;
1331
1332 #define scoop_task_assign_processor 1
1333 #define scoop_task_free_processor 2
1334 #define scoop_task_enumerate_live_processors 3
1335 #define scoop_task_signify_start_of_new_chain 4
1336 #define scoop_task_signify_end_of_new_chain 5
1337 #define scoop_task_add_supplier_to_request_chain 6
1338 #define scoop_task_wait_for_supplier_processor_locks 7
1339 #define scoop_task_add_call 8
1340 #define scoop_task_add_synchronous_call 9
1341 #define scoop_task_wait_for_processor_redundancy 10
1342 #define scoop_task_check_uncontrolled 11
1343 #define scoop_task_update_statistics 12
1344 #define scoop_task_set_active 13
1345 #define scoop_task_set_passive 14
1346 #define scoop_task_is_passive 15
1347 #define scoop_task_is_synced_on 16
1348 #define scoop_task_set_synced_on 17
1349 #define scoop_task_set_not_synced_on 18
1350
1351
#ifdef WORKBENCH
	/* Invoke the Eiffel SCOOP manager with task `t' and arguments `c', `s',
	 * `a', boxing each value in an EIF_TYPED_VALUE for workbench dispatch. */
#define RTS_TCB(t,c,s,a) \
	{ \
		EIF_TYPED_VALUE xt,xc,xs,xa; \
		xt.it_i1 = t; \
		xt.type = SK_INT8; \
		xc.it_i4 = c; \
		xc.type = SK_INT32; \
		xs.it_i4 = s; \
		xs.type = SK_INT32; \
		xa.it_p = a; \
		xa.type = SK_POINTER; \
		(egc_scoop_manager_task_callback)(scp_mnger,xt,xc,xs,xa); \
	}
#elif defined SCOOPQS
	/* Deliberately ill-formed expansion: any use under SCOOP/Qs is a
	 * compile-time error, since there is no ISE SCOOP manager to call. */
#define RTS_TCB(t,c,s,a) "SCOOP/Qs: no callbacks to the ISE SCOOP manager" t c s a;
#else
#define RTS_TCB(t,c,s,a) (egc_scoop_manager_task_callback)(scp_mnger,t,c,s,a);
#endif

/* Processor id of the object's handler, stored in the object header. */
#define RTS_PID(o) HEADER(o)->ov_pid

#ifdef SCOOPQS
#define eif_scoop_access(x) (x)
#else
#define eif_scoop_access(x) eif_access(x)
#endif

/*
 * SCOOP processor ID swapping.
 *
 * `eif_globals' should be reloaded after the restoration.
 */
#define RTS_IMPERSONATE(pid) \
	{ \
		EIF_ENTER_C; \
		eif_globals = (eif_global_context_t*) eif_thr_impersonate(pid); \
		EIF_EXIT_C; \
		RTGC; \
	}
1392
1393 /*
1394 * Object status:
1395 * EIF_IS_DIFFERENT_PROCESSOR (o1, o2) - tells if o1 and o2 run on different processors
1396 * RTS_OS(c,o) - tells if object o is separate relative to object c (i.e. they run on different processors)
1397 * RTS_OU(c,o) - tells if object o is uncontrolled by the processor associated with object c
1398 */
1399
1400 #ifdef SCOOPQS
1401 #define EIF_IS_SYNCED_ON(c,s) eveqs_is_synced_on (RTS_PID(c), RTS_PID(s))
1402 #define EIF_IS_DIFFERENT_PROCESSOR(o1,o2) ((RTS_PID (o1) != RTS_PID (o2)))
1403 #define EIF_IS_DIFFERENT_PROCESSOR_FOR_QUERY(o1,o2) ((RTS_PID (o1) != RTS_PID (o2)) && !(EIF_IS_SYNCED_ON(o1,o2)))
1404 #else
1405 EIF_BOOLEAN eif_is_synced_on (EIF_SCP_PID c, EIF_SCP_PID );
1406 #define EIF_IS_SYNCED_ON(c,s) \
1407 (eif_is_synced_on (RTS_PID (c), RTS_PID(s)) == EIF_TRUE)
1408 #define EIF_SET_SYNCED_ON(c,s) \
1409 RTS_TCB(scoop_task_set_synced_on, RTS_PID(c), RTS_PID(s), 0)
1410 #define EIF_SET_NOT_SYNCED_ON(c,s) \
1411 RTS_TCB(scoop_task_set_not_synced_on, RTS_PID(c), RTS_PID(s), 0)
1412 #define EIF_IS_DIFFERENT_PROCESSOR(o1,o2) (RTS_PID (o1) != RTS_PID (o2))
1413 #define EIF_IS_DIFFERENT_PROCESSOR_FOR_QUERY(o1,o2) ((RTS_PID (o1) != RTS_PID (o2)))
1414 #endif
1415
/* Dereference a possibly-void protected reference `r', yielding NULL for a
 * void reference. `r' is parenthesized in the test because it is a macro
 * argument: without the parentheses an argument containing a low-precedence
 * operator (e.g. an assignment or comma expression) would change the meaning
 * of the conditional after expansion. */
#define RTS_EIF_ACCESS(r) ((r) ? eif_scoop_access (r) : NULL)
/* True when `c' and `o' are handled by different processors. */
#define RTS_OS(c,o) (RTS_PID (c) != RTS_PID (o))


#ifdef SCOOPQS
#define RTS_OU(c,o) ((o) && eveqs_is_uncontrolled (RTS_PID (c), RTS_PID (o)))
#else /* SCOOPQS */
#define RTS_OU(c,o) ((o) && (scp_mnger) && EIF_TEST (eif_is_uncontrolled (RTS_PID (c), RTS_PID (o))))
#endif
1425
#ifdef SCOOPQS
	/* Active/passive processor control is not implemented under SCOOP/Qs:
	 * the setters expand to an empty statement, the query to 0 (false). */
#define EIF_SET_ACTIVE(o) ; /* "SCOOP/Qs: set_active not implemented" o; */
#define EIF_SET_PASSIVE(o) ; /* "SCOOP/Qs: set_passive not implemented" o; */
#define EIF_IS_PASSIVE(o) 0 /* "SCOOP/Qs: is_passive not implemented" o; */
#else
#define EIF_SET_ACTIVE(o) RTS_TCB(scoop_task_set_active,0,RTS_PID (o),0)
#define EIF_SET_PASSIVE(o) RTS_TCB(scoop_task_set_passive,0,RTS_PID (o),0)
#define EIF_IS_PASSIVE(o) (eif_is_passive (RTS_PID (o)) == EIF_TRUE)
#endif

/*
 * Processor:
 * RTS_PA(o) - associate a fresh processor with an object o
 */
#ifdef SCOOPQS
#define RTS_PA(o) eveqs_processor_fresh (o);
#else
	/* Ask the SCOOP manager for a new processor id and record it in the
	 * object's header. */
#define RTS_PA(o) \
	{ \
		EIF_TYPED_VALUE pid; \
		pid.it_i4 = 0; \
		pid.type = SK_INT32; \
		RTS_TCB(scoop_task_assign_processor,RTS_PID(o),0,&pid); \
		RTS_PID(o) = (EIF_SCP_PID) pid.it_i4; \
	}
#endif
/*
 * Request chain:
 * RTS_RC(p) - create a request chain for the processor identified by object p
 * RTS_RD(p) - delete a request chain for the processor identified by object p
 * RTS_RF(p) - same as RTS_RD except that it is called when wait condition fails
 * RTS_RS(p,s) - add a supplier s to the request chain of the processor identified by object p
 * RTS_RW(p) - wait until all the suppliers are ready in the request chain of the processor identified by object p
 * The only valid sequence of calls is
 * RTS_RC (RTS_RS)* [RTS_RW] RTS_RD
 */
#ifdef SCOOPQS
#define RTS_RC(p) eveqs_req_grp_new(RTS_PID(p));
#define RTS_RD(p) eveqs_req_grp_delete(RTS_PID(p));
#define RTS_RF(p) eveqs_req_grp_wait (RTS_PID(p));
#define RTS_RS(p,s) eveqs_req_grp_add_supplier(RTS_PID(p), RTS_PID(s));
#define RTS_RW(p) eveqs_req_grp_lock(RTS_PID(p));
#else
#define RTS_RC(p) RTS_TCB(scoop_task_signify_start_of_new_chain,RTS_PID(p),0,NULL)
#define RTS_RD(p) RTS_TCB(scoop_task_signify_end_of_new_chain,RTS_PID(p),RTS_PID(p),NULL)
#define RTS_RF(p) RTS_TCB(scoop_task_signify_end_of_new_chain,RTS_PID(p),-1,NULL)
#define RTS_RS(p,s) if (s) {RTS_TCB(scoop_task_add_supplier_to_request_chain,RTS_PID(p),RTS_PID(s),NULL)}
#define RTS_RW(p) RTS_TCB(scoop_task_wait_for_supplier_processor_locks,RTS_PID(p),0,NULL)
#endif
1475
1476 /*
1477 * Request chain stack:
1478 * RTS_SD - declare variables that are used to track request chain stack without rescue clause.
1479 * RTS_SDX - declare variables that are used to track request chain stack with rescue clause.
1480 * RTS_SDR - declare variables that are used to restore request chain stack with rescue clause.
1481 * RTS_SRC(p) - create request chain for the processor identified by object p when there is no rescue clase.
1482 * RTS_SRCX(p) - create request chain for the processor identified by object p when there is a rescue clase.
1483 * RTS_SRF(p) - release request chain for the processor identified by object p when wait condition fails.
1484 * RTS_SRD(p) - release request chain for the processor identified by object p when routine exits normally.
1485 * RTS_SRR - release request chains (if any) when entering a rescue clause because of an exception.
1486 */
1487 #define RTS_SD \
1488 EIF_REFERENCE * q = sep_stack.st_top;
1489 #define RTS_SDX \
1490 EIF_REFERENCE * volatile q = sep_stack.st_top; \
1491 EIF_REFERENCE * volatile qt = q;
1492 #define RTS_SDR \
1493 EIF_REFERENCE * qt = sep_stack.st_top;
1494 #define RTS_SRC(p) \
1495 { \
1496 RTS_RC(p); \
1497 if (q && q < sep_stack.st_end) { \
1498 *q = p; \
1499 sep_stack.st_top = q + 1; \
1500 } \
1501 else { \
1502 eif_request_chain_push (p, &sep_stack); \
1503 q = (EIF_REFERENCE *) 0; \
1504 } \
1505 }
1506 #define RTS_SRCX(p) {RTS_SRC(p); qt = sep_stack.st_top;}
1507 #define RTS_SRP(p) \
1508 if (q == (EIF_REFERENCE *) 0) { \
1509 eif_request_chain_pop (&sep_stack); \
1510 } \
1511 else { \
1512 sep_stack.st_top = q; \
1513 }
1514 #define RTS_SRF(p) {RTS_SRP (p); RTS_RF (p);}
1515 #define RTS_SRD(p) {RTS_SRP (p); RTS_RD (p);}
1516 #define RTS_SRR \
1517 if (sep_stack.st_top != qt) { \
1518 eif_request_chain_restore (qt, &sep_stack); \
1519 }
1520
1521 /*
1522 * Separate call (versions ending with P stand for calls to precompiled routines, the first two arguments to them have a different meaning):
1523 * RTS_CF(s,f,n,t,a,r) - call a function on a static type s with a feature id f and name n on a target t and arguments a and result r
1524 * RTS_CP(s,f,n,t,a) - call a procedure on a static type s with a feature id f and name n on a target t and arguments a
1525 * RTS_CC(s,f,d,a) - call a creation procedure (asynchronous) on a static type s with a feature id f on a target of dynamic type d and arguments a
1526 * RTS_CA(o,p,t,a,r) - call an attribute at offset o using pattern p on target t with arguments a and result r
1527 * RTS_CS(t,a) - call a constant on target t with call structure a
1528 */
1529
1530 #ifdef WORKBENCH
1531 #define RTS_CF(s,f,n,t,a,r) \
1532 { \
1533 ((call_data*)(a)) -> result = &(r); \
1534 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1535 eif_log_call (s, f, RTS_PID(Current), (call_data*) a); \
1536 }
1537 #define RTS_CFP(s,f,n,t,a,r) \
1538 { \
1539 ((call_data*)(a)) -> result = &(r); \
1540 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1541 eif_log_callp (s, f, RTS_PID(Current), a); \
1542 }
1543 #define RTS_CP(s,f,n,t,a) eif_log_call (s, f, RTS_PID(Current), (call_data*) a);
1544 #define RTS_CPP(s,f,n,t,a) eif_log_callp (s, f, RTS_PID(Current), (call_data*) a);
1545
1546 #define RTS_CC(s,f,d,a) eif_log_call (s, f, RTS_PID(Current), (call_data*) a);
1547 #define RTS_CCP(s,f,d,a) eif_log_callp (s, f, RTS_PID(Current), (call_data*) a);
1548 #else /* WORKBENCH */
1549 #define RTS_CF(f,p,t,a,r) \
1550 { \
1551 ((call_data*)(a)) -> feature.address = (fnptr) f; \
1552 ((call_data*)(a)) -> pattern = p; \
1553 ((call_data*)(a)) -> result = &(r); \
1554 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1555 eif_log_call (((call_data*)(a))->sync_pid, (call_data*) a); \
1556 }
1557 #define RTS_CP(f,p,t,a) \
1558 { \
1559 ((call_data*)(a)) -> feature.address = (fnptr) f; \
1560 ((call_data*)(a)) -> pattern = p; \
1561 eif_log_call (RTS_PID(Current), (call_data*) a); \
1562 }
1563 #define RTS_CC(f,p,t,a) RTS_CP(f,p,t,a)
1564 #define RTS_CA(o,p,t,a,r) \
1565 { \
1566 ((call_data*)(a)) -> feature.offset = o; \
1567 ((call_data*)(a)) -> pattern = p; \
1568 ((call_data*)(a)) -> result = &(r); \
1569 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1570 eif_log_call (((call_data*)(a))->sync_pid, (call_data*) a); \
1571 }
1572 #define RTS_CS(t,a) \
1573 { \
1574 ((call_data*)(a)) -> pattern = eif_call_const; \
1575 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1576 eif_log_call (((call_data*)(a))->sync_pid, (call_data*) a); \
1577 }
1578 #endif /* WORKBENCH */
1579
1580 /*
1581 * Separate call arguments:
1582 * RTS_AC(n,t,a) - allocate container a that can hold n arguments for target t
1583 * RTS_AA(v,f,t,n,a) - register argument v corresponding to field f of type t at position n in a
1584 * RTS_AS(v,f,t,n,a) - same as RTS_AA except that that argument is checked if it is controlled or not that is recorded to make synchronous call if required
1585 */
1586 #ifdef SCOOPQS
1587 #define RTS_AC(n,t,a) \
1588 { \
1589 a = malloc (sizeof (call_data) + sizeof (EIF_TYPED_VALUE) * (size_t) (n) - sizeof (EIF_TYPED_VALUE)); \
1590 ((call_data*)(a)) -> target = (t); \
1591 ((call_data*)(a)) -> count = (n); \
1592 ((call_data*)(a)) -> result = (EIF_TYPED_VALUE *) 0; \
1593 ((call_data*)(a)) -> sync_pid = (EIF_SCP_PID) -1; \
1594 ((call_data*)(a)) -> is_lock_passing = EIF_FALSE; \
1595 }
1596 # define RTS_AA(v,f,t,n,a) \
1597 { \
1598 ((call_data*)(a)) -> argument [(n) - 1].f = (v); \
1599 ((call_data*)(a)) -> argument [(n) - 1].type = (t); \
1600 }
1601 # define RTS_AS(v,f,t,n,a) \
1602 { \
1603 ((call_data*)(a)) -> argument [(n) - 1].type = SK_REF; \
1604 if (v) \
1605 { \
1606 ((call_data*)(a)) -> argument [(n) - 1].it_r = (EIF_REFERENCE) (v); \
1607 if (!RTS_OU(Current, ((call_data*)(a)) -> argument [(n) - 1].it_r ) ) \
1608 { \
1609 if (EIF_IS_DIFFERENT_PROCESSOR (((call_data*)(a))->target, ((call_data*)(a))->argument [(n)-1].it_r)) \
1610 { \
1611 ((call_data*)(a)) -> is_lock_passing = EIF_TRUE; \
1612 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1613 } \
1614 } \
1615 } \
1616 else \
1617 { \
1618 ((call_data*)(a)) -> argument [(n) - 1].it_r = (v); \
1619 } \
1620 }
1621 #ifdef WORKBENCH
1622 #undef RTS_AC
1623 #undef RTS_AA
1624 #undef RTS_AS
1625 #define RTS_AC(n,t,a)
1626 #define RTS_AA(v,f,t,n,a)
1627 #define RTS_AS(v,f,t,n,a)
1628 #endif
1629
1630 #endif
1631
1632 #ifndef SCOOPQS
1633 #define RTS_AC(n,t,a) \
1634 { \
1635 a = cmalloc (sizeof (call_data) + sizeof (EIF_TYPED_VALUE) * (size_t) (n) - sizeof (EIF_TYPED_VALUE)); \
1636 ((call_data*)(a)) -> target = eif_protect (t); \
1637 ((call_data*)(a)) -> count = (n); \
1638 ((call_data*)(a)) -> result = (EIF_TYPED_VALUE *) 0; \
1639 ((call_data*)(a)) -> sync_pid = (EIF_SCP_PID) -1; \
1640 ((call_data*)(a)) -> is_lock_passing = EIF_FALSE; \
1641 }
1642 #ifdef WORKBENCH
1643 # define RTS_AA(v,f,t,n,a) ((call_data*)(a)) -> argument [(n) - 1] = (v);
1644 # define RTS_AS(v,f,t,n,a) \
1645 { \
1646 ((call_data*)(a)) -> argument [(n) - 1] = (v); \
1647 if ( (v).it_r ) \
1648 { \
1649 ((call_data*)(a)) -> argument [(n) - 1].it_r = (EIF_REFERENCE) eif_protect ((v).it_r); \
1650 if ( !RTS_OU(Current, eif_access ( ((call_data*)(a)) -> argument [(n) - 1].it_r ) ) ) \
1651 { \
1652 if (EIF_IS_DIFFERENT_PROCESSOR (eif_access (((call_data*)(a))->target),eif_access (((call_data*)(a))->argument [(n)-1].it_r))) \
1653 { \
1654 ((call_data*)(a)) -> is_lock_passing = EIF_TRUE; \
1655 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1656 } \
1657 } \
1658 } \
1659 else \
1660 { \
1661 ((call_data*)(a)) -> argument [(n) - 1].it_r = (v).it_r; \
1662 }\
1663 }
1664 #else
1665 # define RTS_AA(v,f,t,n,a) \
1666 { \
1667 ((call_data*)(a)) -> argument [(n) - 1].f = (v); \
1668 ((call_data*)(a)) -> argument [(n) - 1].type = t; \
1669 }
1670 # define RTS_AS(v,f,t,n,a) \
1671 { \
1672 ((call_data*)(a)) -> argument [(n) - 1].type = SK_REF; \
1673 if (v) \
1674 { \
1675 ((call_data*)(a)) -> argument [(n) - 1].it_r = (EIF_REFERENCE) eif_protect (v); \
1676 if (!RTS_OU(Current, eif_access ( ((call_data*)(a)) -> argument [(n) - 1].it_r ) ) ) \
1677 { \
1678 if (EIF_IS_DIFFERENT_PROCESSOR (eif_access (((call_data*)(a))->target),eif_access (((call_data*)(a))->argument [(n)-1].it_r))) \
1679 { \
1680 ((call_data*)(a)) -> is_lock_passing = EIF_TRUE; \
1681 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1682 } \
1683 } \
1684 } \
1685 else \
1686 { \
1687 ((call_data*)(a)) -> argument [(n) - 1].it_r = v; \
1688 } \
1689 }
1690 #endif /* WORKBENCH */
1691 #endif /* SCOOPQS */
1692
#ifdef SCOOPQS
	/* Wait until all processors have finished their queued work. */
#define RTS_WPR eveqs_wait_for_all();
#else
#define RTS_WPR RTS_TCB(scoop_task_wait_for_processor_redundancy,0,0,NULL)
#endif

/* Blocking operations leave the Eiffel context (EIF_ENTER_C/EIF_EXIT_C)
 * so the GC is not blocked while waiting. */
#define RTS_SEMAPHORE_CLIENT_WAIT(semaddr) EIF_ENTER_C; eif_pthread_sem_wait(semaddr); EIF_EXIT_C; RTGC;
#define RTS_SEMAPHORE_SUPPLIER_SIGNAL(semaddr) eif_pthread_sem_post(semaddr);

#define RTS_PROCESSOR_CPU_YIELD EIF_ENTER_C; eif_pthread_yield(); EIF_EXIT_C; RTGC;
1703
1704 /*
1705 * Macros for workbench
1706 */
1707
1708 #ifdef WORKBENCH
1709
1710 /* Macros used for feature call and various accesses to objects.
1711 * RTWF(x,y,z) is a feature call
1712 * RTWPF(x,y,z) is a precompiled feature call
1713 * RTVF(x,y,z,t) is a nested feature call (dot expression)
1714 * RTVPF(x,y,z,t) is a nested precompiled feature call (dot expression)
1715 * RTWC(x,y,z) is a creation procedure call
1716 * RTWPC(x,y,z) is a precompiled creation procedure call
1717 * RTWA(x,y,z) is the access to an attribute
1718 * RTWPA(x,y,z) is the access to a precompiled attribute
1719 * RTVA(x,y,z,t) is a nested access to an attribute (dot expression)
1720 * RTVPA(x,y,z,t) is a nested access to a precompiled attribute (dot expr)
1721 * RTWT(x,y,z) fetches the creation type
1722 * RTWPT(x,y,z) fetches the creation type of a precompiled feature
1723 * RTWCT(x,y,z) fetches the creation type of a generic features
1724 * RTWPCT(st,x,y,z) fetches the creation type of a precompiled generic feature
1725 * RTWCTT(x,y,z) same as RTWCT but takes dftype instead of object
1726 * RTWPCTT(x,y,z) same as RTWPCT but takes dftype instead of object
1727 * RTWPP(x) returns the feature address ($ or agent operator) of id x. The ids are assigned int ADDRESS_TABLE.
1728 * RTWO(x) stores in a list the body id of the just called once routine
1729 */
1730 #define RTWF(x,y,z) wfeat(x,y,z)
1731 #define RTWPF(x,y,z) wpfeat(x,y,z)
1732 #define RTVF(x,y,z,t) wfeat_inv(x,y,z,t)
1733 #define RTVPF(x,y,z,t) wpfeat_inv(x,y,z,t)
1734 #define RTWC(x,y,z) wcreat(x,y,z)
1735 #define RTWPC(x,y,z) wpcreat(x,y,z)
1736 #define RTWA(x,y,z) wattr(x,y,z)
1737 #define RTWPA(x,y,z) wpattr(x,y,z)
1738 #define RTVA(x,y,z,t) wattr_inv(x,y,z,t)
1739 #define RTVPA(x,y,z,t) wpattr_inv(x,y,z,t)
1740 #define RTWT(x,y,z) wtype(x,y,z)
1741 #define RTWPT(x,y,z) wptype(x,y,z)
1742 #define RTWCT(x,y,z) wtype_gen(x,y,z)
1743 #define RTWPCT(st,x,y,z) wptype_gen(st,x,y,z)
1744 #define RTWCTT(x,y,z) wttype_gen(x,y,z)
1745 #define RTWPCTT(st,x,y,z) wtptype_gen(st,x,y,z)
1746 #define RTWPP(x) (egc_address_table[x])
1747 #define RTWO(x)
1748
1749 #define WDBG(x,y) eif_is_debug(x,y) /* Debug option */
1750
1751 #define WASL(x,y,z) waslist(x,y,z) /* Assertion list evaluation */
1752 #define WASF(x) wasfree(x) /* Free assertion list */
1753
1754 #define RTDS(x) dispatch(x) /* Body id of body index (x) */
1755 #define RTFZ(x) egc_frozen(x) /* C frozen pointer of body id (x) */
1756 #define RTMT(x) melt(x) /* Byte code of body id (x) */
1757
1758 #define RTDT \
1759 int EIF_VOLATILE current_call_level; \
1760 char ** EIF_VOLATILE saved_prof_top /* Declare saved trace and profile */
1761 #else
1762 /* In final mode, an Eiffel call to a deferred feature without any actual
1763 * implementation could be generated anyway because of the statical dead code
1764 * removal process; so we need a funciton pointer trigeering an exception
1765 */
#define RTNR rt_norout

/* In final mode, we have two macros for E-TRACE called RTTR (start trace) and RTXT (stop trace).
 * We have also two macros for E-PROFILE in final mode, called RTPR (start profile) and RTXP (stop profile).
 * All macros need to have 'x' = featurename, 'y' = origin, 'z' = dtype, except RTXP.
 *
 * Plus, we need to declare 'current_call_level' whenever a finalized feature has a rescue-clause.
 * This is done by RTLT
 *
 */
#define RTTR(x,y,z,w) start_trace(x,y,z,w) /* Print message "entering..." */
#define RTXT(x,y,z,w) stop_trace(x,y,z,w) /* Print message "leaving..." */
#define RTPR(x,y,z) start_profile(x,y,z) /* Start measurement of feature */
#define RTXP stop_profile() /* Stop measurement of feature */
#define RTLT int EIF_VOLATILE current_call_level /* Declare local trace variable */
#define RTLP char ** EIF_VOLATILE saved_prof_top /* Declare local profiler variable */
#define RTPI saved_prof_top = prof_stack->st_top /* Save profile stack top so a
 * rescue clause can rewind to it (see RTPS)
 */
#define RTTI current_call_level = trace_call_level /* Save the tracer call level */
1786 #endif
1787
1788
1789
1790 /* Macros needed for profile stack and trace clean up */
1791
#ifdef WORKBENCH
/* NOTE(review): workbench mode guards against a NULL `prof_stack' before
 * rewinding; final mode assumes it is always set — confirm that invariant
 * if this ever changes. */
#define RTPS if (prof_stack) prof_stack_rewind(saved_prof_top) /* Clean up profiler stack */
#else
#define RTPS prof_stack_rewind(saved_prof_top) /* Clean up profiler stack */
#endif
#define RTTS trace_call_level = current_call_level /* Clean up trace levels */
1798
1799
/* Macros used to get info about SPECIAL objects.
 * The three trailing EIF_INTEGERs at the end of a SPECIAL hold, in order:
 * `count', `element_size' and `capacity' (see the offsets used below).
 * RT_SPECIAL_PADDED_DATA_SIZE is the additional size of the data at the end of the SPECIAL.
 * RT_SPECIAL_DATA_SIZE is the meaningful part of RT_SPECIAL_PADDED_DATA_SIZE being used.
 * RT_SPECIAL_MALLOC_COUNT is the macro to compute the necessary memory size for the SPECIAL
 * (not defined in this section).
 * RT_SPECIAL_VISIBLE_SIZE is the byte size of the used element storage (count * element_size).
 * RT_IS_SPECIAL tells whether an object is a SPECIAL that is not a TUPLE.
 * RT_SPECIAL_COUNT returns `count' of special objects.
 * RT_SPECIAL_ELEM_SIZE returns `element_size' of items in special objects.
 * RT_SPECIAL_CAPACITY returns `capacity' of special objects.
 */
#define RT_SPECIAL_PADDED_DATA_SIZE LNGPAD(3)
#define RT_SPECIAL_DATA_SIZE (3*sizeof(EIF_INTEGER))
#define RT_SPECIAL_VISIBLE_SIZE(spec) ((rt_uint_ptr) RT_SPECIAL_COUNT(spec) * (rt_uint_ptr) RT_SPECIAL_ELEM_SIZE(spec))

#define RT_IS_SPECIAL(obj) \
	((HEADER(obj)->ov_flags & (EO_SPEC | EO_TUPLE)) == EO_SPEC)

/* `count' lives at the start of the trailing data area. */
#define RT_SPECIAL_COUNT(spec) \
	(*(EIF_INTEGER *) ((char *) ((spec) + (HEADER(spec)->ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE)))

/* `element_size' is the second EIF_INTEGER of the trailing data area. */
#define RT_SPECIAL_ELEM_SIZE(spec) \
	(*(EIF_INTEGER *) ((char *) ((spec) + (HEADER(spec)->ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE) + sizeof(EIF_INTEGER)))

/* `capacity' is the third EIF_INTEGER of the trailing data area. */
#define RT_SPECIAL_CAPACITY(spec) \
	(*(EIF_INTEGER *) ((char *) ((spec) + (HEADER(spec)->ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE) + 2*sizeof(EIF_INTEGER)))
1822
1823
1824 /* Macros used for array optimization
1825 * RTADTYPE(x) defines the variable for the dynamic type of `x'
1826 * RTADOFFSETS(x) defines the variables for the offsets of area and lower of `x'
1827 * RTAD(x) defines the variables for array optimization on `x'
1828 * RTAITYPE(x,y) initializes the variable for Dftype on `x', `y'
1829 * RTAI(cast,x,y) initializes the variables for array optimization on `x', `y'
 * RTAF(x, y) unfreezes the area cached for `x' if it was frozen at this level (`y' is unused)
1831 * RTAA(cast,x,i) gets the value at position `i' from `x' of type `cast'
1832 * RTAP(cast,x,val,i) puts `val' at position `i' from `x' of type `cast'
1833 * RTAUA(cast,x,y,i) gets the value at position `i' from `x' of type `cast' (unsafe version)
1834 * RTAUP(cast,x,y,val,i) puts `val' at position `i' from `x' of type `cast' (unsafe version)
1835 */
1836
/* Per-array locals used by the array-optimization macros below; each is
 * named by pasting a suffix onto the array name `x' (via CAT2).
 * RTADTYPE declares the cached dynamic type of `x'.
 * RTADOFFSETS declares the cached `area'/`lower' attribute offsets of `x'.
 * RTAD declares the freeze flag and the cached area pointers of `x'. */
#define RTADTYPE(x) int EIF_VOLATILE CAT2(x,_dtype) = 0

#define RTADOFFSETS(x) \
	long EIF_VOLATILE CAT2(x,_area_offset) = 0; \
	long EIF_VOLATILE CAT2(x,_lower_offset) = 0

#define RTAD(x) \
	char EIF_VOLATILE CAT2(x,_freeze) = 0; \
	char* EIF_VOLATILE CAT2(x,_area); \
	char* EIF_VOLATILE CAT2(x,_area_minus_lower)
1847
/* Cache the dynamic type of object `y' in the `x'-prefixed local. */
#define RTAITYPE(x,y) CAT2(x,_dtype) = Dtype(y)

/* Cache the `area' and `lower' attribute offsets of `x' from the global
 * offset tables, indexed by dynamic type. Does nothing when `y' is void.
 * NOTE(review): the expansion is a bare `if' without a do/while(0) wrapper,
 * so a call site followed by `else' would bind incorrectly — presumably the
 * code generator never emits such a site; confirm before reusing by hand. */
#define RTAIOFFSETS(x,y) \
	if (y) { \
		RTAITYPE(x,y); \
		CAT2(x,_area_offset) = (eif_area_table) [CAT2(x,_dtype)]; \
		CAT2(x,_lower_offset) = (eif_lower_table) [CAT2(x,_dtype)]; \
	}
1856
/* Unsafe (no bounds check) read of item `i' of array object `y', whose
 * elements have type `cast'; uses the offsets cached by RTAIOFFSETS.
 * Fix: the macro arguments `y' and `i' are now parenthesized in the
 * expansion so that compound argument expressions (e.g. `i' being `a+b')
 * keep their intended grouping (CERT PRE01-C). */
#define RTAUA(cast,x,y,i) \
	*(cast*)(*(EIF_REFERENCE *)((y)+CAT2(x,_area_offset))+((i)-*(long*)((y)+CAT2(x,_lower_offset)))*sizeof(cast))
1859
/* Unsafe (no bounds check) write of `val' at item `i' of array object `y'.
 * Dispatches on `cast' by token pasting: EIF_REFERENCE stores go through
 * RTAUP_EIF_REFERENCE below (which also calls RTAR), all basic expanded
 * types share the direct store RTAUP_BASIC. */
#define RTAUP(cast,x,y,val,i) CAT2(RTAUP_,cast)(cast,x,y,val,i)

#define RTAUP_EIF_INTEGER(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
#define RTAUP_EIF_CHARACTER(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
#define RTAUP_EIF_REAL(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
#define RTAUP_EIF_DOUBLE(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
#define RTAUP_EIF_POINTER(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
#define RTAUP_EIF_BOOLEAN(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
1868
/* Unsafe (no bounds check) write of reference `val' at item `i' of array
 * object `y'; RTAR is invoked with the enclosing area and the new value.
 * Fix: the original declared its local index as `long EIF_VOLATILE i = i-...',
 * i.e. the local was named after the macro parameter `i'. After expansion the
 * local shadowed the caller's variable and was initialized from itself
 * (uninitialized read — undefined behavior), and any non-identifier argument
 * produced a syntax error. The local now has a distinct name and the
 * arguments are parenthesized (CERT PRE01-C). */
#define RTAUP_EIF_REFERENCE(cast,x,y,val,i) \
	{ \
		EIF_REFERENCE EIF_VOLATILE ptr_current = (*(EIF_REFERENCE *)((y)+CAT2(x,_area_offset))); \
		long EIF_VOLATILE eif_pos_ = (i)-*(long*)((y)+CAT2(x,_lower_offset)); \
		*((EIF_REFERENCE *)ptr_current + eif_pos_) = (val); \
		RTAR(ptr_current, (val)); \
	}
1876
/* Direct unsafe store of `val' at item `i' (basic expanded types only).
 * Fix: `y', `i' and `val' are now parenthesized in the expansion so that
 * compound argument expressions keep their intended grouping (CERT PRE01-C). */
#define RTAUP_BASIC(cast,x,y,val,i) \
	*(cast*)(*(EIF_REFERENCE *)((y)+CAT2(x,_area_offset))+((i)-*(long*)((y)+CAT2(x,_lower_offset)))*sizeof(cast)) = (val)
1879
/* Initialize the cached pointers for array `x' from object `y' (no-op when
 * `y' is void): cache the area pointer, and precompute
 * area - lower*sizeof(cast) so RTAA/RTAP can index directly by `i'.
 * If the area was not already marked B_C, set the flag and remember that we
 * did (the `_freeze' local) so RTAF can clear it again — presumably this
 * keeps the collector from moving the area while the raw pointers are in
 * use; TODO(review) confirm B_C semantics.
 * Note: the assignment inside the `if' condition is intentional
 * (cache-and-test in one step). */
#define RTAI(cast,x,y) \
	if (y) { \
		RTAITYPE(x,y); \
		if (CAT2(x,_area) = *(EIF_REFERENCE *) ((y)+ (eif_area_table) [CAT2(x,_dtype)])) { \
			if (!(HEADER(CAT2(x,_area))->ov_size & B_C)) { \
				CAT2(x,_freeze) = 1; \
				HEADER(CAT2(x,_area))->ov_size |= B_C; \
			} \
			CAT2(x,_area_minus_lower) = CAT2(x,_area)-(*(long*) ((y)+ (eif_lower_table) [CAT2(x,_dtype)]))*sizeof(cast); \
		} \
	}
1891
/* Same as RTAI but reads the `area'/`lower' offsets from the locals cached
 * by RTAIOFFSETS instead of indexing the global tables directly.
 * The assignment inside the `if' condition is intentional. */
#define RTAIOFF(cast,x,y) \
	if (y) { \
		RTAITYPE(x,y); \
		if (CAT2(x,_area) = *(EIF_REFERENCE *) ((y)+CAT2(x,_area_offset))) { \
			if (!(HEADER(CAT2(x,_area))->ov_size & B_C)) { \
				CAT2(x,_freeze) = 1; \
				HEADER(CAT2(x,_area))->ov_size |= B_C; \
			} \
			CAT2(x,_area_minus_lower) = CAT2(x,_area)-(*(long*) ((y)+CAT2(x,_lower_offset)))*sizeof(cast); \
		} \
	}
1903
/* Clear the B_C flag on the area cached for `x' if RTAI/RTAIOFF set it at
 * this level (undoes the freeze). `y' is accepted but not used — kept,
 * presumably, for compatibility with generated call sites. */
#define RTAF(x, y) \
	if (CAT2(x,_freeze)!=0) { \
		HEADER(CAT2(x,_area))->ov_size &= ~B_C; \
	}
1908
/* Read item `i' of the array cached for `x' as type `cast', using the
 * precomputed area-minus-lower base pointer. */
#define RTAA(cast,x,i) \
	*(cast*)(CAT2(x,_area_minus_lower)+(i)*sizeof(cast))
1911
/* Write `val' at item `i' of the array cached for `x'. Dispatches on `cast'
 * by token pasting: EIF_REFERENCE stores go through RTAP_EIF_REFERENCE below
 * (which also calls RTAR), all basic expanded types share RTAP_BASIC. */
#define RTAP(cast,x,val,i) CAT2(RTAP_,cast)(cast,x,val,i)

#define RTAP_EIF_INTEGER(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
#define RTAP_EIF_CHARACTER(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
#define RTAP_EIF_REAL(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
#define RTAP_EIF_DOUBLE(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
#define RTAP_EIF_POINTER(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
#define RTAP_EIF_BOOLEAN(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
1920
/* Reference store at item `i' of the array cached for `x'; RTAR is invoked
 * with the cached area and the new value.
 * Fix: `i' and `val' are now parenthesized in the expansion, consistent with
 * RTAP_BASIC, so compound argument expressions keep their intended grouping
 * (CERT PRE01-C). Note `val' is evaluated twice, as in the original. */
#define RTAP_EIF_REFERENCE(cast,x,val,i) \
	{ \
		*((cast*)CAT2(x,_area_minus_lower) + (i)) = (val); \
		RTAR(CAT2(x,_area), (val)); \
	}
1926
/* Direct store of `val' at item `i' (basic expanded types only).
 * NOTE(review): the expansion ends with a semicolon (unlike RTAUP_BASIC);
 * generated call sites presumably rely on that — verify before removing. */
#define RTAP_BASIC(cast,x,val,i) \
	*(cast*)(CAT2(x,_area_minus_lower)+(i)*sizeof(cast)) = val;
1929
1930 #ifdef __cplusplus
1931 }
1932 #endif
1933
1934 #endif
1935

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.23