Contents of /branches/eth/eve/Src/C/run-time/eif_macros.h

Revision 88121 - Fri Dec 16 14:42:24 2011 UTC by jasonw
File MIME type: text/plain; file size: 74464 byte(s)
<<Merged from trunk#88120.>>
1 /*
2 description: "Macros used by C code at run time."
3 date: "$Date$"
4 revision: "$Revision$"
5 copyright: "Copyright (c) 1985-2010, Eiffel Software."
6 license: "GPL version 2 (see http://www.eiffel.com/licensing/gpl.txt)"
7 licensing_options: "Commercial license is available at http://www.eiffel.com/licensing"
8 copying: "[
9 This file is part of Eiffel Software's Runtime.
10
11 Eiffel Software's Runtime is free software; you can
12 redistribute it and/or modify it under the terms of the
13 GNU General Public License as published by the Free
14 Software Foundation, version 2 of the License
15 (available at the URL listed under "license" above).
16
17 Eiffel Software's Runtime is distributed in the hope
18 that it will be useful, but WITHOUT ANY WARRANTY;
19 without even the implied warranty of MERCHANTABILITY
20 or FITNESS FOR A PARTICULAR PURPOSE.
21 See the GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public
24 License along with Eiffel Software's Runtime; if not,
25 write to the Free Software Foundation, Inc.,
26 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 ]"
28 source: "[
29 Eiffel Software
30 356 Storke Road, Goleta, CA 93117 USA
31 Telephone 805-685-1006, Fax 805-685-6869
32 Website http://www.eiffel.com
33 Customer support http://support.eiffel.com
34 ]"
35 */
36
37 #ifndef _eif_macros_h_
38 #define _eif_macros_h_
39
40 #include "eif_portable.h"
41 #include "eif_equal.h"
42 #include "eif_globals.h"
43 #include "eif_project.h"
44 #include "eif_malloc.h"
45 #include "eif_lmalloc.h"
46 #include "eif_garcol.h"
47 #include "eif_except.h"
48 #include "eif_local.h"
49 #include "eif_copy.h"
50 #include "eif_plug.h" /* For struct bit declaration */
51 #include "eif_hector.h"
52 #include "eif_size.h"
53 #include "eif_gen_conf.h"
54 #include "eif_rout_obj.h"
55 #include "eif_option.h"
56 #include "eif_bits.h"
57 #include "eif_scoop.h"
58
59 #ifdef WORKBENCH
60 #include "eif_wbench.h"
61 #endif
62
63 #ifdef __cplusplus
64 extern "C" {
65 #endif
66
67 RT_LNK void * eif_pointer_identity (void *);
68
69 /* On a BSD system, we should use _setjmp and _longjmp if they are available,
70 * so that no system call is made to preserve the signal mask flag. It should
71 * be taken care of by the signal handler routine.
72 */
73 #ifdef USE_BSDJMP
74 #ifndef setjmp
75 #define setjmp _setjmp
76 #endif
77 #ifndef longjmp
78 #define longjmp _longjmp
79 #endif
80 #endif
81
82
83
84 /* Convenience macros:
85 * EIF_TRUE representation of True in Eiffel (shouldn't be used in tests, always compare to False)
86 * EIF_FALSE representation of False in Eiffel
87 * EIF_TEST boolean test on an expression
88 */
89 #define EIF_TRUE (EIF_BOOLEAN) '\01'
90 #define EIF_FALSE (EIF_BOOLEAN) '\0'
91 #define EIF_TEST(x) ((EIF_BOOLEAN)((x) ? EIF_TRUE : EIF_FALSE))
92 #define EIF_VOLATILE
93
94 #ifdef WORKBENCH
95 #define EIF_IS_WORKBENCH EIF_TRUE
96 #else
97 #define EIF_IS_WORKBENCH EIF_FALSE
98 #endif
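/* Illustrative use of the convenience macros (hypothetical variable `count'):
 *
 *   EIF_BOOLEAN non_empty = EIF_TEST (count > 0);
 *
 * EIF_TEST normalizes any non-zero C expression to EIF_TRUE and zero to EIF_FALSE,
 * so the result can safely be compared to EIF_FALSE as recommended above.
 */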
99
100 /* Function pointer call to make sure all arguments are correctly pushed onto stack.
101 * FUNCTION_CAST is for standard C calls.
102 * FUNCTION_CAST_TYPE is for non-standard C calls.
103 */
104 #define FUNCTION_CAST(r_type, arg_types) (r_type (*) arg_types)
105 #define FUNCTION_CAST_TYPE(r_type, call_type, arg_types) (r_type (call_type *) arg_types)
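/* Illustrative use of FUNCTION_CAST (hypothetical names): given a routine address
 * kept as a plain pointer,
 *
 *   (FUNCTION_CAST (void, (EIF_REFERENCE)) fn_ptr) (obj);
 *
 * the macro only builds the cast "(void (*) (EIF_REFERENCE))"; the caller supplies
 * the argument list itself.
 */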
106
107
108 /*
109 * Macro for GC synchronization
110 */
111 #if defined(ISE_GC) && defined(EIF_THREADS)
112 RT_LNK int volatile eif_is_gc_collecting;
113 RT_LNK void eif_synchronize_for_gc(void);
114 RT_LNK int eif_is_in_eiffel_code(void);
115 RT_LNK void eif_enter_eiffel_code(void);
116 RT_LNK void eif_exit_eiffel_code(void);
117 #define RTGC if (eif_is_gc_collecting) eif_synchronize_for_gc()
118 #define EIF_ENTER_EIFFEL eif_enter_eiffel_code()
119 #define EIF_EXIT_EIFFEL eif_exit_eiffel_code()
120 #define EIF_ENTER_C EIF_EXIT_EIFFEL
121 #define EIF_EXIT_C EIF_ENTER_EIFFEL
122 #define EIF_IS_IN_EIFFEL_CODE eif_is_in_eiffel_code()
123 #else
124 #define RTGC
125 #define EIF_ENTER_EIFFEL
126 #define EIF_EXIT_EIFFEL
127 #define EIF_ENTER_C
128 #define EIF_EXIT_C
129 #define EIF_IS_IN_EIFFEL_CODE 1
130 #endif
131
132 /* Function pointer call from C to Eiffel which makes sure that all arguments are correctly
133 * pushed onto stack. It takes care of the synchronization needed in a multithreaded application.
134 * EIFFEL_CALL will call Eiffel procedures `proc_ptr' with `arg_values' using prototype given by
135 * `arg_types'.
136 EIFFEL_FUNCTION_CALL will call Eiffel functions `fn_ptr' with `arg_values' using the prototype given
137 by `r_type' and `arg_types'.
138 */
139 #if defined(ISE_GC) && defined(EIF_THREADS)
140 #define EIFFEL_CALL(arg_types, proc_ptr, arg_values) \
141 { \
142 EIF_ENTER_EIFFEL; \
143 (FUNCTION_CAST(void, arg_types) proc_ptr) arg_values; \
144 RTGC; \
145 EIF_EXIT_EIFFEL; \
146 }
147 #define EIFFEL_FUNCTION_CALL(r_type, arg_types, target, fn_ptr, arg_values) \
148 {\
149 EIF_ENTER_EIFFEL; \
150 target = (FUNCTION_CAST(r_type, arg_types) fn_ptr) arg_values; \
151 RTGC; \
152 EIF_EXIT_EIFFEL; \
153 }
154 #else
155 #define EIFFEL_CALL(arg_types, proc_ptr, arg_values) \
156 (FUNCTION_CAST(void, arg_types) proc_ptr) arg_values
157
158 #define EIFFEL_FUNCTION_CALL(r_type, arg_types, target, fn_ptr, arg_values) \
159 target = (FUNCTION_CAST(r_type, arg_types) fn_ptr) arg_values
160 #endif
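/* Illustrative use of the call macros (hypothetical names; `fn_ptr' is assumed to
 * address an Eiffel function taking an EIF_REFERENCE and an EIF_INTEGER):
 *
 *   EIF_INTEGER result;
 *   EIFFEL_FUNCTION_CALL (EIF_INTEGER, (EIF_REFERENCE, EIF_INTEGER), result, fn_ptr, (obj, arg));
 *
 * In a multithreaded ISE_GC build the macro also brackets the call with
 * EIF_ENTER_EIFFEL/EIF_EXIT_EIFFEL and a possible GC synchronization (RTGC).
 */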
161
162
163 /* Macros used for allocation:
164 RTLN(x) allocates a new object of dftype 'x'
165 RTLNS(x,y,z) allocates a new routine object of dftype 'x', dtype 'y' and size 'z'
166 RTLNT(x) allocates a new tuple object of dftype 'x'
167 RTLNTS(x,n,a) allocates a new tuple object of dftype 'x', with 'n' elements and is_atomic 'a'
168 RTLNTY(x) allocates a new TYPE [like x] instance of dftype 'x'
169 RTLNSMART(x) allocates a new object of dftype 'x'
170 RTLNR(x,y,a,o,c) allocates a new routine object of type 'x' and
171 initializes it with the routine pointer 'y', the true routine pointer 'z',
172 argument tuple 'a', open map 'o' and closed map 'c'
173 RTLNC(x) creates a new non-initialized instance of 'x'.
174 RTLNSP(t,n,e,b) allocates a new special array
175 RTLB(x) allocates a new bit object of size 'x'
176 RTLX(x) allocates an expanded object (with possible invocation
177 of the creation routine) of type `x'
178 RTXB(x,y) copies bit `x' to `y'
179 RTMB(x,y) creates a bit of length 'y' bits from string value 'x'
180 RTEB(x,y) are bits `x' and `y' equal?
181 RTBU(x) boxes a basic value stored in EIF_TYPED_VALUE and returns an EIF_REFERENCE
182 */
183 #define RTLN(x) emalloc(x)
184 #define RTLNS(x,y,z) emalloc_size(x,y,z)
185 #define RTLNT(x) tuple_malloc(x)
186 #define RTLNTS(x,n,a) tuple_malloc_specific(x,n,a)
187 #define RTLNTY(x) eif_type_malloc(x)
188 #define RTLNSMART(x) smart_emalloc(x)
189 #define RTLNRW(a,b,c,d,e,f,g,h,i,j,k,l,m) rout_obj_create_wb((a),(b),(c),(d),(e),(f),(g),(h),(i),(j),(k),(l),(m))
190 #define RTLNRF(a,b,c,d,e,f,g) rout_obj_create_fl((a),(b),(c),(d),(e),(f), (g))
191 #define RTLNC(x) eclone(x)
192 #define RTLNSP2(t,f,n,e,b) special_malloc(f,t,n,e,b)
193 #define RTLNSP(t,f,n,e,b) special_malloc(t | f,n,e,b)
194 #define RTLB(x) bmalloc(x)
195 #define RTMB(x,y) makebit(x,y)
196 #define RTXB(x,y) b_copy(x,y)
197 #define RTEB(x,y) b_equal(x,y)
198 #define RTLX(x) cr_exp(x)
199 #define RTBU(x) eif_box(x)
200 #ifdef WORKBENCH
201 #define RTLXI(x) init_exp(x)
202 #else
203 #define RTLXI(x) \
204 { \
205 void *(*cp)(EIF_REFERENCE) = (void *(*) (EIF_REFERENCE)) egc_exp_create [Dtype(x)]; \
206 if (cp) cp(x); \
207 }
208 #endif
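/* Illustrative sketch of the allocation macros (assuming `dftype' and `tuple_dftype'
 * hold valid full dynamic type ids):
 *
 *   EIF_REFERENCE o = RTLN (dftype);          (plain object of that dftype)
 *   EIF_REFERENCE t = RTLNT (tuple_dftype);   (tuple object)
 *
 * Both expand to the corresponding runtime allocators (emalloc, tuple_malloc).
 */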
209
210 /* Macro used for object cloning:
211 * RTCL(x) clones 'x' and return a pointer to the cloned object
212 * RTCB(x) clones bit `x'
213 * RTRCL(x) same as RTCL, but uses a user-defined version of `copy'
214 RTCCL(x) same as RTRCL, but first checks if `x' is expanded
215 */
216 #define RTCL(x) rtclone(x)
217 #define RTCB(x) b_clone(x)
218 #define RTCCL(x) ((x && eif_is_expanded(HEADER(x)->ov_flags))? RTRCL(x): (x))
219 #ifdef WORKBENCH
220 # define RTRCL(x) ((egc_twin(x)).it_r)
221 #else
222 # define RTRCL(x) egc_twin(x)
223 #endif
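/* Illustrative sketch (hypothetical `x'): cloning a possibly expanded reference:
 *
 *   EIF_REFERENCE t = RTCCL (x);
 *
 * RTCCL returns `x' unchanged unless it refers to an expanded object, in which
 * case the user-defined `copy' is applied through RTRCL.
 */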
224
225
226 /* Macro used for object creation to get the proper creation type:
227 * RTCA(x,y) returns the dynamic type of 'x' if not void, otherwise 'y'
228 */
229 #define RTCA(x,y) ((x) == (EIF_REFERENCE) 0 ? (y) : Dftype(x))
230
231
232
233 /* Macros used for assignments:
234 * RTAG(x) is true if 'x' is an old object not remembered
235 * RTAN(x) is true if 'x' is a new object (i.e. not old)
236 * RTAM(x) memorizes 'x'
237 * RTAR(parent,source) remembers 'parent' if it is old and not remembered and 'source' is new
238 */
239 #ifdef ISE_GC
240 #define RTAG(x) ((HEADER(x)->ov_flags & (EO_OLD | EO_REM)) == EO_OLD)
241 #define RTAN(x) (!(HEADER(x)->ov_flags & EO_OLD))
242 #define RTAM(x) eremb(x)
243 #define RTAR(parent,source) check_gc_tracking(parent,source)
244 #else
245 #define RTAG(x) EIF_FALSE
246 #define RTAN(x) EIF_FALSE
247 #define RTAM(x) (x)
248 #define RTAR(parent,source)
249 #endif
250
251
252
253 /* Macros used by reverse assignments:
254 * RTRC(x,y) is true if type 'y' conforms to type 'x'
255 * RTRA(x,y) calls RTRC(x, Dftype(y)) if 'y' is not void
256 * RTRB(x,y,z,t) assigns value of basic type 't' of 'y' to 'z' if 'y' conforms to type 'x'
257 * RTRE(x,y,z) copies 'y' to 'z' if 'y' conforms to type 'x'
258 * RTRV(x,y) returns 'y' if it conforms to type 'x', void otherwise
259 * RTOB(t,x,y,z,v) assigns value of basic type 't' of 'y' to 'z' if 'y' conforms to type 'x' and sets boolean value to `v' accordingly
260 * RTOE(x,y,z,v) copies 'y' to 'z' if 'y' conforms to type 'x' and sets boolean value to `v' accordingly
261 */
262 #define RTRC(x,y) eif_gen_conf ((y), (x))
263 #define RTRA(x,y) ((y) == (EIF_REFERENCE) 0 ? 0 : RTRC((x),Dftype(y)))
264 #define RTRB(x,y,z,t) { if (RTRA((x),(y))) { z = t (y); } }
265 #define RTRE(x,y,z) { if (RTRA((x),(y))) { RTXA ((y), z); } }
266 #define RTRV(x,y) (RTRA((x),(y)) ? (y) : (EIF_REFERENCE) 0)
267 #define RTOB(t,x,y,z,v) { if (RTRA((x),(y))) { z = t (y); v = EIF_TRUE; } else { v = EIF_FALSE; } }
268 #define RTOE(x,y,z,v) { if (RTRA((x),(y))) { RTXA ((y), z); v = EIF_TRUE; } else { v = EIF_FALSE; } }
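/* Illustrative sketch of a reverse assignment (hypothetical `target_dftype' and `y'):
 *
 *   EIF_REFERENCE z = RTRV (target_dftype, y);
 *
 * `z' is `y' when Dftype(y) conforms to `target_dftype' and a void reference
 * otherwise.
 */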
269
270
271 /* Macros used for variable initialization:
272 * RTAT(x) true if type 'x' is attached
273 */
274 #define RTAT(x) (eif_is_attached_type(x))
275
276 /* Macros used for local variable management:
277 * RTLI(x) makes room on the stack for 'x' addresses
278 RTLE restores the previous stack context
279 RTLD declares the variables used by RTLI/RTLE
280 RTLR(x,y) registers `y' on the stack at position `x'
281 RTLRC(x) clears the stack entry at position `x'
282 * RTXI(x) makes room on the stack for 'x' addresses when feature has rescue
283 * RTXE restores the current chunk, in case an exception was raised
284 * RTXL saves the current local chunk in case exception is raised
285 * RTYL same as RTXL except that RTXI cannot be applied
286 * RTLXE restores the current local variables chunk, in case an exception was raised
287 * RTLXL saves the current local variables chunk in case exception is raised
288 * RTLXD declares the current local variables chunk in case exception is raised
289 */
290
291 #ifdef ISE_GC
292 #define RTLI(x) \
293 { \
294 if (l >= loc_set.st_top && l + (x) <= loc_set.st_end) \
295 loc_set.st_top += (x); \
296 else { \
297 ol = l; \
298 l = eget(x); \
299 } \
300 }
301 #define RTLE \
302 { \
303 if (ol == (EIF_REFERENCE *) 0) \
304 loc_set.st_top = l; \
305 else { \
306 eback(ol); \
307 } \
308 }
309 #define RTLD \
310 EIF_REFERENCE *l = loc_set.st_top; \
311 EIF_REFERENCE *ol = (EIF_REFERENCE *) 0
312
313 #define RTLR(x,y) l[(x)] = (EIF_REFERENCE) &y
314 #define RTLRC(x) l[(x)] = (EIF_REFERENCE) 0;
315
316 #define RTXI(x) \
317 { \
318 if (l >= loc_set.st_top && l + (x) <= loc_set.st_end) \
319 loc_set.st_top += (x); \
320 else \
321 l = eget(x); \
322 }
323 #define RTXE \
324 loc_set.st_cur = lc; \
325 if (lc) loc_set.st_end = lc->sk_end; \
326 loc_set.st_top = lt
327 #define RTYL \
328 EIF_REFERENCE * volatile lt = loc_set.st_top; \
329 struct stchunk * volatile lc = loc_set.st_cur
330 #define RTXL \
331 EIF_REFERENCE * volatile l = loc_set.st_top; \
332 RTYL
333 #define RTLXD \
334 EIF_TYPED_ADDRESS * EIF_VOLATILE lv
335 #define RTLXL \
336 lv = cop_stack.st_top
337 #define RTLXE \
338 while (cop_stack.st_top != lv) { \
339 RTLO(1); \
340 }
341
342 #else
343 #define RTLI(x)
344 #define RTLE
345 #define RTLD \
346 EIF_REFERENCE ol
347 #define RTLR(x,y)
348 #define RTXI(x)
349 #define RTXE
350 #define RTYL EIF_REFERENCE lt
351 #define RTXL EIF_REFERENCE l
352 #define RTLXD
353 #define RTLXL
354 #define RTLXE
355 #endif
356
357
358
359 /* Macro used to record once functions:
360 * RTOC calls onceset to record the address of Result (static variable)
361 */
362 #define RTOC(x) onceset()
363 #define RTOC_NEW(x) new_onceset((EIF_REFERENCE) &x);
364 #define RTOC_GLOBAL(x) globalonceset((EIF_REFERENCE) &x);
365
366 /* Macros for optimized access to once feature:
367 * RTO_CP - access to once procedure
368 * RTO_CF - access to once function
369 */
370 #define RTO_CP(succeeded,c,a) if (succeeded) { (void) a; } else { c a; }
371 #define RTO_CF(succeeded,result,c,a) ((succeeded)? ((void) a, result) : c a)
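/* Illustrative expansion (hypothetical names): for a once function `fn' whose
 * status flag is `done' and whose cached result is `res',
 *
 *   v = RTO_CF (done, res, fn, (obj));
 *
 * yields `res' directly when `done' is set and calls `fn (obj)' otherwise.
 */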
372
373 /* Service macros for once routines:
374 RTO_TRY - try to execute the routine body
375 * RTO_EXCEPT - start processing exception
376 * RTO_END_EXCEPT - stop processing exception
377 */
378
379 #define RTO_TRY \
380 { \
381 /* Declare variables for exception handling. */ \
382 struct ex_vect * exvecto; \
383 jmp_buf exenvo; \
384 /* Save stack contexts. */ \
385 RTYD; \
386 /* Record execution vector to catch exception. */ \
387 exvecto = extre (); \
388 if (!setjmp(exenvo)) { \
389 /* Set catch address. */ \
390 exvect->ex_jbuf = &exenvo; \
391 /* Update routine exception vector. */ \
392 exvect = exvecto;
393
394 #define RTO_EXCEPT \
395 /* Remove execution vector to restore */ \
396 /* previous exception catch point. */ \
397 exvect = extrl(); \
398 } else { \
399 /* Exception occurred. */
400
401 #define RTO_END_EXCEPT \
402 /* Restore stack contexts. */ \
403 RTXSC; \
404 } \
405 }
406
407 #ifdef WORKBENCH
408
409 /* Macros for once routine indexes:
410 * RTOIN - name of a variable that keeps index of a once result
411 * RTOID - declaration of index into once routine result table
412 */
413 #define RTOIN(name) CAT2(name,_index)
414
415 #define RTOID(name) static ONCE_INDEX RTOIN(name);
416
417 #else
418
419 /* Macros for once routine fields:
420 * RTOFN - name of a field for once feature with the given code index
421 */
422
423 #define RTOFN(code_index,field_name) CAT3(o,code_index,field_name)
424
425 #endif /* WORKBENCH */
426
427 /* Macros for single-threaded once routines:
428 * RTOSR - result field name (for once functions)
429 * RTOSHP - declaration of variables for procedure
430 * RTOSHF - declaration of variables for function
431 * RTOSDP - definition of variables for procedure
432 * RTOSDF - definition of variables for function
433 * RTOSC - implementation of a constant attribute body
434 * RTOSCP - optimized direct call to procedure
435 * RTOSCF - optimized direct call to function
436 * RTOSP - prologue for a single-threaded once routine
437 * RTOSE - epilogue for a single-threaded once routine
438 */
439
440 #define RTOSR(code_index) \
441 RTOFN(code_index,_result)
442
443 #define RTOSHP(code_index) \
444 extern EIF_BOOLEAN RTOFN(code_index,_done); \
445 extern EIF_OBJECT RTOFN(code_index,_failed); \
446 extern EIF_BOOLEAN RTOFN(code_index,_succeeded);
447
448 #define RTOSHF(type, code_index) \
449 RTOSHP(code_index) \
450 extern type RTOSR(code_index);
451
452 #define RTOSDP(code_index) \
453 EIF_BOOLEAN RTOFN(code_index,_done) = EIF_FALSE; \
454 EIF_OBJECT RTOFN(code_index,_failed) = NULL; \
455 EIF_BOOLEAN RTOFN(code_index,_succeeded) = EIF_FALSE;
456
457 #define RTOSDF(type, code_index) \
458 RTOSDP(code_index) \
459 type RTOSR(code_index) = (type) 0;
460
461 #define RTOSCP(n,c,a) RTO_CP(RTOFN(n,_succeeded),c,a)
462 #define RTOSCF(n,c,a) RTO_CF(RTOFN(n,_succeeded),RTOFN(n,_result),c,a)
463
464 #define RTOSP(code_index) \
465 /* Check if evaluation has succeeded. */ \
466 /* If yes, skip any other calculations. */ \
467 if (!RTOFN(code_index,_succeeded)) { \
468 /* Check if evaluation is started earlier. */ \
469 /* If yes, evaluation is completed. */ \
470 if (!RTOFN(code_index,_done)) { \
471 /* Evaluation has not been started yet. */ \
472 /* Start it now. */ \
473 RTOFN(code_index,_done) = EIF_TRUE; \
474 /* Try to execute routine body. */ \
475 RTO_TRY
476
477 #define RTOSE(code_index) \
478 /* Record successful execution result. */ \
479 RTOFN(code_index,_succeeded) = EIF_TRUE; \
480 /* Catch exception. */ \
481 RTO_EXCEPT \
482 /* Handle exception. */ \
483 /* Record exception for future use. */ \
484 RTOFN(code_index,_failed) = eif_protect(RTLA); \
485 RTO_END_EXCEPT \
486 /* Propagate the exception if any. */ \
487 if (RTOFN(code_index,_failed)) { \
488 if (eif_access(RTOFN(code_index,_failed))) { \
489 ereturn (); \
490 } \
491 } \
492 } else { \
493 /* Raise the saved exception if any. */ \
494 if (RTOFN(code_index,_failed)) { \
495 if (eif_access(RTOFN(code_index,_failed))) { \
496 oraise (eif_access(RTOFN(code_index,_failed))); \
497 } \
498 } \
499 } \
500 }
501
502 #define RTOSC(code_index, value) \
503 if (!RTOFN (code_index,_succeeded)) { \
504 RTOC_NEW (RTOSR (code_index)); \
505 RTOSR (code_index) = (value); \
506 RTOFN (code_index,_succeeded) = EIF_TRUE; \
507 } \
508 return RTOSR (code_index);
509
510 /* Macros for thread-relative once routines:
511 * RTOTS - stores index of a once routine into index variable given its code index
512 * RTOTDB, RTOUDB - declaration and initialization of variables for once function returning basic type
513 * RTOTDR, RTOUDR - declaration and initialization of variables for once function returning reference
514 * RTOTDV, RTOUDV - declaration and initialization of variables for once procedure
515 * RTOTW - stores in a list the body id of the just called once routine
516 * RTOTRB - declaration of a result macro for once function returning basic type
517 * RTOTRR - declaration of a result macro for once function returning reference
518 * RTOUC - implementation of a constant attribute
519 * RTOUCP - optimized direct call to procedure
520 * RTOUCB - optimized direct call to function returning basic type
521 * RTOUCR - optimized direct call to function returning reference type
522 * RTOTP - prologue for a thread-relative once routine
523 * RTOTE - epilogue for a thread-relative once routine
524 */
525
526 #ifdef WORKBENCH
527
528 #define RTOTS(code_index, name) \
529 RTOIN(name) = once_index (code_index);
530
531 #define RTOTDV(name) \
532 MTOT OResult = (MTOT) MTOI(RTOIN(name));
533
534 #define RTOTDB(type, name) \
535 RTOTDV(name) \
536 if (!MTOD(OResult)) { \
537 MTOP(type, OResult, (type) 0); \
538 }
539
540 #define RTOTDR(name) \
541 RTOTDV(name) \
542 if (!MTOD(OResult)) { \
543 MTOP(EIF_REFERENCE, OResult, RTOC(0)); \
544 MTOE(OResult, RTOC(0)); \
545 }
546
547 #define RTOTW(body_id)
548
549 #define RTOTC(name, body_id, v) \
550 RTOTDV(name) \
551 EIF_REFERENCE * PResult = MTOR(EIF_REFERENCE,OResult); \
552 EIF_TYPED_VALUE r; \
553 r.type = SK_REF; \
554 if (PResult) { \
555 r.it_r = *PResult; \
556 return r; \
557 } \
558 MTOP(EIF_REFERENCE, OResult, RTOC(0)); \
559 MTOE(OResult, RTOC(0)); \
560 MTOM(OResult); \
561 r.it_r = RTOTRR = v; \
562 return r;
563
564 #define RTOTOK
565
566 #elif defined EIF_THREADS
567
568 #define RTOTOK OResult->succeeded = EIF_TRUE;
569
570 #define RTOUCP(once_index,c,a) RTO_CP(MTOI(once_index)->succeeded,c,a)
571 #define RTOUCB(type,once_index,c,a) RTO_CF(MTOI(once_index)->succeeded,MTOR(type,MTOI(once_index)),c,a)
572 #define RTOUCR(once_index,c,a) RTO_CF(MTOI(once_index)->succeeded,*MTOR(EIF_REFERENCE,MTOI(once_index)),c,a)
573
574 #define RTOUDV(once_index) \
575 MTOT OResult = (MTOT) MTOI(once_index);
576
577 #define RTOUDB(type, once_index) \
578 RTOUDV(once_index) \
579 if (!MTOD(OResult)) { \
580 MTOP(type, OResult, (type) 0); \
581 }
582
583 #define RTOUDR(once_index) \
584 RTOUDV(once_index) \
585 if (!MTOD(OResult)) { \
586 MTOP(EIF_REFERENCE, OResult, RTOC(0)); \
587 MTOE(OResult, RTOC(0)); \
588 }
589
590 #define RTOUC(once_index, value) \
591 RTOUDV(once_index) \
592 EIF_REFERENCE Result; \
593 EIF_REFERENCE * PResult = MTOR(EIF_REFERENCE,OResult); \
594 if (PResult) { \
595 Result = *PResult; \
596 } \
597 else { \
598 PResult = RTOC(0); \
599 MTOP(EIF_REFERENCE,OResult,PResult); \
600 Result = (value); \
601 *PResult = Result; \
602 MTOM(OResult); \
603 RTOTOK \
604 } \
605 return Result;
606
607 #endif /* WORKBENCH */
608
609 #define RTOTRB(type) MTOR(type,OResult)
610 #define RTOTRR (*MTOR(EIF_REFERENCE,OResult))
611
612 #define RTOTP \
613 /* Check if evaluation is started earlier. */ \
614 /* If yes, evaluation is completed. */ \
615 if (!MTOD(OResult)) { \
616 /* Evaluation has not been started yet. */ \
617 /* Start it now. */ \
618 MTOM(OResult); \
619 /* Try to execute routine body. */ \
620 RTO_TRY
621
622 #define RTOTE \
623 /* Record that execution completed successfully */ \
624 RTOTOK \
625 /* Catch exception. */ \
626 RTO_EXCEPT \
627 /* Handle exception. */ \
628 /* Record exception for future use. */ \
629 MTOE(OResult, RTOC(0)); \
630 MTOEV(OResult, RTLA); \
631 RTO_END_EXCEPT \
632 /* Propagate the exception if any. */ \
633 if (MTOF(OResult)) { \
634 if (*MTOF(OResult)) { \
635 ereturn (); \
636 } \
637 } \
638 } else { \
639 /* Raise the saved exception if any.*/ \
640 if (MTOF(OResult)) { \
641 if (*MTOF(OResult)) { \
642 oraise (*MTOF(OResult)); \
643 } \
644 } \
645 }
646
647 #ifdef EIF_THREADS
648
649 /* Service macros for process-relative once routines:
650 * RTOPL - lock a mutex of a once routine
651 * RTOPLT - try to lock a mutex of a once routine
652 * RTOPLU - unlock a mutex of a once routine
653 * RTOPMBW - write memory barrier when available
654 RTOPW - wait until the thread that started once evaluation completes it
655 * RTOPLP - once prologue that is executed with locked mutex
656 * RTOPLE - once epilogue that is executed with locked mutex
657 * RTOPRE - raise previously recorded exception
658 */
659
660 #ifdef EIF_HAS_MEMORY_BARRIER
661 # define RTOPMBW EIF_MEMORY_WRITE_BARRIER
662 #else
663 # define RTOPMBW
664 #endif
665
666 #define RTOPL(mutex) \
667 EIF_ENTER_C; \
668 eif_thr_mutex_lock (mutex); \
669 EIF_EXIT_C; \
670 RTGC
671
672 #define RTOPLT(mutex) \
673 eif_thr_mutex_trylock (mutex)
674
675 #define RTOPLU(mutex) \
676 eif_thr_mutex_unlock (mutex)
677
678 #define RTOPW(mutex, thread_id) \
679 /* Once routine evaluation has been started. */ \
680 /* To wait until evaluation is completed, */ \
681 /* it's enough to lock and unlock a mutex, */ \
682 /* but then the mutex would have to be recursive. */ \
683 /* Recursive mutexes are not in POSIX standard, */ \
684 /* so they should be emulated by checking thread id. */ \
685 /* Check what thread performs evaluation. */ \
686 if (thread_id != eif_thr_thread_id()) { \
687 /* Evaluation is performed by a different thread. */ \
688 /* Wait until it completes evaluation. */ \
689 RTOPL (mutex); \
690 RTOPLU (mutex); \
691 }
692
693 #define RTOPLP(started, thread_id) \
694 /* Check if some thread started evaluation earlier. */ \
695 /* If yes, evaluation is completed. */ \
696 if (!started) { \
697 /* Evaluation has not been started yet. */ \
698 /* Record thread id and start evaluation. */ \
699 thread_id = eif_thr_thread_id(); \
700 started = EIF_TRUE; \
701 /* Try to execute routine body. */ \
702 RTO_TRY
703
704 #define RTOPLE(completed,failed,thread_id) \
705 /* Catch exception. */ \
706 RTO_EXCEPT \
707 /* Handle exception. */ \
708 /* Record exception for future use. */ \
709 RTOC_GLOBAL(failed); \
710 failed = RTLA; \
711 RTO_END_EXCEPT \
712 /* Clear field that holds locking thread id. */ \
713 thread_id = NULL; \
714 /* Ensure memory is flushed (if required). */ \
715 RTOPMBW; \
716 /* Mark evaluation as completed. */ \
717 completed = EIF_TRUE; \
718 }
719
720 #define RTOPRE(failed) \
721 if (failed) { \
722 oraise (failed); \
723 }
724
725 #ifdef EIF_HAS_MEMORY_BARRIER
726
727 # define RTOFP(started, completed, mutex, thread_id) \
728 EIF_MEMORY_READ_BARRIER; \
729 if (!completed) { \
730 /* Once evaluation is not completed yet. */ \
731 /* Check whether once evaluation has been started. */ \
732 if (started) { \
733 /* Evaluation has been started. */ \
734 /* Let it complete. */ \
735 RTOPW(mutex, thread_id); \
736 } \
737 else { \
738 /* Current thread has not started evaluation. */ \
739 /* It's safe to lock a mutex. */ \
740 RTOPL (mutex); \
741 /* Use thread-safe prologue. */ \
742 RTOPLP (started, thread_id);
743
744 # define RTOFE(completed, failed, mutex, thread_id) \
745 /* Use thread-safe epilogue. */ \
746 RTOPLE (completed, failed, thread_id); \
747 /* Unlock mutex. */ \
748 RTOPLU (mutex); \
749 /* Propagate the exception if any. */ \
750 if (failed) { \
751 ereturn (); \
752 } \
753 } \
754 } else { \
755 /* Raise the saved exception if any. */ \
756 RTOPRE(failed); \
757 }
758
759 #else /* !defined(EIF_HAS_MEMORY_BARRIER) */
760
761 # define RTOFP(started, completed, mutex, thread_id) \
762 /* Try to lock a mutex. */ \
763 if (RTOPLT (mutex)) { \
764 /* Mutex has been locked. */ \
765 /* Check if once evaluation has been completed. */ \
766 if (!completed) { \
767 /* Evaluation is not completed. */ \
768 /* Use thread-safe prologue. */ \
769 RTOPLP (started, thread_id);
770
771 # define RTOFE(completed, failed, mutex, thread_id) \
772 /* Use thread-safe epilogue. */ \
773 RTOPLE (completed, failed, thread_id); \
774 /* Unlock mutex. */ \
775 RTOPLU (mutex); \
776 /* Propagate the exception if any.*/ \
777 if (failed) { \
778 ereturn (); \
779 } \
780 } else { \
781 /* Unlock mutex. */ \
782 RTOPLU (mutex); \
783 /* Raise the saved exception if any.*/ \
784 RTOPRE(failed); \
785 } \
786 } \
787 else { \
788 /* Mutex cannot be locked. */ \
789 /* Evaluation has been started. */ \
790 /* Let it complete. */ \
791 RTOPW (mutex, thread_id); \
792 } \
793
794 #endif /* EIF_HAS_MEMORY_BARRIER */
795
796 #ifdef WORKBENCH
797 /* Main macros for process-relative once routines in workbench mode:
798 * RTOQS - stores index of a once routine into index variable given its code index
799 */
800
801 #define RTOQS(code_index, name) \
802 RTOIN(name) = process_once_index (code_index);
803
804 #define RTOQDV(name) \
805 EIF_process_once_value_t * POResult = \
806 EIF_process_once_values + RTOIN(name); \
807 MTOT OResult = &(POResult -> value); \
808 MTOE(OResult, &(POResult -> exception));
809
810 #define RTOQDB(type, name) \
811 RTOQDV(name)
812
813 #define RTOQDR(name) \
814 RTOQDV(name) \
815 MTOP(EIF_REFERENCE, OResult, &(POResult -> reference)); \
816
817 #define RTOQRB(type) MTOR(type,OResult)
818 #define RTOQRR (*MTOR(EIF_REFERENCE,OResult))
819
820 #define RTOQP \
821 RTOFP ( \
822 POResult -> value.done, \
823 POResult -> completed, \
824 POResult -> mutex, \
825 POResult -> thread_id \
826 )
827
828 #define RTOQE \
829 RTOFE ( \
830 POResult -> completed, \
831 *(POResult -> value.exception), \
832 POResult -> mutex, \
833 POResult -> thread_id \
834 )
835
836 #else
837
838 /* Main macros for process-relative once routines in finalized mode:
839 * RTOPH - declaration of variables (used to refer to the variables defined by RTOPD)
840 * RTOPD - definition of variables
841 * RTOPI - initialization of variables
842 * RTOPP - prologue
843 * RTOPE - epilogue */
844
845 #define RTOPR(code_index) \
846 RTOFN(code_index,_result)
847
848 #define RTOPHP(code_index) \
849 extern EIF_BOOLEAN RTOFN(code_index,_started); \
850 extern EIF_BOOLEAN RTOFN(code_index,_completed); \
851 extern EIF_REFERENCE RTOFN(code_index,_failed); \
852 extern EIF_MUTEX_TYPE * RTOFN(code_index,_mutex); \
853 extern EIF_POINTER RTOFN(code_index,_thread_id);
854
855 #define RTOPHF(type, code_index) \
856 RTOPHP(code_index) \
857 extern type RTOPR(code_index);
858
859 #define RTOPDP(code_index) \
860 volatile EIF_BOOLEAN RTOFN(code_index,_started) = EIF_FALSE; \
861 volatile EIF_BOOLEAN RTOFN(code_index,_completed) = EIF_FALSE; \
862 volatile EIF_REFERENCE RTOFN(code_index,_failed) = NULL; \
863 volatile EIF_MUTEX_TYPE * RTOFN(code_index,_mutex) = NULL; \
864 volatile EIF_POINTER RTOFN(code_index,_thread_id) = NULL;
865
866 #define RTOPDF(type, code_index) \
867 RTOPDP(code_index) \
868 volatile type RTOPR(code_index) = (type) 0;
869
870 #define RTOPI(code_index) \
871 RTOFN(code_index,_mutex) = eif_thr_mutex_create ();
872
873 #define RTOPP(code_index) \
874 RTOFP ( \
875 RTOFN(code_index,_started), \
876 RTOFN(code_index,_completed), \
877 RTOFN(code_index,_mutex), \
878 RTOFN(code_index,_thread_id) \
879 )
880
881 #define RTOPE(code_index) \
882 RTOFE ( \
883 RTOFN(code_index,_completed), \
884 RTOFN(code_index,_failed), \
885 RTOFN(code_index,_mutex), \
886 RTOFN(code_index,_thread_id) \
887 )
888
889 #ifdef EIF_HAS_MEMORY_BARRIER
890
891 #define RTOPCP(code_index,c,a) \
892 EIF_MEMORY_READ_BARRIER; \
893 RTO_CP( \
894 RTOFN(code_index,_completed) && (!RTOFN(code_index,_failed)), \
895 c, \
896 a \
897 )
898
899 #define RTOPCF(code_index,c,a) \
900 ((void) EIF_MEMORY_READ_BARRIER, \
901 RTO_CF( \
902 RTOFN(code_index,_completed) && (!RTOFN(code_index,_failed)), \
903 RTOPR(code_index), \
904 c, \
905 a \
906 ))
907
908 #else
909
910 #define RTOPCP(code_index,c,a) {c a;}
911 #define RTOPCF(code_index,c,a) (c a)
912
913 #endif /* EIF_HAS_MEMORY_BARRIER */
914
915 #endif /* WORKBENCH */
916
917 #endif /* EIF_THREADS */
918
919
920 /* Macro used for object information:
921 * Dtype: Dynamic type of object. The name is not RTDT for historical reasons.
922 * Dftype: Full dynamic type of object - for generic conformance
923 * RT_DFS(x,y): Set dynamic type and full dynamic type `y' to overhead `x'.
924 * To_dtype: Convert a Full dynamic type to a dynamic type
925 * RTCDT: Compute `dtype' of Current
926 * RTCDD: Declare `dtype' for later computation of Dtype of Current.
927 */
928
929 #define Dftype(x) (HEADER(x)->ov_dftype)
930 #define Dtype(x) (HEADER(x)->ov_dtype)
931 #define To_dtype(t) (eif_cid_map[t])
932
933 #define RT_DFS(x,y) ((x)->ov_dftype = y, (x)->ov_dtype = To_dtype(y))
934 #define RTCDT EIF_TYPE_INDEX EIF_VOLATILE dtype = Dtype(Current)
935 #define RTCDD EIF_TYPE_INDEX EIF_VOLATILE dtype
936 #define RTCFDT EIF_TYPE_INDEX EIF_VOLATILE dftype = Dftype(Current)
937 #define RTCFDD EIF_TYPE_INDEX EIF_VOLATILE dftype
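/* Illustrative sketch (hypothetical `obj'): querying type information:
 *
 *   EIF_TYPE_INDEX dt  = Dtype (obj);     (dynamic type, read from the object header)
 *   EIF_TYPE_INDEX dft = Dftype (obj);    (full dynamic type, for generic conformance)
 *
 * To_dtype(dft) maps a full dynamic type back to its dynamic type via eif_cid_map.
 */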
938
939 /* If calls on void targets are detected, we use RTCV to perform the check. Unlike the workbench
940 * mode, we won't know the message of the call as it would require too much data to be generated. */
941 #if defined(WORKBENCH) || !defined(EIF_NO_RTCV)
942 #define RTCV(x) eif_check_call_on_void_target(x)
943 #else
944 #define RTCV(x) (x)
945 #endif
946
947 /* Detect catcall at runtime for argument 'o' at position 'i' for feature 'f' in dtype 'd'
948 * and expected dftype 't'. */
949 #define RTCC(o,d,f,i,t) eif_check_catcall_at_runtime(o,d,f,i,t)
950
951
952 /* Macros for assertion checking:
953 * RTCT(t,x) next assertion has tag 't' and is of type 'x'
954 * RTCK signals successful end of last assertion check
955 * RTJB goto body
956 * RTCF signals failure during last assertion check
957 * RTTE tests assertion
958 * RTIT(t,x) next invariant assertion has tag 't' and is of type 'x'
959 RTVR(x,y) checks if the call of feature 'y' on 'x' is done on a void reference
960 */
961 #define RTCT0(t,x) exasrt(t, x)
962 #define RTIT0(t,x) exinv(t, x)
963 #define RTCK0 expop(&eif_stack)
964 #define RTCF0 eviol()
965
966 #define RTCT(t,x) RTCT0(t,x); in_assertion = ~0
967 #define RTIT(t,x) RTIT0(t,x); in_assertion = ~0
968 #define RTCK in_assertion = 0; RTCK0
969 #define RTCF in_assertion = 0; RTCF0
970 #define RTTE(x,y) if (!(x)) goto y
971 #define RTJB goto body
972 #ifdef WORKBENCH
973 #define RTVR(x,y) if ((x) == (EIF_REFERENCE) 0) eraise(y, EN_VOID)
974 #endif
975
976 /* Obsolete for backward compatibility */
977 #define RTCS(x) RTCT(NULL,x)
978 #define RTIS(x) RTIT(NULL,x)
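/* Rough sketch of how generated code might chain the assertion macros above
 * (the tag, kind and label names are hypothetical):
 *
 *   RTCT ("count_positive", assertion_kind);
 *   RTTE (count > 0, assertion_failed);
 *   RTCK;
 *
 * i.e. push the assertion record, test the condition (RTTE branches to the
 * failure label, where RTCF signals the violation, when it does not hold),
 * and pop the record with RTCK on success.
 */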
979
980
981
982 /* Macros for exception handling:
983 * RTEX declares the exception vector variable for current routine
984 * RTED declares the setjmp buffer, saved_assertion and saved_except
985 * RTES issues the setjmp call for remote control transfer via longjmp
986 * RTEJ sets the exception handling mechanism (must appear only once)
987 * RTEA(x,y,z) signal entry in routine 'x', origin 'y', object ID 'z'
988 * RTEAA(x,y,z,i,j,b) signal entry in routine 'x', origin 'y', object ID 'z', locnum 'i', argnum 'j', body id 'b'
989 * RTEAINV(x,y,z,i,b) signal entry in _invariant routine 'x', origin 'y', object ID 'z', locnum 'i', body id 'b'
990 * RTEV signals entry in routine (simply gets an execution vector)
991 * RTET(t,x) raises an exception tagged 't' whose code is 'x'
992 * RTEC(x) raises an exception whose code is 'x'
993 * RTEE exits the routine by removing the execution vector from stack
994 * RTER retries the routine
995 * RTEU enters in rescue clause
996 * RTEF ends the rescue clause
997 * RTXD declares the variables used to save the run-time stacks context
998 * RTYD same as RTXD except that RTXI cannot be used
999 * RTXSC resynchronizes the run-time stacks in a pseudo rescue clause in C
1000 * RTXS(x) resynchronizes the run-time stacks in a rescue clause
1001 * RTEOK ends a routine with a rescue clause by cleaning the trace stack
1002 RTMD(x) Stops monitoring (profile or tracing) for routine context 'x' (i.e. normal vs external)
1003 * RTLA Last exception from EXCEPTION_MANAGER
1004 * RTCO(x) Check if x is NULL, if not raise an OLD_VIOLATION
1005 * RTE_T start try block (for body)
1006 * RTE_E start except block (for rescue)
1007 * RTE_EE end except block
1008 * RTE_OT start try block of old expression evaluation
1009 * RTE_O end of try block of old expression evaluation
1010 * RTE_OE local rescue, recording possible exception for later use
1011 * RTE_OTD Declare old stack vector and push it.
1012 * RTE_OP Pop old stack vector.
1013 * RTDBGE,RTDBGL are declared in eif_debug.h
1014 */
1015 #define RTED jmp_buf exenv; int EIF_VOLATILE saved_assertion = in_assertion
1016 #define RTES if (setjmp(exenv)) goto rescue
1017 #define RTEA(x,y,z) exvect = new_exset(MTC x, y, z, 0, 0, 0)
1018 #define RTEV exvect = exft()
1019 #define RTET(t,x) eraise(t,x)
1020 #define RTEC(x) RTET((EIF_REFERENCE) 0,x)
1021 #define RTSO check_options_stop(0)
1022 #define RTMD(x) check_options_stop(x)
1023 #define RTLA last_exception()
1024 #define RTCO(x) chk_old(x)
1025
1026 #ifdef WORKBENCH
1027 #define RTEX struct ex_vect * EIF_VOLATILE exvect; uint32 EIF_VOLATILE db_cstack
1028 #define RTEAA(x,y,z,i,j,b) exvect = new_exset(x, y, z,i,j,b); db_cstack = ++d_data.db_callstack_depth;
1029 #define RTDBGEAA(y,z,b) RTDBGE(y,b,z,db_cstack);
1030 #define RTDBGLE RTDBGL(exvect->ex_orig,exvect->ex_bodyid,exvect->ex_id,db_cstack);
1031
1032 #define RTEE d_data.db_callstack_depth = --db_cstack; expop(&eif_stack)
1033 #define RTEOK d_data.db_callstack_depth = --db_cstack; exok ()
1034
1035 #define RTEJ current_call_level = trace_call_level; \
1036 if (prof_stack) saved_prof_top = prof_stack->st_top; \
1037 start: exvect->ex_jbuf = &exenv; RTES
1038
1039 #define RTEU d_data.db_callstack_depth = db_cstack; exresc(MTC exvect); \
1040 RTDBGR(exvect->ex_orig,exvect->ex_bodyid,exvect->ex_id,db_cstack);
1041
1042 #define RTE_T \
1043 current_call_level = trace_call_level; \
1044 if (prof_stack) saved_prof_top = prof_stack->st_top; \
1045 saved_except = RTLA; \
1046 start: exvect->ex_jbuf = &exenv; \
1047 if (!setjmp(exenv)) {
1048
1049 #else
1050 #define RTEX struct ex_vect * EIF_VOLATILE exvect
1051 #define RTEAA(x,y,z,i,j,b) exvect = new_exset(x, y, z, 0, 0, 0)
1052 #define RTDBGEAA(y,z,b)
1053 #define RTEE expop(&eif_stack)
1054 #define RTEOK exok ()
1055 #define RTEJ start: exvect->ex_jbuf = &exenv; RTES
1056 #define RTEU exresc(MTC exvect)
1057
1058 #define RTE_T \
1059 saved_except = RTLA; \
1060 start: exvect->ex_jbuf = &exenv; \
1061 if (!setjmp(exenv)) {
1062
1063 #endif
1064
1065 #define RTEAINV(x,y,z,i,b) RTEAA(x,y,z,i,0,b); exvect->ex_is_invariant = 1; /* argnum = 0 for _invariant */
1066
1067 #define RTER in_assertion = saved_assertion; \
1068 exvect = exret(exvect); goto start
1069 #define RTEF exfail()
1070 #define RTXS(x) RTXSC; RTXI(x)
1071 #define RTXSC RTXE; RTHS; RTLS
1072 #define RTXD RTXL; RTXH; RTXLS
1073 #define RTYD RTYL; RTXH; RTXLS
1074
1075 #define RTE_E \
1076 } else { \
1077 RTEU;
1078 #define RTE_EE \
1079 RTEF; \
1080 } \
1081 set_last_exception (saved_except);
1082
1083
1084 /* new debug */
1085 #ifdef WORKBENCH
1086 #define RTLU(x,y) insert_local_var (x, (void *) y)
1087 #define RTLO(n) clean_local_vars (n)
1088 #define RTHOOK(n) dstop (exvect, n);
1089 #define RTNHOOK(n,m) dstop_nested (exvect, n, m);
1090 #else
1091 #define RTLU(x,y)
1092 #define RTLO(n)
1093 #define RTHOOK(n) exvect->ex_linenum = n; exvect->ex_bpnested = 0;
1094 #define RTNHOOK(n,m) exvect->ex_bpnested = m;
1095 #endif
1096
1097 /* Old expression evaluation */
1098 #define RTE_OT { \
1099 RTE_OTD; \
1100 if (!setjmp(exenv_o)) {
1101 #define RTE_O \
1102 RTE_OP; \
1103 } else {
1104 #define RTE_OE \
1105 } \
1106 }
1107
1108 #define RTE_OTD \
1109 jmp_buf exenv_o; \
1110 struct ex_vect * EIF_VOLATILE exvect_o; \
1111 exvect_o = exold (); \
1112 exvect_o->ex_jbuf = &exenv_o
1113 #define RTE_OP \
1114 expop(&eif_stack)
1115
1116 /* Accessing of bits in a bit field is done via macros.
1117 * Bits are stored from left to right. If the size of an int is I (in bits),
1118 * then bit "n" is in the n/I th int at the position n%I.
1119 * RTBI(b,n) accesses bit 'n' in the bit field 'b'
1120 * RTBS(b,n) sets bit 'n' to 1 in the bit field 'b'
1121 * RTBR(b,n) resets bit 'n' to 0 in the bit field 'b'
1122 */
1123 #define RTBI(b,n) \
1124 (((struct bit *) (b))->b_value[(n)/BITLONG] & (1<<((n)%BITLONG)))
1125 #define RTBS(b,n) \
1126 (((struct bit *) (b))->b_value[(n)/BITLONG] |= (1<<((n)%BITLONG)))
1127 #define RTBR(b,n) \
1128 (((struct bit *) (b))->b_value[(n)/BITLONG] &= ~(1<<((n)%BITLONG)))
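/* Illustrative use of the bit-field macros (hypothetical bit object `b'):
 *
 *   RTBS (b, 5);                (set bit 5)
 *   if (RTBI (b, 5)) { ... }    (non-zero when bit 5 is set)
 *   RTBR (b, 5);                (reset bit 5 to 0)
 */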
1129
1130
1131
1132 /* Hector protection for external calls:
1133 * RTHP(x) protects 'x' returns Hector indirection pointer
1134 * RTHF(x) removes 'x' topmost entries from Hector stack
1135 * RTXH saves hector's stack context in case an exception occurs
1136 * RTHS resynchronizes the hector stack by restoring saved context
1137 */
1138 #ifdef ISE_GC
1139 #define RTHP(x) hrecord(x)
1140 #define RTHF(x) epop(&hec_stack, x)
1141 #define RTXH \
1142 EIF_REFERENCE * volatile ht = hec_stack.st_top; \
1143 struct stchunk * volatile hc = hec_stack.st_cur
1144 #define RTHS \
1145 if (ht){ \
1146 hec_stack.st_cur = hc; \
1147 if (hc) hec_stack.st_end = hc->sk_end; \
1148 hec_stack.st_top = ht; \
1149 }else if (hec_stack.st_top) { \
1150 hec_stack.st_cur = hec_stack.st_hd; \
1151 hec_stack.st_top = hec_stack.st_cur->sk_arena; \
1152 hec_stack.st_end = hec_stack.st_cur->sk_end; \
1153 }
1154 #else
1155 #define RTHP(x) (x)
1156 #define RTHF(x)
1157 #define RTXH \
1158 EIF_REFERENCE ht
1159 #define RTHS
1160 #endif
1161
1162 /* Loc stack protection:
1163 * RTXLS saves loc_stack context in case an exception occurs
1164 * RTLS resynchronizes the loc_stack by restoring saved context
1165 */
1166 #ifdef ISE_GC
1167 #define RTXLS \
1168 EIF_REFERENCE * volatile lst = loc_stack.st_top; \
1169 struct stchunk * volatile lsc = loc_stack.st_cur
1170 #define RTLS \
1171 if (lst){ \
1172 loc_stack.st_cur = lsc; \
1173 if (lsc) loc_stack.st_end = lsc->sk_end; \
1174 loc_stack.st_top = lst; \
1175 }else if (loc_stack.st_top) { \
1176 loc_stack.st_cur = loc_stack.st_hd; \
1177 loc_stack.st_top = loc_stack.st_cur->sk_arena; \
1178 loc_stack.st_end = loc_stack.st_cur->sk_end; \
1179 }
1180 #else
1181 #define RTXLS \
1182 EIF_REFERENCE lst
1183 #define RTLS
1184 #endif
1185
1186
1187
1188 /* Other macros used to handle specific needs:
1189 * RTMS(s) creates an Eiffel string from a C manifest string s.
1190 * RTMS_EX(s,c) creates an Eiffel string from a C manifest string s of length c.
1191 * RTMS_EX_H(s,c,h) creates an Eiffel string from a C manifest string s of length c and hash-code h.
1192 * RTMS32_EX_H(s,c,h) creates an STRING_32 from a C manifest string s of length c and hash-code h.
1193 * RTMS_EX_O(s,c,h) creates an Eiffel string in heap for old objects from a C manifest string s of length c and hash-code h.
1194 * RTOMS(b,n) a value of a once manifest string object for routine body index `b' and number `n'.
1195 RTDOMS(b,m) declares a field to store once manifest string objects for routine body index `b' and number `m' of such objects.
1196 * RTEOMS(b,m) "extern" reference to the field declared by RTDOMS.
1197 * RTAOMS(b,m) allocates memory to store at least `m' once manifest strings for routine body index `b'.
1198 RTPOMS(b,n,s,c,h) stores a new once manifest string object of value `s', length `c' and hash-code `h' for body index `b' and number `n' if such an object is not already created.
1199 * RTCOMS(r,b,n,s,c,h) does the same as RTPOMS, but also puts the corresponding object into `r'.
1200 * RTPOF(p,o) returns the C pointer of the address p + o where p represents a C pointer.
1201 * RTST(c,d,i,n) creates an Eiffel ARRAY[ANY] (for strip).
1202 * RTXA(x,y) copies 'x' into expanded 'y' with exception if 'x' is void.
1203 * RTEQ(x,y) returns true if 'x' = 'y'
1204 * RTOF(x) returns the offset of expanded 'x' within enclosing object
1205 * RTEO(x) returns the address of the enclosing object for expanded 'x'
1206 */
1207 #define RTMS(s) makestr_with_hash(s,strlen(s),0)
1208 #define RTMS_EX(s,c) makestr_with_hash(s,c,0)
1209 #define RTMS_EX_H(s,c,h) makestr_with_hash(s,c,h)
1210 #define RTMS_EX_O(s,c,h) makestr_with_hash_as_old(s,c,h)
1211
1212 #define RTMS32(s) makestr32_with_hash(s,strlen(s),0)
1213 #define RTMS32_EX(s,c) makestr32_with_hash(s,c,0)
1214 #define RTMS32_EX_H(s,c,h) makestr32_with_hash(s,c,h)
1215 #define RTMS32_EX_O(s,c,h) makestr32_with_hash_as_old(s,c,h)
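/* Illustrative sketch: building Eiffel strings from C literals (hypothetical names):
 *
 *   EIF_REFERENCE s1 = RTMS ("plain C string");
 *   EIF_REFERENCE s2 = RTMS_EX ("with explicit length", 20);
 *
 * Both expand to makestr_with_hash; RTMS computes the length with strlen and
 * passes 0 as the hash-code.
 */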
1216
1217 #if defined(WORKBENCH) || defined(EIF_THREADS)
1218 #define RTOMS(b,n) (EIF_oms[(b)][(n)])
1219 #define RTAOMS(b,m) \
1220 { \
1221 EIF_REFERENCE ** p; \
1222 p = &(EIF_oms[(b)]); \
1223 if (!(*p)) { \
1224 EIF_REFERENCE * pm; \
1225 pm = (EIF_REFERENCE *) eif_calloc (m, sizeof (EIF_REFERENCE *)); \
1226 if (!pm) { \
1227 enomem(); \
1228 } \
1229 *p = pm; \
1230 } \
1231 }
1232 #define RTCOMS(r,b,n,s,c,h) \
1233 { \
1234 EIF_REFERENCE * rsp; \
1235 EIF_REFERENCE rs; \
1236 rsp = &RTOMS(b,n); \
1237 rs = *rsp; \
1238 if (!rs) { \
1239 register_oms (rsp); \
1240 rs = RTMS_EX_O(s,c,h); \
1241 *rsp = rs; \
1242 } \
1243 r = rs; \
1244 }
1245 #define RTCOMS32(r,b,n,s,c,h) \
1246 { \
1247 EIF_REFERENCE * rsp; \
1248 EIF_REFERENCE rs; \
1249 rsp = &RTOMS(b,n); \
1250 rs = *rsp; \
1251 if (!rs) { \
1252 register_oms (rsp); \
1253 rs = RTMS32_EX_O(s,c,h); \
1254 *rsp = rs; \
1255 } \
1256 r = rs; \
1257 }
1258 #else
1259 #define RTOMS(b,n) CAT2(EIF_oms_,b) [n]
1260 #define RTDOMS(b,m) EIF_REFERENCE RTOMS(b,m)
1261 #define RTEOMS(b,m) extern RTDOMS(b,m)
1262 #define RTPOMS(b,n,s,c,h) \
1263 { \
1264 EIF_REFERENCE * rsp; \
1265 rsp = &RTOMS(b,n); \
1266 if (!(*rsp)) { \
1267 register_oms (rsp); \
1268 *rsp = RTMS_EX_O(s,c,h); \
1269 } \
1270 }
1271 #define RTPOMS32(b,n,s,c,h) \
1272 { \
1273 EIF_REFERENCE * rsp; \
1274 rsp = &RTOMS(b,n); \
1275 if (!(*rsp)) { \
1276 register_oms (rsp); \
1277 *rsp = RTMS32_EX_O(s,c,h); \
1278 } \
1279 }
1280 #endif
1281
1282 #define RTPOF(p,o) (EIF_POINTER)((EIF_POINTER *)(((char *)(p))+(o)))
1283 #define RTST(c,d,i,n) striparr(c,d,i,n);
1284 #define RTXA(x,y) eif_xcopy(x, y)
1285 #define RTEQ(x,y) eif_xequal((x),(y))
1286 #define RTCEQ(x,y) (((x) && eif_is_boxed_expanded(HEADER(x)->ov_flags) && (y) && eif_is_boxed_expanded(HEADER(y)->ov_flags) && eif_gen_conf(Dftype(x), Dftype(y)))? eif_xequal((x),(y)): (x)==(y))
1287 #define RTOF(x) (HEADER(x)->ov_size & B_SIZE)
1288 #define RTEO(x) ((x) - RTOF(x))
1289
1290
1291 /* Macros for invariant check.
1292 * RTSN saves global variable 'nstcall' within C stack
1293 * RTIV(x,y) checks invariant before call on object 'x' if good flags 'y'
1294 * RTVI(x,y) checks invariant after call on object 'x' if good flags 'y'
1295 * RTCI(x) checks invariant after creation call on object 'x'
1296 */
1297 #define RTSN int EIF_VOLATILE is_nested = nstcall
1298 #define RTIV(x,y) if ((is_nested > 0) && ((y) & CK_INVARIANT)) chkinv(MTC x,0)
1299 #define RTVI(x,y) if ((is_nested != 0) && ((y) & CK_INVARIANT)) chkinv(MTC x,1)
1300
1301 /* To be removed */
1302 #define RTIV2(x,y) if (is_nested && ((y) & CK_INVARIANT)) chkinv(MTC x,0)
1303 #define RTVI2(x,y) if (is_nested && ((y) & CK_INVARIANT)) chkinv(MTC x,1)
1304 #define RTCI2(x) chkcinv(MTC x)
1305
1306 #ifndef EIF_THREADS
1307 RT_LNK int16 caller_assertion_level; /*Saves information about the assertion level of the caller*/
1308 #endif
1309
1310
1311 /* Macros to cache assertion level in generated C routine.
1312 * RTDA declares integer used to save the assertion level
1313 * RTAL is the access to the saved assertion level variable.
1314 * RTAC Checks the assertion level of the caller.
1315 * RTSC saves assertion level of the current feature.
1316 * RTRS restores the caller_assertion_level.
1317 * WASC(x) Assertion level.
1318 * RTSA(x) gets the option settings for dynamic type 'x'
1319 * RTME(x,y) Starts monitoring (profile or tracing) for dynamic type 'x' for routine context 'y' (i.e. normal vs external)
1320 */
1321
1322 #define RTDA struct eif_opt * EIF_VOLATILE opt; \
1323 int16 saved_caller_assertion_level = caller_assertion_level
1324 #define RTAL (~in_assertion & opt->assert_level)
1325 #define RTAC (~in_assertion & saved_caller_assertion_level)
1326 #define RTSC caller_assertion_level = RTAL & CK_SUP_REQUIRE
1327 #define RTRS caller_assertion_level = saved_caller_assertion_level
1328 #define WASC(x) eoption[x].assert_level
1329 #define RTSA(x) opt = eoption + x;
1330 #ifdef WORKBENCH
1331 #define RTME(x,y) check_options_start(opt, x, y)
1332 #endif
1333
1334 /*
1335 * Macros for SCOOP
1336 */
1337
1338 // Define RTS_SCP_CAPABLE for use by eplug to determine whether SCOOP can be initialized.
1339 #ifndef RTS_SCP_CAPABLE
1340 #ifdef EIF_THREADS
1341 #define RTS_SCP_CAPABLE 1
1342 #else
1343 #define RTS_SCP_CAPABLE 0
1344 #endif
1345 #endif
1346
1347 #define scoop_task_assign_processor 1
1348 #define scoop_task_free_processor 2
1349 #define scoop_task_start_processor_loop 3
1350 #define scoop_task_signify_start_of_new_chain 4
1351 #define scoop_task_signify_end_of_new_chain 5
1352 #define scoop_task_add_supplier_to_request_chain 6
1353 #define scoop_task_wait_for_supplier_processor_locks 7
1354 #define scoop_task_add_call 8
1355 #define scoop_task_add_synchronous_call 9
1356 #define scoop_task_wait_for_processor_redundancy 10
1357 #define scoop_task_add_processor_reference 11
1358 #define scoop_task_remove_processor_reference 12
1359 #define scoop_task_check_uncontrolled 13
1360
1361 #ifdef WORKBENCH
1362 #define RTS_TCB(t,c,s,a) \
1363 { \
1364 EIF_TYPED_VALUE xt,xc,xs,xa; \
1365 xt.it_i1 = t; \
1366 xt.type = SK_INT8; \
1367 xc.it_i4 = c; \
1368 xc.type = SK_INT32; \
1369 xs.it_i4 = s; \
1370 xs.type = SK_INT32; \
1371 xa.it_p = a; \
1372 xa.type = SK_POINTER; \
1373 (egc_scoop_manager_task_callback)(scp_mnger,xt,xc,xs,xa); \
1374 }
1375 #else
1376 #define RTS_TCB(t,c,s,a) (egc_scoop_manager_task_callback)(scp_mnger,t,c,s,a);
1377 #endif
1378 #define RTS_PID(o) HEADER(o)->ov_pid
1379
1380 /*
1381 * Object status:
1382 * EIF_IS_DIFFERENT_PROCESSOR (o1, o2) - tells if o1 and o2 run on different processors
1383 * RTS_OS(c,o) - tells if object o is separate relative to object c (i.e. they run on different processors)
1384 * RTS_OU(c,o) - tells if object o is uncontrolled by the processor associated with object c
1385 */
1386
1387 #define EIF_IS_DIFFERENT_PROCESSOR(o1,o2) (RTS_PID (o1) != RTS_PID (o2))
1388 #define RTS_OS(c,o) (RTS_PID (c) != RTS_PID (o))
1389 #define RTS_OU(c,o) ((o) && (scp_mnger) && EIF_TEST (eif_is_uncontrolled (RTS_PID (c), RTS_PID (o))))
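/* Illustrative sketch (hypothetical `obj'): checking whether an object lives on
 * another processor before issuing a call:
 *
 *   if (RTS_OS (Current, obj)) {
 *           (the call has to be logged as a separate call, see RTS_C* below)
 *   }
 *
 * RTS_OS simply compares the processor ids (RTS_PID) of the two objects.
 */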
1390
1391 /*
1392 * Processor:
1393 * RTS_PA(o) - associate a fresh processor with an object o
1394 */
1395 #define RTS_PA(o) \
1396 { \
1397 EIF_TYPED_VALUE pid; \
1398 pid.it_i4 = 0; \
1399 pid.type = SK_INT32; \
1400 RTS_TCB(scoop_task_assign_processor,RTS_PID(o),0,&pid); \
1401 RTS_PID(o) = (EIF_SCP_PID) pid.it_i4; \
1402 }
1403
1404 /*
1405 * Request chain:
1406 * RTS_RC(p) - create a request chain for the processor identified by object p
1407 * RTS_RD(p) - delete a request chain for the processor identified by object p
1408 * RTS_RF(p) - same as RTS_RD except that it is called when wait condition fails
1409 * RTS_RS(p,s) - add a supplier s to the request chain of the processor identified by object p
1410 * RTS_RW(p) - wait until all the suppliers are ready in the request chain of the processor identified by object p
1411 * The only valid sequence of calls is
1412 * RTS_RC (RTS_RS)* [RTS_RW] RTS_RD
1413 */
1414 #define RTS_RC(p) RTS_TCB(scoop_task_signify_start_of_new_chain,RTS_PID(p),0,NULL)
1415 #define RTS_RD(p) RTS_TCB(scoop_task_signify_end_of_new_chain,RTS_PID(p),RTS_PID(p),NULL)
1416 #define RTS_RF(p) RTS_TCB(scoop_task_signify_end_of_new_chain,RTS_PID(p),-1,NULL)
1417 #define RTS_RS(p,s) RTS_TCB(scoop_task_add_supplier_to_request_chain,RTS_PID(p),RTS_PID(s),NULL)
1418 #define RTS_RW(p) RTS_TCB(scoop_task_wait_for_supplier_processor_locks,RTS_PID(p),0,NULL)
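/* Illustrative sketch of the documented sequence RTS_RC (RTS_RS)* [RTS_RW] RTS_RD
 * for a client object `c' with two suppliers `s1' and `s2' (hypothetical names):
 *
 *   RTS_RC (c);
 *   RTS_RS (c, s1);
 *   RTS_RS (c, s2);
 *   RTS_RW (c);
 *   (issue the separate calls on s1 and s2 here)
 *   RTS_RD (c);
 */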
1419
1420
1421 /*
1422 * Request chain stack:
1423 * RTS_SD - declare variables that are used to track request chain stack without rescue clause.
1424 * RTS_SDX - declare variables that are used to track request chain stack with rescue clause.
1425 * RTS_SDR - declare variables that are used to restore request chain stack with rescue clause.
1426 RTS_SRC(p) - create request chain for the processor identified by object p when there is no rescue clause.
1427 RTS_SRCX(p) - create request chain for the processor identified by object p when there is a rescue clause.
1428 * RTS_SRF(p) - release request chain for the processor identified by object p when wait condition fails.
1429 * RTS_SRD(p) - release request chain for the processor identified by object p when routine exits normally.
1430 * RTS_SRR - release request chains (if any) when entering a rescue clause because of an exception.
1431 */
1432 #define RTS_SD \
1433 EIF_REFERENCE * q = sep_stack.st_top;
1434 #define RTS_SDX \
1435 EIF_REFERENCE * volatile q = sep_stack.st_top; \
1436 EIF_REFERENCE * volatile qt = q;
1437 #define RTS_SDR \
1438 EIF_REFERENCE * qt = sep_stack.st_top;
1439 #define RTS_SRC(p) \
1440 { \
1441 RTS_RC(p); \
1442 if (q && q < sep_stack.st_end) { \
1443 *q = p; \
1444 sep_stack.st_top = q + 1; \
1445 } \
1446 else { \
1447 eif_request_chain_push (p, &sep_stack); \
1448 q = (EIF_REFERENCE *) 0; \
1449 } \
1450 }
1451 #define RTS_SRCX(p) {RTS_SRC(p); qt = sep_stack.st_top;}
1452 #define RTS_SRP(p) \
1453 if (q == (EIF_REFERENCE *) 0) { \
1454 eif_request_chain_pop (&sep_stack); \
1455 } \
1456 else { \
1457 sep_stack.st_top = q; \
1458 }
1459 #define RTS_SRF(p) {RTS_SRP (p); RTS_RF (p);}
1460 #define RTS_SRD(p) {RTS_SRP (p); RTS_RD (p);}
1461 #define RTS_SRR \
1462 if (sep_stack.st_top != qt) { \
1463 eif_request_chain_restore (qt, &sep_stack); \
1464 }
1465
1466 /*
1467 * Separate call (versions ending with P stand for calls to precompiled routines, the first two arguments to them have a different meaning):
1468 * RTS_CF(s,f,n,t,a,r) - call a function on a static type s with a feature id f and name n on a target t and arguments a and result r
1469 * RTS_CP(s,f,n,t,a) - call a procedure on a static type s with a feature id f and name n on a target t and arguments a
1470 * RTS_CC(s,f,d,a) - call a creation procedure (asynchronous) on a static type s with a feature id f on a target of dynamic type d and arguments a
1471 * RTS_CA(o,p,t,a,r) - call an attribute at offset o using pattern p on target t with arguments a and result r
1472 * RTS_CS(t,a) - call a constant on target t with call structure a
1473 */
1474
1475 #ifdef WORKBENCH
1476 #define RTS_CF(s,f,n,t,a,r) \
1477 { \
1478 ((call_data*)(a)) -> result = &(r); \
1479 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1480 eif_log_call (s, f, RTS_PID(Current), a); \
1481 }
1482 #define RTS_CFP(s,f,n,t,a,r) \
1483 { \
1484 ((call_data*)(a)) -> result = &(r); \
1485 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1486 eif_log_callp (s, f, RTS_PID(Current), a); \
1487 }
1488 #define RTS_CP(s,f,n,t,a) eif_log_call (s, f, RTS_PID(Current), a);
1489 #define RTS_CPP(s,f,n,t,a) eif_log_callp (s, f, RTS_PID(Current), a);
1490
1491 #define RTS_CC(s,f,d,a) eif_log_call (s, f, RTS_PID(Current), a);
1492 #define RTS_CCP(s,f,d,a) eif_log_callp (s, f, RTS_PID(Current), a);
1493 #else /* WORKBENCH */
1494 #define RTS_CF(f,p,t,a,r) \
1495 { \
1496 ((call_data*)(a)) -> feature.address = f; \
1497 ((call_data*)(a)) -> pattern = p; \
1498 ((call_data*)(a)) -> result = &(r); \
1499 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1500 eif_log_call (((call_data*)(a))->sync_pid, a); \
1501 }
1502 #define RTS_CP(f,p,t,a) \
1503 { \
1504 ((call_data*)(a)) -> feature.address = f; \
1505 ((call_data*)(a)) -> pattern = p; \
1506 eif_log_call (RTS_PID(Current), a); \
1507 }
1508 #define RTS_CC(f,p,t,a) RTS_CP(f,p,t,a)
1509 #define RTS_CA(o,p,t,a,r) \
1510 { \
1511 ((call_data*)(a)) -> feature.offset = o; \
1512 ((call_data*)(a)) -> pattern = p; \
1513 ((call_data*)(a)) -> result = &(r); \
1514 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1515 eif_log_call (((call_data*)(a))->sync_pid, a); \
1516 }
1517 #define RTS_CS(t,a) \
1518 { \
1519 ((call_data*)(a)) -> pattern = eif_call_const; \
1520 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1521 eif_log_call (((call_data*)(a))->sync_pid, a); \
1522 }
1523 #endif /* WORKBENCH */
1524
1525 /*
1526 * Separate call arguments:
1527 * RTS_AC(n,t,a) - allocate container a that can hold n arguments for target t
1528 * RTS_AA(v,f,t,n,a) - register argument v corresponding to field f of type t at position n in a
1529 RTS_AS(v,f,t,n,a) - same as RTS_AA except that the argument is also checked to see whether it is controlled or not; this is recorded so that a synchronous call can be made if required
1530 */
1531 #define RTS_AC(n,t,a) \
1532 { \
1533 a = cmalloc (sizeof (call_data) + sizeof (EIF_TYPED_VALUE) * (size_t) (n) - sizeof (EIF_TYPED_VALUE)); \
1534 ((call_data*)(a)) -> target = eif_protect (t); \
1535 ((call_data*)(a)) -> count = (n); \
1536 ((call_data*)(a)) -> result = (EIF_TYPED_VALUE *) 0; \
1537 ((call_data*)(a)) -> sync_pid = (EIF_SCP_PID) -1; \
1538 ((call_data*)(a)) -> is_lock_passing = EIF_FALSE; \
1539 }
1540 #ifdef WORKBENCH
1541 # define RTS_AA(v,f,t,n,a) ((call_data*)(a)) -> argument [(n) - 1] = (v);
1542 # define RTS_AS(v,f,t,n,a) \
1543 { \
1544 ((call_data*)(a)) -> argument [(n) - 1] = (v); \
1545 ((call_data*)(a)) -> argument [(n) - 1].it_r = (EIF_REFERENCE) eif_protect ((v).it_r); \
1546 if ( !RTS_OU(Current, eif_access ( ((call_data*)(a)) -> argument [(n) - 1].it_r ) ) ) \
1547 { \
1548 ((call_data*)(a)) -> is_lock_passing = EIF_TRUE; \
1549 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1550 } \
1551 }
1552 #else
1553 # define RTS_AA(v,f,t,n,a) \
1554 { \
1555 ((call_data*)(a)) -> argument [(n) - 1].f = (v); \
1556 ((call_data*)(a)) -> argument [(n) - 1].type = t; \
1557 }
1558 # define RTS_AS(v,f,t,n,a) \
1559 { \
1560 ((call_data*)(a)) -> argument [(n) - 1].it_r = (EIF_REFERENCE) eif_protect (v); \
1561 ((call_data*)(a)) -> argument [(n) - 1].type = SK_REF; \
1562 if (!RTS_OU(Current, eif_access (((call_data*)(a)) -> argument [(n) - 1].it_r))) \
1563 { \
1564 ((call_data*)(a)) -> is_lock_passing = EIF_TRUE; \
1565 ((call_data*)(a)) -> sync_pid = RTS_PID(Current); \
1566 } \
1567 }
1568 #endif /* WORKBENCH */
1569
1570 #define RTS_WPR RTS_TCB(scoop_task_wait_for_processor_redundancy,0,0,NULL)
1571
1572 #define RTS_SEMAPHORE_CLIENT_WAIT(semaddr) EIF_ENTER_C; eif_pthread_sem_wait(semaddr); EIF_EXIT_C; RTGC;
1573 #define RTS_SEMAPHORE_SUPPLIER_SIGNAL(semaddr) eif_pthread_sem_post(semaddr);
1574
1575 #define RTS_PROCESSOR_CPU_YIELD EIF_ENTER_C; eif_pthread_yield(); EIF_EXIT_C; RTGC;
1576
1577 /*
1578 * Macros for workbench
1579 */
1580
1581 #ifdef WORKBENCH
1582
1583 /* Macros used for feature call and various accesses to objects.
1584 * RTWF(x,y,z) is a feature call
1585 * RTWPF(x,y,z) is a precompiled feature call
1586 * RTVF(x,y,z,t) is a nested feature call (dot expression)
1587 * RTVPF(x,y,z,t) is a nested precompiled feature call (dot expression)
1588 * RTWC(x,y,z) is a creation procedure call
1589 * RTWPC(x,y,z) is a precompiled creation procedure call
1590 * RTWA(x,y,z) is the access to an attribute
1591 * RTWPA(x,y,z) is the access to a precompiled attribute
1592 * RTVA(x,y,z,t) is a nested access to an attribute (dot expression)
1593 * RTVPA(x,y,z,t) is a nested access to a precompiled attribute (dot expr)
1594 * RTWT(x,y,z) fetches the creation type
1595 * RTWPT(x,y,z) fetches the creation type of a precompiled feature
1596 * RTWCT(x,y,z) fetches the creation type of a generic feature
1597 * RTWPCT(st,x,y,z) fetches the creation type of a precompiled generic feature
1598 * RTWCTT(x,y,z) same as RTWCT but takes dftype instead of object
1599 * RTWPCTT(x,y,z) same as RTWPCT but takes dftype instead of object
1600 * RTWPP(x) returns the feature address ($ or agent operator) of id x. The ids are assigned in ADDRESS_TABLE.
1601 * RTWO(x) stores in a list the body id of the just called once routine
1602 */
1603 #define RTWF(x,y,z) wfeat(x,y,z)
1604 #define RTWPF(x,y,z) wpfeat(x,y,z)
1605 #define RTVF(x,y,z,t) wfeat_inv(x,y,z,t)
1606 #define RTVPF(x,y,z,t) wpfeat_inv(x,y,z,t)
1607 #define RTWC(x,y,z) wcreat(x,y,z)
1608 #define RTWPC(x,y,z) wpcreat(x,y,z)
1609 #define RTWA(x,y,z) wattr(x,y,z)
1610 #define RTWPA(x,y,z) wpattr(x,y,z)
1611 #define RTVA(x,y,z,t) wattr_inv(x,y,z,t)
1612 #define RTVPA(x,y,z,t) wpattr_inv(x,y,z,t)
1613 #define RTWT(x,y,z) wtype(x,y,z)
1614 #define RTWPT(x,y,z) wptype(x,y,z)
1615 #define RTWCT(x,y,z) wtype_gen(x,y,z)
1616 #define RTWPCT(st,x,y,z) wptype_gen(st,x,y,z)
1617 #define RTWCTT(x,y,z) wttype_gen(x,y,z)
1618 #define RTWPCTT(st,x,y,z) wtptype_gen(st,x,y,z)
1619 #define RTWPP(x) (egc_address_table[x])
1620 #define RTWO(x)
1621
1622 #define WDBG(x,y) eif_is_debug(x,y) /* Debug option */
1623
1624 #define WASL(x,y,z) waslist(x,y,z) /* Assertion list evaluation */
1625 #define WASF(x) wasfree(x) /* Free assertion list */
1626
1627 #define RTDS(x) dispatch(x) /* Body id of body index (x) */
1628 #define RTFZ(x) egc_frozen(x) /* C frozen pointer of body id (x) */
1629 #define RTMT(x) melt(x) /* Byte code of body id (x) */
1630
1631 #define RTDT \
1632 int EIF_VOLATILE current_call_level; \
1633 char ** EIF_VOLATILE saved_prof_top /* Declare saved trace and profile */
1634 #else
1635 /* In final mode, an Eiffel call to a deferred feature without any actual
1636 * implementation may still be generated because of the static dead code
1637 * removal process; so we need a function pointer triggering an exception.
1638 */
1639 #define RTNR rt_norout
1640
1641 /* In final mode, we have two macros for E-TRACE called RTTR (start trace) and RTXT (stop trace).
1642 * We also have two macros for E-PROFILE in final mode, called RTPR (start profile) and RTXP (stop profile).
1643 * All of them take 'x' = feature name, 'y' = origin and 'z' = dtype, except RTXP which takes no argument.
1644 *
1645 * In addition, 'current_call_level' has to be declared whenever a finalized feature has a rescue clause.
1646 * This is done by RTLT. A combined usage sketch is given after the clean-up macros RTPS and RTTS below.
1647 *
1648 */
1649 #define RTTR(x,y,z,w) start_trace(x,y,z,w) /* Print message "entering..." */
1650 #define RTXT(x,y,z,w) stop_trace(x,y,z,w) /* Print message "leaving..." */
1651 #define RTPR(x,y,z) start_profile(x,y,z) /* Start measurement of feature */
1652 #define RTXP stop_profile() /* Stop measurement of feature */
1653 #define RTLT int EIF_VOLATILE current_call_level /* Declare local trace variable */
1654 #define RTLP char ** EIF_VOLATILE saved_prof_top /* Declare local profiler variable */
1655 #define RTPI saved_prof_top = prof_stack->st_top /* Record the profiler stack top so that
1656 * it can be restored in a rescue clause
1657 */
1658 #define RTTI current_call_level = trace_call_level /* Save the tracer call level */
1659 #endif
1660
1661
1662
1663 /* Macros needed for profile stack and trace clean up */
1664
1665 #ifdef WORKBENCH
1666 #define RTPS if (prof_stack) prof_stack_rewind(saved_prof_top) /* Clean up profiler stack */
1667 #else
1668 #define RTPS prof_stack_rewind(saved_prof_top) /* Clean up profiler stack */
1669 #endif
1670 #define RTTS trace_call_level = current_call_level /* Clean up trace levels */
1671
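/* Illustrative sketch (not generated code, assuming a setjmp-based rescue clause as produced by
 * the compiler): how the trace/profile declaration, save and clean-up macros fit together in a
 * finalized routine that has a rescue clause. `exenv', the feature/class name strings and the
 * fourth argument of RTTR/RTXT are hypothetical placeholders.
 *
 *	RTLT;                         -- declare current_call_level
 *	RTLP;                         -- declare saved_prof_top
 *	RTTI;                         -- remember trace_call_level on entry
 *	RTPI;                         -- remember the profiler stack top
 *	RTTR ("f", "C", dtype, 0);    -- "entering C.f"
 *	RTPR ("f", "C", dtype);       -- start profiling C.f
 *	if (setjmp (exenv)) {         -- rescue clause entered
 *		RTTS;                     -- restore the trace call level
 *		RTPS;                     -- rewind the profiler stack
 *	}
 *	...                           -- routine body
 *	RTXP;                         -- stop profiling
 *	RTXT ("f", "C", dtype, 0);    -- "leaving C.f"
 */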
1672
1673 /* Macros used to get info about SPECIAL objects.
1674 * RT_SPECIAL_PADDED_DATA_SIZE is the additional size of the data stored at the end of the SPECIAL.
1675 * RT_SPECIAL_DATA_SIZE is the meaningful part of RT_SPECIAL_PADDED_DATA_SIZE actually used.
1676 * RT_SPECIAL_MALLOC_COUNT is the macro to compute the necessary memory size for the SPECIAL.
1677 * RT_SPECIAL_COUNT, RT_SPECIAL_ELEM_SIZE and RT_SPECIAL_CAPACITY return `count', `element_size' and `capacity' of special objects.
1678 * RT_SPECIAL_VISIBLE_SIZE returns the size in bytes of the `count' items; RT_IS_SPECIAL tells whether an object is a SPECIAL (and not a TUPLE).
1679 */
1680 #define RT_SPECIAL_PADDED_DATA_SIZE LNGPAD(3)
1681 #define RT_SPECIAL_DATA_SIZE (3*sizeof(EIF_INTEGER))
1682 #define RT_SPECIAL_VISIBLE_SIZE(spec) ((rt_uint_ptr) RT_SPECIAL_COUNT(spec) * (rt_uint_ptr) RT_SPECIAL_ELEM_SIZE(spec))
1683
1684 #define RT_IS_SPECIAL(obj) \
1685 ((HEADER(obj)->ov_flags & (EO_SPEC | EO_TUPLE)) == EO_SPEC)
1686
1687 #define RT_SPECIAL_COUNT(spec) \
1688 (*(EIF_INTEGER *) ((char *) ((spec) + (HEADER(spec)->ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE)))
1689
1690 #define RT_SPECIAL_ELEM_SIZE(spec) \
1691 (*(EIF_INTEGER *) ((char *) ((spec) + (HEADER(spec)->ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE) + sizeof(EIF_INTEGER)))
1692
1693 #define RT_SPECIAL_CAPACITY(spec) \
1694 (*(EIF_INTEGER *) ((char *) ((spec) + (HEADER(spec)->ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE) + 2*sizeof(EIF_INTEGER)))
1695
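/* A reading of the offsets above suggests the following layout for a SPECIAL object whose
 * allocated size is (HEADER(spec)->ov_size & B_SIZE) bytes:
 *
 *	items ...  | count | element_size | capacity | alignment padding
 *	           ^ spec + (ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE
 *
 * i.e. the three EIF_INTEGER fields occupy the first RT_SPECIAL_DATA_SIZE bytes of the
 * LNGPAD(3)-sized area at the end of the object.
 */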
1696
1697 /* Macros used for array optimization (a usage sketch is given after these definitions).
1698 * RTADTYPE(x) defines the variable for the dynamic type of `x'
1699 * RTADOFFSETS(x) defines the variables for the offsets of area and lower of `x' (initialized by RTAIOFFSETS(x,y))
1700 * RTAD(x) defines the variables for array optimization on `x'
1701 * RTAITYPE(x,y) initializes the variable for Dtype on `x', `y'
1702 * RTAI(cast,x,y) initializes the variables for array optimization on `x', `y' (RTAIOFF uses the cached offsets instead of the type tables)
1703 * RTAF(x, y) unfreezes `y' if it was frozen at this level
1704 * RTAA(cast,x,i) gets the value at position `i' from `x' of type `cast'
1705 * RTAP(cast,x,val,i) puts `val' at position `i' from `x' of type `cast'
1706 * RTAUA(cast,x,y,i) gets the value at position `i' from `x' of type `cast' (unsafe version)
1707 * RTAUP(cast,x,y,val,i) puts `val' at position `i' from `x' of type `cast' (unsafe version)
1708 */
1709
1710 #define RTADTYPE(x) int EIF_VOLATILE CAT2(x,_dtype) = 0
1711
1712 #define RTADOFFSETS(x) \
1713 long EIF_VOLATILE CAT2(x,_area_offset) = 0; \
1714 long EIF_VOLATILE CAT2(x,_lower_offset) = 0
1715
1716 #define RTAD(x) \
1717 char EIF_VOLATILE CAT2(x,_freeze) = 0; \
1718 char* EIF_VOLATILE CAT2(x,_area); \
1719 char* EIF_VOLATILE CAT2(x,_area_minus_lower)
1720
1721 #define RTAITYPE(x,y) CAT2(x,_dtype) = Dtype(y)
1722
1723 #define RTAIOFFSETS(x,y) \
1724 if (y) { \
1725 RTAITYPE(x,y); \
1726 CAT2(x,_area_offset) = (eif_area_table) [CAT2(x,_dtype)]; \
1727 CAT2(x,_lower_offset) = (eif_lower_table) [CAT2(x,_dtype)]; \
1728 }
1729
1730 #define RTAUA(cast,x,y,i) \
1731 *(cast*)(*(EIF_REFERENCE *)(y+CAT2(x,_area_offset))+(i-*(long*)(y+CAT2(x,_lower_offset)))*sizeof(cast))
1732
1733 #define RTAUP(cast,x,y,val,i) CAT2(RTAUP_,cast)(cast,x,y,val,i)
1734
1735 #define RTAUP_EIF_INTEGER(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
1736 #define RTAUP_EIF_CHARACTER(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
1737 #define RTAUP_EIF_REAL(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
1738 #define RTAUP_EIF_DOUBLE(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
1739 #define RTAUP_EIF_POINTER(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
1740 #define RTAUP_EIF_BOOLEAN(cast,x,y,val,i) RTAUP_BASIC(cast,x,y,val,i)
1741
1742 #define RTAUP_EIF_REFERENCE(cast,x,y,val,i) \
1743 { \
1744 EIF_REFERENCE EIF_VOLATILE ptr_current = (*(EIF_REFERENCE *)(y+CAT2(x,_area_offset))); \
1745 long EIF_VOLATILE l_pos = (i)-*(long*)(y+CAT2(x,_lower_offset)); /* Distinct name: a local named `i' would shadow the macro argument in its own initializer. */ \
1746 *((EIF_REFERENCE *)ptr_current + l_pos) = val; \
1747 RTAR(ptr_current, val); \
1748 }
1749
1750 #define RTAUP_BASIC(cast,x,y,val,i) \
1751 *(cast*)(*(EIF_REFERENCE *)(y+CAT2(x,_area_offset))+(i-*(long*)(y+CAT2(x,_lower_offset)))*sizeof(cast)) = val
1752
1753 #define RTAI(cast,x,y) \
1754 if (y) { \
1755 RTAITYPE(x,y); \
1756 if (CAT2(x,_area) = *(EIF_REFERENCE *) ((y)+ (eif_area_table) [CAT2(x,_dtype)])) { \
1757 if (!(HEADER(CAT2(x,_area))->ov_size & B_C)) { \
1758 CAT2(x,_freeze) = 1; \
1759 HEADER(CAT2(x,_area))->ov_size |= B_C; \
1760 } \
1761 CAT2(x,_area_minus_lower) = CAT2(x,_area)-(*(long*) ((y)+ (eif_lower_table) [CAT2(x,_dtype)]))*sizeof(cast); \
1762 } \
1763 }
1764
1765 #define RTAIOFF(cast,x,y) \
1766 if (y) { \
1767 RTAITYPE(x,y); \
1768 if (CAT2(x,_area) = *(EIF_REFERENCE *) ((y)+CAT2(x,_area_offset))) { \
1769 if (!(HEADER(CAT2(x,_area))->ov_size & B_C)) { \
1770 CAT2(x,_freeze) = 1; \
1771 HEADER(CAT2(x,_area))->ov_size |= B_C; \
1772 } \
1773 CAT2(x,_area_minus_lower) = CAT2(x,_area)-(*(long*) ((y)+CAT2(x,_lower_offset)))*sizeof(cast); \
1774 } \
1775 }
1776
1777 #define RTAF(x, y) \
1778 if (CAT2(x,_freeze)!=0) { \
1779 HEADER(CAT2(x,_area))->ov_size &= ~B_C; \
1780 }
1781
1782 #define RTAA(cast,x,i) \
1783 *(cast*)(CAT2(x,_area_minus_lower)+(i)*sizeof(cast))
1784
1785 #define RTAP(cast,x,val,i) CAT2(RTAP_,cast)(cast,x,val,i)
1786
1787 #define RTAP_EIF_INTEGER(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
1788 #define RTAP_EIF_CHARACTER(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
1789 #define RTAP_EIF_REAL(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
1790 #define RTAP_EIF_DOUBLE(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
1791 #define RTAP_EIF_POINTER(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
1792 #define RTAP_EIF_BOOLEAN(cast,x,val,i) RTAP_BASIC(cast,x,val,i)
1793
1794 #define RTAP_EIF_REFERENCE(cast,x,val,i) \
1795 { \
1796 *((cast*)CAT2(x,_area_minus_lower) + i) = val; \
1797 RTAR(CAT2(x,_area), val); \
1798 }
1799
1800 #define RTAP_BASIC(cast,x,val,i) \
1801 *(cast*)(CAT2(x,_area_minus_lower)+(i)*sizeof(cast)) = val;
1802
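/* Illustrative usage sketch (not generated code): how the array optimization macros might be
 * combined for repeated accesses to an ARRAY [INTEGER] through its `area'. The names `arr',
 * `arr_obj', `i', `lower' and `upper' are hypothetical placeholders; real generated code
 * obtains them from the Eiffel context.
 *
 *	RTAD(arr);                          -- declares arr_freeze, arr_area, arr_area_minus_lower
 *	RTADTYPE(arr);                      -- declares arr_dtype
 *	RTAI(EIF_INTEGER, arr, arr_obj);    -- freeze `area' if needed and cache the base pointer
 *	for (i = lower; i <= upper; i++) {
 *		RTAP(EIF_INTEGER, arr, RTAA(EIF_INTEGER, arr, i) + 1, i);  -- item (i) := item (i) + 1
 *	}
 *	RTAF(arr, arr_obj);                 -- unfreeze `area' if it was frozen above
 */
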
1803 #ifdef __cplusplus
1804 }
1805 #endif
1806
1807 #endif
1808
1809
1810
