
Contents of /branches/eth/eve/Src/C/run-time/garcol.c



Revision 92964
Fri Sep 20 05:41:23 2013 UTC by jasonw
File MIME type: text/plain
File size: 208657 byte(s)
<<Merged from trunk#92963.>>
1 /*
2 description: "Garbage collection routines."
3 date: "$Date$"
4 revision: "$Revision$"
5 copyright: "Copyright (c) 1985-2013, Eiffel Software."
6 license: "GPL version 2 (see http://www.eiffel.com/licensing/gpl.txt)"
7 licensing_options: "Commercial license is available at http://www.eiffel.com/licensing"
8 copying: "[
9 This file is part of Eiffel Software's Runtime.
10
11 Eiffel Software's Runtime is free software; you can
12 redistribute it and/or modify it under the terms of the
13 GNU General Public License as published by the Free
14 Software Foundation, version 2 of the License
15 (available at the URL listed under "license" above).
16
17 Eiffel Software's Runtime is distributed in the hope
18 that it will be useful, but WITHOUT ANY WARRANTY;
19 without even the implied warranty of MERCHANTABILITY
20 or FITNESS FOR A PARTICULAR PURPOSE.
21 See the GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public
24 License along with Eiffel Software's Runtime; if not,
25 write to the Free Software Foundation, Inc.,
26 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 ]"
28 source: "[
29 Eiffel Software
30 5949 Hollister Ave., Goleta, CA 93117 USA
31 Telephone 805-685-1006, Fax 805-685-6869
32 Website http://www.eiffel.com
33 Customer support http://support.eiffel.com
34 ]"
35 */
36
37 /*
38 doc:<file name="garcol.c" header="eif_garcol.h" version="$Id$" summary="Garbage collection routines">
39 */
40
41 #include "eif_portable.h"
42 #include "eif_project.h" /* for egc_prof_enabled */
43 #include "eif_eiffel.h" /* For bcopy/memcpy */
44 #include "eif_struct.h"
45 #include "rt_globals.h"
46 #include "eif_misc.h"
47 #include "eif_size.h"
48 #include "rt_malloc.h"
49 #include "rt_garcol.h"
50 #include "rt_types.h"
51 #include "rt_threads.h"
52 #include "rt_lmalloc.h" /* for eif_free */
53 #include "eif_memory.h"
54 #if ! defined CUSTOM || defined NEED_TIMER_H
55 #include "rt_timer.h"
56 #endif
57 #include "rt_macros.h"
58 #include "rt_sig.h"
59 #include "rt_urgent.h"
60 #include "rt_search.h"
61 #include "rt_gen_conf.h" /* For eif_gen_conf_cleanup () */
62 #include "rt_gen_types.h" /* For tuple marking */
63 #include "eif_cecil.h"
64 #include "rt_struct.h"
65 #ifdef VXWORKS
66 #include <string.h>
67 #endif
68
69 #include <stdio.h> /* For stream flushing */
70
71 #include "rt_assert.h" /* For assertions checking. */
72 #if ! defined CUSTOM || defined NEED_OPTION_H
73 #include "rt_option.h" /* For exitprf */
74 #endif
75 #if ! defined CUSTOM || defined NEED_OBJECT_ID_H
76 #include "rt_object_id.h" /* For the object id and separate stacks */
77 #endif
78 #include "rt_hector.h"
79 #include "rt_except.h"
80 #include "rt_debug.h"
81 #include "rt_main.h"
82
83 #include "rt_scoop_gc.h"
84
85 #ifdef WORKBENCH
86 #include "rt_interp.h"
87 #endif
88
89
90 #ifdef __cplusplus
91 extern "C" {
92 #endif
93
94 #ifdef EIF_WINDOWS
95 extern void eif_cleanup(void); /* %%ss added. In extra/win32/console/argcargv.c */
96 #endif
97
98 #ifdef ISE_GC
99
100
101 #define MARK_SWITCH hybrid_mark
102 #define GEN_SWITCH hybrid_gen_mark
103
104 /*#define DEBUG 63 */ /* Debugging level */
105 /*#define MEMCHK */ /* Activate memory checking */
106 /*#define MEM_STAT */ /* Activate Eiffel memory monitoring */
107
108 /*
109 doc: <attribute name="rt_g_data" return_type="struct gacinfo" export="shared">
110 doc: <summary>Internal data structure used to monitor the activity of the garbage collection process and help the auto-adaptive algorithm in its decisions (heuristics).</summary>
111 doc: <thread_safety>Not safe</thread_safety>
112 doc: <synchronization>Safe if caller holds either `eif_gc_mutex' or `eif_rt_g_data_mutex'.</synchronization>
113 doc: <fixme>Because it is very easy to turn the GC on or off, if more than one thread plays with it we are stuck, as most likely the GC will be off. We need better synchronization than what we have at the moment, so that only the thread that turned the GC off can turn it back on.</fixme>
114 doc: </attribute>
115 */
116 rt_shared struct gacinfo rt_g_data = { /* Global status */
117 0L, /* nb_full */
118 0L, /* nb_partial */
119 0L, /* mem_used */
120 0L, /* mem_copied */
121 0L, /* mem_move */
122 0L, /* gc_to */
123 (char) 0, /* status */
124 };
125
126 /*
127 doc: <attribute name="rt_g_stat" return_type="struct gacstat [GST_NBR]" export="shared">
128 doc: <summary>Run-time statistics, one for partial scavenging and one for generational.</summary>
129 doc: <thread_safety>Safe</thread_safety>
130 doc: <synchronization>Through `eif_gc_mutex'.</synchronization>
131 doc: </attribute>
132 */
133 rt_shared struct gacstat rt_g_stat[GST_NBR] = { /* Run-time statistics */
134 {
135 0L, /* count */ 0L, /* mem_used */
136 0L, /* mem_collect */ 0L, /* mem_avg */
137 0L, /* real_avg */ 0L, /* real_time */
138 0L, /* real_iavg */ 0L, /* real_itime */
139 0., /* cpu_avg */ 0., /* sys_avg */
140 0., /* cpu_iavg */ 0., /* sys_iavg */
141 0., /* cpu_time */ 0., /* sys_time */
142 0., /* cpu_itime */ 0., /* sys_itime */
143 0., /* cpu_total_time */ 0., /* sys_total_time */
144 },
145 {
146 0L, /* count */ 0L, /* mem_used */
147 0L, /* mem_collect */ 0L, /* mem_avg */
148 0L, /* real_avg */ 0L, /* real_time */
149 0L, /* real_iavg */ 0L, /* real_itime */
150 0., /* cpu_avg */ 0., /* sys_avg */
151 0., /* cpu_iavg */ 0., /* sys_iavg */
152 0., /* cpu_time */ 0., /* sys_time */
153 0., /* cpu_itime */ 0., /* sys_itime */
154 0., /* cpu_total_time */ 0., /* sys_total_time */
155 }
156 };
157
158
159 #endif /* ISE_GC */
160
161 #ifndef EIF_THREADS
162 #ifdef ISE_GC
163 /*
164 doc: <attribute name="loc_stack" return_type="struct stack" export="public">
165 doc: <summary>To protect EIF_REFERENCE in C routines through RT_GC_PROTECT/RT_GC_WEAN macros. Used internally by runtime. Content points to objects which may be moved by garbage collector or memory management routines.</summary>
166 doc: <thread_safety>Safe</thread_safety>
167 doc: <synchronization>Per thread data.</synchronization>
168 doc: </attribute>
169 */
170 rt_public struct stack loc_stack = { /* Local indirection stack */
171 (struct stchunk *) 0, /* st_hd */
172 (struct stchunk *) 0, /* st_tl */
173 (struct stchunk *) 0, /* st_cur */
174 (EIF_REFERENCE *) 0, /* st_top */
175 (EIF_REFERENCE *) 0, /* st_end */
176 };
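/* A minimal sketch of how `loc_stack' is used through the protection macros
 * mentioned above (the local name `obj' is only for illustration):
 *
 *	EIF_REFERENCE obj = ...;
 *	RT_GC_PROTECT(obj);
 *	... code that may allocate memory and thus trigger a collection ...
 *	RT_GC_WEAN(obj);
 *
 * While protected, the entry on `loc_stack' holds the address of `obj', so
 * the collector can update the local reference if the object moves.
 */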
177
178 /*
179 doc: <attribute name="loc_set" return_type="struct stack" export="public">
180 doc: <summary>To protect Eiffel objects in C generated code. Same purpose as `loc_stack' but only used for C generated code.</summary>
181 doc: <thread_safety>Safe</thread_safety>
182 doc: <synchronization>Per thread data.</synchronization>
183 doc: </attribute>
184 */
185 rt_public struct stack loc_set = { /* Local variable stack */
186 (struct stchunk *) 0, /* st_hd */
187 (struct stchunk *) 0, /* st_tl */
188 (struct stchunk *) 0, /* st_cur */
189 (EIF_REFERENCE *) 0, /* st_top */
190 (EIF_REFERENCE *) 0, /* st_end */
191 };
192 #endif
193 /*
194 doc: <attribute name="once_set" return_type="struct stack" export="public">
195 doc: <summary>Keep safe references of once function results which are computed per thread (default behavior).</summary>
196 doc: <thread_safety>Safe</thread_safety>
197 doc: <synchronization>Per thread data.</synchronization>
198 doc: </attribute>
199 */
200 rt_public struct stack once_set = { /* Once functions */
201 (struct stchunk *) 0, /* st_hd */
202 (struct stchunk *) 0, /* st_tl */
203 (struct stchunk *) 0, /* st_cur */
204 (EIF_REFERENCE *) 0, /* st_top */
205 (EIF_REFERENCE *) 0, /* st_end */
206 };
207 /*
208 doc: <attribute name="oms_set" return_type="struct stack" export="public">
209 doc: <summary>Keep safe references of once manifest strings which are computed per thread.</summary>
210 doc: <thread_safety>Safe</thread_safety>
211 doc: <synchronization>Per thread data.</synchronization>
212 doc: </attribute>
213 */
214 rt_public struct stack oms_set = { /* Once manifest strings */
215 (struct stchunk *) 0, /* st_hd */
216 (struct stchunk *) 0, /* st_tl */
217 (struct stchunk *) 0, /* st_cur */
218 (EIF_REFERENCE *) 0, /* st_top */
219 (EIF_REFERENCE *) 0, /* st_end */
220 };
221 #else
222 /*
223 doc: <attribute name="global_once_set" return_type="struct stack" export="public">
224 doc: <summary>Same as `once_set' but for results that are computed per process.</summary>
225 doc: <thread_safety>Safe</thread_safety>
226 doc: <synchronization>Through `eif_global_once_set_mutex'</synchronization>
227 doc: </attribute>
228 */
229 rt_public struct stack global_once_set = { /* Once functions */
230 (struct stchunk *) 0, /* st_hd */
231 (struct stchunk *) 0, /* st_tl */
232 (struct stchunk *) 0, /* st_cur */
233 (EIF_REFERENCE *) 0, /* st_top */
234 (EIF_REFERENCE *) 0, /* st_end */
235 };
236
237
238 /* Same as above except that GC keeps track of all thread specific stack to
239 * perform a GC cycle among all threads */
240 #ifdef ISE_GC
241 /*
242 doc: <attribute name="loc_stack_list" return_type="struct stack_list" export="public">
243 doc: <summary>List of all `loc_stack'. There is one per thread.</summary>
244 doc: <thread_safety>Safe</thread_safety>
245 doc: <synchronization>eif_gc_mutex</synchronization>
246 doc: </attribute>
247 */
248 rt_public struct stack_list loc_stack_list = {
249 (int) 0, /* count */
250 (int) 0, /* capacity */
251 {NULL} /* threads_stack */
252 };
253
254 /*
255 doc: <attribute name="loc_set_list" return_type="struct stack_list" export="public">
256 doc: <summary>List of all `loc_set'. There is one per thread.</summary>
257 doc: <thread_safety>Safe</thread_safety>
258 doc: <synchronization>eif_gc_mutex</synchronization>
259 doc: </attribute>
260 */
261 rt_public struct stack_list loc_set_list = {
262 (int) 0, /* count */
263 (int) 0, /* capacity */
264 {NULL} /* threads_stack */
265 };
266 #endif
267 /*
268 doc: <attribute name="once_set_list" return_type="struct stack_list" export="public">
269 doc: <summary>List of all `once_set'. There is one per thread.</summary>
270 doc: <thread_safety>Safe</thread_safety>
271 doc: <synchronization>eif_gc_mutex</synchronization>
272 doc: </attribute>
273 */
274 rt_public struct stack_list once_set_list = {
275 (int) 0, /* count */
276 (int) 0, /* capacity */
277 {NULL} /* threads_stack */
278 };
279 /*
280 doc: <attribute name="oms_set_list" return_type="struct stack_list" export="public">
281 doc: <summary>List of all `oms_set'. There is one per thread.</summary>
282 doc: <thread_safety>Safe</thread_safety>
283 doc: <synchronization>eif_gc_mutex</synchronization>
284 doc: </attribute>
285 */
286 rt_public struct stack_list oms_set_list = {
287 (int) 0, /* count */
288 (int) 0, /* capacity */
289 {NULL} /* threads_stack */
290 };
291 #ifdef ISE_GC
292 /*
293 doc: <attribute name="hec_stack_list" return_type="struct stack_list" export="public">
294 doc: <summary>List of all `hec_stack'. There is one per thread.</summary>
295 doc: <thread_safety>Safe</thread_safety>
296 doc: <synchronization>eif_gc_mutex</synchronization>
297 doc: </attribute>
298 */
299 rt_public struct stack_list hec_stack_list = {
300 (int) 0, /* count */
301 (int) 0, /* capacity */
302 {NULL} /* threads_stack */
303 };
304 #endif
305
306 /*
307 doc: <attribute name="sep_stack_list" return_type="struct stack_list" export="public">
308 doc: <summary>List of all `sep_stack'. There is one per thread.</summary>
309 doc: <thread_safety>Safe</thread_safety>
310 doc: <synchronization>eif_gc_mutex</synchronization>
311 doc: </attribute>
312 */
313 rt_public struct stack_list sep_stack_list = {
314 (int) 0, /* count */
315 (int) 0, /* capacity */
316 {NULL} /* threads_stack */
317 };
318
319 /*
320 doc: <attribute name="eif_stack_list" return_type="struct stack_list" export="public">
321 doc: <summary>List of all `eif_stack'. There is one per thread.</summary>
322 doc: <thread_safety>Safe</thread_safety>
323 doc: <synchronization>eif_gc_mutex</synchronization>
324 doc: </attribute>
325 */
326 rt_public struct stack_list eif_stack_list = {
327 (int) 0, /* count */
328 (int) 0, /* capacity */
329 {NULL} /* threads_stack */
330 };
331
332 /*
333 doc: <attribute name="eif_trace_list" return_type="struct stack_list" export="public">
334 doc: <summary>List of all `eif_trace'. There is one per thread.</summary>
335 doc: <thread_safety>Safe</thread_safety>
336 doc: <synchronization>eif_gc_mutex</synchronization>
337 doc: </attribute>
338 */
339 rt_public struct stack_list eif_trace_list = {
340 (int) 0, /* count */
341 (int) 0, /* capacity */
342 {NULL} /* threads_stack */
343 };
344
345 #ifdef WORKBENCH
346 /*
347 doc: <attribute name="opstack_list" return_type="struct stack_list" export="public">
348 doc: <summary>List of all `op_stack'. There is one per thread.</summary>
349 doc: <thread_safety>Safe</thread_safety>
350 doc: <synchronization>eif_gc_mutex</synchronization>
351 doc: </attribute>
352 */
353 rt_public struct stack_list opstack_list = {
354 (int) 0, /* count */
355 (int) 0, /* capacity */
356 {NULL} /* threads_stack */
357 };
358 #endif
359
360 #endif
361
362 #ifdef ISE_GC
363 /*
364 doc: <attribute name="rem_set" return_type="struct stack" export="private">
365 doc: <summary>Remembered set. Remembers all old objects pointing on new ones.</summary>
366 doc: <thread_safety>Safe</thread_safety>
367 doc: <synchronization>eif_gc_set_mutex</synchronization>
368 doc: </attribute>
369 */
370 rt_private struct stack rem_set = { /* Remembered set */
371 (struct stchunk *) 0, /* st_hd */
372 (struct stchunk *) 0, /* st_tl */
373 (struct stchunk *) 0, /* st_cur */
374 (EIF_REFERENCE *) 0, /* st_top */
375 (EIF_REFERENCE *) 0, /* st_end */
376 };
377
378 /*
379 doc: <attribute name="moved_set" return_type="struct stack" export="shared">
380 doc: <summary>Moved objects set. Track all objects allocated outside the scavenge zone.</summary>
381 doc: <thread_safety>Safe</thread_safety>
382 doc: <synchronization>eif_gc_set_mutex</synchronization>
383 doc: </attribute>
384 */
385 rt_shared struct stack moved_set = { /* Moved objects set */
386 (struct stchunk *) 0, /* st_hd */
387 (struct stchunk *) 0, /* st_tl */
388 (struct stchunk *) 0, /* st_cur */
389 (EIF_REFERENCE *) 0, /* st_top */
390 (EIF_REFERENCE *) 0, /* st_end */
391 };
392
393 /*
394 doc: <attribute name="memory_set" return_type="struct stack" export="public">
395 doc: <summary>Memory set. Track all objects allocated in the scavenge zone which have a `dispose' routine.</summary>
396 doc: <thread_safety>Safe</thread_safety>
397 doc: <synchronization>eif_gc_set_mutex for insertion, eif_gc_mutex for manipulating it.</synchronization>
398 doc: </attribute>
399 */
400 rt_public struct stack memory_set =
401 {
402 (struct stchunk *) 0, /* st_hd */
403 (struct stchunk *) 0, /* st_tl */
404 (struct stchunk *) 0, /* st_cur */
405 (EIF_REFERENCE *) 0, /* st_top */
406 (EIF_REFERENCE *) 0, /* st_end */
407 };
408
409 /*
410 doc: <attribute name="overflow_stack_set" return_type="struct stack" export="private">
411 doc: <summary>Stack containing objects that are not yet traversed because traversing them could generate a stack overflow during a GC cycle.</summary>
412 doc: <thread_safety>Safe</thread_safety>
413 doc: <synchronization>eif_gc_mutex</synchronization>
414 doc: </attribute>
415 */
416 rt_private struct stack overflow_stack_set = {
417 (struct stchunk *) 0, /* st_hd */
418 (struct stchunk *) 0, /* st_tl */
419 (struct stchunk *) 0, /* st_cur */
420 (EIF_REFERENCE *) 0, /* st_top */
421 (EIF_REFERENCE *) 0, /* st_end */
422 };
423
424 /*
425 doc: <attribute name="overflow_stack_count" return_type="uint32" export="private">
426 doc: <summary>Number of elements in `overflow_stack_set'.</summary>
427 doc: <thread_safety>Safe</thread_safety>
428 doc: <synchronization>eif_gc_mutex</synchronization>
429 doc: </attribute>
430 */
431 rt_private uint32 overflow_stack_count = 0;
432
433 /*
434 doc: <attribute name="overflow_stack_depth" return_type="uint32" export="private">
435 doc: <summary>Depth of the current recursive call to the marking routine.</summary>
436 doc: <thread_safety>Safe</thread_safety>
437 doc: <synchronization>eif_gc_mutex</synchronization>
438 doc: </attribute>
439 */
440 rt_private uint32 overflow_stack_depth = 0;
441
442 /*
443 doc: <attribute name="overflow_stack_limit" return_type="uint32" export="shared">
444 doc: <summary>Limit on `overflow_stack_depth'. When limit is reached, recursive calls are stopped and current element is stored in `overflow_stack_set' for later traversal.</summary>
445 doc: <thread_safety>Safe</thread_safety>
446 doc: <synchronization>eif_gc_mutex</synchronization>
447 doc: </attribute>
448 */
449 rt_shared uint32 overflow_stack_limit = 0;
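/* The three attributes above cooperate roughly as follows (illustrative
 * sketch only; the actual checks are performed by the marking routines
 * further down in this file):
 *
 *	if (overflow_stack_depth > overflow_stack_limit) {
 *		epush(&overflow_stack_set, ...);	(deferred for later traversal)
 *		overflow_stack_count++;
 *	} else {
 *		(recurse into the object's references)
 *	}
 *
 * Deferred objects are then drained by `mark_overflow_stack'.
 */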
450
451 /*
452 doc: <attribute name="c_stack_object_set" return_type="struct stack" export="private">
453 doc: <summary>Stack containing all objects whose memory is allocated on the C stack. They are added during marking and unmarked at the end of a GC cycle.</summary>
454 doc: <thread_safety>Safe</thread_safety>
455 doc: <synchronization>eif_gc_mutex</synchronization>
456 doc: </attribute>
457 */
458 rt_private struct stack c_stack_object_set = {
459 (struct stchunk *) 0, /* st_hd */
460 (struct stchunk *) 0, /* st_tl */
461 (struct stchunk *) 0, /* st_cur */
462 (EIF_REFERENCE *) 0, /* st_top */
463 (EIF_REFERENCE *) 0, /* st_end */
464 };
465
466
467 /* Signature of marking functions. They take the address where
468 * reference is stored (to be updated by `mark_overflow_stack'
469 * if we are too deep in the call stack) and return new location
470 * of object. */
471 typedef EIF_REFERENCE (*MARKER) (EIF_REFERENCE *);
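/* In other words a marker is always invoked with the address of the
 * reference and the result is written back, e.g. (sketch):
 *
 *	object = (*marker) (&object);
 *
 * which is the same pattern used for the roots in `full_mark' below
 * (e.g. `root_obj = MARK_SWITCH(&root_obj);').
 */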
472
473 #endif
474
475 /*
476 doc: <attribute name="rt_type_set" return_type="EIF_REFERENCE *" export="public">
477 doc: <summary>Mapping between dynamic type and TYPE instances of size `rt_type_set_count'.</summary>
478 doc: <thread_safety>Safe</thread_safety>
479 doc: <synchronization>Through `eif_type_set_mutex'</synchronization>
480 doc: </attribute>
481 */
482 rt_public EIF_REFERENCE *rt_type_set = NULL;
483
484 /*
485 doc: <attribute name="rt_type_set_count" return_type="rt_uint_ptr" export="public">
486 doc: <summary>Number of elements in `rt_type_set'.</summary>
487 doc: <thread_safety>Safe</thread_safety>
488 doc: <synchronization>Through `eif_type_set_mutex'</synchronization>
489 doc: </attribute>
490 */
491 rt_public rt_uint_ptr volatile rt_type_set_count = 0;
492
493 #ifdef EIF_THREADS
494 /*
495 doc: <attribute name="eif_gc_mutex" return_type="EIF_CS_TYPE *" export="public">
496 doc: <summary>Mutex used for run-time synchronization when performing a GC cycle. All running threads should be stopped on `eif_gc_mutex' or be blocked before starting a GC cycle</summary>
497 doc: <thread_safety>Safe</thread_safety>
498 doc: </attribute>
499 */
500 rt_public EIF_CS_TYPE *eif_gc_mutex = NULL;
501
502 /*
503 doc: <attribute name="eif_gc_set_mutex" return_type="EIF_CS_TYPE *" export="public">
504 doc: <summary>Mutex used to access all the global sets `moved_set', `rem_set' and `memory_set'.</summary>
505 doc: <thread_safety>Safe</thread_safety>
506 doc: </attribute>
507 */
508 rt_public EIF_CS_TYPE *eif_gc_set_mutex = NULL;
509
510 #ifdef ISE_GC
511 /*
512 doc: <attribute name="eif_rt_g_data_mutex" return_type="EIF_CS_TYPE *" export="public">
513 doc: <summary>Mutex used to access `rt_g_data' when not protected by `eif_gc_mutex'.</summary>
514 doc: <thread_safety>Safe</thread_safety>
515 doc: </attribute>
516 */
517 rt_public EIF_CS_TYPE *eif_rt_g_data_mutex = NULL;
518 #endif
519
520 /*
521 doc: <attribute name="eif_global_once_set_mutex" return_type="EIF_CS_TYPE *" export="public">
522 doc: <summary>Mutex used to protect insertion of global once result in `global_once_set'.</summary>
523 doc: <thread_safety>Safe</thread_safety>
524 doc: </attribute>
525 */
526 rt_public EIF_CS_TYPE *eif_global_once_set_mutex = NULL;
527 #endif
528
529 /*
530 doc: <attribute name="age_table" return_type="uint32 [TENURE_MAX]" export="private">
531 doc: <summary>Array used to store the number of objects used, indexed by object's age. This is used when computing the demographic feedback-mediated tenuring threshold for the next step (generation collection).</summary>
532 doc: <indexing>age</indexing>
533 doc: <thread_safety>Safe</thread_safety>
534 doc: <synchronization>eif_gc_mutex</synchronization>
535 doc: </attribute>
536 */
537 rt_private uint32 age_table[TENURE_MAX]; /* Number of objects/age */
538
539 /*
540 doc: <attribute name="size_table" return_type="rt_uint_ptr [TENURE_MAX]" export="private">
541 doc: <summary>Array used to store the size of objects used, indexed by object's age. This is used when computing the demographic feedback-mediated tenuring threshold for the next step (generation collection) and by the generation scavenging algorithm.</summary>
542 doc: <indexing>age</indexing>
543 doc: <thread_safety>Safe</thread_safety>
544 doc: <synchronization>eif_gc_mutex</synchronization>
545 doc: </attribute>
546 */
547 rt_private rt_uint_ptr size_table[TENURE_MAX]; /* Amount of bytes/age */
548
549 /*
550 doc: <attribute name="tenure" return_type="size_t" export="private">
551 doc: <summary>Maximum age for tenuring.</summary>
552 doc: <thread_safety>Safe</thread_safety>
553 doc: <synchronization>None while initialized in `main.c' but use `eif_gc_mutex' when updating its value.</synchronization>
554 doc: </attribute>
555 */
556 rt_private size_t tenure;
557
558 /*
559 doc: <attribute name="plsc_per" return_type="long" export="public">
560 doc: <summary>Period of calls to `plsc' in `acollect'.</summary>
561 doc: <thread_safety>Safe</thread_safety>
562 doc: <synchronization>None while initialized in main.c, but use `eif_memory_mutex' when updating its value.</synchronization>
563 doc: </attribute>
564 */
565 rt_public long plsc_per;
566
567 /*
568 doc: <attribute name="force_plsc" return_type="long" export="shared">
569 doc: <summary>When moving objects outside the scavenge zone, if it turns out we do not have enough memory in the free list, we force a full collection at the next collection. That way we are sure not to over-allocate blocks of memory when not needed. Doing so reduces the memory footprint of Eiffel applications.</summary>
570 doc: <thread_safety>Safe with synchronization</thread_safety>
571 doc: <synchronization>Under `trigger_gc_mutex' or `eiffel_usage_mutex' or GC synchronization.</synchronization>
572 doc: </attribute>
573 */
574 rt_shared long force_plsc = 0;
575
576 /*
577 doc: <attribute name="clsc_per" return_type="EIF_INTEGER" export="public">
578 doc: <summary>Period of full coalescing. If `0', it is never called.</summary>
579 doc: <thread_safety>Not safe</thread_safety>
580 doc: <synchronization>None</synchronization>
581 doc: <fixme>Updates need to be synchronized with a mutex.</fixme>
582 doc: </attribute>
583 */
584 rt_public EIF_INTEGER clsc_per; /* Period of full coalescing: 0 => never. */
585
586 /* Zones used for partial scavenging */
587 /*
588 doc: <attribute name="ps_from" return_type="struct partial_sc_zone" export="private">
589 doc: <summary>From zone used for partial scavenging</summary>
590 doc: <thread_safety>Safe</thread_safety>
591 doc: <synchronization>eif_gc_mutex</synchronization>
592 doc: </attribute>
593 */
594 rt_private struct partial_sc_zone ps_from;
595
596 /*
597 doc: <attribute name="ps_to" return_type="struct partial_sc_zone" export="private">
598 doc: <summary>To zone used for partial scavenging</summary>
599 doc: <thread_safety>Safe</thread_safety>
600 doc: <synchronization>eif_gc_mutex</synchronization>
601 doc: </attribute>
602 */
603 rt_private struct partial_sc_zone ps_to;
604
605 /*
606 doc: <attribute name="last_from" return_type="struct chunk *" export="private">
607 doc: <summary>Last `from' zone used by partial scavenging.</summary>
608 doc: <thread_safety>Safe</thread_safety>
609 doc: <synchronization>eif_gc_mutex</synchronization>
610 doc: </attribute>
611 */
612 rt_private struct chunk *last_from = NULL;
613
614 /*
615 doc: <attribute name="th_alloc" return_type="size_t" export="public">
616 doc: <summary>Allocation threshold before calling GC. Initialized in `main.c', updated in `memory.c'.</summary>
617 doc: <thread_safety>Safe</thread_safety>
618 doc: <synchronization>None while initialized in main.c, but use `eif_memory_mutex' when updating its value.</synchronization>
619 doc: </attribute>
620 */
621 rt_public size_t th_alloc;
622
623 /*
624 doc: <attribute name="gc_monitor" return_type="int" export="public">
625 doc: <summary>Enable GC time-monitoring when non-zero. By default it is 0 (monitoring disabled).</summary>
626 doc: <thread_safety>Safe</thread_safety>
627 doc: <synchronization>None while initialized in main.c, but use `eif_memory_mutex' when updating its value.</synchronization>
628 doc: </attribute>
629 */
630 rt_public int gc_monitor = 0;
631
632 /*
633 doc: <attribute name="root_obj" return_type="EIF_REFERENCE" export="public">
634 doc: <summary>Pointer to root object of current system. Initialized by generated C code.</summary>
635 doc: <thread_safety>Safe</thread_safety>
636 doc: <synchronization>None</synchronization>
637 doc: </attribute>
638 */
639 rt_public EIF_REFERENCE root_obj = NULL;
640
641
642 /*
643 doc: <attribute name="rt_extension_obj" return_type="EIF_REFERENCE" export="public">
644 doc: <summary>Pointer to RT_EXTENSION object of current system. Initialized by generated C code.</summary>
645 doc: <thread_safety>Safe</thread_safety>
646 doc: <synchronization>None</synchronization>
647 doc: </attribute>
648 */
649 #ifdef WORKBENCH
650 rt_public EIF_REFERENCE rt_extension_obj = NULL;
651 #endif
652
653 /*
654 doc: <attribute name="except_mnger" return_type="EIF_REFERENCE" export="public">
655 doc: <summary>Pointer to EXCEPTION_MANAGER object of current system. Initialized by generated C code.</summary>
656 doc: <thread_safety>Safe</thread_safety>
657 doc: <synchronization>None</synchronization>
658 doc: </attribute>
659 */
660 rt_public EIF_REFERENCE except_mnger = NULL;
661
662
663 /*
664 doc: <attribute name="scp_mnger" return_type="EIF_REFERENCE" export="public">
665 doc: <summary>Pointer to ISE_SCOOP_MANAGER object of current system. Initialized by generated C code.</summary>
666 doc: <thread_safety>Safe</thread_safety>
667 doc: <synchronization>None</synchronization>
668 doc: </attribute>
669 */
670 rt_public EIF_REFERENCE scp_mnger = NULL;
671
672 /*
673 doc: <attribute name="has_reclaim_been_called" return_type="EIF_BOOLEAN" export="private">
674 doc: <summary>Flag to prevent multiple calls to `reclaim'. These could occur if, for some reason, `reclaim' failed: the `main' routine of the Eiffel program would then call `failure', which calls `reclaim' again. Since it failed the first time around, it would most likely fail a second time, so it is useless to call `reclaim' again.</summary>
675 doc: <thread_safety>Safe</thread_safety>
676 doc: <synchronization>None</synchronization>
677 doc: </attribute>
678 */
679 rt_private EIF_BOOLEAN has_reclaim_been_called = 0;
680
681 #ifdef ISE_GC
682
683 #ifdef DEBUG
684 rt_private int nb_items(struct stack *); /* Number of items held in a stack */
685 #endif
686 /* Automatic invocations of GC */
687 rt_shared int acollect(void); /* Collection based on threshold */
688 rt_shared int scollect(int (*gc_func) (void), int i); /* Collect with statistics */
689
690 #endif /* ISE_GC */
691
692 /* Stopping/restarting the GC */
693 rt_public void eif_gc_stop(void); /* Stop the garbage collector */
694 rt_public void eif_gc_run(void); /* Restart the garbage collector */
695
696 rt_public void reclaim(void); /* Reclaim all the objects */
697 #ifdef ISE_GC
698 rt_private void internal_marking(MARKER marking, int moving);
699 rt_private void full_mark(EIF_CONTEXT_NOARG); /* Marks all reachable objects */
700 rt_private void full_sweep(void); /* Removes all un-marked objects */
701 rt_private void run_collector(void); /* Wrapper for full collections */
702 rt_private void unmark_c_stack_objects (void);
703
704 /* Stack markers */
705 rt_private void mark_simple_stack(struct stack *stk, MARKER marker, int move); /* Marks a collector's stack */
706 rt_private void mark_stack(struct stack *stk, MARKER marker, int move); /* Marks a collector's stack */
707 rt_private void mark_overflow_stack(MARKER marker, int move);
708 rt_private void mark_array(EIF_REFERENCE *arr, rt_uint_ptr arr_count, MARKER marker, int move);
709 #if ! defined CUSTOM || defined NEED_OBJECT_ID_H
710 rt_private void update_object_id_stack(void); /* Update the object id stack */
711 #endif
712 rt_private void update_weak_references(void);
713 /* Storage compation reclaimer */
714 rt_public void plsc(void); /* Storage compaction reclaimer entry */
715 rt_private int partial_scavenging(void); /* The partial scavenging algorithm */
716 rt_private void run_plsc(void); /* Run the partial scavenging algorithm */
717 rt_shared void urgent_plsc(EIF_REFERENCE *object); /* Partial scavenge with given local root */
718 rt_private void init_plsc(void); /* Initialize the scavenging process */
719 rt_private void clean_zones(void); /* Clean up scavenge zones */
720 rt_private EIF_REFERENCE scavenge(register EIF_REFERENCE root, char **top); /* Scavenge an object */
721 /*rt_private void clean_space(void);*/ /* Sweep forwarded objects */ /* %%ss undefine */
722 rt_private void full_update(void); /* Update scavenge-related structures */
723 rt_private int split_to_block (int is_to_keep); /* Keep only needed space in 'to' block */
724 rt_private int sweep_from_space(void); /* Clean space after the scavenging */
725 rt_private int find_scavenge_spaces(void); /* Find a pair of scavenging spaces */
726 #ifndef EIF_NO_SCAVENGING
727 rt_private struct chunk *find_from_space(void); /* Look for a chunk that could be used as a `from' space. */
728 rt_private void find_to_space(void); /* Look for a chunk that could be used as a 'to' chunks */
729 #endif
730
731 /* Generation based collector */
732 rt_public int collect(void); /* Generation based collector main entry */
733 rt_private int generational_collect(void); /* The generational collection algorithm */
734 rt_public void eremb(EIF_REFERENCE obj); /* Remember an old object */
735 rt_public void erembq(EIF_REFERENCE obj); /* Quick version (no GC call) of eremb */
736 rt_private void update_memory_set (void); /* Update memory set */
737 rt_private void mark_new_generation(EIF_CONTEXT_NOARG); /* The name says it all, I think--RAM */
738 rt_private EIF_REFERENCE mark_expanded(EIF_REFERENCE root, MARKER marker); /* Marks expanded reference in stack */
739 rt_private void update_moved_set(void); /* Update the moved set (young objects) */
740 rt_private void update_rem_set(void); /* Update remembered set */
741 rt_shared int refers_new_object(register EIF_REFERENCE object); /* Does an object refer to young ones? */
742 rt_private EIF_REFERENCE gscavenge(EIF_REFERENCE root); /* Generation scavenging on an object */
743 rt_private void swap_gen_zones(void); /* Exchange 'from' and 'to' zones */
744
745 /* Dealing with dispose routine */
746 rt_shared void gfree(register union overhead *zone); /* Free object, possibly calling dispose */
747
748 #endif
749 /* Stack handling routines */
750 rt_shared int epush(register struct stack *stk, register void *value); /* Push value on stack */
751 rt_shared EIF_REFERENCE *st_alloc(register struct stack *stk, register size_t size); /* Creates an empty stack */
752 rt_shared void st_truncate(register struct stack *stk); /* Truncate stack if necessary */
753 rt_shared void st_wipe_out(register struct stchunk *chunk); /* Remove unneeded chunk from stack */
754 rt_shared int st_extend(register struct stack *stk, register size_t size); /* Extends size of stack */
755 rt_shared int st_has (register struct stack *stck, register void *);
756 #ifdef ISE_GC
757
758 /* Marking algorithm */
759 rt_private EIF_REFERENCE hybrid_mark(EIF_REFERENCE *root); /* Mark all reachable objects */
760 rt_private EIF_REFERENCE hybrid_gen_mark(EIF_REFERENCE *root); /* hybrid_mark with on-the-fly copy */
761
762 rt_private void mark_ex_stack(struct xstack *stk, MARKER marker, int move); /* Marks the exception stacks */
763
764 #ifdef WORKBENCH
765 rt_private void mark_op_stack(struct opstack *stk, MARKER marker, int move); /* Marks operational stack */
766 #endif
767
768 /* Compiled with -DTEST, we turn on DEBUG if not already done */
769 #ifdef TEST
770 #ifndef DEBUG
771 #define DEBUG 63 /* Highest debug level */
772 #endif
773 #endif
774
775 #ifdef DEBUG
776 static int fdone = 0; /* Tracing flag to only get the last full collect */
777 #define debug_ok(n) ((n) & DEBUG || fdone)
778 #define dprintf(n) if (DEBUG & (n) && debug_ok(n)) printf
779 #define flush fflush(stdout);
780 #endif
781
782
783 /* Function(s) used only in DEBUG mode */
784 #ifdef DEBUG
785 #ifndef MEMCHK
786 #define memck(x) ; /* No memory checking compiled */
787 #endif
788 #endif
789
790 #ifdef TEST
791 /* This is to make tests */
792 #undef References
793 #undef Size
794 #define References(type) 2 /* Number of references in object */
795 #define Size(type) 40 /* Size of the object */
796 #define Dispose(type) ((void (*)()) 0) /* No dispose routine */
797 #endif
798
799 /*
800 * Automatic collection and statistics routines.
801 */
802 /*
803 doc: <routine name="acollect" return_type="int" export="shared">
804 doc: <summary>This is the main dispatcher for garbage collection. Calls are based on a threshold th_alloc. Statistics are gathered while performing collection. We run collect() most of the time (for a generational mark and sweep and/or a generation scavenging) and a full collection once every 'plsc_per' (aka the period) calls. Each time we run a full collection, we perform a full coalescing of the memory. Our experience shows that it is more efficient to do the coalescing just after a full collection; doing it in between degrades the performance.</summary>
805 doc: <return>0 if collection was done, -1 otherwise.</return>
806 doc: <thread_safety>Safe with synchronization</thread_safety>
807 doc: <synchronization>Through `trigger_gc_mutex'.</synchronization>
808 doc: </routine>
809 */
810
811 rt_shared int acollect(void)
812 {
813 static long nb_calls = 0; /* Number of calls to function */
814 int status; /* Status returned by scollect() */
815 #ifdef EIF_CONDITIONAL_COLLECT
816 static rt_uint_ptr eif_total = 0; /* Total Eiffel memory allocated */
817 int freemem; /* Amount of free memory */
818 int tau; /* Mean allocation rate */
819 int half_tau;
820 int allocated; /* Memory used since last full collect */
821 #endif /* EIF_CONDITIONAL_COLLECT */
822 if (rt_g_data.status & GC_STOP)
823 #ifdef DEBUG
824 {
825 dprintf(1)("acollect: Nothing has to be done because GC_STOP\n");
826 #endif
827 return -1; /* Garbage collection stopped */
828 #ifdef DEBUG
829 }
830 #endif
831
832 #ifdef DEBUG
833 dprintf(1)("acollect: automatic garbage collection with %s\n",
834 nb_calls % plsc_per ? "generation collection" : "partial scavenging");
835 flush;
836 #endif
837
838 /* If the Eiffel memory free F is such that F > (.5 * P * T), where P is
839 * the period of full collections 'plsc_per' and T is the allocation
840 * threshold 'th_alloc', and F < (1.5 * P * T) then nothing is done. Thus
841 * we do collections only when a small amount of free memory is left or
842 * when a large amount is free (in the hope we'll be able to give some of
843 * this memory to the kernel).
844 * However, we have to counter balance this scheme with the extra amount of
845 * memory allocated since the last full collection. Whenever it is more
846 * than (P * T), we want to run a collection since some garbage might have
847 * been created.
848 */
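/* As a numeric sketch (figures are made up): with plsc_per = 10 and
 * th_alloc = 256 KB, P * T is 2560 KB, so the call is skipped whenever
 * less than 2560 KB have been allocated since the last full collection
 * and the free Eiffel memory lies strictly between 1280 KB and 3840 KB.
 */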
849
850 #ifdef EIF_CONDITIONAL_COLLECT
851 freemem = rt_e_data.ml_total - rt_e_data.ml_used - rt_e_data.ml_over;
852 tau = plsc_per * th_alloc;
853 half_tau = tau >> 1;
854 allocated = rt_e_data.ml_total - eif_total;
855
856 if (allocated < tau && freemem > half_tau && freemem < (tau + half_tau)) {
857
858 #ifdef DEBUG
859 dprintf(1)("acollect: skipping call (%d bytes free)\n", freemem);
860 #endif
861 return -1; /* Do not perform collection */ /* %%ss -1 was 0 */
862 }
863 #endif
864 /* Every "plsc_per" collections, we run a full collection.
865 * This period can be set by the user.
866 */
867
868 if (plsc_per) { /* Can we run full collections? */
869 if (force_plsc || (0 == nb_calls % plsc_per)) { /* Full collection required */
870 plsc();
871 status = 0;
872 /* Reset `force_plsc' since we don't want to have a second full
873 * collection right after this one. */
874 force_plsc = 0;
875 /* Reset `nb_calls' so that if we came here because of a `force_plsc'
876 * which happens between `0' and `plsc_per', we still wait `plsc_per'
877 * calls before launching the next full collection. */
878 nb_calls = 0;
879 #ifdef EIF_CONDITIONAL_COLLECT
880 eif_total = rt_e_data.ml_total;
881 #endif
882 } else /* Generation-based collector */
883 status = collect();
884 } else { /* Generation-based collector called, since
885 * there is no Full Collection. */
886 status = collect();
887 }
888
889 #ifdef DEBUG
890 dprintf(1)("acollect: returning status %d\n", status);
891 #endif
892
893 nb_calls++; /* Records the call */
894
895 return status; /* Collection done, forward status */
896 }
897
898 /*
899 doc: <routine name="rt_average" return_type="rt_uint_ptr" export="private">
900 doc: <summary>Compute an average without overflow as long as the sum of the two inputs does not cause an overflow.</summary>
901 doc: <param name="average" type="rt_uint_ptr">Value of average so far for the `n - 1' iterations.</param>
902 doc: <param name="value" type="rt_uint_ptr">New computed value to take into account in average.</param>
903 doc: <param name="n" type="rt_uint_ptr">Number of iterations so far. Assumes `n > 0'.</param>
904 doc: <return>Return the new average</return>
905 doc: <thread_safety>Safe</thread_safety>
906 doc: <synchronization>Performs a GC synchronization before executing itself.</synchronization>
907 doc: </routine>
908 */
909 #define RT_AVERAGE(average, value, n) ((average) + (((value) - (average)) / (n)))
910 rt_private rt_uint_ptr rt_average (rt_uint_ptr average, rt_uint_ptr value, rt_uint_ptr n)
911 {
912 if (value > average) {
913 return average + ((value - average) / n);
914 } else {
915 return average - ((average - value) / n);
916 }
917 }
918
919 /*
920 doc: <routine name="scollect" return_type="int" export="shared">
921 doc: <summary>Run a garbage collection cycle with statistics updating. We monitor both the time spent in the collection and the memory released, if any, as well as time between two collections... </summary>
922 doc: <param name="gc_func" type="int (*) (void)">Collection function to be called.</param>
923 doc: <param name="i" type="int">Index in `rt_g_stat' array where statistics are kept.</param>
924 doc: <return>Return the status given by the collection function.</return>
925 doc: <thread_safety>Safe</thread_safety>
926 doc: <synchronization>Performs a GC synchronization before executing itself.</synchronization>
927 doc: </routine>
928 */
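/* A typical invocation, as a sketch consistent with the declarations above,
 * would be:
 *
 *	status = scollect(partial_scavenging, GST_PART);
 *
 * i.e. the caller supplies the collection function together with the slot
 * of `rt_g_stat' in which its statistics accumulate.
 */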
929
930 rt_shared int scollect(int (*gc_func) (void), int i)
931 {
932 RT_GET_CONTEXT
933 static rt_uint_ptr nb_stats[GST_NBR]; /* For average computation */
934 #ifndef NO_GC_STATISTICS
935 static Timeval lastreal[GST_NBR]; /* Last real time of invocation */
936 Timeval realtime, realtime2; /* Real time stamps */
937 double usertime = 0, systime = 0; /* CPU stats before collection */
938 double usertime2 = 0, systime2 = 0; /* CPU usage after collection */
939 static double lastuser[GST_NBR]; /* Last CPU time for last call */
940 static double lastsys[GST_NBR]; /* Last kernel time for last call */
941 #endif
942 rt_uint_ptr mem_used; /* Current amount of memory used */
943 rt_uint_ptr e_mem_used_before, e_mem_used_after;
944 int status; /* Status reported by GC function */
945 struct gacstat *gstat = &rt_g_stat[i]; /* Address where stats are kept */
946 rt_uint_ptr nbstat; /* Current number of statistics */
947 rt_uint_ptr nb_full;
948 int old_trace_disabled;
949
950 if (rt_g_data.status & GC_STOP)
951 return -1; /* Garbage collection stopped */
952
953 GC_THREAD_PROTECT(eif_synchronize_gc (rt_globals));
954 DISCARD_BREAKPOINTS;
955 /* We have to disable the trace because, if a `dispose' routine is called while
956 * the trace is enabled, it might create Eiffel objects, which we currently do not allow. */
957 old_trace_disabled = eif_trace_disabled;
958 eif_trace_disabled = 1;
959
960 nb_full = rt_g_data.nb_full;
961 mem_used = rt_m_data.ml_used + rt_m_data.ml_over; /* Count overhead */
962 e_mem_used_before = rt_e_data.ml_used + rt_e_data.ml_over;
963 /* One more GC cycle. */
964 if (nb_stats [i] == 0) {
965 /* This is the first GC collection ever for `i'. */
966 nbstat = nb_stats [i] = 1;
967 } else {
968 nbstat = ++nb_stats[i];
969 /* If we overflow `nbstat' we restart the processing of the average calculation. */
970 if (nbstat == 0) {
971 nbstat = 3;
972 }
973 }
974
975 /* Reset scavenging-related figures, since those will be updated by the
976 * scavenging routines when needed.
977 */
978
979 rt_g_data.mem_move = 0; /* Memory subject to scavenging */
980 rt_g_data.mem_copied = 0; /* Amount of that memory which moved */
981
982 #ifndef NO_GC_STATISTICS
983 /* Get the current time before CPU time, because the accuracy of the
984 * real time clock is usually less important than the one used for CPU
985 * accounting.
986 */
987
988 if (gc_monitor) {
989 gettime(&realtime); /* Get current time stamp */
990 getcputime(&usertime, &systime); /* Current CPU usage */
991 }
992 #endif
993
994 #ifdef MEMCHK
995 #ifdef DEBUG
996 dprintf(1)("scollect: before GC\n");
997 memck(0);
998 #endif
999 #endif
1000
1001 status = (gc_func)(); /* GC invocation */
1002
1003 #ifdef MEMCHK
1004 #ifdef DEBUG
1005 dprintf(1)("scollect: after GC\n");
1006 memck(0);
1007 #endif
1008 #endif
1009
1010 #ifndef NO_GC_STATISTICS
1011 /* Get CPU time before real time, so that we have a more precise figure
1012 * (gettime uses a system call)--RAM.
1013 */
1014
1015 if (gc_monitor) {
1016 getcputime(&usertime2, &systime2); /* Current CPU usage */
1017 gettime(&realtime2); /* Get current time stamp */
1018 } else {
1019 memset(&realtime2, 0, sizeof(Timeval));
1020 }
1021 #endif
1022
1023 /* Now collect the statistics in the rt_g_stat structure. The real time
1024 * field will not be really significant if the time() system call is
1025 * used (granularity is one second).
1026 * Note that the memory collected can be negative, e.g. at the first
1027 * partial scavenging where a scavenge zone is allocated.
1028 */
1029
1030 rt_g_data.mem_used = rt_m_data.ml_used + rt_m_data.ml_over; /* Total mem used */
1031 gstat->mem_used = rt_g_data.mem_used;
1032 /* Sometimes during a collection we can have increased our memory
1033 * pool because for example we moved objects outside the scavenge zone
1034 * and therefore more objects have been allocated in memory. */
1035 if (mem_used > rt_g_data.mem_used) {
1036 gstat->mem_collect = mem_used - rt_g_data.mem_used; /* Memory collected */
1037 } else {
1038 gstat->mem_collect = 0;
1039 }
1040 /* Memory freed by scavenging (with overhead) */
1041 gstat->mem_collect += rt_g_data.mem_copied - rt_g_data.mem_move;
1042 gstat->mem_avg = rt_average(gstat->mem_avg, gstat->mem_collect, nbstat); /* Average mem freed */
1043
1044 if (nb_full != rt_g_data.nb_full) {
1045 /* We are in a full collection cycle. This is where we
1046 * update the value of `plsc_per' to a better value.
1047 * We only increase its value if the ratio of freed memory to
1048 * used memory is less than 1/3, between 1/3 and 2/3 we do not change
1049 * anything, and above 2/3 we decrease its value. */
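/* Numeric sketch (round figures): if 30 MB are in use after the
 * collection, `partial_used_memory' is 10 MB; freeing 6 MB (at most
 * 10 MB) increases `plsc_per', freeing 25 MB (more than 20 MB)
 * decreases it, and anything in between leaves it unchanged. */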
1050 rt_uint_ptr partial_used_memory = (rt_e_data.ml_used + rt_e_data.ml_over) / 3;
1051 rt_uint_ptr freed_memory;
1052 if (mem_used > rt_g_data.mem_used) {
1053 freed_memory = mem_used - rt_g_data.mem_used;
1054 } else {
1055 freed_memory = 0;
1056 }
1057 if (freed_memory == 0) {
1058 /* Gogo stage. That is to say new memory has been allocated
1059 * while we were collecting (moving young objects to old).
1060 * Therefore there is nothing we can say about increasing or
1061 * decreasing the full collection period, so we don't change
1062 * anything. */
1063 } else if (freed_memory <= partial_used_memory) {
1064 /* Perform a dichotomic increase */
1065 if (plsc_per < 100) {
1066 if (plsc_per < 50) {
1067 if (plsc_per < 8) {
1068 if (plsc_per < 4) {
1069 plsc_per += 1;
1070 } else {
1071 plsc_per += 2;
1072 }
1073 } else {
1074 plsc_per += 4;
1075 }
1076 } else {
1077 plsc_per += 8;
1078 }
1079 } else {
1080 plsc_per += 16;
1081 }
1082 if (plsc_per < 0) {
1083 /* Overflow here, so restore back the max positive integer value */
1084 plsc_per = 0x7FFFFFFF;
1085 }
1086 } else if (freed_memory > 2 * partial_used_memory) {
1087 /* Perform a dichotomic decrease */
1088 if (plsc_per <= 100) {
1089 if (plsc_per <= 50) {
1090 if (plsc_per <= 8) {
1091 if (plsc_per <= 4) {
1092 plsc_per -= 1;
1093 } else {
1094 plsc_per -= 2;
1095 }
1096 } else {
1097 plsc_per -= 4;
1098 }
1099 } else {
1100 plsc_per -= 8;
1101 }
1102 } else {
1103 plsc_per -= 16;
1104 }
1105 if (plsc_per < 1) {
1106 plsc_per = 1;
1107 }
1108 }
1109 } else {
1110 e_mem_used_after = rt_e_data.ml_used + rt_e_data.ml_over;
1111 if (e_mem_used_before > e_mem_used_after) {
1112 /* Some memory of free list was freed, so we should update `eiffel_usage' accordingly. */
1113 e_mem_used_before -= e_mem_used_after;
1114 if (eiffel_usage > e_mem_used_before) {
1115 eiffel_usage -= e_mem_used_before;
1116 } else {
1117 eiffel_usage = 0;
1118 }
1119 }
1120 }
1121
1122 #ifndef NO_GC_STATISTICS
1123 if (gc_monitor) {
1124 gstat->real_time = elapsed(&realtime, &realtime2);
1125 gstat->cpu_time = usertime2 - usertime; /* CPU time (user) */
1126 gstat->sys_time = systime2 - systime; /* CPU time (kernel) */
1127 gstat->cpu_total_time = usertime2;
1128 gstat->sys_total_time = systime2;
1129 } else {
1130 gstat->real_time = gstat->real_avg; /* Adding the average */
1131 gstat->cpu_time = gstat->cpu_avg; /* will not change the */
1132 gstat->sys_time = gstat->sys_avg; /* computation done so far */
1133 }
1134 gstat->real_avg = rt_average(gstat->real_avg, gstat->real_time, nbstat); /* Average real time */
1135 gstat->cpu_avg = RT_AVERAGE(gstat->cpu_avg, gstat->cpu_time, nbstat); /* Average user time */
1136 gstat->sys_avg = RT_AVERAGE(gstat->sys_avg, gstat->sys_time, nbstat); /* Average sys time */
1137
1138
1139 /* If it is not the first time, update the statistics. First compute the
1140 * time elapsed since last call, then update the average accordingly. */
1141 if (lastuser[i] != 0) {
1142 if (gc_monitor) {
1143 gstat->cpu_itime = usertime - lastuser[i];
1144 gstat->sys_itime = systime - lastsys[i];
1145 gstat->real_itime = elapsed(&lastreal[i], &realtime);
1146 } else {
1147 gstat->cpu_itime = gstat->cpu_iavg; /* Adding the average */
1148 gstat->sys_itime = gstat->sys_iavg; /* does not change the */
1149 gstat->real_itime = gstat->real_iavg; /* data we already have */
1150 }
1151 gstat->real_iavg = rt_average(gstat->real_iavg, gstat->real_itime, nbstat - 1);
1152 gstat->cpu_iavg = RT_AVERAGE(gstat->cpu_iavg, gstat->cpu_itime, nbstat - 1);
1153 gstat->sys_iavg = RT_AVERAGE(gstat->sys_iavg, gstat->sys_itime, nbstat - 1);
1154 }
1155
1156 /* Record current times for next invocation */
1157
1158 if (gc_monitor) {
1159 lastuser[i] = usertime2; /* User time after last GC */
1160 lastsys[i] = systime2; /* System time after last GC */
1161 memcpy (&lastreal[i], &realtime2, sizeof(Timeval));
1162 }
1163 #endif
1164
1165 #ifdef DEBUG
1166 dprintf(1)("scollect: statistics for %s\n",
1167 i == GST_PART ? "partial scavenging" : "generation collection");
1168 dprintf(1)("scollect: # of full collects: %ld\n", rt_g_data.nb_full);
1169 dprintf(1)("scollect: # of partial collects: %ld\n", rt_g_data.nb_partial);
1170 dprintf(1)("scollect: Total mem allocated: %ld bytes\n", rt_m_data.ml_total);
1171 dprintf(1)("scollect: Total mem used: %ld bytes\n", rt_m_data.ml_used);
1172 dprintf(1)("scollect: Total overhead: %ld bytes\n", rt_m_data.ml_over);
1173 dprintf(1)("scollect: Collected: %ld bytes\n", gstat->mem_collect);
1174 dprintf(1)("scollect: (Scavenging collect: %ld bytes)\n",
1175 rt_g_data.mem_copied - rt_g_data.mem_move);
1176 if (gc_monitor) {
1177 dprintf(1)("scollect: Real time: %fs\n", gstat->real_time / 100.);
1178 dprintf(1)("scollect: CPU time: %fs\n", gstat->cpu_time);
1179 dprintf(1)("scollect: System time: %fs\n", gstat->sys_time);
1180 dprintf(1)("scollect: Average real time: %fs\n", gstat->real_avg / 100.);
1181 dprintf(1)("scollect: Average CPU time: %f\n", gstat->cpu_avg);
1182 dprintf(1)("scollect: Average system time: %f\n", gstat->sys_avg);
1183 dprintf(1)("scollect: Interval time: %f\n", gstat->real_itime / 100.);
1184 dprintf(1)("scollect: Interval CPU time: %f\n", gstat->cpu_itime);
1185 dprintf(1)("scollect: Interval sys time: %f\n", gstat->sys_itime);
1186 dprintf(1)("scollect: Avg interval time: %f\n", gstat->real_iavg / 100.);
1187 dprintf(1)("scollect: Avg interval CPU time: %f\n", gstat->cpu_iavg);
1188 dprintf(1)("scollect: Avg interval sys time: %f\n", gstat->sys_iavg);
1189 }
1190 #endif
1191
1192 eif_trace_disabled = old_trace_disabled;
1193 UNDISCARD_BREAKPOINTS;
1194 GC_THREAD_PROTECT(eif_unsynchronize_gc (rt_globals));
1195 return status; /* Forward status report */
1196 }
1197 #endif /* ISE_GC */
1198
1199 /*
1200 * Garbage collector stop/run
1201 */
1202
1203 rt_public void eif_gc_stop(void)
1204 {
1205 /* Stop the GC -- this should be used in case of emergency only, i.e.
1206 * in an exception handler or in a time-critical routine.
1207 * Note that when we are in an exception handler, requests to GC controls
1208 * are silently ignored anyway (GC is turned off before executing the
1209 * signal handler).
1210 */
1211
1212 #ifdef ISE_GC
1213 RT_GET_CONTEXT
1214 EIF_G_DATA_MUTEX_LOCK;
1215 if (!(rt_g_data.status & GC_SIG)) /* If not in signal handler */
1216 rt_g_data.status |= GC_STOP; /* Stop GC */
1217 EIF_G_DATA_MUTEX_UNLOCK;
1218 #endif
1219 }
1220
1221 rt_public void eif_gc_run(void)
1222 {
1223 /* Restart the GC -- the garbage collector should always run except in
1224 * some critical operations, which should be rare. Anyway, after having
1225 * stopped it, here is the way to wake it up. Note that no collection
1226 * cycle is raised.
1227 * As for eif_gc_stop(), the request is ignored while in the exception handler.
1228 * The garbage collector is turned off in that case to avoid the dangling
1229 * reference problem--RAM.
1230 */
1231
1232 #ifdef ISE_GC
1233 RT_GET_CONTEXT
1234 EIF_G_DATA_MUTEX_LOCK;
1235 if (!(rt_g_data.status & GC_SIG)) /* If not in signal handler */
1236 rt_g_data.status &= ~GC_STOP; /* Restart GC */
1237 EIF_G_DATA_MUTEX_UNLOCK;
1238 #endif
1239 }
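/* Together the two routines above bracket GC-sensitive sections, along the
 * lines of this illustrative sketch:
 *
 *	eif_gc_stop();
 *	... time-critical C code that must not be interrupted by a cycle ...
 *	eif_gc_run();
 *
 * Note that eif_gc_run() only re-enables collection; it does not trigger a
 * cycle by itself.
 */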
1240
1241 #if defined (WORKBENCH) || defined (EIF_THREADS)
1242 /*
1243 doc: <routine name="alloc_oms" return_type="EIF_REFERENCE **" export="shared">
1244 doc: <summary>Allocate array of once manifest strings.</summary>
1245 doc: <return>Allocated array filled with 0s.</return>
1246 doc: <exception>"No more memory" when allocation fails.</exception>
1247 doc: <thread_safety>Safe</thread_safety>
1248 doc: <synchronization>Per thread data.</synchronization>
1249 doc: </routine>
1250 */
1251 rt_shared EIF_REFERENCE ** alloc_oms (void)
1252 {
1253 EIF_REFERENCE ** result;
1254
1255 result = (EIF_REFERENCE **) eif_calloc (eif_nb_org_routines, sizeof (EIF_REFERENCE *));
1256 if (result == (EIF_REFERENCE **) 0) { /* Out of memory */
1257 enomem ();
1258 }
1259 return result;
1260 }
1261
1262 /*
1263 doc: <routine name="free_oms" return_type="void" export="shared">
1264 doc: <summary>Free array of once manifest strings.</summary>
1265 doc: <param name="oms_array" type="EIF_REFERENCE **">Array to free.</param>
1266 doc: <thread_safety>Safe</thread_safety>
1267 doc: <synchronization>Per thread data.</synchronization>
1268 doc: </routine>
1269 */
1270 rt_shared void free_oms (EIF_REFERENCE **oms_array)
1271 {
1272 uint32 i;
1273
1274 if (oms_array) {
1275 i = eif_nb_org_routines;
1276 while (i > 0) {
1277 i --;
1278 if (oms_array[i]) {
1279 eif_free (oms_array[i]); /* have been allocated with eif_malloc */
1280 }
1281 }
1282 }
1283 eif_free (oms_array); /* has been allocated with eif_calloc */
1284 }
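/* Usage sketch: each thread allocates its table once and releases it on
 * termination, along the lines of
 *
 *	EIF_REFERENCE **oms = alloc_oms();
 *	...
 *	free_oms(oms);
 *
 * (the local name `oms' is only for illustration; the runtime keeps the
 * table in the per-thread `EIF_oms', freed via FREE_OMS in `reclaim'.)
 */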
1285 #endif
1286
1287
1288 rt_public void reclaim(void)
1289 {
1290 /* At the end of the process's lifetime, all the objects need to be
1291 * reclaimed, so that all the "dispose" procedures are called to perform
1292 * their clean-up job (such as removing locks or temporary files).
1293 * As all the objects are unmarked, we simply call full_sweep.
1294 * There is no need to explode the scavenge zone as objects held there
1295 * are known not to have any dispose routine (cf emalloc).
1296 */
1297
1298 RT_GET_CONTEXT
1299 EIF_GET_CONTEXT
1300
1301 #ifdef ISE_GC
1302 struct chunk *c, *cn;
1303 #endif
1304
1305 /* Mark final collection */
1306 eif_is_in_final_collect = EIF_TRUE;
1307
1308 if (!has_reclaim_been_called) {
1309 has_reclaim_been_called = 1;
1310
1311 #if ! defined CUSTOM || defined NEED_OPTION_H
1312 if (egc_prof_enabled) {
1313 exitprf(); /* Store profile information */
1314 }
1315 #endif
1316
1317 #ifdef ISE_GC
1318 if (!eif_no_reclaim && !(rt_g_data.status & GC_STOP)) { /* Does user want no reclaim? */
1319 #else
1320 if (!eif_no_reclaim) {
1321 #endif
1322
1323 #ifdef ISE_GC
1324 /* Failure occurred or we are exiting normally. In any case, we just synchronize
1325 * our GC to prevent any Eiffel thread from running. We will not unsynchronize the GC
1326 * because after the call to reclaim we are exiting and if we did unsynchronize
1327 * then some Eiffel code would be running on memory that has been freed which would
1328 * cause a crash. */
1329 GC_THREAD_PROTECT(eif_synchronize_gc(rt_globals));
1330 #endif
1331
1332 #ifdef RECLAIM_DEBUG
1333 fprintf(stderr, "reclaim: collecting all objects...\n");
1334 #endif
1335
1336 #ifdef ISE_GC
1337 if (gen_scavenge & GS_ON) { /* If generation scaveging was on */
1338 sc_stop(); /* Free 'to' and explode 'from' space */
1339 }
1340
1341 /* Reset GC status otherwise plsc() might skip some memory blocks
1342 * (those previously used as partial scavenging areas).
1343 */
1344 rt_g_data.status = (char) 0;
1345 /* Call the GC for the last time through a full collection. This
1346 * triggers the `dispose' routine of the remaining objects that
1347 * define one.
1348 * Ensures that `root_obj' is cleared.
1349 */
1350 root_obj = NULL;
1351 #ifdef WORKBENCH
1352 rt_extension_obj = NULL;
1353 #endif
1354 except_mnger = NULL;
1355 scp_mnger = NULL;
1356
1357 plsc ();
1358
1359 #endif
1360
1361 if (EIF_once_values != (EIF_once_value_t *) 0) {
1362 eif_free (EIF_once_values); /* have been allocated with eif_malloc */
1363 EIF_once_values = (EIF_once_value_t *) 0;
1364 }
1365 #ifdef EIF_THREADS
1366 {
1367 int i = EIF_process_once_count;
1368 while (i > 0) {
1369 i--;
1370 eif_thr_mutex_destroy (EIF_process_once_values [i].mutex);
1371 }
1372 }
1373
1374 if (EIF_process_once_values != (EIF_process_once_value_t *) 0) {
1375 eif_free (EIF_process_once_values); /* Free array of process-relative once results. */
1376 }
1377 #endif
1378 FREE_ONCE_INDEXES; /* Free array of once indexes. */
1379
1380 FREE_OMS (EIF_oms); /* Free array of once manifest strings */
1381
1382 eif_free (starting_working_directory);
1383 starting_working_directory = NULL;
1384 eif_gen_conf_cleanup ();
1385 #ifdef EIF_WINDOWS
1386 eif_cleanup();
1387 eif_free_dlls();
1388 #endif /* EIF_WINDOWS */
1389
1390 #ifdef WORKBENCH
1391 dbreak_free_table ();
1392 #endif
1393
1394 #ifdef EIF_THREADS
1395 if (eif_thr_is_root ()) {
1396 eif_thread_cleanup ();
1397 }
1398 #endif /* EIF_THREADS */
1399
1400 #ifdef ISE_GC
1401 for (c = cklst.ck_head; c != (struct chunk *) 0; c = cn) {
1402 cn = c->ck_next;
1403 eif_free (c); /* Previously allocated with eif_malloc. */
1404 }
1405 cklst.ck_head = (struct chunk *) 0;
1406 #endif
1407
1408 #ifdef ISE_GC
1409 #ifdef LMALLOC_CHECK
1410 eif_lm_display ();
1411 eif_lm_free ();
1412 #endif /* LMALLOC_CHECK */
1413 #endif
1414 /* Reclaim root creation procedure structures. */
1415 if (egc_rlist) {
1416 eif_free(egc_rlist);
1417 egc_rlist = NULL;
1418 }
1419 if (egc_rcdt) {
1420 eif_free(egc_rcdt);
1421 egc_rcdt = NULL;
1422 }
1423 if (egc_rcorigin) {
1424 eif_free(egc_rcorigin);
1425 egc_rcorigin = NULL;
1426 }
1427 if (egc_rcoffset) {
1428 eif_free(egc_rcoffset);
1429 egc_rcoffset = NULL;
1430 }
1431 if (egc_rcarg) {
1432 eif_free(egc_rcarg);
1433 egc_rcarg = NULL;
1434 }
1435 }
1436 }
1437
1438 /* Final collection terminated, unmark the flag */
1439 eif_is_in_final_collect = EIF_FALSE;
1440 }
1441
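/* A minimal sketch (disabled, illustrative only) of the "read the successor
 * before freeing" idiom used above when the chunk list is released at the end
 * of `reclaim'. The type `node_t' and the routine `free_all' are hypothetical
 * and only stand for `struct chunk' and the loop over `cklst'.
 */
#if 0
#include <stdlib.h>

typedef struct node { struct node *next; } node_t;

static void free_all (node_t *head)
{
	node_t *n, *nx;
	for (n = head; n; n = nx) {
		nx = n->next;	/* Fetch the successor before the node is freed. */
		free (n);
	}
}
#endif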
1442 #ifdef ISE_GC
1443 rt_private void run_collector(void)
1444 {
1445 /* Run the mark and sweep collectors, assuming the state is already set.
1446 * Provision is made for generation scavenging, as this zone cannot be
1447 * collected except by using a scavenging algorithm (with no aging).
1448 */
1449
1450 rt_g_data.nb_full++; /* One more full collection */
1451
1452 #ifdef DEBUG
1453 fdone = 1;
1454 dprintf(1)("run_collector: gen_scavenge: 0x%lx, status: 0x%lx\n",
1455 gen_scavenge, rt_g_data.status);
1456 flush;
1457 #endif
1458
1459 /* If the root object address is void, only run a full sweep. At the end
1460 * of the program (when the final disposal is run) or at the beginning
1461 * (when allocating some memory in "optimized for memory" mode), the root
1462 * object's address will be null and a mark phase does not really make
1463 * sense--RAM.
1464 */
1465
1466 full_mark(MTC_NOARG); /* Mark phase */
1467 full_update(); /* Update moved and remembered set (BEFORE sweep) */
1468 full_sweep(); /* Sweep phase */
1469 unmark_c_stack_objects ();
1470
1471 /* After a full collection (this routine is only called for a full mark
1472 * and sweep or a partial scavenging), give generation scavenging a try
1473 * again (in case it was stopped) by clearing the GS_STOP flag.
1474 */
1475
1476 if (gen_scavenge & GS_ON) /* Generation scavenging is on */
1477 swap_gen_zones(); /* Swap generation zones */
1478 gen_scavenge &= ~GS_STOP; /* Clear any stop flag */
1479 if (gen_scavenge == GS_OFF) {
1480 /* If generation scavenging is off, try to restore the scavenge zones
1481 * so that they can be reused for the next Eiffel object creation. */
1482 create_scavenge_zones();
1483 }
1484
1485 ufill(); /* Refill our urgent memory stock if needed */
1486
1487 #ifdef EIF_THREADS
1488 /* Notify SCOOP manager about any unused processors. */
1489 report_live_index ();
1490 #endif
1491
1492 #ifdef DEBUG
1493 fdone = 0; /* Do not trace any further */
1494 #endif
1495 }
1496
1497 rt_private void full_mark (EIF_CONTEXT_NOARG)
1498 {
1499 /* Mark phase -- Starting from the root object and the subsidiary
1500 * roots in the local stack, we recursively mark all the reachable
1501 * objects. At the beginning of this phase, it is assumed that no
1502 * object is marked.
1503 */
1504
1505 #ifdef EIF_THREADS
1506 int i;
1507 #endif
1508 int moving = rt_g_data.status & (GC_PART | GC_GEN);
1509
1510 /* Initialize our overflow depth */
1511 overflow_stack_depth = 0;
1512
1513 #ifdef EIF_THREADS
1514 /* Initialize list of live indexes for threads. */
1515 /* This should be done before any marking. */
1516 prepare_live_index ();
1517 #endif
1518
1519 /* Perform marking */
1520 root_obj = MARK_SWITCH(&root_obj); /* Primary root */
1521 #ifdef WORKBENCH
1522 if (rt_extension_obj) {
1523 rt_extension_obj = MARK_SWITCH(&rt_extension_obj); /* Primary root */
1524 }
1525 #endif
1526 except_mnger = MARK_SWITCH(&except_mnger); /* EXCEPTION_MANAGER */
1527 if (scp_mnger) {
1528 /* Mark SCOOP manager. */
1529 scp_mnger = MARK_SWITCH(&scp_mnger); /* ISE_SCOOP_MANAGER */
1530 }
1531
1532 /* Deal with TYPE instances. */
1533 /* We add +2 to `eif_next_gen_id' because indexes 0 and 1 are reserved for detachable NONE and
1534 * attached NONE. See `eif_type_malloc'. */
1535 CHECK("valid bounds", eif_next_gen_id + 2 <= rt_type_set_count);
1536 mark_array (rt_type_set, (rt_type_set_count > eif_next_gen_id ? eif_next_gen_id + (rt_uint_ptr) 2 : rt_type_set_count), MARK_SWITCH, moving);
1537
1538 /* Detect live and dead processors without taking once manifest strings into account,
1539 * because they do not add any information about liveness status of the processors. */
1540 internal_marking (MARK_SWITCH, moving);
1541
1542 /* Deal with once manifest strings. */
1543 #ifndef EIF_THREADS
1544 mark_stack(&oms_set, MARK_SWITCH, moving);
1545 #else
1546 /* Mark both live and dead indexes, because it's up to the SCOOP manager
1547 * to delete a thread and to reclaim once manifest strings. */
1548 for (i = 0; i < oms_set_list.count; i++)
1549 mark_stack(oms_set_list.threads.sstack[i], MARK_SWITCH, moving);
1550 #endif
1551 }
1552
1553 rt_private void internal_marking(MARKER marking, int moving)
1554 /* Generic marking of all stacks. This code is common to `full_mark' and
1555 * `mark_new_generation'
1556 */
1557 {
1558 #ifdef EIF_THREADS
1559 size_t i;
1560 size_t j;
1561 size_t n;
1562 #endif
1563
1564 #ifndef EIF_THREADS
1565 /* The regular Eiffel variables have their values stored directly within
1566 * the loc_set stack. Those variables are the local roots for the garbage
1567 * collection process.
1568 */
1569 mark_stack(&loc_set, marking, moving);
1570
1571 /* The stack of local variables holds the addresses of variables
1572 * in the process's stack which refer to the objects, hence the
1573 * double indirection necessary to reach the objects.
1574 */
1575 mark_stack(&loc_stack, marking, moving);
1576
1577 /* Once functions are old objects that are always alive in the system.
1578 * They are all gathered in a stack and always belong to the old
1579 * generation (thus they are allocated from the free-list). As with
1580 * locals, a double indirection is necessary.
1581 */
1582 #if defined WORKBENCH
1583 mark_simple_stack(&once_set, marking, moving);
1584 #else
1585 mark_stack(&once_set, marking, moving);
1586 #endif
1587
1588 /* The hector stacks record the objects which have been given to C and may
1589 * have been kept by the C side. Those objects are alive, of course.
1590 */
1591 mark_simple_stack(&hec_stack, marking, moving);
1592 mark_simple_stack(&eif_hec_saved, marking, moving);
1593
1594 #ifdef WORKBENCH
1595 /* The operational stack of the interpreter holds some references which
1596 * must be marked and/or updated.
1597 */
1598 mark_op_stack(&op_stack, marking, moving);
1599
1600 /* The exception stacks are scanned. This is done more to update the references
1601 * to objects than to ensure these objects are indeed alive...
1602 */
1603 mark_ex_stack(&eif_stack, marking, moving);
1604 mark_ex_stack(&eif_trace, marking, moving);
1605
1606 #else
1607 if (exception_stack_managed) {
1608 /* The exception stacks are scanned. This is done more to update the references
1609 * to objects than to ensure these objects are indeed alive...
1610 */
1611 mark_ex_stack(&eif_stack, marking, moving);
1612 mark_ex_stack(&eif_trace, marking, moving);
1613 }
1614 #endif
1615 #else /* EIF_THREADS */
1616 /* Traverse global stacks. */
1617 mark_stack(&global_once_set, marking, moving);
1618 mark_simple_stack(&eif_hec_saved, marking, moving);
1619
1620 /* Traverse per-thread stacks. */
1621 /* All stacks are in arrays of the same size. */
1622 CHECK ("Same stack count", loc_set_list.count == loc_stack_list.count);
1623 CHECK ("Same stack count", loc_set_list.count == once_set_list.count);
1624 CHECK ("Same stack count", loc_set_list.count == hec_stack_list.count);
1625 CHECK ("Same stack count", loc_set_list.count == sep_stack_list.count);
1626 CHECK ("Same stack count", loc_set_list.count == eif_stack_list.count);
1627 CHECK ("Same stack count", loc_set_list.count == eif_trace_list.count);
1628 #ifdef WORKBENCH
1629 CHECK ("Same stack count", loc_set_list.count == opstack_list.count);
1630 #endif
1631
1632 if (rt_g_data.status & (GC_PART | GC_GEN)) {
1633 /* Full GC: mark only live processors. */
1634 for (j = 0; j < live_index_count;) {
1635 /* Iterate over known live indexes. */
1636 for (n = live_index_count; j < n; j++) {
1637 /* Use only live indexes. */
1638 i = live_index [j];
1639 CHECK ("Valid index", i < loc_set_list.count);
1640 mark_stack(loc_set_list.threads.sstack[i], marking, moving);
1641 mark_stack(loc_stack_list.threads.sstack[i], marking, moving);
1642 mark_simple_stack(once_set_list.threads.sstack[i], marking, moving);
1643 mark_simple_stack(hec_stack_list.threads.sstack[i], marking, moving);
1644 mark_simple_stack(sep_stack_list.threads.sstack[i], marking, moving);
1645 #ifdef WORKBENCH
1646 mark_op_stack(opstack_list.threads.opstack[i], marking, moving);
1647 mark_ex_stack(eif_stack_list.threads.xstack[i], marking, moving);
1648 mark_ex_stack(eif_trace_list.threads.xstack[i], marking, moving);
1649 #else
1650 if (exception_stack_managed) {
1651 mark_ex_stack(eif_stack_list.threads.xstack[i], marking, moving);
1652 mark_ex_stack(eif_trace_list.threads.xstack[i], marking, moving);
1653 }
1654 #endif
1655 }
1656 /* Check if there are new live indexes. */
1657 update_live_index ();
1658 }
1659 /* Perform marking for dead indexes.
1660 * This slows down the GC a bit, because objects corresponding to dead processors
1661 * are not released immediately, but it allows the SCOOP manager to proceed normally
1662 * and manage threads in any suitable way, including reuse.
1663 */
1664 complement_live_index ();
1665 } else {
1666 /* Partial GC: duplicate indexes to mark all processors. */
1667 j = 0;
1668 for (n = loc_set_list.count; j < n; j++) {
1669 live_index [j] = j;
1670 }
1671 j = 0;
1672 }
1673 for (n = loc_set_list.count; j < n; j++) {
1674 /* Use `live_index' to figure out what else has to be marked.
1675 * It includes unmarked indexes when doing full GC and
1676 * all indexes when doing partial GC. */
1677 i = live_index [j];
1678 CHECK ("Valid index", i < loc_set_list.count);
1679 mark_stack(loc_set_list.threads.sstack[i], marking, moving);
1680 mark_stack(loc_stack_list.threads.sstack[i], marking, moving);
1681 mark_simple_stack(once_set_list.threads.sstack[i], marking, moving);
1682 mark_simple_stack(hec_stack_list.threads.sstack[i], marking, moving);
1683 mark_simple_stack(sep_stack_list.threads.sstack[i], marking, moving);
1684 #ifdef WORKBENCH
1685 mark_op_stack(opstack_list.threads.opstack[i], marking, moving);
1686 mark_ex_stack(eif_stack_list.threads.xstack[i], marking, moving);
1687 mark_ex_stack(eif_trace_list.threads.xstack[i], marking, moving);
1688 #else
1689 if (exception_stack_managed) {
1690 mark_ex_stack(eif_stack_list.threads.xstack[i], marking, moving);
1691 mark_ex_stack(eif_trace_list.threads.xstack[i], marking, moving);
1692 }
1693 #endif
1694 }
1695
1696 #endif /* EIF_THREADS */
1697
1698 /* process overflow_stack_set */
1699 mark_overflow_stack(marking, moving);
1700
1701 #if ! defined CUSTOM || defined NEED_OBJECT_ID_H
1702 /* The object id stacks record the objects referenced by an identifier. Those objects
1703 * are not necessarily alive. Thus only an update after a move is needed.
1704 */
1705 if (moving) update_object_id_stack();
1706 #endif
1707 }
1708
1709 rt_private void mark_simple_stack(struct stack *stk, MARKER marker, int move)
1710 /* The stack which is to be marked */
1711 /* The routine used to mark objects */
1712 /* Are the objects expected to move? */
1713 {
1714 /* Loop over the specified stack, using the supplied marker to recursively
1715 * mark the objects. The 'move' flag tells us whether the
1716 * objects are expected to move or not (to avoid useless write
1717 * indirections). Stack holds direct references to objects.
1718 */
1719 #ifdef DEBUG
1720 EIF_GET_CONTEXT
1721 #endif
1722
1723 EIF_REFERENCE *object; /* For looping over subsidiary roots */
1724 rt_uint_ptr roots; /* Number of roots in each chunk */
1725 struct stchunk *s; /* To walk through each stack's chunk */
1726 int done = 0; /* Top of stack not reached yet */
1727
1728 #ifdef DEBUG
1729 int saved_roots; EIF_REFERENCE *saved_object;
1730 dprintf(1)("mark_simple_stack: scanning %s stack for %s collector\n",
1731 stk == &loc_set ? "local" : (stk == &rem_set ? "remembered" : "other"),
1732 marker == GEN_SWITCH ? "generation" : "traditional");
1733 flush;
1734 #endif
1735
1736 if (stk->st_top == (EIF_REFERENCE *) 0) /* Stack is not created yet */
1737 return;
1738
1739 for (s = stk->st_hd; s && !done; s = s->sk_next) {
1740 object = s->sk_arena; /* Start of stack */
1741 if (s != stk->st_cur) /* Before current pos? */
1742 roots = s->sk_end - object; /* The whole chunk */
1743 else {
1744 roots = stk->st_top - object; /* Stop at the top */
1745 done = 1; /* Reached end of stack */
1746 }
1747
1748 #ifdef DEBUG
1749 dprintf(2)("mark_simple_stack: %d objects in %s chunk\n",
1750 roots, done ? "last" : "current");
1751 saved_roots = roots; saved_object = object;
1752 if (DEBUG & 2 && debug_ok(2)) {
1753 int i; EIF_REFERENCE *obj = object;
1754 for (i = 0; i < roots; i++, obj++)
1755 printf("%d: 0x%lx\n", i, *obj);
1756 }
1757 flush;
1758 #endif
1759
1760 /* This is the actual marking! (hard to see in the middle of all those
1761 * debug statements, so the only purpose of this comment is to catch
1762 * the eye and spot the parsing code)--RAM.
1763 */
1764
1765 if (move) {
1766 for (; roots > 0; roots--, object++) {
1767 if (*object) {
1768 *object = mark_expanded(*object, marker);
1769 }
1770 }
1771 } else {
1772 for (; roots > 0; roots--, object++) {
1773 if (*object) {
1774 (void) mark_expanded(*object, marker);
1775 }
1776 }
1777 }
1778 #ifdef DEBUG
1779 roots = saved_roots; object = saved_object;
1780 dprintf(2)("mark_simple_stack: after GC: %d objects in %s chunk\n",
1781 roots, done ? "last" : "current");
1782 if (DEBUG & 2 && debug_ok(2)) {
1783 int i; EIF_REFERENCE *obj = object;
1784 for (i = 0; i < roots; i++, obj++)
1785 printf("%d: 0x%lx\n", i, *obj);
1786 }
1787 flush;
1788 #endif
1789 }
1790 }
1791
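/* A minimal sketch (disabled, illustrative only) of the chunked stack
 * traversal shared by the mark_xx_stack routines above: chunks before the
 * current one are scanned entirely, the current chunk only up to the stack
 * top. The types `sk_chunk'/`sk_stack' and the `visit' callback are
 * hypothetical simplifications of `struct stchunk'/`struct stack'.
 */
#if 0
struct sk_chunk { struct sk_chunk *next; void **arena; void **end; };
struct sk_stack { struct sk_chunk *head; struct sk_chunk *cur; void **top; };

static void visit_all (struct sk_stack *stk, void (*visit)(void *))
{
	struct sk_chunk *c;
	void **item, **stop;

	for (c = stk->head; c; c = c->next) {
		stop = (c == stk->cur) ? stk->top : c->end;	/* Current chunk stops at the top. */
		for (item = c->arena; item < stop; item++) {
			if (*item) {
				visit (*item);
			}
		}
		if (c == stk->cur) {
			break;	/* Top of stack reached. */
		}
	}
}
#endif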
1792 #if ! defined CUSTOM || defined NEED_OBJECT_ID_H
1793 rt_private void update_object_id_stack(void)
1794 {
1795 /* Loop over the specified stack to update the objects after a move.
1796 * Stack holds direct references to objects.
1797 * No marking is done, just the update, i.e. the objects are not roots
1798 * for the GC.
1799 */
1800
1801 struct stack *stk = &object_id_stack;
1802
1803 EIF_REFERENCE *object; /* For looping over subsidiary roots */
1804 rt_uint_ptr roots; /* Number of roots in each chunk */
1805 struct stchunk *s; /* To walk through each stack's chunk */
1806 int done = 0; /* Top of stack not reached yet */
1807
1808 #ifdef DEBUG
1809 int saved_roots; EIF_REFERENCE *saved_object;
1810 dprintf(1)("mark_object_id_stack\n");
1811 flush;
1812 #endif
1813
1814 if (stk->st_top == (EIF_REFERENCE *) 0) /* Stack is not created yet */
1815 return;
1816
1817 for (s = stk->st_hd; s && !done; s = s->sk_next) {
1818 object = s->sk_arena; /* Start of stack */
1819 if (s != stk->st_cur) /* Before current pos? */
1820 roots = s->sk_end - object; /* The whole chunk */
1821 else {
1822 roots = stk->st_top - object; /* Stop at the top */
1823 done = 1; /* Reached end of stack */
1824 }
1825
1826 #ifdef DEBUG
1827 dprintf(2)("mark_object_id_stack: %d objects in %s chunk\n",
1828 roots, done ? "last" : "current");
1829 saved_roots = roots; saved_object = object;
1830 if (DEBUG & 2 && debug_ok(2)) {
1831 int i; EIF_REFERENCE *obj = object;
1832 for (i = 0; i < roots; i++, obj++)
1833 printf("%d: 0x%lx\n", i, *obj);
1834 }
1835 flush;
1836 #endif
1837
1838 for (; roots > 0; roots--, object++)
1839 {
1840 register char* root;
1841 register union overhead *zone;
1842
1843 root = *object;
1844 if (root != (EIF_REFERENCE)0){
1845 zone = HEADER(root);
1846 /* If the object has moved, update the stack */
1847 if (zone->ov_size & B_FWD)
1848 *object = zone->ov_fwd;
1849 }
1850 }
1851
1852 #ifdef DEBUG
1853 roots = saved_roots; object = saved_object;
1854 dprintf(2)("mark_object_id_stack: after GC: %d objects in %s chunk\n",
1855 roots, done ? "last" : "current");
1856 if (DEBUG & 2 && debug_ok(2)) {
1857 int i; EIF_REFERENCE *obj = object;
1858 for (i = 0; i < roots; i++, obj++)
1859 printf("%d: 0x%lx\n", i, *obj);
1860 }
1861 flush;
1862 #endif
1863 }
1864 }
1865 #endif /* !CUSTOM || NEED_OBJECT_ID_H */
1866
1867 /*
1868 doc: <routine name="update_weak_references" export="private">
1869 doc: <summary>Loop over the `eif_weak_references' stack and update objects after a move. No marking is done just the update.</summary>
1870 doc: <thread_safety>Safe</thread_safety>
1871 doc: <synchronization>Called during GC cycle.</synchronization>
1872 doc: </routine>
1873 */
1874 rt_private void update_weak_references(void)
1875 {
1876 /* Loop over the specified stack to update the objects after a move.
1877 * Stack holds direct references to objects.
1878 * No marking is done, just the update, i.e. the objects are not roots
1879 * for the GC.
1880 */
1881
1882 struct stack *stk = &eif_weak_references;
1883
1884 EIF_REFERENCE *object; /* For looping over subsidiary roots */
1885 rt_uint_ptr roots; /* Number of roots in each chunk */
1886 struct stchunk *s; /* To walk through each stack's chunk */
1887 int done = 0; /* Top of stack not reached yet */
1888 int generational; /* Are we in a generational cycle? */
1889 union overhead *zone;
1890
1891 if (stk->st_top == (EIF_REFERENCE *) 0) /* Stack is not created yet */
1892 return;
1893
1894 generational = rt_g_data.status & GC_FAST;
1895
1896 for (s = stk->st_hd; s && !done; s = s->sk_next) {
1897 object = s->sk_arena; /* Start of stack */
1898 if (s != stk->st_cur) /* Before current pos? */
1899 roots = s->sk_end - object; /* The whole chunk */
1900 else {
1901 roots = stk->st_top - object; /* Stop at the top */
1902 done = 1; /* Reached end of stack */
1903 }
1904
1905 for (; roots > 0; roots--, object++) {
1906 if (*object) {
1907 zone = HEADER(*object);
1908 if (zone->ov_size & B_FWD) {
1909 /* If the object has moved, update the stack */
1910 *object = zone->ov_fwd;
1911 } else if (generational) {
1912 if ((!(zone->ov_flags & EO_OLD)) && (!(zone->ov_flags & EO_MARK))) {
1913 /* Object is not alive anymore since it was not marked. */
1914 *object = NULL;
1915 }
1916 } else if (!(zone->ov_flags & EO_MARK)) {
1917 /* Object is not alive anymore since it was not marked. */
1918 *object = NULL;
1919 }
1920 }
1921 }
1922 }
1923 }
1924
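/* A minimal sketch (disabled, illustrative only) of the decision applied by
 * `update_weak_references' above to each entry. The helpers `is_forwarded',
 * `forwarded_to', `is_old' and `is_marked' are hypothetical and stand for the
 * B_FWD, ov_fwd, EO_OLD and EO_MARK tests on the malloc info zone.
 */
#if 0
static void *weak_entry_after_gc (void *obj, int generational)
{
	if (!obj)
		return NULL;
	if (is_forwarded (obj))
		return forwarded_to (obj);			/* Object moved: follow the forwarding pointer. */
	if (generational && is_old (obj))
		return obj;							/* Old objects are not traced by a generational cycle. */
	return is_marked (obj) ? obj : NULL;	/* Unmarked objects are dead: clear the weak reference. */
}
#endif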
1925 rt_private void mark_stack(struct stack *stk, MARKER marker, int move)
1926 /* The stack which is to be marked */
1927 /* The routine used to mark objects */
1928 /* Are the objects expected to move? */
1929 {
1930 /* Loop over the specified stack, using the supplied marker to recursively
1931 * mark the objects. The 'move' flag tells us whether the
1932 * objects are expected to move or not (to avoid useless write
1933 * indirections). Stack holds indirect references to objects.
1934 */
1935 #ifdef DEBUG
1936 EIF_GET_CONTEXT
1937 #endif
1938 EIF_REFERENCE *object; /* For looping over subsidiary roots */
1939 rt_uint_ptr roots; /* Number of roots in each chunk */
1940 struct stchunk *s; /* To walk through each stack's chunk */
1941 int done = 0; /* Top of stack not reached yet */
1942
1943 #ifdef DEBUG
1944 int saved_roots; EIF_REFERENCE *saved_object;
1945 dprintf(1)("mark_stack: scanning %s stack for %s collector\n",
1946 stk == &loc_stack ? "local (indirect)" : "once",
1947 marker == GEN_SWITCH ? "generation" : "traditional");
1948 flush;
1949 #endif
1950
1951 if (stk->st_top == (EIF_REFERENCE *) 0) /* Stack is not created yet */
1952 return;
1953
1954 for (s = stk->st_hd; s && !done; s = s->sk_next) {
1955 object = s->sk_arena; /* Start of stack */
1956 if (s != stk->st_cur) /* Before current pos? */
1957 roots = s->sk_end - object; /* The whole chunk */
1958 else {
1959 roots = stk->st_top - object; /* Stop at the top */
1960 done = 1; /* Reached end of stack */
1961 }
1962
1963 #ifdef DEBUG
1964 dprintf(2)("mark_stack: %d objects in %s chunk\n",
1965 roots, done ? "last" : "current");
1966 saved_roots = roots; saved_object = object;
1967 if (DEBUG & 2 && debug_ok(2)) {
1968 int i; EIF_REFERENCE *obj = object;
1969 for (i = 0; i < roots; i++, obj++)
1970 printf("%d: 0x%lx\n", i, *(EIF_REFERENCE *) *obj);
1971 }
1972 flush;
1973 #endif
1974
1975 if (move) {
1976 for (; roots > 0; roots--, object++) {
1977 if (*object) {
1978 *(EIF_REFERENCE *) *object = mark_expanded(*(EIF_REFERENCE *) *object, marker);
1979 }
1980 }
1981 } else {
1982 for (; roots > 0; roots--, object++) {
1983 if (*object) {
1984 (void) mark_expanded(*(EIF_REFERENCE *) *object, marker);
1985 }
1986 }
1987 }
1988
1989 #ifdef DEBUG
1990 roots = saved_roots; object = saved_object;
1991 dprintf(2)("mark_stack: after GC: %d objects in %s chunk\n",
1992 roots, done ? "last" : "current");
1993 if (DEBUG & 2 && debug_ok(2)) {
1994 int i; EIF_REFERENCE *obj = object;
1995 for (i = 0; i < roots; i++, obj++)
1996 printf("%d: 0x%lx\n", i, * (EIF_REFERENCE *) *obj);
1997 }
1998 flush;
1999 #endif
2000 }
2001 }
2002
2003 rt_private EIF_REFERENCE mark_expanded(EIF_REFERENCE root, MARKER marker)
2004 /* Expanded reference to be marked */
2005 /* The routine used to mark objects */
2006 {
2007 /* The main invariant from the GC is: "expanded objects are only referenced
2008 * once, therefore they are not marked and must be traversed only once".
2009 * Here, the operational stack may reference expanded objects directly,
2010 * hence jeopardizing the invariant. This routine will ask for a traversal
2011 * of the enclosing object and then update the reference to the expanded
2012 * should the enclosing object have moved.
2013 */
2014
2015 EIF_REFERENCE enclosing; /* Address of the enclosing object */
2016 EIF_REFERENCE new; /* New address of the enclosing object */
2017 rt_uint_ptr offset; /* Offset of the expanded object within enclosing one */
2018 union overhead *zone;
2019
2020 if (root == (EIF_REFERENCE) 0)
2021 return (EIF_REFERENCE) 0;
2022
2023 zone = HEADER(root); /* Malloc info zone */
2024
2025 if (zone->ov_size & B_FWD)
2026 return zone->ov_fwd; /* Already forwarded, i.e. traversed */
2027
2028 if (!eif_is_nested_expanded(zone->ov_flags)) {
2029 /* It is guaranteed that the first call to `marker' will not put it
2030 * in `overflow_stack_set', therefore giving the address of
2031 * `root' is ok. */
2032 return (marker)(&root); /* Mark non-expanded objects directly */
2033 }
2034
2035 /* Expanded objects in the call stack have a size of `0', therefore
2036 * the following code is still safe because it is just equivalent to
2037 * marking `root'. */
2038 offset = zone->ov_size & B_SIZE;
2039 enclosing = root - offset;
2040
2041 /* See remark above for `overflow_stack_set' */
2042 new = (marker)(&enclosing); /* Traverse enclosing, save new location */
2043 if (new == enclosing) /* Object did not move */
2044 return root; /* Neither did the expanded object */
2045
2046 return new + offset; /* New address of the expanded object */
2047 }
2048
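/* A minimal worked example (disabled, illustrative addresses only) of the
 * rebasing performed by `mark_expanded' above when the enclosing object of a
 * nested expanded is relocated by the marker.
 */
#if 0
static void rebase_example (void)
{
	char *enclosing = (char *) 0x1000;	/* Hypothetical enclosing object address */
	char *expanded = enclosing + 0x20;	/* Nested expanded object, 0x20 bytes inside it */
	char *moved = (char *) 0x5000;		/* Where the marker relocated the enclosing object */
	char *updated = moved + (expanded - enclosing);	/* New expanded address: 0x5020 */
	(void) updated;
}
#endif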
2049 /* Start of workbench-specific marking functions */
2050 #ifdef WORKBENCH
2051 rt_private void mark_op_stack(struct opstack *stk, MARKER marker, int move)
2052 /* The routine used to mark objects */
2053 /* Are the objects expected to move? */
2054 {
2055 /* Loop over the operational stack (the one used by the interpreter) and
2056 * mark all the references found.
2057 */
2058
2059 EIF_TYPED_VALUE *last; /* For looping over subsidiary roots */
2060 rt_uint_ptr roots; /* Number of roots in each chunk */
2061 struct stochunk *s; /* To walk through each stack's chunk */
2062 int done = 0; /* Top of stack not reached yet */
2063
2064 #ifdef DEBUG
2065 int saved_roots; EIF_TYPED_VALUE *saved_last;
2066 dprintf(1)("mark_op_stack: scanning operational stack for %s collector\n",
2067 marker == GEN_SWITCH ? "generation" : "traditional");
2068 flush;
2069 #endif
2070
2071 /* There is no need to check for the existence of the operational stack:
2072 * we know it has already been created.
2073 */
2074
2075 for (s = stk->st_hd; s && !done; s = s->sk_next) {
2076 last = s->sk_arena; /* Start of stack */
2077 if (s != stk->st_cur) /* Before current pos? */
2078 roots = s->sk_end - last; /* The whole chunk */
2079 else {
2080 roots = stk->st_top - last; /* Stop at the top */
2081 done = 1; /* Reached end of stack */
2082 }
2083
2084 #ifdef DEBUG
2085 dprintf(2)("mark_op_stack: %d objects in %s chunk\n",
2086 roots, done ? "last" : "current");
2087 saved_roots = roots; saved_last = last;
2088 if (DEBUG & 2 && debug_ok(2)) {
2089 int i; EIF_TYPED_VALUE *lst = last;
2090 for (i = 0; i < roots; i++, lst++) {
2091 switch (lst->type & SK_HEAD) {
2092 case SK_EXP: printf("\t%d: expanded 0x%lx\n", i, lst->it_ref); break;
2093 case SK_REF: printf("\t%d: 0x%lx\n", i, lst->it_ref); break;
2094 case SK_BOOL: printf("\t%d: bool %s\n", i, lst->it_char ? "true" : "false"); break;
2095 case SK_CHAR8: printf("\t%d: char %d\n", i, lst->it_char); break;
2096 case SK_CHAR32: printf("\t%d: wide char %lu\n", i, lst->it_wchar); break;
2097 case SK_UINT8: printf("\t%d: uint8 %ld\n", i, lst->it_uint8); break;
2098 case SK_UINT16: printf("\t%d: uint16 %ld\n", i, lst->it_uint16); break;
2099 case SK_UINT32: printf("\t%d: uint32 %ld\n", i, lst->it_uint32); break;
2100 case SK_UINT64: printf("\t%d: uint64 %ld\n", i, lst->it_uint64); break;
2101 case SK_INT8: printf("\t%d: int8 %ld\n", i, lst->it_int8); break;
2102 case SK_INT16: printf("\t%d: int16 %ld\n", i, lst->it_int16); break;
2103 case SK_INT32: printf("\t%d: int32 %ld\n", i, lst->it_int32); break;
2104 case SK_INT64: printf("\t%d: int64 %ld\n", i, lst->it_int64); break;
2105 case SK_REAL32: printf("\t%d: real32 %f\n", i, lst->it_real32); break;
2106 case SK_REAL64: printf("\t%d: real64 %f\n", i, lst->it_real64); break;
2107 case SK_POINTER: printf("\t%d: pointer 0x%lx\n", i, lst->it_ref); break;
2108 case SK_VOID: printf("\t%d: void\n", i); break;
2109 default:
2110 printf("\t%d: UNKNOWN TYPE 0x%lx\n", i, lst->type);
2111 }
2112 }
2113 }
2114 flush;
2115 #endif
2116
2117 if (move)
2118 for (; roots > 0; roots--, last++) /* Objects may be moved */
2119 switch (last->type & SK_HEAD) { /* Type in stack */
2120 case SK_REF: /* Reference */
2121 case SK_EXP:
2122 last->it_ref = mark_expanded(last->it_ref, marker);
2123 break;
2124 }
2125 else
2126 for (; roots > 0; roots--, last++) /* Objects cannot move */
2127 switch (last->type & SK_HEAD) { /* Type in stack */
2128 case SK_REF: /* Reference */
2129 case SK_EXP:
2130 (void) mark_expanded(last->it_ref, marker);
2131 break;
2132 }
2133
2134 #ifdef DEBUG
2135 roots = saved_roots;
2136 last = saved_last;
2137 dprintf(2)("mark_op_stack: after GC: %d objects in %s chunk\n",
2138 roots, done ? "last" : "current");
2139 if (DEBUG & 2 && debug_ok(2)) {
2140 int i; EIF_TYPED_VALUE *lst = last;
2141 for (i = 0; i < roots; i++, lst++) {
2142 switch (lst->type & SK_HEAD) {
2143 case SK_EXP: printf("\t%d: expanded 0x%lx\n", i, lst->it_ref); break;
2144 case SK_REF: printf("\t%d: 0x%lx\n", i, lst->it_ref); break;
2145 case SK_BOOL: printf("\t%d: bool %s\n", i, lst->it_char ? "true" : "false"); break;
2146 case SK_CHAR8: printf("\t%d: char %d\n", i, lst->it_char); break;
2147 case SK_CHAR32: printf("\t%d: wide char %lu\n", i, lst->it_wchar); break;
2148 case SK_UINT8: printf("\t%d: uint8 %ld\n", i, lst->it_uint8); break;
2149 case SK_UINT16: printf("\t%d: uint16 %ld\n", i, lst->it_uint16); break;
2150 case SK_UINT32: printf("\t%d: uint32 %ld\n", i, lst->it_uint32); break;
2151 case SK_UINT64: printf("\t%d: uint64 %ld\n", i, lst->it_uint64); break;
2152 case SK_INT8: printf("\t%d: int %ld\n", i, lst->it_int8); break;
2153 case SK_INT16: printf("\t%d: int %ld\n", i, lst->it_int16); break;
2154 case SK_INT32: printf("\t%d: int %ld\n", i, lst->it_int32); break;
2155 case SK_INT64: printf("\t%d: int %ld\n", i, lst->it_int64); break;
2156 case SK_REAL32: printf("\t%d: real32 %f\n", i, lst->it_real32); break;
2157 case SK_REAL64: printf("\t%d: real64 %f\n", i, lst->it_real64); break;
2158 case SK_POINTER: printf("\t%d: pointer 0x%lx\n", i, lst->it_ref); break;
2159 case SK_VOID: printf("\t%d: void\n", i); break;
2160 default:
2161 printf("\t%d: UNKNOWN TYPE 0x%lx\n", i, lst->type);
2162 }
2163 }
2164 }
2165 flush;
2166 #endif
2167
2168 }
2169 }
2170 #endif
2171 /* End of workbench-specific marking functions */
2172
2173 rt_private void mark_ex_stack(struct xstack *stk, MARKER marker, int move)
2174 /* The stack which is to be marked */
2175 /* The routine used to mark objects */
2176 /* Are the objects expected to move? */
2177 {
2178 /* Loop over the exception stacks (the one used by the exception handling
2179 * mechanism) and update all the references found. Those references are
2180 * alive, so the sole purpose of this traversal is to update the pointers
2181 * when objects are moved around.
2182 */
2183
2184 #ifdef DEBUG
2185 RT_GET_CONTEXT
2186 #endif
2187
2188 struct ex_vect *last; /* For looping over subsidiary roots */
2189 rt_uint_ptr roots; /* Number of roots in each chunk */
2190 struct stxchunk *s; /* To walk through each stack's chunk */
2191 int done = 0; /* Top of stack not reached yet */
2192
2193 #ifdef DEBUG
2194 dprintf(1)("mark_ex_stack: scanning exception %s stack for %s collector\n",
2195 stk == &eif_trace ? "trace" : "vector",
2196 marker == GEN_SWITCH ? "generation" : "traditional");
2197 flush;
2198 #endif
2199
2200 if (stk->st_top == (struct ex_vect *) 0)
2201 return; /* Stack is not created yet */
2202
2203 for (s = stk->st_hd; s && !done; s = s->sk_next) {
2204 last = s->sk_arena; /* Start of stack */
2205 if (s != stk->st_cur) /* Before current pos? */
2206 roots = s->sk_end - last; /* The whole chunk */
2207 else {
2208 roots = stk->st_top - last; /* Stop at the top */
2209 done = 1; /* Reached end of stack */
2210 }
2211 if (move)
2212 for (; roots > 0; roots--, last++)
2213 switch (last->ex_type) { /* Type in stack */
2214 /* The following are meaningful when printing the exception
2215 * trace: the first set records the enclosing calls, the
2216 * second records the failed preconditions (because in that
2217 * case, the enclosing call does not appear in the printed
2218 * stack).
2219 */
2220 case EX_CALL: case EN_FAIL:
2221 case EX_RESC: case EN_RESC:
2222 case EX_RETY: case EN_RES:
2223 last->ex_id = mark_expanded(last->ex_id, marker);
2224 break;
2225 /* Do not inspect EX_PRE records. They do not carry any
2226 * valid object ID, which is put in the EN_PRE vector by
2227 * backtrack (the precondition failure raises exception in
2228 * caller).
2229 */
2230 case EN_PRE:
2231 case EX_CINV: case EN_CINV:
2232 last->ex_oid = mark_expanded(last->ex_oid, marker);
2233 break;
2234 }
2235 else
2236 for (; roots > 0; roots--, last++)
2237 switch (last->ex_type) { /* Type in stack */
2238 /* The following are meaningful when printing the exception
2239 * trace: the first set records the enclosing calls, the
2240 * second records the failed preconditions (because in that
2241 * case, the enclosing call does not appear in the printed
2242 * stack).
2243 */
2244 case EX_CALL: case EN_FAIL:
2245 case EX_RESC: case EN_RESC:
2246 case EX_RETY: case EN_RES:
2247 (void) mark_expanded(last->ex_id, marker);
2248 break;
2249 /* Do not inspect EX_PRE records. They do not carry any
2250 * valid object ID, which is put in the EN_PRE vector by
2251 * backtrack (the precondition failure raises exception in
2252 * caller).
2253 */
2254 case EN_PRE:
2255 case EX_CINV: case EN_CINV:
2256 (void) mark_expanded(last->ex_oid, marker);
2257 break;
2258 }
2259
2260 }
2261 }
2262
2263 rt_private void mark_overflow_stack(MARKER marker, int move)
2264 /* The routine used to mark objects */
2265 /* Are the objects expected to move? */
2266 {
2267 /* Overflow stack management:
2268 * --------------------------
2269 *
2270 * During the marking phase, each time we reach a depth of `overflow_stack_limit' we stop
2271 * the recursion on the marking routines. What we do instead is to store the location
2272 * where the reference to the object is stored and we insert it in `overflow_stack_set'.
2273 * Then we continue with our marking. As a consequence the marking phase has
2274 * a stack depth which is bounded by `current_stack_depth + overflow_stack_limit'.
2275 *
2276 * When all structures have been marked, we mark in an iterative manner
2277 * all objects stored in `stk', a copy of `overflow_stack_set'. Same thing here:
2278 * if we reach the maximum depth during marking, we add the item to the freshly
2279 * reset `overflow_stack_set'. We repeat this process until there are no more
2280 * objects added to `overflow_stack_set'.
2281 *
2282 * Note the trick is to store the address of where the object is referenced from.
2283 * Without it we would not be able to resolve all references to moved objects during
2284 * marking.
2285 */
2286
2287 /* Loop over the `overflow_stack_set' stack, using the supplied marker to recursively
2288 * mark the objects. The 'move' flag tells us whether the
2289 * objects are expected to move or not (to avoid useless write
2290 * indirections). Stack holds indirect references to objects.
2291 */
2292 EIF_REFERENCE *object; /* For looping over subsidiary roots */
2293 rt_uint_ptr roots; /* Number of roots in each chunk */
2294 struct stchunk *s; /* To walk through each stack's chunk */
2295 struct stack stk; /* Copy of current `overflow_stack_set' */
2296 int done; /* Top of stack not reached yet */
2297
2298 while (overflow_stack_count > 0) {
2299 /* Copy `overflow_stack' to `stk', as we will iterate through `stk'.
2300 * We do this process as long as `overflow_stack_count' is not null. */
2301 memcpy(&stk, &overflow_stack_set, sizeof(struct stack));
2302 memset(&overflow_stack_set, 0, sizeof(struct stack));
2303 overflow_stack_count = 0;
2304 done = 0;
2305
2306 CHECK("Stack created", stk.st_top != NULL); /* Stack should be created since count > 0 */
2307
2308 for (s = stk.st_hd; s && !done; s = s->sk_next) {
2309 object = s->sk_arena; /* Start of stack */
2310 if (s != stk.st_cur) /* Before current pos? */
2311 roots = s->sk_end - object; /* The whole chunk */
2312 else {
2313 roots = stk.st_top - object; /* Stop at the top */
2314 done = 1; /* Reached end of stack */
2315 }
2316
2317 if (move) {
2318 for (; roots > 0; roots--, object++) {
2319 if (*object) {
2320 *(EIF_REFERENCE *) *object = mark_expanded(*(EIF_REFERENCE *) *object, marker);
2321 }
2322 }
2323 } else {
2324 for (; roots > 0; roots--, object++) {
2325 if (*object) {
2326 (void) mark_expanded(*(EIF_REFERENCE *) *object, marker);
2327 }
2328 }
2329 }
2330 }
2331
2332 /* Free memory used by stack as we don't need it anymore */
2333 st_reset(&stk);
2334 }
2335
2336 ENSURE ("Overflow stack empty", overflow_stack_count == 0);
2337 }
2338
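/* A minimal sketch (disabled, illustrative only) of the bounded-depth marking
 * scheme described above: past a depth limit the slot is deferred to an
 * overflow worklist, which is then drained iteratively until no new entries
 * appear. `push_overflow', `pop_overflow' and `visit_children' are
 * hypothetical helpers.
 */
#if 0
#define DEPTH_LIMIT 400

static int mark_depth;

static void mark_bounded (void **slot)
{
	if (mark_depth >= DEPTH_LIMIT) {
		push_overflow (slot);	/* Defer: record the slot address, not the object. */
		return;
	}
	mark_depth++;
	visit_children (*slot, mark_bounded);	/* May relocate the object and rewrite *slot. */
	mark_depth--;
}

static void drain_overflow (void)
{
	void **slot;
	while ((slot = pop_overflow ()) != NULL) {	/* Draining may push new overflow entries. */
		mark_bounded (slot);
	}
}
#endif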
2339 /*
2340 doc: <routine name="mark_array" return_type="void" export="private">
2341 doc:	<summary>Mark all references stored in `arr' from 0 to `arr_count - 1'. It is assumed that the entries are not in the scavenge zones.</summary>
2342 doc: <param name="arr" type="EIF_REFERENCE *">The array to traverse.</param>
2343 doc: <param name="arr_count" type="rt_uint_ptr">The number of elements to traverse.</param>
2344 doc: <param name="marker" type="MARKER">The GC marker.</param>
2345 doc: <param name="move" type="int">Are the objects expected to move?</param>
2346 doc: <thread_safety>Safe with synchronization</thread_safety>
2347 doc: <synchronization>Through `eif_gc_mutex'.</synchronization>
2348 doc: </routine>
2349 */
2350 rt_private void mark_array(EIF_REFERENCE *arr, rt_uint_ptr arr_count, MARKER marker, int move)
2351 {
2352 if ((arr) && (arr_count > 0)) {
2353 if (move) {
2354 for (; arr_count > 0; arr_count--, arr++) {
2355 if (*arr) {
2356 CHECK("Not in scavenge `from' zone", (*arr < sc_from.sc_arena) || (*arr > sc_from.sc_top));
2357 CHECK("Not in scavenge `to' zone", (*arr < sc_to.sc_arena) || (*arr > sc_to.sc_top));
2358 *arr = mark_expanded(*arr, marker);
2359 CHECK("Not in scavenge `from' zone", (*arr < sc_from.sc_arena) || (*arr > sc_from.sc_top));
2360 CHECK("Not in scavenge `to' zone", (*arr < sc_to.sc_arena) || (*arr > sc_to.sc_top));
2361 }
2362 }
2363 } else {
2364 for (; arr_count > 0; arr_count--, arr++) {
2365 if (*arr) {
2366 (void) mark_expanded(*arr, marker);
2367 }
2368 }
2369 }
2370 }
2371 }
2372
2373 /*
2374 doc: <routine name="unmark_c_stack_objects" return_type="void" export="private">
2375 doc:	<summary>When objects are allocated on the C stack, we need to unmark them at the end of a GC cycle, because none of the existing code will unmark them since they are usually only referenced through `loc_set' or `loc_stack'. At the end of this routine, the stack is emptied.</summary>
2376 doc: <thread_safety>Safe with synchronization</thread_safety>
2377 doc: <synchronization>Through `eif_gc_mutex'.</synchronization>
2378 doc: </routine>
2379 */
2380
2381 rt_private void unmark_c_stack_objects (void)
2382 {
2383 EIF_REFERENCE *object; /* For looping over subsidiary roots */
2384 rt_uint_ptr roots; /* Number of roots in each chunk */
2385 struct stchunk *s; /* To walk through each stack's chunk */
2386 int done; /* Top of stack not reached yet */
2387
2388 /* Only do some processing if the stack was created. */
2389 if (c_stack_object_set.st_top) {
2390 done = 0;
2391 for (s = c_stack_object_set.st_hd; s && !done; s = s->sk_next) {
2392 object = s->sk_arena; /* Start of stack */
2393 if (s != c_stack_object_set.st_cur) /* Before current pos? */
2394 roots = s->sk_end - object; /* The whole chunk */
2395 else {
2396 roots = c_stack_object_set.st_top - object; /* Stop at the top */
2397 done = 1; /* Reached end of stack */
2398 }
2399 for (; roots > 0; roots--, object++) {
2400 CHECK("Object is marked", HEADER(*object)->ov_flags & EO_MARK);
2401 CHECK("Object is on C stack", HEADER(*object)->ov_flags & EO_STACK);
2402 HEADER(*object)->ov_flags &= ~EO_MARK;
2403 }
2404 }
2405
2406 /* Reset the content of the stack. This is not great for performance since
2407 * we will most likely reallocate the stack at the next GC cycle. */
2408 st_reset(&c_stack_object_set);
2409 }
2410 }
2411
2412 rt_private EIF_REFERENCE hybrid_mark(EIF_REFERENCE *a_root)
2413 {
2414 /* Mark all the objects referenced by the root object.
2415 * All the attributes of an object are recursively marked,
2416 * except the last one. This brings a noticeable
2417 * improvement with structures like LINKED_LIST when its `right'
2418 * part is the last reference (note this is not always the case).
2419 * It also prevents stack overflow with the `overflow_stack_set'.
2420 */
2421 union overhead *zone; /* Malloc info zone fields */
2422 uint16 flags; /* Eiffel flags */
2423 long offset; /* Reference's offset */
2424 rt_uint_ptr size; /* Size of an item (for array of expanded) */
2425 EIF_REFERENCE *object; /* Sub-objects scanned */
2426 EIF_REFERENCE current; /* Object currently inspected */
2427 EIF_REFERENCE *prev; /* Holder of current (for update) */
2428 EIF_REFERENCE root = *a_root; /* Root object */
2429 long count; /* Number of references */
2430
2431 /* If 'root' is a void reference, return immediately. This is redundant
2432 * with the beginning of the loop, but this case occurs quite often.
2433 */
2434 if (root == (EIF_REFERENCE) 0)
2435 return (EIF_REFERENCE) 0;
2436
2437 /* Stack overflow protection */
2438 overflow_stack_depth++;
2439 if (overflow_stack_depth > overflow_stack_limit) {
2440 /* If we can record the object in the overflow stack, we do so; otherwise
2441 * we hope we will have enough stack to complete the GC cycle. */
2442 if (epush(&overflow_stack_set, a_root) != -1) {
2443 overflow_stack_count++;
2444 overflow_stack_depth--;
2445 return root;
2446 }
2447 }
2448
2449 /* Initialize the variables for the loop */
2450 current = root;
2451 prev = (EIF_REFERENCE *) 0;
2452
2453 do {
2454 if (current == (EIF_REFERENCE) 0) /* No further exploration */
2455 goto done; /* Exit the procedure */
2456
2457 zone = HEADER(current); /* Malloc info zone */
2458 flags = zone->ov_flags; /* Fetch Eiffel flags */
2459
2460 #ifdef DEBUG
2461 if (zone->ov_size & B_FWD) {
2462 dprintf(16)("hybrid_mark: 0x%lx fwd to 0x%lx (DT %d, %d bytes)\n",
2463 current,
2464 zone->ov_fwd,
2465 HEADER(zone->ov_fwd)->ov_dftype,
2466 zone->ov_size & B_SIZE);
2467 } else {
2468 dprintf(16)("hybrid_mark: 0x%lx %s%s%s(DT %d, %d bytes)\n",
2469 current,
2470 zone->ov_flags & EO_MARK ? "marked " : "",
2471 zone->ov_flags & EO_OLD ? "old " : "",
2472 zone->ov_flags & EO_REM ? "remembered " : "",
2473 zone->ov_dftype,
2474 zone->ov_size & B_SIZE);
2475 }
2476 flush;
2477 #endif
2478
2479 /* Deal with scavenging here, namely scavenge the reached object if it
2480 * belongs to a 'from' space. Leave a forwarding pointer behind and mark
2481 * the object as forwarded. Scavenging while marking avoids another pass
2482 * for scavenging the 'from' zone and another entire pass to update the
2483 * references, so it should be a big win--RAM. Note that scavenged
2484 * objects are NOT marked: the fact that they have been forwarded is the
2485 * mark. The expanded objects are never scavenged (only the object which
2486 * holds them is).
2487 */
2488 offset = (uint32) rt_g_data.status; /* Garbage collector's status */
2489
2490 if (offset & (GC_PART | GC_GEN)) {
2491
2492 /* If we enter here, then we are currently running a scavenging
2493 * algorithm of some sort. Depending on the garbage collector's
2494 * flag, we are able to see if the current object is in a 'from'
2495 * zone (i.e. has to be scavenged). Note that the generation
2496 * scavenging process does not usually call this routine (tenuring
2497 * can fail, and we are in a process that is not allowed to fail).
2498 * Here, the new generation is simply scavenged, with no tenuring.
2499 * Detecting whether an object is in the scavenge zone or not is
2500 * easy and fast: all the objects in the scavenge zone have their
2501 * B_BUSY flag reset.
2502 */
2503
2504 size = zone->ov_size;
2505 if (size & B_FWD) { /* Can't be true if expanded */
2506 if(prev) /* Update the referencing address */
2507 *prev = zone->ov_fwd;
2508 goto done; /* So it has been already processed */
2509 }
2510
2511 if (flags & EO_MARK) /* Already marked */
2512 goto done; /* Object processed and did not move */
2513
2514 if (offset & GC_GEN && !(size & B_BUSY)) {
2515 current = scavenge(current, &sc_to.sc_top); /* Simple scavenging */
2516 zone = HEADER(current); /* Update zone */
2517 flags = zone->ov_flags; /* And Eiffel flags */
2518 if (prev) /* Update referencing pointer */
2519 *prev = current;
2520 goto marked;
2521 } else {
2522 if ((offset & GC_PART) && (current > ps_from.sc_arena && current <= ps_from.sc_end)) {
2523 if (ps_to.sc_top + ((size & B_SIZE) + OVERHEAD) <= ps_to.sc_end) {
2524 /* Record location of previous `top' which will be used to set the
2525 * B_LAST flag at the end of the scavenging. */
2526 ps_to.sc_previous_top = ps_to.sc_top;
2527 current = scavenge(current, &ps_to.sc_top);/* Partial scavenge */
2528 zone = HEADER(current); /* Update zone */
2529 /* Clear B_LAST flag in case previous location of `current' was the last
2530 * block in its chunk, we don't want to find a B_LAST flag bit in the new
2531 * location especially if it is most likely the case that we are not at the
2532 * end of the `ps_to' zone. This is a 1 day bug.
2533 */
2534 zone->ov_size &= ~B_LAST;
2535 flags = zone->ov_flags; /* And Eiffel flags */
2536 if (prev)
2537 *prev = current; /* Update referencing pointer */
2538 goto marked;
2539 } else {
2540 ps_to.sc_overflowed_size += (uint32) (size & B_SIZE) + OVERHEAD;
2541 }
2542 }
2543 }
2544 }
2545
2546 /* This part of code, until the 'marked' label is executed only when the
2547 * object does not belong any scavenging space, or no scavenging is to
2548 * ever be done.
2549 */
2550
2551 /* If the current object is already marked, it has been (or is being)
2552 * studied, so return immediately.
2553 */
2554 if (flags & EO_MARK)
2555 goto done;
2556
2557 /* Expanded objects have no 'ov_size' field. Instead, they have a
2558 * pointer to the object which holds them. This is needed by the
2559 * scavenging process, so that we can update the internal references
2560 * to the expanded in the scavenged object.
2561 * It's useless to mark an expanded, because it has only one reference
2562 * to itself, in the object which holds it.
2563 */
2564 if (!eif_is_nested_expanded(flags)) {
2565 if (flags & EO_STACK) {
2566 /* Object is on the C stack, so we need to record it to unmark it later. */
2567 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
2568 epush(&c_stack_object_set, current);
2569 }
2570 flags |= EO_MARK;
2571 zone->ov_flags = flags;
2572 }
2573
2574 marked: /* Goto label needed to avoid code duplication */
2575
2576 /* Mark associated SCOOP processor. */
2577 RT_MARK_PROCESSOR(current);
2578
2579 /* Now explore all the references of the current object.
2580 * For each object of type 'type', References(type) gives the number
2581 * of references in the objects. The references are placed at the
2582 * beginning of the data space by the Eiffel compiler. Expanded
2583 * objects have a reference to them, so no special treatment is
2584 * required. Special objects full of references are also explored.
2585 */
2586
2587 if (flags & EO_SPEC) {
2588 /* Special objects may have no references (e.g. an array of
2589 * integers or a string), so we have to skip those.
2590 */
2591
2592 if (!(flags & EO_REF)) /* If object moved, reference updated */
2593 goto done;
2594
2595 /* At the end of the special data zone, there are two long integers
2596 * which give information to the run-time about the content of the
2597 * zone: the first is the 'count', i.e. the number of items, and the
2598 * second is the size of each item (for expandeds, the overhead of
2599 * the header is not taken into account).
2600 */
2601 count = offset = RT_SPECIAL_COUNT(current); /* Get # of items */
2602
2603 if (flags & EO_TUPLE) {
2604 EIF_TYPED_VALUE *l_item = (EIF_TYPED_VALUE *) current;
2605 /* Don't forget that the first element of TUPLE is the BOOLEAN
2606 * `object_comparison' attribute. */
2607 l_item++;
2608 offset--;
2609 if (rt_g_data.status & (GC_PART | GC_GEN)) {
2610 for (; offset > 1; offset--, l_item++ ) {
2611 if (eif_is_reference_tuple_item(l_item)) {
2612 eif_reference_tuple_item(l_item) =
2613 hybrid_mark (&eif_reference_tuple_item(l_item));
2614 }
2615 }
2616 } else {
2617 for (; offset > 1; offset--, l_item++ ) {
2618 if (eif_is_reference_tuple_item(l_item)) {
2619 (void) hybrid_mark(&eif_reference_tuple_item(l_item));
2620 }
2621 }
2622 }
2623 if ((count >= 1) && (eif_is_reference_tuple_item(l_item))) {
2624 /* If last element of TUPLE is a reference, then we continue the
2625 * iteration. */
2626 prev = &eif_reference_tuple_item(l_item);
2627 current = eif_reference_tuple_item(l_item);
2628 continue;
2629 } else
2630 goto done; /* End of iteration; exit procedure */
2631 } else if (flags & EO_COMP) {
2632 /* Treat arrays of expanded object here, because we have a special
2633 * way of looping over the array (we must take the size of each item
2634 * into account). Code below is somewhat duplicated with the normal
2635 * code for regular objects or arrays of references, but this is
2636 * because we have to increment our pointers by size and I do not
2637 * want to slow down the normal loop--RAM.
2638 */
2639 size = RT_SPECIAL_ELEM_SIZE(current); /* Item's size */
2640 if (rt_g_data.status & (GC_PART | GC_GEN)) { /* Moving objects */
2641 object = (EIF_REFERENCE *) (current + OVERHEAD);/* First expanded */
2642 for (; offset > 1; offset--) { /* Loop over array */
2643 if (*object) {
2644 *object = hybrid_mark(object);
2645 object = (EIF_REFERENCE *) ((char *) object + size);
2646 }
2647 }
2648 } else { /* Object can't move */
2649 object = (EIF_REFERENCE *) (current + OVERHEAD);/* First expanded */
2650 for (; offset > 1; offset--) { /* Loop over array */
2651 if (*object) {
2652 (void) hybrid_mark(object);
2653 object = (EIF_REFERENCE *) ((char *) object + size);
2654 }
2655 }
2656 }
2657 /* Keep iterating if and only if the current object has at
2658 * least one attribute.
2659 */
2660 if (count >= 1) {
2661 prev = object;
2662 current = *object;
2663 continue;
2664 } else
2665 goto done; /* End of iteration; exit procedure */
2666 }
2667
2668 } else {
2669 count = offset = References(zone->ov_dtype); /* # items */
2670 }
2671
2672 #ifdef DEBUG
2673 dprintf(16)("hybrid_mark: %d references for 0x%lx\n", offset, current);
2674 if (DEBUG & 16 && debug_ok(16)) {
2675 int i;
2676 for (i = 0; i < offset; i++)
2677 printf("\t0x%lx\n", *((EIF_REFERENCE *) current + i));
2678 }
2679 flush;
2680 #endif
2681
2682 /* Mark all objects under root, updating the references if scavenging */
2683
2684 if (rt_g_data.status & (GC_PART | GC_GEN)) {
2685 for (object = (EIF_REFERENCE *) current; offset > 1; offset--, object++) {
2686 if (*object) {
2687 *object = hybrid_mark(object);
2688 }
2689 }
2690 } else {
2691 for (object = (EIF_REFERENCE *) current; offset > 1; offset--, object++) {
2692 if (*object) {
2693 (void) hybrid_mark(object);
2694 }
2695 }
2696 }
2697
2698 if (count >= 1) {
2699 prev = object;
2700 current = *object;
2701 } else
2702 goto done;
2703
2704 } while(current);
2705
2706 done:
2707 /* Return the [new] address of the root object */
2708 zone = HEADER(root);
2709 overflow_stack_depth--;
2710 return ((zone->ov_size & B_FWD) ? zone->ov_fwd : root);
2711 }
2712
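/* A minimal sketch (disabled, illustrative only) of the traversal strategy
 * used by `hybrid_mark' above: every reference of an object but the last is
 * marked recursively, while the last one is followed iteratively, which keeps
 * the C stack shallow on long chains such as linked lists. `is_marked',
 * `set_mark', `child_count' and `child_slot' are hypothetical helpers.
 */
#if 0
static void mark_object (void *obj)
{
	while (obj && !is_marked (obj)) {
		long i, n = child_count (obj);
		set_mark (obj);
		for (i = 0; i + 1 < n; i++) {
			mark_object (*child_slot (obj, i));	/* Recurse on all but the last reference. */
		}
		obj = (n > 0) ? *child_slot (obj, n - 1) : NULL;	/* Iterate on the last reference. */
	}
}
#endif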
2713 rt_private void full_sweep(void)
2714 {
2715 /* Sweep phase -- All the reachable objects have been marked, so
2716 * all we have to do is scan all the objects and look for garbage.
2717 * The remaining objects are unmarked. If partial scavenging is on,
2718 * the 'from' and 'to' spaces are left untouched (the objects in the
2719 * 'to' space are unmarked but alive...).
2720 */
2721 union overhead *zone; /* Malloc info zone */
2722 rt_uint_ptr size; /* Object's size in bytes */
2723 EIF_REFERENCE end; /* First address beyond chunk */
2724 uint16 flags; /* Eiffel flags */
2725 struct chunk *chunk; /* Current chunk */
2726 EIF_REFERENCE arena; /* Arena in chunk */
2727
2728 /* We start the sweeping at the end of the memory, and we walk
2729 * backwards along the chunk list. That way, the freed objects
2730 * are inserted at the beginning of the free list (which is kept
2731 * in increasing order). If I did it the other way round, then
2732 * the first freed objects would be inserted at the beginning but
2733 * the last one would take much more time to get inserted, not to
2734 * mention all the paging problems that could be involved--RAM.
2735 */
2736
2737 for (chunk = cklst.ck_tail; chunk; chunk = chunk->ck_prev) {
2738
2739 arena = (EIF_REFERENCE) (chunk + 1);
2740
2741 /* Skip the scavenge zones, if they exist and we are in a partial
2742 * scavenge: (objects there have to go back to the free list only if
2743 * the chunk is not completely free, i.e. if it has C blocks in it).
2744 * The 'to' zone is full of 'alive' objects, but they are unmarked...
2745 * There is no special consideration for generation scavenging,
2746 * because the blocks which hold these zones are C ones.
2747 *
2748 * In non-partial scavenging mode, then only the 'to' has to be skipped:
2749 * the scavenge zones are unused in this mode, but 'to' contains a bunch
2750 * of objects which should be dead (it was surely a 'from' during the
2751 * last scavenging cycle).
2752 */
2753
2754 if (arena == ps_to.sc_arena) {
2755 /* Only traverse the bottom part of `ps_to' which
2756 * is actually not used as a `to' zone for partial scavenging. */
2757 end = ps_to.sc_active_arena;
2758 } else if ((rt_g_data.status & GC_PART) && (arena == ps_from.sc_arena)) {
2759 continue;
2760 } else {
2761 end = (EIF_REFERENCE) arena + chunk->ck_length; /* Chunk's tail */
2762 }
2763
2764 /* Objects are not chained together, so the only way to walk
2765 * through them is to use the size field of each block. C blocks
2766 * have to be skipped. The main disadvantage of this mechanism is
2767 * that it involves swapping, but this is the price to pay to have
2768 * only an 8 bytes header--RAM.
2769 */
2770
2771 for (
2772 zone = (union overhead *) arena;
2773 (EIF_REFERENCE) zone < end;
2774 zone = (union overhead *) (((EIF_REFERENCE) zone) + (size & B_SIZE) + OVERHEAD)
2775 ) {
2776 size = zone->ov_size; /* Size and flags */
2777 if (!(size & B_BUSY)) {
2778 /* Object belongs to the free list (not busy). */
2779 } else if (size & B_C) {
2780 /* Object is a C one.
2781 * However, any Eiffel object is marked during the marking phase and has
2782 * to be unmarked now. It is not freed however, since it is
2783 * marked B_C and hence is under user control. Moreover, we
2784 * would not be able to remove the reference from hector.
2785 */
2786 zone->ov_flags &= ~EO_MARK; /* Unconditionally unmark it */
2787 } else {
2788 flags = zone->ov_flags; /* Fetch Eiffel flags */
2789 if (flags & EO_MARK) { /* Object is marked */
2790 zone->ov_flags = flags & ~EO_MARK; /* Unmark it */
2791 } else {
2792 /* Expanded objects are within normal objects and therefore
2793 * cannot be explicitly removed. I assume it is impossible
2794 * to reference an expanded object directly (via another object
2795 * reference)--RAM.
2796 */
2797 gfree(zone); /* Object is freed */
2798 #ifdef FULL_SWEEP_DEBUG
2799 printf("FULL_SWEEP: Removing 0x%x (type %d, %d bytes) %s %s %s %s %s %s %s, age %ld\n",
2800 (union overhead *) zone + 1,
2801 HEADER( (union overhead *) zone + 1 )->ov_dftype,
2802 zone->ov_size & B_SIZE,
2804 zone->ov_size & B_FWD ? "forwarded" : "",
2805 zone->ov_flags & EO_MARK ? "marked" : "",
2806 zone->ov_flags & EO_REF ? "ref" : "",
2807 zone->ov_flags & EO_COMP ? "cmp" : "",
2808 zone->ov_flags & EO_SPEC ? "spec" : "",
2809 zone->ov_flags & EO_NEW ? "new" : "",
2810 zone->ov_flags & EO_OLD ? "old" : "",
2811 ((zone->ov_flags & EO_AGE) >> 24) / 2);
2812 #endif /* FULL_SWEEP_DEBUG */
2813 }
2814 }
2815 }
2816 }
2817
2818 /* The Hector stack has to be traversed to call `dispose' on protected objects
2819 * Standard dispose traversal checks for Eiffel objects; frozen objects are
2820 * marked as C objects and are thus ignored. The other stacks reference "moving"
2821 * objects so there's no problem */
2822 }
2823
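/* A minimal sketch (disabled, illustrative only) of the block walk used by
 * `full_sweep' above: blocks are not chained, so the scan advances by each
 * block's size plus the header overhead; the size is read before the callback
 * because the callback may free the block. `struct hdr' is a hypothetical
 * stand-in for `union overhead'.
 */
#if 0
struct hdr { size_t size; };

static void walk_chunk (char *arena, char *end, void (*cb)(struct hdr *))
{
	char *p = arena;
	while (p < end) {
		struct hdr *h = (struct hdr *) p;
		size_t size = h->size;			/* Cache the size: `cb' may release the block. */
		cb (h);
		p += sizeof (struct hdr) + size;	/* Jump to the next block header. */
	}
}
#endif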
2824 rt_private void full_update(void)
2825 {
2826 /* After a mark and sweep, eventually mixed with scavenging, the data
2827 * structures which are used to describe the generations have to be
2828 * updated, in case the references changed or some objects died.
2829 * An object is considered to be alive iff it carries the EO_MARK bit
2830 * or if it has been forwarded. The references are updated in that case.
2831 * The routines rely on the garbage collector's status flag to do the
2832 * proper job.
2833 */
2834
2835 /* Must be done before anything else as it relies on EO_MARK to find out if objects are
2836 * dead or not. */
2837 update_weak_references();
2838
2839 /* Then we proceed with `moved_set'. */
2840 update_moved_set();
2841
2842 /* Processing of `rem_set' has to be done after `moved_set' (for GC_FAST). */
2843 update_rem_set();
2844
2845 /* Finally the memory set (objects in the scavenge zone that have a `dispose'). */
2846 update_memory_set ();
2847 }
2848 #endif /* ISE_GC */
2849
2850
2851 /*
2852 doc: <routine name="plsc" export="public">
2853 doc: <summary>Mixed strategy garbage collector (mark and sweep plus scavenging). This can also be qualified as a storage compaction garbage collector. The partial scavenging entry point, which is monitored for statistics updating (available to the user via MEMORY).</summary>
2854 doc: <thread_safety>Safe</thread_safety>
2855 doc: <synchronization>Synchronization done through `scollect'.</synchronization>
2856 doc: </routine>
2857 */
2858
2859 rt_public void plsc(void)
2860 {
2861 #ifdef ISE_GC
2862 if (rt_g_data.status & GC_STOP)
2863 return; /* Garbage collection stopped */
2864
2865 (void) scollect(partial_scavenging, GST_PART);
2866 #endif /* ISE_GC */
2867 }
2868
2869 #ifdef ISE_GC
2870 rt_private int partial_scavenging(void)
2871 {
2872 /* Partial Scavenging -- Implementation of the INRIA algorithm
2873 * Lang-Dupont 1987. The idea is to do a full mark and sweep for
2874 * most of the memory, the 'from' space excepted. This space is
2875 * scavenged to the 'to' space, thus doing storage compaction.
2876 * Note that for efficiency reasons and for the memory update to
2877 * work correctly, 'from' MUST be the address of the first block
2878 * of a memory chunk (because this zone is skipped by doing pointer
2879 * comparisons).
2880 */
2881 RT_GET_CONTEXT
2882
2883 SIGBLOCK; /* Block all signals during garbage collection */
2884 init_plsc(); /* Initialize scavenging (find 'to' space) */
2885 run_plsc(); /* Normal sequence */
2886 rel_core(); /* We may give some core back to the kernel */
2887 eiffel_usage = 0; /* Reset Eiffel memory allocated since last collection */
2888 SIGRESUME; /* Dispatch any signal which has been queued */
2889 return 0;
2890 }
2891
2892 rt_private void run_plsc(void)
2893 {
2894 /* This routine actually invokes the partial scavenging. */
2895 run_collector(); /* Call a wrapper to do the job */
2896 /* Clean up `from' and `to' scavenge zone if a partial scavenging is done, not
2897 * a simple mark and sweep. */
2898 if (rt_g_data.status & GC_PART) {
2899 clean_zones(); /* Clean up 'from' and 'to' scavenge zones */
2900 }
2901 }
2902
2903 rt_shared void urgent_plsc(EIF_REFERENCE *object)
2904 {
2905 /* Perform an urgent partial scavenging, taking 'object' as a pointer
2906 * to the address of a variable holding a reference to an Eiffel object
2907 * which must be part of the local roots for the collector.
2908 */
2909
2910 RT_GET_CONTEXT
2911 if ((rt_g_data.status & GC_STOP) GC_THREAD_PROTECT(|| !thread_can_launch_gc))
2912 return; /* Garbage collection stopped */
2913
2914 SIGBLOCK; /* Block all signals during garbage collection */
2915 GC_THREAD_PROTECT(eif_synchronize_gc(rt_globals));
2916 init_plsc(); /* Initialize scavenging (find 'to' space) */
2917
2918 /* This object needs to be taken care of, because it might be dead from
2919 * the GC's point of view although we know that it is not... As its location
2920 * may change, we use an indirection to reach it.
2921 */
2922 *object = MARK_SWITCH(object); /* Ensure object is alive */
2923
2924 run_plsc(); /* Normal sequence */
2925 SIGRESUME; /* Dispatch any signal which has been queued */
2926
2927 GC_THREAD_PROTECT(eif_unsynchronize_gc(rt_globals));
2928 }
2929
2930 rt_private void clean_zones(void)
2931 {
2932 /* This routine is called after a partial scavenging has been done.
2933 * Run over the 'from' space, coalescing all the Eiffel blocks we find
2934 * there. If we reach a C block, then the entire zone is polluted and the
2935 * free blocks are returned to the free list. Otherwise, the whole chunk
2936 * is kept for the next partial scavenge (this should be the case all the
2937 * time, but some C blocks may pollute the zones if we are low in memory).
2938 */
2939
2940 int is_ps_to_keep, has_block_been_split;
2941
2942 REQUIRE("GC_PART", rt_g_data.status & GC_PART);
2943
2944 /* Compute the amount of copied bytes and the size of the scavenging zone
2945 * we were dealing with. This is used by scollect to update its statistics
2946 * about the memory collected (since when scavenging is done, some memory
2947 * is collected without having the free-list disturbed, thus making the
2948 * malloc statistics inaccurate in this respect)--RAM.
2949 */
2950 rt_g_data.mem_copied += ps_from.sc_size; /* Bytes subject to copying */
2951 rt_g_data.mem_move += ps_to.sc_top - ps_to.sc_active_arena;
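/* For instance (figures made up): with a 64 KB 'from' zone out of which
 * 24 KB of live data was copied into 'to', `mem_copied' grows by 65536
 * and `mem_move' by 24576 for this cycle. */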
2952
2953 /* Decide whether the `ps_to' zone can be kept for the next partial collection. */
2954 if ((ps_to.sc_top >= ps_to.sc_end) || (ps_to.sc_overflowed_size > 0)) {
2955 /* If we have reached the end of the `ps_to' zone, or if we have overflowed
2956 * the `ps_to' zone, then we cannot keep it. */
2957 is_ps_to_keep = 0;
2958 } else {
2959 /* Otherwise there is still room left in the `ps_to' zone, so we can keep it. */
2960 is_ps_to_keep = 1;
2961 }
2962
2963 /* Put final free block back to the free list?
2964 * If we cannot do that, then `is_ps_to_keep' has to be updated
2965 * accordingly, that is to say, we cannot keep it for next partial collection. */
2966 has_block_been_split = split_to_block(is_ps_to_keep);
2967 is_ps_to_keep = is_ps_to_keep && (has_block_been_split == 1);
2968
2969 if (is_ps_to_keep) {
2970 /* Update `ps_to' so that we can reuse it for next compaction. */
2971 /* Reset `sc_active_arena', so that it corresponds to `top'. */
2972 ps_to.sc_overflowed_size = 0;
2973 ps_to.sc_size = ps_to.sc_size - (ps_to.sc_top - ps_to.sc_active_arena);
2974 ps_to.sc_active_arena = ps_to.sc_top;
2975 /* Update size so that it is seen as a non-free block of memory. */
2976 ps_to.sc_flags = ((union overhead *) ps_to.sc_top)->ov_size;
2977 /* B_BUSY and B_LAST flag should be set by `split_to_block'. */
2978 CHECK("B_BUSY set", ((union overhead *) ps_to.sc_top)->ov_size & B_BUSY);
2979 CHECK("B_LAST set", ((union overhead *) ps_to.sc_top)->ov_size & B_LAST);
2980 } else {
2981 /* Reset `ps_to' since we cannot reuse it. */
2982 memset (&ps_to, 0, sizeof(struct partial_sc_zone));
2983 }
2984
2985 if (0 == sweep_from_space()) { /* Clean up 'from' space */
2986 /* For malloc, set the B_LAST bit to indicate that the block held
2987 * in the space is the last one in the chunk.
2988 */
2989 ((union overhead *) ps_from.sc_arena)->ov_size |= B_LAST;
2990
2991 /* The whole 'from' space is now free. If the 'to' space holds at least
2992 * one object, then the `from' space will become the new 'to' space.
2993 * Otherwise, we keep the same 'to' space and the 'from' space is put
2994 * back to the free list.
2995 */
2996 if (is_ps_to_keep) {
2997 /* The 'to' space is partially empty -- Free the 'from' space but keep the
2998 * to zone for next scavenge (it's so hard to find). Before
2999 * freeing the scavenge zone, do not forget to set the B_BUSY
3000 * flag for eif_rt_xfree. The number of 'to' zones allocated is also
3001 * decreased by one, since its allocation is compensated by the
3002 * release of the from space (well, sort of).
3003 */
3004 ((union overhead *) ps_from.sc_arena)->ov_size |= B_BUSY;
3005 eif_rt_xfree (ps_from.sc_arena + OVERHEAD); /* One big block */
3006 if (rt_g_data.gc_to > 0) {
3007 rt_g_data.gc_to--;
3008 }
3009 } else {
3010 /* The 'to' space holds at least one object (normal case). The
3011 * 'from' space is completely empty and will be the next 'to' space
3012 * in the next partial scavenging.
3013 */
3014 memcpy (&ps_to, &ps_from, sizeof(struct partial_sc_zone));
3015 ps_to.sc_flags = ((union overhead *) ps_from.sc_arena)->ov_size;
3016 }
3017 /* Reset `ps_from' since not needed anymore. */
3018 memset (&ps_from, 0, sizeof(struct partial_sc_zone)); /* Was freed */
3019 return;
3020 }
3021 }
3022
3023 rt_private void init_plsc(void)
3024 {
3025 /* Set a correct status for the garbage collector, so that the recursive
3026 * mark process knows about what we are doing. If we are unable to get
3027 * a valid 'to' zone for the scavenge, a simple mark and sweep will be done.
3028 */
3029
3030 if (0 == find_scavenge_spaces())
3031 rt_g_data.status = (char) ((gen_scavenge & GS_ON) ? GC_PART | GC_GEN : GC_PART);
3032 else
3033 rt_g_data.status = (char) ((gen_scavenge & GS_ON) ? GC_GEN : 0);
3034
3035 /* If partial scavenging was not activated, make sure no scavenge space is
3036 * recorded at all, to avoid problems with malloc and core releasing.
3037 */
3038
3039 if (!(rt_g_data.status & GC_PART)) {
3040 ps_from.sc_arena = (EIF_REFERENCE) 0; /* Will restart from end */
3041 if (ps_to.sc_arena != (EIF_REFERENCE) 0) { /* One chunk was kept in reserve */
3042 CHECK("Block is indeed busy", ((union overhead *) ps_to.sc_active_arena)->ov_size & B_BUSY);
3043 eif_rt_xfree (ps_to.sc_active_arena + OVERHEAD);
3044 ps_to.sc_arena = (EIF_REFERENCE) 0; /* No to zone yet */
3045 }
3046 }
3047 }
3048
3049 /*
3050 doc: <routine name="split_to_block" return_type="int" export="private">
3051 doc: <summary>The `ps_to' space may well not be full. If `is_to_keep' is set to `0', we return the remaining part at the end to the free list; if `is_to_keep' is set to `1', we do not return the end to the free list and it is used for the next partial collection as the new `ps_to' zone. This routine is also responsible for setting the B_LAST flag on the last block in `ps_to'.</summary>
3052 doc: <param name="is_to_keep" type="int">Is the remaining part of `ps_to' being kept for next partial collection?</param>
3053 doc: <return>1 when block was split, 0 otherwise.</return>
3054 doc: <thread_safety>Safe with synchronization</thread_safety>
3055 doc: <synchronization>Synchronization done through `scollect'.</synchronization>
3056 doc: </routine>
3057 */
3058
3059 rt_private int split_to_block (int is_to_keep)
3060 {
3061 union overhead *base; /* Base address */
3062 rt_uint_ptr size; /* Number of bytes used (from malloc's point of view) */
3063 rt_uint_ptr old_size; /* To save the old size for the leading object */
3064 int result;
3065
3066 REQUIRE("Valid sc_top", !is_to_keep || (ps_to.sc_top < ps_to.sc_end));
3067
3068 base = (union overhead *) ps_to.sc_active_arena;
3069 size = ps_to.sc_top - (EIF_REFERENCE) base; /* Plus overhead for first block */
3070
3071
3072 if (size == 0) {
3073 CHECK("previous_same_as_top", ps_to.sc_top == ps_to.sc_previous_top);
3074 CHECK("valid sc_top", ps_to.sc_top < ps_to.sc_end);
3075 CHECK("base is top", base == (void *) ps_to.sc_top);
3076 /* No objects were scavenged, ensure that `ps_to.sc_top' (aka base) refers to a B_LAST block. */
3077 base->ov_size |= B_LAST;
3078 /* Mark `ps_to.sc_top' as non-free block. */
3079 base->ov_size |= B_BUSY;
3080 if (!is_to_keep) {
3081 /* Block cannot be kept. We have to return it to the free list. */
3082 eif_rt_xfree ((EIF_REFERENCE) (base + 1));
3083 }
3084 /* Even if no split occurred, it is still a successful split when
3085 * seen from the client of this routine. */
3086 result = 1;
3087 } else {
3088 /* I'm faking a big block which will hold all the scavenged objects,
3089 * so that eif_rt_split_block() will be fooled and correctly split the block
3090 * after the last scavenged object--RAM. In fact, I'm restoring the
3091 * state the space was in when it was selected for a scavenge.
3092 * The malloc flags attached to the 'to' zone are restored. The two
3093 * which matters are B_LAST and B_CTYPE (needed by eif_rt_split_block).
3094 */
3095 old_size = base->ov_size; /* Save size of 1st block */
3096 base->ov_size = ps_to.sc_flags; /* Malloc flags for whole space */
3097 result = (eif_rt_split_block(base, size - OVERHEAD) != (rt_uint_ptr) -1);
3098 base->ov_size = old_size; /* Restore 1st block integrity */
3099
3100 /* Perform the memory update depending on whether the block could be split or not. */
3101 if (!result) {
3102 /* Could not split the block, it means that nothing remains otherwise we would
3103 * be in big trouble. */
3104 CHECK("No more space available", ps_to.sc_end == ps_to.sc_top)
3105 CHECK("Valid sc_previous_top", ps_to.sc_previous_top < ps_to.sc_end);
3106 base = (union overhead *) ps_to.sc_previous_top;
3107 /* Make it last block. Otherwise it would corrupt the coalesce process. */
3108 base->ov_size |= B_LAST;
3109 /* Update `ps_to.sc_top' and `ps_to.sc_previous_top' to the end of block. */
3110 ps_to.sc_previous_top = ps_to.sc_end;
3111 } else {
3112 /* We were able to split the block, so we simply need to put the B_LAST flag
3113 * on sc_top, not on sc_previous_top. */
3114 ((union overhead *) ps_to.sc_top)->ov_size |= B_LAST;
3115 /* Update `ps_to.sc_previous_top' to now point at the same location as `ps_to.sc_top'
3116 * since we don't want to update the flags or size of the previous block twice
3117 * in case a partial collection does nothing. */
3118 ps_to.sc_previous_top = ps_to.sc_top;
3119 if (is_to_keep) {
3120 /* Block needs to be kept, so we remove it from free list and mark it B_BUSY,
3121 * so that it is not freed later. */
3122 lxtract((union overhead *) ps_to.sc_top);
3123 /* Mark `ps_to' as non-free block. */
3124 ((union overhead *) ps_to.sc_top)->ov_size |= B_BUSY;
3125 /* Update accounting information, we need to remove one OVERHEAD since block
3126 * is not free anymore. */
3127 rt_m_data.ml_over -= OVERHEAD;
3128 if (ps_to.sc_flags & B_CTYPE) {
3129 rt_c_data.ml_over -= OVERHEAD;
3130 } else {
3131 rt_e_data.ml_over -= OVERHEAD;
3132 }
3133 } else {
3134 /* Update accounting information: the eif_rt_split_block() routine only updates
3135 * the overhead usage, because it assumes the block it is splitting is
3136 * still "used", so the split only appears to add overhead. This is not
3137 * the case here. We also free some memory which was accounted as used.
3138 */
3139 size = ps_to.sc_end - ps_to.sc_top; /* Memory unused (freed) */
3140
3141 rt_m_data.ml_used -= size;
3142 if (ps_to.sc_flags & B_CTYPE) {
3143 rt_c_data.ml_used -= size;
3144 } else {
3145 rt_e_data.ml_used -= size;
3146 }
3147 }
3148 }
3149 }
3150 return result;
3151 }
3152
3153 rt_private int sweep_from_space(void)
3154 {
3155 /* After a scavenging, the 'ps_from' zone has to be cleaned up. If by
3156 * chance the whole zone is free, it will be kept for the next 'to' zone,
3157 * unless that one is also empty in which case the 'from' space will be
3158 * returned to the free list anyway. If even one C block remains, then
3159 * every other block is freed, after having been coalesced.
3160 * Note that the coalescing has to be done manually. I would prefer to use
3161 * the coalesc() routine in the malloc package, but it assumes that the
3162 * coalesced block belongs to the free list, which is not true here.
3163 * The function returns 0 if the whole space is free, -1 otherwise.
3164 * When this function produces a big free block (i.e. when it returns 0),
3165 * that block should carry the B_LAST bit. Currently, it is added by the
3166 * caller because there is no guarantee the bit is already set at this point.
3167 */
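/* Informal outline of the loops below:
 *   1. skip the leading run of live blocks (B_C blocks, or busy, unforwarded,
 *      EO_MARK'ed Eiffel objects), unmarking them on the way;
 *   2. coalesce the following run of dead, forwarded or free blocks into one
 *      B_BUSY block, calling `dispose' on dead objects and extracting free
 *      blocks from the free list;
 *   3. if that run stops at a live block, hand the coalesced block back via
 *      eif_rt_xfree() and restart at step 1; if it stops at `sc_end', the
 *      space is either entirely free (return 0) or spoilt (return -1).
 */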
3168 union overhead *zone; /* Currently inspected block */
3169 union overhead *next; /* Address of next block */
3170 rt_uint_ptr flags; /* Malloc flags and size infos */
3171 EIF_REFERENCE end; /* First address beyond from space */
3172 EIF_TYPE_INDEX dtype; /* Dynamic type of object */
3173 EIF_REFERENCE base; /* First address of 'from' space */
3174 rt_uint_ptr size; /* Size of current object */
3175 char gc_status; /* Saved GC status */
3176
3177 base = ps_from.sc_arena;
3178 zone = (union overhead *) base; /* Start of from space */
3179 end = ps_from.sc_end; /* End of zone */
3180
3181 /* New macro to make the end condition tests easier to read.
3182 * It is undefined at the end of this routine.
3183 * Its meaning is the following: an object is alive if either:
3184 * - it is an object with the B_C flag (object cannot be moved)
3185 * - it is an object which has the B_BUSY flag (i.e. not in the free list) and
3186 * that has not been forwarded and was marked by the GC cycle with EO_MARK.
3187 */
3188 #define is_object_alive(zone) (((zone)->ov_size & B_C) || (!((zone)->ov_size & B_FWD) && ((zone)->ov_size & B_BUSY) && ((zone)->ov_flags & EO_MARK)))
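/* A few concrete cases (illustration only): a busy, unforwarded block whose
 * object carries EO_MARK is alive; a block with B_FWD set is dead here, since
 * its contents already live in the 'to' zone; a block with B_C set is always
 * considered alive because such objects are never moved by the collector. */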
3189
3190 #ifdef DEBUG
3191 dprintf(1)("sweep_from_space: chunk from 0x%lx to 0x%lx (excluded)\n",
3192 base, end);
3193 flush;
3194 #endif
3195
3196 for (;;) {
3197
3198 /* Loop until we reach an Eiffel block which is not marked. */
3199 while (((char *) zone < end) && is_object_alive(zone)) {
3200 #ifdef DEBUG
3201 dprintf(8)("sweep_from_space: found a %d bytes C block at 0x%lx\n",
3202 zone->ov_size & B_SIZE, zone + 1);
3203 flush;
3204 #endif
3205 /* Fetch next header. */
3206 flags = zone->ov_size;
3207 next = (union overhead *) (((EIF_REFERENCE) zone) + (flags & B_SIZE) + OVERHEAD);
3208
3209 /* Make sure object is unmarked (could be a frozen Eiffel object).
3210 * The C objects do not use the EO_MARK bit so there is no need
3211 * for tests.
3212 */
3213
3214 zone->ov_flags &= ~EO_MARK; /* Unconditionally unmark object */
3215 zone = next; /* Advance to next object */
3216 }
3217
3218 /* Either we reached an Eiffel block, a free block or the end of the
3219 * 'from' space. I could have tested for the B_LAST bit to check for
3220 * one big free block at the head instead of comparing zone and base.
3221 * Never mind, this would have added a test whereas the one here is
3222 * mandatory--RAM.
3223 */
3224
3225 if ((EIF_REFERENCE) zone >= end) { /* Seems we reached the end of space */
3226 return -1; /* 'from' holds at least one C block */
3227 } else {
3228 flags = zone->ov_size;
3229 next = (union overhead *) (((EIF_REFERENCE) zone) + (flags & B_SIZE) + OVERHEAD);
3230 }
3231
3232 #ifdef DEBUG
3233 dprintf(8)(
3234 "sweep_from_space: %sfound a %s %s%s%sblock (%d bytes) at 0x%lx\n",
3235 (EIF_REFERENCE) zone == base ? "" : "(spoilt) ",
3236 zone->ov_size & B_LAST ? "last" : "normal",
3237 zone->ov_size & B_BUSY ? "" : "free ",
3238 zone->ov_size & B_FWD ? "" : "dead ",
3239 zone->ov_size & B_C ? "BUG " : "",
3240 zone->ov_size & B_SIZE,
3241 zone + 1);
3242 flush;
3243 #endif
3244
3245 /* Every free block has to be extracted from the free list for
3246 * coalescing to occur safely. Non-free blocks which are dead
3247 * have to be "dispose"ed properly. Memory accounting has to be
3248 * performed diligently, but, alas (!), this simply undoes the work
3249 * done during the allocation. And all that overhead will be there for
3250 * nothing if the space is spoilt and has to be freed.
3251 */
3252
3253
3254 if (flags & B_BUSY) { /* We reached a busy block */
3255
3256 /* Update statistics: every block we extract from the free list and
3257 * every dead object we find here is still "used" in the sense that
3258 * it is part of a scavenge zone which is still allocated in memory.
3259 */
3260
3261 if (!(flags & B_FWD)) { /* Non-forwarded block is dead */
3262 if (zone->ov_flags & EO_DISP) { /* Exists ? */
3263 dtype = zone->ov_dtype; /* Dispose ptr */
3264 gc_status = rt_g_data.status; /* Save GC current status */
3265 rt_g_data.status |= GC_STOP; /* Stop GC */
3266 DISP(dtype, (EIF_REFERENCE) (zone + 1)); /* Call it */
3267 rt_g_data.status = gc_status; /* Restart GC */
3268 }
3269 #ifdef EIF_EXPENSIVE_ASSERTIONS
3270 CHECK ("Cannot be in object ID stack",
3271 !st_has (&object_id_stack, (EIF_REFERENCE) zone + 1));
3272 #endif
3273 }
3274 } else {
3275 size = flags & B_SIZE; /* Pre-compute that guy */
3276 lxtract(zone); /* Extract it from free list */
3277 rt_m_data.ml_used += size; /* Memory accounting */
3278 if (flags & B_CTYPE) { /* Block is in a C chunk */
3279 rt_c_data.ml_used += size;
3280 } else {
3281 rt_e_data.ml_used += size;
3282 }
3283 }
3284
3285 /* Whenever we reach a "first" block which will be the first one in the
3286 * coalesced block, we MUST make sure it is marked B_BUSY. In the event
3287 * the whole coalesced block would be freed (e.g. when we reach a C
3288 * block), eif_rt_xfree() would be called and that would be a no-op if no
3289 * B_BUSY mark was carried. Of course, the 'flags' variable, which is
3290 * carrying the original version of the malloc flags is left
3291 * undisturbed.
3292 */
3293
3294 zone->ov_size |= B_BUSY; /* Or free would not do anything */
3295
3296 /* Loop over the Eiffel/free blocks and merge them into one (as
3297 * described in the 'zone' header). Stop at the end of the space or
3298 * when a C block is reached.
3299 */
3300 while (((char *) next < end) && !is_object_alive(next)) {
3301
3302 #ifdef DEBUG
3303 dprintf(8)(
3304 "sweep_from_space: followed by a %s %s%sblock (%d bytes) at 0x%lx\n",
3305 next->ov_size & B_LAST ? "last" : "normal",
3306 next->ov_size & B_BUSY ? "" : "free ",
3307 next->ov_size & B_C ? "C " : next->ov_size & B_FWD ? "":"dead ",
3308 next->ov_size & B_SIZE,
3309 next + 1);
3310 flush;
3311 #endif
3312
3313 /* Any coalesced free block must be removed from the free list,
3314 * otherwise, if the coalesced block is finally freed because
3315 * the space is spoilt, the block will be listed twice in the list,
3316 * once in the original entry and once as being part of a bigger
3317 * block. Gulp!
3318 * Other non-forwarded blocks are dead and dispose is called if
3319 * necessary.
3320 */
3321
3322 flags = next->ov_size;
3323 size = flags & B_SIZE; /* Pre-compute that guy */
3324 if (flags & B_BUSY) { /* We reached a busy block */
3325
3326 /* I don't expect any overflow which could corrupt the flags.
3327 * The updating of the overhead is only done when the object
3328 * was dead, otherwise its overhead has been transferred to
3329 * the other scavenging zone (we are talking about partial
3330 * scavenging here, so there is no tenuring involved)--RAM.
3331 */
3332
3333 if (!(flags & B_FWD)) { /* Non-forwarded block is dead */
3334 if (next->ov_flags & EO_DISP) { /* Exists ? */
3335 dtype = next->ov_dtype; /* Dispose ptr */
3336 gc_status = rt_g_data.status; /* Save GC current status */
3337 rt_g_data.status |= GC_STOP; /* Stop GC */
3338 DISP(dtype,(EIF_REFERENCE) (next + 1));/* Call it */
3339 rt_g_data.status = gc_status; /* Restore previous GC status */
3340 }
3341 #ifdef EIF_EXPENSIVE_ASSERTIONS
3342 CHECK ("Cannot be in object ID stack",
3343 !st_has (&object_id_stack, (EIF_REFERENCE) next + 1));
3344 #endif
3345
3346 rt_m_data.ml_over -= OVERHEAD; /* Memory accounting */
3347 rt_m_data.ml_used += OVERHEAD; /* Overhead is used */
3348 if (flags & B_CTYPE) {
3349 rt_c_data.ml_over -= OVERHEAD; /* Overhead is decreasing */
3350 rt_c_data.ml_used += OVERHEAD;
3351 } else {
3352 rt_e_data.ml_over -= OVERHEAD; /* Block in Eiffel chunk */
3353 rt_e_data.ml_used += OVERHEAD;
3354 #ifdef MEM_STAT
3355 printf ("Eiffel: %ld used (+%ld), %ld total (sweep_from_space)\n",
3356 rt_e_data.ml_used, OVERHEAD, rt_e_data.ml_total);
3357 #endif
3358 }
3359 }
3360 } else {
3361 lxtract(next); /* Remove it from free list */
3362 rt_m_data.ml_over -= OVERHEAD; /* Memory accounting */
3363 rt_m_data.ml_used += OVERHEAD + size;
3364 if (flags & B_CTYPE) { /* Block is in a C chunk */
3365 rt_c_data.ml_over -= OVERHEAD; /* Overhead is decreasing */
3366 rt_c_data.ml_used += OVERHEAD + size;
3367 } else {
3368 rt_e_data.ml_over -= OVERHEAD; /* Block in Eiffel chunk */
3369 rt_e_data.ml_used += OVERHEAD + size;
3370 #ifdef MEM_STAT
3371 printf ("Eiffel: %ld used (+%ld), %ld total (sweep_from_space)\n",
3372 rt_e_data.ml_used, OVERHEAD + size, rt_e_data.ml_total);
3373 #endif
3374 }
3375 }
3376
3377 zone->ov_size += size + OVERHEAD; /* Do coalescing */
3378
3379 #ifdef DEBUG
3380 dprintf(8)("sweep_from_space: coalesced %s block is now %d bytes\n",
3381 zone->ov_flags & B_BUSY ? "busy" : "free",
3382 zone->ov_size & B_SIZE);
3383 flush;
3384 #endif
3385 /* Go to next element. */
3386 next = (union overhead *) (((EIF_REFERENCE) next) + size + OVERHEAD);
3387 }
3388
3389 /* Either we reached a C block or the end of the 'from' space. In case
3390 * we have a last block to free within a spoilt zone, we have to set
3391 * the B_LAST bit, in case we coalesced on the fly.
3392 */
3393
3394 if ((EIF_REFERENCE) next >= end) { /* We reached the end */
3395
3396 #ifdef DEBUG
3397 if ((EIF_REFERENCE) zone != base)
3398 dprintf(8)("sweep_from_space: freed %d bytes (zone spoilt)\n",
3399 zone->ov_size & B_SIZE);
3400 flush;
3401 #endif
3402
3403 if ((EIF_REFERENCE) zone == base) /* The whole space is free */
3404 return 0; /* 'from' may become next 'to' */
3405 else { /* At least one C block */
3406 zone->ov_size |= B_LAST; /* Ensure malloc sees it as last */
3407 eif_rt_xfree((EIF_REFERENCE) (zone + 1)); /* Back to free list */
3408 return -1; /* Space is spoilt */
3409 }
3410 }
3411
3412 #ifdef DEBUG
3413 dprintf(8)("sweep_from_space: giving %d bytes to free list\n",
3414 zone->ov_size & B_SIZE);
3415 flush;
3416 #endif
3417
3418 /* We must have reached a C block, which means we can free the block.
3419 * Free the coalesced block we have so far, starting at zone and reset
3420 * the coalescing base to the next object. We will then enter the first
3421 * loop which walks over the C blocks, and this loop will unmark the
3422 * current object.
3423 * This routine is a mess and needs rewriting--RAM.
3424 */
3425
3426 #ifdef EIF_ASSERTIONS
3427 size = zone->ov_size & B_SIZE;
3428 #endif
3429 eif_rt_xfree((EIF_REFERENCE) (zone + 1)); /* Put block back to free list */
3430 CHECK("No bigger than expected", (zone->ov_size & B_SIZE) == size);
3431 zone = next; /* Reset coalescing base */
3432 }
3433 /* NOTREACHED */
3434
3435 /* Remove macro definition. */
3436 #undef is_object_alive
3437 }
3438
3439 rt_private int find_scavenge_spaces(void)
3440 {
3441 /* Look for a 'from' and a 'to' space for partial scavenging. Usually, the
3442 * Eiffel memory is viewed as a cyclic memory, where the old 'from' becomes
3443 * the 'to' space and the next 'from' will be the chunk following the new
3444 * 'to' (i.e. the old 'from') until 'to' is near the break, at which point
3445 * it is given back to the kernel.
3446 * The function returns 0 if all is ok, -1 otherwise.
3447 */
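/* As an informal illustration (hypothetical chunks c1, c2, c3): on one cycle
 * 'from' is c1 and 'to' is a coalesced or malloc'ed zone; if c1 comes out
 * entirely free, the next cycle typically uses from = c2, to = c1, then
 * from = c3, to = c2, and so on, so each chunk is compacted in turn as long
 * as no C blocks spoil the zones. */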
3448 #if defined EIF_NO_SCAVENGING
3449 return -1;
3450 #else /* EIF_NO_SCAVENGING */
3451 size_t from_size; /* Size of selected 'from' space */
3452 EIF_REFERENCE to_space; /* Location of the 'to' space */
3453
3454 #ifdef DEBUG
3455 dprintf(1)("find_scavenge_spaces: last from was 0x%lx\n", last_from);
3456 flush;
3457 #endif
3458
3459 /* Find next from zone for scavenging. */
3460 last_from = find_from_space();
3461
3462 #ifdef DEBUG
3463 dprintf(1)("find_scavenge_spaces: from space is now 0x%lx\n", last_from);
3464 flush;
3465 #endif
3466
3467 if (last_from == (struct chunk *) 0) /* There is no space available. */
3468 return -1;
3469
3470 /* Water-mark is unused by the partial scavenging algorithm */
3471 from_size = last_from->ck_length; /* Record length */
3472 ps_from.sc_size = from_size; /* Subject to copying */
3473 ps_from.sc_arena = (EIF_REFERENCE) (last_from + 1); /* Overwrites first header */
3474 ps_from.sc_active_arena = (EIF_REFERENCE) (last_from + 1); /* Overwrites first header */
3475 ps_from.sc_end = ps_from.sc_arena + from_size; /* First location beyond */
3476 ps_from.sc_previous_top = ps_from.sc_top = ps_from.sc_arena; /* Empty for now */
3477
3478 if (ps_to.sc_arena) {
3479 CHECK("valid ps_to", ps_to.sc_previous_top == ps_to.sc_top);
3480 /* Clear B_LAST flag, it will be set in `split_to_block' at the end of this GC cycle. */
3481 ((union overhead *) ps_to.sc_top)->ov_size &= ~B_LAST;
3482 return 0; /* We already have a 'to' space */
3483 }
3484
3485 find_to_space(); /* Try to find a 'to' space by coalescing */
3486 if (ps_to.sc_arena) /* It worked */
3487 return 0; /* We got our 'to' space */
3488
3489 /* We cannot indefinitely ask for malloced chunks, as the size of the
3490 * process may increase each time we do so... Therefore, we count each
3491 * time we do so and are allowed at most TO_MAX allocations. Past this
3492 * limit, there cannot be any partial scavenging, at least for this cycle.
3493 * Note that when core is released to the kernel, the count of malloc'ed
3494 * 'to' zones decreases accordingly.
3495 */
3496
3497 if (rt_g_data.gc_to >= TO_MAX || rt_e_data.ml_chunk < CHUNK_MIN)
3498 return -1; /* Cannot allocate a 'to' space */
3499
3500 /* Find a 'to' space.
3501 * We ask for more core, I repeat: we ask for more core. If this fails, then no
3502 * scavenging will be done. This is why it is so important to be able to
3503 * always have a 'to' handy for the next scavenge (i.e. no C blocks in the
3504 * 'from' space).
3505 * It's necessary to have the 'to' space in the Eiffel space, so I call a
3506 * somewhat low-level malloc routine.
3507 *
3508 * The get_to_from_core replaces the previous call to malloc_from_eiffel_list_no_gc which used
3509 * to get a to_space anywhere in the free list. But we want an
3510 * empty chunk and if we arrive here, the only way to get a free chunk
3511 * is to get it from the kernel. It does not happen so often. Usually
3512 * it happens the first time partial scavenging is called.
3513 * Fixes random-string-blank-panic and random-array-alloc-loop.
3514 * -- Fabrice.
3515 */
3516
3517 to_space = get_to_from_core (); /* Allocation from free list */
3518 if ((EIF_REFERENCE) 0 == to_space)
3519 return -1; /* Unable to find a 'to' space */
3520
3521 /* The 'to' space will see its header overwritten, which is basically why
3522 * we have to save the flags associated with the arena. When it's time to
3523 * split the 'to' block, we can always fake the original block by saving the
3524 * size header (which belongs to the first scavenged object), restoring the
3525 * original, doing the split and finally restoring the saved header.
3526 */
3527
3528 rt_g_data.gc_to++; /* Count 'to' zone allocation */
3529 ps_to.sc_arena = to_space - OVERHEAD; /* Overwrite the header */
3530 ps_to.sc_active_arena = to_space - OVERHEAD; /* Overwrite the header */
3531 ps_to.sc_flags = HEADER(to_space)->ov_size; /* Save flags */
3532 ps_to.sc_size = (ps_to.sc_flags & B_SIZE) + OVERHEAD; /* Used for statistics */
3533 ps_to.sc_end = ps_to.sc_arena + (ps_to.sc_flags & B_SIZE) + OVERHEAD; /* First free location beyond */
3534 ps_to.sc_top = ps_to.sc_arena; /* Is empty */
3535 ps_to.sc_previous_top = ps_to.sc_arena; /* Is empty */
3536 /* Clear B_LAST flag, it will be set in `split_to_block' at the end of this GC cycle. */
3537 ((union overhead *) ps_to.sc_top)->ov_size &= ~B_LAST;
3538
3539 #ifdef DEBUG
3540 dprintf(1)("find_scavenge_spaces: malloc'ed a to space at 0x%lx (#%d)\n",
3541 ps_to.sc_arena, rt_g_data.gc_to);
3542 dprintf(1)("find_scavenge_spaces: from [0x%lx, 0x%lx] to [0x%lx, 0x%lx]\n",
3543 ps_from.sc_arena, ps_from.sc_end - 1,
3544 ps_to.sc_arena, ps_to.sc_end - 1);
3545 flush;
3546 #endif
3547
3548 return 0; /* Ok, we got a 'to' space */
3549 #endif /* EIF_NO_SCAVENGING */
3550 }
3551
3552 #ifndef EIF_NO_SCAVENGING
3553
3554 /*
3555 doc: <routine name="find_from_space" return_type="struct chunk *" export="private">
3556 doc: <summary>Find the next chunk that can be used as the from space for `ps_from'. We cycle through the list of available Eiffel chunks and update the Eiffel chunk cursor accordingly.</summary>
3557 doc: <return>NULL when not found, otherwise a chunk of Eiffel memory.</return>
3558 doc: <thread_safety>Safe with synchronization</thread_safety>
3559 doc: <synchronization>Synchronization done through `scollect'.</synchronization>
3560 doc: </routine>
3561 */
3562 rt_private struct chunk *find_from_space(void)
3563 {
3564 char *l_arena;
3565 struct chunk *start, *real_start;
3566
3567 if (last_from == NULL) {
3568 /* If `last_from' is null, then it was never set, we start from the beginning by
3569 * flagging `real_start' to NULL. */
3570 real_start = NULL;
3571 } else if (last_from != cklst.e_cursor) {
3572 /* `last_from' was set, but it is now different from the cursor position, so we take
3573 * the current cursor position as a from space. */
3574 /* Note that it can be NULL if we are off the list. */
3575 real_start = cklst.e_cursor;
3576 } else {
3577 /* We continue our iteration to the next block. */
3578 /* Note that it can be NULL if we are off the list. */
3579 real_start = last_from->ck_lnext;
3580 }
3581 if (!real_start) {
3582 /* Could not find a valid start, we start from the beginning. */
3583 real_start = cklst.eck_head;
3584 }
3585 for (start = real_start; start != NULL; start = start->ck_lnext) {
3586 /* Skip the active 'to' space, if any: we must not have 'from' and
3587 * 'to' at the same location, otherwise it's a 4 days bug--RAM.
3588 */
3589 l_arena = (char *) (start + 1);
3590 if (l_arena != ps_to.sc_arena) {
3591 cklst.e_cursor = start;
3592 return start; /* No, it's ok */
3593 }
3594 }
3595
3596 /* We haven't found a block, so we restart from beginning if `real_start' was not
3597 * already at the beginning. */
3598 for (start = cklst.eck_head; start != real_start; start = start->ck_lnext) {
3599 /* See previous loop for explanations. */
3600 l_arena = (char *) (start + 1);
3601 if (l_arena != ps_to.sc_arena) {
3602 cklst.e_cursor = start;
3603 return start; /* No, it's ok */
3604 }
3605 }
3606 return NULL; /* No chunk found */
3607 }
3608
3609 rt_private void find_to_space(void)
3610 /* Fills in the global `ps_to' zone structure. */
3611 {
3612 /* Look for a suitable space which could be used by partial scavenging
3613 * as `ps_to' zone. If the leading block in the chunk is free but not
3614 * equal to the whole chunk, we even attempt block coalescing.
3615 */
3616 struct chunk *cur; /* Current chunk we are considering */
3617 rt_uint_ptr flags = 0; /* Malloc info flags */
3618 EIF_REFERENCE arena = (EIF_REFERENCE) 0; /* Where chunk's arena starts */
3619
3620 for (cur = cklst.eck_head; cur != (struct chunk *) 0; cur = cur->ck_lnext) {
3621 arena = (EIF_REFERENCE) cur + sizeof(struct chunk);
3622 if (arena == ps_from.sc_arena)
3623 continue; /* Skip scavenging 'from' space */
3624 flags = ((union overhead *) arena)->ov_size;
3625 if (flags & B_BUSY) /* Leading block allocated */
3626 continue; /* Chunk not completely free */
3627 if (!(flags & B_LAST)) /* Looks like a fragmented chunk */
3628 if (0 == chunk_coalesc(cur)) /* Coalescing was useless */
3629 continue; /* Skip this chunk */
3630 flags = ((union overhead *) arena)->ov_size;
3631 if (flags & B_LAST) /* One big happy free block */
3632 break;
3633 }
3634
3635 if (cur == (struct chunk *) 0) /* Did not find any suitable chunk */
3636 return;
3637 CHECK ("Flags must be initialized", flags != 0);
3638 CHECK ("Arena must be initialized", arena != (EIF_REFERENCE) 0);
3639
3640 /* Initialize scavenging zone. Note that the arena of the zone starts at
3641 * the header of the first block, but we save the malloc flags so that we
3642 * can restore the block later when it is time to put it back to the free
3643 * list world.
3644 */
3645
3646 ps_to.sc_previous_top = ps_to.sc_top = ps_to.sc_arena = ps_to.sc_active_arena = arena;
3647 ps_to.sc_flags = flags;
3648 ps_to.sc_end = ps_to.sc_arena + (flags & B_SIZE) + OVERHEAD;
3649 ps_to.sc_size = (flags & B_SIZE) + OVERHEAD;
3650 /* Clear B_LAST flag, it will be set in `split_to_block' at the end of this GC cycle. */
3651 ((union overhead *) ps_to.sc_top)->ov_size &= ~B_LAST;
3652
3653 /* This zone is now used for scavenging, so it must be removed from the free
3654 * list so that further mallocs do not attempt to use this space (when
3655 * tenuring, for instance). Also the block is used from the statistics
3656 * point of view.
3657 */
3658
3659 lxtract((union overhead *) arena); /* Extract block from free list */
3660
3661 rt_m_data.ml_used += (flags & B_SIZE);
3662 if (flags & B_CTYPE) {
3663 rt_c_data.ml_used += (flags & B_SIZE);
3664 } else {
3665 rt_e_data.ml_used += (flags & B_SIZE);
3666 }
3667
3668 #ifdef DEBUG
3669 dprintf(1)("find_to_space: coalesced a to space at 0x%lx (#%d)\n",
3670 ps_to.sc_arena, rt_g_data.gc_to);
3671 dprintf(1)("find_to_space: from [0x%lx, 0x%lx] to [0x%lx, 0x%lx]\n",
3672 ps_from.sc_arena, ps_from.sc_end - 1,
3673 ps_to.sc_arena, ps_to.sc_end - 1);
3674 flush;
3675 #endif
3676 }
3677 #endif
3678
3679 rt_private EIF_REFERENCE scavenge(register EIF_REFERENCE root, char **top)
3680 {
3681 /* The object pointed to by 'root' is to be scavenged in the 'to' space,
3682 * provided it is not an expanded object (otherwise, it has already been
3683 * scavenged as part of the object that holds it). The function returns the
3684 * pointer to the new object's location, in the 'to' space.
3685 */
3686 union overhead *zone; /* Malloc info header */
3687 rt_uint_ptr length; /* Length of scavenged object */
3688
3689 REQUIRE ("Algorithm moves objects",
3690 rt_g_data.status & (GC_GEN | GC_PART) || rt_g_data.status & GC_FAST);
3691
3692 zone = HEADER(root);
3693
3694 /* If object has the EO_STACK mark, then it means that it cannot move. So we have
3695 * to return immediately. */
3696 if (zone->ov_flags & EO_STACK) {
3697 CHECK ("EO_STACK not in Generation Scavenge From zone",
3698 !((rt_g_data.status & GC_GEN) &&
3699 (root > sc_from.sc_arena) &&
3700 (root <= sc_from.sc_top)));
3701 CHECK ("EO_STACK not in Generation Scavenge TO zone",
3702 !((rt_g_data.status & GC_GEN) &&
3703 (root > sc_to.sc_arena) &&
3704 (root <= sc_to.sc_top)));
3705 CHECK ("EO_STACK not in Partial Scavenge From zone.",
3706 !((rt_g_data.status & GC_PART) &&
3707 (root > ps_from.sc_active_arena) &&
3708 (root <= ps_from.sc_end)));
3709 CHECK ("EO_STACK not in Partial Scavenge TO zone.",
3710 !((rt_g_data.status & GC_PART) &&
3711 (root > ps_to.sc_active_arena) &&
3712 (root <= ps_to.sc_top)));
3713 return root;
3714 }
3715
3716 /* Expanded objects are held in one object, and a pseudo-reference field
3717 * in the father object points to them. However, the scavenging process
3718 * does not update this reference. Instead, the expanded header knows how to
3719 * reach the header of the father object. If scavenging is on, we reach the
3720 * expanded once the father has been scavenged, and we get the new address
3721 * by following the forwarding pointer left behind. Nearly a kludge.
3722 * Let A be the address of the original object (zone below) and A' (new) the
3723 * address of the scavenged object (given by following the forwarding
3724 * pointer left) and P the pointed expanded object in the original (root).
3725 * Then the address of the scavenged expanded is A'+(P-A).
3726 */
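/* Numeric example (hypothetical addresses): if the container's data space A
 * was at 0x1000 and has been scavenged to A' = 0x5000, an expanded sub-object
 * P at 0x1040 in the original is found in the copy at
 *
 *     exp = new + (root - (EIF_REFERENCE) (container_zone + 1));
 *         = 0x5000 + (0x1040 - 0x1000) = 0x5040
 *
 * which is exactly the computation performed below, `container_zone->ov_fwd'
 * playing the role of A'. */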
3727 if (eif_is_nested_expanded(zone->ov_flags)) {
3728 /* Compute original object's address (before scavenge) */
3729 EIF_REFERENCE exp; /* Expanded data space */
3730 EIF_REFERENCE new; /* New object's address */
3731 union overhead *container_zone; /* Header of object containing
3732 * expanded object `root' */
3733 container_zone = (union overhead *) ((EIF_REFERENCE) zone - (zone->ov_size & B_SIZE));
3734
3735 if (!(container_zone->ov_size & B_FWD)) {
3736 /* Container object of `root' did not move, so nothing
3737 * needs to be done */
3738 return root;
3739 }
3740
3741 CHECK ("In Generation Scavenge From zone",
3742 (rt_g_data.status & GC_PART) ||
3743 ((rt_g_data.status & (GC_GEN | GC_FAST)) &&
3744 (root > sc_from.sc_arena) &&
3745 (root <= sc_from.sc_top)));
3746
3747 CHECK ("In Partial Scavenge From zone",
3748 (rt_g_data.status & (GC_GEN | GC_FAST)) ||
3749 ((rt_g_data.status & GC_PART) &&
3750 (root > ps_from.sc_active_arena) &&
3751 (root <= ps_from.sc_end)));
3752
3753 new = container_zone->ov_fwd; /* Data space of the scavenged object */
3754 exp = new + (root - (EIF_REFERENCE) (container_zone + 1)); /* New data space */
3755
3756 zone->ov_fwd = exp; /* Leave forwarding pointer */
3757 zone->ov_size |= B_FWD; /* Mark object as forwarded */
3758 return exp; /* This is the new location of expanded */
3759 }
3760
3761 CHECK ("In Generation Scavenge From zone",
3762 (rt_g_data.status & GC_PART) ||
3763 ((rt_g_data.status & (GC_GEN | GC_FAST)) &&
3764 (root > sc_from.sc_arena) &&
3765 (root <= sc_from.sc_top)));
3766
3767 CHECK ("In Partial Scavenge From zone",
3768 (rt_g_data.status & (GC_GEN | GC_FAST)) ||
3769 ((rt_g_data.status & GC_PART) &&
3770 (root > ps_from.sc_active_arena) &&
3771 (root <= ps_from.sc_end)));
3772
3773 CHECK ("Not in Generation Scavenge TO zone",
3774 !((rt_g_data.status & GC_GEN) &&
3775 (root > sc_to.sc_arena) &&
3776 (root <= sc_to.sc_top))
3777 );
3778 CHECK ("Not in Partial Scavenge TO zone.",
3779 !((rt_g_data.status & GC_PART) &&
3780 (root > ps_to.sc_active_arena) &&
3781 (root <= ps_to.sc_top))
3782 );
3783
3784 /* If an Eiffel object holds the B_C mark (we know it's an Eiffel object
3785 * because it is referenced by an Eiffel object), then simply ignore it.
3786 * It is important to mark the object though, because it might be a frozen
3787 * object part of the remembered set, and it will be removed if it is not
3788 * marked. Besides, we need to cut recursion to prevent loops.
3789 */
3790 if (zone->ov_size & B_C) {
3791 zone->ov_flags |= EO_MARK; /* Mark it */
3792 return root; /* Leave object where it is */
3793 }
3794
3795 root = *top; /* New location in 'to' space */
3796 length = (zone->ov_size & B_SIZE) + OVERHEAD;
3797 *top += length; /* Update free-location pointer */
3798 memcpy (root, zone, length); /* The scavenge process itself */
3799 zone->ov_fwd = root + OVERHEAD; /* Leave forwarding pointer */
3800 zone->ov_size |= B_FWD; /* Mark object as forwarded */
3801
3802 #ifdef EIF_NO_SCAVENGING
3803 CHECK ("Scavenging is not disabled", 0);
3804 #endif /* EIF_NO_SCAVENGING */
3805
3806 return root + OVERHEAD; /* New object's location */
3807 }
3808
3809 /* Generation-based collector. This is a non-incremental, fast collector, which
3810 * is derived from Ungar's papers (ACM 1984 and OOPSLA'88). Provision is made
3811 * for both generation collection and generation scavenging.
3812 */
3813
3814 #endif /* ISE_GC */
3815 /*
3816 doc: <routine name="collect" return_type="int" export="public">
3817 doc: <summary>The generational collector entry point, with statistics updating. The time spent in the algorithm is monitored by scollect and accessible to the user via MEMORY primitives.</summary>
3818 doc: <return>0 if collection was done, -1 otherwise.</return>
3819 doc: <thread_safety>Safe</thread_safety>
3820 doc: <synchronization>Synchronization done through `scollect'.</synchronization>
3821 doc: </routine>
3822 */
3823
3824 rt_public int collect(void)
3825 {
3826 #ifdef ISE_GC
3827 int result;
3828 result = scollect(generational_collect, GST_GEN);
3829 return result;
3830 #else
3831 return 0;
3832 #endif /* ISE_GC */
3833 }
3834
3835 #ifdef ISE_GC
3836 rt_private int generational_collect(void)
3837 {
3838 /* Generation collector -- The new generation is completely collected
3839 * and surviving objects are tenured (i.e. promoted to the old generation).
3840 * The routine returns 0 if collection performed normally, -1 if GC is
3841 * stopped or generation scavenging was stopped for some reason.
3842 */
3843
3844 RT_GET_CONTEXT
3845 size_t age; /* Computed tenure age */
3846 rt_uint_ptr overused; /* Amount of data over watermark */
3847 EIF_REFERENCE watermark; /* Watermark in generation zone */
3848
3849 if (rt_g_data.status & GC_STOP)
3850 return -1; /* Garbage collection stopped */
3851
3852 SIGBLOCK; /* Block signals during garbage collection */
3853 rt_g_data.status = GC_FAST; /* Fast generation collection */
3854 rt_g_data.nb_partial++; /* One more partial collection */
3855
3856 #ifdef DEBUG
3857 dprintf(1)("collect: tenure age is %d for this cycle\n", tenure);
3858 flush;
3859 #endif
3860
3861 /* First, reset the age tables, so that we can recompute the tenure
3862 * threshold for the next pass (feedback). We reset the whole array
3863 * instead of just the first `eif_tenure_max' elements for two reasons:
3864 * 1 - most of the time `eif_tenure_max == TENURE_MAX'
3865 * 2 - can be optimized better by C compiler.
3866 */
3867 memset (size_table, 0, TENURE_MAX * sizeof (rt_uint_ptr));
3868 memset (age_table, 0, TENURE_MAX * sizeof (uint32));
3869
3870 mark_new_generation(MTC_NOARG); /* Mark all new reachable objects */
3871 full_update(); /* Sweep the youngest generation */
3872 unmark_c_stack_objects (); /* Unmark all objects allocated on C stack. */
3873 if (gen_scavenge & GS_ON)
3874 swap_gen_zones(); /* Swap generation scavenging spaces */
3875
3876 /* Compute the tenure for the next pass. If generation scavenging is on,
3877 * we use Ungar's feedback technique on the size_table array, otherwise
3878 * the age_table array is used.
3879 */
3880
3881 tenure = eif_tenure_max;
3882
3883 if (gen_scavenge == GS_ON) {
3884 /* Generation scavenging is on and has not been stopped. If less than
3885 * the watermark is used, set tenure to eif_tenure_max,
3886 * to avoid tenuring for the next cycle. Otherwise, set it so
3887 * that we tenure at least
3888 * 'overused' bytes next cycle. GS_FLOATMARK is set to 40% of the
3889 * scavenge zone size and lets us get some free space to let other
3890 * young objects die, hopefully.
3891 * When compiled for speed however, we do not want to spend our time
3892 * copying objects back and forth too many times. So in this case,
3893 * we test for the watermark minus GS_FLOATMARK to maintain at least
3894 * that space free. This should limit collection racing when objects
3895 * are allocated at a high rate, at the price of more memory usage,
3896 * since this will incur some tenuring.
3897 */
3898
3899 watermark = cc_for_speed ? sc_from.sc_mark - GS_FLOATMARK : sc_from.sc_mark;
3900
3901 if (sc_from.sc_top >= watermark) {
3902 overused = sc_from.sc_top - sc_from.sc_mark + GS_FLOATMARK;
3903 for (age = eif_tenure_max - 1; age > 0; age--) { /* `age' is unsigned; stop at 0 to avoid wrap-around */
3904 if (overused >= size_table[age]) {
3905 /* Amount tenured at 'age' */
3906 overused -= size_table [age];
3907 } else {
3908 break;
3909 }
3910 }
3911 tenure = age; /* Tenure threshold for next cycle */
3912 }
3913 }
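/* Illustration of the feedback computation above (made-up numbers): suppose
 * `overused' comes out to 10240 bytes and the oldest entries of size_table
 * hold 4096, 4096 and 3072 bytes. Walking down from eif_tenure_max - 1, the
 * first two entries are subtracted (10240 - 4096 - 4096 = 2048), the third
 * one no longer fits, so the loop breaks there and `tenure' is set to that
 * age: tenuring objects of that age and older next cycle removes at least
 * the 10 KB overuse from the scavenge zone. */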
3914
3915 /* Deal with the objects in the chunk space. Our aim is to limit the number
3916 * of young objects to OBJ_MAX, so that we do not spend a considerable time
3917 * walking through the moved set and the remembered set.
3918 */
3919
3920 overused = 0;
3921 for (age = 0; age < eif_tenure_max; age++)
3922 overused += age_table[age];
3923
3924 #ifdef DEBUG
3925 dprintf(1)("collect: %d objects in moved set\n", overused);
3926 dprintf(1)("collect: scavenged %d bytes (%d bytes free in zone)\n",
3927 sc_from.sc_top - sc_from.sc_arena, sc_from.sc_end - sc_from.sc_top);
3928 #endif
3929
3930 if (overused > OBJ_MAX) {
3931 overused -= OBJ_MAX; /* Amount of spurious objects */
3932 for (age = eif_tenure_max - 1; age > 0; age--) { /* Same unsigned wrap-around guard as above */
3933 if (overused >= age_table [age]) {
3934 /* Amount tenured at 'age' */
3935 overused -= age_table[age];
3936 } else {
3937 break;
3938 }
3939 }
3940 tenure = tenure < age ? tenure : age; /* Tenure for next cycle */
3941 }
3942
3943 /* We have computed the tenure based on the current state, but at the next
3944 * cycle, the objects will be one generation older, so we have to increase
3945 * the tenure limit by one, if it is not already at eif_tenure_max - 1 or above.
3946 */
3947
3948 if (tenure + 1 < eif_tenure_max)
3949 tenure++;
3950
3951 #ifdef DEBUG
3952 dprintf(1)("collect: tenure fixed to %d for next cycle\n", tenure);
3953 flush;
3954 #endif
3955
3956 SIGRESUME; /* Restore signal handling and dispatch queued ones */
3957
3958 return (gen_scavenge & GS_STOP) ? -1 : 0; /* Signals error if stopped */
3959 }
3960
3961 rt_private void mark_new_generation(EIF_CONTEXT_NOARG)
3962 {
3963 /* Generation mark phase -- All the young objects which are reachable
3964 * from the remembered set are alive. Old objects have reached immortality,
3965 * not less--RAM. We must not forget new objects that are referenced only
3966 * via new objects. The new generation is described by the moved set.
3967 * I am aware of the code duplication, but this is a trade for speed over
3968 * run-time size and maintainability--RAM.
3969 */
3970 int moving = gen_scavenge & GS_ON; /* May objects be moved? */
3971
3972 /* Initialize our overflow depth */
3973 overflow_stack_depth = 0;
3974
3975 #ifdef EIF_THREADS
3976 /* Initialize list of live indexes for threads. */
3977 /* This should be done before any marking. */
3978 prepare_live_index ();
3979 #endif
3980
3981 /* First deal with the root object. If it is not old, then mark it */
3982 if (root_obj && !(HEADER(root_obj)->ov_flags & EO_OLD))
3983 root_obj = GEN_SWITCH(&root_obj);
3984 #ifdef WORKBENCH
3985 if (rt_extension_obj && !(HEADER(rt_extension_obj)->ov_flags & EO_OLD))
3986 rt_extension_obj = GEN_SWITCH(&rt_extension_obj);
3987 #endif
3988 if (scp_mnger && !(HEADER(scp_mnger)->ov_flags & EO_OLD))
3989 scp_mnger = GEN_SWITCH(&scp_mnger);
3990
3991 if (except_mnger && !(HEADER(except_mnger)->ov_flags & EO_OLD))
3992 except_mnger = GEN_SWITCH(&except_mnger);
3993
3994 /* Deal with remembered set, which records the addresses of all the
3995 * old objects pointing to new ones.
3996 */
3997 mark_simple_stack(&rem_set, GEN_SWITCH, moving);
3998
3999 internal_marking(GEN_SWITCH, moving);
4000 }
4001
4002 rt_private EIF_REFERENCE hybrid_gen_mark(EIF_REFERENCE *a_root)
4003 {
4004 /* Mark all the objects referenced by the root object.
4005 * All the attributes of an object are recursively marked,
4006 * except the last one. This brings a noticeable
4007 * improvement with structures like LINKED_LIST when its `right'
4008 * part is the last reference (note this is not always the case).
4009 * It also prevents stack overflow with the `overflow_stack_set'.
4010 */
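/* Concrete illustration: for a chain of LINKED_LIST cells whose `right'
 * reference is stored last, the do/while loop below marks the other
 * attributes recursively but then simply moves `current' to `right' and
 * iterates, so walking the spine of an N-cell list adds no recursion depth
 * (instead of N nested calls). */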
4011 union overhead *zone; /* Malloc info zone fields */
4012 uint16 flags; /* Eiffel flags */
4013 long offset; /* Reference's offset */
4014 uint32 size; /* Size of items (for array of expanded) */
4015 EIF_REFERENCE *object; /* Sub-objects scanned */
4016 EIF_REFERENCE current; /* Object currently inspected */
4017 EIF_REFERENCE *prev; /* Holder of current (for update) */
4018 EIF_REFERENCE root = *a_root; /* Root object */
4019 long count; /* Number of references */
4020
4021 /* If 'root' is a void reference, return immediately. This is redundant
4022 * with the beginning of the loop, but this case occurs quite often.
4023 */
4024
4025 if (root == (EIF_REFERENCE) 0)
4026 return (EIF_REFERENCE) 0;
4027
4028 /* Stack overflow protection */
4029 overflow_stack_depth++;
4030 if (overflow_stack_depth > overflow_stack_limit) {
4031 /* If we can record the object in the overflow stack, we do it, otherwise
4032 * we hope we will have enough stack to complete the GC cycle. */
4033 if (epush(&overflow_stack_set, a_root) != -1) {
4034 overflow_stack_count++;
4035 overflow_stack_depth--;
4036 return root;
4037 }
4038 }
4039
4040 /* Initialize the variables for the loop */
4041 current = root;
4042 prev = (EIF_REFERENCE *) 0;
4043
4044 do {
4045 if (current == (EIF_REFERENCE) 0) /* No further exploration */
4046 goto done; /* Exit the procedure */
4047
4048 zone = HEADER(current); /* Malloc info zone */
4049
4050 #ifdef DEBUG
4051 if (zone->ov_size & B_FWD) {
4052 dprintf(16)("hybrid_gen_mark: 0x%lx fwd to 0x%lx (DT %d, %d bytes)\n",
4053 current,
4054 zone->ov_fwd,
4055 HEADER(zone->ov_fwd)->ov_dftype,
4056 zone->ov_size & B_SIZE);
4057 } else {
4058 dprintf(16)("hybrid_gen_mark: 0x%lx %s%s%s%s(DT %d, %d bytes)\n",
4059 current,
4060 zone->ov_flags & EO_MARK ? "marked " : "",
4061 zone->ov_flags & EO_OLD ? "old " : "",
4062 zone->ov_flags & EO_NEW ? "new " : "",
4063 zone->ov_flags & EO_REM ? "remembered " : "",
4064 zone->ov_dftype,
4065 zone->ov_size & B_SIZE);
4066 }
4067 flush;
4068 #endif
4069
4070 /* If we reach a marked object or a forwarded object, return
4071 * immediately: the object has been already processed. Otherwise, an old
4072 * object which is not remembered is not processed, as it can't
4073 * reference any new objects. Old remembered objects are marked when
4074 * they are processed, otherwise it would be possible to process it
4075 * twice (once via a reference from another object and once because it's
4076 * in the remembered list).
4077 */
4078
4079 if (zone->ov_size & B_FWD) { /* Object was forwarded (scavenged) */
4080 if(prev)
4081 *prev = zone->ov_fwd; /* Update with its new location */
4082 goto done; /* Exit the procedure */
4083 }
4084
4085 flags = zone->ov_flags; /* Fetch Eiffel flags */
4086 if (flags & EO_MARK) /* Object has been already processed? */
4087 goto done; /* Exit the procedure */
4088
4089 if (flags & EO_OLD) { /* Old object unmarked */
4090 if (flags & EO_REM) { /* But remembered--mark it as processed */
4091 zone->ov_flags = flags | EO_MARK;
4092 } else /* Old object is not remembered */
4093 goto done; /* Skip it--object did not move */
4094 }
4095
4096 if (flags & EO_STACK) {
4097 /* Object is on the C stack, so we need to record it to unmark it later. */
4098 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
4099 epush(&c_stack_object_set, current);
4100 zone->ov_flags = flags | EO_MARK;
4101 }
4102
4103 /* If we reach an expanded object, then we already dealt with the object
4104 * which holds it. If this object has been forwarded, we need to update
4105 * the reference field. Of course objects with EO_OLD set are ignored.
4106 * It's easy to know whether a normal object has to be scavenged or
4107 * marked. The new objects outside the scavenge zone carry the EO_NEW
4108 * mark.
4109 */
4110 if (!(flags & EO_OLD) || eif_is_nested_expanded(flags)) {
4111 current = gscavenge(current); /* Generation scavenging */
4112 zone = HEADER(current); /* Update zone */
4113 flags = zone->ov_flags; /* And Eiffel flags */
4114 if (prev) /* Update referencing pointer */
4115 *prev = current;
4116 }
4117
4118 /* It's useless to mark an expanded which has a parent object since the latter is marked.
4119 * Scavenged objects do not need any mark either, as the forwarding mark
4120 * tells that they are alive. */
4121 if (!eif_is_nested_expanded(flags) && (flags & EO_NEW)) {
4122 flags |= EO_MARK;
4123 zone->ov_flags = flags;
4124 }
4125
4126 /* Now explore all the references of the current object.
4127 * For each object of type 'type', Reference[type] gives the number
4128 * of references in the objects. The references are placed at the
4129 * beginning of the data space by the Eiffel compiler. Expanded
4130 * objects have a reference to them, so no special treatment is
4131 * required. Special objects full of references are also explored.
4132 */
4133 if (flags & EO_SPEC) { /* Special object */
4134 /* Special objects may have no references (e.g. an array of
4135 * integer or a string), so we have to skip those.
4136 */
4137 if (!(flags & EO_REF))
4138 goto done; /* Skip if no references */
4139
4140 /* At the end of the special data zone, there are two long integers
4141 * which give information to the run-time about the content of the
4142 * zone: the first is the 'count', i.e. the number of items, and the
4143 * second is the size of each item (for expandeds, the overhead of
4144 * the header is not taken into account).
4145 */
4146 count = offset = RT_SPECIAL_COUNT(current); /* Get # items */
4147
4148 if (flags & EO_TUPLE) {
4149 EIF_TYPED_VALUE *l_item = (EIF_TYPED_VALUE *) current;
4150 /* Don't forget that the first element of TUPLE is the BOOLEAN
4151 * `object_comparison' attribute. */
4152 l_item++;
4153 offset--;
4154 if (gen_scavenge & GS_ON) {
4155 for (; offset > 1; offset--, l_item++ ) {
4156 if (eif_is_reference_tuple_item(l_item)) {
4157 eif_reference_tuple_item(l_item) =
4158 hybrid_gen_mark (&eif_reference_tuple_item(l_item));
4159 }
4160 }
4161 } else {
4162 for (; offset > 1; offset--, l_item++ ) {
4163 if (eif_is_reference_tuple_item(l_item)) {
4164 (void) hybrid_gen_mark(&eif_reference_tuple_item(l_item));
4165 }
4166 }
4167 }
4168 if ((count >= 1) && eif_is_reference_tuple_item(l_item)) {
4169 /* If last element of TUPLE is a reference, then we continue the
4170 * iteration. */
4171 prev = &eif_reference_tuple_item(l_item);
4172 current = eif_reference_tuple_item(l_item);
4173 continue;
4174 } else
4175 goto done; /* End of iteration; exit procedure */
4176 } else if (flags & EO_COMP) {
4177 /* Treat arrays of expanded objects here, because we have a special
4178 * way of looping over the array (we must take the size of each item
4179 * into account).
4180 */
4181 size = RT_SPECIAL_ELEM_SIZE(current); /* Item's size */
4182 if (gen_scavenge & GS_ON) { /* Moving objects */
4183 object = (EIF_REFERENCE *) (current + OVERHEAD);/* First expanded */
4184 for (; offset > 1; offset--) { /* Loop over array */
4185 if (*object) {
4186 *object = hybrid_gen_mark(object);
4187 object = (EIF_REFERENCE *) ((EIF_REFERENCE) object + size);
4188 }
4189 }
4190 } else { /* Object can't move */
4191 object = (EIF_REFERENCE *) (current + OVERHEAD);/* First expanded */
4192 for (; offset > 1; offset--) { /* Loop over array */
4193 if (*object) {
4194 (void) hybrid_gen_mark(object);
4195 object = (EIF_REFERENCE *) ((EIF_REFERENCE) object + size);
4196 }
4197 }
4198 }
4199 /* Keep iterating if and only if the current object has at
4200 * least one attribute.
4201 */
4202 if (count >= 1) {
4203 prev = object;
4204 current = *object;
4205 continue;
4206 } else
4207 goto done; /* End of iteration; exit procedure */
4208 }
4209 } else {
4210 count = offset = References(zone->ov_dtype); /* # of references */
4211 }
4212
4213 #ifdef DEBUG
4214 dprintf(16)("hybrid_gen_mark: %d references for 0x%lx\n", offset, current);
4215 if (DEBUG & 16 && debug_ok(16)) {
4216 int i;
4217 for (i = 0; i < offset; i++)
4218 printf("\t0x%lx\n", *((EIF_REFERENCE *) current + i));
4219 }
4220 flush;
4221 #endif
4222
4223 /* Mark all objects under root, updating the references if scavenging */
4224
4225 if (gen_scavenge & GS_ON) {
4226 for (object = (EIF_REFERENCE *) current; offset > 1; offset--, object++) {
4227 if (*object) {
4228 *object = hybrid_gen_mark(object);
4229 }
4230 }
4231 } else {
4232 for (object = (EIF_REFERENCE *) current; offset > 1; offset--, object++) {
4233 if (*object) {
4234 (void) hybrid_gen_mark(object);
4235 }
4236 }
4237 }
4238
4239 if (count >= 1) {
4240 prev = object;
4241 current = *object;
4242 } else
4243 goto done;
4244
4245 } while(current);
4246
4247 done:
4248 /* Return the [new] address of the root object */
4249 zone = HEADER(root);
4250 overflow_stack_depth--;
4251 return ((zone->ov_size & B_FWD) ? zone->ov_fwd : root);
4252 }
4253
4254 rt_private EIF_REFERENCE gscavenge(EIF_REFERENCE root)
4255 {
4256 /* Generation scavenging of 'root', with tenuring done on the fly. The
4257 * address of the new object (scavenged or tenured) is returned.
4258 * Whenever tenuring fails, the flag GS_STOP is set which means that
4259 * scavenging is to be done without tenuring.
4260 */
4261 union overhead *zone; /* Malloc header zone */
4262 uint16 age; /* Object's age */
4263 uint16 flags; /* Eiffel flags */
4264 uint16 pid; /* SCOOP Processor ID */
4265 EIF_TYPE_INDEX dftype, dtype;
4266 EIF_REFERENCE new; /* Address of new object (tenured) */
4267 rt_uint_ptr size; /* Size of scavenged object */
4268 int ret; /* status returned by "epush" */
4269
4270 zone = HEADER(root); /* Info header */
4271 flags = zone->ov_flags; /* Eiffel flags */
4272 dftype = zone->ov_dftype;
4273 dtype = zone->ov_dtype;
4274 pid = zone->ov_pid;
4275
4276 if (gen_scavenge & GS_STOP) /* Generation scavenging was stopped */
4277 if (!(flags & EO_NEW)) /* Object inside scavenge zone */
4278 return scavenge(root, &sc_to.sc_top); /* Simple scavenging */
4279
4280 if (eif_is_nested_expanded(flags)) { /* Expanded object */
4281 if (!(flags & EO_NEW)) /* Object inside scavenge zone */
4282 return scavenge(root, &sc_to.sc_top); /* Update reference pointer */
4283 else
4284 return root; /* Do nothing for expanded objects */
4285 }
4286
4287 if ((flags & EO_STACK) || (zone->ov_size & B_C)) /* Eiffel object not under GC control */
4288 return root; /* Leave it alone */
4289
4290 /* Get the age of the object, update it and fill in the age table.
4291 * Tenure when necessary (i.e. when age is greater than 'tenure_age',
4292 * overflow is taken into account but this relies on the fact that
4293 * the age is not stored in the leftmost bits, to leave room for the
4294 * overflow bit--RAM). Note that when tenure is set to eif_tenure_max,
4295 * no object can ever be tenured.
4296 */
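	/* Hedged sketch of the decision implemented below (names as used in this
	 * file; purely illustrative, it adds no runtime logic):
	 *
	 *   age  = flags & EO_AGE;              -- age bits taken from the header
	 *   age += AGE_ONE;                     -- one more collection survived
	 *   if (age >= (tenure << AGE_OFFSET))  -- old enough: try to tenure
	 *       ... promote object out of the scavenge zone ...
	 *   else
	 *       ... keep it young, update age_table/size_table ...
	 */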
4297
4298 age = flags & EO_AGE; /* Fetch object's age */
4299 age += AGE_ONE; /* Make a wish, it's your birthday */
4300
4301 if (age >= (tenure << AGE_OFFSET)) { /* Object is to be tenured */
4302
4303 if (flags & EO_NEW) { /* Object outside scavenge zone */
4304
4305 /* Object is becoming old, so maybe it has to be remembered. Add
4306 * it to the remembered set for later perusal. If the object cannot
4307 * be remembered, it remains in the new generation.
4308 */
4309
4310 ret = epush (&rem_set, root);
4311 if (-1 != ret) { /* We could record it */
4312
4313 /* Mark the object as being old, but do not remove the EO_MARK
4314 * mark. See comment about GC_FAST in update_moved_set()--RAM.
4315 */
4316
4317 flags &= ~EO_NEW; /* No longer new */
4318 flags |= EO_OLD | EO_REM | EO_MARK; /* See below for EO_MARK */
4319 zone->ov_flags = flags; /* Store updated flags */
4320
4321 #ifdef DEBUG
4322 dprintf(4)("gscavenge: tenured 0x%lx at age %d (%d bytes)\n",
4323 root, age >> AGE_OFFSET, zone->ov_size & B_SIZE);
4324 flush;
4325 #endif
4326
4327 return root; /* Object did not move */
4328 }
4329
4330 } else { /* Object is inside scavenge zone */
4331
4332 /* Try tenuring after having marked the object as old. We get the size
4333 * from the object to perform the copy.
4334 * Addition: now it can be a special object if the EIF_GSZ_ALLOC_OPTIMIZATION
4335 * has been enabled, because we try to allocate them in the GSZ
4336 * as much as possible (70% speed improvement on the eiffel compiler).
4337 */
4338
4339 #ifndef EIF_GSZ_ALLOC_OPTIMIZATION
4340 CHECK ("Not a special", !(HEADER (root)->ov_flags & EO_SPEC));
4341 #endif
4342 size = zone->ov_size & B_SIZE; /* Size without header */
4343
4344 new = malloc_from_eiffel_list_no_gc (size); /* Try in Eiffel chunks first */
4345 if ((EIF_REFERENCE) 0 == new) { /* Out of memory */
4346 gen_scavenge |= GS_STOP; /* Stop generation scavenging */
4347 return scavenge(root, &sc_to.sc_top); /* Simple scavenge */
4348 }
4349
4350 /* Object is promoted, so add it to the remembered set for later
4351 * perusal (the set will be scanned and eventually some items will
4352 * be removed--object does not reference any young ones any more).
4353 */
4354
4355 ret = epush (&rem_set, new);
4356 if (ret == -1) { /* Cannot record it */
4357 gen_scavenge |= GS_STOP; /* Mark failure */
4358 eif_rt_xfree(new); /* Back where we found it */
4359 return scavenge(root, &sc_to.sc_top); /* Simple scavenge */
4360 }
4361
4362 /* Copy the object to its new location, then update the header: the
4363 * object is now an old one. Leave a forwarding pointer behind us.
4364 * The data part of the object is copied as-is, but references on
4365 * expanded will be correctly updated by the recursive process.
4366 * The address of the new object has been inserted in the remembered
4367 * set, so we must not remove the collector's mark otherwise it
4368 * would be considered dead by update_rem_set()--RAM.
4369 */
4370
4371 flags |= EO_OLD | EO_REM | EO_MARK; /* See below for EO_MARK */
4372
4373 /* It is imperative to mark the object we've just tenured, so that
4374 * we do not process it twice!! The tenured object will be processed
4375 * as we return from this routine, but hybrid_mark has already
4377 * dealt with EO_MARK, so it is our responsibility... This was a bug
4377 * I spent three days tracking--RAM.
4378 */
4379 memcpy (new, root, size); /* Copy data part */
4380
4381 #ifdef EIF_NO_SCAVENGING
4382 eif_panic ("Generation Scavenging is not disabled");
4383 #endif /* EIF_NO_SCAVENGING */
4384 zone->ov_size |= B_FWD; /* Mark object as forwarded */
4385 zone->ov_fwd = new; /* Leave forwarding pointer */
4386 zone = HEADER(new); /* New info zone */
4387 zone->ov_flags = flags; /* Copy flags for new object */
4388 zone->ov_dftype = dftype;
4389 zone->ov_dtype = dtype;
4390 zone->ov_pid = pid;
4391 zone->ov_size &= ~B_C; /* Object is an Eiffel one */
4392
4393 CHECK("Valid size", size <= (zone->ov_size & B_SIZE));
4394
4395 /* If the new block is not exactly the same size as the original, we
4396 * would be in trouble for EO_SPEC objects: some important information
4397 * about the special is kept at the end of the allocated memory, and
4398 * since the size changed it was not copied to its proper location by
4399 * the `memcpy' call above, so we do it now. */
4400 if ((flags & EO_SPEC) && (size < (zone->ov_size & B_SIZE))) {
4401 /* We cannot really increase the count, because otherwise it would
4402 * cause some strange behavior in the Eiffel code where a SPECIAL of
4403 * capacity 5 would magically end up with capacity 6 after a GC collection. */
4404 /* The memset below is commented out because it is not necessary. It is
4405 * left there to show that there is indeed no need to clean the extra
4406 * memory because in theory it should never be accessed. */
4407 /* memset (new + size - RT_SPECIAL_PADDED_DATA_SIZE, 0xFFFFFFFF , (zone->ov_size & B_SIZE) - size); */
4408 memcpy (new + (zone->ov_size & B_SIZE) - RT_SPECIAL_PADDED_DATA_SIZE, root + size - RT_SPECIAL_PADDED_DATA_SIZE, RT_SPECIAL_PADDED_DATA_SIZE);
4409 }
4410
4411 #ifdef DEBUG
4412 dprintf(4)("gscavenge: tenured 0x%lx to 0x%lx at age %d (%d bytes)\n",
4413 root, new, age >> AGE_OFFSET, size);
4414 flush;
4415 #endif
4416
4417 return new; /* Done */
4418 }
4419 }
4420
4421 /* Object is to be kept in the new generation */
4422
4423 #ifdef DEBUG
4424 dprintf(4)("gscavenge: keeping %s0x%lx at age %d (%d bytes)\n",
4425 flags & EO_NEW ? "new " : "",
4426 root, age >> AGE_OFFSET, zone->ov_size & B_SIZE);
4427 flush;
4428 #endif
4429
4430 age |= flags & (~EO_AGE); /* New Eiffel flags */
4431 zone->ov_flags = age & (~EO_MARK); /* Age merged, object unmarked */
4432 age = (age & EO_AGE) >> AGE_OFFSET; /* Scalar value of age */
4433 if (flags & EO_NEW) { /* Object allocated from free list */
4434 age_table[age]++; /* One more object for this age */
4435 return root; /* Object not moved */
4436 } else { /* Object is in the scavenge zone */
4437 size_table[age] += (zone->ov_size & B_SIZE) + OVERHEAD;
4438 return scavenge(root, &sc_to.sc_top); /* Move object */
4439 }
4440 /* NOTREACHED */
4441 }
4442
4443 rt_private void update_moved_set(void)
4444 {
4445 /* Update the moved set. This routine is called to remove all the dead
4446 * objects from the moved set.
4447 * Generation collection has its own treatment for that, as we need to
4448 * eventually collect the dead objects. If partial collection has been done
4449 * we may need to update some references.
4450 * Note that I could free the dead objects here, but I chose to wait for
4451 * the general sweep process because of the swapping problems--RAM. Only
4452 * the generation-based collectors have their free here, for the objects
4453 * outside the scavenge zone (those in the moved set, precisely).
4454 */
4455
4456 EIF_REFERENCE *obj; /* Pointer to objects held in a stack */
4457 rt_uint_ptr i; /* Number of items in stack chunk */
4458 union overhead *zone; /* Referenced object's header */
4459 struct stchunk *s; /* To walk through each stack's chunk */
4460 uint32 flags; /* Used only if GC_FAST */
4461 struct stack new_stack; /* The new stack built from the old one */
4462 int done = 0; /* Top of stack not reached yet */
4463
4464 memcpy (&new_stack, &moved_set, sizeof(struct stack));
4465 s = new_stack.st_cur = moved_set.st_hd; /* New empty stack */
4466 if (s) {
4467 new_stack.st_top = s->sk_arena; /* Lowest possible top */
4468 new_stack.st_end = s->sk_end; /* End of first chunk */
4469 }
4470
4471 #ifdef DEBUG
4472 dprintf(1)("update_moved_set: %d objects to be studied\n",
4473 nb_items(&moved_set));
4474 flush;
4475 #endif
4476
4477 /* If generation collection is active, a marked object is kept in the
4478 * moved set if and only if it has not been promoted (in which case the
4479 * EO_NEW bit has been cleared). In that case, the object is unmarked.
4480 * Otherwise, if object has been promoted, it is not kept, but we
4481 * unmark it only if it has not been remembered (because after the update
4482 * of the moved set, we're going to update the remembered set and any
4483 * alive object must still be marked).
4484 */
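	/* Summarized for clarity (generation collection case only; this mirrors
	 * the code below and adds nothing to it):
	 *
	 *   EO_MARK set, EO_NEW set     -> keep in moved set, clear EO_MARK
	 *   EO_MARK set, EO_NEW cleared -> drop (promoted); clear EO_MARK only
	 *                                  if EO_REM is not set
	 *   EO_MARK cleared             -> dead; freed if under GC control
	 */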
4485
4486 if (rt_g_data.status & GC_PART) { /* Partial collection */
4487 for (; s && !done; s = s->sk_next) {
4488 obj = s->sk_arena; /* Start of stack */
4489 if (s != moved_set.st_cur) /* Top is in a later chunk */
4490 i = s->sk_end - obj; /* Look at the whole chunk */
4491 else {
4492 i = moved_set.st_top - obj; /* Stop at the top */
4493 done = 1; /* Reached end of stack */
4494 }
4495 for (; i > 0; i--, obj++) { /* Stack viewed as an array */
4496 zone = HEADER(*obj); /* Referenced object */
4497 if (zone->ov_size & B_FWD) { /* Object forwarded? */
4498 zone = HEADER(zone->ov_fwd); /* Look at fwd object */
4499 if (zone->ov_flags & EO_NEW) /* It's a new one */
4500 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
4501 epush(&new_stack, (EIF_REFERENCE)(zone+1)); /* Update reference */
4502 } else if (EO_MOVED == (zone->ov_flags & EO_MOVED))
4503 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
4504 epush(&new_stack, (EIF_REFERENCE)(zone+1)); /* Remain as is */
4505 }
4506 }
4507 } else if (rt_g_data.status & GC_FAST) { /* Generation collection */
4508 for (; s && !done; s = s->sk_next) {
4509 obj = s->sk_arena; /* Start of stack */
4510 if (s != moved_set.st_cur) /* Top is in a later chunk */
4511 i = s->sk_end - obj; /* Look at the whole chunk */
4512 else {
4513 i = moved_set.st_top - obj; /* Stop at the top */
4514 done = 1; /* Reached end of stack */
4515 }
4516 for (; i > 0; i--, obj++) { /* Stack viewed as an array */
4517 zone = HEADER(*obj); /* Referenced object */
4518 flags = zone->ov_flags; /* Get Eiffel flags */
4519 if (flags & EO_MARK) { /* Object is alive? */
4520 if (flags & EO_NEW) { /* Not tenured */
4521 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
4522 epush(&new_stack, (EIF_REFERENCE)(zone+1)); /* Remains "as is" */
4523 zone->ov_flags &= ~EO_MARK; /* Unmark object */
4524 } else if (!(flags & EO_REM)) /* Not remembered */
4525 zone->ov_flags &= ~EO_MARK; /* Unmark object */
4526 } else if (!(zone->ov_size & B_C) && (zone->ov_size & B_BUSY))
4527 gfree(zone); /* Free if under GC control */
4528 }
4529 }
4530 } else { /* Mark and sweep */
4531 for (; s && !done; s = s->sk_next) {
4532 obj = s->sk_arena; /* Start of stack */
4533 if (s != moved_set.st_cur) /* Top is in a later chunk */
4534 i = s->sk_end - obj; /* Look at the whole chunk */
4535 else {
4536 i = moved_set.st_top - obj; /* Stop at the top */
4537 done = 1; /* Reached end of stack */
4538 }
4539 for (; i > 0; i--, obj++) { /* Stack viewed as an array */
4540 zone = HEADER(*obj); /* Referenced object */
4541 if (EO_MOVED == (zone->ov_flags & EO_MOVED))
4542 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
4543 epush(&new_stack,(EIF_REFERENCE)(zone+1)); /* Remains "as is" */
4544 }
4545 }
4546 }
4547
4548 #ifdef DEBUG
4549 dprintf(1)("update_moved_set: %d objects remaining\n",
4550 nb_items(&new_stack));
4551 flush;
4552 #endif
4553
4554 memcpy (&moved_set, &new_stack, sizeof(struct stack));
4555
4556 /* As for the remembered set (see comment in update_rem_set), we release the
4557 * spurious chunks used by the moved set stack.
4558 */
4559
4560 st_truncate(&moved_set);
4561 }
4562
4563 rt_private void update_rem_set(void)
4564 {
4565 /* Update the remembered set. This is an iterative process: for each item,
4566 * we look for references to new objects. If there are none, the object is
4567 * removed from the set. This "in place" updating cannot lead to a stack
4568 * overflow.
4569 * This routine checks that the object is alive only when doing a full
4570 * collection. Otherwise, the objects in the remembered set are old ones,
4571 * which means they are not affected by generation collections.
4572 */
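	/* Summarized (this mirrors the loop below): for each remembered object
	 *
	 *   forwarded (B_FWD)           -> follow ov_fwd and re-examine it
	 *   not forwarded, not EO_MARK  -> dead, silently dropped from the set
	 *   alive                       -> kept only if refers_new_object(),
	 *                                  otherwise EO_REM is cleared and it
	 *                                  is dropped
	 */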
4573
4574 EIF_REFERENCE *object; /* Current inspected object */
4575 rt_uint_ptr n; /* Number of objects to be dealt with */
4576 struct stack new_stack; /* The new stack built from the old one */
4577 EIF_REFERENCE current; /* Address of inspected object */
4578 char moving; /* May GC move objects around? */
4579 union overhead *zone; /* Malloc info zone */
4580 struct stchunk *s; /* To walk through each stack's chunk */
4581 int done = 0; /* Top of stack not reached yet */
4582 int generational; /* Are we in a generational cycle? */
4583
4584 memcpy (&new_stack, &rem_set, sizeof(struct stack));
4585 s = new_stack.st_cur = rem_set.st_hd; /* New empty stack */
4586 if (s) {
4587 new_stack.st_top = s->sk_arena; /* Lowest possible top */
4588 new_stack.st_end = s->sk_end; /* End of first chunk */
4589 }
4590
4591 moving = rt_g_data.status; /* Garbage collector's state */
4592 generational = moving & GC_FAST; /* Is this a collect() cycle? */
4593 moving &= GC_PART | GC_GEN; /* Current algorithm moves objects? */
4594
4595 #ifdef DEBUG
4596 dprintf(1)("update_rem_set: %d objects to be studied\n",
4597 nb_items(&rem_set));
4598 flush;
4599 #endif
4600
4601 for (; s && !done; s = s->sk_next) {
4602 object = s->sk_arena; /* Start of stack */
4603 if (s != rem_set.st_cur) /* Top is in a later chunk */
4604 n = s->sk_end - object; /* Look at the whole chunk */
4605 else {
4606 n = rem_set.st_top - object; /* Stop at the top */
4607 done = 1; /* Reached end of stack */
4608 }
4609
4610 for (; n > 0; n--, object++) {
4611 current = *object; /* Save that for later perusal */
4612 zone = HEADER(current); /* Object's header */
4613
4614 #ifdef DEBUG
4615 dprintf(4)("update_rem_set: at 0x%lx (type %d, %d bytes) %s%s\n",
4616 current,
4617 HEADER(
4618 zone->ov_size & B_FWD ? zone->ov_fwd : current
4619 )->ov_dftype,
4620 zone->ov_size & B_SIZE,
4621 zone->ov_size & B_FWD ? "forwarded" : "",
4622 zone->ov_flags & EO_MARK ? "marked" : ""
4623 );
4624 flush;
4625 #endif
4626
4627 /* When objects are moving, we need to focus on both the B_FWD
4628 * mark (which is an indication that the object is alive) and the
4629 * EO_MARK which is the traditional alive mark. Dead objects are
4630 * simply removed from the remembered set.
4631 */
4632
4633 if (moving) { /* Object may move? */
4634 if (zone->ov_size & B_FWD) /* Object was forwarded */
4635 current = zone->ov_fwd; /* Follow forwarding pointer */
4636 else if (!(zone->ov_flags & EO_MARK)) /* Object is dead */
4637 continue; /* Remove it from remembered set */
4638 } else if (!(zone->ov_flags & EO_MARK)) /* Object cannot move */
4639 continue; /* It's dead -- remove it */
4640
4641 /* In a generational cycle, we need to explicitly unmark all the
4642 * alive objects, whether we keep them in the remembered set or
4643 * not. Why? Because when we tenure those objects, we mark them
4644 * and also set the EO_OLD and EO_REM bit. If we do not unmark them
4645 * and those are once functions, then we will never scan the inside
4646 * of those objects. In other cycles, the unmarking will be done
4647 * by the full sweep operation.
4648 */
4649
4650 if (generational)
4651 HEADER(current)->ov_flags &= ~EO_MARK; /* Unmark object */
4652
4653 /* The objects referred to by the current object could have been
4654 * tenured, so we need to recheck whether it still belongs in the
4655 * remembered set.
4656 */
4657
4658 if (refers_new_object(current)) /* Object deserves remembering? */
4659 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
4660 epush(&new_stack, current); /* Save it for posterity */
4661 else
4662 HEADER(current)->ov_flags &= ~EO_REM; /* Not remembered */
4663
4664 #ifdef DEBUG
4665 dprintf(4)("update_rem_set: %s object %lx (type %d, %d bytes) %s\n",
4666 HEADER(current)->ov_flags & EO_OLD ? "old" :
4667 HEADER(current)->ov_flags & EO_NEW ? "new" : "gen",
4668 current, HEADER(current)->ov_dftype,
4669 HEADER(current)->ov_size & B_SIZE,
4670 HEADER(current)->ov_flags & EO_REM ? "remembered":"forgotten");
4671 flush;
4672 #endif
4673
4674 }
4675 }
4676
4677 #ifdef DEBUG
4678 dprintf(1)("update_rem_set: %d objects remaining\n",
4679 nb_items(&new_stack));
4680 flush;
4681 #endif
4682
4683 /* Objects remembered have been pushed on stack "new_stack". Now reset the
4684 * remembered set correctly by making "rem_set" refer to the new stack.
4685 */
4686
4687 memcpy (&rem_set, &new_stack, sizeof(struct stack));
4688
4689 /* Usually, the remembered set shrinks after a collection. The unused chunks
4690 * in the stack are freed. Yet, we'll have to call malloc() again to extend
4691 * the stack, but this raises the chances of being able to shrink the
4692 * process size--RAM.
4693 */
4694
4695 st_truncate(&rem_set);
4696 }
4697
4698 rt_private void update_memory_set (void)
4699 /* Traverse the memory_set, which contains all the objects that have a
4700 * dispose routine, and call the dispose routine
4701 * on the objects that are garbage.
4702 * To be compared with "update_rem_set ()".
4703 * -- ET
4704 */
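/* Summarized (this mirrors the loop below): a forwarded entry (B_FWD) survived
 * the collection, so the stack entry is updated (and kept only while the object
 * is still in the scavenge zone); a non-forwarded entry is garbage, so its
 * `dispose' routine is called with the GC stopped and assertions turned off,
 * and the entry is dropped. */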
4705 {
4706 EIF_GET_CONTEXT /* In MT-mode, for memory_set. */
4707
4708 EIF_REFERENCE *object; /* Current inspected object */
4709 rt_uint_ptr n; /* Number of objects to be dealt with */
4710 struct stack new_stack; /* The new stack built from the old one */
4711 EIF_REFERENCE current; /* Address of inspected object */
4712 union overhead *zone; /* Malloc info zone */
4713 struct stchunk *s; /* To walk through each stack's chunk */
4714 int saved_in_assertion; /* Saved assertion level. */
4715 char gc_status; /* Saved GC status. */
4716 EIF_TYPE_INDEX dtype; /* Dynamic type of Current object. */
4717 int done = 0; /* Top of stack not reached yet */
4718
4719 REQUIRE ("GC is not stopped", !(rt_g_data.status & GC_STOP));
4720
4721 /************************* End of preconditions. ***********************/
4722
4723 memcpy (&new_stack, &memory_set, sizeof(struct stack));
4724 s = new_stack.st_cur = memory_set.st_hd; /* New empty stack */
4725 if (s) {
4726 new_stack.st_top = s->sk_arena; /* Lowest possible top */
4727 new_stack.st_end = s->sk_end; /* End of first chunk */
4728 }
4729
4730 #ifdef DEBUG_UPDATE_MEMORY_SET
4731 printf("update_memory_set: %d objects to be studied\n",
4732 nb_items(&memory_set));
4733 #endif
4734
4735 /* Traverse stack. */
4736 for (; s && !done; s = s->sk_next)
4737 {
4738 object = s->sk_arena; /* Start of stack */
4739 if (s != memory_set.st_cur) /* Top is in a later chunk */
4740 n = s->sk_end - object; /* Look at the whole chunk */
4741 else
4742 {
4743 n = memory_set.st_top - object; /* Stop at the top */
4744 done = 1; /* Reached end of stack */
4745 }
4746
4747 for (; n > 0; n--, object++)
4748 {
4749 current = *object; /* Save that for later perusal */
4750 zone = HEADER(current); /* Object's header */
4751
4752 #ifdef DEBUG_UPDATE_MEMORY_SET
4753 printf("update_memory_set: at 0x%lx (type %d, %d bytes) %s\n",
4754 current,
4755 HEADER(
4756 zone->ov_size & B_FWD ? zone->ov_fwd : current
4757 )->ov_dftype,
4758 zone->ov_size & B_SIZE,
4759 zone->ov_size & B_FWD ? "forwarded" : "dead"
4760 );
4761 #endif /* DEBUG_UPDATE_MEMORY_SET */
4762
4763 /* We need to call the dispose routine on the objects that are
4764 * not alive any longer.
4765 * If the object holds the B_FWD flag, it has survived the
4766 * generational collection and we just need to update the proper
4767 * entry in the stack. Otherwise it is garbage: we must call the
4768 * dispose routine on it and remove it from the stack.
4769 */
4770
4771 if (zone->ov_size & B_FWD) { /* Object survived GS collection. */
4772 current = zone->ov_fwd; /* Update entry. */
4773
4774 CHECK ("Has dispose routine", Disp_rout (Dtype (HEADER (current) + 1)));
4775 CHECK ("Not forwarded twice", !(HEADER (current)->ov_size & B_FWD));
4776
4777 if (!(HEADER (current)->ov_flags & (EO_OLD | EO_NEW)))
4778 /* Forwarded object is still in GSZ. */
4779 /* FIXME: Manu 2009/04/29: Code is not safe if `epush' returns -1. */
4780 epush(&new_stack, current); /* Save it in the stack. */
4781 } else {
4782 /* Object is dead, we call dispose routine.*/
4783 CHECK ("Objects not in GSZ", !(zone->ov_flags & (EO_OLD | EO_NEW | EO_MARK | EO_SPEC)));
4784
4785 dtype = zone->ov_dtype; /* Need it for dispose. */
4786
4787 CHECK ("Has with dispose routine", Disp_rout (dtype));
4788
4789 gc_status = rt_g_data.status; /* Save GC status. */
4790 rt_g_data.status |= GC_STOP; /* Stop GC. */
4791
4792 /* We should disable invariants but not postconditions
4793 * (see `dispose' from IDENTIFIED).
4794 */
4795 saved_in_assertion = in_assertion; /* Save in_assertion. */
4796 in_assertion = ~0; /* Turn off assertion checking. */
4797 DISP(dtype,(EIF_REFERENCE) (zone + 1)); /* Call 'dispose'. */
4798 in_assertion = saved_in_assertion; /* Set in_assertion back. */
4799 rt_g_data.status = gc_status; /* Restore previous GC status.*/
4800 #ifdef EIF_EXPENSIVE_ASSERTIONS
4801 CHECK ("Cannot be in object ID stack",
4802 !st_has (&object_id_stack, (EIF_REFERENCE) (zone + 1)));
4803 #endif
4804 }
4805
4806 #ifdef DEBUG_UPDATE_MEMORY_SET
4807 printf("update_memory_set: object %lx (type %d, %d bytes) %s\n",
4808 current, HEADER(current)->ov_dftype,
4809 HEADER(current)->ov_size & B_SIZE,
4810 HEADER(current)->ov_size & B_FWD ? "updated":"disposed");
4811 #endif /* DEBUG_UPDATE_MEMORY_SET */
4812
4813 } /* for ... */
4814 } /* for ... */
4815
4816 #ifdef DEBUG_UPDATE_MEMORY_SET
4817 printf("update_memory_set: %d objects remaining\n",
4818 nb_items(&new_stack));
4819 #endif /* DEBUG_UPDATE_MEMORY_SET */
4820
4821 /* Memory objects have been pushed on stack "new_stack". Now reset the
4822 * memory set correctly by making "memory_set" refer to the new stack.
4823 */
4824
4825 memcpy (&memory_set, &new_stack, sizeof(struct stack));
4826
4827 /* Usually, the memory set shrinks after a collection. The unused chunks
4828 * in the stack are freed.
4829 */
4830
4831 st_truncate(&memory_set);
4832
4833 } /* update_memory_set() */
4834
4835
4836 rt_shared int refers_new_object(register EIF_REFERENCE object)
4837 {
4838 /* Does 'object' directly refer to a new object? Stop as soon as the
4839 * answer is known. Return a boolean value stating the result. This
4840 * routine is recursively called to deal with expanded objects. However,
4841 * there are few of them, so I chose to declare locals in registers--RAM.
4842 */
4843
4844 uint32 flags; /* Eiffel flags */
4845 int refs; /* Number of references */
4846 EIF_REFERENCE root; /* Address of referred object */
4847 uint32 size; /* Size in bytes of an item */
4848
4849 #ifdef MAY_PANIC
4850 /* If 'object' is a void reference, panic immediately */
4851 if (object == (EIF_REFERENCE) 0)
4852 eif_panic("remembered set botched");
4853 #endif
4854
4855 size = REFSIZ;
4856 flags = HEADER(object)->ov_flags; /* Fetch Eiffel flags */
4857 if (flags & EO_SPEC) { /* Special object */
4858 if (!(flags & EO_REF)) /* (see hybrid_mark() for details) */
4859 return 0; /* No references at all */
4860 refs = RT_SPECIAL_COUNT(object);
4861 if (flags & EO_TUPLE) {
4862 EIF_TYPED_VALUE *l_item = (EIF_TYPED_VALUE *) object;
4863 l_item ++;
4864 refs--;
4865 for (; refs > 0; refs--, l_item++) {
4866 if (eif_is_reference_tuple_item(l_item)) {
4867 root = eif_reference_tuple_item(l_item);
4868 if (root) {
4869 if (!(HEADER(root)->ov_flags & EO_OLD)) {
4870 return 1;
4871 }
4872 }
4873 }
4874 }
4875 /* Job is now done */
4876 return 0;
4877 } else if (flags & EO_COMP) { /* Composite object = has expandeds */
4878 size = RT_SPECIAL_ELEM_SIZE(object);
4879 object += OVERHEAD;
4880 /* Recurse here on each element.
4881 * The loop used for normal objects cannot be
4882 * reused here: it walks through references,
4883 * whereas the expanded items are stored
4884 * sequentially in the special, with no
4885 * reference pointing to them.
4886 */
4887 for (; refs != 0; refs--, object += size) {
4888 if (refers_new_object(object)) {
4889 return 1; /* Object references a young one */
4890 }
4891 }
4892 return 0; /* Object does not reference any new object */
4893 } else
4894 size = REFSIZ; /* Usual item size */
4895 } else {
4896 refs = References(Dtype(object)); /* Number of references */
4897 }
4898
4899 /* Loop over the referenced objects to see if there is a new one. If the
4900 * reference is on an expanded object, recursively explore that object.
4901 * No infinite loop is to be feared, as expanded objects can only have ONE
4902 * reference from the object within which they are held.
4903 * When checking for new object, I check for not EO_OLD, because the new
4904 * objects in the scavenge zone do not carry the EO_NEW mark.
4905 */
4906 for (; refs != 0; refs--, object += size) {
4907 root = *(EIF_REFERENCE *) object; /* Get reference */
4908 if (root == (EIF_REFERENCE) 0)
4909 continue; /* Skip void references */
4910 flags = HEADER(root)->ov_flags;
4911 if (eif_is_nested_expanded(flags)) { /* Expanded object, grrr... */
4912 if (refers_new_object(root))
4913 return 1; /* Object references a young one */
4914 } else if (!(flags & EO_OLD))
4915 return 1; /* Object references a young one */
4916 }
4917
4918 return 0; /* Object does not reference any new object */
4919 }
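/* Typical use (see update_rem_set() above): an old object stays in the
 * remembered set only while refers_new_object() reports at least one
 * reference to a non-EO_OLD object. A minimal sketch of that call site:
 *
 *   if (refers_new_object(current))
 *       epush(&new_stack, current);            -- still worth remembering
 *   else
 *       HEADER(current)->ov_flags &= ~EO_REM;  -- forget it
 */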
4920
4921 rt_private void swap_gen_zones(void)
4922 {
4923 /* After a generation scavenging, swap the 'from' and 'to' zones. There is
4924 * no need to loop over the old 'from' and dispose dead objects: no objects
4925 * with a dispose procedure are allowed to be allocated there.
4926 */
4927
4928 struct sc_zone temp; /* For swapping */
4929
4930 /* Before swapping, we have to compute the amount of bytes we copied and
4931 * the size of the original scavenging zone so that we can update the
4932 * statistics accordingly. Unfortunately, those figures are counting the
4933 * overhead associated with the objects--RAM.
4934 */
4935
4936 rt_g_data.mem_copied += sc_from.sc_top - sc_from.sc_arena; /* Initial */
4937 rt_g_data.mem_move += sc_to.sc_top - sc_to.sc_arena; /* Moved */
4938
4939 memcpy (&temp, &sc_from, sizeof(struct sc_zone));
4940 memcpy (&sc_from, &sc_to, sizeof(struct sc_zone));
4941 memcpy (&sc_to, &temp, sizeof(struct sc_zone));
4942
4943 sc_to.sc_top = sc_to.sc_arena; /* Make sure 'to' is empty */
4944 }
4945
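/* The following helper implements the usual GC tracking check done on an
 * assignment of `source' into `parent' (a hedged description; the precise
 * semantics of RTAN/RTAG/RTAM are defined in the runtime macros): if `source'
 * needs tracking and `parent' qualifies for remembering, `parent' is added to
 * the remembered set. When `parent' is a nested expanded object, the test is
 * applied to its enclosing object, computed by subtracting the value stored
 * in the header's size field. */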
4946 rt_public void check_gc_tracking (EIF_REFERENCE parent, EIF_REFERENCE source) {
4947 if (((source) != (EIF_REFERENCE) 0) && (RTAN(source))) {
4948 if (eif_is_nested_expanded(HEADER(parent)->ov_flags)) {
4949 EIF_REFERENCE z = (EIF_REFERENCE) parent - (HEADER (parent)->ov_size & B_SIZE);
4950 if (RTAG(z)) RTAM(z);
4951 } else if (RTAG(parent)) RTAM(parent);
4952 }
4953 }
4954
4955 rt_public void eremb(EIF_REFERENCE obj)
4956 {
4957 /* Remembers the object 'obj' by pushing it in the remembered set.
4958 * It is up to the caller to ensure that 'obj' is not already remembered
4959 * by testing the EO_REM bit in the header. In affectations, it is
4960 * normally done by the RTAR macro.
4961 */
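	/* Illustrative caller pattern (a minimal sketch; real call sites normally
	 * go through the RTAR macro rather than writing this by hand):
	 *
	 *   if (!(HEADER(obj)->ov_flags & EO_REM))
	 *       eremb(obj);     -- remember `obj' exactly once
	 */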
4962
4963 RT_GET_CONTEXT
4964 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_LOCK);
4965 if (-1 == epush(&rem_set, obj)) { /* Low on memory */
4966 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4967 urgent_plsc(&obj); /* Compacting garbage collection */
4968 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_LOCK);
4969 if (-1 == epush(&rem_set, obj)) { /* Still low on memory */
4970 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4971 enomem(MTC_NOARG); /* Critical exception */
4972 } else {
4973 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4974 }
4975 } else {
4976 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4977 }
4978
4979 #ifdef DEBUG
4980 dprintf(4)("eremb: remembering object %lx (type %d, %d bytes) at age %d\n",
4981 obj,
4982 HEADER(obj)->ov_dftype,
4983 HEADER(obj)->ov_size & B_SIZE,
4984 (HEADER(obj)->ov_flags & EO_AGE) >> AGE_OFFSET);
4985 flush;
4986 #endif
4987
4988 /* If we come here, the object was successfully pushed in the stack */
4989
4990 HEADER(obj)->ov_flags |= EO_REM; /* Mark it as remembered */
4991 }
4992
4993 rt_public void erembq(EIF_REFERENCE obj)
4994 {
4995 /* Quick version of eremb(), but without any call to the GC. This is
4996 * provided for special objects (we don't want to ask for GC hooks
4997 * on every 'put' operation).
4998 */
4999
5000 RT_GET_CONTEXT
5001 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_LOCK);
5002 if (-1 == epush(&rem_set, obj)) { /* Cannot record object */
5003 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
5004 enomem(MTC_NOARG); /* Critical exception */
5005 } else {
5006 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
5007 }
5008
5009 HEADER(obj)->ov_flags |= EO_REM; /* Mark object as remembered */
5010 }
5011
5012 /*
5013 doc: <routine name="eif_tenure_object" return_type="EIF_REFERENCE" export="public">
5014 doc: <summary>Given an object, if the object is still in the scavenge zone, promotes it outside of the scavenge zone. This is used for `eif_freeze'. If it fails, it raises an out of memory exception.</summary>
5015 doc: <param name="obj" type="EIF_REFERENCE *">Object to promote.</param>
5016 doc: <thread_safety>Safe</thread_safety>
5017 doc: <synchronization>None.</synchronization>
5018 doc: </routine>
5019 */
5020 rt_public EIF_REFERENCE eif_tenure_object(EIF_REFERENCE obj)
5021 {
5022 EIF_GET_CONTEXT
5023 union overhead *zone;
5024
5025 REQUIRE("object not null", obj);
5026
5027 zone = HEADER(obj);
5028
5029 /* If object is already outside the scavenge zone, nothing to be done. */
5030 if (!(zone->ov_size & B_BUSY)) {
5031 RT_GC_PROTECT(obj);
5032 /* We artificially change the age of the object to the maximum possible age.
5033 * If we cannot tenure the object, we trigger a full collection and if it is
5034 * still failing after that we raise a no-more memory exception. */
5035 zone->ov_flags |= EO_AGE; /* Maximum reachable age */
5036 collect(); /* Run a generation scavenging cycle */
5037 zone = HEADER(obj); /* Get new zone (object has moved) */
5038 if (!(zone->ov_size & B_BUSY)) { /* Object still in generation zone */
5039 urgent_plsc(&obj);
5040 if (!(zone->ov_size & B_BUSY)) { /* Object still in generation zone */
5041 enomem(MTC_NOARG); /* Critical exception */
5042 }
5043 }
5044 RT_GC_WEAN(obj);
5045 }
5046 ENSURE("Outside scavenge zone", (HEADER(obj)->ov_size) & B_BUSY);
5047 return obj; /* Promotion succeeded, return new location. */
5048 }
5049
5050 /*
5051 * Freeing objects: each class has a chance to redefine the 'dispose' routine
5052 * which will be called whenever the object is released under GC control.
5053 * Of course the dispose MUST NOT do anything fancy: the collection cycle is
5054 * done so all the pointers are safe, but the object is meant to be destroyed,
5055 * so its address cannot be kept to be put in a free-list for instance. To
5056 * minimize problems, the garbage collector is stopped before invoking the
5057 * 'dispose' routine--RAM.
5058 */
5059
5060 rt_shared void gfree(register union overhead *zone)
5061 /* Pointer on malloc info zone */
5062 {
5063 /* The entry Dispose(type) holds a pointer to the dispose function to
5064 * be called when freeing an entity of dynamic type 'type'. A void entry
5065 * means nothing has to be done. Of course, the object is physically
5066 * freed AFTER dispose has been called...
5067 */
5068
5069 EIF_GET_CONTEXT
5070 char gc_status; /* Saved GC status */
5071 int saved_in_assertion; /* Saved in_assertion value */
5072 EIF_TYPE_INDEX dtype; /* Dynamic type of object */
5073
5074 REQUIRE("Busy", zone->ov_size & B_BUSY);
5075
5076 if (!(zone->ov_size & B_FWD)) { /* If object has not been forwarded
5077 then call the dispose routine */
5078 if (zone->ov_flags & EO_DISP) {
5079 RT_GET_CONTEXT
5080 dtype = zone->ov_dtype;
5081 EIF_G_DATA_MUTEX_LOCK;
5082 gc_status = rt_g_data.status; /* Save GC status */
5083 rt_g_data.status |= GC_STOP; /* Stop GC */
5084 EIF_G_DATA_MUTEX_UNLOCK;
5085 saved_in_assertion = in_assertion; /* Save in_assertion */
5086 in_assertion = ~0; /* Turn off assertion checking */
5087 DISP(dtype,(EIF_REFERENCE) (zone + 1)); /* Call 'dispose' */
5088 in_assertion = saved_in_assertion; /* Set in_assertion back */
5089 EIF_G_DATA_MUTEX_LOCK;
5090 rt_g_data.status = gc_status; /* Restore previous GC status */
5091 EIF_G_DATA_MUTEX_UNLOCK;
5092 }
5093 }
5094
5095 #ifdef EIF_EXPENSIVE_ASSERTIONS
5096 CHECK ("Cannot be in object ID stack",
5097 !st_has (&object_id_stack, (EIF_REFERENCE) (zone + 1)));
5098 #endif
5099
5100 #ifdef DEBUG
5101 dprintf(8)("gfree: freeing object 0x%lx, DT = %d\n",
5102 zone + 1, dtype);
5103 flush;
5104 #endif
5105
5106 eif_rt_xfree((EIF_REFERENCE) (zone + 1)); /* Put object back to free-list */
5107 }
5108
5109 #endif
5110
5111 /* Once functions need to be kept in a dedicated stack, so that they are
5112 * always kept alive. Before returning, a once function needs to call the
5113 * following to properly record itself.
5114 */
5115
5116 rt_public EIF_REFERENCE * onceset(void)
5117 {
5118 /* Record result of once functions onto the once_set stack, so that the
5119 * run-time may update the address should the result be moved around by
5120 * the garbage collector (we are storing the address of a C static variable.
5121 */
5122
5123 EIF_GET_CONTEXT
5124 #ifdef DEBUG
5125 dprintf(32)("onceset");
5126 flush;
5127 #endif
5128
5129 if (-1 == epush(&once_set, (EIF_REFERENCE) 0 ))
5130 eraise("once function recording", EN_MEM);
5131
5132 return once_set.st_top - 1;
5133 }
5134
5135 rt_public void new_onceset(EIF_REFERENCE address)
5136 {
5137 /* Record result of once functions onto the once_set stack, so that the
5138 * run-time may update the address should the result be moved around by
5139 * the garbage collector (we are storing the address of a C static variable.
5140 */
5141
5142 EIF_GET_CONTEXT
5143 if (-1 == epush(&once_set, (EIF_REFERENCE) address))
5144 eraise("once function recording", EN_MEM);
5145 }
5146
5147 /*
5148 doc: <routine name="register_oms" return_type="void" export="public">
5149 doc: <summary>Register an address of a once manifest string in the
5150 doc: `oms_set' so that the run-time may update the address should
5151 doc: the string object be moved around by the garbage collector.</summary>
5152 doc: <param name="address" type="EIF_REFERENCE *">Address of a memory location
5153 doc: that stores a string object.</param>
5154 doc: <thread_safety>Safe</thread_safety>
5155 doc: <synchronization>Uses only per thread data.</synchronization>
5156 doc: </routine>
5157 */
5158 rt_public void register_oms (EIF_REFERENCE *address)
5159 {
5160 EIF_GET_CONTEXT
5161 if (-1 == epush(&oms_set, address)) {
5162 eraise("once manifest string recording", EN_MEM);
5163 }
5164 }
5165
5166 #if defined(WORKBENCH) || defined(EIF_THREADS)
5167 /*
5168 doc: <routine name="alloc_once_indexes" export="shared">
5169 doc: <summary>Allocate array to store once routine indexes during start-up.</summary>
5170 doc: <thread_safety>Not safe</thread_safety>
5171 doc: <synchronization>Not required when used during start-up in main thread.</synchronization>
5172 doc: </routine>
5173 */
5174 rt_shared void alloc_once_indexes (void)
5175 {
5176 if ((EIF_once_indexes == (BODY_INDEX *) 0) && (eif_nb_org_routines != 0)) {
5177 /* Indexes have not been allocated yet. */
5178 EIF_once_indexes = (BODY_INDEX *) eif_calloc (eif_nb_org_routines, sizeof *EIF_once_indexes);
5179 if (EIF_once_indexes == (BODY_INDEX *) 0) { /* Out of memory */
5180 enomem ();
5181 }
5182 }
5183 #ifdef EIF_THREADS
5184 if ((EIF_process_once_indexes == (BODY_INDEX *) 0) && (eif_nb_org_routines != 0)) {
5185 /* Indexes have not been allocated yet. */
5186 EIF_process_once_indexes = (BODY_INDEX *) eif_calloc (eif_nb_org_routines, sizeof *EIF_process_once_indexes);
5187 if (EIF_process_once_indexes == (BODY_INDEX *) 0) { /* Out of memory */
5188 enomem ();
5189 }
5190 }
5191 #endif
5192 }
5193
5194 /*
5195 doc: <routine name="free_once_indexes" export="shared">
5196 doc: <summary>Free array of once routine indexes.</summary>
5197 doc: <thread_safety>Not safe</thread_safety>
5198 doc: <synchronization>Not required when used during start-up in main thread.</synchronization>
5199 doc: </routine>
5200 */
5201 rt_shared void free_once_indexes (void)
5202 {
5203 if (EIF_once_indexes != (BODY_INDEX *) 0) {
5204 eif_free (EIF_once_indexes);
5205 EIF_once_indexes = (BODY_INDEX *) 0;
5206 }
5207 #ifdef EIF_THREADS
5208 if (EIF_process_once_indexes != (BODY_INDEX *) 0) {
5209 eif_free (EIF_process_once_indexes);
5210 EIF_process_once_indexes = (BODY_INDEX *) 0;
5211 }
5212 #endif
5213 }
5214
5215 /*
5216 doc: <routine name="once_index" return_type="ONCE_INDEX" export="public">
5217 doc: <summary>Calculate index of a once routine in an array of
5218 doc: once routine results given its code index.</summary>
5219 doc: <param name="code_id" type="BODY_INDEX">Code index of a once routine.</param>
5220 doc: <thread_safety>Not safe</thread_safety>
5221 doc: <synchronization>Not required when used during start-up in main thread.</synchronization>
5222 doc: </routine>
5223 */
5224 rt_public ONCE_INDEX once_index (BODY_INDEX code_id)
5225 {
5226 BODY_INDEX * p = EIF_once_indexes;
5227 ONCE_INDEX i = 0;
5228 int done = 0;
5229 while (!done) {
5230 BODY_INDEX index = p [i];
5231 if (index == code_id) {
5232 /* Once routine with this `code_id' is found. */
5233 /* Use it. */
5234 break;
5235 } else if (index == 0) {
5236 /* Once routine with this `code_id' is not found. */
5237 /* Add it. */
5238 p [i] = code_id;
5239 EIF_once_count++;
5240 break;
5241 }
5242 i++;
5243 }
5244 return i;
5245 }
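/* Note: the index arrays are allocated with eif_calloc() in
 * alloc_once_indexes(), so a zero entry marks the first free slot; the linear
 * scan in once_index() therefore both looks up an existing `code_id' and
 * registers a new one on first use (the same scheme is used by
 * process_once_index() below in multithreaded mode). */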
5246 #endif
5247
5248 #ifdef EIF_THREADS
5249 /*
5250 doc: <routine name="process_once_index" return_type="ONCE_INDEX" export="public">
5251 doc: <summary>Calculate index of a process-relative once routine in an array of
5252 doc: process-relative once routine results given its code index.</summary>
5253 doc: <param name="code_id" type="BODY_INDEX">Code index of a once routine.</param>
5254 doc: <thread_safety>Not safe</thread_safety>
5255 doc: <synchronization>Not required when used during start-up in main thread.</synchronization>
5256 doc: </routine>
5257 */
5258 rt_public ONCE_INDEX process_once_index (BODY_INDEX code_id)
5259 {
5260 BODY_INDEX * p = EIF_process_once_indexes;
5261 ONCE_INDEX i = 0;
5262 int done = 0;
5263 while (!done) {
5264 BODY_INDEX index = p [i];
5265 if (index == code_id) {
5266 /* Once routine with this `code_id' is found. */
5267 /* Use it. */
5268 break;
5269 } else if (index == 0) {
5270 /* Once routine with this `code_id' is not found. */
5271 /* Add it. */
5272 p [i] = code_id;
5273 EIF_process_once_count++;
5274 break;
5275 }
5276 i++;
5277 }
5278 return i;
5279 }
5280
5281 /*
5282 doc: <routine name="globalonceset" export="public">
5283 doc: <summary>Insert a global once result `address' which is an EIF_REFERENCE into `global_once_set' so that GC can update `address' and track objects references by `address' during a collection.</summary>
5284 doc: <param name="address" type="EIF_REFERENCE">Address that needs to be tracked/protected.</param>
5285 doc: <thread_safety>Safe</thread_safety>
5286 doc: <synchronization>Through `eif_global_once_set_mutex'</synchronization>
5287 doc: </routine>
5288 */
5289
5290 rt_public void globalonceset(EIF_REFERENCE address)
5291 {
5292 RT_GET_CONTEXT
5293 EIF_ASYNC_SAFE_CS_LOCK(eif_global_once_set_mutex);
5294 if (-1 == epush(&global_once_set, address)) {
5295 EIF_ASYNC_SAFE_CS_UNLOCK(eif_global_once_set_mutex);
5296 eraise("once function recording", EN_MEM);
5297 }
5298 EIF_ASYNC_SAFE_CS_UNLOCK(eif_global_once_set_mutex);
5299 }
5300 #endif
5301
5302 /*