
Contents of /branches/eth/eve/Src/C/run-time/malloc.c



Revision 92811
Fri Jul 26 04:35:53 2013 UTC by jasonw
File MIME type: text/plain
File size: 165718 byte(s)
<<Merged from trunk#92810.>>
1 /*
2 description: "Memory allocation management routines."
3 date: "$Date$"
4 revision: "$Revision$"
5 copyright: "Copyright (c) 1985-2013, Eiffel Software."
6 license: "GPL version 2 see http://www.eiffel.com/licensing/gpl.txt)"
7 licensing_options: "Commercial license is available at http://www.eiffel.com/licensing"
8 copying: "[
9 This file is part of Eiffel Software's Runtime.
10
11 Eiffel Software's Runtime is free software; you can
12 redistribute it and/or modify it under the terms of the
13 GNU General Public License as published by the Free
14 Software Foundation, version 2 of the License
15 (available at the URL listed under "license" above).
16
17 Eiffel Software's Runtime is distributed in the hope
18 that it will be useful, but WITHOUT ANY WARRANTY;
19 without even the implied warranty of MERCHANTABILITY
20 or FITNESS FOR A PARTICULAR PURPOSE.
21 See the GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public
24 License along with Eiffel Software's Runtime; if not,
25 write to the Free Software Foundation, Inc.,
26 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 ]"
28 source: "[
29 Eiffel Software
30 356 Storke Road, Goleta, CA 93117 USA
31 Telephone 805-685-1006, Fax 805-685-6869
32 Website http://www.eiffel.com
33 Customer support http://support.eiffel.com
34 ]"
35 */
36
37 /*
38 doc:<file name="malloc.c" header="eif_malloc.h" version="$Id$" summary="Memory allocation management routines">
39 */
40
41 /*#define MEMCHK */
42 /*#define MEM_STAT */
43
44 #include "eif_portable.h"
45 #include "eif_project.h"
46 #include "rt_lmalloc.h" /* for eif_calloc, eif_malloc, eif_free */
47 #include <errno.h> /* For system calls error report */
48 #include <sys/types.h> /* For caddr_t */
49 #include "rt_assert.h"
50 #include <stdio.h> /* For eif_trace_types() */
51 #include <signal.h>
52
53 #include "eif_eiffel.h" /* For bcopy/memcpy */
54 #include "rt_timer.h" /* For getcputime */
55 #include "rt_malloc.h"
56 #include "rt_macros.h"
57 #include "rt_garcol.h" /* For Eiffel flags and function declarations */
58 #include "rt_threads.h"
59 #include "rt_gen_types.h"
60 #include "rt_gen_conf.h"
61 #include "eif_except.h" /* For exception raising */
62 #include "eif_local.h" /* For epop() */
63 #include "rt_sig.h"
64 #include "rt_err_msg.h"
65 #include "rt_globals.h"
66 #include "rt_struct.h"
67 #if ! defined CUSTOM || defined NEED_OBJECT_ID_H
68 #include "rt_object_id.h" /* For the object id and separate stacks */
69 #endif
70 #ifdef VXWORKS
71 #include <string.h>
72 #endif
73 #include "rt_main.h"
74
75 #ifdef BOEHM_GC
76 #include "rt_boehm.h"
77 #ifdef WORKBENCH
78 #include "rt_interp.h" /* For definition of `call_disp' */
79 #endif
80 #endif
81
82 /* For debugging */
83 #define dprintf(n) if (DEBUG & (n)) printf
84 #define flush fflush(stdout)
85
86 /*#define MEMCHK */ /* Define for memory checker */
87 /*#define EMCHK */ /* Define for calls to memck */
88 /*#define MEMPANIC */ /* Panic if memck reports a trouble */
89 /*#define DEBUG 63 */ /* Activate debugging code */
90
91 #ifdef MEMPANIC
92 #define mempanic eif_panic("memory inconsistency")
93 #else
94 #define mempanic fflush(stdout);
95 #endif
96
97 #ifdef ISE_GC
98 /*
99 doc: <description>
100 doc: The handling of the free list has changed over time. Initially, insertions to the free list through `connect_free_list' guaranteed that the blocks were inserted in increasing order of their address. It was thought that although this costs something on insertion, it can drastically improve performance: since the malloc routine uses a first fit in the free list, objects do not get scattered throughout the whole memory, which should limit swapping overhead and enable the process to give memory back to the kernel. You get this behavior by defining EIF_SORTED_FREE_LIST. With it, removal of a block also had to traverse the list to find the right block to remove.
101 doc:
102 doc: We found that if you have a lot of allocated memory and the free lists are quite full, this kills the performance of the GC. This is why, if EIF_SORTED_FREE_LIST is not defined, we do not perform any sorting: `connect_free_list' always inserts at the first position in the free list. The buffer caches are then useless and are not defined in that mode.
103 doc:
104 doc: There remains the cost of removal in `disconnect_free_list'. For blocks whose size is larger than the size of a pointer, we store a pointer giving us the location of the previous element, making our free list a two-way list. This only works for i > 0; for i == 0 we still have to do a linear search, but hopefully 0-sized blocks are rare.
105 doc:
106 doc: In the case where EIF_SORTED_FREE_LIST is defined, the above two-way list is also available. We thought that, by having the previous element, we could make insertion faster by going backwards from the buffer cache rather than from the beginning when the element we try to insert is below the buffer cache, but our experiment on the compiler shows that it is actually a degradation. In case you want this behavior anyway, simply define EIF_SORTED_FREE_LIST_BACKWARD_TRAVERSAL.
107 doc: </description>
108 */
109
110 /* Give the type of an hlist, by doing pointer comparison (classic).
111 * Also give the address of the hlist of a given type and the address of
112 * the buffer related to a free list.
113 */
114 #define CHUNK_TYPE(c) (((c) == c_hlist)? C_T : EIFFEL_T)
115 #define FREE_LIST(t) ((t)? c_hlist : e_hlist)
116 #ifdef EIF_SORTED_FREE_LIST
117 #define BUFFER(c) (((c) == c_hlist)? c_buffer : e_buffer)
118 #endif
119 #define NEXT(zone) (zone)->ov_next
120 #define PREVIOUS(zone) (*(union overhead **) (zone + 1))
121
122 /* Objects of tiny size (0 or 4 bytes) are very expensive to manage in the free-list, thus we do not keep them that small,
123 * but make them large enough to hold a pointer to the previous block (see PREVIOUS for where it is used). */
124 #define MIN_OBJECT_SIZE(n) ((n) > sizeof(union overhead *) ? (n) : sizeof(union overhead *))
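/* The sketch below is purely illustrative and kept out of compilation with `#if 0': it shows
 * how a first-position insertion into `hlist[i]' can maintain the two-way list described
 * above when EIF_SORTED_FREE_LIST is not defined. The real logic lives in `connect_free_list'
 * and `disconnect_free_list' further down in this file; the helper name and its exact steps
 * here are simplifying assumptions, not the runtime's actual code.
 */
#if 0
rt_private void sketch_connect_first(union overhead *zone, rt_uint_ptr i, union overhead **hlist)
{
	union overhead *first = hlist[i];	/* Current head of the list for size index `i'. */

	NEXT(zone) = first;			/* New block becomes the head. */
	hlist[i] = zone;
	if (i > 0) {				/* Only blocks big enough to hold a back pointer. */
		PREVIOUS(zone) = NULL;		/* The head has no predecessor. */
		if (first) {
			PREVIOUS(first) = zone;	/* Old head now points back to the new head. */
		}
	}
}
#endif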
125
126 /* Fast access to `hlist'. All sizes between `0' and HLIST_SIZE_LIMIT - ALIGNMAX
127 * with their own padding which is a multiple of ALIGNMAX
128 * have their own entry in the `hlist'.
129 * E.g.: 0, 8, 16, ...., 504 in case where ALIGNMAX = 8
130 * E.g.: 0, 16, 32, ...., 1008 in case where ALIGNMAX = 16
131
132 * Above or at `HLIST_SIZE_LIMIT', the corresponding entry `i' (i >= HLIST_INDEX_LIMIT) has
133 * the sizes between 2^(i - HLIST_INDEX_LIMIT + HLIST_DEFAULT_SHIFT) and
134 * (2^(i - HLIST_INDEX_LIMIT + HLIST_DEFAULT_SHIFT + 1) - ALIGNMAX).
135 * Explanation: Since we already occupy slots below HLIST_INDEX_LIMIT for sizes smaller
136 * than HLIST_SIZE_LIMIT, there will be non-used slots after slot HLIST_INDEX_LIMIT.
137 * Because we cannot use ALIGNMAX for testing in preprocessor macros, we are taking the minimum
138 * number of slots that we are guaranteed not to occupy when ALIGNMAX is at its minimum value;
139 * this value is HLIST_DEFAULT_SHIFT (the minimum possible value for ALIGNMAX being 4, this gives
140 * a smallest possible HLIST_SIZE_LIMIT of 256, i.e. 2^HLIST_DEFAULT_SHIFT).
141 * Note: on other platforms where ALIGNMAX is greater, we get some more unused slots and it would
142 * be great to dynamically compute HLIST_DEFAULT_SHIFT at compile time, but this is not
143 * yet possible.
144 *
145 * Because the maximum size we can allocate is either 2^27 or 2^59 (depending on whether or not
146 * you are running 64 bits), this gives us (27 - HLIST_DEFAULT_SHIFT) or (59 - HLIST_DEFAULT_SHIFT)
147 * more possibilities in addition to the HLIST_INDEX_LIMIT possibilities, thus having the definition
148 * below for NBLOCKS.
149 */
150
151 #define HLIST_INDEX_LIMIT 64
152 #define HLIST_DEFAULT_SHIFT 8
153
154 #ifdef EIF_64_BITS
155 #define NBLOCKS HLIST_INDEX_LIMIT + 59 - HLIST_DEFAULT_SHIFT
156 #else
157 #define NBLOCKS HLIST_INDEX_LIMIT + 27 - HLIST_DEFAULT_SHIFT
158 #endif
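/* For instance, with HLIST_INDEX_LIMIT = 64 and HLIST_DEFAULT_SHIFT = 8 this yields
 * NBLOCKS = 64 + 27 - 8 = 83 free-list heads on 32-bit platforms and
 * NBLOCKS = 64 + 59 - 8 = 115 on 64-bit platforms. */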
159
160 #define HLIST_SIZE_LIMIT HLIST_INDEX_LIMIT * ALIGNMAX
161 #define HLIST_INDEX(size) (((size) < HLIST_SIZE_LIMIT)? \
162 (uint32) (size / ALIGNMAX) : compute_hlist_index (size))
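/* Purely illustrative sketch, kept out of compilation with `#if 0', of how the index used by
 * HLIST_INDEX for sizes at or above HLIST_SIZE_LIMIT could be computed, following the comment
 * above: find `i' >= HLIST_INDEX_LIMIT such that 2^(i - HLIST_INDEX_LIMIT + HLIST_DEFAULT_SHIFT)
 * <= size. The actual `compute_hlist_index' is defined later in this file and may differ.
 */
#if 0
rt_private uint32 sketch_hlist_index(size_t size)
{
	uint32 i = HLIST_INDEX_LIMIT;
	size_t n = size >> (HLIST_DEFAULT_SHIFT + 1);

	while (n != 0) {	/* One more slot per extra power of two. */
		n >>= 1;
		i++;
	}
	return i;		/* E.g. size = 2^9 yields HLIST_INDEX_LIMIT + 1. */
}
#endif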
163
164 /* For eif_trace_types() */
165
166 #define CHUNK_T 0 /* Scanning a chunk */
167 #define ZONE_T 1 /* Scanning a generation scavenging zone */
168
169 /* The main data-structures for eif_malloc are filled-in statically at
170 * compile time, so that no special initialization routine is
171 * necessary. (Except in MT mode --ZS)
172 */
173
174 /*
175 doc: <attribute name="rt_m_data" return_type="struct emallinfo" export="shared">
176 doc: <summary>This structure records some general information about the memory: the number of chunks, etc. This information is available via the eif_rt_meminfo() routine. Only used by current and garcol.c</summary>
177 doc: <access>Read/Write</access>
178 doc: <thread_safety>Safe</thread_safety>
179 doc: <synchronization>Through `eif_free_list_mutex' or GC synchronization.</synchronization>
180 doc: <eiffel_classes>MEM_INFO</eiffel_classes>
181 doc: </attribute>
182 */
183 rt_shared struct emallinfo rt_m_data = {
184 0, /* ml_chunk */
185 0, /* ml_total */
186 0, /* ml_used */
187 0, /* ml_over */
188 };
189
190 /*
191 doc: <attribute name="rt_c_data" return_type="struct emallinfo" export="shared">
192 doc: <summary>For C memory, we keep track of general information too. This enables us to pilot the garbage collector correctly or to call coalescing over the memory only if it has a chance to succeed. Only used by current and garcol.c</summary>
193 doc: <access>Read/Write</access>
194 doc: <thread_safety>Safe</thread_safety>
195 doc: <synchronization>Through `eif_free_list_mutex' or GC synchronization.</synchronization>
196 doc: <eiffel_classes>MEM_INFO</eiffel_classes>
197 doc: </attribute>
198 */
199 rt_shared struct emallinfo rt_c_data = { /* Informations on C memory */
200 0, /* ml_chunk */
201 0, /* ml_total */
202 0, /* ml_used */
203 0, /* ml_over */
204 };
205
206 /*
207 doc: <attribute name="rt_e_data" return_type="struct emallinfo" export="shared">
208 doc: <summary>For Eiffel memory, we keep track of general information too. This enables us to pilot the garbage collector correctly or to call coalescing over the memory only if it has a chance to succeed. Only used by current and garcol.c</summary>
209 doc: <access>Read/Write</access>
210 doc: <thread_safety>Safe</thread_safety>
211 doc: <synchronization>Through `eif_free_list_mutex' or GC synchronization.</synchronization>
212 doc: <eiffel_classes>MEM_INFO</eiffel_classes>
213 doc: <fixme>Unsafe access to `rt_e_data' from `acollect' when compiled with `-DEIF_CONDITIONAL_COLLECT' option.</fixme>
214 doc: </attribute>
215 */
216 rt_shared struct emallinfo rt_e_data = { /* Informations on Eiffel memory */
217 0, /* ml_chunk */
218 0, /* ml_total */
219 0, /* ml_used */
220 0, /* ml_over */
221 };
222
223 /* */
224 /*
225 doc: <attribute name="cklst" return_type="struct ck_list" export="shared">
226 doc: <summary>Record head and tail of the chunk list. Only used by current and garcol.c</summary>
227 doc: <access>Read/Write</access>
228 doc: <thread_safety>Safe</thread_safety>
229 doc: <synchronization>Through `eif_free_list_mutex' or GC synchronization.</synchronization>
230 doc: </attribute>
231 */
232 rt_shared struct ck_list cklst = {
233 (struct chunk *) 0, /* ck_head */
234 (struct chunk *) 0, /* ck_tail */
235 (struct chunk *) 0, /* cck_head */
236 (struct chunk *) 0, /* cck_tail */
237 (struct chunk *) 0, /* eck_head */
238 (struct chunk *) 0, /* eck_tail */
239 (struct chunk *) 0, /* cursor */
240 (struct chunk *) 0, /* c_cursor */
241 (struct chunk *) 0, /* e_cursor */
242 };
243
244 /*
245 doc: <attribute name="c_hlist" return_type="union overhead * [NBLOCKS]" export="private">
246 doc: <summary>Records all C blocks with roughly the same size. The entry at index 'i' is a block whose size is at least 2^i. All the blocks with the same size are chained, and the head of each list is kept in the array. As an exception, index 0 holds blocks with a size of zero, and as there cannot be blocks of size 1 (OVERHEAD > 1 anyway), it's ok--RAM.</summary>
247 doc: <access>Read/Write</access>
248 doc: <indexing>i for access to block of size 2^i</indexing>
249 doc: <thread_safety>Safe</thread_safety>
250 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
251 doc: </attribute>
252 */
253
254 rt_private union overhead *c_hlist[NBLOCKS];
255
256 /*
257 doc: <attribute name="e_hlist" return_type="union overhead * [NBLOCKS]" export="private">
258 doc: <summary>Records all Eiffel blocks with roughly the same size. The entry at index 'i' is a block whose size is at least 2^i. All the blocks with the same size are chained, and the head of each list is kept in the array. As an exception, index 0 holds blocks with a size of zero, and as there cannot be blocks of size 1 (OVERHEAD > 1 anyway), it's ok--RAM.</summary>
259 doc: <access>Read/Write</access>
260 doc: <indexing>i for access to block of size 2^i</indexing>
261 doc: <thread_safety>Safe</thread_safety>
262 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
263 doc: </attribute>
264 */
265 rt_private union overhead *e_hlist[NBLOCKS];
266
267 #ifdef EIF_SORTED_FREE_LIST
268 /*
269 doc: <attribute name="c_buffer" return_type="union overhead * [NBLOCKS]" export="private">
270 doc: <summary>This array acts as a buffer cache for every operation in the C free list. It simply records the address of the last access. Whenever we wish to insert/find an element in the list, we first look at the buffer cache value to see if we can start traversing from that point.</summary>
271 doc: <access>Read/Write</access>
272 doc: <indexing>i for access to block of size 2^i</indexing>
273 doc: <thread_safety>Safe</thread_safety>
274 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
275 doc: </attribute>
276 */
277 rt_private union overhead *c_buffer[NBLOCKS];
278
279 /*
280 doc: <attribute name="e_buffer" return_type="union overhead * [NBLOCKS]" export="private">
281 doc: <summary>This array acts as a buffer cache for every operation in the Eiffel free list. It simply records the address of the last access. Whenever we wish to insert/find an element in the list, we first look at the buffer cache value to see if we can start traversing from that point.</summary>
282 doc: <access>Read/Write</access>
283 doc: <indexing>i for access to block of size 2^i</indexing>
284 doc: <thread_safety>Safe</thread_safety>
285 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
286 doc: </attribute>
287 */
288 rt_private union overhead *e_buffer[NBLOCKS];
289 #endif
290
291 /*
292 doc: <attribute name="sc_from" return_type="struct sc_zone" export="shared">
293 doc: <summary>The sc_from zone is the `from' zone used by the generation scavenging garbage collector. It is shared with the garbage collector. This zone may be put back into the free list in case we are low in RAM.</summary>
294 doc: <access>Read/Write</access>
295 doc: <thread_safety>Safe</thread_safety>
296 doc: <synchronization>Through `eif_gc_gsz_mutex'.</synchronization>
297 doc: </attribute>
298 */
299 rt_shared struct sc_zone sc_from;
300
301 /*
302 doc: <attribute name="sc_to" return_type="struct sc_zone" export="shared">
303 doc: <summary>The sc_to zone is the `to' zone used by the generation scavenging garbage collector. It is shared with the garbage collector. This zone may be put back into the free list in case we are low in RAM.</summary>
304 doc: <access>Read/Write</access>
305 doc: <thread_safety>Safe</thread_safety>
306 doc: <synchronization>Through `eif_gc_gsz_mutex'.</synchronization>
307 doc: </attribute>
308 */
309 rt_shared struct sc_zone sc_to;
310
311 /*
312 doc: <attribute name="gen_scavenge" return_type="uint32" export="shared">
313 doc: <summary>Generation scavenging status which can be either GS_OFF (disabled) or GS_ON (enabled). When it is GS_ON, it can be temporarily disabled, in which case it holds the GS_STOP flag. By default it is GS_OFF, and it will be enabled by `create_scavenge_zones' if `cc_for_speed' is enabled and enough memory is available to allocate the zones.</summary>
314 doc: <access>Read/Write</access>
315 doc: <thread_safety>Safe</thread_safety>
316 doc: <synchronization>Through GC synchronization.</synchronization>
317 doc: <fixme>The visibility of the change is not guaranteed among all threads.</fixme>
318 doc: </attribute>
319 */
320 rt_shared uint32 gen_scavenge = GS_OFF;
321
322 /*
323 doc: <attribute name="eiffel_usage" return_type="rt_uint_ptr" export="shared">
324 doc: <summary>Monitor Eiffel memory usage. Each time an Eiffel object is created outside the scavenge zone (via emalloc or tenuring), we record its size in the `eiffel_usage' variable. Then, once the amount of allocated data goes beyond `th_alloc', a cycle of acollect() is run.</summary>
325 doc: <access>Read/Write</access>
326 doc: <thread_safety>Safe</thread_safety>
327 doc: <synchronization>Through `eiffel_usage_mutex' or GC synchronization.</synchronization>
328 doc: </attribute>
329 */
330 rt_shared rt_uint_ptr eiffel_usage = 0;
331
332 /*
333 doc: <attribute name="eif_max_mem" return_type="size_t" export="shared">
334 doc: <summary>This variable is the maximum amount of memory the run-time can allocate. If it is null or negative, there is no limit.</summary>
335 doc: <access>Read/Write</access>
336 doc: <thread_safety>Safe</thread_safety>
337 doc: <synchronization>Use `eif_memory_mutex' when updating its value in `memory.c'.</synchronization>
338 doc: </attribute>
339 */
340 rt_shared size_t eif_max_mem = 0;
341
342 /*
343 doc: <attribute name="eif_tenure_max" return_type="size_t" export="shared">
344 doc: <summary>Maximum age of tenuring.</summary>
345 doc: <access>Read/Write once</access>
346 doc: <thread_safety>Safe</thread_safety>
347 doc: <synchronization>None since initialized in `eif_alloc_init' (main.c)</synchronization>
348 doc: </attribute>
349 */
350 rt_shared size_t eif_tenure_max;
351
352 /*
353 doc: <attribute name="eif_gs_limit" return_type="size_t" export="shared">
354 doc: <summary>Maximum size of object in GSZ.</summary>
355 doc: <access>Read/Write once</access>
356 doc: <thread_safety>Safe</thread_safety>
357 doc: <synchronization>None since initialized in `eif_alloc_init' (main.c)</synchronization>
358 doc: </attribute>
359 */
360 rt_shared size_t eif_gs_limit;
361
362 /*
363 doc: <attribute name="eif_scavenge_size" return_type="size_t" export="shared">
364 doc: <summary>Size of scavenge zones. Should be a multiple of ALIGNMAX.</summary>
365 doc: <access>Read/Write once</access>
366 doc: <thread_safety>Safe</thread_safety>
367 doc: <synchronization>None since initialized in `eif_alloc_init' (main.c)</synchronization>
368 doc: </attribute>
369 */
370 rt_shared size_t eif_scavenge_size;
371 #endif
372
373 /*
374 doc: <attribute name="eif_stack_chunk" return_type="size_t" export="shared">
375 doc: <summary>Size of local stack chunk. Should be a multiple of ALIGNMAX.</summary>
376 doc: <access>Read/Write once</access>
377 doc: <thread_safety>Safe</thread_safety>
378 doc: <synchronization>None since initialized in `eif_alloc_init' (main.c)</synchronization>
379 doc: </attribute>
380 */
381 rt_shared size_t eif_stack_chunk;
382
383 /*
384 doc: <attribute name="eif_chunk_size" return_type="size_t" export="shared">
385 doc: <summary>Size of memory chunks. Should be a multiple of ALIGNMAX.</summary>
386 doc: <access>Read/Write once</access>
387 doc: <thread_safety>Safe</thread_safety>
388 doc: <synchronization>None since initialized in `eif_alloc_init' (main.c)</synchronization>
389 doc: </attribute>
390 */
391 rt_shared size_t eif_chunk_size;
392
393 #ifdef WITH_OBJECT_IDENTIFIER
394 rt_shared EIF_INTEGER eif_object_id_count = 0; /* object identifier counter */
395 #endif
396
397 #ifdef ISE_GC
398 /* Functions handling free list */
399 rt_private uint32 compute_hlist_index (size_t size);
400 rt_shared EIF_REFERENCE eif_rt_xmalloc(size_t nbytes, int type, int gc_flag); /* General free-list allocation */
401 rt_shared void rel_core(void); /* Release core to kernel */
402 rt_private union overhead *add_core(size_t nbytes, int type); /* Get more core from kernel */
403 rt_private void connect_free_list(union overhead *zone, rt_uint_ptr i); /* Insert a block in free list */
404 rt_private void disconnect_free_list(union overhead *next, rt_uint_ptr i); /* Remove a block from free list */
405 rt_private rt_uint_ptr coalesc(union overhead *zone); /* Coalescing (return # of bytes) */
406 rt_private EIF_REFERENCE malloc_free_list(size_t nbytes, union overhead **hlist, int type, int gc_flag); /* Allocate block in one of the lists */
407 rt_private EIF_REFERENCE allocate_free_list(size_t nbytes, union overhead **hlist); /* Allocate block from free list */
408 rt_private union overhead * allocate_free_list_helper (size_t i, size_t nbytes, union overhead **hlist);
409 rt_private EIF_REFERENCE allocate_from_core(size_t nbytes, union overhead **hlist, int maximized); /* Allocate block asking for core */
410 rt_private EIF_REFERENCE set_up(register union overhead *selected, size_t nbytes); /* Set up block before public usage */
411 rt_shared rt_uint_ptr chunk_coalesc(struct chunk *c); /* Coalescing on a chunk */
412 rt_private void xfreeblock(union overhead *zone, rt_uint_ptr r); /* Release block to the free list */
413 rt_shared rt_uint_ptr full_coalesc(int chunk_type); /* Coalescing over specified chunks */
414 rt_private rt_uint_ptr full_coalesc_unsafe(int chunk_type); /* Coalescing over specified chunks */
415 rt_private void free_chunk(struct chunk *); /* Detach chunk from list and release it to core. */
416
417 /* Functions handling scavenging zone */
418 rt_private EIF_REFERENCE malloc_from_eiffel_list (rt_uint_ptr nbytes);
419 rt_private EIF_REFERENCE malloc_from_zone(rt_uint_ptr nbytes); /* Allocate block in scavenging zone */
420 rt_shared void create_scavenge_zones(void); /* Attempt creating the two zones */
421 rt_private void explode_scavenge_zone(struct sc_zone *sc); /* Release objects to free-list */
422 rt_public void sc_stop(void); /* Stop the scavenging process */
423 #endif
424
425 /* Eiffel object setting */
426 rt_private EIF_REFERENCE eif_set(EIF_REFERENCE object, uint16 flags, EIF_TYPE_INDEX dftype, EIF_TYPE_INDEX dtype); /* Set Eiffel object prior use */
427 rt_private EIF_REFERENCE eif_spset(EIF_REFERENCE object, EIF_BOOLEAN in_scavenge); /* Set special Eiffel object */
428
429 #ifdef ISE_GC
430 rt_private int trigger_gc_cycle (void);
431 rt_private int trigger_smart_gc_cycle (void);
432 rt_private EIF_REFERENCE add_to_stack (EIF_REFERENCE, struct stack *);
433 rt_private EIF_REFERENCE add_to_moved_set (EIF_REFERENCE);
434
435 /* Also used by the garbage collector */
436 rt_shared void lxtract(union overhead *next); /* Extract a block from free list */
437 rt_shared EIF_REFERENCE malloc_from_eiffel_list_no_gc (rt_uint_ptr nbytes); /* Wrapper to eif_rt_xmalloc */
438 rt_shared EIF_REFERENCE get_to_from_core(void); /* Get a free eiffel chunk from kernel */
439 #ifdef EIF_EXPENSIVE_ASSERTIONS
440 rt_private void check_free_list (size_t nbytes, register union overhead **hlist);
441 #endif
442 #endif
443
444 /* Compiled with -DTEST, we turn on DEBUG if not already done */
445 #ifdef TEST
446
447 /* This is to make tests */
448 #undef References
449 #undef Size
450 #undef Disp_rout
451 #undef XCreate
452 #define References(type) 2 /* Number of references in object */
453 #define EIF_Size(type) 40 /* Size of the object */
454 #define Disp_rout(type) 0 /* No dispose procedure */
455 #define XCreate(type) 0 /* No creation procedure */
456 /* char *(**ecreate)(void); FIXME: SEE EIF_PROJECT.C */
457
458 #ifndef DEBUG
459 #define DEBUG 127 /* Highest debug level */
460 #endif
461 #endif
462
463 #ifdef BOEHM_GC
464 /*
465 doc: <routine name="boehm_dispose" export="private">
466 doc: <summary>Record `dispose' routine for Boehm GC</summary>
467 doc: <param name="header" type="union overhead *">Zone allocated by Boehm GC which needs to be recorded so that `dispose' is called by Boehm GC when collectin `header'.</param>
468 doc: <thread_safety>Safe</thread_safety>
469 doc: <synchronization>None required</synchronization>
470 doc: </routine>
471 */
472
473 rt_private void boehm_dispose (union overhead *header, void *cd)
474 /* Record `dispose' routine for Boehm GC. */
475 {
476 DISP(header->ov_dtype, (EIF_REFERENCE) (header + 1));
477 }
478 #endif
479
480 #if defined(BOEHM_GC) || defined(NO_GC)
481 /*
482 doc: <routine name="external_allocation" return_type="EIF_REFERENCE" export="private">
483 doc: <summary>Allocate new object using an external allocator such as `Boehm GC' or `standard malloc'.</summary>
484 doc: <param name="atomic" type="int">Is object to be allocated empty of references to other objects?</param>
485 doc: <param name="has_dispose" type="int">Has object to be allocated a `dispose' routine to be called when object will be collected?</param>
486 doc: <param name="nbytes" type="uint32">Size in bytes of object to be allocated.</param>
487 doc: <return>A newly allocated object of size `nbytes'.</return>
488 doc: <thread_safety>Safe</thread_safety>
489 doc: <synchronization>Performed by external allocator</synchronization>
490 doc: </routine>
491 */
492
493 rt_private EIF_REFERENCE external_allocation (int atomic, int has_dispose, uint32 nbytes)
494 {
495 unsigned int real_nbytes; /* Real object size */
496 int mod; /* Remainder for padding */
497 union overhead *header;
498
499 /* We need to allocate room for the header and
500 * make sure that we won't have alignment problems. We
501 * really use at least ALIGNMAX, so even if `nbytes'
502 * is 0, some memory will be used (the header).
503 */
504 real_nbytes = nbytes;
505 mod = real_nbytes % ALIGNMAX;
506 if (mod != 0)
507 real_nbytes += ALIGNMAX - mod;
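	/* For instance, with ALIGNMAX = 8 a request of 20 bytes is padded to 24 bytes,
	 * and the block actually obtained from the allocator is 24 + OVERHEAD bytes. */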
508 if (real_nbytes & ~B_SIZE) {
509 /* Object too big */
510 return NULL;
511 } else {
512 DISCARD_BREAKPOINTS
513 #ifdef BOEHM_GC
514 if (real_nbytes == 0) {
515 real_nbytes++;
516 }
517 if (atomic) {
518 header = (union overhead *) GC_malloc_atomic (real_nbytes + OVERHEAD);
519 } else {
520 header = (union overhead *) GC_malloc (real_nbytes + OVERHEAD);
521 }
522 #endif
523 #ifdef NO_GC
524 header = (union overhead *) malloc(real_nbytes + OVERHEAD);
525 #endif
526 if (header != NULL) {
527 header->ov_size = real_nbytes;
528 /* Point to the first data byte, just after the header. */
529 #ifdef BOEHM_GC
530 #ifdef EIF_ASSERTIONS
531 GC_is_valid_displacement(header);
532 GC_is_valid_displacement((EIF_REFERENCE)(header + 1));
533 #endif
534 if (has_dispose) {
535 GC_register_finalizer(header, (void (*) (void*, void*)) &boehm_dispose, NULL, NULL, NULL);
536 }
537 #endif
538 UNDISCARD_BREAKPOINTS
539 return (EIF_REFERENCE)(header + 1);
540 } else {
541 UNDISCARD_BREAKPOINTS
542 return NULL;
543 }
544 }
545 }
546
547 /*
548 doc: <routine name="external_reallocation" return_type="EIF_REFERENCE" export="private">
549 doc: <summary>Reallocate a given object with a bigger size using external allocator.</summary>
550 doc: <param name="obj" type="EIF_REFERENCE">Object to be reallocated</param>
551 doc: <param name="nbytes" type="uint32">New size in bytes of `obj'.</param>
552 doc: <return>A reallocated object of size `nbytes'.</return>
553 doc: <thread_safety>Safe</thread_safety>
554 doc: <synchronization>Performed by external allocator</synchronization>
555 doc: </routine>
556 */
557
558 rt_private EIF_REFERENCE external_reallocation (EIF_REFERENCE obj, uint32 nbytes) {
559 unsigned int real_nbytes; /* Real object size */
560 int mod; /* Remainder for padding */
561 union overhead *header;
562
563 /* We need to allocate room for the header and
564 * make sure that we won't have alignment problems. We
565 * really use at least ALIGNMAX, so even if `nbytes'
566 * is 0, some memory will be used (the header).
567 */
568 real_nbytes = nbytes;
569 mod = real_nbytes % ALIGNMAX;
570 if (mod != 0)
571 real_nbytes += ALIGNMAX - mod;
572 if (real_nbytes & ~B_SIZE) {
573 /* Object too big */
574 return NULL;
575 } else {
576 DISCARD_BREAKPOINTS
577 #ifdef BOEHM_GC
578 header = (union overhead *) GC_realloc((union overhead *) obj - 1, real_nbytes + OVERHEAD);
579 #endif
580 #ifdef NO_GC
581 header = (union overhead *) realloc((union overhead *) obj - 1, real_nbytes + OVERHEAD);
582 #endif
583 UNDISCARD_BREAKPOINTS
584
585 if (header != NULL) {
586 header->ov_size = real_nbytes;
587 /* Point to the first data byte, just after the header. */
588 return (EIF_REFERENCE)(header + 1);
589 } else {
590 return NULL;
591 }
592 }
593 }
594
595 #endif
596
597 #if defined(ISE_GC) && defined(EIF_THREADS)
598 /*
599 doc: <attribute name="eif_gc_gsz_mutex" return_type="EIF_CS_TYPE *" export="shared">
600 doc: <summary>Mutex used to protect GC allocation in scavenge zone.</summary>
601 doc: <thread_safety>Safe</thread_safety>
602 doc: </attribute>
603 */
604 rt_shared EIF_CS_TYPE *eif_gc_gsz_mutex = NULL;
605 #define EIF_GC_GSZ_LOCK EIF_ASYNC_SAFE_CS_LOCK(eif_gc_gsz_mutex)
606 #define EIF_GC_GSZ_UNLOCK EIF_ASYNC_SAFE_CS_UNLOCK(eif_gc_gsz_mutex)
607
608 /*
609 doc: <attribute name="eif_free_list_mutex" return_type="EIF_CS_TYPE *" export="shared">
610 doc: <summary>Mutex used to protect access and update to private/shared member of this module.</summary>
611 doc: <thread_safety>Safe</thread_safety>
612 doc: </attribute>
613 */
614
615 rt_shared EIF_CS_TYPE *eif_free_list_mutex = NULL;
616 #define EIF_FREE_LIST_MUTEX_LOCK EIF_ASYNC_SAFE_CS_LOCK(eif_free_list_mutex)
617 #define EIF_FREE_LIST_MUTEX_UNLOCK EIF_ASYNC_SAFE_CS_UNLOCK(eif_free_list_mutex)
618
619 /*
620 doc: <attribute name="eiffel_usage_mutex" return_type="EIF_CS_TYPE *" export="shared">
621 doc: <summary>Mutex used to protect access and update `eiffel_usage'.</summary>
622 doc: <thread_safety>Safe</thread_safety>
623 doc: </attribute>
624 */
625
626 rt_shared EIF_CS_TYPE *eiffel_usage_mutex = NULL;
627 #define EIFFEL_USAGE_MUTEX_LOCK EIF_ASYNC_SAFE_CS_LOCK(eiffel_usage_mutex)
628 #define EIFFEL_USAGE_MUTEX_UNLOCK EIF_ASYNC_SAFE_CS_UNLOCK(eiffel_usage_mutex)
629
630 /*
631 doc: <attribute name="trigger_gc_mutex" return_type="EIF_CS_TYPE *" export="shared">
632 doc: <summary>Mutex used to protect execution of `trigger_gc' routines. So that even if more than one thread enters this routine because there is a need to launch a GC cycle, hopefully only one will run it, and not all of them.</summary>
633 doc: <thread_safety>Safe</thread_safety>
634 doc: </attribute>
635 */
636
637 rt_shared EIF_CS_TYPE *trigger_gc_mutex = NULL;
638 #define TRIGGER_GC_LOCK EIF_ASYNC_SAFE_CS_LOCK(trigger_gc_mutex)
639 #define TRIGGER_GC_UNLOCK EIF_ASYNC_SAFE_CS_UNLOCK(trigger_gc_mutex)
640 #endif
641
642 #ifdef EIF_THREADS
643 /*
644 doc: <attribute name="eif_type_set_mutex" return_type="EIF_CS_TYPE *" export="public">
645 doc: <summary>Mutex used to guarantee unique access to `rt_type_set'.</summary>
646 doc: <thread_safety>Safe</thread_safety>
647 doc: </attribute>
648 */
649 rt_shared EIF_CS_TYPE *eif_type_set_mutex = NULL;
650 #define TYPE_SET_MUTEX_LOCK EIF_ASYNC_SAFE_CS_LOCK(eif_type_set_mutex)
651 #define TYPE_SET_MUTEX_UNLOCK EIF_ASYNC_SAFE_CS_UNLOCK(eif_type_set_mutex)
652 #endif
653
654
655 /*
656 doc: <routine name="smart_emalloc" return_type="EIF_REFERENCE" export="public">
657 doc: <summary>Perform smart allocation of either a TUPLE object or a normal object. It does not take into account SPECIAL or BIT creation as a size is required for those. See `emalloc' comments for more details.</summary>
658 doc: <param name="ftype" type="EIF_TYPE_INDEX">Full dynamic type used to determine if we are creating a TUPLE or a normal object.</param>
659 doc: <return>A newly allocated object if successful, otherwise throw an exception</return>
660 doc: <exception>"No more memory" when it fails</exception>
661 doc: <thread_safety>Safe</thread_safety>
662 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
663 doc: </routine>
664 */
665
666 rt_public EIF_REFERENCE smart_emalloc (EIF_TYPE_INDEX ftype)
667 {
668 EIF_TYPE_INDEX type = To_dtype(ftype);
669 if (type == egc_tup_dtype) {
670 return tuple_malloc (ftype);
671 } else {
672 return emalloc_size (ftype, type, EIF_Size(type));
673 }
674 }
675
676 /*
677 doc: <routine name="emalloc" return_type="EIF_REFERENCE" export="public">
678 doc: <summary>Perform allocation of a normal object (i.e. not a BIT, SPECIAL or TUPLE object) based on the `ftype' full dynamic type, which is used to find out the object's size in bytes. Note that the size of all the Eiffel objects is correctly padded, but does not take into account the header's size.</summary>
679 doc: <param name="ftype" type="uint32">Full dynamic type used to determine if we are creating a TUPLE or a normal object.</param>
680 doc: <return>A newly allocated object if successful, otherwise throw an exception.</return>
681 doc: <exception>"No more memory" when it fails</exception>
682 doc: <thread_safety>Safe</thread_safety>
683 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
684 doc: </routine>
685 */
686
687 rt_public EIF_REFERENCE emalloc (EIF_TYPE_INDEX ftype)
688 {
689 EIF_TYPE_INDEX type = To_dtype(ftype);
690 return emalloc_size (ftype, type, EIF_Size(type));
691 }
692
693 /*
694 doc: <routine name="emalloc_size" return_type="EIF_REFERENCE" export="public">
695 doc: <summary>Memory allocation for a normal Eiffel object (i.e. not BIT, SPECIAL or TUPLE).</summary>
696 doc: <param name="ftype" type="EIF_TYPE_INDEX">Full dynamic type used to initialize full dynamic type overhead part of Eiffel object.</param>
697 doc: <param name="type" type="EIF_TYPE_INDEX">Dynamic type used to initialize flags overhead part of Eiffel object, mostly used if type is a deferred one, or if it is an expanded one, or if it has a dispose routine.</param>
698 doc: <param name="nbytes" type="uint32">Number of bytes to allocate for this type.</param>
699 doc: <return>A newly allocated object holding at least `nbytes' if successful, otherwise throw an exception.</return>
700 doc: <exception>"No more memory" when it fails</exception>
701 doc: <thread_safety>Safe</thread_safety>
702 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
703 doc: </routine>
704 */
705
706 rt_public EIF_REFERENCE emalloc_size(EIF_TYPE_INDEX ftype, EIF_TYPE_INDEX type, uint32 nbytes)
707 {
708 EIF_REFERENCE object; /* Pointer to the freshly created object */
709 #ifdef ISE_GC
710 uint32 mod; /* Remainder for padding */
711 #endif
712
713 #ifdef WORKBENCH
714 if (EIF_IS_DEFERRED_TYPE(System(type))) { /* Cannot create deferred */
715 eraise(System(type).cn_generator, EN_CDEF);
716 return (EIF_REFERENCE) 0; /* In case they chose to ignore EN_CDEF */
717 }
718 #endif
719
720 #if defined(BOEHM_GC) || defined(NO_GC)
721 object = external_allocation (References(type) == 0, (int) Disp_rout(type), nbytes);
722 if (object != NULL) {
723 return eif_set(object, EO_NEW, ftype, type);
724 } else {
725 eraise("object allocation", EN_MEM); /* Signals no more memory */
726 return NULL;
727 }
728 #endif
729
730 #ifdef ISE_GC
731 /* Objects of tiny size are very expensive to manage in the free-list, thus we make them not tiny. */
732 nbytes = MIN_OBJECT_SIZE(nbytes);
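	/* For instance, on a 64-bit platform a 4-byte request is raised to 8 bytes here so that
	 * the PREVIOUS back pointer fits once the block is returned to the free list. */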
733
734 /* We really use at least ALIGNMAX, to avoid alignment problems.
735 * So even if nbytes is 0, some memory will be used (the header), he he !!
736 */
737 mod = nbytes % ALIGNMAX;
738 if (mod != 0)
739 nbytes += ALIGNMAX - mod;
740
741 /* Depending on the optimization chosen, we allocate the object in
742 * the Generational Scavenge Zone (GSZ) or in the free-list.
743 * All the objects smaller than `eif_gs_limit' are allocated
744 * in the GSZ, otherwise they are allocated in the free-list.
745 */
746
747 if ((gen_scavenge == GS_ON) && (nbytes <= eif_gs_limit)) {
748 object = malloc_from_zone(nbytes);
749 if (object) {
750 return eif_set(object, 0, ftype, type); /* Set for Eiffel use */
751 } else if (trigger_smart_gc_cycle()) {
752 /* First allocation in scavenge zone failed. If `trigger_smart_gc_cycle' was
753 * successful, let's try again as this is a more efficient way to allocate
754 * in the scavenge zone. */
755 object = malloc_from_zone (nbytes);
756 if (object) {
757 return eif_set(object, 0, ftype, type); /* Set for Eiffel use */
758 }
759 }
760 }
761
762 /* Try an allocation in the free list, with garbage collection turned on. */
763 CHECK("Not too big", !(nbytes & ~B_SIZE));
764 object = malloc_from_eiffel_list (nbytes);
765 if (object) {
766 return eif_set(object, EO_NEW, ftype, type); /* Set for Eiffel use */
767 } else {
768 /*
769 * Allocation failed even if GC was requested. We can only make some space
770 * by turning off generation scavenging and getting the two scavenge zones
771 * back for next allocation. A last attempt is then made before raising
772 * an exception if it also failed.
773 */
774 if (gen_scavenge & GS_ON) /* If generation scavenging was on */
775 sc_stop(); /* Free 'to' and explode 'from' space */
776
777 object = malloc_from_eiffel_list_no_gc (nbytes); /* Retry with GC off this time */
778
779 if (object) {
780 return eif_set(object, EO_NEW, ftype, type); /* Set for Eiffel use */
781 }
782 }
783
784
785 eraise("object allocation", EN_MEM); /* Signals no more memory */
786
787 /* NOTREACHED, to avoid C compilation warning. */
788 return NULL;
789 #endif /* ISE_GC */
790 }
791
792 /*
793 doc: <routine name="emalloc_as_old" return_type="EIF_REFERENCE" export="public">
794 doc: <summary>Memory allocation for a normal Eiffel object (i.e. not BIT, SPECIAL or TUPLE) as an old object. Useful for once manifest strings for example which we know are going to stay alive for a while.</summary>
795 doc: <param name="ftype" type="uint32">Full dynamic type used to initialize full dynamic type overhead part of Eiffel object.</param>
796 doc: <return>A newly allocated object if successful, otherwise throw an exception.</return>
797 doc: <exception>"No more memory" when it fails</exception>
798 doc: <thread_safety>Safe</thread_safety>
799 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
800 doc: </routine>
801 */
802
803 rt_public EIF_REFERENCE emalloc_as_old(EIF_TYPE_INDEX ftype)
804 {
805 EIF_REFERENCE object; /* Pointer to the freshly created object */
806 EIF_TYPE_INDEX type = To_dtype(ftype);
807 uint32 nbytes = EIF_Size(type);
808
809 #ifdef ISE_GC
810 uint32 mod; /* Remainder for padding */
811 #endif
812
813 #ifdef WORKBENCH
814 if (EIF_IS_DEFERRED_TYPE(System(type))) { /* Cannot create deferred */
815 eraise(System(type).cn_generator, EN_CDEF);
816 return (EIF_REFERENCE) 0; /* In case they chose to ignore EN_CDEF */
817 }
818 #endif
819
820 #if defined(BOEHM_GC) || defined(NO_GC)
821 object = external_allocation (References(type) == 0, (int) Disp_rout(type), nbytes);
822 if (object != NULL) {
823 return eif_set(object, EO_OLD, ftype, type);
824 } else {
825 eraise("object allocation", EN_MEM); /* Signals no more memory */
826 return NULL;
827 }
828 #endif
829
830 #ifdef ISE_GC
831 /* We really use at least ALIGNMAX, to avoid alignment problems.
832 * So even if nbytes is 0, some memory will be used (the header), he he !!
833 */
834 mod = nbytes % ALIGNMAX;
835 if (mod != 0)
836 nbytes += ALIGNMAX - mod;
837
838 /* Direct allocation in the free list, with garbage collection turned on. */
839 CHECK("Not too big", !(nbytes & ~B_SIZE));
840 object = malloc_from_eiffel_list (nbytes);
841 if (object) {
842 return eif_set(object, EO_OLD, ftype, type); /* Set for Eiffel use */
843 } else {
844 /*
845 * Allocation failed even if GC was requested. We can only make some space
846 * by turning off generation scavenging and getting the two scavenge zones
847 * back for next allocation. A last attempt is then made before raising
848 * an exception if it also failed.
849 */
850 if (gen_scavenge & GS_ON) /* If generation scavenging was on */
851 sc_stop(); /* Free 'to' and explode 'from' space */
852
853 object = malloc_from_eiffel_list_no_gc (nbytes); /* Retry with GC off this time */
854
855 if (object) {
856 return eif_set(object, EO_OLD, ftype, type); /* Set for Eiffel use */
857 }
858 }
859
860 eraise("object allocation", EN_MEM); /* Signals no more memory */
861
862 /* NOTREACHED, to avoid C compilation warning. */
863 return NULL;
864 #endif /* ISE_GC */
865 }
866
867 /*
868 doc: <routine name="sp_init" return_type="EIF_REFERENCE" export="public">
869 doc: <summary>Initialize special object of expanded `obj' from `lower' position to `upper' position. I.e. creating new instances of expanded objects and assigning them from `obj [lower]' to `obj [upper]'.</summary>
870 doc: <param name="obj" type="EIF_REFERENCE">Special object of expanded which will be initialized.</param>
871 doc: <param name="dftype" type="EIF_TYPE_INDEX">Dynamic type of expanded object to create for each entry of special object `obj'.</param>
872 doc: <param name="lower" type="EIF_INTEGER">Lower bound of `obj'.</param>
873 doc: <param name="upper" type="EIF_INTEGER">Upper bound of `obj'.</param>
874 doc: <return>New address of `obj' in case a GC collection was performed.</return>
875 doc: <thread_safety>Safe</thread_safety>
876 doc: <synchronization>None required</synchronization>
877 doc: </routine>
878 */
879
880 rt_public EIF_REFERENCE sp_init (EIF_REFERENCE obj, EIF_TYPE_INDEX dftype, EIF_INTEGER lower, EIF_INTEGER upper)
881 {
882 EIF_GET_CONTEXT
883
884 EIF_INTEGER i;
885 rt_uint_ptr elem_size, offset;
886 union overhead *zone;
887 EIF_TYPE_INDEX dtype = To_dtype(dftype);
888 void (*cp) (EIF_REFERENCE);
889 void (*init) (EIF_REFERENCE, EIF_REFERENCE);
890
891 REQUIRE ("obj not null", obj != (EIF_REFERENCE) 0);
892 REQUIRE ("Not forwarded", !(HEADER (obj)->ov_size & B_FWD));
893 REQUIRE ("Special object", HEADER (obj)->ov_flags & EO_SPEC);
894 REQUIRE ("Special object of expanded", HEADER (obj)->ov_flags & EO_COMP);
895 REQUIRE ("Valid lower", ((lower >= 0) && (lower <= RT_SPECIAL_CAPACITY(obj))));
896 REQUIRE ("Valid upper", ((upper >= lower - 1) && (upper <= RT_SPECIAL_CAPACITY(obj))));
897
898 if (upper >= lower) {
899 #ifdef WORKBENCH
900 cp = init_exp;
901 #else
902 cp = egc_exp_create [dtype];
903 #endif
904 init = XCreate(dtype);
905
906 elem_size = RT_SPECIAL_ELEM_SIZE(obj);
907 #ifndef WORKBENCH
908 if (References(dtype) > 0) {
909 #endif
910 if (init) {
911 if (cp) {
912 RT_GC_PROTECT(obj);
913 for (i = lower, offset = elem_size * i; i <= upper; i++) {
914 zone = (union overhead *) (obj + offset);
915 zone->ov_size = OVERHEAD + offset; /* For GC */
916 zone->ov_flags = EO_EXP; /* Expanded type */
917 zone->ov_dftype = dftype;
918 zone->ov_dtype = dtype;
919 (init) (obj + OVERHEAD + offset, obj + OVERHEAD + offset);
920 (cp) (obj + OVERHEAD + offset);
921 offset += elem_size;
922 }
923 RT_GC_WEAN(obj);
924 } else {
925 RT_GC_PROTECT(obj);
926 for (i = lower, offset = elem_size * i; i <= upper; i++) {
927 zone = (union overhead *) (obj + offset);
928 zone->ov_size = OVERHEAD + offset; /* For GC */
929 zone->ov_flags = EO_EXP; /* Expanded type */
930 zone->ov_dftype = dftype;
931 zone->ov_dtype = dtype;
932 (init) (obj + OVERHEAD + offset, obj + OVERHEAD + offset);
933 offset += elem_size;
934 }
935 RT_GC_WEAN(obj);
936 }
937 } else {
938 if (cp) {
939 RT_GC_PROTECT(obj);
940 for (i = lower, offset = elem_size * i; i <= upper; i++) {
941 zone = (union overhead *) (obj + offset);
942 zone->ov_size = OVERHEAD + offset; /* For GC */
943 zone->ov_flags = EO_EXP; /* Expanded type */
944 zone->ov_dftype = dftype;
945 zone->ov_dtype = dtype;
946 (cp) (obj + OVERHEAD + offset);
947 offset += elem_size;
948 }
949 RT_GC_WEAN(obj);
950 } else {
951 for (i = lower, offset = elem_size * i; i <= upper; i++) {
952 zone = (union overhead *) (obj + offset);
953 zone->ov_size = OVERHEAD + offset; /* For GC */
954 zone->ov_flags = EO_EXP; /* Expanded type */
955 zone->ov_dftype = dftype;
956 zone->ov_dtype = dtype;
957 offset += elem_size;
958 }
959 }
960 }
961 #ifndef WORKBENCH
962 } else {
963 if (cp) {
964 RT_GC_PROTECT(obj);
965 for (i = lower, offset = elem_size * i; i <= upper; i++) {
966 cp (obj + offset);
967 offset += elem_size;
968 }
969 RT_GC_WEAN(obj);
970 }
971 }
972 #endif
973 }
974
975 return obj;
976 }
977
978 /*
979 doc: <routine name="special_malloc" return_type="EIF_REFERENCE" export="public">
980 doc: <summary>Allocate a new SPECIAL object with flags `flags' (the flags include the full dynamic type). Elements are zeroed. It initializes the elements of a special of expanded objects.</summary>
981 doc: <param name="flags" type="uint16">Flags of SPECIAL.</param>
982 doc: <param name="dftype" type="EIF_TYPE_INDEX">Full dynamic type of SPECIAL.</param>
983 doc: <param name="nb" type="EIF_INTEGER">Number of element in special.</param>
984 doc: <param name="element_size" type="uint32">Size of element in special.</param>
985 doc: <param name="atomic" type="EIF_BOOLEAN">Is this a special of basic types?</param>
986 doc: <return>A newly allocated SPECIAL object if successful, otherwise throw an exception.</return>
987 doc: <exception>"No more memory" when it fails</exception>
988 doc: <thread_safety>Safe</thread_safety>
989 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
990 doc: </routine>
991 */
992
993 rt_public EIF_REFERENCE special_malloc (uint16 flags, EIF_TYPE_INDEX dftype, EIF_INTEGER nb, uint32 element_size, EIF_BOOLEAN atomic)
994 {
995 EIF_REFERENCE result = NULL;
996 union overhead *zone;
997
998 result = spmalloc (nb, element_size, atomic);
999
1000 /* At this stage we are guaranteed to have an initialized object, otherwise an
1001 * exception would have been thrown by the call to `spmalloc'. */
1002 CHECK("result not null", result);
1003
1004 zone = HEADER(result);
1005 zone->ov_flags |= flags;
1006 zone->ov_dftype = dftype;
1007 zone->ov_dtype = To_dtype(dftype);
1008
1009 if (egc_has_old_special_semantic) {
1010 RT_SPECIAL_COUNT(result) = nb;
1011 } else {
1012 RT_SPECIAL_COUNT(result) = 0;
1013 }
1014 RT_SPECIAL_ELEM_SIZE(result) = element_size;
1015 RT_SPECIAL_CAPACITY(result) = nb;
1016
1017 if (flags & EO_COMP) {
1018 /* It is a composite object, that is to say a special of expanded,
1019 * we need to initialize every entry properly. */
1020 result = sp_init (result, eif_gen_param_id(dftype, 1), 0, nb - 1);
1021 }
1022 return result;
1023 }
1024
1025 /*
1026 doc: <routine name="tuple_malloc" return_type="EIF_REFERENCE" export="public">
1027 doc: <summary>Allocate a new TUPLE object of type `ftype'. It internally calls `tuple_malloc_specific'; therefore it computes the `count' of the TUPLE to create as well as determining whether the TUPLE is atomic or not.</summary>
1028 doc: <param name="ftype" type="EIF_TYPE_INDEX">Dynamic type of TUPLE object to create.</param>
1029 doc: <return>A newly allocated TUPLE object of type `ftype' if successful, otherwise throw an exception.</return>
1030 doc: <exception>"No more memory" when it fails</exception>
1031 doc: <thread_safety>Safe</thread_safety>
1032 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
1033 doc: </routine>
1034 */
1035
1036 rt_public EIF_REFERENCE tuple_malloc (EIF_TYPE_INDEX ftype)
1037 {
1038 uint32 i, count;
1039 EIF_BOOLEAN is_atomic = EIF_TRUE;
1040
1041 REQUIRE("Is a tuple type", To_dtype(ftype) == egc_tup_dtype);
1042
1043 /* We add + 1 since TUPLE objects have `count + 1' elements
1044 * to avoid doing -1 each time we try to access an item at position `pos'.
1045 */
1046 count = eif_gen_count_with_dftype (ftype) + 1;
1047
1048 /* Let's find out if this is a tuple which contains some references.
1049 * When there is no reference, `is_atomic' is True, which enables
1050 * us to do some optimization at the level of the GC. */
1051 for (i = 1; (i < count) && (is_atomic); i++) {
1052 if (eif_gen_typecode_with_dftype(ftype, i) == EIF_REFERENCE_CODE) {
1053 is_atomic = EIF_FALSE;
1054 }
1055 }
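	/* For example, for a TUPLE [INTEGER_32, STRING] `count' is 3 (the object_comparison slot plus
	 * two items) and `is_atomic' ends up False because the second generic parameter is a reference. */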
1056
1057 return tuple_malloc_specific(ftype, count, is_atomic);
1058 }
1059
1060 /*
1061 doc: <routine name="eif_type_malloc" return_type="EIF_REFERENCE" export="public">
1062 doc: <summary>Create a new TYPE [like ftype] instance for type `ftype' if it was not yet created, otherwise return an already existing one. Objects are created as old objects since, once allocated, they cannot be garbage collected.</summary>
1063 doc: <param name="ftype" type="EIF_TYPE_INDEX">Dynamic type of the type for which we want to create the `TYPE [like ftype]' instance to return.</param>
1064 doc: <return>A TYPE instance for `ftype' if successful, otherwise throw an exception.</return>
1065 doc: <exception>"No more memory" when it fails</exception>
1066 doc: <thread_safety>Safe</thread_safety>
1067 doc: <synchronization>Through `eif_type_set_mutex'</synchronization>
1068 doc: </routine>
1069 */
1070
1071 rt_public EIF_REFERENCE eif_type_malloc (EIF_TYPE_INDEX ftype)
1072 {
1073 RT_GET_CONTEXT
1074 EIF_REFERENCE result;
1075 rt_uint_ptr l_array_index;
1076
1077 REQUIRE("Valid actual generic type", (ftype <= MAX_DTYPE) || (RT_IS_NONE_TYPE(ftype)));
1078
1079 /* The actual offset in the `rt_type_set' array is increased by '2' so that
1080 * we can store TYPE [detachable NONE] and TYPE [attached NONE] at index `0' and `1'. */
1081 if (!RT_IS_NONE_TYPE(ftype)) {
1082 l_array_index = ftype + 2;
1083 } else {
1084 if (ftype == DETACHABLE_NONE_TYPE) {
1085 l_array_index = 0;
1086 } else {
1087 l_array_index = 1;
1088 }
1089 }
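	/* For instance, a regular `ftype' of 7 is stored at slot 9, while the two NONE types
	 * always use slots 0 and 1. */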
1090
1091 /* Synchronization required to access `rt_type_set'. */
1092 EIF_ENTER_C;
1093 GC_THREAD_PROTECT(TYPE_SET_MUTEX_LOCK);
1094
1095 if (rt_type_set_count > l_array_index) {
1096 result = rt_type_set [l_array_index];
1097 if (!result) {
1098 result = emalloc_as_old(eif_typeof_type_of (ftype));
1099 CHECK("Not in scavenge `from' zone", (result < sc_from.sc_arena) && (result > sc_from.sc_top));
1100 CHECK("Not in scavenge `to' zone", (result < sc_to.sc_arena) && (result > sc_to.sc_top));
1101 rt_type_set [l_array_index] = result;
1102 }
1103 } else {
1104 rt_uint_ptr old_count = rt_type_set_count;
1105 /* Ensures we allocate at least 2 entries. */
1106 rt_uint_ptr new_count = (l_array_index == 0 ? 2 : l_array_index * 2);
1107 if (rt_type_set) {
1108 rt_type_set = (EIF_REFERENCE *) crealloc(rt_type_set, sizeof(EIF_REFERENCE) * new_count);
1109 } else {
1110 rt_type_set = (EIF_REFERENCE *) cmalloc(sizeof(EIF_REFERENCE) * new_count);
1111 }
1112 memset(rt_type_set + old_count, 0, sizeof(EIF_REFERENCE) * (new_count - old_count));
1113 result = emalloc_as_old(eif_typeof_type_of (ftype));
1114 CHECK("Not in scavenge `from' zone", (result < sc_from.sc_arena) && (result > sc_from.sc_top));
1115 CHECK("Not in scavenge `to' zone", (result < sc_to.sc_arena) && (result > sc_to.sc_top));
1116 rt_type_set [l_array_index] = result;
1117 rt_type_set_count = new_count;
1118 }
1119
1120 GC_THREAD_PROTECT(TYPE_SET_MUTEX_UNLOCK);
1121 EIF_EXIT_C;
1122 RTGC;
1123 return result;
1124 }
1125
1126 /*
1127 doc: <routine name="tuple_malloc_specific" return_type="EIF_REFERENCE" export="public">
1128 doc: <summary>Allocate a new TUPLE object of type `ftype', of count `count' and atomicity `atomic'. The TUPLE is allocated through `spmalloc', but the element size is that of a TUPLE element, i.e. sizeof(EIF_TYPED_VALUE).</summary>
1129 doc: <param name="ftype" type="EIF_TYPE_INDEX">Dynamic type of TUPLE object to create.</param>
1130 doc: <param name="count" type="uint32">Number of elements in TUPLE object to create.</param>
1131 doc: <param name="atomic" type="EIF_BOOLEAN">Does current TUPLE object to create has reference or not? True means no.</param>
1132 doc: <return>A newly allocated TUPLE object if successful, otherwise throw an exception.</return>
1133 doc: <exception>"No more memory" when it fails</exception>
1134 doc: <thread_safety>Safe</thread_safety>
1135 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
1136 doc: </routine>
1137 */
1138
1139 rt_public EIF_REFERENCE tuple_malloc_specific (EIF_TYPE_INDEX ftype, uint32 count, EIF_BOOLEAN atomic)
1140 {
1141 EIF_REFERENCE object;
1142 uint32 t;
1143 REQUIRE("Is a tuple type", To_dtype(ftype) == egc_tup_dtype);
1144
1145 object = spmalloc(count, sizeof(EIF_TYPED_VALUE), atomic);
1146
1147 if (object == NULL) {
1148 eraise ("Tuple allocation", EN_MEM); /* signals no more memory */
1149 } else {
1150 /* Initialize TUPLE headers and end of special object */
1151 union overhead * zone = HEADER(object);
1152 unsigned int i;
1153 EIF_TYPED_VALUE *l_item = (EIF_TYPED_VALUE *) object;
1154 RT_SPECIAL_COUNT(object) = count;
1155 RT_SPECIAL_ELEM_SIZE(object) = sizeof(EIF_TYPED_VALUE);
1156 RT_SPECIAL_CAPACITY(object) = count;
1157 if (!egc_has_old_special_semantic) {
1158 /* If by default allocation does not clear the data of a TUPLE,
1159 * we actually need to do it otherwise we end up with TUPLE objects
1160 * with invalid data. */
1161 memset(object, 0, RT_SPECIAL_VISIBLE_SIZE(object));
1162 }
1163 /* Mark it is a tuple object */
1164 zone->ov_flags |= EO_TUPLE;
1165 zone->ov_dftype = ftype;
1166 zone->ov_dtype = To_dtype(ftype);
1167 if (!atomic) {
1168 zone->ov_flags |= EO_REF;
1169 }
1170 /* Initialize type information held in TUPLE instance*/
1171 /* Don't forget that first element of TUPLE is the BOOLEAN
1172 * `object_comparison' attribute. */
1173 eif_tuple_item_sk_type(l_item) = SK_BOOL;
1174 l_item++;
1175 for (i = 1; i < count; i++,l_item++) {
1176 switch (eif_gen_typecode_with_dftype(ftype, i)) {
1177 case EIF_BOOLEAN_CODE: t = SK_BOOL; break;
1178 case EIF_CHARACTER_8_CODE: t = SK_CHAR8; break;
1179 case EIF_CHARACTER_32_CODE: t = SK_CHAR32; break;
1180 case EIF_INTEGER_8_CODE: t = SK_INT8; break;
1181 case EIF_INTEGER_16_CODE: t = SK_INT16; break;
1182 case EIF_INTEGER_32_CODE: t = SK_INT32; break;
1183 case EIF_INTEGER_64_CODE: t = SK_INT64; break;
1184 case EIF_NATURAL_8_CODE: t = SK_UINT8; break;
1185 case EIF_NATURAL_16_CODE: t = SK_UINT16; break;
1186 case EIF_NATURAL_32_CODE: t = SK_UINT32; break;
1187 case EIF_NATURAL_64_CODE: t = SK_UINT64; break;
1188 case EIF_REAL_32_CODE: t = SK_REAL32; break;
1189 case EIF_REAL_64_CODE: t = SK_REAL64; break;
1190 case EIF_POINTER_CODE: t = SK_POINTER; break;
1191 case EIF_REFERENCE_CODE: t = SK_REF; break;
1192 default: t = 0;
1193 }
1194 eif_tuple_item_sk_type(l_item) = t;
1195 }
1196 }
1197 return object;
1198 }
1199
1200 /*
1201 doc: <routine name="spmalloc" return_type="EIF_REFERENCE" export="shared">
1202 doc: <summary>Memory allocation for an Eiffel special object. It either succeeds or raises the "No more memory" exception. The routine returns a pointer to a new special object holding at least `nbytes'. `atomic' means that it is a special object without references.</summary>
1203 doc: <param name="nb" type="EIF_INTEGER">Number of elements to allocate.</param>
1204 doc: <param name="element_size" type="uint32">Element size.</param>
1205 doc: <param name="atomic" type="EIF_BOOLEAN">Does current special object to create has reference or not? True means no.</param>
1206 doc: <return>A newly allocated SPECIAL object if successful, otherwise throw an exception.</return>
1207 doc: <exception>"No more memory" when it fails</exception>
1208 doc: <thread_safety>Safe</thread_safety>
1209 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
1210 doc: </routine>
1211 */
1212
1213 rt_shared EIF_REFERENCE spmalloc(EIF_INTEGER nb, uint32 element_size, EIF_BOOLEAN atomic)
1214 {
1215 EIF_REFERENCE object; /* Pointer to the freshly created special object */
1216 #ifdef ISE_GC
1217 rt_uint_ptr mod;
1218 #endif
1219 rt_uint_ptr n = (rt_uint_ptr) nb * (rt_uint_ptr) element_size;
1220 rt_uint_ptr nbytes = CHRPAD(n) + RT_SPECIAL_PADDED_DATA_SIZE;
1221 /* Check if there is no overflow. */
1222 /* The check should avoid division by zero when `element_size == 0' */
1223 if (((element_size > 0) && (n / (rt_uint_ptr) element_size != (rt_uint_ptr) nb)) || (nbytes < n)) {
1224 eraise("Special allocation", EN_MEM); /* Overflow in calculating memory size. */
1225 }
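	/* The division check catches a wrapped multiplication: if `nb * element_size' overflowed,
	 * dividing the truncated product by `element_size' no longer gives back `nb'. The second
	 * test catches an overflow introduced by the padding itself. */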
1226
1227 #if defined(BOEHM_GC) || defined(NO_GC)
1228 /* No dispose routine associated, therefore `0' for second argument */
1229 object = external_allocation (atomic, 0, nbytes);
1230 if (object != NULL) {
1231 return eif_spset(object, EIF_FALSE);
1232 } else {
1233 eraise("Special allocation", EN_MEM); /* Signals no more memory */
1234 return NULL;
1235 }
1236 #endif
1237
1238 #ifdef ISE_GC
1239 /* Objects of tiny size are very expensive to manage in the free-list, thus we make them not tiny. */
1240 nbytes = MIN_OBJECT_SIZE(nbytes);
1241
1242 	/* We really use at least ALIGNMAX, to avoid alignment problems.
1243 * So even if nbytes is 0, some memory will be used (the header), he he !!
1244 */
1245 mod = nbytes % ALIGNMAX;
1246 if (mod != 0)
1247 nbytes += ALIGNMAX - mod;
1248
1249 if ((gen_scavenge == GS_ON) && (nbytes <= eif_gs_limit)) {
1250 object = malloc_from_zone (nbytes); /* allocate it in scavenge zone. */
1251 if (object) {
1252 return eif_spset(object, EIF_TRUE);
1253 } else if (trigger_smart_gc_cycle()) {
1254 object = malloc_from_zone (nbytes);
1255 if (object) {
1256 return eif_spset(object, EIF_TRUE);
1257 }
1258 }
1259 }
1260
1261 	/* New special object is too big to be created in the generational scavenge zone, or there is
1262 	 * no more space in the scavenge zone. So we allocate it from the free list, but only if it is less
1263 	 * than the authorized size `2^27'. */
1264 if (!(nbytes & ~B_SIZE)) {
1265 object = malloc_from_eiffel_list (nbytes);
1266 if (object) {
1267 return eif_spset(object, EIF_FALSE);
1268 } else {
1269 /*
1270 * Allocation failed even if GC was requested. We can only make some space
1271 * by turning off generation scavenging and getting the two scavenge zones
1272 * back for next allocation. A last attempt is then made before raising
1273 * an exception if it also failed.
1274 */
1275 		if (gen_scavenge & GS_ON)	/* If generation scavenging was on */
1276 sc_stop(); /* Free 'to' and explode 'from' space */
1277
1278 object = malloc_from_eiffel_list_no_gc (nbytes); /* Retry with GC off this time */
1279
1280 if (object) {
1281 return eif_spset(object, EIF_FALSE); /* Set for Eiffel use */
1282 }
1283 }
1284 }
1285
1286 eraise("Special allocation", EN_MEM); /* No more memory */
1287
1288 /* Not reached - to avoid C compilation warning */
1289 return NULL;
1290 #endif
1291 }
1292
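/* The following is an illustrative sketch (not part of the runtime build) showing
 * the calling pattern for `spmalloc' as documented above. The element type and
 * count are arbitrary example values; the full SPECIAL setup (count, capacity and
 * element size in the padded data area) is normally performed by the higher-level
 * allocation routines, which are not shown here.
 */
#if 0
static EIF_REFERENCE example_special_allocation (void)
{
		/* 10 items of `sizeof (EIF_INTEGER_32)' bytes each; `EIF_TRUE' because
		 * INTEGER_32 items hold no references. Raises "No more memory" on failure. */
	return spmalloc (10, (uint32) sizeof (EIF_INTEGER_32), EIF_TRUE);
}
#endif
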
1293 /*
1294 doc: <routine name="sprealloc" return_type="EIF_REFERENCE" export="public">
1295 doc: <summary>Reallocate a special object `ptr' so that it can hold at least `nbitems'.</summary>
1296 doc: <param name="ptr" type="EIF_REFERENCE">Special object which will be reallocated.</param>
1297 doc: <param name="nbitems" type="unsigned int">New number of items wanted.</param>
1298 doc: <return>A newly allocated special object if successful, otherwise throw an exception.</return>
1299 doc: <exception>"No more memory" when it fails</exception>
1300 doc: <thread_safety>Safe</thread_safety>
1301 doc: <synchronization>None required, it is done by the features we are calling.</synchronization>
1302 doc: </routine>
1303 */
1304
1305 rt_public EIF_REFERENCE sprealloc(EIF_REFERENCE ptr, unsigned int nbitems)
1306 {
1307 EIF_GET_CONTEXT
1308 union overhead *zone; /* Malloc information zone */
1309 EIF_REFERENCE object;
1310 unsigned int count, elem_size, capacity;
1311 rt_uint_ptr old_size, new_size; /* New and old size of special object. */
1312 rt_uint_ptr old_real_size, new_real_size; /* Size occupied by items of special */
1313 #ifdef ISE_GC
1314 EIF_BOOLEAN need_update = EIF_FALSE; /* Do we need to remember content of special? */
1315 #endif
1316 EIF_BOOLEAN need_expanded_initialization = EIF_FALSE; /* Do we need to initialize new entries? */
1317
1318 REQUIRE ("ptr not null", ptr != (EIF_REFERENCE) 0);
1319 REQUIRE ("Not forwarded", !(HEADER (ptr)->ov_size & B_FWD));
1320 REQUIRE ("Special object:", HEADER (ptr)->ov_flags & EO_SPEC);
1321
1322 /* At the end of the special object arena, there are two long values which
1323 * are kept up-to-date: the actual number of items held and the size in
1324 * byte of each item (the same for all items).
1325 */
1326 zone = HEADER(ptr);
1327 old_size = zone->ov_size & B_SIZE; /* Old size of array */
1328 count = RT_SPECIAL_COUNT(ptr); /* Current number of elements */
1329 elem_size = RT_SPECIAL_ELEM_SIZE(ptr);
1330 capacity = RT_SPECIAL_CAPACITY(ptr);
1331 old_real_size = (rt_uint_ptr) capacity * (rt_uint_ptr) elem_size; /* Size occupied by items in old special */
1332 new_real_size = nbitems * (rt_uint_ptr) elem_size; /* Size occupied by items in new special */
1333 new_size = new_real_size + RT_SPECIAL_PADDED_DATA_SIZE; /* New required size */
1334
1335 if (nbitems == capacity) { /* OPTIMIZATION: Does resized object have same size? */
1336 return ptr; /* If so, we return unchanged `ptr'. */
1337 }
1338
1339 RT_GC_PROTECT(ptr); /* Object may move if GC called */
1340
1341 CHECK ("Stil not forwarded", !(HEADER(ptr)->ov_size & B_FWD));
1342
1343 #ifdef ISE_GC
1344 #ifdef EIF_GSZ_ALLOC_OPTIMIZATION
1345 if (zone->ov_flags & (EO_NEW | EO_OLD)) { /* Is it out of GSZ? */
1346 #endif
1347 /* It is very important to give the GC_FREE flag to xrealloc, as if the
1348 * special object happens to move during the reallocing operation, its
1349 * old "location" must not be freed in case it is in the moved_set or
1350 * whatever GC stack. This old copy will be normally reclaimed by the
1351 * GC. Also, this prevents a nasty bug when Eiffel objects share the area.
1352 	 * When one of these areas is resized and moved, the other object still
1353 	 * references something valid (although the area is no longer shared)--RAM.
1354 */
1355
1356 object = xrealloc (ptr, new_size, GC_ON | GC_FREE);
1357 #endif
1358
1359 #if defined(BOEHM_GC) || defined(NO_GC)
1360 object = external_reallocation (ptr, new_size);
1361 #endif
1362
1363 if ((EIF_REFERENCE) 0 == object) {
1364 eraise("special reallocation", EN_MEM);
1365 return (EIF_REFERENCE) 0;
1366 }
1367
1368 zone = HEADER (object);
1369 new_size = zone->ov_size & B_SIZE; /* `xrealloc' can change the `new_size' value for padding */
1370
1371 CHECK("Valid_size", new_size >= new_real_size);
1372
1373 CHECK ("Not forwarded", !(HEADER(ptr)->ov_size & B_FWD));
1374
1375 /* Reset extra-items with zeros or default expanded value if any */
1376 if (new_real_size > old_real_size) {
1377 		/* When the allocated memory actually increased, we need to reset
1378 		 * the new elements to their default value. */
1379 memset (object + old_real_size, 0, new_size - old_real_size);
1380 if (zone->ov_flags & EO_COMP)
1381 need_expanded_initialization = EIF_TRUE;
1382 } else { /* Smaller object requested. */
1383 /* We need to remove existing elements between `new_real_size'
1384 		 * and `new_size'. Anything above `new_size' has already been taken
1385 		 * care of by `xrealloc' when it moved the memory area above `new_size'
1386 		 * back to the free list.
1387 */
1388 memset (object + new_real_size, 0, new_size - new_real_size);
1389 }
1390
1391 #ifdef ISE_GC
1392 if (ptr != object) { /* Has ptr moved in the reallocation? */
1393 /* If the reallocation had to allocate a new object, then we have to do
1394 * some further settings: if the original object was marked EO_NEW, we
1395 * must push the new object into the moved set. Also we must reset the B_C
1396 * flags set by malloc (which makes it impossible to freeze a special
1397 * object, but it cannot be achieved anyway since a reallocation may
1398 * have to move it).
1399 */
1400
1401 zone->ov_size &= ~B_C; /* Cannot freeze a special object */
1402 need_update = EIF_TRUE;
1403 }
1404 #ifdef EIF_GSZ_ALLOC_OPTIMIZATION
1405 } else { /* Was allocated in the GSZ. */
1406 CHECK ("In scavenge zone", !(HEADER (ptr)->ov_size & B_BUSY));
1407
1408 /* We do not need to reallocate an array if the existing one has enough
1409 		 * space to accommodate the resizing. */
1410 if (new_size > old_size) {
1411 /* Reserve `new_size' bytes in GSZ if possible. */
1412 object = spmalloc (nbitems, elem_size, EIF_TEST(!(zone->ov_flags & EO_REF)));
1413 } else
1414 object = ptr;
1415
1416 if (object == (EIF_REFERENCE) 0) {
1417 eraise("Special reallocation", EN_MEM);
1418 return (EIF_REFERENCE) 0;
1419 }
1420
1421 /* Set flags of newly created object */
1422 zone = HEADER (object);
1423 new_size = zone->ov_size & B_SIZE; /* `spmalloc' can change the `new_size' value for padding */
1424
1425 /* Copy only dynamic type and object nature and age from old object
1426 * We cannot copy HEADER(ptr)->ov_flags because `ptr' might have
1427 * moved outside the GSZ during reallocation of `object'. */
1428 zone->ov_flags |= HEADER(ptr)->ov_flags & (EO_REF | EO_COMP);
1429 zone->ov_dftype = HEADER(ptr)->ov_dftype;
1430 zone->ov_dtype = HEADER(ptr)->ov_dtype;
1431 zone->ov_pid = HEADER(ptr)->ov_pid;
1432
1433 /* Update flags of new object if it contains references and the object is not
1434 * in the scavenge zone anymore. */
1435 if ((zone->ov_flags & (EO_NEW | EO_OLD)) && (zone->ov_flags & (EO_REF | EO_COMP))) {
1436 /* New object has been created outside the scavenge zone. Although it might
1437 			 * contain no references to young objects, we need to remember it just in case. */
1438 erembq (object); /* Usual remembrance process. */
1439 }
1440
1441 CHECK ("Not forwarded", !(HEADER (ptr)->ov_size & B_FWD));
1442
1443 /* Copy `ptr' in `object'. */
1444 if (new_real_size > old_real_size) {
1445 CHECK ("New size bigger than old one", new_size >= old_size);
1446 			/* If object has been resized we do not need to clean the newly
1447 * allocated memory because `spmalloc' does it for us. */
1448 if (object != ptr)
1449 /* Object has been resized to grow, we need to copy old items. */
1450 memcpy (object, ptr, old_real_size);
1451 else {
1452 CHECK ("New size same as old one", new_size == old_size);
1453 /* We need to clean area between `old_real_size' and
1454 * `new_real_size'.
1455 */
1456 memset (object + old_real_size, 0, new_size - old_real_size);
1457 }
1458
1459 if (zone->ov_flags & EO_COMP)
1460 /* Object has been resized and contains expanded objects.
1461 * We need to initialize the newly allocated area. */
1462 need_expanded_initialization = EIF_TRUE;
1463 } else { /* Smaller object requested. */
1464 CHECK ("New size smaller than old one", new_size <= old_size);
1465 /* We need to remove existing elements between `new_real_size'
1466 * and `new_size'. Above `new_size' we do not care since it
1467 * is in the scavenge zone and no one is going to access it
1468 */
1469 memset (object + new_real_size, 0, new_size - new_real_size);
1470 }
1471 }
1472 #endif
1473 #endif
1474
1475 RT_GC_WEAN(ptr); /* Unprotect `ptr'. No more collection is expected. */
1476
1477 /* Update special attributes count and element size at the end */
1478 if (egc_has_old_special_semantic) {
1479 /* New count equal to capacity. */
1480 RT_SPECIAL_COUNT(object) = nbitems;
1481 } else {
1482 /* We preserve the count if smaller than new capacity, otherwise new capacity. */
1483 RT_SPECIAL_COUNT(object) = (nbitems < count ? nbitems : count);
1484 }
1485 RT_SPECIAL_ELEM_SIZE(object) = elem_size; /* New item size */
1486 RT_SPECIAL_CAPACITY(object) = nbitems; /* New capacity */
1487
1488 if (need_expanded_initialization) {
1489 /* Case of a special object of expanded structures. */
1490 /* Initialize remaining items. */
1491 object = sp_init(object, eif_gen_param_id (Dftype(object), 1), count, nbitems - 1);
1492 }
1493
1494 #ifdef ISE_GC
1495 if (need_update) {
1496 /* If the object has moved in the reallocation process and was in the
1497 * remembered set, we must re-issue a memorization call otherwise all the
1498 * old contents of the area could be freed. Note that the test below is
1499 * NOT perfect, since the area could have been moved by the GC and still
1500 		 * have not been moved around wrt the GC stacks. But it doesn't hurt--RAM.
1501 */
1502
1503 if (HEADER (ptr)->ov_flags & EO_REM) {
1504 erembq (object); /* Usual remembrance process. */
1505 /* A simple remembering for other special objects. */
1506 }
1507
1508 		if (HEADER(ptr)->ov_flags & EO_NEW) {	/* Original was new, i.e. not allocated in GSZ. */
1509 object = add_to_stack (object, &moved_set);
1510 }
1511 }
1512 #endif
1513
1514 ENSURE ("Special object", HEADER (object)->ov_flags & EO_SPEC);
1515 ENSURE ("Valid new size", (HEADER (object)->ov_size & B_SIZE) >= new_size);
1516
1517 /* The accounting of memory used by Eiffel is not accurate here, but it is
1518 * not easy to know at this level whether the block was merely extended or
1519 * whether we had to allocate a new block. However, if the reallocation
1520 * shrinks the object, we know xrealloc will not move the block but shrink
1521 * it in place, so there is no need to update the usage.
1522 */
1523
1524 #ifdef ISE_GC
1525 if (new_size > old_size) { /* Then update memory usage. */
1526 RT_GET_CONTEXT
1527 GC_THREAD_PROTECT(EIFFEL_USAGE_MUTEX_LOCK);
1528 eiffel_usage += (new_size - old_size);
1529 GC_THREAD_PROTECT(EIFFEL_USAGE_MUTEX_UNLOCK);
1530 }
1531 #endif
1532
1533 return object;
1534 }
1535
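/* Illustrative sketch (not part of the runtime build): growing a special object
 * with `sprealloc' as documented above. `sp' is assumed to be a valid SPECIAL
 * reference, e.g. one obtained through the allocation routines above; the new
 * capacity (20) is an arbitrary example value.
 */
#if 0
static EIF_REFERENCE example_special_resize (EIF_REFERENCE sp)
{
		/* Returns a special object able to hold at least 20 items; the result may
		 * be a different reference if the object had to move. Raises on failure. */
	return sprealloc (sp, 20);
}
#endif
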
1536 /*
1537 doc: <routine name="cmalloc" return_type="void *" export="public">
1538 doc:	<summary>Memory allocation for a C object. This is the same as the traditional malloc routine, except that the memory management is done by the Eiffel run time, so Eiffel keeps a kind of control over this memory. Memory is `zeroed'.</summary>
1539 doc:	<param name="nbytes" type="size_t">Number of bytes to allocate.</param>
1540 doc:	<return>Upon success, it returns a pointer to a new free zone holding at least 'nbytes' bytes. Otherwise, a null pointer is returned.</return>
1541 doc: <thread_safety>Safe</thread_safety>
1542 doc: <synchronization>None required</synchronization>
1543 doc: </routine>
1544 */
1545
1546 rt_public void *cmalloc(size_t nbytes)
1547 {
1548 #ifdef ISE_GC
1549 return eif_rt_xmalloc(nbytes, C_T, GC_OFF);
1550 #else
1551 return eif_malloc (nbytes);
1552 #endif
1553 }
1554
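/* Illustrative sketch (not part of the runtime build): `cmalloc' is used like
 * malloc(), but the block stays under the control of the Eiffel memory manager,
 * so it is released with `eif_rt_xfree' (defined further below) rather than free().
 * The size (128) is an arbitrary example value.
 */
#if 0
static void example_c_allocation (void)
{
	void *p = cmalloc (128);
	if (p) {
			/* ... use the 128-byte zone ... */
		eif_rt_xfree (p);
	}
}
#endif
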
1555 #ifdef ISE_GC
1556 /*
1557 doc: <routine name="malloc_from_eiffel_list_no_gc" return_type="EIF_REFERENCE" export="shared">
1558 doc: <summary>Requests 'nbytes' from the free-list (Eiffel if possible), garbage collection turned off. This entry point is used by some garbage collector routines, so it is really important to turn the GC off. This routine being called from within the garbage collector, there is no need to make it a critical section with SIGBLOCK / SIGRESUME.</summary>
1559 doc:	<param name="nbytes" type="rt_uint_ptr">Number of bytes to allocate; should be properly aligned and no greater than the maximum size we can allocate (i.e. 2^27 currently).</param>
1560 doc:	<return>Upon success, it returns a pointer to a new free zone holding at least 'nbytes' bytes. Otherwise, a null pointer is returned.</return>
1561 doc: <thread_safety>Safe</thread_safety>
1562 doc: <synchronization>None required</synchronization>
1563 doc: </routine>
1564 */
1565
1566 rt_shared EIF_REFERENCE malloc_from_eiffel_list_no_gc (rt_uint_ptr nbytes)
1567 {
1568 EIF_REFERENCE result;
1569
1570 REQUIRE("nbytes properly padded", (nbytes % ALIGNMAX) == 0);
1571 REQUIRE("nbytes not too big (less than 2^27)", !(nbytes & ~B_SIZE));
1572
1573 /* We try to find an empty spot in the free list. If not found, we
1574 * will try `malloc_free_list' which will either allocate more
1575 * memory or coalesc some zone of the free list to create a bigger
1576 * one that will be able to accommodate `nbytes'.
1577 */
1578 result = allocate_free_list (nbytes, e_hlist);
1579 if (!result) {
1580 RT_GET_CONTEXT
1581 result = malloc_free_list (nbytes, e_hlist, EIFFEL_T, GC_OFF);
1582
1583 GC_THREAD_PROTECT(EIFFEL_USAGE_MUTEX_LOCK);
1584 /* Increment allocated bytes outside scavenge zone. */
1585 eiffel_usage += nbytes;
1586 /* No more space found in free memory, we force a full collection next time
1587 * we do a collect. */
1588 force_plsc++;
1589 GC_THREAD_PROTECT(EIFFEL_USAGE_MUTEX_UNLOCK);
1590 }
1591
1592 ENSURE ("Allocated size big enough", !result || (nbytes <= (HEADER(result)->ov_size & B_SIZE)));
1593 return result;
1594 }
1595
1596 /*
1597 doc: <routine name="malloc_from_eiffel_list" return_type="EIF_REFERENCE" export="shared">
1598 doc: <summary>Requests 'nbytes' from the free-list (Eiffel if possible), garbage collection turned on. If no more space is found in the free list, we will launch a GC cycle to make some room and then try again, if it fails we try one more time with garbage collection turned off.</summary>
1599 doc:	<param name="nbytes" type="rt_uint_ptr">Number of bytes to allocate; should be properly aligned and no greater than the maximum size we can allocate (i.e. 2^27 currently).</param>
1600 doc:	<return>Upon success, it returns a pointer to a new free zone holding at least 'nbytes' bytes. Otherwise, a null pointer is returned.</return>
1601 doc: <thread_safety>Safe</thread_safety>
1602 doc: <synchronization>Use `eiffel_usage_mutex' to perform safe update to `eiffel_usage', otherwise rest is naturally thread safe.</synchronization>
1603 doc: </routine>
1604 */
1605
1606 rt_private EIF_REFERENCE malloc_from_eiffel_list (rt_uint_ptr nbytes)
1607 {
1608 EIF_REFERENCE result;
1609
1610 REQUIRE("nbytes properly padded", (nbytes % ALIGNMAX) == 0);
1611 REQUIRE("nbytes not too big (less than 2^27)", !(nbytes & ~B_SIZE));
1612
1613 /* Perform allocation in free list. If not successful, we try again
1614 * by trying a GC cycle. */
1615 result = allocate_free_list(nbytes, e_hlist);
1616
1617 if (!result) {
1618 if (trigger_gc_cycle()) {
1619 result = allocate_free_list(nbytes, e_hlist);
1620 }
1621 if (!result) {
1622 /* We try to put Eiffel blocks in Eiffel chunks
1623 * If the free list cannot hold the block, switch to the C chunks list.
1624 */
1625 result = malloc_free_list(nbytes, e_hlist, EIFFEL_T, GC_ON);
1626 if (!result) {
1627 result = allocate_free_list (nbytes, c_hlist);
1628 if (!result) {
1629 result = malloc_free_list(nbytes, c_hlist, C_T, GC_OFF);
1630 }
1631 }
1632 }
1633 }
1634
1635 if (result) {
1636 RT_GET_CONTEXT
1637 GC_THREAD_PROTECT(EIFFEL_USAGE_MUTEX_LOCK);
1638 eiffel_usage += nbytes + OVERHEAD; /* Memory used by Eiffel */
1639 GC_THREAD_PROTECT(EIFFEL_USAGE_MUTEX_UNLOCK);
1640 }
1641
1642 ENSURE ("Allocated size big enough", !result || (nbytes <= (HEADER(result)->ov_size & B_SIZE)));
1643 return result;
1644 }
1645 #endif
1646
1647 /*
1648 doc: <routine name="eif_rt_xmalloc" return_type="EIF_REFERENCE" export="shared">
1649 doc: <summary>This routine is the main entry point for free-list driven memory allocation. It allocates 'nbytes' of type 'type' (Eiffel or C) and will call the garbage collector if necessary, unless it is turned off. The function returns a pointer to the free location found, or a null pointer if there is no memory available.</summary>
1650 doc: <param name="nbytes" type="size_t">Number of bytes requested.</param>
1651 doc: <param name="type" type="int">Type of block (C_T or EIFFEL_T).</param>
1652 doc: <param name="gc_flag" type="int">Is GC on or off?</param>
1653 doc: <return>New block of memory if successful, otherwise a null pointer.</return>
1654 doc: <thread_safety>Safe</thread_safety>
1655 doc: <synchronization>None required</synchronization>
1656 doc: </routine>
1657 */
1658
1659 rt_shared EIF_REFERENCE eif_rt_xmalloc(size_t nbytes, int type, int gc_flag)
1660 {
1661 #ifdef ISE_GC
1662 size_t mod; /* Remainder for padding */
1663 EIF_REFERENCE result; /* Pointer to the free memory location we found */
1664 union overhead **first_hlist, **second_hlist;
1665 int second_type;
1666 #ifdef EIF_ASSERTIONS
1667 size_t old_nbytes = nbytes;
1668 #endif
1669
1670 	/* We really use at least ALIGNMAX, to avoid alignment problems.
1671 * So even if nbytes is 0, some memory will be used (the header), he he !!
1672 	 * The maximum size for nbytes is 2^27, because the upper 5 bits of the
1673 * size field are used to mark the blocks.
1674 */
1675 mod = nbytes % ALIGNMAX;
1676 if (mod != 0)
1677 nbytes += ALIGNMAX - mod;
1678
1679 if (nbytes & ~B_SIZE)
1680 return (EIF_REFERENCE) 0; /* I guess we can't malloc more than 2^27 */
1681
1682 #ifdef DEBUG
1683 dprintf(1)("eif_rt_xmalloc: requesting %d bytes from %s list (GC %s)\n", nbytes,
1684 type == C_T ? "C" : "Eiffel", gc_flag == GC_ON ? "on" : "off");
1685 flush;
1686 #endif
1687
1688 /* We try to put Eiffel blocks in Eiffel chunks and C blocks in C chunks.
1689 * That way, we do not spoil the Eiffel chunks for scavenging (C blocks
1690 * cannot be moved). The Eiffel objects that are referenced from C must
1691 * be moved to C chunks and become C blocks (so that the GC skips them).
1692 * If the free list cannot hold the block, switch to the other list. Note
1693 * that the GC flag makes sense only when allocating from a free list for
1694 * the first time (it does make sense for the C list in case we had to
1695 * allocate Eiffel blocks in the C list due to a "low on memory" condition).
1696 */
1697
1698 if (type == EIFFEL_T) {
1699 first_hlist = e_hlist;
1700 second_hlist = c_hlist;
1701 second_type = C_T;
1702 } else {
1703 first_hlist = c_hlist;
1704 second_hlist = e_hlist;
1705 second_type = EIFFEL_T;
1706 }
1707
1708 result = allocate_free_list (nbytes, first_hlist);
1709 if (!result) {
1710 if (gc_flag && (type == EIFFEL_T)) {
1711 if (trigger_gc_cycle()) {
1712 result = allocate_free_list(nbytes, e_hlist);
1713 }
1714 }
1715 if (!result) {
1716 result = malloc_free_list (nbytes, first_hlist, type, gc_flag);
1717 if (result == (EIF_REFERENCE) 0 && gc_flag != GC_OFF) {
1718 result = allocate_free_list (nbytes, second_hlist);
1719 if (!result) {
1720 result = malloc_free_list(nbytes, second_hlist, second_type, GC_OFF);
1721 }
1722 }
1723 }
1724 }
1725
1726 ENSURE ("Allocated size big enough", !result || (old_nbytes <= (HEADER(result)->ov_size & B_SIZE)));
1727 return result; /* Pointer to free data space or null if out of memory */
1728 #else
1729 return (EIF_REFERENCE) eif_malloc (nbytes);
1730 #endif
1731 }
1732
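/* Illustrative sketch (not part of the runtime build): the padding rule used by
 * `eif_rt_xmalloc' above -- requests are rounded up to a multiple of ALIGNMAX
 * before being served. The helper below simply restates that computation.
 */
#if 0
static size_t example_pad_request (size_t nbytes)
{
	size_t mod = nbytes % ALIGNMAX;		/* Remainder for padding */
	if (mod != 0)
		nbytes += ALIGNMAX - mod;		/* Round up to the next ALIGNMAX boundary */
	return nbytes;
}
#endif
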
1733 #ifdef ISE_GC
1734 /*
1735 doc: <routine name="malloc_free_list" return_type="EIF_REFERENCE" export="private">
1736 doc:	<summary>We tried to find a free block in `hlist' before calling this routine but could not find any. Therefore here we will try to launch a GC cycle if permitted, or we will try to coalesce the memory so that bigger blocks of memory can be found in the free list.</summary>
1737 doc:	<param name="nbytes" type="unsigned int">Number of bytes to allocate; should be properly aligned.</param>
1738 doc:	<param name="hlist" type="union overhead **">List from which we try to find a free block or allocate a new block.</param>
1739 doc: <param name="type" type="int">Type of list (EIFFEL_T or C_T).</param>
1740 doc: <param name="gc_flag" type="int">Is GC on or off?</param>
1741 doc: <return>An aligned block of 'nbytes' bytes or null if no more memory is available.</return>
1742 doc: <thread_safety>Safe</thread_safety>
1743 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
1744 doc: </routine>
1745 */
1746
1747 rt_private EIF_REFERENCE malloc_free_list (size_t nbytes, union overhead **hlist, int type, int gc_flag)
1748 {
1749 RT_GET_CONTEXT
1750 EIF_REFERENCE result; /* Location of the malloc'ed block */
1751 unsigned int estimated_free_space;
1752
1753 REQUIRE("Valid list", CHUNK_TYPE(hlist) == type);
1754
1755 if (cc_for_speed) {
1756 /* They asked for speed (over memory, of course), so we first try
1757 * to allocate by requesting some core from the kernel. If this fails,
1758 * we try to do block coalescing before attempting a new allocation
1759 		 * from the free list if the coalescing brought a big enough block.
1760 */
1761 result = allocate_from_core (nbytes, hlist, 0); /* Ask for more core */
1762 if (result) {
1763 return result; /* We got it */
1764 }
1765 }
1766
1767 /* Call garbage collector if it is not turned off and restart our
1768 * attempt from the beginning. We always call the partial scavenging
1769 * collector to benefit from the memory compaction, if possible.
1770 */
1771 if (gc_flag == GC_ON) {
1772 #ifdef EIF_THREADS
1773 RT_GET_CONTEXT
1774 if ((gc_thread_status == EIF_THREAD_GC_RUNNING) || thread_can_launch_gc) {
1775 plsc(); /* Call garbage collector */
1776 return malloc_free_list (nbytes, hlist, type, GC_OFF);
1777 }
1778 #else
1779 plsc(); /* Call garbage collector */
1780 return malloc_free_list (nbytes, hlist, type, GC_OFF);
1781 #endif
1782 }
1783
1784 /* Optimize: do not try to run a full coalescing if there is not
1785 * enough free memory anyway. To give an estimation of the amount of
1786 	 * free memory, we subtract the amount used from the total allocated:
1787 * in a perfect world, the amount of overhead would be zero... Anyway,
1788 * the coalescing operation will reduce the overhead, so we must not
1789 	 * deal with it or we may wrongly reject some situations--RAM.
1790 */
1791
1792 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_LOCK);
1793 if (type == C_T) {
1794 estimated_free_space = (unsigned int) (rt_c_data.ml_total - rt_c_data.ml_used);
1795 } else {
1796 estimated_free_space = (unsigned int) (rt_e_data.ml_total - rt_e_data.ml_used);
1797 }
1798
1799 if ((nbytes <= estimated_free_space) && (nbytes < (unsigned int) full_coalesc_unsafe (type))) {
1800 #ifdef EIF_ASSERTIONS
1801 EIF_REFERENCE result;
1802 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
1803 result = allocate_free_list (nbytes, hlist);
1804 CHECK ("result not null", result);
1805 return result;
1806 #else
1807 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
1808 return allocate_free_list (nbytes, hlist);
1809 #endif
1810 } else {
1811 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
1812 /* No other choice but to request for more core */
1813 return allocate_from_core (nbytes, hlist, 0);
1814 }
1815 }
1816
1817 /*
1818 doc: <routine name="allocate_free_list" return_type="EIF_REFERENCE" export="private">
1819 doc: <summary>Given a correctly padded size 'nbytes', we try to find a free block from the free list described in 'hlist'.</summary>
1820 doc: <param name="nbytes" type="size_t">Number of bytes requested.</param>
1821 doc: <param name="hlist" type="union overhead **">List from which we try to find a free block.</param>
1822 doc:	<return>Return the address of the (possibly split) block if found, a null pointer otherwise.</return>
1823 doc: <thread_safety>Safe</thread_safety>
1824 doc: <synchronization>Through `eif_free_list_mutex'</synchronization>
1825 doc: </routine>
1826 */
1827
1828 rt_private EIF_REFERENCE allocate_free_list(size_t nbytes, register union overhead **hlist)
1829 {
1830 RT_GET_CONTEXT
1831 size_t i; /* Index in hlist */
1832 union overhead *selected;
1833 #ifndef EIF_SORTED_FREE_LIST
1834 union overhead *n;
1835 #endif
1836 EIF_REFERENCE result;
1837
1838 #ifdef DEBUG
1839 dprintf(4)("allocate_free_list: requesting %d bytes from %s list\n",
1840 nbytes, (CHUNK_TYPE(hlist) == C_T) ? "C" : "Eiffel");
1841 flush;
1842 #endif
1843
1844 /* Quickly compute the index in the hlist array where we have a
1845 * chance to find the right block. */
1846 i = HLIST_INDEX(nbytes);
1847
1848 /* Look in free list to find a suitable block. */
1849
1850 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_LOCK);
1851
1852 #ifdef EIF_EXPENSIVE_ASSERTIONS
1853 check_free_list (nbytes, hlist);
1854 #endif
1855
1856 if (i >= HLIST_INDEX_LIMIT) {
1857 selected = allocate_free_list_helper (i, nbytes, hlist);
1858 } else {
1859 /* We are below the limit `HLIST_INDEX_LIMIT', therefore if the entry of
1860 * `hlist' at index `i' is not NULL, then it means that we have `nbytes'
1861 * available. No need to check the size here. If block is null, then we
1862 * go through all other blocks to find the first one available. */
1863 selected = hlist[i];
1864 if (selected) {
1865 #ifdef EIF_SORTED_FREE_LIST
1866 disconnect_free_list (selected, i);
1867 #else
1868 /* Remove `selected' from `hlist'. */
1869 n = NEXT(selected);
1870 hlist[i] = n;
1871 if (n && (i != 0)) {
1872 PREVIOUS(n) = NULL;
1873 }
1874 #endif
1875 } else {
1876 selected = hlist[i + 1];
1877 if (selected) {
1878 /* We could find a free space in `i + 1' so we take that
1879 			 * space and we make `set_up' believe we were asking
1880 			 * for `nbytes + ALIGNMAX' to avoid creation of a 0-sized block. */
1881 			nbytes += ALIGNMAX;
1882 CHECK("Correct size", nbytes == (selected->ov_size & B_SIZE));
1883 #ifdef EIF_SORTED_FREE_LIST
1884 disconnect_free_list (selected, i + 1);
1885 #else
1886 /* Remove `selected' from `hlist'. */
1887 n = NEXT(selected);
1888 hlist[i + 1] = n;
1889 if (n) {
1890 PREVIOUS(n) = NULL;
1891 }
1892 #endif
1893 } else {
1894 /* Could not find in `i + 1', let's search above. Here no risk
1895 * of creating a 0-sized block.*/
1896 selected = allocate_free_list_helper (i + 2, nbytes, hlist);
1897 }
1898 }
1899 }
1900
1901 /* Now, either 'i' is NBLOCKS and 'selected' still holds a null
1902 * pointer or 'selected' holds the wanted address and 'i' is the
1903 * index in the hlist array.
1904 */
1905 if (!selected) { /* We did not find it */
1906 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
1907 return NULL; /* Failed */
1908 }
1909
1910 #ifdef DEBUG
1911 dprintf(8)("allocate_free_list: got block from list #%d\n", i);
1912 flush;
1913 #endif
1914
1915 	/* Block is ready to be set up for use of 'nbytes' (possibly after
1916 * having been split). Memory accounting is done in set_up().
1917 */
1918 result = set_up(selected, nbytes);
1919 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
1920 return result;
1921 }
1922
1923 /*
1924 doc: <routine name="allocate_free_list_helper" return_type="union overhead *" export="private">
1925 doc:	<summary>This is the heart of malloc: Look in the hlist array to see if there is already a block available. If so, we take the first one and possibly split the block. If no block is available, we look for some bigger one. If none is found, then we fail.</summary>
1926 doc: <param name="i" type="size_t">Index from where we start looking for a block of `nbytes' in `hlist'.</param>
1927 doc: <param name="nbytes" type="size_t">Number of bytes requested to be found.</param>
1928 doc: <param name="hlist" type="union overhead **">Free list where search will take place.</param>
1929 doc: <return>Location of a zone that can hold `nbytes', null otherwise.</return>
1930 doc: <thread_safety>Not safe</thread_safety>
1931 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
1932 doc: </routine>
1933 */
1934
1935 rt_private union overhead * allocate_free_list_helper(size_t i, size_t nbytes, register union overhead **hlist)
1936 {
1937 union overhead *selected; /* The selected block */
1938 union overhead *p; /* To walk through free-list */
1939
1940 for (; i < NBLOCKS; i++) {
1941 if ((selected = hlist[i]) == NULL)
1942 continue;
1943 else if ((selected->ov_size & B_SIZE) >= nbytes) {
1944 #ifdef EIF_SORTED_FREE_LIST
1945 disconnect_free_list (selected, i);
1946 #else
1947 /* Remove `selected' from `hlist'. */
1948 p = NEXT(selected);
1949 hlist[i] = p;
1950 if (p && (i != 0)) {
1951 PREVIOUS(p) = NULL;
1952 }
1953 #endif
1954 return selected; /* Found it, selected points to it */
1955 } else {
1956 /* Walk through list, until we find a good block. This
1957 * is only done for the first 'i'. Afterwards, either the
1958 * first item will fit, or we'll have to report failure.
1959 */
1960 for (
1961 p = selected, selected = NEXT(p);
1962 selected != NULL;
1963 p = selected, selected = NEXT(p)
1964 ) {
1965 if ((selected->ov_size & B_SIZE) >= nbytes) {
1966 disconnect_free_list (selected, i);
1967 return selected; /* Found it, selected points to it */
1968 }
1969 }
1970 CHECK ("Not found", selected == NULL);
1971 }
1972 }
1973
1974 return NULL;
1975 }
1976
1977
1978 #ifdef EIF_EXPENSIVE_ASSERTIONS
1979 /*
1980 doc: <routine name="check_free_list" return_type="void" export="private">
1981 doc: <summary>Perform a sanity check of the free list to ensure that content of the X_data accounting match the actual content of the free list.</summary>
1982 doc: <param name="nbytes" type="size_t">Number of bytes requested to be found.</param>
1983 doc: <param name="hlist" type="union overhead **">Free list where search will take place.</param>
1984 doc: <thread_safety>Not safe</thread_safety>
1985 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
1986 doc: </routine>
1987 */
1988
1989 rt_private void check_free_list (size_t nbytes, register union overhead **hlist)
1990 {
1991 union overhead *selected; /* The selected block */
1992 union overhead *p; /* To walk through free-list */
1993 size_t bytes_available = 0;
1994 int j, found = 0;
1995
1996 #ifdef DEBUG
1997 fprintf(stderr, "\nallocate_free_list_helper: Requested %d\n", nbytes);
1998 #endif
1999 for (j = 0; j < NBLOCKS; j++) {
2000 selected = hlist [j];
2001 if (selected) {
2002 size_t count = 0;
2003 size_t list_bytes = 0;
2004 for (
2005 p = selected;
2006 selected != NULL;
2007 p = selected, selected = NEXT(p)
2008 ) {
2009 CHECK("valid_previous", (j== 0) || ((p == selected) || (p == PREVIOUS(selected))));
2010
2011 if ((selected->ov_size & B_SIZE) >= nbytes) {
2012 found++;
2013 }
2014 list_bytes += selected->ov_size & B_SIZE;
2015 count++;
2016 }
2017 #ifdef DEBUG
2018 fprintf(stderr, "hlist [%d] has %d elements and %d free bytes.\n", j, count, list_bytes);
2019 #endif
2020 bytes_available += list_bytes;
2021 } else {
2022 			/* Free list empty. */
2023 }
2024 }
2025
2026 if (CHUNK_TYPE(hlist) == EIFFEL_T) {
2027 CHECK("Consistent", bytes_available == (rt_e_data.ml_total - rt_e_data.ml_over - rt_e_data.ml_used));
2028 } else {
2029 CHECK("Consistent", bytes_available == (rt_c_data.ml_total - rt_c_data.ml_over - rt_c_data.ml_used));
2030 }
2031
2032 #ifdef DEBUG
2033 if (found) {
2034 fprintf(stderr, "We found a possible %d block(s) of size greater than %d bytes.\n", found, nbytes);
2035 }
2036 fprintf(stderr, "Total available bytes is %d\n", bytes_available);
2037 if (CHUNK_TYPE(hlist) == EIFFEL_T) {
2038 fprintf(stderr, "Eiffel free list has %d bytes allocated, %d used and %d free.\n",
2039 rt_e_data.ml_total, rt_e_data.ml_used, rt_e_data.ml_total - rt_e_data.ml_used - rt_e_data.ml_over);
2040 } else {
2041 fprintf(stderr, "C free list has %d bytes allocated, %d used and %d free.\n",
2042 rt_c_data.ml_total, rt_c_data.ml_used, rt_c_data.ml_total - rt_c_data.ml_used - rt_c_data.ml_over);
2043 }
2044 flush;
2045 #endif
2046 }
2047 #endif
2048
2049 /*
2050 doc: <routine name="get_to_from_core" return_type="EIF_REFERENCE" export="shared">
2051 doc:	<summary>For the partial scavenging algorithm, gets a new free chunk for the to_space. The chunk size is `eif_chunk_size'; it is not relevant how big the `from_space' is, as the partial scavenging handles the case where the `to_space' is smaller than the `from_space'.</summary>
2052 doc: <return>New block if successful, otherwise a null pointer.</return>
2053 doc: <thread_safety>Safe</thread_safety>
2054 doc: <synchronization>Call to `allocate_from_core' is safe.</synchronization>
2055 doc: </routine>
2056 */
2057
2058 rt_shared EIF_REFERENCE get_to_from_core (void)
2059 {
2060 EIF_REFERENCE Result;
2061
2062 	/* We subtract OVERHEAD and the size of a chunk, because in `allocate_from_core' which
2063 * calls `add_core' we will add `OVERHEAD' and the size of a chunk to make sure we have indeed
2064 * the number of bytes allocated.
2065 */
2066 Result = allocate_from_core (eif_chunk_size - OVERHEAD - sizeof(struct chunk), e_hlist, 1);
2067
2068 ENSURE("block is indeed of the right size", !Result || ((eif_chunk_size - OVERHEAD) == (HEADER(Result)->ov_size & B_SIZE)));
2069
2070 return Result;
2071 }
2072
2073 /*
2074 doc: <routine name="allocate_from_core" return_type="EIF_REFERENCE" export="private">
2075 doc: <summary>Given a correctly padded size 'nbytes', we ask for some core to be able to make a chunk capable of holding 'nbytes'. The chunk will be placed in the specified `hlist'.</summary>
2076 doc: <param name="nbytes" type="size_t">Number of bytes requested.</param>
2077 doc: <param name="hlist" type="union overhead **">List in which we try to allocated a free block.</param>
2078 doc: <param name="maximize" type="int">Even though we asked for `nbytes' should we perform the split in case more than `nbytes' were allocated? `0' means yes, '1' means no.</param>
2079 doc: <return>Address of new block, or null if no more core is available.</return>
2080 doc: <thread_safety>Safe</thread_safety>
2081 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
2082 doc: </routine>
2083 */
2084
2085 rt_private EIF_REFERENCE allocate_from_core(size_t nbytes, union overhead **hlist, int maximize)
2086 {
2087 RT_GET_CONTEXT
2088 union overhead *selected; /* The selected block */
2089 struct chunk *chkbase; /* Base address of new chunk */
2090 EIF_REFERENCE result;
2091 int type = CHUNK_TYPE(hlist);
2092
2093 #ifdef DEBUG
2094 dprintf(4)("allocate_from_core: requesting %d bytes from %s list\n",
2095 nbytes, (type == C_T) ? "C" : "Eiffel");
2096 flush;
2097 #endif
2098
2099 SIGBLOCK; /* Critical section */
2100 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_LOCK);
2101
2102 selected = add_core(nbytes, type); /* Ask for more core */
2103 if (!selected) {
2104 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2105 SIGRESUME; /* End of critical section */
2106 return (EIF_REFERENCE) 0; /* Could not obtain enough memory */
2107 }
2108
2109 	/* Add_core() returns a pointer to the info zone of the sole block
2110 	 * currently in the newborn chunk. We have to set the "type" of the
2111 	 * chunk correctly, along with the type of the block held in it (so that
2112 	 * a free can put the block back into the right eif_free list). Note that an
2113 * Eiffel object may well be in a C chunk.
2114 */
2115 chkbase = ((struct chunk *) selected) - 1; /* Chunk info zone */
2116
2117 /* Hang on. The following should avoid some useless swapping when
2118 * walking through a well defined type of chunk list--RAM.
2119 * All the chunks are added to the list at the end of it, so the
2120 * addresses are always increasing when walking through the list. This
2121 * property is used by the garbage collector, for efficiency reasons
2122 * that are too long to be explained here--RAM.
2123 */
2124
2125 if (C_T == type) {
2126 		/* C block chunk */
2127 if (cklst.cck_head == (struct chunk *) 0) { /* No C chunk yet */
2128 cklst.cck_head = chkbase; /* First C chunk */
2129 chkbase->ck_lprev = (struct chunk *) 0; /* First item in list */
2130 } else {
2131 cklst.cck_tail->ck_lnext = chkbase; /* Added at the tail */
2132 chkbase->ck_lprev = cklst.cck_tail; /* Previous item */
2133 }
2134 cklst.cck_tail = chkbase; /* New tail */
2135 chkbase->ck_lnext = (struct chunk *) 0; /* Last block in list */
2136 chkbase->ck_type = C_T; /* Dedicated to C */
2137 selected->ov_size |= B_CTYPE; /* Belongs to C free list */
2138 } else {
2139 		/* Eiffel block chunk */
2140 if (cklst.eck_head == (struct chunk *) 0) { /* No Eiffel chunk yet */
2141 cklst.eck_head = chkbase; /* First Eiffel chunk */
2142 chkbase->ck_lprev = (struct chunk *) 0; /* First item in list */
2143 } else {
2144 cklst.eck_tail->ck_lnext = chkbase; /* Added at the tail */
2145 chkbase->ck_lprev = cklst.eck_tail; /* Previous item */
2146 }
2147 cklst.eck_tail = chkbase; /* New tail */
2148 chkbase->ck_lnext = (struct chunk *) 0; /* Last block in list */
2149 chkbase->ck_type = EIFFEL_T; /* Dedicated to Eiffel */
2150 }
2151
2152 SIGRESUME; /* End of critical section */
2153
2154 #ifdef DEBUG
2155 dprintf(4)("allocate_from_core: %d user bytes chunk added to %s list\n",
2156 chkbase->ck_length, (type == C_T) ? "C" : "Eiffel");
2157 flush;
2158 #endif
2159
2160 if (maximize == 1) {
2161 /* Because we do not want to split the block, we now say
2162 * that what we really asked for was the size of the block
2163 		 * returned by `add_core'. It is still necessary to call
2164 		 * `set_up' for the memory accounting. */
2165 nbytes = selected->ov_size & B_SIZE;
2166 }
2167
2168 	/* Block is ready to be set up for use of 'nbytes' (possibly after
2169 * having been split). Memory accounting is done in set_up().
2170 */
2171 result = set_up(selected, nbytes);
2172
2173 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2174 return result;
2175 }
2176
2177 /*
2178 doc: <routine name="add_core" return_type="union overhead *" export="private">
2179 doc:	<summary>Get more core from the kernel, increasing the data segment of the process by calling mmap() or sbrk(). We try to request at least CHUNK bytes to allow for efficient scavenging. If more than that amount is requested, the value is padded to the nearest multiple of the system page size. If less than that amount is requested but the system call fails, successive attempts are made, decreasing each time by one system page size. A pointer to a new chunk suitable for at least 'nbytes' bytes is returned, or a null pointer if no more memory is available. The chunk is linked in the main list, but left out of any free list.</summary>
2180 doc: <param name="nbytes" type="size_t">Number of bytes requested.</param>
2181 doc: <param name="type" type="int">Type of block to allocated (EIFFEL_T or C_T).</param>
2182 doc: <return>Address of new block of `nbytes' bytes, or null if no more core is available.</return>
2183 doc: <thread_safety>Not safe</thread_safety>
2184 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
2185 doc: </routine>
2186 */
2187
2188 rt_private union overhead *add_core(size_t nbytes, int type)
2189 {
2190 	union overhead *oldbrk;		/* Address of the newly acquired core. */
2191 size_t mod; /* Remainder for padding */
2192 size_t asked = nbytes; /* Bytes requested */
2193
2194 /* We want at least 'nbytes' bytes for use, so we must add the overhead
2195 * for each block and for each chunk. */
2196 asked += sizeof(struct chunk) + OVERHEAD;
2197
2198 /* Requesting less than CHUNK implies requesting CHUNK bytes, at least.
2199 	 * Requesting more implies at least CHUNK plus the number of
2200 	 * extra pages necessary (tiny fit).
2201 */
2202 if (asked <= eif_chunk_size) {
2203 asked = eif_chunk_size;
2204 CHECK("Multiple of ALIGNMAX", (asked % ALIGNMAX) == 0);
2205 } else {
2206 asked = eif_chunk_size + (((asked - eif_chunk_size) / PAGESIZE_VALUE) + 1) * PAGESIZE_VALUE;
2207 /* Make sure that `asked' is a multiple of ALIGNMAX. */
2208 mod = asked % ALIGNMAX;
2209 if (mod != 0) {
2210 asked += ALIGNMAX - mod;
2211 }
2212 }
2213
2214 /* Size of chunk has to be added, otherwise the remaining space
2215 * might not be a multiple of ALIGNMAX. */
2216 asked += sizeof(struct chunk);
2217
2218 /* We check that we are not asking for more than the limit
2219 * the user has fixed:
2220 * - eif_max_mem (total allocated memory)
2221 * If the value of eif_max_mem is 0, there is no limit.
2222 */
2223 if (eif_max_mem > 0) {
2224 if (rt_m_data.ml_total + asked > eif_max_mem) {
2225 return (union overhead *) 0;
2226 }
2227 }
2228 oldbrk = (union overhead *) eif_malloc (asked); /* Use malloc () */
2229 if (!oldbrk) {
2230 return NULL;
2231 }
2232
2233 	/* Accounting information */
2234 rt_m_data.ml_chunk++;
2235 rt_m_data.ml_total += asked; /* Counts overhead */
2236 rt_m_data.ml_over += sizeof(struct chunk) + OVERHEAD;
2237
2238 /* Accounting is also done for each type of memory (C/Eiffel) */
2239 if (type == EIFFEL_T) {
2240 rt_e_data.ml_chunk++;
2241 rt_e_data.ml_total += asked;
2242 rt_e_data.ml_over += sizeof(struct chunk) + OVERHEAD;
2243 } else {
2244 rt_c_data.ml_chunk++;
2245 rt_c_data.ml_total += asked;
2246 rt_c_data.ml_over += sizeof(struct chunk) + OVERHEAD;
2247 }
2248
2249 /* We got the memory we wanted. Make a chunk out of it, build one
2250 * big block inside and return the pointer to that block. This is
2251 	 * a somewhat costly operation, but it is not done very often.
2252 */
2253 asked -= sizeof(struct chunk) + OVERHEAD; /* Was previously increased */
2254
2255 /* Update all the pointers for the double linked list. This
2256 * is somewhat heavy code, hard to read because of all these
2257 * casts, but believe me, it is simple--RAM.
2258 */
2259
2260 #define chkstart ((struct chunk *) oldbrk)
2261
2262 if (cklst.ck_head == (struct chunk *) 0) { /* No chunk yet */
2263 cklst.ck_head = chkstart; /* First chunk */
2264 chkstart->ck_prev = (struct chunk *) 0; /* First item in list */
2265 } else {
2266 cklst.ck_tail->ck_next = chkstart; /* Added at the tail */
2267 chkstart->ck_prev = cklst.ck_tail; /* Previous item */
2268 }
2269
2270 cklst.ck_tail = chkstart; /* New tail */
2271 chkstart->ck_next = (struct chunk *) 0; /* Last block in list */
2272 chkstart->ck_length = asked + OVERHEAD; /* Size of chunk */
2273
2274 	/* Address of new block (skip chunk overhead) */
2275 oldbrk = (union overhead *) (chkstart + 1);
2276
2277 #undef chkstart
2278
2279 /* Set the size of the new block. Note that this new block
2280 * is the first and the last one in the chunk, so we set the
2281 * B_LAST bit. All the other flags are set to false.
2282 */
2283 CHECK("asked not too big", asked <= B_SIZE);
2284 oldbrk->ov_size = asked | B_LAST;
2285
2286 return oldbrk; /* Pointer to new free zone */
2287 }
2288
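/* Illustrative note (hypothetical figures): assume `eif_chunk_size' is 65536 and
 * PAGESIZE_VALUE is 8192. A request whose padded size (including the chunk and
 * block overhead) comes to 70000 bytes exceeds the chunk size, so `add_core'
 * above asks for 65536 + ((70000 - 65536) / 8192 + 1) * 8192 = 73728 bytes
 * (already a multiple of ALIGNMAX here, so no further rounding), plus
 * sizeof(struct chunk), from `eif_malloc'.
 */
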
2289 /*
2290 doc: <routine name="rel_core" export="shared">
2291 doc: <summary>Release core if possible, giving pages back to the kernel. This will shrink the size of the process accordingly. To release memory, we need at least two free chunks at the end (i.e. near the break), and of course, a chunk not reserved for next partial scavenging as a 'to' zone.</summary>
2292 doc: <thread_safety>Not safe</thread_safety>
2293 doc: <synchronization>Safe under GC synchronization.</synchronization>
2294 doc: </routine>
2295 */
2296
2297 rt_shared void rel_core(void)
2298 {
2299 struct chunk *c, *cn;
2300
2301 for (c = cklst.ck_head; c; c = cn) {
2302 /* Store next chunk before trying to free `c' as otherwise the
2303 * access `c->ck_next' would result in a segfault. */
2304 cn = c->ck_next;
2305 free_chunk (c);
2306 }
2307 }
2308
2309 /*
2310 doc: <routine name="free_chunk" export="private">
2311 doc: <summary>If `a_chk' is not used, then it gets removed from `cklst' and given back to the system.</summary>
2312 doc: <param name="a_chk" type="struct chunk *">Chunk being analyzed for potential removal from `cklst' and returned to system.</param>
2313 doc: <thread_safety>Not safe</thread_safety>
2314 doc: <synchronization>Safe under GC synchronization.</synchronization>
2315 doc: </routine>
2316 */
2317
2318 rt_private void free_chunk(struct chunk *a_chk)
2319 {
2320 RT_GET_CONTEXT
2321 size_t nbytes; /* Number of bytes to be freed */
2322 union overhead *arena; /* The address of the arena enclosed in chunk */
2323 rt_uint_ptr r; /* To compute hashing index for released block */
2324
2325 REQUIRE("a_chk not null", a_chk);
2326
2327 /* Ok, let's see if this chunk is free. If it holds a free last block,
2328 * that's fine. Otherwise, we run a coalescing on the chunk and check again.
2329 * If we do not succeed either, then abort the procedure.
2330 */
2331 arena = (union overhead *) ((EIF_REFERENCE) (a_chk + 1));
2332 if (arena->ov_size & B_BUSY) {
2333 /* Block is busy, we can forget about freeing this chunk, but
2334 		 * we still try to coalesce the free memory as much as we can
2335 * as it speeds up allocation later on. */
2336 r = chunk_coalesc (a_chk);
2337 return;
2338 } else if (!(arena->ov_size & B_LAST)) {
2339 /* Block is not busy, but it is not the last one of the chunk. We try
2340 * to coalesce it and check if it is now the last one. Of course
2341 * no need to do the check if we did not coalesce. */
2342 		r = chunk_coalesc (a_chk);	/* Try to coalesce `a_chk'. */
2343 if ((r == 0) || !(arena->ov_size & B_LAST)) {
2344 /* Chunk is not free. */
2345 return;
2346 }
2347 }
2348
2349 CHECK("arena free and last", (arena->ov_size & B_LAST) && !(arena->ov_size & B_BUSY));
2350
2351 SIGBLOCK; /* Entering in critical section */
2352
2353 r = arena->ov_size & B_SIZE;
2354 disconnect_free_list(arena, HLIST_INDEX(r)); /* Remove arena from free list */
2355
2356 	/* The garbage collector counts the amount of allocated 'to' zones. A limit
2357 * is fixed to avoid a nasty memory leak when all the zones used would be
2358 * spoilt by frozen objects. However, each time we successfully decrease
2359 * the process size by releasing some core, we may allow a new allocation.
2360 */
2361 if (rt_g_data.gc_to > 0) {
2362 rt_g_data.gc_to--; /* Decrease number of allocated 'to' */
2363 }
2364
2365 /* Amount of bytes is chunk's length plus the header overhead */
2366 nbytes = a_chk->ck_length + sizeof(struct chunk);
2367
2368 	/* It's now time to update the internal data structure which keeps track of
2369 * the memory status. */
2370 rt_m_data.ml_chunk--;
2371 rt_m_data.ml_total -= nbytes; /* Counts overhead */
2372 rt_m_data.ml_over -= sizeof(struct chunk) + OVERHEAD;
2373 /* Update list. */
2374 if (a_chk == cklst.ck_head) {
2375 cklst.ck_head = a_chk->ck_next;
2376 if (a_chk->ck_next) {
2377 a_chk->ck_next->ck_prev = NULL;
2378 }
2379 } else if (a_chk == cklst.ck_tail) {
2380 cklst.ck_tail = a_chk->ck_prev;
2381 CHECK("Has previous chunk", a_chk->ck_prev);
2382 a_chk->ck_prev->ck_next = NULL;
2383 } else {
2384 a_chk->ck_prev->ck_next = a_chk->ck_next;
2385 a_chk->ck_next->ck_prev = a_chk->ck_prev;
2386 }
2387 /* Update cursor. Cursors are moved to the right.*/
2388 if (a_chk == cklst.cursor) {
2389 cklst.cursor = a_chk->ck_next;
2390 }
2391
2392 /* Now do the same but for the Eiffel list and the C list. */
2393 if (a_chk->ck_type == EIFFEL_T) { /* Chunk was an Eiffel one */
2394 rt_e_data.ml_chunk--;
2395 rt_e_data.ml_total -= nbytes;
2396 rt_e_data.ml_over -= sizeof(struct chunk) + OVERHEAD;
2397 if (a_chk == cklst.eck_head) {
2398 cklst.eck_head = a_chk->ck_lnext;
2399 if (a_chk->ck_lnext) {
2400 a_chk->ck_lnext->ck_lprev = NULL;
2401 }
2402 } else if (a_chk == cklst.eck_tail) {
2403 cklst.eck_tail = a_chk->ck_lprev;
2404 CHECK("Has previous chunk", a_chk->ck_lprev);
2405 a_chk->ck_lprev->ck_lnext = NULL;
2406 } else {
2407 a_chk->ck_lprev->ck_lnext = a_chk->ck_lnext;
2408 a_chk->ck_lnext->ck_lprev = a_chk->ck_lprev;
2409 }
2410 if (a_chk == cklst.e_cursor) {
2411 cklst.e_cursor = a_chk->ck_lnext;
2412 }
2413 } else { /* Chunk was a C one */
2414 rt_c_data.ml_chunk--;
2415 rt_c_data.ml_total -= nbytes;
2416 rt_c_data.ml_over -= sizeof(struct chunk) + OVERHEAD;
2417 if (a_chk == cklst.cck_head) {
2418 cklst.cck_head = a_chk->ck_lnext;
2419 if (a_chk->ck_lnext) {
2420 a_chk->ck_lnext->ck_lprev = NULL;
2421 }
2422 } else if (a_chk == cklst.cck_tail) {
2423 cklst.cck_tail = a_chk->ck_lprev;
2424 CHECK("Has previous chunk", a_chk->ck_lprev);
2425 a_chk->ck_lprev->ck_lnext = NULL;
2426 } else {
2427 a_chk->ck_lprev->ck_lnext = a_chk->ck_lnext;
2428 a_chk->ck_lnext->ck_lprev = a_chk->ck_lprev;
2429 }
2430 if (a_chk == cklst.c_cursor) {
2431 			cklst.c_cursor = a_chk->ck_lnext;
2432 }
2433 }
2434
2435 /* We can free our block now. */
2436 eif_free (a_chk);
2437
2438 SIGRESUME; /* Critical section ends */
2439
2440 return; /* Signals no error */
2441 }
2442
2443 /*
2444 doc: <routine name="set_up" return_type="EIF_REFERENCE" export="private">
2445 doc:	<summary>Given a 'selected' block which may be too big to hold 'nbytes', we set it up, updating memory accounting info and setting the correct flags in the malloc info zone (header). We then return the address known to the user (which points to the first datum byte).</summary>
2446 doc:	<param name="selected" type="union overhead *">Block of memory which may be too big to hold `nbytes'.</param>
2447 doc: <param name="nbytes" type="size_t">Size in bytes of block we should return.</param>
2448 doc: <return>Address of location of object of size `nbytes' in `selected'.</return>
2449 doc: <thread_safety>Not safe</thread_safety>
2450 doc: <synchronization>Safe if caller is synchronized on `eif_free_list_mutex', or under GC synchronization.</synchronization>
2451 doc: </routine>
2452 */
2453
2454 rt_private EIF_REFERENCE set_up(register union overhead *selected, size_t nbytes)
2455 {
2456 RT_GET_CONTEXT
2457 rt_uint_ptr r; /* For temporary storage */
2458 rt_uint_ptr i; /* To store true size */
2459
2460 #ifdef DEBUG
2461 dprintf(8)("set_up: selected is 0x%lx (%s, %d bytes)\n",
2462 selected, selected->ov_size & B_LAST ? "last" : "normal",
2463 selected->ov_size & B_SIZE);
2464 flush;
2465 #endif
2466
2467 SIGBLOCK; /* Critical section, cannot be interrupted */
2468
2469 	(void) eif_rt_split_block(selected, nbytes);	/* Split the area if necessary */
2470
2471 /* The 'selected' block is now in use and the real size is in
2472 * the ov_size area. To mark the block as used, we have to set
2473 * two bits in the flags part (block is busy and it is a C block).
2474 * Another part of the run-time will overwrite this if Eiffel is
2475 * to ever use this object.
2476 */
2477
2478 r = selected->ov_size;
2479 #ifdef EIF_TID
2480 #ifdef EIF_THREADS
2481 selected->ovs_tid = (rt_uint_ptr) eif_thr_context->thread_id; /* tid from eif_thr_context */
2482 #else
2483 selected->ovs_tid = (rt_uint_ptr) 0; /* In non-MT-mode, it is NULL by convention */
2484 #endif /* EIF_THREADS */
2485 #endif /* EIF_TID */
2486
2487 selected->ov_size = r | B_NEW;
2488 i = r & B_SIZE; /* Keep only true size */
2489 rt_m_data.ml_used += i; /* Account for memory used */
2490 if (r & B_CTYPE)
2491 rt_c_data.ml_used += i;
2492 else {
2493 rt_e_data.ml_used += i;
2494 #ifdef MEM_STAT
2495 printf ("Eiffel: %ld used (+%ld) %ld total (set_up)\n",
2496 rt_e_data.ml_used, i, rt_e_data.ml_total);
2497 #endif
2498 }
2499
2500 SIGRESUME; /* Re-enable signal exceptions */
2501
2502 /* Now it's done. We return the address of data space, that
2503 * is to say (selected + 1) -- yes ! The area holds at least
2504 * 'nbytes' (entrance value) of free space.
2505 */
2506
2507 #ifdef DEBUG
2508 dprintf(8)("set_up: returning %s %s block starting at 0x%lx (%d bytes)\n",
2509 (selected->ov_size & B_CTYPE) ? "C" : "Eiffel",
2510 (selected->ov_size & B_LAST) ? "last" : "normal",
2511 (EIF_REFERENCE) (selected + 1), selected->ov_size & B_SIZE);
2512 flush;
2513 #endif
2514
2515 return (EIF_REFERENCE) (selected + 1); /* Free data space */
2516 }
2517
2518 #endif /* ISE_GC */
2519
2520 /*
2521 doc: <routine name="eif_rt_xfree" export="public">
2522 doc:	<summary>Frees the memory block which starts at 'ptr'. This has to be a pointer returned by eif_rt_xmalloc, otherwise unpredictable results will follow... The contents of the block are preserved, though one should not rely on this as it may change without notice.</summary>
2523 doc: <param name="ptr" type="void *">Address of memory to be freed.</param>
2524 doc: <thread_safety>Safe</thread_safety>
2525 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
2526 doc: </routine>
2527 */
2528
2529 rt_public void eif_rt_xfree(register void * ptr)
2530 {
2531 #ifdef ISE_GC
2532 RT_GET_CONTEXT
2533 rt_uint_ptr r; /* For shifting purposes */
2534 union overhead *zone; /* The to-be-freed zone */
2535 rt_uint_ptr i; /* Index in hlist */
2536
2537 REQUIRE("ptr not null", ptr);
2538
2539 #ifdef LMALLOC_CHECK
2540 if (is_in_lm (ptr))
2541 fprintf (stderr, "Warning: try to eif_rt_xfree a malloc'ed ptr\n");
2542 #endif /* LMALLOC_CHECK */
2543 zone = ((union overhead *) ptr) - 1; /* Walk backward to header */
2544 r = zone->ov_size; /* Size of block */
2545
2546 	/* If the block is in the generation scavenge zone, nothing has to be done.
2547 * This is easy to detect because objects in the scavenge zone have the
2548 * B_BUSY bit reset. Testing for that bit will also enable the routine
2549 * to return immediately if the address of a free block is given.
2550 */
2551 if (!(r & B_BUSY))
2552 return; /* Nothing to be done */
2553
2554 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_LOCK);
2555
2556 /* Memory accounting */
2557 i = r & B_SIZE; /* Amount of memory released */
2558 rt_m_data.ml_used -= i; /* At least this is free */
2559 if (r & B_CTYPE) {
2560 rt_c_data.ml_used -= i;
2561 } else {
2562 rt_e_data.ml_used -= i;
2563 #ifdef MEM_STAT
2564 printf ("Eiffel: %ld used (-%ld), %ld total (eif_rt_xfree)\n",
2565 rt_e_data.ml_used, i, rt_e_data.ml_total);
2566 #endif
2567 }
2568
2569 #ifdef DEBUG
2570 dprintf(1)("eif_rt_xfree: on a %s %s block starting at 0x%lx (%d bytes)\n",
2571 (zone->ov_size & B_LAST) ? "last" : "normal",
2572 (zone->ov_size & B_CTYPE) ? "C" : "Eiffel",
2573 ptr, zone->ov_size & B_SIZE);
2574 flush;
2575 #endif
2576
2577 /* Now put back in the free list a memory block starting at `zone', of
2578 * size 'r' bytes.
2579 */
2580 xfreeblock(zone, r);
2581
2582 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2583
2584 #ifdef DEBUG
2585 dprintf(8)("eif_rt_xfree: %s %s block starting at 0x%lx holds %d bytes free\n",
2586 (zone->ov_size & B_LAST) ? "last" : "normal",
2587 (zone->ov_size & B_CTYPE) ? "C" : "Eiffel",
2588 ptr, zone->ov_size & B_SIZE);
2589 #endif
2590 #else
2591 eif_free(ptr);
2592 #endif
2593 }
2594
2595 /*
2596 doc: <routine name="eif_rt_xcalloc" export="shared">
2597 doc: <summary>Allocate space for 'nelem' elements of 'elsize' bytes and fill the new space with zeros. This is NEVER used by the Eiffel run time but it is provided to keep the C interface with the standard malloc package.</summary>
2598 doc: <param name="nelem" type="size_t">Number of elements to allocate.</param>
2599 doc: <param name="elsize" type="size_t">Size of element.</param>
2600 doc: <return>New block of memory of size nelem * elsize if successful, otherwise null pointer.</return>
2601 doc: <thread_safety>Safe</thread_safety>
2602 doc: <synchronization>Handled by `eif_rt_xmalloc'.</synchronization>
2603 doc: </routine>
2604 */
2605
2606 rt_shared EIF_REFERENCE eif_rt_xcalloc(size_t nelem, size_t elsize)
2607 {
2608 #ifdef ISE_GC
2609 size_t nbytes; /* Number of bytes requested */
2610 EIF_REFERENCE allocated; /* Address of new arena */
2611
2612 nbytes = nelem * elsize;
2613 allocated = eif_rt_xmalloc(nbytes, C_T, GC_ON); /* Ask for C space */
2614
2615 if (allocated != (EIF_REFERENCE) 0) {
2616 memset (allocated, 0, nbytes); /* Fill arena with zeros */
2617 }
2618
2619 return allocated; /* Pointer to new zero-filled zone */
2620 #else
2621 return (EIF_REFERENCE) eif_calloc(nelem, elsize);
2622 #endif
2623 }
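/* Illustration (a minimal sketch, not compiled into the runtime): how a C
 * client could use eif_rt_xcalloc as a drop-in for calloc.  The helper name
 * `demo_xcalloc_usage' is hypothetical.
 */
#if 0
static void demo_xcalloc_usage(void)
{
	EIF_REFERENCE buf = eif_rt_xcalloc(100, sizeof(EIF_INTEGER_32));
	if (buf) {
		/* `buf' points to 100 zero-initialized 32-bit slots. */
		eif_rt_xfree(buf);
	}
	/* A null result means the zero-filled arena could not be obtained. */
}
#endif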
2624
2625 #ifdef ISE_GC
2626 /*
2627 doc: <routine name="xfreeblock" export="private">
2628 doc: <summary>Put the memory block starting at 'zone' into the free_list. Note that zone points at the beginning of the memory block (beginning of the header) and not at an object data area.</summary>
2629 doc: <param name="zone" type="union overhead *">Zone to be freed.</param>
2630 doc: <param name="r" type="rt_uint_ptr">Size of block.</param>
2631 doc: <thread_safety>Not safe</thread_safety>
2632 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
2633 doc: </routine>
2634 */
2635
2636 rt_private void xfreeblock(union overhead *zone, rt_uint_ptr r)
2637 {
2638 RT_GET_CONTEXT
2639 rt_uint_ptr i; /* Index in hlist */
2640 #ifndef EIF_MALLOC_OPTIMIZATION
2641 rt_uint_ptr size; /* Size of the coalesced block */
2642 #endif
2643
2644 SIGBLOCK; /* Critical section starts */
2645
2646 /* The block will be inserted in the sorted hashed free list.
2647 * The current size is fetched from the header. If the block
2648 * is not the last one in a chunk, we check the next one. If
2649 * it happens to be free, then we do coalescing. And so on...
2650 */
2651
2652 #ifndef EIF_MALLOC_OPTIMIZATION
2653 size = coalesc(zone);
2654 while (size) { /* Perform coalescing as long as possible */
2655 r += size; /* And update size of block */
2656 size = coalesc(zone);
2657 }
2658 #endif /* EIF_MALLOC_OPTIMIZATION */
2659
2660 /* Now 'zone' points to the block to be freed, and 'r' is its
2661 * size (possibly that of a coalesced block). Reset all the
2662 * flags but B_LAST and B_CTYPE and put the block in the free list again.
2663 */
2664
2665 i = zone->ov_size & ~B_SIZE; /* Save flags */
2666 r &= B_SIZE; /* Clear all flags */
2667 zone->ov_size = r | (i & (B_LAST | B_CTYPE)); /* Save size B_LAST & type */
2668
2669 i = HLIST_INDEX(r);
2670 connect_free_list(zone, i); /* Insert block in free list */
2671
2672 SIGRESUME; /* Critical section ends */
2673 }
2674 #endif
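/* Illustration (a minimal sketch, not compiled into the runtime): the flag
 * bookkeeping performed by xfreeblock, shown on a simplified size word.  The
 * D_* mask values are hypothetical; the runtime's actual B_SIZE, B_LAST,
 * B_CTYPE and B_BUSY masks are defined in its headers.
 */
#if 0
#define D_SIZE  0x07FFFFFFu    /* Hypothetical: low bits hold the block size */
#define D_LAST  0x10000000u    /* Hypothetical: block is the last in its chunk */
#define D_CTYPE 0x20000000u    /* Hypothetical: block lives in a C chunk */
#define D_BUSY  0x40000000u    /* Hypothetical: block is allocated */

static unsigned demo_release(unsigned ov_size, unsigned coalesced_size)
{
	unsigned flags = ov_size & ~D_SIZE;          /* Save flag bits */
	unsigned size  = coalesced_size & D_SIZE;    /* Keep only size bits */
	/* Drop the busy bit, keep only "last" and "C chunk" information. */
	return size | (flags & (D_LAST | D_CTYPE));
}
#endif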
2675
2676 /*
2677 doc: <routine name="crealloc" return_type="void *" export="shared">
2678 doc: <summary>This is the C interface with xrealloc, which is fully compatible with the realloc() function in the standard C library (except that no storage compaction is done). The function simply calls xrealloc with garbage collection turned on.</summary>
2679 doc: <param name="ptr" type="void *">Address that will be reallocated.</param>
2680 doc: <param name="nbytes" type="size_t">New size in bytes of `ptr'.</param>
2681 doc: <return>New block of memory of size `nbytes', otherwise null pointer or throw an exception.</return>
2682 doc: <exception>"No more memory" exception</exception>
2683 doc: <thread_safety>Safe</thread_safety>
2684 doc: <synchronization>None required</synchronization>
2685 doc: </routine>
2686 */
2687
2688 rt_shared void * crealloc(void * ptr, size_t nbytes)
2689 {
2690
2691 #ifdef ISE_GC
2692 return xrealloc((EIF_REFERENCE) ptr, nbytes, GC_ON);
2693 #else
2694 return eif_realloc(ptr, nbytes);
2695 #endif
2696 }
2697
2698 /*
2699 doc: <routine name="xrealloc" return_type="EIF_REFERENCE" export="shared">
2700 doc: <summary>Modify the size of the block pointed to by 'ptr' to 'nbytes'. The 'storage compaction' mechanism mentioned in the old malloc man page is not implemented (i.e. the 'ptr' block has to be an allocated block, and not a freed block). If 'gc_flag' is GC_ON, then the GC is called when mallocing a new block. If GC_FREE is activated, then no free is performed: the GC will take care of the object (this is crucial when reallocing an object which is part of the moved set).</summary>
2701 doc: <param name="ptr" type="EIF_REFERENCE">Address that will be reallocated.</param>
2702 doc: <param name="nbytes" type="size_t">New size in bytes of `ptr'.</param>
2703 doc: <param name="gc_flag" type="int">New size in bytes of `ptr'.</param>
2704 doc: <return>New block of memory of size `nbytes', otherwise null pointer or throw an exception.</return>
2705 doc: <exception>"No more memory" exception</exception>
2706 doc: <thread_safety>Safe</thread_safety>
2707 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
2708 doc: </routine>
2709 */
2710
2711 rt_shared EIF_REFERENCE xrealloc(register EIF_REFERENCE ptr, size_t nbytes, int gc_flag)
2712 {
2713 RT_GET_CONTEXT
2714 EIF_GET_CONTEXT
2715 #ifdef ISE_GC
2716 rt_uint_ptr r; /* For shifting purposes */
2717 rt_uint_ptr i; /* Index in free list */
2718 union overhead *zone; /* The to-be-reallocated zone */
2719 EIF_REFERENCE safeptr = NULL; /* GC-safe pointer */
2720 size_t size, size_gain; /* Gain in size brought by coalesc */
2721
2722 REQUIRE("ptr not null", ptr);
2723
2724 #ifdef LMALLOC_CHECK
2725 if (is_in_lm (ptr))
2726 fprintf (stderr, "Warning: trying to xrealloc a malloc'ed pointer\n");
2727 #endif
2728 if (nbytes & ~B_SIZE)
2729 return (EIF_REFERENCE) 0; /* I guess we can't malloc more than 2^27 */
2730
2731 zone = HEADER(ptr);
2732
2733 #ifdef DEBUG
2734 dprintf(16)("realloc: reallocing block 0x%lx to be %d bytes\n",
2735 zone, nbytes);
2736 if (zone->ov_flags & EO_SPEC) {
2737 dprintf(16)("eif_realloc: special has count = %d, elemsize = %d\n",
2738 RT_SPECIAL_COUNT(ptr), RT_SPECIAL_ELEM_SIZE(ptr));
2739 if (zone->ov_flags & EO_REF)
2740 dprintf(16)("realloc: special has object references\n");
2741 }
2742 flush;
2743 #endif
2744
2745 /* First get the size of the block pointed to by 'ptr'. If, by
2746 * chance the size is the same or less than the current size,
2747 * we won't have to move the block. However, we may have to split
2748 * the block...
2749 */
2750
2751 r = zone->ov_size & B_SIZE; /* Size of block */
2752 i = (rt_uint_ptr) (nbytes % ALIGNMAX);
2753 if (i != 0)
2754 nbytes += ALIGNMAX - i; /* Pad nbytes */
2755
2756 if ((r == nbytes) || (r == nbytes + OVERHEAD)) { /* Same size, lucky us... */
2757 return ptr; /* That's all I wrote */
2758 }
2759
2760 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_LOCK);
2761 SIGBLOCK; /* Beginning of critical section */
2762
2763 if (r > (nbytes + OVERHEAD)) { /* New block is smaller */
2764
2765 #ifdef DEBUG
2766 dprintf(16)("realloc: new size is smaller (%d versus %d bytes)\n",
2767 nbytes, r);
2768 flush;
2769 #endif
2770
2771 r = eif_rt_split_block(zone, nbytes); /* Split block, r holds size */
2772 if (r == (rt_uint_ptr) -1) { /* If we did not split it */
2773 SIGRESUME; /* Exiting from critical section */
2774 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2775 return ptr; /* Leave block unchanged */
2776 }
2777
2778 rt_m_data.ml_used -= r + OVERHEAD; /* Data we lose in realloc */
2779 if (zone->ov_size & B_CTYPE)
2780 rt_c_data.ml_used -= r + OVERHEAD;
2781 else {
2782 #ifdef MEM_STAT
2783 printf ("Eiffel: %ld used (-%ld), %ld total (xrealloc)\n",
2784 rt_e_data.ml_used, r + OVERHEAD, rt_e_data.ml_total);
2785 #endif
2786 rt_e_data.ml_used -= r + OVERHEAD;
2787 }
2788
2789 #ifdef DEBUG
2790 dprintf(16)("realloc: shrinked block is now %d bytes (lost %d bytes)\n",
2791 zone->ov_size & B_SIZE, r + OVERHEAD);
2792 flush;
2793 #endif
2794
2795 SIGRESUME; /* Exiting from critical section */
2796 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2797 return ptr; /* Block address did not change */
2798 }
2799
2800 /* As we would like to avoid moving the block unless it is
2801 * absolutely necessary, we check to see if the block right after
2802 * us does not, by any chance, happen to be free.
2803 */
2804
2805 #ifdef EIF_MALLOC_OPTIMIZATION
2806 size_gain = 0;
2807 #else /* EIF_MALLOC_OPTIMIZATION */
2808 size_gain = 0;
2809 size = coalesc(zone);
2810 while (size) { /* Perform coalescing as long as possible */
2811 size_gain += size;
2812 size = coalesc(zone);
2813 }
2814 /* Update memory statistic. No need to handle the overheads,
2815 * it was already done in `coalesc'. */
2816 rt_m_data.ml_used += size_gain;
2817 if (zone->ov_size & B_CTYPE) {
2818 rt_c_data.ml_used += size_gain;
2819 } else {
2820 rt_e_data.ml_used += size_gain;
2821 }
2822 #endif /* EIF_MALLOC_OPTIMIZATION */
2823
2824 #ifdef DEBUG
2825 dprintf(16)("realloc: coalescing added %d bytes (block is now %d bytes)\n",
2826 size_gain, zone->ov_size & B_SIZE);
2827 flush;
2828 #endif
2829
2830 /* If the garbage collector is on and the object is a SPECIAL, then
2831 * after attempting a coalescing we must update the count/element-size
2832 * information because its location has changed: we copy it from its old location.
2833 * Of course, this matters only if coalescing has been done, which is
2834 * indicated by a non-zero return value from coalesc.
2835 * The reason this is needed is that some other objects might still be
2836 * referring to `ptr' and thus the new `ptr' should be valid even if later,
2837 * in xrealloc, we end up allocating a new SPECIAL object.
2838 */
2839 if ((size_gain != 0) && (gc_flag & GC_ON) && (zone->ov_flags & EO_SPEC)) {
2840 EIF_REFERENCE l_info; /* Pointer to new count/elemsize */
2841 l_info = RT_SPECIAL_DATA(ptr);
2842 memmove(l_info, ((char *) l_info - size_gain), RT_SPECIAL_DATA_SIZE);
2843 }
2844
2845 i = zone->ov_size & B_SIZE; /* Coalesc modified data in zone */
2846
2847 if (i > (nbytes + OVERHEAD)) { /* Total size is ok ? */
2848 r = i - r; /* Amount of memory over-used */
2849 CHECK("computation correct", size_gain == r);
2850 i = eif_rt_split_block(zone, nbytes); /* Split block, i holds size */
2851 if (i == (rt_uint_ptr) -1) { /* If we did not split it */
2852 SIGRESUME; /* Exiting from critical section */
2853 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2854 return ptr; /* Leave block unsplit */
2855 } else {
2856 /* Split occurred, return unused part and overhead as free for memory accounting. */
2857 rt_m_data.ml_used -= i + OVERHEAD;
2858 if (zone->ov_size & B_CTYPE) {
2859 rt_c_data.ml_used -= i + OVERHEAD;
2860 } else {
2861 rt_e_data.ml_used -= i + OVERHEAD;
2862 }
2863
2864 #ifdef DEBUG
2865 dprintf(16)("realloc: block is now %d bytes\n",
2866 zone->ov_size & B_SIZE);
2867 flush;
2868 #endif
2869
2870 SIGRESUME; /* Exiting from critical section */
2871 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2872 return ptr; /* Block address did not change */
2873 }
2874 }
2875
2876 SIGRESUME; /* End of critical section */
2877 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
2878
2879 /* If we come here, we have to use malloc/free. I use 'zone' as
2880 * a temporary variable, because in fact, pointers returned by
2881 * malloc are (union overhead *) cast to (EIF_REFERENCE) and also
2882 * because I do not want to declare another register variable.
2883 *
2884 * There is no need to update the rt_m_data accounting variables,
2885 * because malloc and free will do that for us. We allocate the
2886 * block from the correct free list, if at all possible.
2887 */
2888
2889 i = (rt_uint_ptr) ((HEADER(ptr)->ov_size & B_C) ? C_T : EIFFEL_T); /* Best type */
2890
2891 if (gc_flag & GC_ON) {
2892 safeptr = ptr;
2893 if (-1 == RT_GC_PROTECT(safeptr)) { /* Protect against moves */
2894 eraise("object reallocation", EN_MEM); /* No more memory */
2895 return (EIF_REFERENCE) 0; /* They ignored it */
2896 }
2897 }
2898
2899 zone = (union overhead *) eif_rt_xmalloc(nbytes, (int) i, gc_flag);
2900
2901 if (gc_flag & GC_ON) {
2902 CHECK("safeptr not null", safeptr);
2903 ptr = safeptr;
2904 RT_GC_WEAN(safeptr); /* Remove protection */
2905 }
2906
2907 /* Keep Eiffel flags. If GC was on, it might have run its cycle during
2908 * the reallocation process and the original object might have moved.
2909 * In that case, we take the flags from the forwarded address and we
2910 * do NOT free the object itself, but its forwarded copy!!
2911 */
2912
2913 if (zone != (union overhead *) 0) {
2914 CHECK("Correct size", (r & B_SIZE) <= (HEADER(zone)->ov_size & B_SIZE));
2915 memcpy (zone, ptr, r & B_SIZE); /* Move to new location */
2916 HEADER(zone)->ov_flags = HEADER(ptr)->ov_flags; /* Keep Eiffel flags */
2917 HEADER(zone)->ov_dftype = HEADER(ptr)->ov_dftype;
2918 HEADER(zone)->ov_dtype = HEADER(ptr)->ov_dtype;
2919 HEADER(zone)->ov_pid = HEADER(ptr)->ov_pid;
2920 if (!(gc_flag & GC_FREE)) { /* Will GC take care of free? */
2921 eif_rt_xfree(ptr); /* Free old location */
2922 } else {
2923 /* We cannot free the object here, but if the old object occupied more than
2924 * 20MB and more than a quarter of the memory currently in use we should force
2925 * a full collection: otherwise, if it turns out that the old object is no longer
2926 * referenced, it would not be collected and the allocator would not see this
2927 * huge free space. See eweasel test#exec107 for an example. */
2928 if ((r & B_SIZE) > 20971520) {
2929 if ((r & B_SIZE) > ((rt_m_data.ml_used + rt_m_data.ml_over) / 4)) {
2930 force_plsc++;
2931 }
2932 }
2933 }
2934 } else if (i == EIFFEL_T) /* Could not reallocate object */
2935 eraise("object reallocation", EN_MEM); /* No more memory */
2936
2937
2938 #ifdef DEBUG
2939 if (zone != (union overhead *) 0)
2940 dprintf(16)("realloc: malloced a new arena at 0x%lx (%d bytes)\n",
2941 zone, (zone-1)->ov_size & B_SIZE);
2942 else
2943 dprintf(16)("realloc: failed for %d bytes, garbage collector %s\n",
2944 nbytes, gc_flag == GC_OFF ? "off" : "on");
2945 flush;
2946 #endif
2947
2948 return (EIF_REFERENCE) zone; /* Pointer to new arena or 0 if failed */
2949 #else
2950 return (EIF_REFERENCE) eif_realloc(ptr, nbytes);
2951 #endif
2952 }
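/* Illustration (a minimal sketch, not compiled into the runtime): the
 * rounding applied to `nbytes' at the top of xrealloc, as a stand-alone
 * helper.  `DEMO_ALIGN' is a hypothetical stand-in for the runtime's
 * platform-defined ALIGNMAX.
 */
#if 0
#include <stddef.h>

#define DEMO_ALIGN 8    /* Hypothetical alignment */

static size_t demo_pad(size_t nbytes)
{
	size_t rest = nbytes % DEMO_ALIGN;
	if (rest != 0)
		nbytes += DEMO_ALIGN - rest;    /* Round up to the next boundary */
	return nbytes;
}
/* Under these assumptions: demo_pad(1) == 8, demo_pad(8) == 8, demo_pad(9) == 16. */
#endif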
2953
2954 #ifdef ISE_GC
2955 /*
2956 doc: <routine name="eif_rt_meminfo" return_type="struct emallinfo *" export="public">
2957 doc: <summary>Return the pointer to the static data held in rt_m_data. The user must not corrupt this data: doing so is harmless to malloc itself, but it may fool the garbage collector. `type' selects the kind of information wanted.</summary>
2958 doc: <param name="type" type="int">Type of memory (M_C or M_EIFFEL or M_ALL) to get info from.</param>
2959 doc: <return>Pointer to an internal structure used by `malloc.c'.</return>
2960 doc: <thread_safety>Not Safe</thread_safety>
2961 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
2962 doc: </routine>
2963 */
2964
2965 rt_public struct emallinfo *eif_rt_meminfo(int type)
2966 {
2967 switch(type) {
2968 case M_C:
2969 return &rt_c_data; /* Pointer to static data */
2970 case M_EIFFEL:
2971 return &rt_e_data; /* Pointer to static data */
2972 }
2973
2974 return &rt_m_data; /* Pointer to static data */
2975 }
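/* Illustration (a minimal sketch, not compiled into the runtime): querying
 * allocator statistics through eif_rt_meminfo.  The fields read below
 * (ml_used, ml_over, ml_total) are the ones updated throughout this file;
 * their exact types come from the runtime's definition of `struct emallinfo',
 * hence the casts.  `demo_meminfo_usage' is a hypothetical helper name.
 */
#if 0
static void demo_meminfo_usage(void)
{
	struct emallinfo *eiffel_info = eif_rt_meminfo(M_EIFFEL);
	struct emallinfo *all_info = eif_rt_meminfo(M_ALL);

	printf("Eiffel: %lu used, %lu overhead, %lu total\n",
		(unsigned long) eiffel_info->ml_used,
		(unsigned long) eiffel_info->ml_over,
		(unsigned long) eiffel_info->ml_total);
	printf("All:    %lu used of %lu total\n",
		(unsigned long) all_info->ml_used,
		(unsigned long) all_info->ml_total);
	/* Treat the structures as read-only: corrupting them may fool the GC. */
}
#endif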
2976
2977 /*
2978 doc: <routine name="eif_rt_split_block" return_type="rt_uint_ptr" export="shared">
2979 doc: <summary>The block 'selected' may be too big to hold only 'nbytes', so it is split and the new block is put in the free list. At the end, 'selected' will hold only 'nbytes'. From the accounting point of view, only the overhead is incremented (the split block is assumed to be already free). The function returns -1 if no split occurred, or the length of the split block otherwise (which means it must fit in a signed int, argh!!--RAM). Caller is responsible for issuing a SIGBLOCK before any call to this critical routine.</summary>
2980 doc: <param name="selected" type="union overhead *">Selected block from which we try to extract a block of `nbytes' bytes.</param>
2981 doc: <param name="nbytes" type="rt_uint_ptr">Size of block we should retur</param>
2982 doc: <return>Address of location of object of size `nbytes' in `selected'.</return>
2983 doc: <thread_safety>Not safe</thread_safety>
2984 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
2985 doc: </routine>
2986 */
2987
2988 rt_shared rt_uint_ptr eif_rt_split_block(register union overhead *selected, register rt_uint_ptr nbytes)
2989 {
2990 rt_uint_ptr flags; /* Flags of original block */
2991 rt_uint_ptr r; /* For shifting purposes */
2992 rt_uint_ptr i; /* Index in free list */
2993
2994 REQUIRE("nbytes less than selected size", (selected->ov_size & B_SIZE) >= nbytes);
2995
2996 /* Compute residual bytes. The flags bits should remain clear */
2997 i = selected->ov_size & B_SIZE; /* Hope it will fit in an int */
2998 r = (i - nbytes); /* Actual usable bytes */
2999
3000 /* Do the split only if possible.
3001 * Note: one could say that to avoid 0-sized block in the free list, we
3002 * could have `r <= OVERHEAD', but the issue is that in `gscavenge'
3003 * it would most likely cause a check violation because `gscavenge'
3004 * assumes that reallocation does not change the size of objects.
3005 */
3006 if (r < OVERHEAD)
3007 return (rt_uint_ptr) -1; /* Not enough space to split */
3008
3009 /* Check whether the block we split was the last one in a
3010 * chunk. If so, then the remaining will be the last, but
3011 * the 'selected' block is no longer the last one anyway.
3012 */
3013 flags = i = selected->ov_size; /* Optimize for speed, phew !! */
3014 i &= ~B_SIZE & ~B_LAST; /* Keep flags but clear B_LAST */
3015 selected->ov_size = i | nbytes; /* Block has been split */
3016
3017 /* Base address of new block (skip overhead and add nbytes) */
3018 selected = (union overhead *) (((EIF_REFERENCE) (selected+1)) + nbytes);
3019
3020 r -= OVERHEAD; /* This is the overhead for split block */
3021 selected->ov_size = r; /* Set the size of new block */
3022 rt_m_data.ml_over += OVERHEAD; /* Added overhead */
3023 if (i & B_CTYPE) /* Holds flags (without B_LAST) */
3024 rt_c_data.ml_over += OVERHEAD;
3025 else
3026 rt_e_data.ml_over += OVERHEAD;
3027
3028 /* Compute hash index */
3029 i = HLIST_INDEX(r);
3030
3031 /* If the block we split was the last one in the chunk, the new block is now
3032 * the last one. There is no need to clear the B_BUSY flag, as normally the
3033 * size fits in 27 bits, thus the upper 5 bits are clear--RAM.
3034 */
3035 r = selected->ov_size;
3036 if (flags & B_LAST)
3037 r |= B_LAST; /* Mark it last block */
3038 if (flags & B_CTYPE)
3039 r |= B_CTYPE; /* Propagate the information */
3040 selected->ov_size = r;
3041 connect_free_list(selected, i); /* Insert block in free list */
3042
3043 #ifdef DEBUG
3044 dprintf(32)("eif_rt_split_block: split %s %s block starts at 0x%lx (%d bytes)\n",
3045 (selected->ov_size & B_LAST) ? "last" : "normal",
3046 (selected->ov_size & B_CTYPE) ? "C" : "Eiffel",
3047 selected, selected->ov_size & B_SIZE);
3048 flush;
3049 #endif
3050
3051 return r & B_SIZE; /* Length of split block */
3052 }
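/* Illustration (a minimal sketch, not compiled into the runtime): the size
 * arithmetic behind eif_rt_split_block.  `DEMO_OVERHEAD' stands in for the
 * runtime's OVERHEAD (size of a block header); the helper name and return
 * convention mirror the routine above but are otherwise hypothetical.
 */
#if 0
#include <stddef.h>

#define DEMO_OVERHEAD 16    /* Hypothetical header size */

/* Return the usable size of the second block created by the split, or
 * (size_t) -1 when the leftover is too small to carry its own header.
 */
static size_t demo_split(size_t block_size, size_t nbytes)
{
	size_t residual = block_size - nbytes;    /* What is left past `nbytes' */
	if (residual < DEMO_OVERHEAD)
		return (size_t) -1;                   /* Not enough space to split */
	return residual - DEMO_OVERHEAD;          /* New block pays for its header */
}
#endif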
3053
3054 /*
3055 doc: <routine name="coalesc" return_type="rt_uint_ptr" export="private">
3056 doc: <summary>Given a zone to be freed, test whether we can do some coalescing with the next block, if it happens to be free. Overhead accounting is updated. It is up to the caller to put the coalesced block back to the free list (in case this is called by a free operation). It is up to the caller to issue a SIGBLOCK prior any call to this critical routine.</summary>
3057 doc: <param name="zone" type="union overhead *">Starting block from which we are trying to coalesc next block to it, if next block is free.</param>
3058 doc: <return>Number of new free bytes available (i.e. the size of the coalesced block plus the overhead) or 0 if no coalescing occurred.</return>
3059 doc: <thread_safety>Not safe</thread_safety>
3060 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
3061 doc: </routine>
3062 */
3063
3064 rt_private rt_uint_ptr coalesc(register union overhead *zone)
3065 {
3066 rt_uint_ptr r; /* For shifting purposes */
3067 rt_uint_ptr i; /* Index in hlist */
3068 union overhead *next; /* Pointer to next block */
3069
3070 i = zone->ov_size; /* Fetch size and flags */
3071 if (i & B_LAST)
3072 return 0; /* Block is the last one in chunk */
3073
3074 /* Compute address of next block */
3075 next = (union overhead *) (((EIF_REFERENCE) zone) + (i & B_SIZE) + OVERHEAD);
3076
3077 if ((next->ov_size & B_BUSY))
3078 return 0; /* Next block is not free */
3079
3080 r = next->ov_size & B_SIZE; /* Fetch its size */
3081 zone->ov_size = i + r + OVERHEAD; /* Update size (no overflow possible) */
3082 rt_m_data.ml_over -= OVERHEAD; /* Overhead freed */
3083 if (i & B_CTYPE) {
3084 rt_c_data.ml_over -= OVERHEAD;
3085 } else {
3086 rt_e_data.ml_over -= OVERHEAD;
3087 }
3088
3089 #ifdef DEBUG
3090 dprintf(1)("coalesc: coalescing with a %d bytes %s %s block at 0x%lx\n",
3091 r, (next->ov_size & B_LAST) ? "last" : "normal",
3092 (next->ov_size & B_CTYPE) ? "C" : "Eiffel", next);
3093 flush;
3094 #endif
3095
3096 /* Now the longest part... We have to find the block we've just merged and
3097 * remove it from the free list.
3098 */
3099
3100 /* First, compute the position in hash list */
3101 i = HLIST_INDEX(r);
3102 disconnect_free_list(next, i); /* Remove block from free list */
3103
3104 /* Finally, we set the new coalesced block correctly, checking for last
3105 * position. The other flags were kept from the original block.
3106 */
3107
3108 if ((i = next->ov_size) & B_LAST) /* Next block was the last one */
3109 zone->ov_size |= B_LAST; /* So coalesced is now the last one */
3110
3111 #ifdef DEBUG
3112 dprintf(1)("coalesc: coalescing provided a %s %d bytes %s block at 0x%lx\n",
3113 zone->ov_size & B_LAST ? "last" : "normal",
3114 zone->ov_size & B_SIZE,
3115 zone->ov_size & B_CTYPE ? "C" : "Eiffel", zone);
3116 flush;
3117 #endif
3118
3119 return (i & B_SIZE) + OVERHEAD; /* Number of coalesced free bytes */
3120 }
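/* Illustration (a minimal sketch, not compiled into the runtime): how the
 * next physical block is located from the current one, which is the core of
 * coalesc above.  The `demo_block' type is hypothetical and carries no flag
 * bits, unlike the runtime's `union overhead'.
 */
#if 0
#include <stddef.h>

typedef struct demo_block {
	size_t size;    /* Payload size; a header of this type precedes each payload */
} demo_block;

static demo_block *demo_next_block(demo_block *zone)
{
	/* Skip the current header plus its payload: this mirrors
	 * ((EIF_REFERENCE) zone) + (size & B_SIZE) + OVERHEAD above. */
	return (demo_block *) (((char *) zone) + sizeof(demo_block) + zone->size);
}
#endif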
3121
3122 /*
3123 doc: <routine name="connect_free_list" export="private">
3124 doc: <summary>The block 'zone' is inserted in the free list #i. It is up to the caller to ensure signal exceptions are blocked when entering in this critical routine.</summary>
3125 doc: <param name="zone" type="union overhead *">Block to insert in free list #`i'.</param>
3126 doc: <param name="i" type="rt_uint_ptr">Free list index to insert `zone'.</param>
3127 doc: <thread_safety>Not safe</thread_safety>
3128 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
3129 doc: </routine>
3130 */
3131
3132 rt_private void connect_free_list(register union overhead *zone, register rt_uint_ptr i)
3133 {
3134 #ifndef EIF_SORTED_FREE_LIST
3135 union overhead *p; /* To walk along free list */
3136 union overhead **hlist; /* The free list */
3137
3138 REQUIRE("enough space", (i == 0) || (zone->ov_size > sizeof(union overhead *)));
3139
3140 hlist = FREE_LIST(zone->ov_size & B_CTYPE); /* Get right list ptr */
3141 p = hlist[i];
3142 hlist[i] = zone;
3143 NEXT(zone) = p;
3144
3145 if (i != 0) {
3146 PREVIOUS(zone) = NULL;
3147 if (p) {
3148 PREVIOUS(p) = zone;
3149 }
3150 }
3151 #else
3152 union overhead *p, *last; /* To walk along free list */
3153 union overhead **hlist; /* The free list */
3154 union overhead **blist; /* Associated buffer cache. */
3155
3156 REQUIRE("enough space", (i == 0) || (zone->ov_size > sizeof(union overhead *)));
3157
3158 hlist = FREE_LIST(zone->ov_size & B_CTYPE); /* Get right list ptr */
3159 blist = BUFFER(hlist); /* And associated cache. */
3160
3161 p = hlist[i]; /* Head of list */
3162
3163 /* If list is empty or if first element of list is greater than `zone',
3164 * we simply need to add `zone' as first element. */
3165 if ((!p) || (zone < p)) {
3166 hlist[i] = zone;
3167 blist[i] = zone;
3168 NEXT(zone) = p;
3169 if (i != 0) {
3170 PREVIOUS(zone) = NULL;
3171 if (p) {
3172 PREVIOUS(p) = zone;
3173 }
3174 }
3175 return;
3176 }
3177
3178 CHECK("p not null", p);
3179
3180 /* We have to scan the list to find the right place for inserting our block.
3181 * With the help of the buffer cache, we may not have to scan all the list. */
3182 #ifndef EIF_SORTED_FREE_LIST_BACKWARD_TRAVERSAL
3183 p = blist [i];
3184 if (!p || (zone < p)) {
3185 /* We have to start from beginning. We are not doing any backward traversing. */
3186 p = hlist [i];
3187 }
3188 for (last = p, p = NEXT(p); p ; last = p, p = NEXT(p)) {
3189 if (zone < p) {
3190 NEXT(zone) = p;
3191 NEXT(last) = zone;
3192 if (i != 0) {
3193 PREVIOUS(zone) = last;
3194 PREVIOUS(p) = zone;
3195 }
3196 blist[i] = zone;
3197 return;
3198 }
3199 }
3200 /* We reached the last element, simply extend.
3201 * Do not change buffer location. */
3202 NEXT(last) = zone;
3203 NEXT(zone) = NULL;
3204 if (i != 0) {
3205 PREVIOUS(zone) = last;
3206 }
3207 #else
3208 /* Now perform forward traversal or backward traversal depending on position of `zone' to `blist [i]'.
3209 * `zone' cannot be inserted at the beginning of the list since it is already taken care of above. */
3210 p = blist [i];
3211 if ((zone > p) || (i == 0)) {
3212 if (i == 0) {
3213 /* It may be that `zone < p', and since list #0 keeps no back
3214 * pointers we have to start from the beginning. */
3215 p = hlist [i];
3216 }
3217 for (last = p, p = NEXT(p); p ; last = p, p = NEXT(p)) {
3218 if (zone < p) {
3219 NEXT(zone) = p;
3220 NEXT(last) = zone;
3221 if (i != 0) {
3222 PREVIOUS(zone) = last;
3223 PREVIOUS(p) = zone;
3224 }
3225 blist[i] = zone;
3226 return;
3227 }
3228 }
3229 /* We reached the last element, simply extend.
3230 * Do not change buffer location. */
3231 NEXT(last) = zone;
3232 NEXT(zone) = NULL;
3233 if (i != 0) {
3234 PREVIOUS(zone) = last;
3235 }
3236 } else {
3237 /* It looks like an infinite loop, but it is not because
3238 * the above code guarantees that `zone' cannot be inserted at
3239 * the beginning of the list. */
3240 for (last = p, p = PREVIOUS(p); ; last = p, p = PREVIOUS(p)) {
3241 if (zone > p) {
3242 NEXT(zone) = last;
3243 PREVIOUS(zone) = p;
3244 NEXT(p) = zone;
3245 PREVIOUS(last) = zone;
3246 blist[i] = zone;
3247 return;
3248 }
3249 }
3250 }
3251 #endif
3252 #endif
3253 }
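/* Illustration (a minimal sketch, not compiled into the runtime): address
 * ordered insertion into a singly linked free list, the technique used by
 * the EIF_SORTED_FREE_LIST branch above, minus the back pointers and the
 * per-list buffer cache.  `demo_node' and `demo_insert_sorted' are
 * hypothetical names.
 */
#if 0
typedef struct demo_node {
	struct demo_node *next;
} demo_node;

static void demo_insert_sorted(demo_node **head, demo_node *zone)
{
	demo_node *last, *p;

	if (!*head || zone < *head) {    /* Empty list, or new head */
		zone->next = *head;
		*head = zone;
		return;
	}
	/* Scan up to the first node whose address is greater than `zone'. */
	for (last = *head, p = (*head)->next; p; last = p, p = p->next) {
		if (zone < p)
			break;
	}
	zone->next = p;                  /* p may be NULL: append at the end */
	last->next = zone;
}
#endif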
3254
3255 /*
3256 doc: <routine name="disconnect_free_list" export="private">
3257 doc: <summary>Removes block pointed to by 'zone' from free list #i. It is up to the caller to ensure signal exceptions are blocked when entering in this critical routine.</summary>
3258 doc: <param name="zone" type="union overhead *">Block to remove from free list #`i'.</param>
3259 doc: <param name="i" type="rt_uint_ptr">Free list index to remove `zone'.</param>
3260 doc: <thread_safety>Not safe</thread_safety>
3261 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
3262 doc: </routine>
3263 */
3264
3265 rt_private void disconnect_free_list(register union overhead *zone, register rt_uint_ptr i)
3266 {
3267 #ifndef EIF_SORTED_FREE_LIST
3268 union overhead *p, *n; /* To walk along free list */
3269
3270 REQUIRE("enough space", (i == 0) || (zone->ov_size > sizeof(union overhead *)));
3271
3272 if (i != 0) {
3273 /* Get previous element of the list. */
3274 p = PREVIOUS(zone);
3275 n = NEXT(zone);
3276 if (p) {
3277 NEXT(p) = n;
3278 } else {
3279 /* There is no previous element, so we need to update the head of the list. */
3280 FREE_LIST(zone->ov_size & B_CTYPE)[i] = n;
3281 }
3282 if (n) {
3283 PREVIOUS(n) = p;
3284 }
3285 } else {
3286 union overhead **hlist; /* The free list */
3287 /* We have to perform a linear search because we do not
3288 * have enough space to store the back pointer. */
3289 hlist = FREE_LIST(zone->ov_size & B_CTYPE); /* Get right list ptr */
3290 p = hlist[i];
3291 if (zone != p) {
3292 for (; p; p = NEXT(p)) {
3293 if (NEXT(p) == zone) { /* Next block is ok */
3294 NEXT(p) = NEXT(zone); /* Remove from free list */
3295 return; /* Exit */
3296 }
3297 }
3298 } else {
3299 hlist[i] = NEXT(p);
3300 }
3301 }
3302 #else
3303 union overhead *p, *n; /* To walk along free list */
3304 union overhead **hlist; /* The free list */
3305 union overhead **blist; /* Associated buffer cache. */
3306
3307 REQUIRE("enough space", (i == 0) || (zone->ov_size > sizeof(union oveyrhead *)));
3308
3309 hlist = FREE_LIST(zone->ov_size & B_CTYPE); /* Get right list ptr */
3310 blist = BUFFER(hlist); /* And associated cache. */
3311
3312 if (i != 0) {
3313 /* Get previous element of the list. */
3314 p = PREVIOUS(zone);
3315 n = NEXT(zone);
3316 if (p) {
3317 NEXT(p) = n;
3318 blist[i] = p;
3319 } else {
3320 /* There is no previous element, so we need to update the head of the list. */
3321 hlist [i] = n;
3322 blist[i] = n;
3323 }
3324 if (n) {
3325 PREVIOUS(n) = p;
3326 }
3327 } else {
3328 /* We have to perform a linear search because we do not
3329 * have enough space to store the back pointer. */
3330 if (zone != hlist[i]) {
3331 p = blist[i]; /* Cached value = location of last op */
3332 if ((!p) || (zone <= p)) { /* Is the cache usable? */
3333 p = hlist[i]; /* No, it is before the cached location */
3334 }
3335 for (; p; p = NEXT(p)) {
3336 if (NEXT(p) == zone) { /* Next block is ok */
3337 NEXT(p) = NEXT(zone); /* Remove from free list */
3338 blist[i] = p; /* Last operation */
3339 break; /* Exit from loop */
3340 }
3341 }
3342 } else {
3343 hlist[i] = NEXT(hlist[i]);
3344 blist[i] = hlist[i];
3345 }
3346 }
3347 #endif
3348 }
3349
3350
3351 /*
3352 doc: <routine name="lxtract" export="shared">
3353 doc: <summary>Remove 'zone' from the free list. This routine is used by the garbage collector, and thus it is visible from the outside world, hence the cryptic name for portability--RAM. Note that the garbage collector performs with signal exceptions blocked.</summary>
3354 doc: <param name="zone" type="union overhead *">Block to remove from free list.</param>
3355 doc: <thread_safety>Not safe</thread_safety>
3356 doc: <synchronization>Safe under GC synchronization.</synchronization>
3357 doc: </routine>
3358 */
3359
3360 rt_shared void lxtract(union overhead *zone)
3361 {
3362 rt_uint_ptr r; /* For shifting purposes */
3363 rt_uint_ptr i; /* Index in H-list (free list) */
3364
3365 r = zone->ov_size & B_SIZE; /* Pure size of block */
3366 i = HLIST_INDEX(r); /* Compute hash index */
3367 disconnect_free_list(zone, i); /* Remove from free list */
3368 }
3369
3370 /*
3371 doc: <routine name="chunk_coalesc" return_type="rt_uint_ptr" export="shared">
3372 doc: <summary>Do block coalescing inside the chunk wherever possible.</summary>
3373 doc: <param name="c" type="struct chunk *">Block to remove from free list.</param>
3374 doc: <return>Size of the largest coalesced block or 0 if no coalescing occurred.</return>
3375 doc: <thread_safety>Not safe</thread_safety>
3376 doc: <synchronization>Safe under GC synchronization.</synchronization>
3377 doc: </routine>
3378 */
3379
3380 rt_shared rt_uint_ptr chunk_coalesc(struct chunk *c)
3381 {
3382 RT_GET_CONTEXT
3383 union overhead *zone; /* Malloc info zone */
3384 rt_uint_ptr flags; /* Malloc flags */
3385 rt_uint_ptr i; /* Index in free list */
3386 rt_uint_ptr r; /* For shifting purposes */
3387 rt_uint_ptr old_i; /* To save old index in free list */
3388 rt_uint_ptr max_size = 0; /* Size of biggest coalesced block */
3389
3390 SIGBLOCK; /* Take no risks with signals */
3391
3392 for (
3393 zone = (union overhead *) (c + 1); /* First malloc block */
3394 /* empty */;
3395 zone = (union overhead *)
3396 (((EIF_REFERENCE) (zone + 1)) + (flags & B_SIZE))
3397 ) {
3398 flags = zone->ov_size; /* Size and flags */
3399
3400 #ifdef DEBUG
3401 dprintf(32)("chunk_coalesc: %s block 0x%lx, %d bytes, %s\n",
3402 flags & B_LAST ? "last" : "normal",
3403 zone, flags & B_SIZE,
3404 flags & B_BUSY ?
3405 (flags & B_C ? "busy C" : "busy Eiffel") : "free");
3406 flush;
3407 #endif
3408
3409 if (flags & B_LAST)
3410 break; /* Last block reached */
3411 if (flags & B_BUSY) /* Block is busy */
3412 continue; /* Skip block */
3413
3414 /* In case we are coalescing a block, we have to store the
3415 * free list to which it belongs, so that we can remove it
3416 * if necessary (i.e. if its size changes).
3417 */
3418 r = flags & B_SIZE; /* Pure size */
3419 i = HLIST_INDEX(r);
3420
3421 while (!(zone->ov_size & B_LAST)) { /* Not last block */
3422 if (0 == coalesc(zone))
3423 break; /* Could not merge block */
3424 }
3425 flags = zone->ov_size; /* Possible coalesced block */
3426
3427 /* Check whether coalescing occurred. If so, we have to remove
3428 * 'zone' from list #i and then put it back to a possible
3429 * different list. Also update the size of the biggest coalesced
3430 * block. This value should help malloc in its decisions--RAM.
3431 */
3432 if (r != (flags & B_SIZE)) { /* Size changed */
3433
3434 /* Compute new list number for coalesced block */
3435 old_i = i; /* Save old index */
3436 r = flags & B_SIZE; /* Size of coalesced block */
3437 if (max_size < r)
3438 max_size = r; /* Update maximum size yielded */
3439 i = HLIST_INDEX(r);
3440
3441 /* Do the update only if necessary */
3442 if (old_i != i) {
3443 disconnect_free_list(zone, old_i); /* Remove (old list) */
3444 connect_free_list(zone, i); /* Add in new list */
3445 }
3446 }
3447
3448 if (flags & B_LAST) /* We reached last malloc block */
3449 break; /* Finished for that block */
3450 }
3451
3452 SIGRESUME; /* Restore signal handling */
3453
3454 #ifdef DEBUG
3455 dprintf(32)("chunk_coalesc: %d bytes is the largest coalesced block\n",
3456 max_size);
3457 flush;
3458 #endif
3459
3460 return max_size; /* Maximum size of coalesced block or 0 */
3461 }
3462
3463 /*
3464 doc: <routine name="full_coalesc" return_type="rt_uint_ptr" export="shared">
3465 doc: <summary>Same as `full_coalesc_unsafe' except it is a safe thread version.</summary>
3466 doc: <param name="chunk_type" type="int">Type of chunk on which we perform coalescing (C_T, EIFFEL_T or ALL_T).</param>
3467 doc: <return>Size of the largest block made available by coalescing, or 0 if no coalescing ever occurred.</return>
3468 doc: <thread_safety>Safe</thread_safety>
3469 doc: <synchronization>Through `eif_free_list_mutex'.</synchronization>
3470 doc: </routine>
3471 */
3472
3473 rt_shared rt_uint_ptr full_coalesc (int chunk_type)
3474 {
3475 RT_GET_CONTEXT
3476 rt_uint_ptr result;
3477 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_LOCK);
3478 result = full_coalesc_unsafe (chunk_type);
3479 GC_THREAD_PROTECT(EIF_FREE_LIST_MUTEX_UNLOCK);
3480 return result;
3481 }
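/* Illustration (a minimal sketch, not compiled into the runtime): a typical
 * call to the thread-safe entry point above.  `demo_full_coalesc_usage' and
 * the local `largest' are hypothetical.
 */
#if 0
static void demo_full_coalesc_usage(void)
{
	/* Coalesce adjacent free blocks in every chunk, C and Eiffel alike. */
	rt_uint_ptr largest = full_coalesc(ALL_T);
	/* `largest' is the biggest coalesced block, or 0 if nothing was merged. */
	(void) largest;
}
#endif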
3482
3483 /*
3484 doc: <routine name="full_coalesc_unsafe" return_type="rt_uint_ptr" export="private">
3485 doc: <summary>Walks through the designated chunk list (type 'chunk_type') and do block coalescing wherever possible.</summary>
3486 doc: <param name="chunk_type" type="int">Type of chunk on which we perform coalescing (C_T, EIFFEL_T or ALL_T).</param>
3487 doc: <return>Size of the largest block made available by coalescing, or 0 if no coalescing ever occurred.</return>
3488 doc: <thread_safety>Not safe</thread_safety>
3489 doc: <synchronization>Safe if caller holds `eif_free_list_mutex' or is under GC synchronization.</synchronization>
3490 doc: </routine>
3491 */
3492
3493 rt_private rt_uint_ptr full_coalesc_unsafe(int chunk_type)
3494 {
3495 struct chunk *c; /* To walk along chunk list */
3496 rt_uint_ptr max_size = 0; /* Size of biggest coalesced block */
3497 rt_uint_ptr max_coalesced; /* Size of coalesced block in a chunk */
3498
3499 /* Choose the correct head for the list depending on the memory type.
3500 * If ALL_T is used, then the whole memory is scanned and coalesced.
3501 */
3502
3503 switch (chunk_type) {
3504 case C_T: /* Only walk through the C chunks */
3505 c = cklst.cck_head;
3506 break;
3507 case EIFFEL_T: /* Only walk through the Eiffel chunks */
3508 c = cklst.eck_head;
3509 break;
3510 case ALL_T: /* Walk through all the memory */
3511 c = cklst.ck_head;
3512 break;
3513 default:
3514 return (rt_uint_ptr) -1; /* Invalid request */
3515 }
3516
3517 for (
3518 /* empty */;
3519 c != (struct chunk *) 0;
3520 c = chunk_type == ALL_T ? c->ck_next : c->ck_lnext
3521 ) {
3522
3523 #ifdef DEBUG
3524 dprintf(1+32)("full_coalesc_unsafe: entering %s chunk 0x%lx (%d bytes)\n",
3525 c->ck_type == C_T ? "C" : "Eiffel", c, c->ck_length);
3526 flush;
3527 #endif
3528
3529 max_coalesced = chunk_coalesc(c); /* Deal with the chunk */
3530 if (max_coalesced > max_size) /* Keep track of largest block */
3531 max_size = max_coalesced;
3532 }
3533
3534 #ifdef DEBUG
3535 dprintf(1+8+32)("full_coalesc_unsafe: %d bytes is the largest coalesced block\n",
3536 max_size);
3537 flush;
3538 #endif
3539
3540 return max_size; /* Maximum size of coalesced block or 0 */
3541 }
3542
3543 /*
3544 doc: <routine name="trigger_smart_gc_cycle" return_type="int" export="private">
3545 doc: <summary>Launch a GC cycle. If we have allocated more than `th_alloc' then we perform an automatic collection, otherwise we try a simple generation scavenging. If we are under the control of a complete synchronization, we do not try to acquire the mutex. This is useful for `retrieve' which blocks all threads: if one of them were blocked here on `trigger_gc_mutex', we would end up with a deadlock even though holding `trigger_gc_mutex' was not actually needed.</summary>
3546 doc: <return>1 if collection was successful, 0 otherwise.</return>
3547 doc: <thread_safety>Safe</thread_safety>
3548 doc: <synchronization>Through `eiffel_usage_mutex' for getting value of `eiffel_usage'.</synchronization>
3549 doc: </routine>
3550 */
3551
3552 rt_private int trigger_smart_gc_cycle (void)
3553 {
3554 RT_GET_CONTEXT
3555 int result = 0;
3556
3557 #ifdef EIF_THREADS
3558 if (gc_thread_status == EIF_THREAD_GC_RUNNING) {
3559 #endif
3560 if (eiffel_usage > th_alloc) {
3561 if (0 == acollect()) {
3562 result = 1;
3563 }
3564 } else if (0 == collect()) {
3565 result = 1;
3566 }
3567 return result;
3568 #ifdef EIF_THREADS
3569 } else if (thread_can_launch_gc) {
3570 rt_uint_ptr e_usage;
3571 EIF_ENTER_C;
3572 TRIGGER_GC_LOCK;
3573 EIFFEL_USAGE_MUTEX_LOCK;
3574 e_usage = eiffel_usage;
3575 EIFFEL_USAGE_MUTEX_UNLOCK;
3576 if (e_usage > th_alloc) { /* Above threshold */
3577 if (0 == acollect()) { /* Perform automatic collection */
3578 result = 1;
3579 }
3580 } else if (0 == collect()) { /* Simple generation scavenging */
3581 result = 1;
3582 }
3583 TRIGGER_GC_UNLOCK;
3584 EIF_EXIT_C;
3585 RTGC;
3586 return result;
3587 } else {
3588 return result;
3589 }
3590 #endif
3591 }
3592
3593 /*
3594 doc: <routine name="trigger_gc_cycle" return_type="int" export="private">
3595 doc: <summary>Launch a GC cycle. If we have allocated more than `th_alloc' then we perform an automatic collection. If we are under the control of a complete synchronization, we do not try to acquire the mutex. This is useful for `retrieve' which blocks all threads: if one of them were blocked here on `trigger_gc_mutex', we would end up with a deadlock even though holding `trigger_gc_mutex' was not actually needed.</summary>
3596 doc: <return>1 if collection was successful, 0 otherwise</return>
3597 doc: <thread_safety>Safe</thread_safety>
3598 doc: <synchronization>Through `eiffel_usage_mutex' for getting value of `eiffel_usage'.</synchronization>
3599 doc: </routine>
3600 */
3601
3602 rt_private int trigger_gc_cycle (void)
3603 {
3604 RT_GET_CONTEXT
3605 int result = 0;
3606 #ifdef EIF_THREADS
3607 if (gc_thread_status == EIF_THREAD_GC_RUNNING) {
3608 #endif
3609 if (eiffel_usage > th_alloc) {
3610 if (0 == acollect()) {
3611 result = 1;
3612 }
3613 }
3614 return result;
3615 #ifdef EIF_THREADS
3616 } else if (thread_can_launch_gc) {
3617 rt_uint_ptr e_usage;
3618 EIF_ENTER_C;
3619 TRIGGER_GC_LOCK;
3620 EIFFEL_USAGE_MUTEX_LOCK;
3621 e_usage = eiffel_usage;
3622 EIFFEL_USAGE_MUTEX_UNLOCK;
3623 if (e_usage > th_alloc) { /* Above threshold */
3624 if (0 == acollect()) { /* Perform automatic collection */
3625 result = 1;
3626 }
3627 }
3628 TRIGGER_GC_UNLOCK;
3629 EIF_EXIT_C;
3630 RTGC;
3631 return result;
3632 } else {
3633 return 0;
3634 }
3635 #endif
3636 }
3637
3638 /*
3639 doc: <routine name="malloc_from_zone" return_type="EIF_REFERENCE" export="private">
3640 doc: <summary>Try to allocate 'nbytes' in the scavenge zone. Returns a pointer to the object's location or a null pointer if an error occurred.</summary>
3641 doc: <param name="nbytes" type="rt_uint_ptr">Size in bytes of zone to allocated.</param>
3642 doc: <return>Address of a block in scavenge zone if successful, null pointer otherwise.</return>
3643 doc: <thread_safety>Safe</thread_safety>
3644 doc: <synchronization>Through `eif_gc_gsz_mutex'.</synchronization>
3645 doc: </routine>
3646 */
3647
3648 rt_private EIF_REFERENCE malloc_from_zone(rt_uint_ptr nbytes)
3649 {
3650 RT_GET_CONTEXT
3651 EIF_REFERENCE object; /* Address of the allocated object */
3652
3653 REQUIRE("Scavenging enabled", gen_scavenge == GS_ON);
3654 REQUIRE("Has from zone", sc_from.sc_arena);
3655 REQUIRE("nbytes properly padded", (nbytes % ALIGNMAX) == 0);
3656
3657 /* Allocating from a scavenging zone is easy and fast. It's basically a
3658 * pointer update... However, if the level in the 'from' zone reaches
3659 * the watermark GS_WATERMARK, we return NULL immediately; it is up to
3660 * the caller to decide whether or not to run the generation scavenging.
3661 * The tenuring threshold for the next scavenge is computed to make the level
3662 * of occupation go below the watermark at the next collection so that
3663 * the next call to `malloc_from_zone' is most likely to succeed.
3664 *
3665 * Also, if there is not enough space in the scavenge zone, we need to return
3666 * immediately.
3667 */
3668 GC_THREAD_PROTECT(EIF_GC_GSZ_LOCK);
3669
3670 object = sc_from.sc_top; /* First eif_free location */
3671
3672 if ((object >= sc_from.sc_mark) || ((ALIGNMAX + nbytes + object) > sc_from.sc_end)) {
3673 GC_THREAD_PROTECT(EIF_GC_GSZ_UNLOCK);
3674 return NULL;
3675 }
3676
3677 SIGBLOCK; /* Block signals */
3678 sc_from.sc_top += nbytes + ALIGNMAX; /* Update free-location pointer */
3679 ((union overhead *) object)->ov_size = nbytes; /* All flags cleared */
3680
3681 /* No memory-usage accounting needs to be done here. The memory used by the two
3682 * scavenge zones is already considered to be in full use.
3683 */
3684
3685 #ifdef DEBUG
3686 dprintf(4)("malloc_from_zone: returning block starting at 0x%lx (%d bytes)\n",
3687 (EIF_REFERENCE) (((union overhead *) object ) + 1),
3688 ((union overhead *) object)->ov_size);
3689 flush;
3690 #endif
3691
3692 SIGRESUME; /* Restore signal handling */
3693 GC_THREAD_PROTECT(EIF_GC_GSZ_UNLOCK);
3694
3695 ENSURE ("Allocated size big enough", nbytes <= (((union overhead *) object)->ov_size & B_SIZE));
3696
3697 return (EIF_REFERENCE) (((union overhead *) object ) + 1); /* Free data space */
3698 }
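/* Illustration (a minimal sketch, not compiled into the runtime): the
 * bump-pointer allocation that makes malloc_from_zone fast, on a simplified
 * zone descriptor.  The `demo_zone' type and its fields are hypothetical;
 * the runtime uses `struct sc_zone' with sc_top, sc_mark and sc_end.
 */
#if 0
#include <stddef.h>

typedef struct demo_zone {
	char *top;     /* First free byte */
	char *mark;    /* Watermark: refuse allocations past this point */
	char *end;     /* First byte beyond the zone */
} demo_zone;

static void *demo_zone_alloc(demo_zone *z, size_t nbytes, size_t header)
{
	char *object = z->top;
	if (object >= z->mark || object + header + nbytes > z->end)
		return NULL;                  /* Caller decides whether to scavenge */
	z->top += header + nbytes;        /* The allocation is a pointer bump */
	return object + header;           /* Data space starts after the header */
}
#endif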
3699
3700 /*
3701 doc: <routine name="create_scavenge_zones" export="shared">
3702 doc: <summary>Attempt creation of two scavenge zones: the 'from' zone and the 'to' zone. If it is successful, `gen_scavenge' is set to `GS_ON', otherwise the value remains unchanged and is `GS_OFF'. Upon success, the routine updates the structures accordingly.</summary>
3703 doc: <thread_safety>Not safe</thread_safety>
3704 doc: <synchronization>Safe when under GC synchronization or during run-time initialization.</synchronization>
3705 doc: </routine>
3706 */
3707
3708 rt_shared void create_scavenge_zones(void)
3709 {
3710 REQUIRE("Not already allocated", (sc_from.sc_arena == NULL) && (sc_to.sc_arena == NULL));
3711 REQUIRE("Generation scavenging off", gen_scavenge == GS_OFF);
3712
3713 /* Initialize `gen_scavenge' to GS_OFF in case it fails to allocate the scavenge zones. */
3714 if (cc_for_speed) {
3715 RT_GET_CONTEXT
3716 EIF_REFERENCE from; /* From zone */
3717 EIF_REFERENCE to; /* To zone */
3718
3719 /* I think it's best to allocate the spaces in the C list. Firstly, this
3720 * space must never be moved, secondly it should never be reclaimed,
3721 * except when we are low on memory, but then it does not really matter.
3722 * Lastly, the garbage collector will simply ignore the block, which is
3723 * just fine--RAM.
3724 */
3725 from = eif_rt_xmalloc(eif_scavenge_size, C_T, GC_OFF);
3726 if (from) {
3727 to = eif_rt_xmalloc(eif_scavenge_size, C_T, GC_OFF);
3728 if (!to) {
3729 eif_rt_xfree(from);
3730 } else {
3731 /* Now set up the zones */
3732 SIGBLOCK; /* Critical section */
3733 sc_from.sc_arena = (char *) ((union overhead *) from); /* Base address */
3734 sc_to.sc_arena = (char *) ((union overhead *) to);
3735 sc_from.sc_top = sc_from.sc_arena; /* First free address */
3736 sc_to.sc_top = sc_to.sc_arena;
3737 sc_from.sc_mark = from + GS_WATERMARK; /* Water mark (nearly full) */
3738 sc_to.sc_mark = to + GS_WATERMARK;
3739 sc_from.sc_end = from + eif_scavenge_size; /* First free location beyond */
3740 sc_to.sc_end = to + eif_scavenge_size;
3741 SIGRESUME; /* End of critical section */
3742
3743 gen_scavenge = GS_ON; /* Generation scavenging activated */
3744 }
3745 }
3746 }
3747
3748 ENSURE("Correct_value", (gen_scavenge == GS_OFF) || (gen_scavenge == GS_ON));
3749 ENSURE("Created", !(gen_scavenge == GS_ON) || (sc_from.sc_arena && sc_to.sc_arena));
3750 ENSURE("Not created", (gen_scavenge == GS_ON) || (!sc_from.sc_arena && !sc_to.sc_arena));
3751 }
3752
3753 /*
3754 doc: <routine name="explode_scavenge_zones" export="private">
3755 doc: <summary>Take a scavenge zone and destroy it letting all the objects held in it go back under the free-list management scheme. The memory accounting has to be done exactely: all the zone was handled as being in use for the statistics, but now we have to account for the overhead used by each stored object...</summary>
3756 doc: <param name="sc" type="struct sc_zone *">Zone to be freed. Usually the from zone of the 2 scavenge zones.</param>
3757 doc: <thread_safety>Not safe</thread_safety>
3758 doc: <synchronization>Safe when under GC synchronization.</synchronization>
3759 doc: </routine>
3760 */
3761
3762 rt_private void explode_scavenge_zone(struct sc_zone *sc)
3763 {
3764 RT_GET_CONTEXT
3765 rt_uint_ptr flags; /* Store some flags */
3766 union overhead *zone; /* Malloc info zone */
3767 union overhead *next; /* Next zone to be studied */
3768 rt_uint_ptr size = 0; /* Flags to be OR'ed on each object */
3769 EIF_REFERENCE top = sc->sc_top; /* Top in scavenge space */
3770 rt_uint_ptr new_objects_overhead = 0; /* Overhead size corresponding to new objects
3771 which have now a life of their own outside
3772 the scavenge zone. */
3773
3774 next = (union overhead *) sc->sc_arena;
3775 if (next == (union overhead *) 0)
3776 return;
3777 zone = next - 1;
3778 flags = zone->ov_size;
3779
3780 if (flags & B_CTYPE) /* This is the usual case */
3781 size |= B_CTYPE; /* Scavenge zone is in a C chunk */
3782
3783 size |= B_BUSY; /* Every released object is busy */
3784
3785 /* Loop over the zone and build for each object a header that would have
3786 * been given to it if it had been malloc'ed in from the free-list.
3787 */
3788
3789 SIGBLOCK; /* Beginning of critical section */
3790
3791 for (zone = next; (EIF_REFERENCE) zone < top; zone = next) {
3792
3793 /* Set the flags for the new block and compute the location of
3794 * the next object in the space.
3795 */
3796 flags = zone->ov_size;
3797 next = (union overhead *) (((EIF_REFERENCE) zone) + (flags & B_SIZE) + OVERHEAD);
3798 zone->ov_size = flags | size;
3799
3800 /* The released object belongs to the new generation so add it
3801 * to the moved_set. If it is not possible (stack full), abort. We
3802 * do that because there should be room as the 'to' space should have
3803 * been released before exploding the 'from' space, thus leaving
3804 * room for stack growth.
3805 */
3806 if (-1 == epush(&moved_set, (EIF_REFERENCE) (zone + 1)))
3807 enomem(MTC_NOARG); /* Critical exception */
3808 zone->ov_flags |= EO_NEW; /* Released object is young */
3809 new_objects_overhead += OVERHEAD;
3810 }
3811
3812 #ifdef MAY_PANIC
3813 /* Consistency check. We must have reached the top of the zone */
3814 if ((EIF_REFERENCE) zone != top)
3815 eif_panic("scavenge zone botched");
3816 #endif
3817
3818 /* If we did not reach the end of the scavenge zone, then there is at
3819 * least some room for one header. We're going to fake a malloc block and
3820 * call eif_rt_xfree() to release it.
3821 */
3822
3823 if ((EIF_REFERENCE) zone != sc->sc_end) {
3824
3825 /* Everything from 'zone' to the end of the scavenge space is now free.
3826 * Set up a normal busy block before calling eif_rt_xfree. If the scavenge zone
3827 * was the last block in the chunk, then this remaining space is the
3828 * last in the chunk too, so set the flags accordingly.
3829 */
3830
3831 CHECK("new size fits on B_SIZE", ((sc->sc_end - (EIF_REFERENCE) (zone + 1)) & ~B_SIZE) == 0);
3832
3833 zone->ov_size = size | (sc->sc_end - (EIF_REFERENCE) (zone + 1));
3834 next = HEADER(sc->sc_arena);
3835 if (next->ov_size & B_LAST) /* Scavenge zone was a last block ? */
3836 zone->ov_size |= B_LAST; /* So is it for the remainder */
3837
3838 #ifdef DEBUG
3839 dprintf(1)("explode_scavenge_zone: remainder is a %s%d bytes bloc\n",
3840 zone->ov_size & B_LAST ? "last " : "", zone->ov_size & B_SIZE);
3841 flush;
3842 #endif
3843
3844 eif_rt_xfree((EIF_REFERENCE) (zone + 1)); /* Put remainder back to free-list */
3845 new_objects_overhead += OVERHEAD;
3846 } else {
3847 next = HEADER(sc->sc_arena); /* Point to the header of the arena */
3848 }
3849
3850 /* Freeing the header of the arena (the scavenge zone) is easy. We fake a
3851 * zero length block and call free on it. Note that this does not change
3852 * statistics at all: this overhead was already accounted for and it remains
3853 * an overhead.
3854 */
3855
3856 next->ov_size = size; /* A zero length block */
3857 eif_rt_xfree((EIF_REFERENCE) (next + 1)); /* Free header of scavenge zone */
3858
3859 /* Update the statistics: we released 'new_objects_overhead', so we created that
3860 * amount of overhead. Note that we do have to change the amount of
3861 * memory used as the above call to `eif_rt_xfree' to mark the remaining
3862 * zone as free did not take into account the `new_objects_overhead' that was
3863 * added (it considered it was a single block).
3864 */
3865
3866 rt_m_data.ml_used -= new_objects_overhead;
3867 rt_m_data.ml_over += new_objects_overhead;
3868 if (size & B_CTYPE) { /* Scavenge zone in a C chunk */
3869 rt_c_data.ml_used -= new_objects_overhead;
3870 rt_c_data.ml_over += new_objects_overhead;
3871 } else { /* Scavenge zone in an Eiffel chunk */
3872 rt_e_data.ml_used -= new_objects_overhead;
3873 rt_e_data.ml_over += new_objects_overhead;
3874 }
3875
3876 SIGRESUME; /* End of critical section */
3877 }
3878
3879 /*
3880 doc: <routine name="sc_stop" export="shared">
3881 doc: <summary>Stop the scavenging process by freeing the zones. In MT mode, it forces a GC synchronization as some threads might still be running and trying to allocate in the scavenge zone.</summary>
3882 doc: <thread_safety>Safe</thread_safety>
3883 doc: <synchronization>Through GC synchronization.</synchronization>
3884 doc: </routine>
3885 */
3886
3887 rt_shared void sc_stop(void)
3888 {
3889 RT_GET_CONTEXT
3890 GC_THREAD_PROTECT(eif_synchronize_gc(rt_globals));
3891 gen_scavenge = GS_OFF; /* Generation scavenging is off */
3892 eif_rt_xfree(sc_to.sc_arena); /* This one is completely eif_free */
3893 explode_scavenge_zone(&sc_from); /* While this one has to be exploded */
3894 st_reset (&memory_set);
3895 /* Reset values to their default value */
3896 memset (&sc_to, 0, sizeof(struct sc_zone));
3897 memset (&sc_from, 0, sizeof(struct sc_zone));
3898 GC_THREAD_PROTECT(eif_unsynchronize_gc(rt_globals));
3899 }
3900 #endif
3901
3902 /*
3903 doc: <routine name="eif_box" return_type="EIF_REFERENCE" export="public">
3904 doc: <summary>Create a boxed version of a basic value.</summary>
3905 doc: <param name="v" type="EIF_TYPED_VALUE">Value to be boxed.</param>
3906 doc: <return>A newly allocated object if successful.</return>
3907 doc: <exception>"No more memory" when it fails</exception>
3908 doc: <thread_safety>Safe</thread_safety>
3909 doc: <synchronization>Done by different allocators to whom we request memory</synchronization>
3910 doc: </routine>
3911 */
3912
3913 rt_public EIF_REFERENCE eif_box (EIF_TYPED_VALUE v)
3914 {
3915 EIF_REFERENCE Result;
3916 switch (v.type & SK_HEAD)
3917 {
3918 case SK_BOOL: Result = RTLN(egc_bool_dtype); * Result = v.it_b; break;
3919 case SK_CHAR8: Result = RTLN(egc_char_dtype); * Result = v.it_c1; break;
3920 case SK_CHAR32: Result = RTLN(egc_wchar_dtype); *(EIF_CHARACTER_32 *) Result = v.it_c4; break;
3921 case SK_UINT8: Result = RTLN(egc_uint8_dtype); *(EIF_NATURAL_8 *) Result = v.it_n1; break;
3922 case SK_UINT16: Result = RTLN(egc_uint16_dtype); *(EIF_NATURAL_16 *) Result = v.it_n2; break;
3923 case SK_UINT32: Result = RTLN(egc_uint32_dtype); *(EIF_NATURAL_32 *) Result = v.it_n4; break;
3924 case SK_UINT64: Result = RTLN(egc_uint64_dtype); *(EIF_NATURAL_64 *) Result = v.it_n8; break;
3925 case SK_INT8: Result = RTLN(egc_int8_dtype); *(EIF_INTEGER_8 *) Result = v.it_i1; break;
3926 case SK_INT16: Result = RTLN(egc_int16_dtype); *(EIF_INTEGER_16 *) Result = v.it_i2; break;
3927 case SK_INT32: Result = RTLN(egc_int32_dtype); *(EIF_INTEGER_32 *) Result = v.it_i4; break;
3928 case SK_INT64: Result = RTLN(egc_int64_dtype); *(EIF_INTEGER_64 *) Result = v.it_i8; break;
3929 case SK_REAL32: Result = RTLN(egc_real32_dtype); *(EIF_REAL_32 *) Result = v.it_r4; break;
3930 case SK_REAL64: Result = RTLN(egc_real64_dtype); *(EIF_REAL_64 *) Result = v.it_r8; break;
3931 case SK_POINTER: Result = RTLN(egc_point_dtype); *(EIF_POINTER *) Result = v.it_p; break;
3932 case SK_REF: Result = v.it_r; break;
3933 default:
3934 Result = NULL; /* To avoid C warnings. */
3935 eif_panic("illegal value type");
3936 }
3937 return Result;
3938 }
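/* Illustration (a minimal sketch, not compiled into the runtime): boxing a
 * basic value with eif_box.  The `type' and `it_i4' fields are the ones used
 * in the switch above; the exact definition of EIF_TYPED_VALUE lives in the
 * runtime headers.  `demo_box_usage' is a hypothetical helper name.
 */
#if 0
static void demo_box_usage(void)
{
	EIF_TYPED_VALUE v;
	EIF_REFERENCE boxed;

	v.type = SK_INT32;     /* Tag selecting the INTEGER_32 variant */
	v.it_i4 = 42;          /* 32-bit integer payload */
	boxed = eif_box(v);    /* Newly allocated INTEGER_32 object holding 42 */
	(void) boxed;
}
#endif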
3939
3940 /*
3941 doc: <routine name="eif_set" return_type="EIF_REFERENCE" export="private">
3942 doc: <summary>Set an Eiffel object for use: reset the zone with zeros, and try to record the object inside the moved set, if necessary. The function returns the address of the object (it may move if a GC cycle is raised).</summary>
3943 doc: <param name="object" type="EIF_REFERENCE">Object to setup.</param>
3944 doc: <param name="flags" type="uint16">Full dynamic type of object to setup.</param>
3945 doc: <param name="dftype" type="EIF_TYPE_INDEX">Full dynamic type of object to setup.</param>
3946 doc: <param name="dtype" type="EIF_TYPE_INDEX">Dynamic type of object to setup.</param>
3947 doc: <return>New value of `object' as this routine can trigger a GC cycle</return>
3948 doc: <thread_safety>Safe</thread_safety>
3949 doc: <synchronization>None required</synchronization>
3950 doc: </routine>
3951 */
3952
3953 rt_private EIF_REFERENCE eif_set(EIF_REFERENCE object, uint16 flags, EIF_TYPE_INDEX dftype, EIF_TYPE_INDEX dtype)
3954 {
3955 RT_GET_CONTEXT
3956 union overhead *zone = HEADER(object); /* Object's header */
3957 void (*init)(EIF_REFERENCE, EIF_REFERENCE); /* The optional initialization */
3958
3959 SIGBLOCK; /* Critical section */
3960 memset (object, 0, zone->ov_size & B_SIZE); /* All set with zeros */
3961
3962 #ifdef WITH_OBJECT_IDENTIFIER
3963 eif_object_id_count++;
3964 zone->ov_id = (rt_uint_ptr)eif_object_id_count;
3965 #endif
3966
3967 #ifdef EIF_TID
3968 #ifdef EIF_THREADS
3969 zone->ovs_tid = (rt_uint_ptr) eif_thr_context->thread_id; /* tid from eif_thr_context */
3970 #else
3971 zone->ovs_tid = (rt_uint_ptr) 0; /* In non-MT-mode, it is NULL by convention */
3972 #endif /* EIF_THREADS */
3973 #endif /* EIF_TID */
3974
3975 /* Set SCOOP Processor if available. */
3976 #ifdef EIF_THREADS
3977 zone->ov_pid = (EIF_SCP_PID)(eif_thr_context->logical_id != -1 ? eif_thr_context->logical_id : 0);
3978 #else
3979 zone->ov_pid = (EIF_SCP_PID)0;
3980 #endif
3981 zone->ov_size &= ~B_C; /* Object is an Eiffel one */
3982 zone->ov_flags = flags;
3983 zone->ov_dftype = dftype; /* Set Full dynamic type */
3984 zone->ov_dtype = dtype; /* Set dynamic type */
3985 if (EIF_IS_EXPANDED_TYPE(System (dtype))) {
3986 zone->ov_flags |= EO_EXP | EO_REF;
3987 }
3988
3989 #ifdef ISE_GC
3990 if (flags & EO_NEW) { /* New object outside scavenge zone */
3991 object = add_to_moved_set (object);
3992 }
3993 if (Disp_rout(dtype)) {
3994 /* Special marking of MEMORY object allocated in scavenge zone */
3995 if (!(flags & EO_NEW)) {
3996 object = add_to_stack (object, &memory_set);
3997 }
3998 zone->ov_flags |= EO_DISP;
3999 }
4000 #endif
4001
4002 /* If the object has an initialization routine, call it now. This routine
4003 * is in charge of setting some other flags like EO_COMP and initializing
4004 	 * expanded inter-references.
4005 */
4006
4007
4008 init = XCreate(dtype);
4009 if (init) {
4010 EIF_GET_CONTEXT
4011 DISCARD_BREAKPOINTS
4012 RT_GC_PROTECT(object);
4013 (init)(object, object);
4014 RT_GC_WEAN(object);
4015 UNDISCARD_BREAKPOINTS
4016 }
4017
4018 SIGRESUME; /* End of critical section */
4019
4020 #ifdef DEBUG
4021 dprintf(256)("eif_set: %d bytes for DT %d at 0x%lx%s\n",
4022 		zone->ov_size & B_SIZE, dftype, object, flags & EO_NEW ? " (new)" : "");
4023 flush;
4024 #endif
4025
4026 #ifdef EIF_EXPENSIVE_ASSERTIONS
4027 CHECK ("Cannot be in object ID stack", !st_has (&object_id_stack, object));
4028 #endif
4029 return object;
4030 }
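/* The following is an illustrative, non-compiled sketch (kept under "#if 0")
 * of the header state a caller can rely on once eif_set() has returned; it is
 * not part of the runtime and the helper name is hypothetical. */
#if 0
static void sketch_check_eif_set_result (EIF_REFERENCE object, EIF_TYPE_INDEX dftype, EIF_TYPE_INDEX dtype)
{
	union overhead *zone = HEADER(object);	/* Object's header, as above */

		/* Type indices recorded by eif_set(). */
	CHECK ("Full dynamic type set", zone->ov_dftype == dftype);
	CHECK ("Dynamic type set", zone->ov_dtype == dtype);
		/* The block is now an Eiffel object, not a C one. */
	CHECK ("Not a C block", !(zone->ov_size & B_C));
		/* Objects with a dispose routine are flagged for the GC. */
	if (Disp_rout(dtype)) {
		CHECK ("Dispose routine flagged", zone->ov_flags & EO_DISP);
	}
}
#endif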
4031
4032 /*
4033 doc: <routine name="eif_spset" return_type="EIF_REFERENCE" export="private">
4034 doc:	<summary>Set the special Eiffel object for use: reset the zone with zeros. If the special object is not allocated in the scavenge zone, we also try to remember it (it has to be in the new generation outside the scavenge zone). The dynamic type of the special object is left blank; it is up to the caller of spmalloc() to set a proper dynamic type. The function returns the location of the object (it may move if a GC cycle is triggered to remember the object).</summary>
4035 doc: <param name="object" type="EIF_REFERENCE">Special object to setup.</param>
4036 doc: <param name="in_scavenge" type="EIF_BOOLEAN">Is new special object in scavenge zone?</param>
4037 doc: <return>New value of `object' as this routine can trigger a GC cycle</return>
4038 doc: <thread_safety>Safe</thread_safety>
4039 doc: <synchronization>None required</synchronization>
4040 doc: </routine>
4041 */
4042
4043 rt_private EIF_REFERENCE eif_spset(EIF_REFERENCE object, EIF_BOOLEAN in_scavenge)
4044 {
4045 RT_GET_CONTEXT
4046 union overhead *zone = HEADER(object); /* Malloc info zone */
4047
4048 SIGBLOCK; /* Critical section */
4049 if (egc_has_old_special_semantic) {
4050 memset (object, 0, zone->ov_size & B_SIZE); /* All set with zeros */
4051 }
4052 #ifdef WITH_OBJECT_IDENTIFIER
4053 eif_object_id_count++;
4054 zone->ov_id = (rt_uint_ptr)eif_object_id_count;
4055 #endif
4056
4057 #ifdef EIF_TID
4058 #ifdef EIF_THREADS
4059 zone->ovs_tid = (rt_uint_ptr) eif_thr_context->thread_id; /* tid from eif_thr_context */
4060 #else
4061 zone->ovs_tid = (rt_uint_ptr) 0; /* In non-MT-mode, it is NULL by convention */
4062 #endif /* EIF_THREADS */
4063 #endif /* EIF_TID */
4064
4065 	/* Set SCOOP Processor if available. */
4066 #ifdef EIF_THREADS
4067 zone->ov_pid = (EIF_SCP_PID)(eif_thr_context->logical_id != -1 ? eif_thr_context->logical_id : 0);
4068 #else
4069 zone->ov_pid = (EIF_SCP_PID)0;
4070 #endif
4071 zone->ov_size &= ~B_C; /* Object is an Eiffel one */
4072 #ifdef ISE_GC
4073 if (in_scavenge == EIF_FALSE) {
4074 zone->ov_flags = EO_SPEC | EO_NEW; /* Object is special and new */
4075 object = add_to_moved_set (object);
4076 } else
4077 #endif
4078 zone->ov_flags = EO_SPEC; /* Object is special */
4079
4080 SIGRESUME; /* End of critical section */
4081
4082 #ifdef DEBUG
4083 dprintf(256)("eif_spset: %d bytes special at 0x%lx\n", zone->ov_size & B_SIZE, object);
4084 flush;
4085 #endif
4086
4087 #ifdef EIF_EXPENSIVE_ASSERTIONS
4088 CHECK ("Cannot be in object ID stack", !st_has (&object_id_stack, object));
4089 #endif
4090
4091 return object;
4092 }
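/* Illustrative, non-compiled sketch (kept under "#if 0") of the flag
 * combinations produced by eif_spset() above; the helper name is hypothetical
 * and not part of the runtime. */
#if 0
static void sketch_check_eif_spset_result (EIF_REFERENCE object, EIF_BOOLEAN in_scavenge)
{
	union overhead *zone = HEADER(object);	/* Malloc info zone, as above */

	CHECK ("Marked special", zone->ov_flags & EO_SPEC);
	if (!in_scavenge) {
			/* Allocated outside the scavenge zone: the object was pushed on
			 * `moved_set' with EO_NEW, or aged to EO_OLD when the set was full. */
		CHECK ("New or already old", zone->ov_flags & (EO_NEW | EO_OLD));
	}
}
#endif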
4093
4094 #ifdef ISE_GC
4095
4096 /*
4097 doc: <routine name="add_to_moved_set" return_type="EIF_REFERENCE" export="private">
4098 doc:	<summary>Add `object' into `moved_set', but only if `moved_set' is not full. At the moment, `not full' simply means that the first chunk of the set still has room. The reason is that we noticed that if too many objects are allocated as EO_NEW, then we spend a lot of time in `update_moved_set'.</summary>
4099 doc: <param name="object" type="EIF_REFERENCE">Object to add in `memory_set'.</param>
4100 doc: <return>Location of `object' as it might have moved.</return>
4101 doc: <thread_safety>Safe</thread_safety>
4102 doc: <synchronization>Through `eif_gc_set_mutex'.</synchronization>
4103 doc: <fixme>Refine notion of `full'.</fixme>
4104 doc: </routine>
4105 */
4106
4107 rt_private EIF_REFERENCE add_to_moved_set (EIF_REFERENCE object)
4108 {
4109 RT_GET_CONTEXT
4110 union overhead * zone;
4111
4112 REQUIRE("object not null", object);
4113 REQUIRE("object has EO_NEW", HEADER(object)->ov_flags & EO_NEW);
4114
4115 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_LOCK);
4116 /* Check that we can actually add something to the stack. */
4117 if ((moved_set.st_top == NULL) || (moved_set.st_end != moved_set.st_top)) {
4118 if (-1 == epush(&moved_set, object)) { /* Cannot record object */
4119 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4120 /* We don't bother, we simply remove the EO_NEW flag from the object
4121 * and mark it old. */
4122 zone = HEADER(object);
4123 zone->ov_flags &= ~EO_NEW;
4124 zone->ov_flags |= EO_OLD;
4125 } else {
4126 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4127 }
4128 } else {
4129 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4130 /* `moved_set' was full so we don't bother, we simply remove the EO_NEW flag from the object
4131 * and mark it old. */
4132 zone = HEADER(object);
4133 zone->ov_flags &= ~EO_NEW;
4134 zone->ov_flags |= EO_OLD;
4135 }
4136 return object;
4137 }
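/* Illustrative, non-compiled sketch (kept under "#if 0") of the "not full"
 * test used above, extracted as a predicate; the helper name is hypothetical. */
#if 0
static int sketch_moved_set_has_room (struct stack *set)
{
	return (set->st_top == NULL)			/* No chunk yet: epush() will allocate one */
		|| (set->st_end != set->st_top);	/* Current chunk still has free slots */
}
#endif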
4138
4139 /*
4140 doc: <routine name="add_to_stack" return_type="EIF_REFERENCE" export="private">
4141 doc: <summary>Add `object' into `stk'.</summary>
4142 doc: <param name="object" type="EIF_REFERENCE">Object to add in `memory_set'.</param>
4143 doc: <param name="stk" type="struct stack *">Stack in which we wish to add `object'.</param>
4144 doc: <return>Location of `object' as it might have moved.</return>
4145 doc: <thread_safety>Safe</thread_safety>
4146 doc: <synchronization>Through `eif_gc_set_mutex'.</synchronization>
4147 doc: </routine>
4148 */
4149
4150 rt_private EIF_REFERENCE add_to_stack (EIF_REFERENCE object, struct stack *stk)
4151 {
4152 RT_GET_CONTEXT
4153 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_LOCK);
4154 if (-1 == epush(stk, object)) { /* Cannot record object */
4155 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4156 urgent_plsc(&object); /* Full collection */
4157 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_LOCK);
4158 if (-1 == epush(stk, object)) { /* Still failed */
4159 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4160 enomem(MTC_NOARG); /* Critical exception */
4161 } else {
4162 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4163 }
4164 } else {
4165 GC_THREAD_PROTECT(EIF_GC_SET_MUTEX_UNLOCK);
4166 }
4167 return object;
4168 }
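/* Illustrative, non-compiled sketch (kept under "#if 0") of the caller-side
 * contract of add_to_stack(): recording may trigger a full collection that
 * moves the object, so only the returned reference may be used afterwards.
 * The wrapper name is hypothetical. */
#if 0
static EIF_REFERENCE sketch_record_in_memory_set (EIF_REFERENCE object)
{
		/* Always reuse the result; discarding it would keep a possibly
		 * obsolete address after `urgent_plsc' moved the object. */
	object = add_to_stack (object, &memory_set);
	return object;
}
#endif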
4169
4170 /*
4171 doc: <routine name="compute_hlist_index" return_type="uint32" export="private">
4172 doc:	<summary>Quickly compute the index in the hlist array where we have a chance to find the right block. The idea is to keep shifting the value right (logically) until it becomes zero; the number of shifts performed is the base 2 logarithm of `size'.</summary>
4173 doc:	<param name="size" type="size_t">Size of block for which we want to find the associated index in the free list.</param>
4174 doc: <return>Index of free list where block of size `size' will be found.</return>
4175 doc: <thread_safety>Safe</thread_safety>
4176 doc: <synchronization>None required</synchronization>
4177 doc: </routine>
4178 */
4179
4180 rt_private uint32 compute_hlist_index (size_t size)
4181 {
4182 uint32 i = HLIST_INDEX_LIMIT;
4183
4184 	/* When we call this routine, it means that `size' was greater than or equal to HLIST_SIZE_LIMIT */
4185 REQUIRE ("Not a precomputed index", size >= HLIST_SIZE_LIMIT);
4186
4187 size >>= HLIST_DEFAULT_SHIFT;
4188 while (size >>= 1)
4189 i++;
4190 return i;
4191 }
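/* Illustrative, non-compiled sketch (kept under "#if 0"): assuming that
 * `size >> HLIST_DEFAULT_SHIFT' is at least 1 for sizes satisfying the
 * precondition, the loop above is equivalent to the closed form
 * HLIST_INDEX_LIMIT + floor(log2(size >> HLIST_DEFAULT_SHIFT)).  The helpers
 * below only exist to express that comparison and are not runtime routines. */
#if 0
static uint32 sketch_log2_floor (size_t n)
{
	uint32 r = 0;
	while (n >>= 1)
		r++;
	return r;
}

static void sketch_check_hlist_index (size_t size)
{
	CHECK ("Precondition", size >= HLIST_SIZE_LIMIT);
	CHECK ("Closed form agrees",
		compute_hlist_index (size) ==
			HLIST_INDEX_LIMIT + sketch_log2_floor (size >> HLIST_DEFAULT_SHIFT));
}
#endif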
4192
4193 #endif
4194 /*
4195 doc:</file>
4196 */
