/[eiffelstudio]/branches/eth/eve/Src/library/base/ise/runtime/scoop/classic/ise_scoop_manager.e

Revision 88379
Fri Mar 9 09:40:16 2012 UTC by jasonw
File size: 91725 bytes
<<Merged from trunk#88377.>>
1 note
2 description: "Core handling of separate calls in SCOOP"
3 patent: "[
4 The concurrency techniques associated with this and other classes are covered by a
5 filed patent application.
6 ]"
7 legal: "See notice at end of class."
8 status: "See notice at end of class."
9 date: "$Date$"
10 revision: "$Revision$"
11
12 frozen class
13 ISE_SCOOP_MANAGER
14
15 create {NONE}
16 init_scoop_manager
17
18 feature -- C callback function
19
20 scoop_manager_task_callback (scoop_task: NATURAL_8; client_processor_id, supplier_processor_id: like processor_id_type; a_callback_data: POINTER)
21 -- Entry point to ISE_SCOOP_MANAGER from RTS SCOOP macros.
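-- `scoop_task' is one of the task id constants declared below, mirroring the values defined in <eif_macros.h>.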
22 do
23 inspect
24 scoop_task
25 when check_uncontrolled_call_task_id then
26 set_boolean_return_value (a_callback_data, is_uncontrolled (client_processor_id, supplier_processor_id))
27 when add_call_task_id then
28 log_call_on_processor (client_processor_id, supplier_processor_id, a_callback_data)
29 when signify_start_of_new_chain_task_id then
30 signify_start_of_request_chain (client_processor_id)
31 when signify_end_of_new_chain_task_id then
32 if supplier_processor_id = null_processor_id then
33 signify_end_of_wait_condition_chain (client_processor_id)
34 else
35 signify_end_of_request_chain (client_processor_id)
36 end
37 when add_supplier_to_request_chain_task_id then
38 assign_supplier_processor_to_request_chain (client_processor_id, supplier_processor_id)
39 when wait_for_supplier_processor_locks_task_id then
40 wait_for_request_chain_supplier_processor_locks (client_processor_id)
41
42 debug ("ETH_SCOOP")
43 -- Make client wait until supplier processors are at the top of their respective queues.
44 wait_for_request_chain_to_begin (
45 client_processor_id,
46 (request_chain_meta_data [client_processor_id]) [request_chain_meta_data_header_size],
47 request_chain_meta_data [client_processor_id]
48 )
49 end
50
51 when wait_for_processor_redundancy_task_id then
52 root_processor_creation_routine_exited
53 when assign_processor_task_id then
54 set_integer_32_return_value (a_callback_data, assign_free_processor_id)
55 when free_processor_task_id then
56 free_processor_id (client_processor_id)
57 when add_processor_reference_task_id then
58 add_processor_reference (supplier_processor_id)
59 when remove_processor_reference_task_id then
60 remove_processor_reference (supplier_processor_id)
61 else
62 check invalid_task: False end
63 end
64 end
65
66 add_processor_reference (a_processor_id: like processor_id_type)
67 -- Increase reference count for `a_processor_id'.
68 --| FIXME Needs GC implementation for use
69 local
70 l_ref: INTEGER_32
71 do
72 l_ref := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (processor_meta_data [a_processor_id].item_address (processor_reference_count_index))
73 end
74
75 remove_processor_reference (a_processor_id: like processor_id_type)
76 -- Decrease reference count for `a_processor_id'.
77 --| FIXME Needs GC implementation for use
78 local
79 l_ref: INTEGER_32
80 do
81 l_ref := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 (processor_meta_data [a_processor_id].item_address (processor_reference_count_index))
82 end
83
84 maximum_dirty_processor_client_count: INTEGER = 16
85 -- Maximum number of client processors a supplier processor may be dirty with respect to.
86
87 flag_processor_dirty (
88 a_logical_processor_id: like processor_id_type;
89 a_executing_request_chain_node_meta_data: like new_request_chain_node_meta_data_queue_entry)
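-- Mark the request chain currently executing on `a_logical_processor_id' as dirty and record its client processor in the dirty-client list of `a_logical_processor_id'.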
90 local
91 l_dirty_flag_count: INTEGER
92 l_dirty_processor_client_list: detachable like new_request_chain_meta_data_entry
93 l_client_processor_id: like processor_id_type
94 i: INTEGER
95 l_lock_request_return: INTEGER
96 do
97 -- Signal the request chain as dirty so that any other processors in the request chain cease applying calls.
98 a_executing_request_chain_node_meta_data [request_chain_status_index] := request_chain_status_dirty
99
100 -- Retrieve the currently executing request chain so that we can flag `a_logical_processor_id' as dirty with respect to the chain's client processor.
101 l_dirty_processor_client_list := (request_chain_meta_data_stack_list [a_logical_processor_id]) [max_request_chain_depth]
102 if l_dirty_processor_client_list = Void then
103 create l_dirty_processor_client_list.make_filled (null_processor_id, maximum_dirty_processor_client_count)
104 -- Set count to zero.
105 (request_chain_meta_data_stack_list [a_logical_processor_id]) [max_request_chain_depth] := l_dirty_processor_client_list
106 end
107
108 l_client_processor_id := a_executing_request_chain_node_meta_data [request_chain_client_pid_index]
109 -- Find the next available slot to add `l_client_processor_id'.
110
111 l_lock_request_return := request_processor_resource (
112 processor_dirty_processor_client_list_lock_index,
113 a_logical_processor_id,
114 l_client_processor_id,
115 True, -- Wait until granted, we cannot continue until we have control over the value.
116 True -- High Priority, wait is minimal as this is a temporary lock value
117 )
118 check resource_attained: l_lock_request_return = resource_lock_newly_attained end
119
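-- After the loop, `i' equals `maximum_dirty_processor_client_count' only if no free slot was found: a successful insertion sets `i' to the limit and the trailing increment pushes it past it.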
120 from
121 i := 0
122 until
123 i >= maximum_dirty_processor_client_count
124 loop
125 if l_dirty_processor_client_list [i] = null_processor_id then
126 l_dirty_processor_client_list [i] := l_client_processor_id
127 i := maximum_dirty_processor_client_count
128 end
129 i := i + 1
130 end
131
132 relinquish_processor_resource (
133 processor_meta_data [a_logical_processor_id].item_address (processor_dirty_processor_client_list_lock_index),
134 l_client_processor_id,
135 True -- High Priority
136 )
137
138 if i = maximum_dirty_processor_client_count then
139 -- There were no available slots for `l_client_processor_id'.
140 raise_scoop_exception ("Maximum SCOOP dirty processor count reached")
141 end
142
143 -- Update dirty flag count for the client processor.
144 l_dirty_flag_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (processor_meta_data [l_client_processor_id].item_address (processor_dirty_flag_count_index))
145 --| FIXME A check may be needed should the number of dirty processors for a client go above a certain limit.
146 end
147
148 is_uncontrolled (a_client_processor_id, a_supplier_processor_id: like processor_id_type): BOOLEAN
149 -- Is `a_supplier_processor_id' uncontrolled in the current context of `a_client_processor_id'?
150 local
151 l_end_index, i: INTEGER
152 do
153 if request_chain_meta_data [a_client_processor_id] /= default_request_chain_meta_data_entry then
154 -- Check first if we are lock passing; if not, check the entire chain.
155 Result := (request_chain_meta_data [a_client_processor_id]) [request_chain_client_pid_index] /= a_supplier_processor_id
156 if Result then
157 l_end_index := (request_chain_meta_data [a_client_processor_id]) [request_chain_pid_count_index]
158 if l_end_index > 0 then
159 from
160 i := request_chain_meta_data_header_size
161 l_end_index := l_end_index + i
162 until
163 i = l_end_index or else not Result
164 loop
165 Result := (request_chain_meta_data [a_client_processor_id]) [i] /= a_supplier_processor_id
166 i := i + 1
167 end
168 end
169 end
170 else
171 -- There is no current chain, so the supplier processor is uncontrolled if it differs from the client processor.
172 Result := a_client_processor_id /= a_supplier_processor_id
173 end
174 end
175
176 frozen assign_processor_task_id: NATURAL_8 = 1
177 frozen free_processor_task_id: NATURAL_8 = 2
178 -- frozen start_processor_loop_task_id: NATURAL_8 = 3
179 frozen signify_start_of_new_chain_task_id: NATURAL_8 = 4
180 frozen signify_end_of_new_chain_task_id: NATURAL_8 = 5
181 frozen add_supplier_to_request_chain_task_id: NATURAL_8 = 6
182 frozen wait_for_supplier_processor_locks_task_id: NATURAL_8 = 7
183 frozen add_call_task_id: NATURAL_8 = 8
184 frozen add_synchronous_call_task_id: NATURAL_8 = 9
185 frozen wait_for_processor_redundancy_task_id: NATURAL_8 = 10
186 frozen add_processor_reference_task_id: NATURAL_8 = 11
187 frozen remove_processor_reference_task_id: NATURAL_8 = 12
188 frozen check_uncontrolled_call_task_id: NATURAL_8 = 13
189 -- SCOOP task constants, mirroring those defined in <eif_macros.h>.
190 --| FIXME: Use external macros when valid in an inspect statement.
191
192 feature -- EIF_TYPED_VALUE externals
193
194 set_boolean_return_value (a_boolean_typed_value: POINTER; a_boolean: BOOLEAN)
195 external
196 "C macro use %"eif_scoop.h%""
197 end
198
199 set_integer_32_return_value (a_integer_32_typed_value: POINTER; a_integer: INTEGER_32)
200 external
201 "C macro use %"eif_scoop.h%""
202 end
203
204 feature -- Processor Initialization
205
206 assign_free_processor_id: like processor_id_type
207 -- Find the next available free SCOOP Processor; reuse or instantiate as need be.
208 do
209 --| FIXME Implement redundant processor traversal to return next available id
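-- The atomic increment returns the updated count, so subtracting one yields the zero-based id that has just been claimed.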
210 Result := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 ($processor_count) - 1
211
212 if Result = max_scoop_processors_instantiable then
213 -- Perform processor cleanup.
214 -- Raise Exception if no free processor could not be found, or block until there is one.
215 raise_scoop_exception ("Maximum SCOOP Processor Allocation reached")
216 end
217
218 initialize_default_processor_meta_data (Result)
219
220 debug ("ISE_SCOOP_MANAGER")
221 print ("assign_free_processor_id of pid " + Result.out + "%N")
222 end
223 end
224
225 free_processor_id (a_processor_id: like processor_id_type)
226 -- Free resources of processor id `a_processor_id'.
227 do
228 -- Mark `a_processor_id' as free.
229 -- Called via GC so all threads will be stopped.
230 end
231
232 start_processor_application_loop (a_processor_id: like processor_id_type)
233 -- Start feature application loop for `a_processor_id'.
234 do
235 create_and_initialize_scoop_processor (Current, $scoop_processor_loop, default_processor_attributes.item, a_processor_id)
236 end
237
238 root_processor_creation_routine_exited
239 -- Root processor's creation routine has exited.
240 do
241 -- End request chain of root processor creation routine.
242 signify_end_of_request_chain (root_processor_id)
243
244 -- Mark the root processor as initialized now that the creation routine has completed.
245 processor_meta_data [root_processor_id].put (processor_status_initialized, processor_status_index)
246
247 -- Run processor loop for root processor for any pending logged calls.
248 scoop_processor_loop (root_processor_id)
249
250 -- Wait for all processors to have completely exited before exiting.
251 root_processor_wait_for_redundancy
252 end
253
254 feature -- Request Chain Handling
255
256 signify_start_of_request_chain (a_client_processor_id: like processor_id_type)
257 -- Signify the start of a new request chain on `a_client_processor_id'
258 local
259 l_temp_value: INTEGER_32
260 do
261 -- Increment current request chain id depth
262 l_temp_value := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (processor_meta_data [a_client_processor_id].item_address (current_request_chain_id_depth_index));
263 if l_temp_value = max_request_chain_depth then
264 raise_scoop_exception (scoop_request_chain_stack_overflow_message)
265 end
266
267 -- Increase request chain id.
268 l_temp_value := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (processor_meta_data [a_client_processor_id].item_address (current_request_chain_id_index));
269
270 -- Update `a_client_processor_id' with a new request chain.
271 update_request_chain_meta_data (a_client_processor_id, new_request_chain_meta_data_entry (a_client_processor_id))
272
273 debug ("ISE_SCOOP_MANAGER")
274 print ("signify_start_of_request_chain for pid " + a_client_processor_id.out + " %N")
275 end
276 end
277
278 increase_request_chain_depth (a_client_processor_id: like processor_id_type)
279 -- Increase request chain depth for `a_client_processor_id'
280 local
281 l_request_chain_depth: like default_request_chain_depth_value
282 do
283 l_request_chain_depth := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (processor_meta_data [a_client_processor_id].item_address (current_request_chain_id_depth_index));
284 end
285
286 decrease_request_chain_depth (a_client_processor_id: like processor_id_type)
287 -- Decrease request chain depth for `a_client_processor_id'.
288 local
289 l_request_chain_depth: like default_request_chain_depth_value
290 do
291 l_request_chain_depth := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 (processor_meta_data [a_client_processor_id].item_address (current_request_chain_id_depth_index));
292 end
293
294 signify_end_of_request_chain (a_client_processor_id: like processor_id_type)
295 -- Signal the end of a request chain for `a_client_processor_id'.
296 local
297 l_request_chain_depth: like default_request_chain_depth_value
298 l_request_chain_meta_data: detachable like new_request_chain_meta_data_entry
299 l_wait_condition_counter: INTEGER
300 do
301 -- Reset request chain values
302 l_request_chain_depth := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 (processor_meta_data [a_client_processor_id].item_address (Current_request_chain_id_depth_index))
303
304 -- Retrieve existing meta data chain and close it
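-- The stack list keeps one chain per nesting depth: the entry at `l_request_chain_depth + 1' is the chain being closed and the entry below it, if any, becomes the current chain again.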
305 l_request_chain_meta_data := (request_chain_meta_data_stack_list [a_client_processor_id]) [l_request_chain_depth + 1]
306 (request_chain_meta_data_stack_list [a_client_processor_id]) [l_request_chain_depth + 1] := Void
307 check l_previous_chain_meta_data_entry_attached: attached l_request_chain_meta_data then end
308 l_request_chain_meta_data [request_chain_status_index] := request_chain_status_closed;
309
310 if l_request_chain_depth > default_request_chain_depth_value then
311 l_request_chain_meta_data := (request_chain_meta_data_stack_list [a_client_processor_id]) [l_request_chain_depth]
312 check l_request_chain_meta_data_attached: l_request_chain_meta_data /= Void then end
313 request_chain_meta_data [a_client_processor_id] := l_request_chain_meta_data
314 else
315 -- We are fully closing off the chain so we set the request chain to the default value.
316 request_chain_meta_data [a_client_processor_id] := default_request_chain_meta_data_entry
317 end
318 -- Reset the wait condition counter to zero.
319 l_wait_condition_counter := {ATOMIC_MEMORY_OPERATIONS}.swap_integer_32 (processor_meta_data [a_client_processor_id].item_address (processor_wait_condition_counter_index), 0)
320
321 debug ("ISE_SCOOP_MANAGER")
322 print ("signify_end_of_request_chain for pid " + a_client_processor_id.out + "%N")
323 end
324 end
325
326 signify_end_of_wait_condition_chain (a_client_processor_id: like processor_id_type)
327 -- Signal the end of a failed wait_condition.
328 local
329 l_wait_condition_counter: INTEGER_32
330 do
331 -- End request chain and make sure that the wait condition counter is incremented.
332 l_wait_condition_counter := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (processor_meta_data [a_client_processor_id].item_address (processor_wait_condition_counter_index))
333
334 signify_end_of_request_chain (a_client_processor_id)
335 -- Set the wait counter to the incremented value as ending the request chain always resets it back to zero.
336 l_wait_condition_counter := {ATOMIC_MEMORY_OPERATIONS}.swap_integer_32 (processor_meta_data [a_client_processor_id].item_address (processor_wait_condition_counter_index), l_wait_condition_counter)
337
338 -- Yield processor temporarily (no spin locking as a failed wait condition is lower priority).
339 processor_yield (a_client_processor_id, Processor_spin_lock_limit + l_wait_condition_counter.as_natural_32)
340 end
341
342 assign_supplier_processor_to_request_chain (a_client_processor_id, a_supplier_processor_id: like processor_id_type)
343 -- Assign `a_supplier_processor_id' to the current request chain of `a_client_processor_id'
344 local
345 l_request_chain_id: like invalid_request_chain_id
346 l_request_chain_meta_data: detachable like new_processor_meta_data_entry
347 i, l_count, l_pid_count: INTEGER
348 l_pid_present: BOOLEAN
349 do
350 debug ("ISE_SCOOP_MANAGER")
351 print ("assign_supplier_process_to_request_chain for pid " + a_client_processor_id.out + " with supplier processor " + a_supplier_processor_id.out + "%N")
352 end
353
354 if a_supplier_processor_id /= a_client_processor_id then
355 l_request_chain_id := (processor_meta_data [a_client_processor_id])[current_request_chain_id_index]
356
357 -- Retrieve request chain meta data structure, add supplier pid to it if not already present.
358 l_request_chain_meta_data := request_chain_meta_data [a_client_processor_id]
359 check l_request_chain_meta_data_attached: attached l_request_chain_meta_data end
360
361 l_pid_count := l_request_chain_meta_data [request_chain_pid_count_index]
362 from
363 i := request_chain_meta_data_header_size
364 l_count := i + l_pid_count
365 until
366 i = l_count
367 loop
368 if l_request_chain_meta_data [i] = a_supplier_processor_id then
369 -- pid already present so exit loop and do nothing else.
370 l_pid_present := True
371 i := l_count
372 else
373 i := i + 1
374 end
375 end
376
377 if not l_pid_present then
378 -- Check that the structure is big enough; if not, resize it and re-add it to the parent structure.
379 if l_request_chain_meta_data.count = l_request_chain_meta_data.capacity then
380 l_request_chain_meta_data := l_request_chain_meta_data.aliased_resized_area (l_request_chain_meta_data.count + 2)
381 update_request_chain_meta_data (a_client_processor_id, l_request_chain_meta_data)
382 end
383 -- Add new pid to request chain list.
384 if a_supplier_processor_id = null_processor_id then
385 -- We are adding a creation routine
386 l_request_chain_meta_data.force (a_client_processor_id, l_count)
387 else
388 l_request_chain_meta_data.force (a_supplier_processor_id, l_count)
389 end
390 l_request_chain_meta_data [request_chain_pid_count_index] := l_pid_count + 1
391 end
392 end
393 end
394
395 update_request_chain_meta_data (a_client_processor_id: like processor_id_type; a_request_chain_meta_data: like new_request_chain_meta_data_entry)
396 -- Update request chain meta data for `a_client_processor_id'.
397 local
398 l_request_chain_depth: INTEGER
399 do
400 l_request_chain_depth := (processor_meta_data [a_client_processor_id]) [Current_request_chain_id_depth_index]
401 (request_chain_meta_data_stack_list [a_client_processor_id]) [l_request_chain_depth] := a_request_chain_meta_data
402 request_chain_meta_data [a_client_processor_id] := a_request_chain_meta_data
403 end
404
405 wait_for_request_chain_supplier_processor_locks (a_client_processor_id: like processor_id_type)
406 -- Wait for all locks of the supplier processors involved in the current request chain id of `a_client_processor_id' to become available.
407 local
408 l_request_chain_id: like invalid_request_chain_id
409 l_request_chain_node_id: like invalid_request_chain_node_id
410 l_request_chain_meta_data, l_previous_request_chain_meta_data: detachable like new_request_chain_meta_data_entry
411 l_request_chain_node_meta_data_queue: detachable like new_request_chain_node_meta_data_queue
412 l_request_chain_node_queue: detachable like new_request_chain_node_queue
413 l_request_chain_node_queue_entry: detachable like new_request_chain_node_queue_entry
414 l_exit, l_swap_occurred, l_merge_needed: BOOLEAN
415 i, j, l_container_count, l_pid_count, l_previous_container_pid_count_upper, l_previous_pid_count: INTEGER
416 l_pid: like processor_id_type
417 l_lock_request_return: NATURAL_8
418 l_request_chain_depth: INTEGER
419 l_wait_condition_counter: INTEGER
420 do
421 -- Sort unique processor ids by order of priority, then wait for locks on each processor so that a new request chain node can be initialized.
422 debug ("ISE_SCOOP_MANAGER")
423 print ("wait_for_request_chain_supplier_processor_locks for pid " + a_client_processor_id.out + "%N")
424 end
425
426 -- Retrieve wait condition counter for determining priority of processor request.
427 l_wait_condition_counter := (processor_meta_data [a_client_processor_id]) [processor_wait_condition_counter_index]
428
429 debug ("SCOOP_Wait_Condition_Retry_Limit_Check")
430 if l_wait_condition_counter > max_wait_condition_retry_limit then
431 raise_scoop_exception ("SCOOP Wait Condition Retry Limit Reached")
432 end
433 end
434
435 -- Retrieve request chain meta data structure.
436 l_request_chain_meta_data := request_chain_meta_data [a_client_processor_id]
437 check l_request_chain_meta_data_attached: attached l_request_chain_meta_data end
438
439 l_pid_count := l_request_chain_meta_data [request_chain_pid_count_index]
440 l_container_count := request_chain_meta_data_header_size + l_pid_count
441 -- Number of processor ids in new structure
442
443 -- Retrieve request chain meta data structure
444 -- Sort unique pid values by logical order
445 l_request_chain_id := (processor_meta_data [a_client_processor_id])[current_request_chain_id_index]
446 l_request_chain_depth := (processor_meta_data [a_client_processor_id])[current_request_chain_id_depth_index]
447
448 if l_request_chain_depth > 0 then
449 -- Iterate over the parent chain to see if there are common processors; if so, we need to sync them.
450 from
451 l_previous_request_chain_meta_data := (request_chain_meta_data_stack_list [a_client_processor_id]) [l_request_chain_depth - 1]
452 check l_previous_request_chain_attached: l_previous_request_chain_meta_data /= Void then end
453 i := request_chain_meta_data_header_size
454 l_previous_pid_count := l_previous_request_chain_meta_data [request_chain_pid_count_index]
455 l_previous_container_pid_count_upper := i + l_previous_pid_count
456 until
457 i = l_previous_container_pid_count_upper
458 loop
459 from
460 j := request_chain_meta_data_header_size
461 until
462 j = l_container_count
463 loop
464 if l_request_chain_meta_data [j] = l_previous_request_chain_meta_data [i] then
465 -- The processor exists in the previous chain so we must remove it from the new one for later merging.
466 l_merge_needed := True
467 if j + 1 = l_container_count then
468 -- We are the last pid in the list so we can simply null the values
469 l_request_chain_meta_data [j] := null_processor_id
470 else
471 l_request_chain_meta_data.move_data (j + 1, j, l_container_count - (j + 1))
472 end
473 l_pid_count := l_pid_count - 1
474 l_container_count := l_container_count - 1
475 l_request_chain_meta_data [request_chain_pid_count_index] := l_pid_count
476 -- Exit the inner loop and move on to the next pid in the previous chain.
477 j := l_container_count
478 else
479 j := j + 1
480 end
481 end
482 i := i + 1
483 end
484 end
485
486 if not l_merge_needed then
487 -- Reset previous pid count if no merging is required.
488 l_previous_pid_count := 0
489 end
490
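-- Simple restart-on-swap bubble sort: whenever a swap occurs before the final comparison, iteration restarts from the first pid; with only two pids a single comparison suffices, so no restart is flagged.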
491 from
492 -- Start at the first PID value; with a zero-based SPECIAL this equals the header size.
493 i := request_chain_meta_data_header_size
494 l_exit := l_pid_count <= 1
495 until
496 l_exit
497 loop
498 -- Sort Unique PID Values by order of priority (in this case the lowest PID value takes preference)
499 l_pid := l_request_chain_meta_data [i]
500 if l_pid > l_request_chain_meta_data [i + 1] then
501 l_request_chain_meta_data [i] := l_request_chain_meta_data [i + 1]
502 l_request_chain_meta_data [i + 1] := l_pid
503 l_swap_occurred := l_pid_count > 2
504 -- If we swap with two unique values then there is no need to reiterate
505 -- as this is the only operation that can occur.
506 end
507 -- If we are at the final PID position and no swap has occurred then we can exit the loop
508 -- Otherwise we must reset the index to the start and iterate again to check that the list is fully sorted.
509 if i = l_container_count - 2 then
510 -- -2 for zero based penultimate value.
511 if l_swap_occurred then
512 -- Reset iteration to start values.
513 l_swap_occurred := False
514 i := request_chain_meta_data_header_size
515 else
516 l_exit := True
517 end
518 else
519 i := i + 1
520 end
521 end
522
523 -- Reformulate meta data structure with unique pid count as the first value followed by the unique sorted pid values
524 l_request_chain_meta_data [request_chain_pid_count_index] := l_pid_count + l_previous_pid_count -- Add previous pid count should merging occur.
525 l_request_chain_meta_data [request_chain_client_pid_index] := a_client_processor_id
526 l_request_chain_meta_data [request_chain_client_pid_request_chain_id_index] := l_request_chain_id
527 l_request_chain_meta_data [request_chain_status_index] := request_chain_status_uninitialized
528 l_request_chain_meta_data [request_chain_sync_counter_index] := l_pid_count -- Do not include merged processors in sync count.
529
530 -- Resize meta data to allow for supplier processor meta data and any previous merge data.
531 if (l_container_count + l_pid_count + (l_previous_pid_count * 2)) > l_request_chain_meta_data.count then
532 l_request_chain_meta_data := l_request_chain_meta_data.aliased_resized_area_with_default (null_processor_id, l_container_count + l_pid_count + (l_previous_pid_count * 2))
533 update_request_chain_meta_data (a_client_processor_id, l_request_chain_meta_data)
534 end
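-- Resulting layout of the meta data area: header, the sorted new pids, any merged previous pids, then one request chain node id slot per new pid followed by one per merged pid.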
535
536 -- Obtain a request queue lock on each of the processors (already uniquely sorted by logical pid order)
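-- Acquiring the per-processor locks in ascending pid order gives every client the same global acquisition order, which avoids deadlock when concurrent clients request overlapping sets of processors.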
537 from
538 i := request_chain_meta_data_header_size
539 until
540 i = l_container_count
541 loop
542 -- Obtain lock on request chain node id value to prevent other processors from accessing it
543 -- This has to be done atomically via compare and swap
544 l_pid := l_request_chain_meta_data [i]
545 l_lock_request_return := request_processor_resource (
546 current_request_chain_node_id_lock_index,
547 l_pid,
548 a_client_processor_id,
549 True, -- Wait until granted, we cannot continue until we have control over the value.
550 l_wait_condition_counter = 0 -- Low Priority if there was a previous wait condition failure.
551 )
552 check resource_attained: l_lock_request_return = resource_lock_newly_attained end
553
554 i := i + 1
555 end
556
557 -- When all locks have been obtained we retrieve the request chain node ids for each of the locked processors.
558 -- When retrieved we initialize the data structure for each supplier pid so that we can then log calls.
559
560
561 from
562 i := request_chain_meta_data_header_size
563 until
564 i = l_container_count
565 loop
566 -- Add the current supplier processor request chain node id
567 l_pid := l_request_chain_meta_data [i]
568
569 -- We atomically increase for the next use and set the current request chain node id value.
570 l_request_chain_node_id := (processor_meta_data [l_pid]) [current_request_chain_node_id_index]
571
572 if l_request_chain_node_id = max_request_chain_node_queue_index then
573 -- We are at the maximum amount of allocations so we wait for the processor to reset its request chain node counter.
574 from
575 -- We can wait until the processor's application counter has caught up to the logging counter so that everything may be reset.
576 until
577 l_request_chain_node_id < max_request_chain_node_queue_index
578 loop
579 processor_yield (a_client_processor_id, 0)
580 l_request_chain_node_id := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (processor_meta_data [l_pid].item_address (current_request_chain_node_id_index), 0)
581 end
582 -- Processor `a_client_processor_id' has to wait for `l_pid' to reset its application queue.
583 end
584
585 -- Extend value to request chain node meta data.
586 l_request_chain_meta_data.put (l_request_chain_node_id, i + l_pid_count + l_previous_pid_count)
587 l_request_chain_node_id := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (processor_meta_data [l_pid].item_address (current_request_chain_node_id_index))
588 i := i + 1
589 end
590
591 if l_merge_needed then
592 -- We need to merge the previous processor values with the new ones.
593 check l_previous_request_chain_meta_data_attached: l_previous_request_chain_meta_data /= Void then end
594 from
595 i := request_chain_meta_data_header_size
596 until
597 i = l_previous_container_pid_count_upper
598 loop
599 -- Add previous pid to new chain.
600 l_request_chain_meta_data.put (l_previous_request_chain_meta_data [i], i + l_pid_count)
601 -- Add previous request chain node id.
602 l_request_chain_meta_data.put (l_previous_request_chain_meta_data [i + l_previous_pid_count], i + (2 * l_pid_count) + l_previous_pid_count)
603 i := i + 1
604 end
605 end
606
607 -- Release locks when the request chain node ids have been calculated.
608 from
609 i := request_chain_meta_data_header_size
610 until
611 i = l_container_count
612 loop
613 -- Release lock on processor request chain node id
614 -- This has to be done atomically via compare and swap
615 l_pid := l_request_chain_meta_data [i]
616
617 relinquish_processor_resource (
618 processor_meta_data [l_pid].item_address (current_request_chain_node_id_lock_index),
619 a_client_processor_id,
620 l_wait_condition_counter = 0 -- Low priority if there is a wait condition failure.
621 )
622
623 i := i + 1
624 end
625
626 from
627 i := request_chain_meta_data_header_size
628 until
629 i = l_container_count
630 loop
631 l_pid := l_request_chain_meta_data [i]
632 l_request_chain_node_id := l_request_chain_meta_data [i + l_pid_count + l_previous_pid_count]
633
634 -- Set meta data for the request node of `l_pid'
635 -- This is used for both `head' and `tail' request chain nodes.
636 l_request_chain_node_meta_data_queue := request_chain_node_meta_data_queue_list [l_pid]
637 check l_request_chain_node_meta_data_queue_attached: attached l_request_chain_node_meta_data_queue then end
638 l_request_chain_node_meta_data_queue [l_request_chain_node_id] := l_request_chain_meta_data
639
640 -- Set request chain node queue entry for `l_pid' for future logging.
641 l_request_chain_node_queue := request_chain_node_queue_list [l_pid]
642 check l_request_chain_node_queue_attached: attached l_request_chain_node_queue then end
643 l_request_chain_node_queue_entry := l_request_chain_node_queue [l_request_chain_node_id]
644 if not attached l_request_chain_node_queue_entry then
645 l_request_chain_node_queue_entry := new_request_chain_node_queue_entry
646 l_request_chain_node_queue [l_request_chain_node_id] := l_request_chain_node_queue_entry
647 else
648 -- Make sure request chain node structure is empty.
649 l_request_chain_node_queue_entry.wipe_out
650 end
651 i := i + 1
652 end
653
654 -- Set chain as open so that the processors may enter the chain
655 l_request_chain_meta_data [request_chain_status_index] := request_chain_status_open
656 end
657
658 feature {NONE} -- Exceptions
659
660 raise_scoop_exception (a_exception_message: STRING)
661 -- Raise a SCOOP Exception using `a_exception_message'.
662 do
663 exception_helper.raise (a_exception_message)
664 end
665
666 exception_helper: EXCEPTIONS
667 -- Helper object for exceptions.
668
669 feature -- Command/Query Handling
670
671 is_processor_dirty (a_client_processor_id, a_supplier_processor_id: like processor_id_type): BOOLEAN
672 -- Is `a_supplier_processor_id' dirty with respect to `a_client_processor_id'?
673 local
674 l_dirty_processor_client_list: detachable like new_request_chain_meta_data_entry
675 i: INTEGER
676 l_lock_request_return: INTEGER
677 do
678 l_dirty_processor_client_list := (request_chain_meta_data_stack_list [a_supplier_processor_id]) [Max_request_chain_depth]
679 if l_dirty_processor_client_list /= Void then
680 l_lock_request_return := request_processor_resource (
681 processor_dirty_processor_client_list_lock_index,
682 a_supplier_processor_id,
683 a_client_processor_id,
684 True, -- Wait until granted, we cannot continue until we have control over the value.
685 False -- Low Priority, wait is minimal as this is a temporary lock value
686 )
687 check resource_attained: l_lock_request_return = resource_lock_newly_attained end
688
689 -- An uncaught exception has occurred on `a_supplier_processor_id' so we must check if `a_client_processor_id' is involved.
690 from
691 i := 0
692 until
693 i >= maximum_dirty_processor_client_count
694 loop
695 if l_dirty_processor_client_list [i] = a_client_processor_id then
696 Result := True
697 -- Reset dirty flag for `a_client_processor_id'.
698 l_dirty_processor_client_list [i] := null_processor_id
699 -- Update dirty flag count for the client processor.
700 i := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 (processor_meta_data [a_client_processor_id].item_address (processor_dirty_flag_count_index))
701 -- Exit loop.
702 i := maximum_dirty_processor_client_count
703 end
704 i := i + 1
705 end
706
707 relinquish_processor_resource (
708 processor_meta_data [a_supplier_processor_id].item_address (processor_dirty_processor_client_list_lock_index),
709 a_client_processor_id,
710 False
711 )
712 end
713 end
714
715 log_call_on_processor (a_client_processor_id, a_supplier_processor_id: like processor_id_type; a_call_data: like call_data)
716 -- Log call on `a_supplier_processor_id' for `a_client_processor_id'.
717 local
718 l_client_request_chain_meta_data, l_supplier_request_chain_meta_data, l_creation_request_chain_meta_data: detachable like new_request_chain_meta_data_entry
719 l_request_chain_node_id: like invalid_request_chain_node_id
720 l_request_chain_node_queue: detachable like new_request_chain_node_queue
721 l_client_request_chain_node_queue_entry, l_request_chain_node_queue_entry: detachable like new_request_chain_node_queue_entry
722 l_unique_pid_count, i, l_last_pid_index, l_logged_calls_original_count, l_logged_calls_current_count: INTEGER_32
723 l_is_synchronous, l_client_is_sibling, l_client_sync_needed, l_exit_loop: BOOLEAN
724 l_call_ptr: POINTER
725 do
726 debug ("ISE_SCOOP_MANAGER")
727 print ("log_call_on_processor for pid " + a_client_processor_id.out + " on pid " + a_supplier_processor_id.out + "%N")
728 end
729
730 -- Retrieve request chain node list so the call can be logged
731 l_request_chain_node_queue := request_chain_node_queue_list [a_supplier_processor_id]
732 check l_request_chain_node_queue_attached: attached l_request_chain_node_queue then end
733
734 l_is_synchronous := call_data_sync_pid (a_call_data) /= null_processor_id
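-- A non-null sync pid in the call data marks the call as synchronous: the client has to wait for the supplier before continuing.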
735
736 -- Initially mark the request chain node as invalid if not a creation routine
737 if (processor_meta_data [a_supplier_processor_id])[current_request_chain_id_index] = 0 then
738 l_request_chain_node_id := 0
739 else
740 l_request_chain_node_id := invalid_request_chain_node_id
741 end
742
743 -- Check if `a_supplier_processor_id' is dirty with respect to `a_client_processor_id'.
744 if {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 ((processor_meta_data [a_client_processor_id]).item_address (processor_dirty_flag_count_index), 0) > 0 then
745 -- The client has dirty flags outstanding, so check whether `a_supplier_processor_id' is dirty with respect to `a_client_processor_id'.
746 if is_processor_dirty (a_client_processor_id, a_supplier_processor_id) then
747 raise_scoop_exception (scoop_dirty_processor_exception_message)
748 end
749 end
750
751 l_client_request_chain_meta_data := request_chain_meta_data [a_client_processor_id]
752 if l_client_request_chain_meta_data /= Void then
753 -- Call is specific to `a_client_processor_id' so we need to retrieve the request chain node for `a_supplier_processor_id' from here.
754 l_unique_pid_count := l_client_request_chain_meta_data [request_chain_pid_count_index]
755 -- Search through the remaining pids to find the associated request chain node id.
756 from
757 i := request_chain_meta_data_header_size
758 l_last_pid_index := request_chain_meta_data_header_size + l_unique_pid_count - 1
759 until
760 i > l_last_pid_index
761 loop
762 if l_client_request_chain_meta_data [i] = a_supplier_processor_id then
763 l_request_chain_node_id := l_client_request_chain_meta_data [i + l_unique_pid_count]
764 elseif l_client_request_chain_meta_data [i] = a_client_processor_id then
765 l_client_is_sibling := True
766 end
767 i := i + 1
768 end
769 end
770
771 if l_request_chain_node_id = invalid_request_chain_id then
772 -- We are logging either via lock passing or a sibling processor is asynchronously logging on another sibling processor.
773 l_client_sync_needed := l_is_synchronous
774 l_request_chain_node_id := (processor_meta_data [a_supplier_processor_id])[current_request_node_id_execution_index]
775 l_request_chain_node_queue_entry := l_request_chain_node_queue [l_request_chain_node_id]
776 else
777 l_request_chain_node_queue_entry := l_request_chain_node_queue [l_request_chain_node_id]
778 end
779
780 if l_request_chain_node_queue_entry = Void then
781 -- We already have the locks so we may not have created the node queue yet.
782 l_request_chain_node_queue_entry := new_request_chain_node_queue_entry
783 l_request_chain_node_queue [l_request_chain_node_id] := l_request_chain_node_queue_entry
784 end
785
786 l_logged_calls_original_count := l_request_chain_node_queue_entry.count
787 if l_logged_calls_original_count = l_request_chain_node_queue_entry.capacity then
788 -- Resize node structure if there is not enough room for the new entry.
789
790 if
791 l_client_request_chain_meta_data /= Void and then
792 {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (l_client_request_chain_meta_data.item_address (request_chain_status_index), 0) = request_chain_status_open
793 then
794 -- We can reuse the data structure if the chain is still open.
795 l_request_chain_node_queue_entry := l_request_chain_node_queue_entry.aliased_resized_area ((l_logged_calls_original_count * 4) // 3)
796 else
797 l_request_chain_node_queue_entry := l_request_chain_node_queue_entry.resized_area ((l_logged_calls_original_count * 4) // 3)
798 end
799 l_request_chain_node_queue [l_request_chain_node_id] := l_request_chain_node_queue_entry
800 -- Re-add in case we have a new structure.
801 end
802
803
804 if (processor_meta_data [a_supplier_processor_id]) [processor_status_index] = processor_status_uninitialized then
805 -- We have an uninitialized processor so we must be logging the creation routine.
806
807 signify_start_of_request_chain (a_supplier_processor_id)
808 assign_supplier_processor_to_request_chain (a_supplier_processor_id, null_processor_id)
809 wait_for_request_chain_supplier_processor_locks (a_supplier_processor_id)
810
811 l_creation_request_chain_meta_data := request_chain_meta_data [a_supplier_processor_id]
812 check l_creation_request_chain_meta_data_attached: l_creation_request_chain_meta_data /= Void end
813
814 -- Set `a_client_processor_id' as the initiator of the new processor creation request chain.
815 l_creation_request_chain_meta_data [request_chain_client_pid_index] := a_client_processor_id;
816
817 -- Mark processor as initializing to signify that the creation routine is being logged and executing.
818 (processor_meta_data [a_supplier_processor_id]) [processor_status_index] := processor_status_initializing
819 end
820
821 if call_data_is_lock_passing (a_call_data) then
822 -- Retrieve the node queue list for the client processor.
823 l_request_chain_node_queue := request_chain_node_queue_list [a_client_processor_id]
824 check l_request_chain_node_queue_attached: attached l_request_chain_node_queue then end
825
826 l_client_request_chain_meta_data := request_chain_meta_data [a_client_processor_id]
827 check l_client_request_chain_meta_data_attached: attached l_client_request_chain_meta_data end
828
829 from
830 l_client_request_chain_node_queue_entry := l_request_chain_node_queue [(processor_meta_data [a_client_processor_id])[current_request_node_id_execution_index]]
831 check l_client_request_chain_node_queue_entry_attached: l_client_request_chain_node_queue_entry /= Void then end
832
833 -- Wait until the request chain has started.
834 wait_for_request_chain_to_begin (a_client_processor_id, a_supplier_processor_id, l_client_request_chain_meta_data)
835 -- Pass the client's current request chain meta data to the supplier for controlled argument processing.
836
837 -- Temporarily pass the locks of the client processor to the supplier processor.
838 l_supplier_request_chain_meta_data := request_chain_meta_data [a_supplier_processor_id]
839
840 increase_request_chain_depth (a_supplier_processor_id)
841 update_request_chain_meta_data (a_supplier_processor_id, l_client_request_chain_meta_data)
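-- From this point the supplier executes with the client's chain (and therefore its locks); any calls it logs back on the client's node are picked up and applied in the loop below.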
842
843 -- Store current logged call count to see if any feature application requests are made by the call.
844 l_logged_calls_original_count := l_client_request_chain_node_queue_entry.count;
845
846 l_request_chain_node_queue_entry.extend (a_call_data)
847
848 if l_creation_request_chain_meta_data /= Void then
849 -- We are lock passing to a creation routine so we start the loop.
850 start_processor_application_loop (a_supplier_processor_id)
851 wait_for_request_chain_to_begin (a_client_processor_id, a_supplier_processor_id, l_creation_request_chain_meta_data)
852 end
853 -- Wait for client processor to be signalled to continue.
854 -- Note: asynchronous logged calls do not need signalling.
855 processor_wait (a_client_processor_id, a_supplier_processor_id)
856
857 until
858 l_exit_loop
859 loop
860 -- We need to set the queue entry each time in case it has resized.
861 l_client_request_chain_node_queue_entry := l_request_chain_node_queue [(processor_meta_data [a_client_processor_id])[current_request_node_id_execution_index]]
862 check l_client_request_chain_node_queue_entry_attached: l_client_request_chain_node_queue_entry /= Void then end
863 l_logged_calls_current_count := l_client_request_chain_node_queue_entry.count
864 if
865 l_logged_calls_current_count > l_logged_calls_original_count
866 then
867 -- The supplier processor has logged back calls on the client processor after signalling.
868 -- The supplier processor has either finished logging or is waiting on a client sync from a synchronous call.
869
870 -- Find the next applicable call_data pointer.
871 from
872 i := l_logged_calls_original_count
873 until
874 l_call_ptr /= null_pointer
875 loop
876 l_call_ptr := l_client_request_chain_node_queue_entry [i]
877 if l_call_ptr /= null_pointer then
878 l_client_request_chain_node_queue_entry [i] := null_pointer
879 end
880 i := i + 1
881 end
882
883 if i = l_logged_calls_current_count then
884 -- We are at the last logged item so we can reduce the structure back to the original size.
885 l_client_request_chain_node_queue_entry.keep_head (l_logged_calls_original_count)
886 end
887
888 l_is_synchronous := call_data_sync_pid (l_call_ptr) /= null_processor_id
889 scoop_command_call (l_call_ptr)
890 if l_is_synchronous then
891 -- Signal processor to continue
892 processor_wake_up (a_supplier_processor_id, a_client_processor_id)
893
894 -- Make client processor wait until either another synchronous call has been logged or the initial logged call has completed.
895 processor_wait (a_client_processor_id, a_supplier_processor_id)
896 end
897 scoop_command_call_cleanup (l_call_ptr)
898 l_call_ptr := null_pointer
899 else
900 -- Reset the supplier processor's request chain meta data to its previous state.
901 update_request_chain_meta_data (a_supplier_processor_id, default_request_chain_meta_data_entry)
902 decrease_request_chain_depth (a_supplier_processor_id)
903 request_chain_meta_data [a_supplier_processor_id] := l_supplier_request_chain_meta_data
904
905 if l_creation_request_chain_meta_data /= Void then
906 -- Here we wait for the creation routine to finish.
907 l_creation_request_chain_meta_data [request_chain_status_index] := request_chain_status_waiting
908 processor_wait (a_client_processor_id, a_supplier_processor_id)
909 -- The new processor is waiting for us to close the chain so we do so and then signal it to continue.
910 signify_end_of_request_chain (a_supplier_processor_id)
911 -- Flag new processor as initialized as the creation routine has now executed.
912 (processor_meta_data [a_supplier_processor_id]) [processor_status_index] := processor_status_initialized
913 processor_wake_up (a_supplier_processor_id, a_client_processor_id)
914 end
915 l_exit_loop := True
916 end
917 end
918 else
919 -- Add call to request chain node then wait/sync as needed.
920 l_request_chain_node_queue_entry.extend (a_call_data)
921 if l_is_synchronous then
922 if l_client_sync_needed then
923 processor_wake_up (a_supplier_processor_id, a_client_processor_id)
924 end
925 processor_wait (a_client_processor_id, a_supplier_processor_id)
926 elseif l_creation_request_chain_meta_data /= Void then
927 -- We are logging an asynchronous creation routine.
928 start_processor_application_loop (a_supplier_processor_id)
929 wait_for_request_chain_to_begin (a_client_processor_id, a_supplier_processor_id, l_creation_request_chain_meta_data)
930 l_creation_request_chain_meta_data [request_chain_status_index] := request_chain_status_waiting
931 processor_wait (a_client_processor_id, a_supplier_processor_id)
932 -- The new processor is waiting for us to close the chain so we do so and then signal it to continue.
933 signify_end_of_request_chain (a_supplier_processor_id)
934 -- Flag new processor as initialized as the creation routine has now executed.
935 (processor_meta_data [a_supplier_processor_id]) [processor_status_index] := processor_status_initialized
936 processor_wake_up (a_supplier_processor_id, a_client_processor_id)
937 end
938 end
939
940 -- If we are a synchronous call and we have a dirty flag set then we should check if `a_supplier_processor_id' is dirty with respect to `a_client_processor_id'.
941 if l_is_synchronous and then {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 ((processor_meta_data [a_client_processor_id]).item_address (processor_dirty_flag_count_index), 0) > 0 then
942 if is_processor_dirty (a_client_processor_id, a_supplier_processor_id) then
943 -- Fire exception in client
944 raise_scoop_exception (scoop_dirty_processor_exception_message)
945 end
946 end
947 end
948
949 wait_for_request_chain_to_begin (a_client_processor_id, a_supplier_processor_id: like processor_id_type; a_request_chain_meta_data: like new_request_chain_meta_data_entry)
950 -- Wait for the request chain represented by `a_request_chain_meta_data' to begin.
951 local
952 l_counter: NATURAL_32
953 l_chain_status: INTEGER_32
954 do
955 -- Spin lock before resorting to the semaphore
956 l_chain_status := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (a_request_chain_meta_data.item_address (Request_chain_status_index), 0)
957 from
958 l_counter := 0
959 until
960 l_chain_status > request_chain_status_open or else l_counter > Max_yield_counter
961 loop
962 processor_yield (a_client_processor_id, l_counter)
963 l_counter := l_counter + 1
964 l_chain_status := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (a_request_chain_meta_data.item_address (Request_chain_status_index), 0)
965 end
966
967 if l_chain_status <= request_chain_status_open then
968 if a_client_processor_id = a_request_chain_meta_data [request_chain_client_pid_index] then
969 -- `a_client_processor_id' is the owner of the chain so we can wait in a semaphore if the chain is not already being applied.
970 l_chain_status := {ATOMIC_MEMORY_OPERATIONS}.compare_and_swap_integer_32 (
971 a_request_chain_meta_data.item_address (Request_chain_status_index),
972 Request_chain_status_application,
973 Request_chain_status_open
974 )
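-- `compare_and_swap_integer_32' returns the previous status: seeing `request_chain_status_open' here means this call performed the open -> application transition, so the client must wait for the head node's wake-up signal.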
975 if l_chain_status = request_chain_status_open then
976 -- The chain is not yet being applied; wait for the head node to signal the processor to continue.
977
978 -- The chain status is now set to application, so update the local flag to avoid needlessly re-entering the spin-lock loop below.
979 l_chain_status := request_chain_status_application
980 processor_wait (a_client_processor_id, a_supplier_processor_id)
981 end
982 end
983
984 from
985 l_counter := 0
986 until
987 l_chain_status > request_chain_status_open
988 loop
989 processor_yield (a_client_processor_id, l_counter)
990 l_counter := l_counter + 1
991 l_chain_status := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (a_request_chain_meta_data.item_address (Request_chain_status_index), 0)
992 end
993 end
994 end
995
996 feature {NONE} -- Implementation
997
998 frozen call_data_sync_pid (a_call_data: like call_data): INTEGER_16
999 external
1000 "C macro use %"eif_scoop.h%""
1001 end
1002
1003 frozen call_data_is_lock_passing (a_call_data: like call_data): BOOLEAN
1004 external
1005 "C macro use %"eif_scoop.h%""
1006 end
1007
1008 feature {NONE} -- Resource Initialization
1009
1010 init_scoop_manager
1011 -- Initialize processor meta data.
1012 local
1013 i: INTEGER_32
1014 l_processor_meta_data: like processor_meta_data
1015 l_request_chain_meta_data_stack_list: like request_chain_meta_data_stack_list
1016 l_request_chain_meta_data: detachable like new_request_chain_meta_data_entry
1017 do
1018 logical_cpu_count := available_cpus
1019
1020 -- Initialize exception handling helper object.
1021 create exception_helper
1022
1023 -- Initialize the default attributes used to create each SCOOP processor.
1024 create default_processor_attributes.make_with_stack_size (processor_default_stack_size)
1025
1026 default_request_chain_meta_data_entry := new_request_chain_meta_data_entry (null_processor_id)
1027
1028 from
1029 i := 1
1030 create l_processor_meta_data.make_empty (max_scoop_processors_instantiable)
1031 create request_chain_meta_data.make_filled (default_request_chain_meta_data_entry, max_scoop_processors_instantiable)
1032 create l_request_chain_meta_data_stack_list.make_empty (max_scoop_processors_instantiable)
1033 until
1034 i > Max_scoop_processors_instantiable
1035 loop
1036 l_request_chain_meta_data_stack_list.extend (new_request_chain_meta_data_stack_list_entry)
1037 l_processor_meta_data.extend (new_processor_meta_data_entry)
1038 --| FIXME: Free semaphore list when application exits
1039 i := i + 1
1040 end
1041 processor_meta_data := l_processor_meta_data
1042 request_chain_meta_data_stack_list := l_request_chain_meta_data_stack_list
1043
1044
1045 -- Create request chain node meta data queue pigeon hole for each potential processor.
1046 create request_chain_node_meta_data_queue_list.make_filled (Void, max_scoop_processors_instantiable)
1047
1048 -- Create request chain node queue pigeon hole for each potential processor.
1049 create request_chain_node_queue_list.make_filled (Void, max_scoop_processors_instantiable)
1050
1051 -- Create processor semaphore list for use during processor meta data creation.
1052 create processor_semaphore_list.make_filled (default_pointer, max_scoop_processors_instantiable)
1053
1054 -- Set up root processor and initial chain meta data.
1055 root_processor_id := assign_free_processor_id
1056
1057 signify_start_of_request_chain (root_processor_id)
1058 assign_supplier_processor_to_request_chain (root_processor_id, null_processor_id)
1059 wait_for_request_chain_supplier_processor_locks (root_processor_id)
1060 l_request_chain_meta_data := request_chain_meta_data [root_processor_id]
1061 check l_request_chain_meta_data_attached: attached l_request_chain_meta_data end
1062 l_request_chain_meta_data [request_chain_status_index] := request_chain_status_application
1063
1064 -- Mark root processor as initializing.
1065 processor_meta_data [root_processor_id].put (processor_status_initializing, processor_status_index)
1066 end
1067
1068 initialize_default_processor_meta_data (a_processor_id: like processor_id_type)
1069 -- Initialize processor `a_processor_id' meta data to default values after a creation routine has been logged.
1070 local
1071 l_request_chain_node_meta_data_queue: detachable like new_request_chain_node_meta_data_queue
1072 l_request_chain_node_queue: detachable like new_request_chain_node_queue
1073 do
1074 -- Initialize request chain node meta data queue
1075 l_request_chain_node_meta_data_queue := request_chain_node_meta_data_queue_list [a_processor_id]
1076 if not attached l_request_chain_node_meta_data_queue then
1077 l_request_chain_node_meta_data_queue := new_request_chain_node_meta_data_queue
1078 request_chain_node_meta_data_queue_list [a_processor_id] := l_request_chain_node_meta_data_queue
1079 else
1080 l_request_chain_node_meta_data_queue.fill_with_default (0, Max_request_chain_node_queue_index)
1081 end
1082
1083 -- Initialize request chain node queue
1084 l_request_chain_node_queue := request_chain_node_queue_list [a_processor_id]
1085 if not attached l_request_chain_node_queue then
1086 l_request_chain_node_queue := new_request_chain_node_queue
1087 request_chain_node_queue_list [a_processor_id] := l_request_chain_node_queue
1088 else
1089 l_request_chain_node_queue.fill_with_default (0, Max_request_chain_node_queue_index)
1090 end
1091
1092 -- Initialize processor semaphore with a count of zero for client - supplier processor notification.
1093
1094 --| FIXME: Destroy processor semaphore when resources are freed.
1095 processor_semaphore_list [a_processor_id] := new_semaphore (0)
1096
1097 (processor_meta_data [a_processor_id]).put (0, current_request_chain_id_index)
1098 (processor_meta_data [a_processor_id]).put (0, current_request_chain_node_id_index)
1099 (processor_meta_data [a_processor_id]).put (0, processor_dirty_flag_count_index)
1100 (processor_meta_data [a_processor_id]).put (0, processor_wait_condition_counter_index)
1101
1102 -- Reset execution index to `0'
1103 (processor_meta_data [a_processor_id]).put (0, current_request_node_id_execution_index)
1104 end
1105
1106 scoop_processor_loop (a_logical_processor_id: like processor_id_type)
1107 -- Entry point for all SCOOP processors, each uniquely identified by `a_logical_processor_id'.
1108 local
1109 l_loop_exit: BOOLEAN
1110 l_processor_meta_data: like new_processor_meta_data_entry
1111 l_executing_node_id: like invalid_request_chain_node_id
1112 l_executing_node_id_cursor: INTEGER_32
1113 l_request_chain_node_queue: detachable like new_request_chain_node_queue
1114 l_executing_request_chain_node: detachable like new_request_chain_node_queue_entry
1115 l_request_chain_node_meta_data_queue: detachable like new_request_chain_node_meta_data_queue
1116 l_executing_request_chain_node_meta_data: detachable like new_request_chain_node_meta_data_queue_entry
1117 l_exception_caught: BOOLEAN
1118 l_orig_sync_count, l_temp_count: INTEGER
1119 l_wait_counter: NATURAL_32
1120 l_call_ptr: POINTER
1121 l_is_head: BOOLEAN
1122 l_pid, l_client_pid: like processor_id_type
1123 do
1124 -- SCOOP Processor has been launched
1125 -- We are guaranteed that at least a creation routine has been logged.
1126
1127 from
1128 l_processor_meta_data := processor_meta_data [a_logical_processor_id]
1129 l_request_chain_node_queue := request_chain_node_queue_list [a_logical_processor_id]
1130 check l_request_chain_node_queue_attached: attached l_request_chain_node_queue then end
1131 l_request_chain_node_meta_data_queue := request_chain_node_meta_data_queue_list [a_logical_processor_id]
1132 check l_request_chain_node_meta_data_queue_attached: attached l_request_chain_node_meta_data_queue then end
1133 until
1134 l_processor_meta_data [processor_status_index] = processor_status_redundant
1135 -- Loop until the processor is marked as surplus to requirements.
1136 loop
1137 --| This is needed so that any pending gc cycles are correctly handled
1138 --| as this is a tight loop without any call to RTGC
1139 check_for_gc
1140
1141 if l_processor_meta_data [processor_status_index] >= processor_status_initializing then
1142 -- SCOOP processor is initializing/initialized so we can check current index
1143
1144 -- Retrieve execution index
1145 l_executing_node_id := l_processor_meta_data [current_request_node_id_execution_index]
1146
1147 l_executing_request_chain_node_meta_data := l_request_chain_node_meta_data_queue [l_executing_node_id]
1148 if
1149 l_executing_request_chain_node_meta_data /= Void and then l_executing_request_chain_node_meta_data [request_chain_status_index] /= request_chain_status_uninitialized
1150 -- We only allow feature application to occur when the chain is correctly set.
1151 then
1152 -- We are in a valid feature application position as the request chain has been initialized.
1153 from
1154 -- Set whether the current processor is the head of the request chain.
1155 l_is_head := a_logical_processor_id = l_executing_request_chain_node_meta_data [request_chain_meta_data_head_pid_index]
1156
1157 -- Store the client processor id for synchronization.
1158 l_client_pid := l_executing_request_chain_node_meta_data [request_chain_client_pid_index]
1159
1160 if l_is_head then
1161 l_orig_sync_count := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index), 0)
1162 -- We only need synchronization if there are tail nodes involved.
1163 if l_orig_sync_count > 1 then
1164 -- Flag `a_logical_processor_id' as waiting.
1165 processor_wait (a_logical_processor_id, a_logical_processor_id)
1166
1167 -- We are the head node, so set the sync count to minus the original sync count.
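     --| Worked example (derived from the code below), assuming an original sync count of 3
     --| (one head and two tails): the head swaps the counter to -3; each tail increments it
     --| (-3 -> -2 -> -1); once the head observes -1 it increments twice (-1 -> 0 -> 1); each
     --| tail, seeing a value >= 1, increments once more (1 -> 2 -> 3); when the head observes
     --| the original value 3, all nodes are synchronized and executing.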
1168 from
1169 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.swap_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index), -l_orig_sync_count)
1170 l_wait_counter := 0
1171 until
1172 l_temp_count = -1
1173 loop
1174 processor_yield (a_logical_processor_id, l_wait_counter)
1175 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index), 0)
1176 l_wait_counter := l_wait_counter + 1
1177 end
1178
1179 -- Flag processor as woken up.
1180 processor_wake_up (a_logical_processor_id, a_logical_processor_id)
1181
1182 -- Increment the counter twice, from -1 back up to 1 (an atomic swap with 1 would be an equivalent shortcut).
1183 -- Then wait until the sync counter reaches its original value, which signifies that all tail nodes are now executing.
1184 from
1185 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index))
1186 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index))
1187 l_wait_counter := 0
1188 until
1189 l_temp_count = l_orig_sync_count
1190 loop
1191 processor_yield (a_logical_processor_id, l_wait_counter)
1192 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index), 0)
1193 l_wait_counter := l_wait_counter + 1
1194 end
1195 end
1196 -- Tail nodes are all synchronized and executing so head node can continue.
1197
1198 -- Signify that the chain is now being applied (if not already closed).
1199 -- This is used by the client to make sure that all chains have been started before attempting any wait calls.
1200 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.compare_and_swap_integer_32 (
1201 l_executing_request_chain_node_meta_data.item_address (request_chain_status_index), request_chain_status_application, request_chain_status_open
1202 )
1203
1204 if l_temp_count = request_chain_status_open then
1205 -- The client processor is not currently waiting for this chain.
1206 else
1207 if l_temp_count = request_chain_status_application then
1208 -- The client processor is waiting to be synced.
1209 processor_wake_up (l_client_pid, a_logical_processor_id)
1210 else
1211 -- check request_chain_status_closed: l_temp_count = request_chain_status_closed end
1212 end
1213 end
1214 else
1215 -- We are a tail node: wait for the head node to set the sync count to a negative value (since we are a tail node, at least two processors must be involved).
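     --| Tail side of the handshake: wait for the counter to drop below -1, increment it once,
     --| wait for it to become positive again, and after being flagged awake increment it once
     --| more so the head can observe the original count.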
1216
1217 -- Flag `a_logical_processor_id' as waiting.
1218 processor_wait (a_logical_processor_id, a_logical_processor_id)
1219
1220 from
1221 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index), 0)
1222 l_wait_counter := 0
1223 until
1224 l_temp_count < -1
1225 loop
1226 processor_yield (a_logical_processor_id, l_wait_counter)
1227 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index), 0)
1228 l_wait_counter := l_wait_counter + 1
1229 end
1230 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index))
1231
1232 from
1233 l_wait_counter := 0
1234 until
1235 l_temp_count >= 1
1236 loop
1237 processor_yield (a_logical_processor_id, l_wait_counter)
1238 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index), 0)
1239 l_wait_counter := l_wait_counter + 1
1240 end
1241
1242 -- Flag processor as woken up.
1243 processor_wake_up (a_logical_processor_id, a_logical_processor_id)
1244
1245 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 (l_executing_request_chain_node_meta_data.item_address (request_chain_sync_counter_index))
1246 end
1247
1248 l_executing_node_id_cursor := 0
1249 l_wait_counter := 0
1250 l_loop_exit := False
1251 l_exception_caught := False
1252 l_executing_request_chain_node := l_request_chain_node_queue [l_executing_node_id]
1253 check l_executing_request_chain_node_attached: attached l_executing_request_chain_node then end
1254 until
1255 l_loop_exit
1256 loop
1257 l_temp_count := l_executing_request_chain_node.count
1258 if l_exception_caught then
1259 -- If an exception has occurred then we must not continue applying calls and exit immediately.
1260 l_loop_exit := True
1261 l_request_chain_node_meta_data_queue [l_executing_node_id] := Void
1262 elseif l_executing_node_id_cursor < l_temp_count then
1263 l_call_ptr := l_executing_request_chain_node [l_executing_node_id_cursor]
1264 if l_call_ptr /= null_pointer then
1265 l_exception_caught := scoop_command_call_with_exception_check (l_call_ptr)
1266 if l_exception_caught then
1267 -- An exception was raised during application of `l_call_ptr'.
1268 -- We need to flag `a_logical_processor_id' dirty with respect to the client of the chain.
1269
1270 -- We have caught an assertion violation so we flag the processor as dirty
1271 flag_processor_dirty (a_logical_processor_id, l_executing_request_chain_node_meta_data)
1272 end
1273 l_executing_node_id_cursor := l_executing_node_id_cursor + 1
1274 if l_executing_node_id_cursor = l_temp_count then
1275
1276 -- Client may have increased capacity of request chain node so we make sure that we have the correct object.
1277 l_executing_request_chain_node := l_request_chain_node_queue [l_executing_node_id]
1278 check l_executing_request_chain_node_attached: attached l_executing_request_chain_node then end
1279
1280 -- Check for a query if we are at the last index.
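     --| For a synchronous call (a query), `call_data_sync_pid' yields the pid of the waiting
     --| client (as the name and the comment above suggest); in that case the just-applied slot
     --| is cleared, the client is woken and the call data is freed immediately.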
1281 l_pid := call_data_sync_pid (l_call_ptr)
1282 if l_pid /= null_processor_id then
1283 -- Reset call data in structure before releasing client processor.
1284 l_executing_request_chain_node [l_executing_node_id_cursor - 1] := null_pointer
1285 -- Wake up client processor.
1286 processor_wake_up (l_pid, a_logical_processor_id)
1287
1288 -- Clean up call data.
1289 scoop_command_call_cleanup (l_call_ptr)
1290
1291 -- Reset yielding.
1292 l_wait_counter := 0
1293 end
1294 end
1295 end
1296 elseif
1297 l_is_head and then l_executing_request_chain_node_meta_data [request_chain_status_index] = request_chain_status_waiting
1298 then
1299 -- The chain status is set to waiting and we are the head pid, so this must be the application of a creation routine.
1300 -- Signal client processor to wake up and wait until signalled to continue.
1301 processor_wake_up (l_client_pid, a_logical_processor_id)
1302 processor_wait (a_logical_processor_id, l_client_pid)
1303
1304 -- Reset yielding.
1305 l_wait_counter := 0
1306 else
1307 -- Processor has caught up with chain and is waiting for more calls if any.
1308 if l_executing_request_chain_node_meta_data [request_chain_status_index] = request_chain_status_closed then
1309 -- Request chain has been fully closed therefore we can exit if all calls have been applied.
1310 if l_executing_request_chain_node = l_request_chain_node_queue [l_executing_node_id] and then l_executing_request_chain_node.count <= l_executing_node_id_cursor then
1311 l_loop_exit := True
1312 l_request_chain_node_meta_data_queue [l_executing_node_id] := Void
1313 end
1314 else
1315 -- We are in an idle state, waiting for the request chain to close or to have more calls logged so we yield to another thread.
1316 if l_executing_request_chain_node = l_request_chain_node_queue [l_executing_node_id] then
1317 processor_yield (a_logical_processor_id, l_wait_counter)
1318 l_wait_counter := l_wait_counter + 1
1319 end
1320 end
1321 -- Update request chain node in case client has resized it during logging.
1322 l_executing_request_chain_node := l_request_chain_node_queue [l_executing_node_id]
1323 check l_executing_request_chain_node_attached: attached l_executing_request_chain_node then end
1324 end
1325 end
1326
1327 -- Clean up call data. This can only be performed when the chain is closed or the client is waiting, as logging calls may resize the request chain node queue
1328 -- structure while it is being manipulated, which would break the post-condition of resize.
1329 from
1330 l_executing_request_chain_node := l_request_chain_node_queue [l_executing_node_id]
1331 check l_executing_request_chain_node_attached: attached l_executing_request_chain_node then end
1332 l_executing_node_id_cursor := l_executing_request_chain_node.count
1333 until
1334 l_executing_node_id_cursor = 0
1335 loop
1336 l_executing_node_id_cursor := l_executing_node_id_cursor - 1
1337 l_call_ptr := l_executing_request_chain_node [l_executing_node_id_cursor]
1338 if l_call_ptr /= null_pointer then
1339 scoop_command_call_cleanup (l_call_ptr)
1340 l_executing_request_chain_node [l_executing_node_id_cursor] := null_pointer
1341 end
1342 end
1343
1344 -- Increment execution cursor by one.
1345 l_executing_node_id := l_executing_node_id + 1
1346 if l_executing_node_id >= max_request_chain_node_queue_index then
1347 -- We have reached the maximum index so we reset both the execution and logging indexes so that logging can resume.
1348 l_processor_meta_data [current_request_node_id_execution_index] := 1
1349 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.swap_integer_32 (l_processor_meta_data.item_address (current_request_chain_node_id_index), 1)
1350 l_executing_node_id := 1
1351 else
1352 l_processor_meta_data [current_request_node_id_execution_index] := l_executing_node_id
1353 end
1354 else
1355 -- There are no request chains to be applied so processor is idle until more are added.
1356 from
1357 l_wait_counter := 0
1358 l_loop_exit := False
1359 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 ($idle_processor_count)
1360 until
1361 l_loop_exit
1362 loop
1363 l_loop_exit := l_request_chain_node_meta_data_queue [l_processor_meta_data [Current_request_node_id_execution_index]] /= Void
1364 if not l_loop_exit then
1365 if idle_processor_count /= processor_count then
1366 -- Make sure that the processor stays in the feature application loop.
1367 l_processor_meta_data [processor_status_index] := processor_status_initialized
1368 else
1369 if l_processor_meta_data [processor_status_index] /= processor_status_redundant then
1370 l_wait_counter := 0
1371 l_processor_meta_data [processor_status_index] := processor_status_redundant
1372 end
1373 --| FIXME Update exiting code when GC support is available.
1374 if l_wait_counter > Processor_spin_lock_limit then
1375 l_loop_exit := True
1376 end
1377 end
1378 processor_is_idle (a_logical_processor_id, l_wait_counter)
1379 l_wait_counter := l_wait_counter + 1
1380 else
1381 -- Make sure that the processor stays in the feature application loop.
1382 l_processor_meta_data [processor_status_index] := processor_status_initialized
1383 end
1384 end
1385 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 ($idle_processor_count)
1386 if l_processor_meta_data [processor_status_index] = processor_status_redundant then
1387 l_temp_count := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 ($processor_count)
1388 end
1389 end
1390 elseif l_processor_meta_data [processor_status_index] = processor_status_uninitialized then
1391 -- Processor is uninitialized so we yield control to OS for the time being.
1392 processor_yield (a_logical_processor_id, 0)
1393 else
1394 check invalid_processor_status: False end
1395 end
1396 end
1397 end
1398
1399 processor_is_idle (a_client_processor_id: like processor_id_type; a_wait_counter: NATURAL_32)
1400 -- Processor `a_client_processor_id' is idle.
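     --| Escalation: `processor_yield' handles spinning and CPU yielding; once `a_wait_counter'
     --| exceeds `max_yield_counter' the processor additionally sleeps for
     --| `processor_sleep_quantum' nanoseconds.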
1401 do
1402 processor_yield (a_client_processor_id, a_wait_counter)
1403 if a_wait_counter > max_yield_counter then
1404 -- Fully relinquish processor.
1405 processor_sleep (processor_sleep_quantum)
1406 end
1407 end
1408
1409 deadlock_counter: NATURAL_16
1410 -- Counter for deadlock detection.
1411
1412 previous_waiting_processor_count: like waiting_processor_count
1413 -- Previous number of waiting processors, used for deadlock detection.
1414
1415 deadlock_detection_limit: INTEGER_32 = 1000
1416 -- Number of iterations the system may remain in a constant idle state before raising an exception.
1417
1418 scoop_command_call_with_exception_check (a_data: like call_data): BOOLEAN
1419 -- Apply scoop call represented by `a_data'.
1420 -- Return `True' if an exception was raised.
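     --| Implementation note: the rescue clause sets `Result' to True and retries; on retry the
     --| guarded call is skipped because `Result' is already True, so the routine returns
     --| normally while reporting that an exception was caught.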
1421 do
1422 if not Result then
1423 scoop_command_call (a_data)
1424 end
1425 rescue
1426 -- Return True to signify that an exception was caught.
1427 Result := True
1428 retry
1429 end
1430
1431 scoop_command_call (a_data: like call_data)
1432 -- Make scoop call from call data `a_data'.
1433 external
1434 "C macro use %"eif_scoop.h%""
1435 alias
1436 "eif_try_call"
1437 end
1438
1439 scoop_command_call_cleanup (a_data: like call_data)
1440 -- Free scoop call data in `a_data'.
1441 external
1442 "C macro use %"eif_scoop.h%""
1443 alias
1444 "eif_free_call"
1445 end
1446
1447 null_pointer: POINTER
1448 external
1449 "C macro use %"eif_scoop.h%""
1450 alias
1451 "NULL"
1452 end
1453
1454 request_processor_resource (a_resource_index: INTEGER_32; a_resource_processor, a_requesting_processor: like processor_id_type; a_block_until_request_granted, a_high_priority: BOOLEAN): NATURAL_8
1455 --| Request access for `a_requesting_processor' to the resource indicated by `a_resource_index' held by `a_resource_processor'.
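     --| The resource is an INTEGER_32 slot of the supplier's meta data: `null_processor_id'
     --| means unlocked, otherwise it holds the id of the owning processor. Acquisition is a
     --| compare-and-swap of `null_processor_id' against `a_requesting_processor', retried
     --| until granted when `a_block_until_request_granted' is True.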
1456 local
1457 l_exit: BOOLEAN
1458 l_original_value: INTEGER_32
1459 l_wait_counter: NATURAL_32
1460 l_processor_resource: like new_processor_meta_data_entry
1461 do
1462 from
1463 l_processor_resource := processor_meta_data [a_resource_processor]
1464 if not a_high_priority then
1465 -- Make sure processors yield CPU if low priority
1466 l_wait_counter := processor_spin_lock_limit
1467 end
1468 until
1469 l_exit
1470 loop
1471 -- Use `a_resource_index' to determine what kind of resource `a_requesting_processor' needs from `a_resource_processor',
1472 -- be it exclusive access to the request queue, or access to processor locks for lock passing.
1473 if not a_high_priority then
1474 -- Yield before attempting request for low priority attempts.
1475 processor_yield (a_requesting_processor, l_wait_counter)
1476 end
1477
1478 l_original_value := {ATOMIC_MEMORY_OPERATIONS}.compare_and_swap_integer_32 (l_processor_resource.item_address (a_resource_index), a_requesting_processor, null_processor_id)
1479 if l_original_value = null_processor_id then
1480 -- The value has been correctly set so return `resource_lock_newly_attained' and exit.
1481 Result := resource_lock_newly_attained
1482 l_exit := True
1483 else
1484 -- The processor resource was previously requested.
1485 if l_original_value = a_requesting_processor then
1486 -- `a_requesting_processor' already has the lock so we can exit.
1487
1488 --| FIXME We need to handle recursive locking for unlock.
1489 Result := resource_lock_previously_attained
1490 l_exit := True
1491 elseif a_block_until_request_granted then
1492 processor_yield (a_requesting_processor, l_wait_counter)
1493 l_wait_counter := l_wait_counter + 1
1494 else
1495 -- We don't have the lock and we do not block, so we exit and return `resource_lock_unattained'.
1496 Result := resource_lock_unattained
1497 processor_yield (a_requesting_processor, l_wait_counter)
1498 l_exit := True
1499 end
1500 end
1501 end
1502 end
1503
1504 frozen check_for_gc
1505 -- Hack needed to force the gc to kick in when code is in a tight loop.
1506 external
1507 "C macro use %"eif_scoop.h%""
1508 alias
1509 "RTGC"
1510 end
1511
1512 resource_lock_unattained: NATURAL_8 = 0
1513 resource_lock_newly_attained: NATURAL_8 = 1
1514 resource_lock_previously_attained: NATURAL_8 = 2
1515 -- SCOOP Processor resource lock return values.
1516
1517 relinquish_processor_resource (a_resource_address: POINTER; a_requesting_processor: like processor_id_type; a_high_priority: BOOLEAN)
1518 -- Relinquish processor resource at `a_resource_address' previously obtained by `a_requesting_processor'.
1519 local
1520 l_original_value: INTEGER_32
1521 do
1522 l_original_value := {ATOMIC_MEMORY_OPERATIONS}.compare_and_swap_integer_32 (a_resource_address, null_processor_id, a_requesting_processor)
1523 check resource_relinquished: l_original_value = a_requesting_processor end
1524 if not a_high_priority then
1525 processor_yield (a_requesting_processor, processor_spin_lock_limit)
1526 end
1527 end
1528
1529 processor_id_type: INTEGER_32
1530 -- Type used for unique SCOOP processor id.
1531 do
1532 end
1533
1534 null_processor_id: like processor_id_type = -1
1535 -- Value to designate an unset processor id value.
1536
1537 root_processor_id: like processor_id_type
1538 -- ID of root processor.
1539
1540 scoop_dirty_processor_exception_message: STRING = "SCOOP Processor Dirty Exception"
1541 -- Exception message when a client processor has logged a call on a supplier processor that raises an exception.
1542
1543 scoop_processor_deadlock_detected_message: STRING = "SCOOP Processor Deadlock Detected"
1544
1545 scoop_request_chain_stack_overflow_message: STRING = "SCOOP Request Chain Stack Overflow"
1546
1547 feature {NONE} -- Atomic Access
1548
1549 processor_count: INTEGER_32
1550 -- Total number of processors currently available to the system.
1551
1552 waiting_processor_count: INTEGER_32
1553 -- Number of processors that are currently blocked waiting for other processors.
1554
1555 waiting_semaphore_count: INTEGER_32
1556 -- Number of processors that are waiting for their semaphore to be signalled.
1557
1558 idle_processor_count: INTEGER_32
1559 -- Number of processors that are at the end of their queue.
1560 -- If equal to `processor_count' then the system may exit.
1561
1562 feature {NONE} -- Scoop Processor Meta Data
1563
1564 default_processor_attributes: ISE_SCOOP_PROCESSOR_ATTRIBUTES
1565 -- Default scoop processor thread attributes.
1566
1567 call_data: POINTER do end
1568 result_type: POINTER do end
1569
1570 max_scoop_processors_instantiable: INTEGER_32 = 1536
1571 -- Total number of SCOOP processors that may be instantiated by the pool, including the root processor.
1572
1573 processor_default_stack_size: INTEGER_32 = 1_048_576
1574 -- Default stack size of a processor, in bytes.
1575
1576 max_wait_condition_retry_limit: INTEGER_32 = 10000
1577 -- Maximum number of retries a wait condition may have before raising an exception.
1578
1579 processor_meta_data: SPECIAL [like new_processor_meta_data_entry]
1580 -- Holder of Processor Meta Data (indexed by logical processor ID)
1581
1582 processor_semaphore_list: SPECIAL [POINTER]
1583 -- Holder of Processor Synchronous Call Semaphores
1584
1585 new_semaphore (a_sem_count: NATURAL_8): POINTER
1586 -- Return a new semaphore with an initial count of `a_sem_count'.
1587 external
1588 "C macro use <eif_threads.h>"
1589 alias
1590 "eif_thr_sem_create"
1591 end
1592
1593 processor_wait (a_client_processor_id, a_supplier_processor_id: like processor_id_type)
1594 -- Make processor `a_client_processor_id' wait until its semaphore is signalled.
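     --| A deadlock is reported when every processor would be blocked on a semaphore, i.e. when
     --| the incremented `waiting_semaphore_count' reaches `processor_count'.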
1595 local
1596 l_waiting_count, l_waiting_semaphore_count: INTEGER
1597 do
1598 l_waiting_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 ($waiting_processor_count)
1599 if a_supplier_processor_id /= a_client_processor_id then
1600 l_waiting_semaphore_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 ($waiting_semaphore_count)
1601 if l_waiting_semaphore_count = processor_count then
1602 raise_scoop_exception (scoop_processor_deadlock_detected_message)
1603 end
1604 semaphore_client_wait (processor_semaphore_list [a_client_processor_id])
1605 end
1606 end
1607
1608 processor_wake_up (a_client_processor_id, a_supplier_processor_id: like processor_id_type)
1609 -- Signal processor `a_client_processor_id' to wake up for `a_supplier_processor_id'
1610 local
1611 l_waiting_count: INTEGER
1612 l_waiting_semaphore_count: INTEGER
1613 do
1614 l_waiting_count := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 ($waiting_processor_count)
1615 if a_client_processor_id /= null_processor_id then
1616 if a_client_processor_id /= a_supplier_processor_id then
1617 l_waiting_semaphore_count := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 ($waiting_semaphore_count)
1618 semaphore_supplier_signal (processor_semaphore_list [a_client_processor_id])
1619 end
1620 end
1621 end
1622
1623 processor_busy_wait (a_client_processor_id, a_supplier_processor_id: like processor_id_type)
1624 -- Make processor `a_client_processor_id' wait until its semaphore is signalled.
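     --| Busy-waiting variant of `processor_wait': instead of blocking on an OS semaphore the
     --| processor spins on `processor_semaphore_status_index' until `processor_busy_wake_up'
     --| sets it to `processor_semaphore_status_signalled', at which point the status is swapped
     --| back to `processor_semaphore_status_running'.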
1625 local
1626 l_waiting_count, l_waiting_semaphore_count: INTEGER
1627 l_processor_meta_data: like new_processor_meta_data_entry
1628 l_wait_counter: NATURAL_32
1629 do
1630 l_waiting_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 ($waiting_processor_count)
1631 if a_supplier_processor_id /= a_client_processor_id then
1632 l_waiting_semaphore_count := {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 ($waiting_semaphore_count)
1633 if l_waiting_semaphore_count = processor_count then
1634 raise_scoop_exception (scoop_processor_deadlock_detected_message)
1635 end
1636
1637 from
1638 l_processor_meta_data := processor_meta_data [a_client_processor_id]
1639 until
1640 {ATOMIC_MEMORY_OPERATIONS}.compare_and_swap_integer_32 (l_processor_meta_data.item_address (processor_semaphore_status_index), processor_semaphore_status_running, processor_semaphore_status_signalled) = processor_semaphore_status_signalled
1641 loop
1642 if l_wait_counter > 0 then
1643 processor_cpu_yield
1644 l_wait_counter := 0
1645 else
1646 from
1647 l_wait_counter := 1
1648 until
1649 l_wait_counter = processor_spin_lock_limit
1650 loop
1651 l_wait_counter := l_wait_counter + 1
1652 end
1653 end
1654 end
1655 end
1656 end
1657
1658 processor_busy_wake_up (a_client_processor_id, a_supplier_processor_id: like processor_id_type)
1659 -- Signal processor `a_client_processor_id' to wake up for `a_supplier_processor_id'
1660 local
1661 l_waiting_count: INTEGER
1662 l_waiting_semaphore_count: INTEGER
1663 do
1664 l_waiting_count := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 ($waiting_processor_count)
1665 if a_client_processor_id /= null_processor_id then
1666 if a_client_processor_id /= a_supplier_processor_id then
1667 l_waiting_semaphore_count := {ATOMIC_MEMORY_OPERATIONS}.decrement_integer_32 ($waiting_semaphore_count)
1668 l_waiting_semaphore_count := {ATOMIC_MEMORY_OPERATIONS}.swap_integer_32 ((processor_meta_data [a_client_processor_id]).item_address (processor_semaphore_status_index), processor_semaphore_status_signalled)
1669 end
1670 end
1671 end
1672
1673 new_processor_meta_data_entry: SPECIAL [INTEGER_32]
1674 -- New Processor Meta Data Value Entry
1675 do
1676 create Result.make_filled (null_processor_id, processor_meta_data_index_count)
1677 end
1678
1679 request_chain_meta_data: SPECIAL [like new_request_chain_meta_data_entry]
1680 -- Holder of Processor Request Chain Meta Data (indexed by logical processor ID)
1681
1682 request_chain_meta_data_stack_list: SPECIAL [like new_request_chain_meta_data_stack_list_entry]
1683 -- Holder of Processor Request Chain Meta Data stacks (indexed by logical processor ID)
1684
1685 new_request_chain_meta_data_stack_list_entry: SPECIAL [detachable like new_request_chain_meta_data_entry]
1686 do
1687 create Result.make_filled (Void, request_chain_meta_data_stack_list_entry_size)
1688 end
1689
1690 request_chain_meta_data_stack_list_entry_size: NATURAL_8 = 16
1691 -- Default depth of the request chain meta data stack.
1692
1693 max_request_chain_depth: INTEGER = 15
1694 -- Maximum request chain depth.
1695 -- `request_chain_meta_data_stack_list_entry_size' - 1
1696
1697 new_request_chain_meta_data_entry (a_client_processor_id: like processor_id_type): SPECIAL [INTEGER_32]
1698 -- New Request Chain Meta Data
1699 do
1700 create Result.make_empty (request_chain_meta_data_default_size)
1701 -- Add meta data header default values.
1702 Result.extend (0) -- pid count
1703 Result.extend (a_client_processor_id)
1704 Result.extend (invalid_request_chain_id)
1705 Result.extend (request_chain_status_uninitialized)
1706 Result.extend (0) -- sync count
1707 check request_chain_header_set: Result.count = request_chain_meta_data_header_size end
1708 -- Format = {pid_count, client pid, client pid request chain id, node status, sync count, head_pid, tailx_pid, head request chain node id, tail request chain node id}
1709 end
1710
1711 default_request_chain_meta_data_entry: like new_request_chain_meta_data_entry
1712 -- Default request chain meta data entry.
1713
1714 request_chain_pid_count_index: NATURAL_8 = 0
1715 request_chain_client_pid_index: NATURAL_8 = 1
1716 request_chain_client_pid_request_chain_id_index: NATURAL_8 = 2
1717 request_chain_status_index: NATURAL_8 = 3
1718 request_chain_sync_counter_index: NATURAL_8 = 4
1719 -- Index values for request chain states
1720
1721 request_chain_status_uninitialized: INTEGER_8 = -1
1722 request_chain_status_open: INTEGER_8 = 0
1723 request_chain_status_application: INTEGER_8 = 1
1724 request_chain_status_waiting: INTEGER_8 = 2
1725 request_chain_status_closed: INTEGER_8 = 3
1726 request_chain_status_dirty: INTEGER_8 = 4
1727 -- Various state constants for a request chain.
1728
1729 request_chain_meta_data_default_size: INTEGER_32 = 11
1730 -- meta data header + (3 * supplier PID request chain meta data)
1731 request_chain_meta_data_header_size: INTEGER_32 = 5
1732 -- Size of request chain meta data header {pid_count, client pid, client pid request chain id, node status, sync count}
1733
1734 request_chain_meta_data_head_pid_index: INTEGER_32 = 5
1735 -- Index of head processor id in the request chain.
1736
1737 request_chain_meta_data_supplier_pid_meta_data_size: INTEGER_32 = 2
1738 -- {Supplier PID, Supplier Request Chain Node ID}
1739
1740 new_request_chain_node_meta_data_entry,
1741 new_request_chain_node_meta_data_queue_entry: like new_request_chain_meta_data_entry
1742 -- New Request Chain Node Meta Data
1743 do
1744 check do_not_call: False end
1745 create Result.make_empty (0)
1746 end
1747
1748 processor_status_index: INTEGER_32 = 0
1749 -- Current Status of the Scoop Processor at index 'scoop_logical_index'.
1750
1751 processor_status_uninitialized: INTEGER_32 = -1
1752 -- Only processor object has been allocated at this point.
1753
1754 processor_status_redundant: INTEGER_32 = 0
1755 -- Processor is redundant.
1756
1757 processor_status_initializing: INTEGER_32 = 1
1758 -- Processor is being initialized by executing its creation routine.
1759 processor_status_initialized: INTEGER_32 = 2
1760 -- Processor is fully initialized and executing.
1761
1762 current_request_node_id_execution_index: INTEGER_32 = 1
1763
1764 current_request_chain_id_index: INTEGER_32 = 2
1765 -- Index to value containing current request chain id.
1766
1767 current_request_chain_id_depth_index: INTEGER_32 = 3
1768 -- Index to value containing current depth of request chain id.
1769 -- Initial value = -1 to signify that there is no active request chain.
1770
1771 invalid_request_chain_id: INTEGER_32 = -1
1772 default_request_chain_depth_value: INTEGER_32 = -1
1773
1774 request_chain_id_lock_index: INTEGER_32 = 4
1775 -- Index to value containing the lock on the processor for request chain initialization.
1776
1777 current_request_chain_node_id_index: INTEGER_32 = 5
1778 -- Index to value containing current request chain node id.
1779
1780 invalid_request_chain_node_id: INTEGER_32 = -1
1781
1782 current_request_chain_node_id_lock_index: INTEGER_32 = 6
1783 -- Lock index used for accessing current request chain node id.
1784
1785 processor_reference_count_index: INTEGER_32 = 7
1786 -- Current reference count of the processor
1787
1788 processor_dirty_flag_count_index: INTEGER_32 = 8
1789 -- Number of processors that are dirty with respect to `Current'.
1790
1791 processor_dirty_processor_client_list_lock_index: INTEGER_32 = 9
1792 -- Lock index used for accessing the dirty processor client list.
1793
1794 processor_wait_condition_counter_index: INTEGER_32 = 10
1795 -- Index of the number of times a wait condition has failed.
1796
1797 processor_semaphore_status_index: INTEGER_32 = 11
1798 -- Index of the quick semaphore for the processor.
1799
1800 processor_semaphore_status_running: INTEGER = 0
1801 processor_semaphore_status_busy_waiting: INTEGER = 1
1802 processor_semaphore_status_waiting: INTEGER = 2
1803 processor_semaphore_status_signalled: INTEGER = 3
1804 -- Various statuses for processor semaphores.
1805
1806 processor_meta_data_index_count: INTEGER_32 = 12
1807 -- Number of items in the SCOOP Processor Meta Data structure.
1808
1809
1810 request_chain_node_meta_data_queue_list: SPECIAL [detachable like new_request_chain_node_meta_data_queue]
1811 -- List of all request chain node meta data queues, indexed by supplier processor id.
1812
1813 new_request_chain_node_meta_data_queue: SPECIAL [detachable like new_request_chain_node_meta_data_queue_entry]
1814 -- Return a new processor request chain node meta data queue.
1815 do
1816 create Result.make_filled (Void, max_request_chain_node_queue_index)
1817 end
1818
1819 request_chain_node_queue_list: SPECIAL [detachable like new_request_chain_node_queue]
1820 -- List of all request chain node queues, indexed by supplier processor id.
1821
1822 new_request_chain_node_queue: SPECIAL [detachable like new_request_chain_node_queue_entry]
1823 -- Return a new processor request chain node queue.
1824 do
1825 create Result.make_filled (Void, max_request_chain_node_queue_index)
1826 end
1827
1828 max_request_chain_node_queue_index: INTEGER_32 = 4096
1829 -- Maximum index of a processor's request chain node queue.
1830
1831 new_request_chain_node_queue_entry: SPECIAL [POINTER]
1832 -- New entry for request chain node queue
1833 do
1834 create Result.make_empty (default_request_chain_node_queue_entry_size)
1835 end
1836
1837 default_request_chain_node_queue_entry_size: INTEGER_32 = 5
1838 -- Default size of a request chain node queue entry.
1839
1840 logical_cpu_count: NATURAL_32
1841 -- Number of logical CPUs available on the system.
1842
1843 feature {NONE} -- Externals
1844
1845 semaphore_client_wait (a_sem_address: POINTER)
1846 -- Wait for semaphore `a_sem_address'.
1847 external
1848 "C macro use %"eif_scoop.h%""
1849 alias
1850 "RTS_SEMAPHORE_CLIENT_WAIT"
1851 end
1852
1853 semaphore_supplier_signal (a_sem_address: POINTER)
1854 -- Signal semaphore `a_sem_address'.
1855 external
1856 "C macro use %"eif_scoop.h%""
1857 alias
1858 "RTS_SEMAPHORE_SUPPLIER_SIGNAL"
1859 end
1860
1861 frozen available_cpus: NATURAL_8
1862 --| FIXME: Not Currently used: Implemented for future pooling optimizations
1863 external
1864 "C inline use %"eif_scoop.h%""
1865 alias
1866 "[
1867 //#ifdef _WIN32
1868 //#include <windows.h>
1869 //#elif MACOS
1870 //#include <sys/param.h>
1871 //#include <sys/sysctl.h>
1872 //#else
1873 //#include <unistd.h>
1874 //#endif
1875 #ifdef EIF_WINDOWS
1876 SYSTEM_INFO sysinfo;
1877 GetSystemInfo(&sysinfo);
1878 return sysinfo.dwNumberOfProcessors;
1879 #elif EIF_MACOSX
1880 int nm[2];
1881 size_t len = 4;
1882 uint32_t count;
1883
1884 nm[0] = CTL_HW; nm[1] = HW_AVAILCPU;
1885 sysctl(nm, 2, &count, &len, NULL, 0);
1886
1887 if(count < 1) {
1888 nm[1] = HW_NCPU;
1889 sysctl(nm, 2, &count, &len, NULL, 0);
1890 if(count < 1) { count = 1; }
1891 }
1892 return count;
1893 #else
1894 return sysconf(_SC_NPROCESSORS_ONLN);
1895 #endif
1896 ]"
1897 end
1898
1899 frozen create_and_initialize_scoop_processor (current_obj: like Current; init_func, attr: POINTER; a_processor_id: like processor_id_type)
1900 -- Initialize and start SCOOP Processor thread.
1901 external
1902 "C inline use %"eif_threads.h%""
1903 alias
1904 "[
1905 eif_thr_create_with_attr_new ((EIF_OBJECT)$current_obj, (EIF_PROCEDURE) $init_func, (EIF_INTEGER_32)$a_processor_id, EIF_TRUE, (EIF_POINTER)$attr);
1906 ]"
1907 end
1908
1909 frozen processor_sleep (nanoseconds: INTEGER_64)
1910 -- Suspend thread execution for interval specified in
1911 -- `nanoseconds' (1 nanosecond = 10^(-9) second).
1912 require
1913 non_negative_nanoseconds: nanoseconds >= 0
1914 external
1915 "C blocking use %"eif_misc.h%""
1916 alias
1917 "eif_sleep"
1918 end
1919
1920 one_second_expressed_in_nanoseconds: INTEGER_64 = 1_000_000_000
1921 -- Value of one second expressed in nanoseconds.
1922
1923 frozen processor_yield (a_processor_id: like processor_id_type; a_iteration_number: NATURAL_32)
1924 -- Yield processor `a_processor_id' to competing threads for an OS-specific amount of time.
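     --| Yield strategy: spin for up to `processor_spin_lock_limit' iterations, then yield the
     --| CPU; past `max_yield_counter' iterations the deadlock heuristic below is also
     --| evaluated, based on `waiting_processor_count', `waiting_semaphore_count' and
     --| `idle_processor_count'.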
1925 local
1926 l_temp_val: INTEGER_32
1927 do
1928 if a_iteration_number < processor_spin_lock_limit then
1929 -- Spin lock
1930 l_temp_val := l_temp_val + 1
1931 elseif a_iteration_number < max_yield_counter then
1932 processor_cpu_yield
1933 else
1934 processor_cpu_yield
1935 -- Check for any potential deadlock.
1936
1937 l_temp_val := {ATOMIC_MEMORY_OPERATIONS}.add_integer_32 ($waiting_processor_count, 0)
1938 if
1939 waiting_semaphore_count > 0 and then
1940 l_temp_val = previous_waiting_processor_count and then
1941 (l_temp_val + idle_processor_count = processor_count)
1942 then
1943 if {ATOMIC_MEMORY_OPERATIONS}.increment_integer_32 ($deadlock_counter) > (deadlock_detection_limit * processor_count) then
1944 raise_scoop_exception (scoop_processor_deadlock_detected_message)
1945 end
1946 else
1947 l_temp_val := {ATOMIC_MEMORY_OPERATIONS}.swap_integer_32 ($deadlock_counter, 0)
1948 end
1949 previous_waiting_processor_count := waiting_processor_count
1950 end
1951 check_for_gc
1952 end
1953
1954 processor_spin_lock_limit: NATURAL_32 = 100
1955 -- Number of iterations to spin lock until yielding.
1956
1957 max_yield_counter: NATURAL_32 = 10_000
1958 -- Maximum value of the yield counter.
1959
1960 processor_sleep_quantum: NATURAL_32 = 15_000_000
1961 -- Number of nanoseconds an idle processor should temporarily sleep for (15 ms)
1962
1963 frozen processor_cpu_yield
1964 -- Yield the current thread to other threads running on the same CPU.
1965 external
1966 "C macro use %"eif_scoop.h%""
1967 alias
1968 "RTS_PROCESSOR_CPU_YIELD"
1969 end
1970
1971 frozen native_thread_id: POINTER
1972 -- Native Thread ID of the `Current' SCOOP Processor
1973 external
1974 "C inline use %"eif_threads.h%""
1975 alias
1976 "return eif_thr_thread_id();"
1977 end
1978
1979 frozen root_processor_wait_for_redundancy
1980 -- Called by root processor to wait for `child' processors (direct/indirect).
1981 external
1982 "C blocking use %"eif_threads.h%""
1983 alias
1984 "eif_thr_join_all"
1985 end
1986
1987 feature {NONE} -- Debugger Helpers
1988
1989 frozen processor_id_from_object (a_object: ANY): like processor_id_type
1990 external
1991 "C inline use %"eif_scoop.h%""
1992 alias
1993 "RTS_PID($a_object)"
1994 end
1995
1996 frozen call_data_result (a_call_data: like call_data): POINTER
1997 require
1998 a_call_data_valid: a_call_data /= default_pointer
1999 external
2000 "C inline use %"eif_scoop.h%""
2001 alias
2002 "((call_data*) $a_call_data)->result"
2003 end
2004
2005 frozen call_data_count (a_call_data: like call_data): NATURAL_32
2006 require
2007 a_call_data_valid: a_call_data /= default_pointer
2008 external
2009 "C inline use %"eif_scoop.h%""
2010 alias
2011 "((call_data*) $a_call_data)->count"
2012 end
2013
2014 frozen call_data_body_index (a_call_data: like call_data): INTEGER_32
2015 require
2016 a_call_data_valid: a_call_data /= default_pointer
2017 external
2018 "C inline use %"eif_scoop.h%""
2019 alias
2020 "[
2021 #ifdef WORKBENCH
2022 return ((call_data*) $a_call_data)->body_index;
2023 #endif
2024 ]"
2025 end
2026
2027 frozen call_data_target (a_call_data: like call_data): POINTER
2028 require
2029 a_call_data_valid: a_call_data /= default_pointer
2030 external
2031 "C inline use %"eif_scoop.h%""
2032 alias
2033 "((call_data*) $a_call_data)->target"
2034 end
2035
2036 frozen call_data_argument (a_call_data: like call_data; i_th: NATURAL_32): POINTER
2037 require
2038 a_call_data_valid: a_call_data /= default_pointer
2039 external
2040 "C inline use %"eif_scoop.h%""
2041 alias
2042 "&((call_data*) $a_call_data)->argument [$i_th]"
2043 end
2044
2045 note
2046 copyright: "Copyright (c) 1984-2012, Eiffel Software and others"
2047 source: "[
2048 Eiffel Software
2049 5949 Hollister Ave., Goleta, CA 93117 USA
2050 Telephone 805-685-1006, Fax 805-685-6869
2051 Website http://www.eiffel.com
2052 Customer support http://support.eiffel.com
2053 ]"
2054 license: "Eiffel Forum License v2 (see http://www.eiffel.com/licensing/forum.txt)"
2055
2056 end
