source: CIVL/include/impls/civl-omp.cvl@ 1aaefd4

main test-branch
Last change on this file since 1aaefd4 was 1aaefd4, checked in by Wenhao Wu <wuwenhao@…>, 3 years ago

Add the declaration of the system function $elaborate_domain to civlc.cvh and call it in $omp_arrive_loop,
so that the domain can be concrete before iterating.
Also, add an exception handling in CommonSymbolicUtility in case a symbolic domain is iterated.

git-svn-id: svn://vsl.cis.udel.edu/civl/trunk@5754 fb995dde-84ed-4084-dfe6-e5aef3e2452c

  • Property mode set to 100644
File size: 20.8 KB
Line 
1// civlc-omp.cvl: implementations of functions and types in civlc-omp.cvh
2#include<civl-omp.cvh>
3#include<civlc.cvh>
4#include<mem.cvh>
5#include<pointer.cvh>
6#include<seq.cvh>
7#include<concurrency.cvh>
8#include<domain.cvh>
9#pragma CIVL ACSL
/* Completes the definition of struct OMP_gshared given in civlc-omp.cvh.
 * Global (team-wide) state for ONE shared variable: tracks which threads
 * have created their local views, the per-thread views themselves, and a
 * pointer back to the original shared object. */
struct OMP_gshared {
  int nthreads;           // number of threads in the owning team
  _Bool init[];           // which threads have joined (created a local view)
  $omp_shared shares[];   // per-thread local views; length nthreads
  void * original;        // pointer to original variable
};
18
/* Completes the definition of struct OMP_shared given in civlc-omp.cvh.
 * One thread's view of a single shared variable: a private local copy
 * plus a status object mirroring the variable's structure (leaf values
 * EMPTY/FULL/MODIFIED, see $omp_read/$omp_write/$omp_flush). */
struct OMP_shared {
  /* The global object this view belongs to */
  $omp_gshared gshared;
  /* The thread id */
  int tid;
  /* Pointer to the local copy of the shared variable.
   * This provides the thread's "private view" of the variable. */
  void * local;
  /* Pointer to the local status variable */
  void * status;
};
31
/* Completes the definition of struct OMP_work_record
 * given in civlc-omp.cvh.
 * One thread's entry for a single worksharing event (loop, sections,
 * or single); a queue entry holds one record per thread. */
struct OMP_work_record {
  int kind;            // loop, barrier, sections, or single
  int location;        // location in model of construct
  _Bool arrived;       // has this thread arrived yet?
  $domain loop_domain; // full loop domain; null if not loop
  $domain subdomain;   // tasks this thread must do
  // reduction operation?
};
42
/* Global team object shared by all threads of one OpenMP team. */
struct OMP_gteam {
  /* scope in which data is allocated in heap */
  $scope scope;
  /* number of threads in team */
  int nthreads;
  /* which threads have joined this gteam */
  _Bool init[];
  /* work queue. Width nthreads. For each worksharing region,
   * one entry of size nthreads is added; entries are indexed
   * as work[entry][tid] and removed when all threads arrive. */
  $omp_work_record work[][];
  /* the shared object data. */
  $omp_gshared shared[];
  $gbarrier gbarrier;
  /* r/w sets for each thread, published at barriers for race checking */
  $mem read_sets[];
  $mem write_sets[];
};
62
/* Per-thread team object: one per (team, thread) pair. */
struct OMP_team {
  $omp_gteam gteam;     // the global team this thread belongs to
  $scope scope;         // scope in which this object was allocated
  int tid;              // this thread's id within the team
  $omp_shared shared[]; // the thread's views of the team's shared variables
  $barrier barrier;     // local handle on the team's barrier
};
70
/* A simple integer-valued signal used to implement the atomic-execution
 * lock (see $omp_helper_signal_wait / $omp_helper_signal_send). */
struct OMP_HELPER_SIGNAL {
  int value;
};
74
75/* *********************** Functions *********************** */
76
77$atomic_f $omp_gteam $omp_gteam_create($scope scope, int nthreads) {
78 $omp_gteam result = ($omp_gteam)$malloc(scope, sizeof(struct OMP_gteam));
79 _Bool f = $false;
80 $mem empty = $mem_empty();
81
82 //$seq_init(&empty, 0, NULL);
83 result->scope = scope;
84 result->nthreads = nthreads;
85 $seq_init(&result->init, nthreads, &f);
86 $seq_init(&result->work, 0, NULL);
87 $seq_init(&result->shared, 0, NULL);
88 $seq_init(&result->read_sets, nthreads, &empty);
89 $seq_init(&result->write_sets, nthreads, &empty);
90 result->gbarrier = $gbarrier_create(scope, nthreads);
91 return result;
92}
93
94$atomic_f void $omp_gteam_destroy($omp_gteam gteam) {
95 int length = $seq_length(&gteam->work);
96
97 $assert(length == 0, "There are still %d queued worksharing events.", length);
98 $free(gteam->gbarrier);
99 $free(gteam);
100}
101
102/* creates new local team object for a specific thread. */
103$atomic_f $omp_team $omp_team_create($scope scope, $omp_gteam gteam, int tid) {
104 $omp_team result = ($omp_team)$malloc(scope, sizeof(struct OMP_team));
105
106 $assert(!gteam->init[tid], "Thread %d has already joined gteam",
107 tid);
108 gteam->init[tid] = $true;
109 result->gteam = gteam;
110 result->scope = scope;
111 result->tid = tid;
112 $seq_init(&result->shared, 0, NULL);
113 result->barrier = $barrier_create(scope, gteam->gbarrier, tid);
114 return result;
115}
116
117/* destroys the local team object */
118$atomic_f void $omp_team_destroy($omp_team team) {
119 /*int numShared = $seq_length(&team->shared);
120
121 for(int i = 0; i < numShared; i++){
122 $free(team->shared[i]);
123 }*/
124 $free(team->barrier);
125 $free(team);
126}
127
128/* creates new global shared object, associated to the given
129 * global team. A pointer to the shared variable that this
130 * object corresponds to is given.
131 */
132$atomic_f $omp_gshared $omp_gshared_create($omp_gteam gteam,
133 void *original) {
134 $omp_gshared result =
135 ($omp_gshared)$malloc(gteam->scope, sizeof(struct OMP_gshared));
136 _Bool f = $false;
137 $omp_shared null = NULL;
138
139 result->nthreads = gteam->nthreads;
140 $seq_init(&result->init, gteam->nthreads, &f);
141 result->original = original;
142 $seq_init(&result->shares, gteam->nthreads, &null);
143 //result->status = status;
144 return result;
145}
146
/* destroys the global shared object, copying the content
 * to the original variable.
 * NOTE(review): no copy-back is performed here -- this function only
 * frees the object.  The write-back presumably happens earlier via
 * $omp_flush; confirm with callers or update the comment above. */
$atomic_f void $omp_gshared_destroy($omp_gshared gshared) {
  $free(gshared);
}
153
154$atomic_f $omp_shared $omp_shared_create($omp_team team,
155 $omp_gshared gshared, void *local, void *status) {
156 $omp_shared result =
157 ($omp_shared)$malloc(team->scope, sizeof(struct OMP_shared));
158 //void *statusRefs[];
159 //int numStatusRefs;
160 int sharedLength;
161
162 $assert(!gshared->init[team->tid], "Thread %d has already created its local copy for %p.\n",
163 team->tid, gshared);
164 result->gshared = gshared;
165 result->tid = team->tid;
166 result->local = local;
167 // copies the shared data to the local copy
168 $copy(local, gshared->original);
169 result->status = status;
170 // set all leaf nodes of status to FULL
171 $set_leaf_nodes(status, FULL);
172 //$leaf_node_ptrs(&statusRefs, status);
173 //numStatusRefs = $seq_length(&statusRefs);
174 //for(int i = 0; i < numStatusRefs; i++)
175 //*((int*)(statusRefs[i])) = FULL;
176 sharedLength = $seq_length(&team->shared);
177 $seq_insert(&team->shared, sharedLength, &result, 1);
178 //$seq_append(&team->shared, &result, 1);
179 gshared->shares[team->tid] = result;
180 return result;
181}
182
/* Destroys a thread's local view object.  Only the wrapper is freed;
 * the local copy and status objects passed to $omp_shared_create are
 * not freed here (they remain owned by the caller). */
$atomic_f void $omp_shared_destroy($omp_shared shared) {
  $free(shared);
}
186
187int $omp_modified_tid($omp_shared shared, void *ref){
188 int tid = shared->tid;
189 $omp_gshared gshared = shared->gshared;
190 int nthreads = gshared->nthreads;
191 _Bool isIdentityRef = $is_identity_ref(ref);
192
193 for(int i=0; i < nthreads;i++){
194 if(i == tid)
195 continue;
196 else{
197 $omp_shared other = gshared->shares[i];
198 $omp_var_status other_status;
199 _Bool segFault = $false;
200
201 if(!$is_derefable_pointer(other))
202 continue;
203 if(isIdentityRef){
204 segFault = $has_leaf_node_equal_to(other->status, MODIFIED);
205 }else{
206 int *other_status_ref = (int*)$translate_ptr(ref, other->status);
207
208 segFault = (*other_status_ref) == MODIFIED;
209 }
210 if(segFault)
211 return i;
212 }
213 }
214 return -1;
215}
216
/* Reads the memory location denoted by ref through the thread's view:
 * first checks no other thread holds an unflushed write to it (race),
 * then refreshes the local copy from the shared original if the status
 * is EMPTY, and finally copies the local value into result.
 *
 * shared: the calling thread's view of the variable
 * result: buffer receiving the value read
 * ref:    reference into the thread's local copy
 */
void $omp_read($omp_shared shared, void *result, void *ref) {
  $atomic{
    int otherTid = $omp_modified_tid(shared, ref);

    if (otherTid >= 0) {
      void * p = $translate_ptr(ref, shared->gshared->original);


      $assert($false, "Thread %d can not safely read memory location %p, "
          "because thread %d has written to\nthat memory location and hasn't flushed yet.",
          shared->tid, p, otherTid);
    }
    if($is_identity_ref(ref)){
      // whole-object read: refresh only if every leaf is EMPTY
      if($leaf_nodes_equal_to(shared->status, EMPTY)){
        $copy(ref, shared->gshared->original); // copy shared to local
        $set_leaf_nodes(shared->status, FULL);
      }
    }else{
      // single-location read: consult that location's status leaf
      int *status_ref = (int*)$translate_ptr(ref, shared->status);
      int status = *status_ref;

      if(status == EMPTY){
        void *global = $translate_ptr(ref, shared->gshared->original);

        $copy(ref, global); // copy shared to local
        *status_ref = FULL; // set status to FULL
      }
    }
    // read local
    $copy(result, ref);
  }
}
249
/* Writes value to the memory location denoted by ref through the
 * thread's view: first checks no other thread holds an unflushed write
 * to the same location (race), then writes the local copy and marks the
 * location's status MODIFIED (whole object or single leaf). */
void $omp_write($omp_shared shared, void *ref, void *value) {
  $atomic{
    int otherTid = $omp_modified_tid(shared, ref);

    if (otherTid >= 0) {
      void *p = $translate_ptr(ref, shared->gshared->original);

      $assert($false, "Thread %d can not safely write to memory location %p, because thread %d has\n"
          "written to that memory location and hasn't flushed yet.",
          shared->tid, p, otherTid);
    }
    $copy(ref, value);
    if($is_identity_ref(ref)){
      $set_leaf_nodes(shared->status, MODIFIED);
    }else{
      int *status_ref = (int*)$translate_ptr(ref, shared->status);
      *status_ref = MODIFIED;
    }
  }
}
270
/* Applies the associative operation op in place on the shared original:
 * shared_value = op(local, shared_value), where shared_value is the
 * location in the original object corresponding to local.
 * Only applicable to scalar type?  (original author's open question) */
/*@ depends_on \access(shared, local, shared->gshared->original); */
$atomic_f void $omp_apply_assoc($omp_shared shared,
    $operation op,
    void *local){
  void *shared_ref = $translate_ptr(local, shared->gshared->original);

  $apply(shared_ref, op, local, shared_ref);
}
280
281
/* Combines a thread's local reduction contribution into the final
 * result: reduction_final = op(reduction_local, reduction_final). */
$atomic_f void $omp_reduction_combine(
    $operation op, void *reduction_final, void *reduction_local
) {
  $apply(reduction_final, op, reduction_local, reduction_final);
}
287
/* Flushes the part of a thread's view denoted by ref: for each leaf of
 * the referenced object, a MODIFIED leaf is written back to the shared
 * original, and any non-EMPTY leaf is invalidated (status EMPTY, local
 * value reset to default).
 * need to drill down into all leaf nodes of the object being flushed...
 * also, it should be ok to flush a memory unit if you are not
 * the owner but you also have no reads or writes to that variable
 * TODO: assert there is at most one thread for which this memory unit has status MODIFIED; */
void $omp_flush($omp_shared shared, void *ref) {
  $omp_gshared gshared = shared->gshared;
  int tid = shared->tid;
  void *refs[];
  int numRefs;

  // get all leaf node pointers
  $leaf_node_ptrs(&refs, ref);
  numRefs = $seq_length(&refs);
  for(int i = 0; i < numRefs; i++){
    void *leaf = refs[i];
    int *leaf_status = (int *)$translate_ptr(leaf, shared->status);
    //void *leaf_local = (int *)$translate_ptr(leaf, shared->local);
    void *leaf_shared = (int *)$translate_ptr(leaf, gshared->original);

    switch(*leaf_status){
      case EMPTY:
        break;
      case MODIFIED:
        $copy(leaf_shared, leaf);
        // fall through: a freshly written-back leaf is invalidated
        // exactly like a FULL one
      case FULL:
        *leaf_status = EMPTY;
        $set_default(leaf);
        break;
      default:
        $assert(0, "Unknown omp shared variable status: %d", *leaf_status);
    }
  }
}
322
323void $omp_flush_all($omp_team team) {
324 int num_shared = $seq_length(&team->shared);
325
326 for (int i=0; i<num_shared; i++) {
327 $omp_shared shared = team->shared[i];
328
329 $omp_flush(shared, shared->local);
330 }
331}
332
333
/* Checks the calling thread's published read/write sets against every
 * other thread's published sets, asserting the three pairwise
 * non-intersection conditions (my reads vs. their writes, my writes vs.
 * their reads, my writes vs. their writes).  Reads the sets from
 * team->gteam->read_sets / write_sets, so they must have been published
 * there before calling. */
$atomic_f void $check_data_race($omp_team team) {
  int cur_tid = team->tid;
  $mem out_s0 = $mem_empty();   // receives a witness from the first set
  $mem out_s1 = $mem_empty();   // receives a witness from the second set
  $mem cur_mw = team->gteam->write_sets[cur_tid];
  $mem cur_mr = team->gteam->read_sets[cur_tid];

  // Check data race.
  for (int tmp_tid = 0; tmp_tid < team->gteam->nthreads; tmp_tid++) {
    if (tmp_tid == cur_tid) continue;

    $mem tmp_mr = team->gteam->read_sets[tmp_tid];
    $mem tmp_mw = team->gteam->write_sets[tmp_tid];

    $assert($mem_no_intersect(cur_mr, tmp_mw, &out_s0, &out_s1),
        "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
        out_s0, cur_tid, out_s1, tmp_tid);
    $assert($mem_no_intersect(cur_mw, tmp_mr, &out_s0, &out_s1),
        "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
        out_s0, cur_tid, out_s1, tmp_tid);
    $assert($mem_no_intersect(cur_mw, tmp_mw, &out_s0, &out_s1),
        "Data-race detected: %p written by thread %d intersects %p written by thread %d\n",
        out_s0, cur_tid, out_s1, tmp_tid);
  }
}
359
360void $omp_barrier($omp_team team) {
361 int cur_tid = team->tid;
362 $mem out_s0 = $mem_empty();
363 $mem out_s1 = $mem_empty();
364 $mem cur_mw = $write_set_pop();
365 $mem cur_mr = $read_set_pop();
366
367 team->gteam->write_sets[cur_tid] = cur_mw;
368 team->gteam->read_sets[cur_tid] = cur_mr;
369 for (int tmp_tid = 0; tmp_tid < team->gteam->nthreads; tmp_tid++) {
370 if (tmp_tid == cur_tid) continue;
371
372 $mem tmp_mr = team->gteam->read_sets[tmp_tid];
373 $mem tmp_mw = team->gteam->write_sets[tmp_tid];
374
375 $assert($mem_no_intersect(cur_mr, tmp_mw, &out_s0, &out_s1),
376 "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
377 out_s0, cur_tid, out_s1, tmp_tid);
378 $assert($mem_no_intersect(cur_mw, tmp_mr, &out_s0, &out_s1),
379 "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
380 out_s0, cur_tid, out_s1, tmp_tid);
381 $assert($mem_no_intersect(cur_mw, tmp_mw, &out_s0, &out_s1),
382 "Data-race detected: %p written by thread %d intersects %p written by thread %d\n",
383 out_s0, cur_tid, out_s1, tmp_tid);
384 }
385 $local_end();
386 $barrier_call(team->barrier);
387 if (0 == cur_tid) {
388 for (int tmp_tid = 0; tmp_tid < team->gteam->nthreads; tmp_tid++) {
389 team->gteam->read_sets[tmp_tid] = $mem_empty();
390 team->gteam->write_sets[tmp_tid] = $mem_empty();
391 }
392 }
393 $barrier_call(team->barrier);
394 $local_start();
395
396 $read_set_push();
397 $write_set_push();
398}
399
400/**
401 * performs the checking of workrecords when the last thread arrives the workshared region,
402 * including:
403 * number of workrecords in the entry should be nthreads
404 * all workrecords are arrived
405 * all workrecords have the same location as specified
406 * all workrecords are of the same kind
407 */
408void $omp_check_workrecords($omp_gteam gteam, int location, $omp_worksharing_kind kind, int index){
409 int nthreads = gteam->nthreads;
410 int numWorkrecords;
411
412 numWorkrecords = $seq_length(&gteam->work[index]);
413 $assert(numWorkrecords == nthreads, "The %d-th workshared entry has length %d while the expected length should be %d.", index, numWorkrecords, nthreads);
414 for(int i = 0; i < nthreads; i++){
415 $omp_work_record workrecord = gteam->work[index][i];
416
417 $assert(workrecord.arrived, "The workshared event of location %d for thread %d hasn't been arrived yet.",
418 location, i);
419 $assert(workrecord.location == location, "Wrong workshared location of thread %d: expected %d, but saw %d.",
420 i, location, workrecord.location);
421 $assert(workrecord.kind == kind, "Wrong workshared kind of thread %d: expected %d but saw %d",
422 i, kind, workrecord.kind);
423 }
424}
425
426/* finds the workrecord of the given location for a certain thread in a team,
427 * stores the workrecord in the buffer provided,
428 * returns the index of the workrecod entry in the work queue
429 * or -1 if the workrecord is absent.
430 *
431 * gteam: the global team object
432 * tid: the id of the thread
433 * location: the location of the workrecord being searched for
434 * result: the buffer for store the workrecord found
435 */
436int find_work_record($omp_gteam gteam, int tid, int location, $omp_work_record *result){
437 int numEntires = $seq_length(&gteam->work);
438
439 for(int i = 0; i < numEntires; i++){
440 $omp_work_record workrecord = gteam->work[i][tid];
441
442 if(workrecord.location == location){
443 *result = workrecord;
444 return i;
445 }
446 }
447 return -1;
448}
449
450/* checks if all threads have arrived the workrecord at the given index */
451_Bool all_arrived($omp_gteam gteam, int index){
452 for(int tid = 0; tid < gteam->nthreads; tid++){
453 if(!gteam->work[index][tid].arrived)
454 return $false;
455 }
456 return $true;
457}
458
459void arrive($omp_gteam gteam, int tid, int index,
460$omp_work_record workrecord, $omp_worksharing_kind kind, int location){
461 $assert(!workrecord.arrived, "The loop at location %d has been arrived before by thread %d!",
462 location, tid);
463 $assert(workrecord.kind == kind, "Wrong workshare kind of thread %d: expected LOOP",
464 tid);
465 (gteam->work[index][tid]).arrived = $true;
466 if(all_arrived(gteam, index)){//this is the last thread
467 $assert(index == 0, "Inconsistent order or worksharing events.");
468 $omp_check_workrecords(gteam, location, kind, index);
469 $seq_remove(&gteam->work, index, NULL, 1);
470 }
471}
472
/* Registers the calling thread's arrival at a worksharing loop and
 * returns the subdomain of iterations this thread must execute.
 * The first thread to arrive elaborates and partitions the loop domain
 * into nthreads subdomains and queues one workrecord per thread;
 * later arrivals find their record, verify the loop domain is unchanged,
 * and take their precomputed subdomain.
 * With a single thread the full domain is returned immediately.
 * TODO: checks if loop_dom gets changed if the current thread is NOT the
 * first thread entering the loop.
 */
$domain $omp_arrive_loop($omp_team team, int location, $domain loop_dom,
    $domain_strategy strategy){
  $atomic{
    $omp_gteam gteam = team->gteam;
    int tid = team->tid;
    int nthreads = gteam->nthreads;
    int index;
    $omp_work_record workrecord;

    $assert(gteam->init[tid], "The current thread %d has not joined the gteam!", tid);
    if(nthreads == 1)
      return loop_dom;
    // make the domain concrete before it is partitioned/iterated
    $elaborate_domain(loop_dom);
    index = find_work_record(gteam, tid, location, &workrecord);
    if(index >= 0){//workrecord found
      // only record 0 of an entry stores the full loop domain (see below)
      $assert($equals(&gteam->work[index][0].loop_domain, &loop_dom), "The loop domain at location %d is changed when thread %d arrives it. \n Expected %s but saw %s.",
          location,
          tid,
          gteam->work[index][0].loop_domain,
          loop_dom);
      arrive(gteam, tid, index, workrecord, LOOP, location);
      return workrecord.subdomain;
    }else{//workrecord not found, this should be the first thread
      $omp_work_record workrecords[nthreads];
      $domain_decomposition decomposition;
      int numEntries = $seq_length(&gteam->work);

      decomposition = $domain_partition(loop_dom, strategy, nthreads);
      // the full domain is kept only in record 0 of the entry
      workrecords[0].loop_domain = loop_dom;
      for(int i = 0; i< nthreads; i++){
        workrecords[i].kind = LOOP;
        workrecords[i].location = location;
        workrecords[i].arrived = (i == tid);
        workrecords[i].subdomain = decomposition.subdomains[i];
      }
      $seq_insert(&gteam->work, numEntries, &workrecords, 1);
      return workrecords[tid].subdomain;
    }
  }
}
516
/* Registers the calling thread's arrival at a sections construct and
 * returns the 1-d domain of section indices this thread must execute.
 * The first arriving thread queues one workrecord per thread, assigning
 * sections cyclically (thread i gets sections i, i+nthreads, ...);
 * later arrivals take their precomputed subdomain.  With a single
 * thread, all sections 0..numSections-1 are returned.
 * TODO: need strategies? */
$domain(1) $omp_arrive_sections($omp_team team, int location, int numSections){
  $atomic{
    $omp_gteam gteam = team->gteam;
    int tid = team->tid;
    int nthreads = gteam->nthreads;
    int index;
    $omp_work_record workrecord;

    $assert(gteam->init[tid], "The current thread %d has not joined the gteam!", tid);
    if(nthreads == 1)
      return ($domain(1)) {0 .. numSections - 1 # 1};
    index = find_work_record(gteam, tid, location, &workrecord);
    if(index >= 0){//workrecord found
      arrive(gteam, tid, index, workrecord, SECTIONS, location);
      return workrecord.subdomain;
    }else{//workrecord not found, this should be the first thread
      $omp_work_record workrecords[nthreads];
      int numEntries = $seq_length(&gteam->work);

      for(int i = 0; i < nthreads; i++){
        // cyclic distribution: thread i takes i, i+nthreads, ...
        int low = i, high = numSections - 1, step = nthreads;
        $range range = low .. high # step;
        $domain(1) dom = ($domain(1)) {range};

        workrecords[i].kind = SECTIONS;
        workrecords[i].location = location;
        workrecords[i].arrived = (i == tid);
        workrecords[i].subdomain = dom;
      }
      $seq_insert(&gteam->work, numEntries, &workrecords, 1);
      return workrecords[tid].subdomain;
    }
  }
}
552
/* Registers the calling thread's arrival at a single construct.
 * NOTE(review): the return value appears to be the id of the thread
 * that should execute the single region -- with one thread it returns
 * tid (necessarily 0), otherwise always 0 -- confirm against callers.
 * TODO: need strategies? */
int $omp_arrive_single($omp_team team, int location){
  $atomic{
    $omp_gteam gteam = team->gteam;
    int tid = team->tid;
    int nthreads = gteam->nthreads;
    int index;
    $omp_work_record workrecord;

    $assert(gteam->init[tid], "The current thread %d has not joined the gteam!", tid);
    if(nthreads == 1)
      return tid;
    index = find_work_record(gteam, tid, location, &workrecord);
    if(index >= 0){//workrecord found
      arrive(gteam, tid, index, workrecord, SINGLE, location);
    }else{//workrecord not found, this should be the first thread
      $omp_work_record workrecords[nthreads];
      int numEntries = $seq_length(&gteam->work);

      for(int i = 0; i < nthreads; i++){
        workrecords[i].kind = SINGLE;
        workrecords[i].location = location;
        workrecords[i].arrived = (i == tid);
      }
      $seq_insert(&gteam->work, numEntries, &workrecords, 1);
    }
    return 0;
  }
}
582
583$atomic_f $omp_helper_signal $omp_helper_signal_create(
584 int initValue
585){
586 $omp_helper_signal result;
587 result.value = initValue;
588
589 return result;
590}
591
/* Blocks (via the $when guard) until the signal holds waitValue.
 * Read/write tracking is suspended around the wait so the spin itself
 * is not reported as a data race.
 * NOTE(review): the guarded statement rewrites value to -(value)-1
 * rather than leaving it unchanged as the "do nothing" comment claimed;
 * presumably this consumes/acknowledges the signal so the guard cannot
 * re-fire -- confirm against $omp_helper_signal_send users. */
$atomic_f void $omp_helper_signal_wait(
    $omp_helper_signal *signal, int waitValue
){
  $read_set_push();
  $write_set_push();
  $yield();
  $when(signal->value == waitValue) signal->value = -(signal->value)-1 /* consume the signal? see note above */;
  $read_set_pop();
  $write_set_pop();
}
602
/* Sets the signal to sendValue.  Read/write tracking is suspended
 * around the store so the signalling itself is not reported as a
 * data race. */
$atomic_f void $omp_helper_signal_send(
    $omp_helper_signal *signal, int sendValue
){
  $read_set_push();
  $write_set_push();
  signal->value = sendValue;
  $read_set_pop();
  $write_set_pop();
}
612
613$atomic_f void $read_and_write_sets_pop($omp_team team) {
614 $mem cur_mr = $read_set_pop();
615 $mem cur_mw = $write_set_pop();
616 team->gteam->read_sets[team->tid] = cur_mr;
617 team->gteam->write_sets[team->tid] = cur_mw;
618}
619
620$atomic_f void $read_and_write_sets_push($omp_team team) {
621 team->gteam->write_sets[team->tid] = $mem_empty();
622 team->gteam->read_sets[team->tid] = $mem_empty();
623 $read_set_push();
624 $write_set_push();
625}
626
/* Acquires the atomic-execution lock (signal value 0 = free, 1 = held):
 * publishes and checks this thread's access sets, waits for the lock to
 * be free, takes it, then restarts access tracking. */
$atomic_f void $omp_atomic_execution_lock_acquire(
    $omp_team team, $omp_helper_signal *lock) {
  // pop: memsets
  $read_and_write_sets_pop(team);
  // check: data race
  $check_data_race(team);
  // yield: to other threads
  $omp_helper_signal_wait(lock, 0);
  // acquire: lock
  $omp_helper_signal_send(lock, 1);
  // push: memsets
  $read_and_write_sets_push(team);
}
640
/* Releases the atomic-execution lock: publishes this thread's access
 * sets, yields, checks for races, frees the lock (value 0), then
 * restarts access tracking. */
$atomic_f void $omp_atomic_execution_lock_release(
    $omp_team team, $omp_helper_signal *lock) {
  // pop: memsets
  $read_and_write_sets_pop(team);
  // yield: to other threads
  $yield();
  // check: data race
  $check_data_race(team);
  // release: lock
  $omp_helper_signal_send(lock, 0);
  // push: memsets
  $read_and_write_sets_push(team);
}
654
/* Race-check point inside a worksharing loop: publishes this thread's
 * access sets, yields to other threads, checks for races, then restarts
 * access tracking. */
$atomic_f void $omp_loop_operation($omp_team team) {
  // pop: memsets
  $read_and_write_sets_pop(team);
  // yield: to other threads
  $yield();
  // check: data race
  $check_data_race(team);
  // push: memsets
  $read_and_write_sets_push(team);
}
665
/* Final race-check at thread termination: publishes this thread's
 * access sets and checks them, then restarts tracking (no yield, unlike
 * $omp_loop_operation). */
$atomic_f void $omp_thread_termination($omp_team team) {
  // pop: memsets
  $read_and_write_sets_pop(team);
  // check: data race
  $check_data_race(team);
  // push: memsets
  $read_and_write_sets_push(team);
}
Note: See TracBrowser for help on using the repository browser.