source: CIVL/include/impls/civl-omp.cvl@ 1aaefd4

main test-branch
Last change on this file since 1aaefd4 was 1aaefd4, checked in by Wenhao Wu <wuwenhao@…>, 3 years ago

Add the declaration of the system function $elaborate_domain to civlc.cvh and call it in $omp_arrive_loop,
so that the domain can be concrete before iterating.
Also, add an exception handling in CommonSymbolicUtility in case a symbolic domain is iterated.

git-svn-id: svn://vsl.cis.udel.edu/civl/trunk@5754 fb995dde-84ed-4084-dfe6-e5aef3e2452c

  • Property mode set to 100644
File size: 20.8 KB
RevLine 
[2f8145f]1// civlc-omp.cvl: implementations of functions and types in civlc-omp.cvh
[3af26ac]2#include<civl-omp.cvh>
[2f8145f]3#include<civlc.cvh>
[c7c308c]4#include<mem.cvh>
[2f8145f]5#include<pointer.cvh>
6#include<seq.cvh>
7#include<concurrency.cvh>
8#include<domain.cvh>
[e93c797]9#pragma CIVL ACSL
/* Completes the definition of struct OMP_gshared given in civlc-omp.cvh.
 * One instance mirrors one shared variable for a team: it records which
 * threads have created their local view, keeps handles to those views,
 * and points back at the original (truly shared) variable. */
struct OMP_gshared {
    int nthreads;          // number of threads in the owning team
    _Bool init[];          // which threads have joined (created a local view)
    $omp_shared shares[];  // per-thread local views; slot is NULL until created
    void * original;       // pointer to original variable
};
18
/* Completes the definition of struct OMP_shared given in civlc-omp.cvh.
 * A thread's "private view" of one shared variable. */
struct OMP_shared {
    /* The global shared object this view belongs to */
    $omp_gshared gshared;
    /* The thread id */
    int tid;
    /* Pointer to the local copy of the shared variable.
     * This provides the thread's "private view" of the variable. */
    void * local;
    /* Pointer to the local status variable: an object parallel in shape
     * to the variable whose leaves hold EMPTY/FULL/MODIFIED flags
     * (see $omp_read/$omp_write/$omp_flush). */
    void * status;
};
[7459af4]31
/* Completes the definition of struct OMP_work_record
 * given in civlc-omp.cvh.  One record describes one thread's share of a
 * worksharing construct (loop, sections, or single). */
struct OMP_work_record {
    int kind;            // worksharing kind: loop, barrier, sections, or single
    int location;        // location in model of construct
    _Bool arrived;       // has this thread arrived yet?
    $domain loop_domain; // full loop domain; null if not loop (stored on entry 0 only)
    $domain subdomain;   // tasks this thread must do
    // reduction operation?
};
42
/* Global team state shared by all threads of one OMP team. */
struct OMP_gteam {
    /* scope in which data is allocated in heap */
    $scope scope;
    /* number of threads in team */
    int nthreads;
    /* which threads have joined this gteam */
    _Bool init[];
    /* work queues. Width nthreads. For each worksharing region,
     * one entry of size nthreads is added. */
    $omp_work_record work[][];
    /* the shared object data. */
    $omp_gshared shared[];
    $gbarrier gbarrier;
    /* r/w sets for each thread, used for data-race checking */
    $mem read_sets[];
    $mem write_sets[];
};
[f6ce0eb]62
/* Per-thread local team state. */
struct OMP_team {
    $omp_gteam gteam;     // the global team this thread belongs to
    $scope scope;         // scope in which this thread's data is allocated
    int tid;              // this thread's id within the team
    $omp_shared shared[]; // this thread's views of the team's shared variables
    $barrier barrier;     // local handle on the team barrier
};
[7459af4]70
/* A simple integer-valued signal cell used to build locks
 * (see $omp_helper_signal_wait/_send below). */
struct OMP_HELPER_SIGNAL {
    int value;
};
[7459af4]74
75/* *********************** Functions *********************** */
76
[17f29f0]77$atomic_f $omp_gteam $omp_gteam_create($scope scope, int nthreads) {
[2f8145f]78 $omp_gteam result = ($omp_gteam)$malloc(scope, sizeof(struct OMP_gteam));
79 _Bool f = $false;
[c7c308c]80 $mem empty = $mem_empty();
[2f8145f]81
[cbdbd4e]82 //$seq_init(&empty, 0, NULL);
[2f8145f]83 result->scope = scope;
84 result->nthreads = nthreads;
85 $seq_init(&result->init, nthreads, &f);
[cbdbd4e]86 $seq_init(&result->work, 0, NULL);
[2f8145f]87 $seq_init(&result->shared, 0, NULL);
[c7c308c]88 $seq_init(&result->read_sets, nthreads, &empty);
89 $seq_init(&result->write_sets, nthreads, &empty);
[2f8145f]90 result->gbarrier = $gbarrier_create(scope, nthreads);
91 return result;
92}
93
[17f29f0]94$atomic_f void $omp_gteam_destroy($omp_gteam gteam) {
[cbdbd4e]95 int length = $seq_length(&gteam->work);
96
[3ff27cf]97 $assert(length == 0, "There are still %d queued worksharing events.", length);
[2f8145f]98 $free(gteam->gbarrier);
99 $free(gteam);
100}
[7459af4]101
102/* creates new local team object for a specific thread. */
[17f29f0]103$atomic_f $omp_team $omp_team_create($scope scope, $omp_gteam gteam, int tid) {
[2f8145f]104 $omp_team result = ($omp_team)$malloc(scope, sizeof(struct OMP_team));
105
[3ff27cf]106 $assert(!gteam->init[tid], "Thread %d has already joined gteam",
107 tid);
[2f8145f]108 gteam->init[tid] = $true;
109 result->gteam = gteam;
110 result->scope = scope;
111 result->tid = tid;
112 $seq_init(&result->shared, 0, NULL);
113 result->barrier = $barrier_create(scope, gteam->gbarrier, tid);
114 return result;
115}
[7459af4]116
117/* destroys the local team object */
[c7c308c]118$atomic_f void $omp_team_destroy($omp_team team) {
[5d0489c]119 /*int numShared = $seq_length(&team->shared);
[2f8145f]120
121 for(int i = 0; i < numShared; i++){
122 $free(team->shared[i]);
[5d0489c]123 }*/
[2f8145f]124 $free(team->barrier);
125 $free(team);
126}
[7459af4]127
128/* creates new global shared object, associated to the given
129 * global team. A pointer to the shared variable that this
[2f8145f]130 * object corresponds to is given.
[7459af4]131 */
[17f29f0]132$atomic_f $omp_gshared $omp_gshared_create($omp_gteam gteam,
[2f8145f]133 void *original) {
134 $omp_gshared result =
135 ($omp_gshared)$malloc(gteam->scope, sizeof(struct OMP_gshared));
136 _Bool f = $false;
[655afb2]137 $omp_shared null = NULL;
138
139 result->nthreads = gteam->nthreads;
[31c5a80]140 $seq_init(&result->init, gteam->nthreads, &f);
[2f8145f]141 result->original = original;
[655afb2]142 $seq_init(&result->shares, gteam->nthreads, &null);
[2f8145f]143 //result->status = status;
144 return result;
145}
[7459af4]146
/* Destroys the global shared object.
 * NOTE(review): the original comment claimed the content is copied back
 * to the original variable here, but this function only frees the
 * handle; write-back presumably happens via $omp_flush before this is
 * called — TODO confirm. */
$atomic_f void $omp_gshared_destroy($omp_gshared gshared) {
    $free(gshared);
}
153
[17f29f0]154$atomic_f $omp_shared $omp_shared_create($omp_team team,
[2f8145f]155 $omp_gshared gshared, void *local, void *status) {
156 $omp_shared result =
157 ($omp_shared)$malloc(team->scope, sizeof(struct OMP_shared));
[823152d]158 //void *statusRefs[];
159 //int numStatusRefs;
[5d0489c]160 int sharedLength;
[2f8145f]161
[3ff27cf]162 $assert(!gshared->init[team->tid], "Thread %d has already created its local copy for %p.\n",
163 team->tid, gshared);
[2f8145f]164 result->gshared = gshared;
165 result->tid = team->tid;
166 result->local = local;
[584355b]167 // copies the shared data to the local copy
168 $copy(local, gshared->original);
[2f8145f]169 result->status = status;
[584355b]170 // set all leaf nodes of status to FULL
[823152d]171 $set_leaf_nodes(status, FULL);
172 //$leaf_node_ptrs(&statusRefs, status);
173 //numStatusRefs = $seq_length(&statusRefs);
174 //for(int i = 0; i < numStatusRefs; i++)
175 //*((int*)(statusRefs[i])) = FULL;
[5d0489c]176 sharedLength = $seq_length(&team->shared);
177 $seq_insert(&team->shared, sharedLength, &result, 1);
178 //$seq_append(&team->shared, &result, 1);
[655afb2]179 gshared->shares[team->tid] = result;
[2f8145f]180 return result;
181}
182
/* Destroys a thread-local shared view (does not touch the local copy
 * or status objects it pointed to, which are owned by the caller). */
$atomic_f void $omp_shared_destroy($omp_shared shared) {
    $free(shared);
}
186
[61fb6f6]187int $omp_modified_tid($omp_shared shared, void *ref){
[2f8145f]188 int tid = shared->tid;
[655afb2]189 $omp_gshared gshared = shared->gshared;
190 int nthreads = gshared->nthreads;
191 _Bool isIdentityRef = $is_identity_ref(ref);
[61fb6f6]192
[655afb2]193 for(int i=0; i < nthreads;i++){
194 if(i == tid)
195 continue;
196 else{
197 $omp_shared other = gshared->shares[i];
198 $omp_var_status other_status;
199 _Bool segFault = $false;
200
[69c7880]201 if(!$is_derefable_pointer(other))
[655afb2]202 continue;
203 if(isIdentityRef){
204 segFault = $has_leaf_node_equal_to(other->status, MODIFIED);
205 }else{
206 int *other_status_ref = (int*)$translate_ptr(ref, other->status);
207
208 segFault = (*other_status_ref) == MODIFIED;
209 }
[61fb6f6]210 if(segFault)
211 return i;
212 }
213 }
214 return -1;
215}
216
/* Reads the location referenced by ref (a pointer into the thread's
 * local copy) into result, first refreshing the local copy from the
 * truly shared copy if the local status is EMPTY.  Aborts if any other
 * thread has an unflushed write to the same location. */
void $omp_read($omp_shared shared, void *result, void *ref) {
    $atomic{
        int otherTid = $omp_modified_tid(shared, ref);

        if (otherTid >= 0) {
            void * p = $translate_ptr(ref, shared->gshared->original);

            $assert($false, "Thread %d can not safely read memory location %p, "
                "because thread %d has written to\nthat memory location and hasn't flushed yet.",
                shared->tid, p, otherTid);
        }
        if($is_identity_ref(ref)){
            // reading the whole object: refresh only if every leaf is EMPTY
            if($leaf_nodes_equal_to(shared->status, EMPTY)){
                $copy(ref, shared->gshared->original); // copy shared to local
                $set_leaf_nodes(shared->status, FULL);
            }
        }else{
            int *status_ref = (int*)$translate_ptr(ref, shared->status);
            int status = *status_ref;

            if(status == EMPTY){
                void *global = $translate_ptr(ref, shared->gshared->original);

                $copy(ref, global); // copy shared to local
                *status_ref = FULL; // set status to FULL
            }
        }
        // read from the (now fresh) local copy
        $copy(result, ref);
    }
}
249
/* Writes value to the location referenced by ref in the thread's local
 * copy and marks the location MODIFIED (the shared copy is updated
 * later by $omp_flush).  Aborts if any other thread has an unflushed
 * write to the same location. */
void $omp_write($omp_shared shared, void *ref, void *value) {
    $atomic{
        int otherTid = $omp_modified_tid(shared, ref);

        if (otherTid >= 0) {
            void *p = $translate_ptr(ref, shared->gshared->original);

            $assert($false, "Thread %d can not safely write to memory location %p, because thread %d has\n"
                "written to that memory location and hasn't flushed yet.",
                shared->tid, p, otherTid);
        }
        // write into the local copy only
        $copy(ref, value);
        if($is_identity_ref(ref)){
            // whole object written: mark every leaf dirty
            $set_leaf_nodes(shared->status, MODIFIED);
        }else{
            int *status_ref = (int*)$translate_ptr(ref, shared->status);
            *status_ref = MODIFIED;
        }
    }
}
270
/* Applies the associative operation op to combine the thread's local
 * value with the corresponding location of the shared copy, storing
 * the result in the shared copy:
 *   *shared_ref = *local op *shared_ref.
 * Only applicable to scalar type? */
/*@ depends_on \access(shared, local, shared->gshared->original); */
$atomic_f void $omp_apply_assoc($omp_shared shared,
        $operation op,
        void *local){
    // map the local pointer onto the truly shared copy
    void *shared_ref = $translate_ptr(local, shared->gshared->original);

    $apply(shared_ref, op, local, shared_ref);
}
280
[c7c308c]281
/* Folds a thread's local reduction value into the running final value
 * using the associative operation op:
 *   *reduction_final = *reduction_local op *reduction_final. */
$atomic_f void $omp_reduction_combine(
    $operation op, void *reduction_final, void *reduction_local
) {
    $apply(reduction_final, op, reduction_local, reduction_final);
}
287
/* Flushes the portion of this thread's local view referenced by ref:
 * for each leaf of the object, a MODIFIED leaf is written back to the
 * shared copy; then (for both MODIFIED and FULL leaves) the status is
 * reset to EMPTY and the local leaf reset to its default value.
 *
 * Need to drill down into all leaf nodes of the object being flushed.
 * Also, it should be ok to flush a memory unit if you are not the
 * owner but you also have no reads or writes to that variable.
 * TODO: assert there is at most one thread for which this memory unit
 * has status MODIFIED. */
void $omp_flush($omp_shared shared, void *ref) {
    $omp_gshared gshared = shared->gshared;
    int tid = shared->tid; // NOTE(review): currently unused in this body
    void *refs[];
    int numRefs;

    // get all leaf node pointers
    $leaf_node_ptrs(&refs, ref);
    numRefs = $seq_length(&refs);
    for(int i = 0; i < numRefs; i++){
        void *leaf = refs[i];
        int *leaf_status = (int *)$translate_ptr(leaf, shared->status);
        void *leaf_shared = (int *)$translate_ptr(leaf, gshared->original);

        switch(*leaf_status){
        case EMPTY:
            break;
        case MODIFIED:
            $copy(leaf_shared, leaf); // write the local value back to shared
            /* fall through: a flushed MODIFIED leaf also becomes EMPTY */
        case FULL:
            *leaf_status = EMPTY;
            $set_default(leaf);
            break;
        default:
            $assert(0, "Unknown omp shared variable status: %d", *leaf_status);
        }
    }
}
322
323void $omp_flush_all($omp_team team) {
[84932eb]324 int num_shared = $seq_length(&team->shared);
[2f8145f]325
326 for (int i=0; i<num_shared; i++) {
327 $omp_shared shared = team->shared[i];
328
329 $omp_flush(shared, shared->local);
330 }
331}
332
[b76d962]333
[f127e0f]334$atomic_f void $check_data_race($omp_team team) {
[c7c308c]335 int cur_tid = team->tid;
336 $mem out_s0 = $mem_empty();
337 $mem out_s1 = $mem_empty();
[b76d962]338 $mem cur_mw = team->gteam->write_sets[cur_tid];
339 $mem cur_mr = team->gteam->read_sets[cur_tid];
[c7c308c]340
[86ee0b6]341 // Check data race.
[e3f356c]342 for (int tmp_tid = 0; tmp_tid < team->gteam->nthreads; tmp_tid++) {
343 if (tmp_tid == cur_tid) continue;
344
[c7c308c]345 $mem tmp_mr = team->gteam->read_sets[tmp_tid];
346 $mem tmp_mw = team->gteam->write_sets[tmp_tid];
347
348 $assert($mem_no_intersect(cur_mr, tmp_mw, &out_s0, &out_s1),
[86ee0b6]349 "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
[c7c308c]350 out_s0, cur_tid, out_s1, tmp_tid);
351 $assert($mem_no_intersect(cur_mw, tmp_mr, &out_s0, &out_s1),
[86ee0b6]352 "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
[c7c308c]353 out_s0, cur_tid, out_s1, tmp_tid);
354 $assert($mem_no_intersect(cur_mw, tmp_mw, &out_s0, &out_s1),
[86ee0b6]355 "Data-race detected: %p written by thread %d intersects %p written by thread %d\n",
[c7c308c]356 out_s0, cur_tid, out_s1, tmp_tid);
357 }
358}
359
[86ee0b6]360void $omp_barrier($omp_team team) {
[b76d962]361 int cur_tid = team->tid;
[86ee0b6]362 $mem out_s0 = $mem_empty();
363 $mem out_s1 = $mem_empty();
[b76d962]364 $mem cur_mw = $write_set_pop();
365 $mem cur_mr = $read_set_pop();
[86ee0b6]366
367 team->gteam->write_sets[cur_tid] = cur_mw;
368 team->gteam->read_sets[cur_tid] = cur_mr;
[e3f356c]369 for (int tmp_tid = 0; tmp_tid < team->gteam->nthreads; tmp_tid++) {
370 if (tmp_tid == cur_tid) continue;
371
[86ee0b6]372 $mem tmp_mr = team->gteam->read_sets[tmp_tid];
373 $mem tmp_mw = team->gteam->write_sets[tmp_tid];
374
375 $assert($mem_no_intersect(cur_mr, tmp_mw, &out_s0, &out_s1),
376 "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
377 out_s0, cur_tid, out_s1, tmp_tid);
378 $assert($mem_no_intersect(cur_mw, tmp_mr, &out_s0, &out_s1),
379 "Data-race detected: %p read by thread %d intersects %p written by thread %d\n",
380 out_s0, cur_tid, out_s1, tmp_tid);
381 $assert($mem_no_intersect(cur_mw, tmp_mw, &out_s0, &out_s1),
382 "Data-race detected: %p written by thread %d intersects %p written by thread %d\n",
383 out_s0, cur_tid, out_s1, tmp_tid);
384 }
[b76d962]385 $local_end();
[2f8145f]386 $barrier_call(team->barrier);
[b76d962]387 if (0 == cur_tid) {
388 for (int tmp_tid = 0; tmp_tid < team->gteam->nthreads; tmp_tid++) {
389 team->gteam->read_sets[tmp_tid] = $mem_empty();
390 team->gteam->write_sets[tmp_tid] = $mem_empty();
391 }
392 }
393 $barrier_call(team->barrier);
394 $local_start();
[86ee0b6]395
[c7c308c]396 $read_set_push();
397 $write_set_push();
[2f8145f]398}
399
/**
 * Performs the checking of workrecords when the last thread arrives at
 * the workshared region, including:
 *   - the number of workrecords in the entry should be nthreads
 *   - all workrecords have arrived
 *   - all workrecords have the same location as specified
 *   - all workrecords are of the same kind
 *
 * gteam:    the global team object
 * location: the expected model location of the construct
 * kind:     the expected worksharing kind
 * index:    index of the entry in the work queue being checked
 */
void $omp_check_workrecords($omp_gteam gteam, int location, $omp_worksharing_kind kind, int index){
    int nthreads = gteam->nthreads;
    int numWorkrecords;

    numWorkrecords = $seq_length(&gteam->work[index]);
    $assert(numWorkrecords == nthreads, "The %d-th workshared entry has length %d while the expected length should be %d.", index, numWorkrecords, nthreads);
    for(int i = 0; i < nthreads; i++){
        $omp_work_record workrecord = gteam->work[index][i];

        $assert(workrecord.arrived, "The workshared event of location %d for thread %d hasn't been arrived yet.",
                location, i);
        $assert(workrecord.location == location, "Wrong workshared location of thread %d: expected %d, but saw %d.",
                i, location, workrecord.location);
        $assert(workrecord.kind == kind, "Wrong workshared kind of thread %d: expected %d but saw %d",
                i, kind, workrecord.kind);
    }
}
425
426/* finds the workrecord of the given location for a certain thread in a team,
427 * stores the workrecord in the buffer provided,
428 * returns the index of the workrecod entry in the work queue
429 * or -1 if the workrecord is absent.
430 *
431 * gteam: the global team object
432 * tid: the id of the thread
433 * location: the location of the workrecord being searched for
434 * result: the buffer for store the workrecord found
435 */
436int find_work_record($omp_gteam gteam, int tid, int location, $omp_work_record *result){
437 int numEntires = $seq_length(&gteam->work);
438
439 for(int i = 0; i < numEntires; i++){
440 $omp_work_record workrecord = gteam->work[i][tid];
[2f8145f]441
[cbdbd4e]442 if(workrecord.location == location){
443 *result = workrecord;
444 return i;
445 }
[2f8145f]446 }
[cbdbd4e]447 return -1;
448}
449
450/* checks if all threads have arrived the workrecord at the given index */
451_Bool all_arrived($omp_gteam gteam, int index){
452 for(int tid = 0; tid < gteam->nthreads; tid++){
453 if(!gteam->work[index][tid].arrived)
454 return $false;
455 }
456 return $true;
457}
458
459void arrive($omp_gteam gteam, int tid, int index,
460$omp_work_record workrecord, $omp_worksharing_kind kind, int location){
[3ff27cf]461 $assert(!workrecord.arrived, "The loop at location %d has been arrived before by thread %d!",
462 location, tid);
463 $assert(workrecord.kind == kind, "Wrong workshare kind of thread %d: expected LOOP",
464 tid);
[cbdbd4e]465 (gteam->work[index][tid]).arrived = $true;
466 if(all_arrived(gteam, index)){//this is the last thread
[3ff27cf]467 $assert(index == 0, "Inconsistent order or worksharing events.");
[cbdbd4e]468 $omp_check_workrecords(gteam, location, kind, index);
469 $seq_remove(&gteam->work, index, NULL, 1);
470 }
471}
472
/* Arrives at a workshared loop at the given location and returns the
 * subdomain of iterations this thread must execute.  The first thread
 * to arrive elaborates and partitions the loop domain into nthreads
 * subdomains using the given strategy and enqueues one work record per
 * thread; later threads find their record and take their precomputed
 * subdomain.
 *
 * TODO: checks if loop_dom gets changed if the current thread is NOT
 * the first thread entering the loop.
 */
$domain $omp_arrive_loop($omp_team team, int location, $domain loop_dom,
        $domain_strategy strategy){
    $atomic{
        $omp_gteam gteam = team->gteam;
        int tid = team->tid;
        int nthreads = gteam->nthreads;
        int index;
        $omp_work_record workrecord;

        $assert(gteam->init[tid], "The current thread %d has not joined the gteam!", tid);
        if(nthreads == 1)
            return loop_dom; // single thread: no partitioning needed
        // make the domain concrete before it is partitioned/iterated
        $elaborate_domain(loop_dom);
        index = find_work_record(gteam, tid, location, &workrecord);
        if(index >= 0){ // workrecord found: some thread arrived earlier
            // the loop domain must agree with what the first thread recorded
            $assert($equals(&gteam->work[index][0].loop_domain, &loop_dom), "The loop domain at location %d is changed when thread %d arrives it. \n Expected %s but saw %s.",
                location,
                tid,
                gteam->work[index][0].loop_domain,
                loop_dom);
            arrive(gteam, tid, index, workrecord, LOOP, location);
            return workrecord.subdomain;
        }else{ // workrecord not found, this should be the first thread
            $omp_work_record workrecords[nthreads];
            $domain_decomposition decomposition;
            int numEntries = $seq_length(&gteam->work);

            decomposition = $domain_partition(loop_dom, strategy, nthreads);
            // the full loop domain is recorded on entry 0 only
            workrecords[0].loop_domain = loop_dom;
            for(int i = 0; i< nthreads; i++){
                workrecords[i].kind = LOOP;
                workrecords[i].location = location;
                workrecords[i].arrived = (i == tid);
                workrecords[i].subdomain = decomposition.subdomains[i];
            }
            $seq_insert(&gteam->work, numEntries, &workrecords, 1);
            return workrecords[tid].subdomain;
        }
    }
}
516
/* Arrives at a sections construct at the given location and returns
 * the 1-d domain of section indices this thread must execute.  The
 * first thread to arrive assigns sections round-robin (thread i gets
 * sections i, i+nthreads, i+2*nthreads, ...) and enqueues one work
 * record per thread; later threads take their precomputed subdomain.
 * TODO: need strategies? */
$domain(1) $omp_arrive_sections($omp_team team, int location, int numSections){
    $atomic{
        $omp_gteam gteam = team->gteam;
        int tid = team->tid;
        int nthreads = gteam->nthreads;
        int index;
        $omp_work_record workrecord;

        $assert(gteam->init[tid], "The current thread %d has not joined the gteam!", tid);
        if(nthreads == 1)
            return ($domain(1)) {0 .. numSections - 1 # 1}; // all sections
        index = find_work_record(gteam, tid, location, &workrecord);
        if(index >= 0){ // workrecord found: some thread arrived earlier
            arrive(gteam, tid, index, workrecord, SECTIONS, location);
            return workrecord.subdomain;
        }else{ // workrecord not found, this should be the first thread
            $omp_work_record workrecords[nthreads];
            int numEntries = $seq_length(&gteam->work);

            for(int i = 0; i < nthreads; i++){
                // thread i gets sections {i, i+nthreads, i+2*nthreads, ...}
                int low = i, high = numSections - 1, step = nthreads;
                $range range = low .. high # step;
                $domain(1) dom = ($domain(1)) {range};

                workrecords[i].kind = SECTIONS;
                workrecords[i].location = location;
                workrecords[i].arrived = (i == tid);
                workrecords[i].subdomain = dom;
            }
            $seq_insert(&gteam->work, numEntries, &workrecords, 1);
            return workrecords[tid].subdomain;
        }
    }
}
552
/* Arrives at a single construct at the given location.  The first
 * thread to arrive enqueues one work record per thread; later threads
 * just mark their record as arrived.
 * NOTE(review): the return value appears to designate which thread
 * executes the region — tid when nthreads == 1 (necessarily 0),
 * otherwise always 0, so callers presumably compare the result against
 * their own tid.  TODO confirm against callers.
 * TODO: need strategies? */
int $omp_arrive_single($omp_team team, int location){
    $atomic{
        $omp_gteam gteam = team->gteam;
        int tid = team->tid;
        int nthreads = gteam->nthreads;
        int index;
        $omp_work_record workrecord;

        $assert(gteam->init[tid], "The current thread %d has not joined the gteam!", tid);
        if(nthreads == 1)
            return tid;
        index = find_work_record(gteam, tid, location, &workrecord);
        if(index >= 0){ // workrecord found: some thread arrived earlier
            arrive(gteam, tid, index, workrecord, SINGLE, location);
        }else{ // workrecord not found, this should be the first thread
            $omp_work_record workrecords[nthreads];
            int numEntries = $seq_length(&gteam->work);

            for(int i = 0; i < nthreads; i++){
                workrecords[i].kind = SINGLE;
                workrecords[i].location = location;
                workrecords[i].arrived = (i == tid);
            }
            $seq_insert(&gteam->work, numEntries, &workrecords, 1);
        }
        return 0;
    }
}
[cbdbd4e]582
[86ee0b6]583$atomic_f $omp_helper_signal $omp_helper_signal_create(
584 int initValue
585){
586 $omp_helper_signal result;
587 result.value = initValue;
588
589 return result;
590}
591
/* Blocks until signal->value equals waitValue, with read/write-set
 * recording suspended for the duration of the wait.
 * NOTE(review): despite the trailing "do nothing" comment, the guarded
 * statement maps the value v to -v-1 once the wait is satisfied —
 * confirm this encoding (marking the signal consumed?) is intended;
 * the acquire path below immediately overwrites it via _send. */
$atomic_f void $omp_helper_signal_wait(
    $omp_helper_signal *signal, int waitValue
){
    $read_set_push();
    $write_set_push();
    $yield();
    $when(signal->value == waitValue) signal->value = -(signal->value)-1 /* do nothing */;
    $read_set_pop();
    $write_set_pop();
}
602
/* Sets signal->value to sendValue, with read/write-set recording
 * suspended so the store is not counted in race checking. */
$atomic_f void $omp_helper_signal_send(
    $omp_helper_signal *signal, int sendValue
){
    $read_set_push();
    $write_set_push();
    signal->value = sendValue;
    $read_set_pop();
    $write_set_pop();
}
612
[b76d962]613$atomic_f void $read_and_write_sets_pop($omp_team team) {
614 $mem cur_mr = $read_set_pop();
615 $mem cur_mw = $write_set_pop();
616 team->gteam->read_sets[team->tid] = cur_mr;
617 team->gteam->write_sets[team->tid] = cur_mw;
618}
619
620$atomic_f void $read_and_write_sets_push($omp_team team) {
621 team->gteam->write_sets[team->tid] = $mem_empty();
622 team->gteam->read_sets[team->tid] = $mem_empty();
623 $read_set_push();
624 $write_set_push();
625}
626
/* Acquires the atomic-execution lock: publishes this thread's memory
 * sets, checks for data races, yields until the lock value is 0, then
 * sets it to 1 and starts fresh memory sets. */
$atomic_f void $omp_atomic_execution_lock_acquire(
    $omp_team team, $omp_helper_signal *lock) {
    // pop: memsets
    $read_and_write_sets_pop(team);
    // check: data race
    $check_data_race(team);
    // yield: to other threads until the lock is free (value 0)
    $omp_helper_signal_wait(lock, 0);
    // acquire: lock (value 1 means held)
    $omp_helper_signal_send(lock, 1);
    // push: memsets
    $read_and_write_sets_push(team);
}
640
/* Releases the atomic-execution lock: publishes this thread's memory
 * sets, yields, checks for data races, sets the lock value back to 0,
 * and starts fresh memory sets. */
$atomic_f void $omp_atomic_execution_lock_release(
    $omp_team team, $omp_helper_signal *lock) {
    // pop: memsets
    $read_and_write_sets_pop(team);
    // yield: to other threads
    $yield();
    // check: data race
    $check_data_race(team);
    // release: lock (value 0 means free)
    $omp_helper_signal_send(lock, 0);
    // push: memsets
    $read_and_write_sets_push(team);
}
654
/* Yield point inside a workshared loop: publishes this thread's memory
 * sets, yields to other threads, checks for data races, and starts
 * fresh memory sets. */
$atomic_f void $omp_loop_operation($omp_team team) {
    // pop: memsets
    $read_and_write_sets_pop(team);
    // yield: to other threads
    $yield();
    // check: data race
    $check_data_race(team);
    // push: memsets
    $read_and_write_sets_push(team);
}
665
/* Final check at thread termination: publishes this thread's memory
 * sets, checks for data races, and resets the sets (no yield — the
 * thread is exiting). */
$atomic_f void $omp_thread_termination($omp_team team) {
    // pop: memsets
    $read_and_write_sets_pop(team);
    // check: data race
    $check_data_race(team);
    // push: memsets
    $read_and_write_sets_push(team);
}
Note: See TracBrowser for help on using the repository browser.