source: CIVL/examples/omp/dataracebench-1.3.2/scripts/metric.py

main
Last change on this file was ea777aa, checked in by Alex Wilton <awilton@…>, 3 years ago

Moved examples, include, build_default.properties, common.xml, and README out from dev.civl.com into the root of the repo.

git-svn-id: svn://vsl.cis.udel.edu/civl/trunk@5704 fb995dde-84ed-4084-dfe6-e5aef3e2452c

  • Property mode set to 100644
File size: 2.9 KB
Line 
1import sys
2import csv
3
# Read the raw results CSV named on the command line into a list of rows.
# newline='' is the csv-module-required mode for file objects passed to
# csv.reader; without it, newlines embedded in quoted fields are mishandled.
data = []
with open(sys.argv[1], 'r', newline='') as csvfile:
    content = csv.reader(csvfile, delimiter=',')
    for line in content:
        data.append(line)

# Group every run of the same benchmark (keyed by column 1) together.
# data[0] is assumed to be the header row and is skipped.
benchmarks = {}
for line in data[1:]:
    benchmarks.setdefault(line[1], []).append({
        "truth": line[3],       # expected verdict string — presumably 'TRUE'/'FALSE'; see classify()
        "races": int(line[6]),  # number of races reported by the tool
        "compiler": line[9],    # compiler stage exit status (kept as a string)
        "runtime": line[10],    # runtime stage exit status (kept as a string)
    })
13
# Confusion-matrix tallies, filled in by classify() below.
truePositive = falsePositive = trueNegative = falseNegative = 0
# Compiler-stage outcome counters.
compilertrue = compilererror = compilertimeout = 0
# Runtime-stage outcome counters.
runtimetrue = runtimeerror = runtimeout = runtimeoutreport = 0
# Totals of ground-truth positive / negative cases actually classified.
positive = negative = 0
27
# Collapse the runs of each benchmark to a single record holding the maximum
# race count observed across all runs.  Note: the first run's dict is reused
# (and mutated in place), matching the original aliasing into `benchmarks`.
Nbenchmarks = {}
for app, runs in benchmarks.items():
    merged = runs[0]
    merged["races"] = max(r["races"] for r in runs)
    Nbenchmarks[app] = merged
35
def classify(truth, races):
    """Update the module-level confusion-matrix counters for one benchmark.

    truth: ground-truth label; any capitalization of 'TRUE' means a race
           really exists.  races: number of races the tool reported.
    """
    global positive, negative, falseNegative, truePositive, trueNegative, falsePositive
    has_race = truth.upper() == 'TRUE'
    reported = races != 0
    if has_race:
        positive += 1
        if reported:
            truePositive += 1
        else:
            falseNegative += 1
    else:
        negative += 1
        if reported:
            falsePositive += 1
        else:
            trueNegative += 1
50
51
# Walk the collapsed per-benchmark records and dispatch on the exit status of
# each stage.  Only runs that compiled cleanly ('0') reach classification.
for app, run in Nbenchmarks.items():
    compiler_status = run["compiler"]
    if compiler_status == '0':
        runtime_status = run["runtime"]
        if runtime_status == '0':
            classify(run["truth"], run["races"])
            runtimetrue += 1
        elif runtime_status == '11':
            # presumably a runtime crash (matches the message printed below) — verify
            runtimeerror += 1
        elif runtime_status == '124':
            # timeout (the usual `timeout(1)` exit code); still classify if
            # races were reported before the deadline
            if run["races"] > 0:
                classify(run["truth"], run["races"])
                runtimeoutreport += 1
            else:
                runtimeout += 1
        else:
            print(app, runtime_status, "there are some errors in your runtime data.")
    elif compiler_status in ('1', '2', '4', '134', '254'):
        compilererror += 1
    elif compiler_status == '11':
        compilertimeout += 1
    else:
        print(app, compiler_status, "there are some errors in your compiler data.")
74
def _ratio(num, den):
    """Return num / den, or the string 'N/A' when the denominator is zero."""
    return num / den if den else 'N/A'


total = len(Nbenchmarks)
classified = positive + negative  # benchmarks that reached classification

print("total test case is ", total)
print("compiler segmentation fault is ", compilererror)
print("runtime segmentation fault is ", runtimeerror)
print("compiler time out is ", compilertimeout)
print("runtime time out is ", runtimeout)
print("runtime time out with report is ", runtimeoutreport)
# Guarded: the original raised ZeroDivisionError on an empty benchmark set.
print("tool success rate is ", _ratio(classified, total))
print("false positive is ", falsePositive)
print("true positive is ", truePositive)
print("true negative is ", trueNegative)
print("false negative is ", falseNegative)

# All denominators can legitimately be zero, so every metric uses _ratio and
# degrades to 'N/A' instead of crashing (the original only guarded three).
Accuracy = _ratio(truePositive + trueNegative, classified)
Specificity = _ratio(trueNegative, trueNegative + falsePositive)
Precision = _ratio(truePositive, truePositive + falsePositive)
Recall = _ratio(truePositive, truePositive + falseNegative)
if 'N/A' in (Specificity, Precision, Recall):
    f1Score = 'N/A'
else:
    # Precision + Recall == 0 is possible even when both are defined
    # (truePositive == 0); _ratio also covers that former crash.
    f1Score = _ratio(2 * Precision * Recall, Precision + Recall)
print("Accuracy is ", Accuracy)
print("Precision is ", Precision)  # label fixed: trailing space, consistent with the others
print("Specificity is ", Specificity)
print("Recall is ", Recall)
print("F1 Score is ", f1Score)
Note: See TracBrowser for help on using the repository browser.