NeoPZ
test.py
Go to the documentation of this file.
1 #! /usr/bin/env python2.7
2 #***************************************************************************
3 #* Copyright (C) 2013 by Edson Borin *
4 #* edson@ic.unicamp.br *
5 #* *
6 #* This program is free software; you can redistribute it and/or modify *
7 #* it under the terms of the GNU General Public License as published by *
8 #* the Free Software Foundation; either version 2 of the License, or *
9 #* (at your option) any later version. *
10 #* *
11 #* This program is distributed in the hope that it will be useful, *
12 #* but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 #* GNU General Public License for more details. *
15 #* *
16 #* You should have received a copy of the GNU General Public License *
17 #* along with this program; if not, write to the *
18 #* Free Software Foundation, Inc., *
19 #* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
#**************************************************************************
21 
22 import sys
23 import os.path
24 import shutil
25 
26 # Try to import rdt and stats modules, if available.
27 import sys
28 # Add the perf_base_dir/scripts/misc to the system path to enable importing misc modules
29 sys.path.append("@PERFTEST_SCRIPTS_SRC_DIR@/misc_modules/")
30 try:
31  import rdt, stats
32  summarize_results=True
33 except ImportError, e:
34  print "WARNING: I wont summarize results because I could not import modules: ", e
35  summarize_results=False
36 
37 # == Adding new tests ==
38 #
39 # In order to add new tests one must:
40 # 1) import the test module (e.g.: import substruct_tst01.test). The module must
41 # contain:
42 # a) the functions short_description and long_description which return a string
43 # containing a short/long description of the test.
44 # b) a function called "run_test(ntimes)" that runs the performance test
45 # "ntimes" times and returns (status, rdt_files). So that:
46 # - status: the test status: 0 == OK, != 0 == ERROR
47 # - rdt_files: a dictionary mapping rdt_ids to pairs (rdt_filename,
48 # rdt_description).
49 # * rdt_id is a unique string identifier for the rdt file.
50 # * rdt_filename is the full path to the rdt file
# * rdt_description is a short description of what is being measured
52 # by the rdt file.
53 #
54 # 2) add the test module to one (or more) of the test lists.
55 
# == Import test modules ==
70 
71 import skyline_tst01.test # s (p2) s Chlsk+sfwd+sbck
72 import skyline_tst02.test # m (p3) m Chlsk+sfwd+sbck???
73 import skyline_tst03.test # m (p4) m Chlsk+sfwd+sbck
74 import skyline_tst04.test # s (p2) s LDLt
75 import skyline_tst05.test # s (p3) s LDLt
76 import skyline_tst06.test # m (p4) m LDLt
77 import skyline_tst07.test # s (p2) s multadd
78 import skyline_tst08.test # m (p3) m multadd
79 import skyline_tst09.test # (p7) l sor????
80 import skyline_tst10.test # (p3) m sor????
81 import skyline_tst11.test # (p6) l sor????
82 import skyline_tst12.test # (p5) m Chlsk+sfwd+sbck???
83 # =========================
84 
# == Available test lists ==
# See Usage for the description of the test lists.
# Each entry is a (test_name, test_module) pair; the module must provide
# short_description(), long_description() and run_test(ntimes) -- see the
# "Adding new tests" notes above.

# Tests with execution time shorter than 1 minute
short_tests = [("substruct_tst01",substruct_tst01.test),
               ("substruct_tst02",substruct_tst02.test),
               ("skyline_tst01",skyline_tst01.test), # ~1s
               ("skyline_tst04",skyline_tst04.test), # ~1s
               ("skyline_tst05",skyline_tst05.test), # ~28s
               ("skyline_tst07",skyline_tst07.test)] # ~10s

# Tests that do not belong to the short/medium/long classification; they are
# only reachable through all_tests (option -a or -t).
other_tests = [("skyline_tst09",skyline_tst09.test),
               ("skyline_tst10",skyline_tst10.test),
               ("skyline_tst11",skyline_tst11.test),
               ("skyline_tst12",skyline_tst12.test)]

# TODO: How about:
# substruct_tst05
# substruct_tst14
# substruct_tst15
# substruct_tst16

# Tests with execution time between 1 and 10 minutes
medium_tests= [("substruct_tst03",substruct_tst03.test),
               ("substruct_tst04",substruct_tst04.test),
               ("substruct_tst07",substruct_tst07.test),
               ("substruct_tst08",substruct_tst08.test),
               ("substruct_tst09",substruct_tst09.test),
               ("substruct_tst11",substruct_tst11.test),
               ("substruct_tst13",substruct_tst13.test),
               ("skyline_tst02",skyline_tst02.test), # ???
               ("skyline_tst03",skyline_tst03.test), # ~190s
               ("skyline_tst06",skyline_tst06.test), # ~312s
               ("skyline_tst08",skyline_tst08.test)] # ~286s

# Tests with execution time longer than 10 minutes
long_tests = [
# ("skyline_tst06",skyline_tst06.test),
              ("substruct_tst06",substruct_tst06.test),
              ("substruct_tst10",substruct_tst10.test),
              ("skyline_tst09",skyline_tst09.test),
              ("skyline_tst11",skyline_tst11.test)]

# Aggregated lists backing the -S / -r / -F command line options.
short_regression_tests = short_tests
regression_tests = short_tests + medium_tests
full_regression_tests = short_tests + medium_tests + long_tests

# Every known test; used by option -a and by get_test() name lookups.
# NOTE(review): skyline_tst09/skyline_tst11 appear in both long_tests and
# other_tests, so they occur twice here; harmless, since the main driver
# deduplicates via the tests_to_run dictionary.
all_tests = short_tests + medium_tests + long_tests + other_tests
# =========================

# Default value for ntimes (option -n): run each test this many times.
ntimes_dft = 3
137 
def error(message, status=1):
    """Report an error on stderr and optionally abort the process.

    message -- text to print (an "ERROR: " prefix and newline are added).
    status  -- process exit code passed to sys.exit(); if 0 the function
               only prints and returns so the caller can continue.
               Defaults to 1: get_test() calls error() with a single
               argument, which previously raised TypeError because no
               default existed.
    """
    sys.stderr.write('ERROR: ' + message + '\n')
    if status != 0:
        sys.exit(status)
141 
def get_test(v):
    """Look up a test by name in all_tests.

    v -- the test name string (e.g. "skyline_tst01").
    Returns the (test_name, test_module) pair, or aborts via error() with
    exit status 1 when no such test exists.
    """
    for t in all_tests:
        if t[0] == v:
            return t
    # Unknown test name: report and abort.  The original code called
    # error() with a single argument, which raised TypeError because
    # error() required an explicit status.
    error("Could not find test " + str(v), 1)
147 
148 # Functions for stand alone tests
149 def usage():
150  print "\nUsage: test.py -t test_name [-n #times] [-a|-s|-m|-l|-r] [-t test_name] [-h]"
151  print "\nARGUMENTS"
152  print "\t-n #times : Run each test #times times. (default = ",ntimes_dft,")"
153  print "\t-a : Run all tests."
154  print "\t-s : Run short tests."
155  print "\t-m : Run medium tests."
156  print "\t-l : Run long tests."
157  print "\t-r res_dir : Run daily regression tests and move results to \"res_dir\" directory."
158  print "\t-F res_dir : Run full regression tests and move results to \"res_dir\" directory."
159  print "\t-S res_dir : Run short regression tests and move results to \"res_dir\" directory."
160  print "\t-t test_name : Run test test_name."
161  print "\nDESCRIPTION"
162  print "\tExecutes a set of performance tests. The following tests are available:"
163  print "\tShort tests:"
164  for t in short_tests :
165  print "\t* ", t[0], ":", t[1].short_description()
166  print "\tMedium tests:"
167  for t in medium_tests :
168  print "\t* ", t[0], ":", t[1].short_description()
169  print "\tLong tests:"
170  for t in long_tests :
171  print "\t* ", t[0], ":", t[1].short_description()
172  print "\tRegression tests:"
173  for t in regression_tests :
174  print "\t* ", t[0], ":", t[1].short_description()
175  print "\tFull regression tests:"
176  for t in full_regression_tests :
177  print "\t* ", t[0], ":", t[1].short_description()
178  print "\tShort regression tests:"
179  for t in short_regression_tests :
180  print "\t* ", t[0], ":", t[1].short_description()
181  sys.exit(1)
182 
183 # Main - for stand alone tests only
184 if __name__ == "__main__":
185  import getopt
186  results_dir=0
187  ntimes=ntimes_dft
188  tests_to_run = {}
189  # Process arguments
190  try :
191  opts, extra_args = getopt.getopt(sys.argv[1:], 't:n:hasmlr:S:F:')
192  except getopt.GetoptError, e:
193  error(str(e), 1)
194  for f, v in opts:
195  if f == '-a':
196  for t in short_tests : tests_to_run[t[0]] = t
197  for t in medium_tests : tests_to_run[t[0]] = t
198  for t in long_tests : tests_to_run[t[0]] = t
199  for t in regression_tests : tests_to_run[t[0]] = t
200  for t in full_regression_tests : tests_to_run[t[0]] = t
201  for t in short_regression_tests : tests_to_run[t[0]] = t
202  elif f == '-n': ntimes=int(v)
203  elif f == '-t': tests_to_run[v] = get_test(v)
204  elif f == '-s':
205  for t in short_tests : tests_to_run[t[0]] = t
206  elif f == '-m':
207  for t in medium_tests : tests_to_run[t[0]] = t
208  elif f == '-l':
209  for t in long_tests : tests_to_run[t[0]] = t
210  elif f == '-r':
211  for t in regression_tests : tests_to_run[t[0]] = t
212  results_dir = v
213  elif f == '-F':
214  for t in full_regression_tests : tests_to_run[t[0]] = t
215  results_dir = v
216  elif f == '-S':
217  for t in short_regression_tests : tests_to_run[t[0]] = t
218  results_dir = v
219  elif f == '-h': usage()
220 
221  all_results={}
222 
223  # Run tests
224  for f, t in tests_to_run.iteritems() :
225  obj=t[1]
226  test_name=t[0]
227  try:
228  status,rdt_files = obj.run_test(ntimes)
229  all_results[test_name]=(status,obj.short_description(),obj.long_description(),rdt_files)
230  except:
231  error('Could not run test '+test_name,0)
232  all_results[test_name]=(-1,obj.short_description(),obj.long_description(),{})
233 
234  # Move/Print results
235  for k, v in all_results.iteritems() :
236  status = v[0]
237  s_desc = v[1]
238  l_desc = v[2]
239  rdt_files = v[3]
240  test_name = k
241  print '** ' + test_name + ' **'
242  print 'desc: ', s_desc
243  # Print results
244  if status != 0:
245  print "Status [FAILED] ("+str(status)+")"
246  else :
247  print "Status [OK]"
248  print "Results summary ----------------------------"
249 
250  for rdt_id,v in rdt_files.iteritems() :
251  if summarize_results :
252  try:
253  fn=v[0]
254  rdt_d=rdt.read(fn)
255  elapsed_list=rdt.get_column_values(rdt_d,"ELAPSED")
256  try:
257  av=stats.average(elapsed_list)
258  except stats.StatsError, e:
259  print "WARNING: Could not compute average for results at", fn, "(", e, ")"
260  av=0.0
261  try:
262  ci=stats.conf_int(elapsed_list, 95.0)
263  except stats.StatsError, e:
264  print "WARNING: Could not compute confidence interval for results at", fn, "(", e, ")"
265  ci=0.0
266  except rdt.RdtError, e:
267  print "WARNING: error when summarizing results for", fn, "(", e, ")"
268  av=0.0
269  ci=0.0
270  print '{0:15s} : {1:>16f} +- {2:<16f} : {3:s}'.format(rdt_id, av, ci, v[1])
271  else:
272  print '{0:15s} : {1:s} : {2:s}'.format(rdt_id,v[0],v[1])
273  print "--------------------------------------------"
274  # Move results
275  if results_dir != 0 :
276  # Record results to results_dir. For each test, create a directory,
277  # record the test status and copy the rdt result files to the directory.
278  result_dir = os.path.join(results_dir,test_name)
279  if not os.path.isdir(result_dir) :
280  try:
281  os.makedirs(result_dir)
282  except os.error, e:
283  warning(str(e))
284  continue
285  result_info = os.path.join(results_dir,test_name+".info")
286  try:
287  f = open(result_info, 'w+')
288  f.write("test_name : "+test_name+"\n")
289  f.write("test_desc : "+s_desc+"\n")
290  f.write("test_status : "+str(status)+"\n")
291  # Copy rdt files
292  for rdt_id,v in rdt_files.iteritems() :
293  f.write(rdt_id+" : "+v[1]+"\n")
294  f.close()
295  except IOError, e:
296  warning(str(e))
297  continue
298  # Copy rdt files
299  for rdt_id,v in rdt_files.iteritems() :
300  rdt_fn = v[0]
301  rdt_dsc = v[1]
302  # Copy rdt file (rdt_fn) to result_dir
303  try:
304  shutil.copy2(rdt_fn,result_dir)
305  except (IOError, os.error) as why :
306  warning(str(why))
307  continue
def get_test(v)
Definition: test.py:142
def short_description()
Definition: test.py:154
def average(vlist)
Definition: stats.py:45
def conf_int(vlist, conf_level)
Definition: stats.py:59
def get_column_values(rdt_d, field)
Definition: rdt.py:70
def usage()
Definition: test.py:149
def read(filename)
Definition: rdt.py:46
def error(message, status)
Definition: test.py:138