xref: /petsc/config/report_tests.py (revision 0425e358a93e3df180e109da13d1c846f016d85b)
#!/usr/bin/env python
from __future__ import print_function
import glob, os, re
import optparse
import inspect

"""
Quick script for parsing the output of the test system and summarizing the results.
"""

def inInstallDir():
  """
  When petsc is installed then this file is installed in:
       <PREFIX>/share/petsc/examples/config/report_tests.py
  otherwise the path is:
       <PETSC_DIR>/config/report_tests.py
  We use this difference to determine whether we are running from the install directory.
  """
  thisscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  dirlist=thisscriptdir.split(os.path.sep)
  if len(dirlist)>4:
    lastfour=os.path.sep.join(dirlist[len(dirlist)-4:])
    if lastfour==os.path.join('share','petsc','examples','config'):
      return True
    else:
      return False
  else:
    return False
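# For example, under a hypothetical install prefix the script directory
# '/opt/petsc/share/petsc/examples/config' ends in share/petsc/examples/config,
# so inInstallDir() returns True; a source-tree path such as
# '/home/user/petsc/config' does not, and the function returns False.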

def summarize_results(directory,make,ntime,etime):
  ''' Loop over all of the results files and summarize the results'''
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No tests run')
    return
  summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
           'time':0}
  timesummary={}
  timelist=[]
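  # Each *.counts file holds one whitespace-separated key/value entry per line,
  # using the keys of 'summary' above.  A purely illustrative example file:
  #
  #   total 3
  #   success 2
  #   failed 1
  #   failures diff-snes_tutorials-runex19_2
  #   time 1.42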
  for cfile in glob.glob('*.counts'):
    with open(cfile, 'r') as f:
      for line in f:
        l = line.split()
        if l[0] == 'failures':
           if len(l)>1:
             summary[l[0]] += l[1:]
        elif l[0] == 'time':
           if len(l)==1: continue
           summary[l[0]] += float(l[1])
           timesummary[cfile]=float(l[1])
           timelist.append(float(l[1]))
        elif l[0] not in summary:
           continue
        else:
           summary[l[0]] += int(l[1])

  failstr=' '.join(summary['failures'])
  print("\n# -------------")
  print("#   Summary    ")
  print("# -------------")
  if failstr.strip(): print("# FAILED " + failstr)

  for t in "success failed todo skip".split():
    percent=summary[t]/float(summary['total'])*100
    print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], summary['total'], percent))
  print("#")
  if etime:
    print("# Wall clock time for tests: %s sec"% etime)
  print("# Approximate CPU time (not incl. build time): %s sec"% summary['time'])

  if failstr.strip():
      fail_targets=(
          re.sub(r'(?<=[0-9]_\w)_.*','',
          re.sub('cmd-','',
          re.sub('diff-','',failstr+' ')))
          )
      # Strip the subtest suffix so each target names the parent test
      fail_list=[]
      for failure in fail_targets.split():
        if failure.count('-')>1:
            fail_list.append('-'.join(failure.split('-')[:-1]))
        else:
            fail_list.append(failure)
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # Make the message nice
      makefile="gmakefile.test" if inInstallDir() else "gmakefile"

      print("#\n# To rerun failed tests: ")
      print("#     "+make+" -f "+makefile+" test search='" + fail_targets.strip()+"'")

  if ntime>0:
      print("#\n# Timing summary: ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=(ntime if ntime<len(timelist) else len(timelist))
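      # Each line printed below has the form '#   <test>: <seconds> sec',
      # e.g. '#   snes_tutorials-ex19_2: 12.34 sec' (hypothetical name and time).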
      # Double loop so the entries come out in descending order of time
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
              print("#   %s: %.2f sec" % (re.sub(r'\.counts','',cf), timesummary[cf]))
  os.chdir(startdir)
  return

def generate_xml(directory):
    startdir= os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        print('# No tests run')
        return
    # loop over *.counts files for all the problems tested in the test suite
    testdata = {}
    for cfile in glob.glob('*.counts'):
        # first we get rid of the .counts extension, then we split the name in two
        # to recover the problem name and the package it belongs to
        fname = cfile.split('.')[0]
        testname = fname.split('-')
        probname = ''.join(testname[1:])
        # we split the package into its subcomponents of PETSc module (e.g.: snes)
        # and test type (e.g.: tutorials)
        testname_list = testname[0].split('_')
        pkgname = testname_list[0]
        testtype = testname_list[-1]
        # in order to correctly assemble the folder path for problem outputs, we
        # iterate over any possible subpackage names and test suffixes
        testname_short = testname_list[:-1]
        prob_subdir = os.path.join(*testname_short)
        probfolder = 'run%s'%probname
        probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        if not os.path.exists(probdir):
            probfolder = probfolder.split('_')[0]
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
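        # For example, a hypothetical counts file 'snes_tutorials-ex19_1.counts'
        # yields pkgname='snes', testtype='tutorials', probname='ex19_1' and
        # probdir='../snes/examples/tutorials/runex19_1'.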
        # read the diff, stderr and stdout files produced for this problem
        # (a missing file is treated as empty output)
        try:
            with open('%s/diff-%s.out'%(probdir, probfolder),'r') as probdiff:
                difflines = probdiff.readlines()
        except IOError:
            difflines = []
        try:
            with open('%s/%s.err'%(probdir, probfolder),'r') as probstderr:
                stderrlines = probstderr.readlines()
        except IOError:
            stderrlines = []
        try:
            with open('%s/%s.tmp'%(probdir, probname), 'r') as probstdout:
                stdoutlines = probstdout.readlines()
        except IOError:
            stdoutlines = []
        # join the package, subpackage and problem type names into a "class"
        classname = pkgname
        for item in testname_list[1:]:
            classname += '.%s'%item
        # if this is the first time we see this package, initialize its dict
        if pkgname not in testdata.keys():
            testdata[pkgname] = {
                'total':0,
                'success':0,
                'failed':0,
                'errors':0,
                'todo':0,
                'skip':0,
                'time':0,
                'problems':{}
            }
        # add the dict for the problem into the dict for the package
        testdata[pkgname]['problems'][probname] = {
            'classname':classname,
            'time':0,
            'failed':False,
            'skipped':False,
            'diff':difflines,
            'stdout':stdoutlines,
            'stderr':stderrlines
        }
        # process the *.counts file and increment problem status trackers
        if len(testdata[pkgname]['problems'][probname]['stderr'])>0:
            testdata[pkgname]['errors'] += 1
        with open(cfile, 'r') as f:
            for line in f:
                l = line.split()
                if l[0] == 'failed':
                    testdata[pkgname]['problems'][probname][l[0]] = True
                    testdata[pkgname][l[0]] += 1
                elif l[0] == 'time':
                    if len(l)==1: continue
                    testdata[pkgname]['problems'][probname][l[0]] = float(l[1])
                    testdata[pkgname][l[0]] += float(l[1])
                elif l[0] == 'skip':
                    # set the 'skipped' key, which is the flag checked when writing the XML
                    testdata[pkgname]['problems'][probname]['skipped'] = True
                    testdata[pkgname][l[0]] += 1
                elif l[0] not in testdata[pkgname].keys():
                    continue
                else:
                    testdata[pkgname][l[0]] += 1
    # at this point we have the complete test results in dictionary structures
    # we can now write this information into a jUnit formatted XML file
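    # The generated file has roughly the following shape (names and values are
    # illustrative):
    #
    #   <testsuites>
    #     <testsuite errors="0" failures="1" name="snes" tests="12">
    #       <testcase classname="snes.tutorials" name="ex19_1" time="1.42">
    #         <failure type="output"><![CDATA[ ... ]]></failure>
    #       </testcase>
    #     </testsuite>
    #   </testsuites>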
    junit = open('../testresults.xml', 'w')
    junit.write('<?xml version="1.0" ?>\n')
    junit.write('<testsuites>\n')
    for pkg in testdata.keys():
        testsuite = testdata[pkg]
        junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
            testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
        for prob in testsuite['problems'].keys():
            p = testsuite['problems'][prob]
            junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                p['classname'], prob, p['time']))
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                junit.write('      <skipped/>\n')
            elif len(p['stderr'])>0:
                # if we got here, the test crashed with an error
                # we show the stderr output under <error>
                junit.write('      <error type="crash">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                for line in p['stderr']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </error>\n')
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file
                # we show the diff between new output and old output under <failure>
                junit.write('      <failure type="output">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                for line in p['diff']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </failure>\n')
            elif len(p['stdout'])>0:
                # if we got here, the test succeeded so we just show the stdout
                # for manual sanity-checks
                junit.write('      <system-out>\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                count = 0
                for line in p['stdout']:
                    junit.write("%s\n"%line.rstrip())
                    count += 1
                    if count >= 1024:
                        break
                junit.write("]]>")
                junit.write('      </system-out>\n')
            junit.write('    </testcase>\n')
        junit.write('  </testsuite>\n')
    junit.write('</testsuites>')
    junit.close()
    os.chdir(startdir)
    return

def main():
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option('-d', '--directory', dest='directory',
                      help='Directory containing results of petsc test system',
                      default=os.path.join(os.environ.get('PETSC_ARCH',''),
                                           'tests','counts'))
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time',
                      help='Report elapsed time in output',
                      default=None)
    parser.add_option('-m', '--make', dest='make',
                      help='make executable to report in summary',
                      default='make')
    parser.add_option('-t', '--time', dest='time',
                      help='-t n: Report the n most expensive (longest running) jobs',
                      default=0)
    options, args = parser.parse_args()

    # Process arguments
    if len(args) > 0:
        parser.print_usage()
        return

    summarize_results(options.directory,options.make,int(options.time),options.elapsed_time)

    generate_xml(options.directory)

if __name__ == "__main__":
    main()