[GRASS-SVN] r61392 - grass/trunk/lib/python/gunittest

svn_grass at osgeo.org svn_grass at osgeo.org
Thu Jul 24 15:23:56 PDT 2014


Author: wenzeslaus
Date: 2014-07-24 15:23:56 -0700 (Thu, 24 Jul 2014)
New Revision: 61392

Modified:
   grass/trunk/lib/python/gunittest/case.py
   grass/trunk/lib/python/gunittest/invoker.py
   grass/trunk/lib/python/gunittest/main.py
   grass/trunk/lib/python/gunittest/reporters.py
   grass/trunk/lib/python/gunittest/runner.py
Log:
gunittest: add a multi test result class and a key-value report for each test file, plus aggregation of those reports (aggregation not well tested)

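For orientation (not part of the diff below): the per-file report that the new KeyValueTestResult writes to test_keyvalue_result.txt should look roughly like the following sketch. The keys match stopTestRun() in runner.py; the values and module names are made up for illustration:

    time=2.345s
    status=passed
    total=12
    failures=0
    errors=0
    skipped=1
    expected_failures=0
    unexpected_successes=0
    modules=g.region,r.info
    test_type=not-specified
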
Modified: grass/trunk/lib/python/gunittest/case.py
===================================================================
--- grass/trunk/lib/python/gunittest/case.py	2014-07-24 22:12:52 UTC (rev 61391)
+++ grass/trunk/lib/python/gunittest/case.py	2014-07-24 22:23:56 UTC (rev 61392)
@@ -42,6 +42,7 @@
 
     def __init__(self, methodName):
         super(TestCase, self).__init__(methodName)
+        self.grass_modules = []
 
     def _formatMessage(self, msg, standardMsg):
         """Honor the longMessage attribute when generating failure messages.
@@ -980,6 +981,7 @@
         _check_module_run_parameters(module)
         try:
             module.run()
+            self.grass_modules.append(module.name)
         except CalledModuleError:
             print module.outputs.stdout
             print module.outputs.stderr
@@ -1013,10 +1015,10 @@
         # note that we cannot use finally because we do not leave except
         try:
             module.run()
+            self.grass_modules.append(module.name)
         except CalledModuleError:
             print module.outputs.stdout
             print module.outputs.stderr
-            pass
         else:
             print module.outputs.stdout
             print module.outputs.stderr

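The case.py change above makes each TestCase instance remember the GRASS modules it ran: the two methods touched by the hunks (presumably assertModule and runModule) append module.name to self.grass_modules after a successful run, and KeyValueTestResult.stopTest() in runner.py below collects those names. A minimal sketch, assuming the installed package path grass.gunittest and the usual assertModule(name, **parameters) call form; the module and flag are only an example:

    from grass.gunittest.case import TestCase

    class TestRegionInfo(TestCase):
        def test_module_runs(self):
            # on success, the module name is recorded in self.grass_modules
            self.assertModule('g.region', flags='p')
            # self.grass_modules is now ['g.region']
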
Modified: grass/trunk/lib/python/gunittest/invoker.py
===================================================================
--- grass/trunk/lib/python/gunittest/invoker.py	2014-07-24 22:12:52 UTC (rev 61391)
+++ grass/trunk/lib/python/gunittest/invoker.py	2014-07-24 22:23:56 UTC (rev 61392)
@@ -20,6 +20,8 @@
 from unittest.main import TestProgram, USAGE_AS_MAIN
 TestProgram.USAGE = USAGE_AS_MAIN
 
+from .checkers import text_to_keyvalue
+
 from .loader import GrassTestLoader, discover_modules
 from .reporters import (GrassTestFilesMultiReporter,
                         GrassTestFilesTextReporter,
@@ -28,7 +30,45 @@
 
 import grass.script.setup as gsetup
 
+import collections
+import types
 
+
+# TODO: change text_to_keyvalue to use the same sep as here
+def keyvalue_to_text(keyvalue, sep='=', vsep='\n', isep=',',
+                     last_vertical=None):
+    if last_vertical is None:
+        last_vertical = vsep == '\n'
+    items = []
+    for key, value in keyvalue.iteritems():
+        # TODO: use isep for iterables other than strings
+        if (not isinstance(value, types.StringTypes)
+                and isinstance(value, collections.Iterable)):
+            # TODO: this does not work for lists of non-strings
+            value = isep.join(value)
+        items.append('{key}{sep}{value}'.format(
+            key=key, sep=sep, value=value))
+    text = vsep.join(items)
+    if last_vertical:
+        text = text + vsep
+    return text
+
+
+def update_keyval_file(filename, module, returncode):
+    if os.path.exists(filename):
+        with open(filename, 'r') as keyval_file:
+            keyval = text_to_keyvalue(keyval_file.read(), sep='=')
+    else:
+        keyval = {}
+
+    # always overwrite name and status
+    keyval['name'] = module.name
+    keyval['status'] = 'failed' if returncode else 'passed'
+    with open(filename, 'w') as keyval_file:
+        keyval_file.write(keyvalue_to_text(keyval))
+    return keyval
+
+
 class GrassTestFilesInvoker(object):
     """A class used to invoke test files and create the main report"""
 
@@ -112,9 +152,13 @@
         returncode = p.wait()
         stdout.close()
         stderr.close()
+        test_summary = update_keyval_file(
+            os.path.join(cwd, 'test_keyvalue_result.txt'),
+            module=module, returncode=returncode)
         self.reporter.end_file_test(module=module, cwd=cwd,
                                     returncode=returncode,
-                                    stdout=stdout_path, stderr=stderr_path)
+                                    stdout=stdout_path, stderr=stderr_path,
+                                    test_summary=test_summary)
         # TODO: add some try-except or with for better error handling
         os.remove(gisrc)
         # TODO: only if clean up

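As a side note (not part of the diff), keyvalue_to_text() is meant to round-trip with checkers.text_to_keyvalue(), and update_keyval_file() merges the invoker's own keys into whatever the test file already wrote. A rough sketch of the intended behavior, assuming the installed package paths grass.gunittest.invoker and grass.gunittest.checkers, and that text_to_keyvalue() parses 'key=value' lines back into a dictionary:

    from grass.gunittest.invoker import keyvalue_to_text
    from grass.gunittest.checkers import text_to_keyvalue

    summary = {'status': 'failed', 'total': 5,
               'modules': ['g.region', 'r.info']}
    text = keyvalue_to_text(summary)
    # produces lines such as: status=failed, total=5, modules=g.region,r.info
    # (dictionary order is not guaranteed; a trailing newline is appended)
    parsed = text_to_keyvalue(text, sep='=')
    # update_keyval_file() reads such a file if it exists, overwrites the
    # 'name' and 'status' keys, and writes the merged result back
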
Modified: grass/trunk/lib/python/gunittest/main.py
===================================================================
--- grass/trunk/lib/python/gunittest/main.py	2014-07-24 22:12:52 UTC (rev 61391)
+++ grass/trunk/lib/python/gunittest/main.py	2014-07-24 22:23:56 UTC (rev 61392)
@@ -18,7 +18,8 @@
 TestProgram.USAGE = USAGE_AS_MAIN
 
 from .loader import GrassTestLoader
-from .runner import GrassTestRunner
+from .runner import (GrassTestRunner, MultiTestResult,
+                     TextTestResult, KeyValueTestResult)
 from .invoker import GrassTestFilesInvoker
 from .utils import silent_rmtree
 
@@ -43,9 +44,18 @@
         buffer_stdout_stderr = False
 
         grass_loader = GrassTestLoader(grass_location=self.grass_location)
+
+        text_result = TextTestResult(stream=sys.stderr,
+                                     descriptions=True,
+                                     verbosity=verbosity)
+        keyval_file = open('test_keyvalue_result.txt', 'w')
+        keyval_result = KeyValueTestResult(stream=keyval_file)
+        result = MultiTestResult(results=[text_result, keyval_result])
+
         grass_runner = GrassTestRunner(verbosity=verbosity,
                                        failfast=failfast,
-                                       buffer=buffer_stdout_stderr)
+                                       buffer=buffer_stdout_stderr,
+                                       result=result)
 
         super(GrassTestProgram, self).__init__(module=module,
                                                argv=unittest_argv,
@@ -56,6 +66,7 @@
                                                failfast=failfast,
                                                catchbreak=catchbreak,
                                                buffer=buffer_stdout_stderr)
+        keyval_file.close()
 
 
 def test():

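To make the wiring in main.py concrete, here is a stripped-down sketch (not part of the diff) of how the new result classes cooperate, assuming the installed package path grass.gunittest:

    import sys
    import unittest
    from grass.gunittest.runner import (GrassTestRunner, MultiTestResult,
                                        TextTestResult, KeyValueTestResult)

    suite = unittest.defaultTestLoader.discover('.')
    keyval_file = open('test_keyvalue_result.txt', 'w')
    result = MultiTestResult(results=[
        TextTestResult(stream=sys.stderr, descriptions=True, verbosity=2),
        KeyValueTestResult(stream=keyval_file),
    ])
    runner = GrassTestRunner(verbosity=2, result=result)
    runner.run(suite)   # every TestResult call is fanned out to both results
    keyval_file.close()
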
Modified: grass/trunk/lib/python/gunittest/reporters.py
===================================================================
--- grass/trunk/lib/python/gunittest/reporters.py	2014-07-24 22:12:52 UTC (rev 61391)
+++ grass/trunk/lib/python/gunittest/reporters.py	2014-07-24 22:23:56 UTC (rev 61392)
@@ -171,10 +171,10 @@
                 else:
                     raise
 
-    def end_file_test(self, module, cwd, returncode, stdout, stderr):
+    def end_file_test(self, **kwargs):
         for reporter in self.reporters:
             try:
-                reporter.end_file_test(module, cwd, returncode, stdout, stderr)
+                reporter.end_file_test(**kwargs)
             except AttributeError:
                 if self.forgiving:
                     pass
@@ -221,7 +221,7 @@
         self._start_file_test_called = True
         self.test_files += 1
 
-    def end_file_test(self, module, cwd, returncode, stdout, stderr):
+    def end_file_test(self, returncode, **kwargs):
         assert self._start_file_test_called
         self.file_end_time = datetime.datetime.now()
         self.file_time = self.file_end_time - self.file_start_time
@@ -234,6 +234,8 @@
 
 class GrassTestFilesHtmlReporter(GrassTestFilesCountingReporter):
 
+    unknown_number = '<span style="font-size: 60%">unknown</span>'
+
     def __init__(self):
         super(GrassTestFilesHtmlReporter, self).__init__()
         self.main_index = None
@@ -243,6 +245,16 @@
         # having all variables public although not really part of API
         self.main_index = open(os.path.join(results_dir, 'index.html'), 'w')
 
+        # TODO: this can be moved to the counter class
+        self.failures = 0
+        self.errors = 0
+        self.skipped = 0
+        self.successes = 0
+        self.expected_failures = 0
+        self.unexpected_success = 0
+        self.total = 0
+        # TODO: also handle skipped and unexpected successes
+
         svn_info = get_svn_info()
         if not svn_info:
             svn_text = ('<span style="font-size: 60%">'
@@ -264,6 +276,8 @@
                               '<th>Tested directory</th>'
                               '<th>Test file</th>'
                               '<th>Status</th>'
+                              '<th>Tests</th><th>Successful</th>'
+                              '<th>Failed</th><th>Percent successful</th>'
                               '</tr></thead><tbody>'.format(
                                   time=self.main_start_time,
                                   svn=svn_text))
@@ -271,14 +285,23 @@
     def finish(self):
         super(GrassTestFilesHtmlReporter, self).finish()
 
+        if self.total:
+            pass_per = 100 * (float(self.successes) / self.total)
+            pass_per = '{:.2f}%'.format(pass_per)
+        else:
+            pass_per = self.unknown_number
         tfoot = ('<tfoot>'
                  '<tr>'
                  '<td>Summary</td>'
                  '<td>{nfiles} test files</td>'
                  '<td>{nsper:.2f}% successful</td>'
+                 '<td>{total}</td><td>{st}</td><td>{ft}</td><td>{pt}</td>'
                  '</tr>'
-                 '</tfoot>'.format(nfiles=self.test_files,
-                                   nsper=self.file_pass_per))
+                 '</tfoot>'.format(
+                     nfiles=self.test_files, nsper=self.file_pass_per,
+                     st=self.successes, ft=self.failures + self.errors,
+                     total=self.total, pt=pass_per
+                     ))
 
         summary_sentence = ('Executed {nfiles} test files in {time:}.'
                             ' From them'
@@ -329,16 +352,50 @@
             return ('<span style="color: green">&#x2713;</span>'
                     ' Test succeeded (return code %d)' % (returncode))
 
-    def end_file_test(self, module, cwd, returncode, stdout, stderr):
+    def end_file_test(self, module, cwd, returncode, stdout, stderr,
+                      test_summary):
         super(GrassTestFilesHtmlReporter, self).end_file_test(
             module=module, cwd=cwd, returncode=returncode,
             stdout=stdout, stderr=stderr)
+        # TODO: is it OK to count the others against the total?
+        total = test_summary.get('total', None)
+        failures = test_summary.get('failures', 0)
+        errors = test_summary.get('errors', 0)
+        # Python unittest's TestResult class reports success when there are
+        # no errors and no failures, so skipped tests, expected failures and
+        # unexpected successes are ignored there;
+        # here, successful tests are counted as total minus all the others
+        # TODO: add success counter to GrassTestResult base class
+        skipped = test_summary.get('skipped', 0)
+        expected_failures = test_summary.get('expected_failures', 0)
+        unexpected_success = test_summary.get('unexpected_success', 0)
+
+        self.failures += failures
+        self.errors += errors
+        self.skipped += skipped
+        self.expected_failures += expected_failures
+        self.unexpected_success += unexpected_success
+
+        if total is not None:
+            # successes are only the clear passes
+            # the percentage reflects all categories, but only failures and
+            # errors go into the table
+            successes = (total - failures - errors - skipped
+                         - expected_failures - unexpected_success)
+            self.successes += successes
+            self.total += total
+
+            pass_per = 100 * (float(successes) / total)
+            pass_per = '{:.2f}%'.format(pass_per)
+        else:
+            total = successes = pass_per = self.unknown_number
+        bad_ones = failures + errors
         self.main_index.write(
             '<tr><td>{d}</td>'
             '<td><a href="{d}/{m}/index.html">{m}</a></td><td>{sf}</td>'
+            '<td>{total}</td><td>{st}</td><td>{ft}</td><td>{pt}</td>'
             '<tr>'.format(
                 d=module.tested_dir, m=module.name,
-                sf=self.returncode_to_html_text(returncode)))
+                sf=self.returncode_to_html_text(returncode),
+                st=successes, ft=bad_ones, total=total, pt=pass_per))
         self.wrap_stdstream_to_html(infile=stdout,
                                     outfile=os.path.join(cwd, 'stdout.html'),
                                     module=module, stream='stdout')
@@ -400,16 +457,25 @@
         super(GrassTestFilesTextReporter, self).start_file_test(module)
         self._stream.flush()  # to get previous lines to the report
 
-    def end_file_test(self, module, cwd, returncode, stdout, stderr):
+    def end_file_test(self, module, cwd, returncode, stdout, stderr,
+                      test_summary):
         super(GrassTestFilesTextReporter, self).end_file_test(
             module=module, cwd=cwd, returncode=returncode,
             stdout=stdout, stderr=stderr)
 
         if returncode:
             self._stream.write(
-                '{m} from {d} failed\n'
+                '{m} from {d} failed'
                 .format(
                     d=module.tested_dir,
                     m=module.name))
+            num_failed = test_summary.get('failures', None)
+            if num_failed:
+                if num_failed > 1:
+                    text = ' ({f} tests failed)'
+                else:
+                    text = ' ({f} test failed)'
+                self._stream.write(text.format(f=num_failed))
+            self._stream.write('\n')
             # TODO: here we lost the possibility to include also file name
             # of the appropriate report

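A quick worked example (not part of the diff) of the per-file numbers the HTML reporter now derives from the key-value summary; the figures are arbitrary:

    total, failures, errors = 10, 1, 0
    skipped = expected_failures = unexpected_success = 0
    successes = (total - failures - errors - skipped
                 - expected_failures - unexpected_success)        # 9
    pass_per = '{:.2f}%'.format(100 * float(successes) / total)   # '90.00%'
    bad_ones = failures + errors    # shown in the 'Failed' column
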
Modified: grass/trunk/lib/python/gunittest/runner.py
===================================================================
--- grass/trunk/lib/python/gunittest/runner.py	2014-07-24 22:12:52 UTC (rev 61391)
+++ grass/trunk/lib/python/gunittest/runner.py	2014-07-24 22:23:56 UTC (rev 61392)
@@ -18,7 +18,7 @@
 import sys
 import time
 
-from unittest import result
+from unittest.result import TestResult
 from unittest.signals import registerResult
 
 __unittest = True
@@ -40,7 +40,7 @@
         self.write('\n') # text-mode streams translate to \r\n if needed
 
 
-class TextTestResult(result.TestResult):
+class TextTestResult(TestResult):
     """A test result class that can print formatted text results to a stream.
 
     Used by TextTestRunner.
@@ -49,12 +49,17 @@
     separator2 = '-' * 70
 
     def __init__(self, stream, descriptions, verbosity):
-        super(TextTestResult, self).__init__(stream, descriptions, verbosity)
-        self.stream = stream
+        super(TextTestResult, self).__init__(
+            stream=stream, descriptions=descriptions, verbosity=verbosity)
+        self.stream = _WritelnDecorator(stream)
         self.showAll = verbosity > 1
         self.dots = verbosity == 1
         self.descriptions = descriptions
 
+        self.start_time = None
+        self.end_time = None
+        self.time_taken = None
+
     def getDescription(self, test):
         doc_first_line = test.shortDescription()
         if self.descriptions and doc_first_line:
@@ -126,35 +131,310 @@
     def printErrorList(self, flavour, errors):
         for test, err in errors:
             self.stream.writeln(self.separator1)
-            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
+            self.stream.writeln("%s: %s" % (flavour,
+                                            self.getDescription(test)))
             self.stream.writeln(self.separator2)
             self.stream.writeln("%s" % err)
 
+    def setTimes(self, start_time, end_time, time_taken):
+        self.start_time = start_time
+        self.end_time = end_time
+        self.time_taken = time_taken
 
-class GrassTestRunner(object):
-    """A test runner class that displays results in textual form.
+    def stopTestRun(self):
+        super(TextTestResult, self).stopTestRun()
+        self.printErrors()
+        self.stream.writeln(self.separator2)
+        run = self.testsRun
+        self.stream.write("Ran %d test%s" % (run, run != 1 and "s" or ""))
+        if self.time_taken:
+            self.stream.write(" in %.3fs" % (self.time_taken))
+        self.stream.writeln()
 
-    It prints out the names of tests as they are run, errors as they
-    occur, and a summary of the results at the end of the test run.
+        expectedFails = unexpectedSuccesses = skipped = 0
+        results = map(len, (self.expectedFailures,
+                            self.unexpectedSuccesses,
+                            self.skipped))
+        expectedFails, unexpectedSuccesses, skipped = results
+
+        infos = []
+        if not self.wasSuccessful():
+            self.stream.write("FAILED")
+            failed, errored = map(len, (self.failures, self.errors))
+            if failed:
+                infos.append("failures=%d" % failed)
+            if errored:
+                infos.append("errors=%d" % errored)
+        else:
+            self.stream.write("OK")
+        if skipped:
+            infos.append("skipped=%d" % skipped)
+        if expectedFails:
+            infos.append("expected_failures=%d" % expectedFails)
+        if unexpectedSuccesses:
+            infos.append("unexpected_successes=%d" % unexpectedSuccesses)
+        if infos:
+            self.stream.writeln(" (%s)" % (", ".join(infos),))
+        else:
+            self.stream.write("\n")
+
+
+class KeyValueTestResult(TestResult):
+    """A test result class that can print formatted text results to a stream.
+
+    Used by TextTestRunner.
     """
-    resultclass = TextTestResult
+    separator1 = '=' * 70
+    separator2 = '-' * 70
 
+    def __init__(self, stream, test_type=None):
+        super(KeyValueTestResult, self).__init__(
+            stream=stream, descriptions=None, verbosity=None)
+        self._stream = _WritelnDecorator(stream)
+
+        self.start_time = None
+        self.end_time = None
+        self.time_taken = None
+
+        if test_type:
+            self.test_type = test_type
+        else:
+            self.test_type = 'not-specified'
+
+        self._grass_modules = []
+
+    def setTimes(self, start_time, end_time, time_taken):
+        self.start_time = start_time
+        self.end_time = end_time
+        self.time_taken = time_taken
+
+    def stopTest(self, test):
+        super(KeyValueTestResult, self).stopTest(test)
+        if hasattr(test, 'grass_modules'):
+            self._grass_modules.extend(test.grass_modules)
+
+    def stopTestRun(self):
+        super(KeyValueTestResult, self).stopTestRun()
+        infos = []
+
+        run = self.testsRun
+        # TODO: name should be included by test file caller
+        # from inspect import getsourcefile
+        # from os.path import abspath
+        # abspath(getsourcefile(lambda _: None))
+        # not writing name is a good option
+        # infos.append("name=%s" % 'unknown')
+
+        infos.append("time=%.3fs" % (self.time_taken))
+#            'date={rundate}\n'
+#            'date={runtime}\n'
+#            'date={start_datetime}\n'
+#            'date={end_datetime}\n'
+
+        results = map(len, (self.expectedFailures,
+                            self.unexpectedSuccesses,
+                            self.skipped))
+        expectedFails, unexpectedSuccesses, skipped = results
+
+        infos.append("status=%s" % ('failed' if self.wasSuccessful() else 'passed'))
+
+        infos.append("total=%d" % (run))
+        failed, errored = map(len, (self.failures, self.errors))
+        infos.append("failures=%d" % failed)
+        infos.append("errors=%d" % errored)
+        infos.append("skipped=%d" % skipped)
+
+        # TODO: document this: if not supported by the viewer,
+        # expected_failures should be counted as failures and vice versa,
+        # or should both be added to skipped as unclear?
+        infos.append("expected_failures=%d" % expectedFails)
+        infos.append("unexpected_successes=%d" % unexpectedSuccesses)
+
+        # TODO: include each module just once? list good and bad modules?
+        infos.append("modules=%s" % ','.join(self._grass_modules))
+        
+        # module, modules?, c, c++?, python
+        # TODO: include also type modules?
+        # TODO: include also C++ code?
+        # TODO: distinguish C and Python modules?
+        infos.append("test_type=%s" % (self.test_type))
+
+        self._stream.write('\n'.join(infos))
+        self._stream.write('\n')
+        self._stream.flush()
+
+
+class MultiTestResult(TestResult):
+    # descriptions and verbosity are unused here;
+    # they are included only for compatibility with unittest's TestResult,
+    # where they are also unused, so perhaps we can remove them
+    # stream is set to None and not part of the interface; it would make no sense here
+    def __init__(self, results, forgiving=False,
+                 descriptions=None, verbosity=None):
+        super(MultiTestResult, self).__init__(
+            descriptions=descriptions, verbosity=verbosity, stream=None)
+        self._results = results
+        self._forgiving = forgiving
+
+    def startTest(self, test):
+        super(MultiTestResult, self).startTest(test)
+        for result in self._results:
+            try:
+                result.startTest(test)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def stopTest(self, test):
+        """Called when the given test has been run"""
+        super(MultiTestResult, self).stopTest(test)
+        for result in self._results:
+            try:
+                result.stopTest(test)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def addSuccess(self, test):
+        super(MultiTestResult, self).addSuccess(test)
+        for result in self._results:
+            try:
+                result.addSuccess(test)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def addError(self, test, err):
+        super(MultiTestResult, self).addError(test, err)
+        for result in self._results:
+            try:
+                result.addError(test, err)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def addFailure(self, test, err):
+        super(MultiTestResult, self).addFailure(test, err)
+        for result in self._results:
+            try:
+                result.addSuccess(test)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def addSkip(self, test, reason):
+        super(MultiTestResult, self).addSkip(test, reason)
+        for result in self._results:
+            try:
+                result.addSkip(test, reason)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def addExpectedFailure(self, test, err):
+        super(MultiTestResult, self).addExpectedFailure(test, err)
+        for result in self._results:
+            try:
+                result.addExpectedFailure(test, err)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def addUnexpectedSuccess(self, test):
+        super(MultiTestResult, self).addUnexpectedSuccess(test)
+        for result in self._results:
+            try:
+                result.addUnexpectedSuccess(test)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def printErrors(self):
+        "Called by TestRunner after test run"
+        super(MultiTestResult, self).printErrors()
+        for result in self._results:
+            try:
+                result.printErrors()
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def startTestRun(self):
+        """Called once before any tests are executed.
+
+        See startTest for a method called before each test.
+        """
+        super(MultiTestResult, self).startTestRun()
+        for result in self._results:
+            try:
+                result.startTestRun()
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    def stopTestRun(self):
+        """Called once after all tests are executed.
+
+        See stopTest for a method called after each test.
+        """
+        super(MultiTestResult, self).stopTestRun()
+        for result in self._results:
+            try:
+                result.stopTestRun()
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+    # TODO: better would be to pass start at the beginning
+    # alternative is to leave counting time on class
+    # TODO: document: we expect all grass classes to have setTimes
+    # TODO: alternatively, be more forgiving for non-unittest methods
+    def setTimes(self, start_time, end_time, time_taken):
+        for result in self._results:
+            try:
+                result.setTimes(start_time, end_time, time_taken)
+            except AttributeError:
+                if self._forgiving:
+                    pass
+                else:
+                    raise
+
+
+class GrassTestRunner(object):
     def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
-                 failfast=False, buffer=False, resultclass=None):
+                 failfast=False, buffer=False, result=None):
         self.stream = _WritelnDecorator(stream)
         self.descriptions = descriptions
         self.verbosity = verbosity
         self.failfast = failfast
         self.buffer = buffer
-        if resultclass is not None:
-            self.resultclass = resultclass
+        self._result = result
 
-    def _makeResult(self):
-        return self.resultclass(self.stream, self.descriptions, self.verbosity)
-
     def run(self, test):
         "Run the given test case or test suite."
-        result = self._makeResult()
+        result = self._result
         registerResult(result)
         result.failfast = self.failfast
         result.buffer = self.buffer
@@ -165,47 +445,13 @@
         try:
             test(result)
         finally:
+            stopTime = time.time()
+            timeTaken = stopTime - startTime
+            setTimes = getattr(result, 'setTimes', None)
+            if setTimes is not None:
+                setTimes(startTime, stopTime, timeTaken)
             stopTestRun = getattr(result, 'stopTestRun', None)
             if stopTestRun is not None:
                 stopTestRun()
-        stopTime = time.time()
-        timeTaken = stopTime - startTime
-        result.printErrors()
-        if hasattr(result, 'separator2'):
-            self.stream.writeln(result.separator2)
-        run = result.testsRun
-        self.stream.writeln("Ran %d test%s in %.3fs" %
-                            (run, run != 1 and "s" or "", timeTaken))
-        self.stream.writeln()
 
-        expectedFails = unexpectedSuccesses = skipped = 0
-        try:
-            results = map(len, (result.expectedFailures,
-                                result.unexpectedSuccesses,
-                                result.skipped))
-        except AttributeError:
-            pass
-        else:
-            expectedFails, unexpectedSuccesses, skipped = results
-
-        infos = []
-        if not result.wasSuccessful():
-            self.stream.write("FAILED")
-            failed, errored = map(len, (result.failures, result.errors))
-            if failed:
-                infos.append("failures=%d" % failed)
-            if errored:
-                infos.append("errors=%d" % errored)
-        else:
-            self.stream.write("OK")
-        if skipped:
-            infos.append("skipped=%d" % skipped)
-        if expectedFails:
-            infos.append("expected failures=%d" % expectedFails)
-        if unexpectedSuccesses:
-            infos.append("unexpected successes=%d" % unexpectedSuccesses)
-        if infos:
-            self.stream.writeln(" (%s)" % (", ".join(infos),))
-        else:
-            self.stream.write("\n")
         return result


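One design note on the runner change (not part of the diff): GrassTestRunner no longer formats the summary itself; it only calls setTimes() on the result when the result provides it, so a plain unittest result still works, and MultiTestResult tolerates such results when forgiving=True. A tiny sketch, assuming the installed package path grass.gunittest:

    from unittest.result import TestResult
    from grass.gunittest.runner import MultiTestResult

    plain = TestResult()                  # standard result, has no setTimes()
    getattr(plain, 'setTimes', None)      # None, so GrassTestRunner skips timing
    multi = MultiTestResult(results=[plain], forgiving=True)
    multi.setTimes(0.0, 1.5, 1.5)         # the AttributeError from 'plain' is
                                          # swallowed because forgiving=True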

More information about the grass-commit mailing list