# -*- test-case-name: twisted.trial.test.test_reporter -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange

"""
Defines classes that handle the results of tests.
"""

from __future__ import annotations

import os
import sys
import time
import unittest as pyunit
import warnings
from collections import OrderedDict
from types import TracebackType
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union

from zope.interface import implementer

from typing_extensions import TypeAlias

from twisted.python import log, reflect
from twisted.python.components import proxyForInterface
from twisted.python.failure import Failure
from twisted.python.util import untilConcludes
from twisted.trial import itrial, util

if TYPE_CHECKING:
    from ._synctest import Todo

try:
    from subunit import TestProtocolClient
except ImportError:
    TestProtocolClient = None

ExcInfo: TypeAlias = Tuple[Type[BaseException], BaseException, TracebackType]
XUnitFailure = Union[ExcInfo, Tuple[None, None, None]]
TrialFailure = Union[XUnitFailure, Failure]


def _makeTodo(value: str) -> "Todo":
    """
    Return a L{Todo} object built from C{value}.

    This is a synonym for L{twisted.trial.unittest.makeTodo}, but imported
    locally to avoid circular imports.

    @param value: A string or a tuple of C{(errors, reason)}, where C{errors}
        is either a single exception class or an iterable of exception
        classes.

    @return: A L{Todo} object.
    """
    from twisted.trial.unittest import makeTodo

    return makeTodo(value)


class BrokenTestCaseWarning(Warning):
    """
    Emitted as a warning when an exception occurs in one of setUp or tearDown.
    """


class SafeStream:
    """
    Wraps a stream object so that all C{write} calls are wrapped in
    L{untilConcludes<twisted.python.util.untilConcludes>}.
    """

    def __init__(self, original):
        self.original = original

    def __getattr__(self, name):
        return getattr(self.original, name)

    def write(self, *a, **kw):
        return untilConcludes(self.original.write, *a, **kw)
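

# Illustrative use of SafeStream (a sketch, not part of the public API;
# ``sys.stdout`` is just an example target).  ``untilConcludes`` retries the
# wrapped call while it is interrupted by a signal (EINTR), so output is not
# lost to an interrupted system call:
#
#     safe = SafeStream(sys.stdout)
#     safe.write("written even if a signal interrupts the syscall\n")
#     safe.flush()  # non-write attributes pass through to the original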


@implementer(itrial.IReporter)
class TestResult(pyunit.TestResult):
    """
    Accumulates the results of several L{twisted.trial.unittest.TestCase}s.

    @ivar successes: count the number of successes achieved by the test run.
    @type successes: C{int}

    @ivar _testStarted: The time when the current test was started.  It
        defaults to L{None}, which means that no test is currently running.
    @ivar _lastTime: The duration of the most recently completed test.  It
        defaults to L{None}, which means that no test has completed yet.
    """

    # Used when no todo provided to addExpectedFailure or addUnexpectedSuccess.
    _DEFAULT_TODO = "Test expected to fail"

    skips: List[Tuple[itrial.ITestCase, str]]
    expectedFailures: List[Tuple[itrial.ITestCase, str, "Todo"]]  # type: ignore[assignment]
    unexpectedSuccesses: List[Tuple[itrial.ITestCase, str]]  # type: ignore[assignment]
    successes: int
    # The time the current test started, as returned by _getTime.
    _testStarted: Optional[float]
    # The duration of the test.  It is None until the test completes.
    _lastTime: Optional[float]

    def __init__(self):
        super().__init__()
        self.skips = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.successes = 0
        self._timings = []
        self._testStarted = None
        self._lastTime = None

    def __repr__(self) -> str:
        return "<%s run=%d errors=%d failures=%d todos=%d skips=%d unexpectedSuccesses=%d>" % (
            reflect.qual(self.__class__),
            self.testsRun,
            len(self.errors),
            len(self.failures),
            len(self.expectedFailures),
            len(self.skips),
            len(self.unexpectedSuccesses),
        )

    def _getTime(self):
        return time.time()

    def _getFailure(self, error):
        """
        Convert a C{sys.exc_info()}-style tuple to a L{Failure}, if necessary.
        """
        is_exc_info_tuple = isinstance(error, tuple) and len(error) == 3
        if is_exc_info_tuple:
            return Failure(error[1], error[0], error[2])
        elif isinstance(error, Failure):
            return error
        raise TypeError(f"Cannot convert {error} to a Failure")
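
    # The two shapes ``_getFailure`` accepts, for illustration (a sketch;
    # ``result`` stands for any TestResult instance):
    #
    #     try:
    #         1 / 0
    #     except ZeroDivisionError:
    #         f1 = result._getFailure(sys.exc_info())           # exc_info tuple
    #     f2 = result._getFailure(Failure(ValueError("boom")))  # passed through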

    def startTest(self, test):
        """
        This must be called before the given test is commenced.

        @type test: L{pyunit.TestCase}
        """
        super().startTest(test)
        self._testStarted = self._getTime()

    def stopTest(self, test):
        """
        This must be called after the given test is completed.

        @type test: L{pyunit.TestCase}
        """
        super().stopTest(test)
        if self._testStarted is not None:
            self._lastTime = self._getTime() - self._testStarted

    def addFailure(self, test, fail):
        """
        Report a failed assertion for the given test.

        @type test: L{pyunit.TestCase}
        @type fail: L{Failure} or L{tuple}
        """
        self.failures.append((test, self._getFailure(fail)))

    def addError(self, test, error):
        """
        Report an error that occurred while running the given test.

        @type test: L{pyunit.TestCase}
        @type error: L{Failure} or L{tuple}
        """
        self.errors.append((test, self._getFailure(error)))

    def addSkip(self, test, reason):
        """
        Report that the given test was skipped.

        In Trial, tests can be 'skipped'.  Tests are skipped mostly because
        there is some platform or configuration issue that prevents them from
        being run correctly.

        @type test: L{pyunit.TestCase}
        @type reason: L{str}
        """
        self.skips.append((test, reason))

    def addUnexpectedSuccess(self, test, todo=None):
        """
        Report that the given test succeeded against expectations.

        In Trial, tests can be marked 'todo'.  That is, they are expected to
        fail.  When a test that is expected to fail instead succeeds, it
        should call this method to report the unexpected success.

        @type test: L{pyunit.TestCase}
        @type todo: L{unittest.Todo}, or L{None}, in which case a default todo
            message is provided.
        """
        if todo is None:
            todo = _makeTodo(self._DEFAULT_TODO)
        self.unexpectedSuccesses.append((test, todo))

    def addExpectedFailure(self, test, error, todo=None):
        """
        Report that the given test failed, and was expected to do so.

        In Trial, tests can be marked 'todo'.  That is, they are expected to
        fail.

        @type test: L{pyunit.TestCase}
        @type error: L{Failure}
        @type todo: L{unittest.Todo}, or L{None}, in which case a default todo
            message is provided.
        """
        if todo is None:
            todo = _makeTodo(self._DEFAULT_TODO)
        self.expectedFailures.append((test, error, todo))

    def addSuccess(self, test):
        """
        Report that the given test succeeded.

        @type test: L{pyunit.TestCase}
        """
        self.successes += 1

    def wasSuccessful(self):
        """
        Report whether or not this test suite was successful.

        The behaviour of this method changed in L{pyunit} in Python 3.4 to
        fail if there are any errors, failures, or unexpected successes.
        Prior to 3.4, it failed only if there were errors or failures.  This
        method implements the old behaviour for backwards compatibility
        reasons, checking just for errors and failures.

        @rtype: L{bool}
        """
        return len(self.failures) == len(self.errors) == 0
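
    # Sketch of the divergence from the standard library described above: a
    # run containing only an unexpected success is still "successful" here,
    # while ``unittest.TestResult.wasSuccessful`` on Python 3.4+ would report
    # failure.  (``test`` stands for any ITestCase; names are illustrative.)
    #
    #     result = TestResult()
    #     result.addUnexpectedSuccess(test)
    #     assert result.wasSuccessful()  # no errors or failures recorded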

    def upDownError(self, method, error, warn, printStatus):
        """
        Hook called when an error occurs in setUp or tearDown.  Subclasses
        override this to display the error.
        """

    def cleanupErrors(self, errs):
        """
        Hook called with errors raised while cleaning up after a test.
        Subclasses override this to display the errors.
        """

    def done(self):
        """
        The test suite has finished running.
        """


@implementer(itrial.IReporter)
class TestResultDecorator(
    proxyForInterface(itrial.IReporter, "_originalReporter")  # type: ignore[misc]
):
    """
    Base class for TestResult decorators.

    @ivar _originalReporter: The wrapped reporter instance.
    @type _originalReporter: A provider of L{itrial.IReporter}
    """


@implementer(itrial.IReporter)
class UncleanWarningsReporterWrapper(TestResultDecorator):
    """
    A wrapper for a reporter that converts L{util.DirtyReactorAggregateError}s
    to warnings.
    """

    def addError(self, test, error):
        """
        If the error is a L{util.DirtyReactorAggregateError}, instead of
        reporting it as a normal error, throw a warning.
        """
        if isinstance(error, Failure) and error.check(util.DirtyReactorAggregateError):
            warnings.warn(error.getErrorMessage())
        else:
            self._originalReporter.addError(test, error)


@implementer(itrial.IReporter)
class _ExitWrapper(TestResultDecorator):
    """
    A wrapper for a reporter that causes the reporter to stop after
    unsuccessful tests.
    """

    def addError(self, *args, **kwargs):
        self.shouldStop = True
        return self._originalReporter.addError(*args, **kwargs)

    def addFailure(self, *args, **kwargs):
        self.shouldStop = True
        return self._originalReporter.addFailure(*args, **kwargs)
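

# Illustrative composition (a sketch): any IReporter provider can be wrapped
# so that the runner halts after the first non-success.
#
#     reporter = Reporter(stream=sys.stdout)
#     stopping = _ExitWrapper(reporter)
#     # A failure now sets ``shouldStop`` before delegating:
#     #     stopping.addFailure(test, fail)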


class _AdaptedReporter(TestResultDecorator):
    """
    TestResult decorator that makes sure that addError only gets tests that
    have been adapted with a particular test adapter.
    """

    def __init__(self, original, testAdapter):
        """
        Construct an L{_AdaptedReporter}.

        @param original: A provider of L{itrial.IReporter}.
        @param testAdapter: A callable that returns an L{itrial.ITestCase}.
        """
        TestResultDecorator.__init__(self, original)
        self.testAdapter = testAdapter

    def addError(self, test, error):
        """
        See L{itrial.IReporter}.
        """
        test = self.testAdapter(test)
        return self._originalReporter.addError(test, error)

    def addExpectedFailure(self, test, failure, todo=None):
        """
        See L{itrial.IReporter}.

        @type test: A L{pyunit.TestCase}.
        @type failure: A L{failure.Failure} or L{AssertionError}
        @type todo: A L{unittest.Todo} or None

        When C{todo} is L{None} a generic C{unittest.Todo} is built.

        L{pyunit.TestCase}'s C{run()} calls this with 3 positional arguments
        (without C{todo}).
        """
        return self._originalReporter.addExpectedFailure(
            self.testAdapter(test), failure, todo
        )

    def addFailure(self, test, failure):
        """
        See L{itrial.IReporter}.
        """
        test = self.testAdapter(test)
        return self._originalReporter.addFailure(test, failure)

    def addSkip(self, test, skip):
        """
        See L{itrial.IReporter}.
        """
        test = self.testAdapter(test)
        return self._originalReporter.addSkip(test, skip)

    def addUnexpectedSuccess(self, test, todo=None):
        """
        See L{itrial.IReporter}.

        @type test: A L{pyunit.TestCase}.
        @type todo: A L{unittest.Todo} or None

        When C{todo} is L{None} a generic C{unittest.Todo} is built.

        L{pyunit.TestCase}'s C{run()} calls this with 2 positional arguments
        (without C{todo}).
        """
        test = self.testAdapter(test)
        return self._originalReporter.addUnexpectedSuccess(test, todo)

    def startTest(self, test):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.startTest(self.testAdapter(test))

    def stopTest(self, test):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.stopTest(self.testAdapter(test))


@implementer(itrial.IReporter)
class Reporter(TestResult):
    """
    A basic L{TestResult} with support for writing to a stream.

    @ivar _startTime: The time when the first test was started.  It defaults
        to L{None}, which means that no test was actually launched.
    @type _startTime: C{float} or L{None}

    @ivar _warningCache: A C{set} of tuples of warning message (file, line,
        text, category) which have already been written to the output stream
        during the currently executing test.  This is used to avoid writing
        duplicates of the same warning to the output stream.
    @type _warningCache: C{set}

    @ivar _publisher: The log publisher which will be observed for warning
        events.
    @type _publisher: L{twisted.python.log.LogPublisher}
    """

    _separator = "-" * 79
    _doubleSeparator = "=" * 79

    def __init__(
        self, stream=sys.stdout, tbformat="default", realtime=False, publisher=None
    ):
        super().__init__()
        self._stream = SafeStream(stream)
        self.tbformat = tbformat
        self.realtime = realtime
        self._startTime = None
        self._warningCache = set()

        # Start observing log events so as to be able to report warnings.
        self._publisher = publisher
        if publisher is not None:
            publisher.addObserver(self._observeWarnings)

    def _observeWarnings(self, event):
        """
        Observe warning events and write them to C{self._stream}.

        This method is a log observer which will be registered with
        C{self._publisher.addObserver}.

        @param event: A C{dict} from the logging system.  If it has a
            C{'warning'} key, a logged warning will be extracted from it and
            possibly written to C{self._stream}.
        """
        if "warning" in event:
            key = (
                event["filename"],
                event["lineno"],
                event["category"].split(".")[-1],
                str(event["warning"]),
            )
            if key not in self._warningCache:
                self._warningCache.add(key)
                self._stream.write("%s:%s: %s: %s\n" % key)
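
    # Shape of the log event consumed above, for illustration (these are the
    # keys emitted by twisted.python.log's warning hook; the values here are
    # made up):
    #
    #     {"warning": UserWarning("deprecated"), "filename": "foo.py",
    #      "lineno": 10, "category": "builtins.UserWarning"}
    #
    # which would be written as ``foo.py:10: UserWarning: deprecated``.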

    def startTest(self, test):
        """
        Called when a test begins to run.  Records the time when it was first
        called and resets the warning cache.

        @param test: L{ITestCase}
        """
        super().startTest(test)
        if self._startTime is None:
            self._startTime = self._getTime()
        self._warningCache = set()

    def addFailure(self, test, fail):
        """
        Called when a test fails.  If C{realtime} is set, then it prints the
        error to the stream.

        @param test: L{ITestCase} that failed.
        @param fail: L{failure.Failure} containing the error.
        """
        super().addFailure(test, fail)
        if self.realtime:
            fail = self.failures[-1][1]  # guarantee it's a Failure
            self._write(self._formatFailureTraceback(fail))

    def addError(self, test, error):
        """
        Called when a test raises an error.  If C{realtime} is set, then it
        prints the error to the stream.

        @param test: L{ITestCase} that raised the error.
        @param error: L{failure.Failure} containing the error.
        """
        error = self._getFailure(error)
        super().addError(test, error)
        if self.realtime:
            error = self.errors[-1][1]  # guarantee it's a Failure
            self._write(self._formatFailureTraceback(error))

    def _write(self, format, *args):
        """
        Safely write to the reporter's stream.

        @param format: A format string to write.
        @param args: The arguments for the format string.
        """
        s = str(format)
        assert isinstance(s, str)
        if args:
            self._stream.write(s % args)
        else:
            self._stream.write(s)
        untilConcludes(self._stream.flush)

    def _writeln(self, format, *args):
        """
        Safely write a line to the reporter's stream.  Newline is appended to
        the format string.

        @param format: A format string to write.
        @param args: The arguments for the format string.
        """
        self._write(format, *args)
        self._write("\n")

    def upDownError(self, method, error, warn=True, printStatus=True):
        super().upDownError(method, error, warn, printStatus)
        if warn:
            tbStr = self._formatFailureTraceback(error)
            log.msg(tbStr)
            msg = "caught exception in {}, your TestCase is broken\n\n{}".format(
                method,
                tbStr,
            )
            warnings.warn(msg, BrokenTestCaseWarning, stacklevel=2)

    def cleanupErrors(self, errs):
        super().cleanupErrors(errs)
        warnings.warn(
            "%s\n%s"
            % (
                "REACTOR UNCLEAN! traceback(s) follow: ",
                self._formatFailureTraceback(errs),
            ),
            BrokenTestCaseWarning,
        )

    def _trimFrames(self, frames):
        """
        Trim frames to remove internal paths.

        When a C{SynchronousTestCase} method fails synchronously, the stack
        looks like this:
         - [0]: C{SynchronousTestCase._run}
         - [1]: C{util.runWithWarningsSuppressed}
         - [2:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}

        When a C{TestCase} method fails synchronously, the stack looks like
        this:
         - [0]: C{defer.maybeDeferred}
         - [1]: C{utils.runWithWarningsSuppressed}
         - [2]: C{utils.runWithWarningsSuppressed}
         - [3:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}

        When a method fails inside a C{Deferred} (i.e., when the test method
        returns a C{Deferred}, and that C{Deferred}'s errback fires), the
        stack captured inside the resulting C{Failure} looks like this:
         - [0]: C{defer.Deferred._runCallbacks}
         - [1:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}

        As a result, we want to trim either [maybeDeferred, runWWS, runWWS] or
        [Deferred._runCallbacks] or [SynchronousTestCase._run, runWWS] from
        the front, and trim the [_synctest.fail] from the end.

        There is also another case, when the test method is badly defined and
        contains extra arguments.

        If it doesn't recognize one of these cases, it just returns the
        original frames.

        @param frames: The C{list} of frames from the test failure.

        @return: The C{list} of frames to display.
        """
        newFrames = list(frames)

        if len(frames) < 2:
            return newFrames

        firstMethod = newFrames[0][0]
        firstFile = os.path.splitext(os.path.basename(newFrames[0][1]))[0]

        secondMethod = newFrames[1][0]
        secondFile = os.path.splitext(os.path.basename(newFrames[1][1]))[0]

        syncCase = (("_run", "_synctest"), ("runWithWarningsSuppressed", "util"))
        asyncCase = (("maybeDeferred", "defer"), ("runWithWarningsSuppressed", "utils"))

        twoFrames = ((firstMethod, firstFile), (secondMethod, secondFile))

        # On PY3, we have an extra frame which is reraising the exception
        for frame in newFrames:
            frameFile = os.path.splitext(os.path.basename(frame[1]))[0]
            if frameFile == "compat" and frame[0] == "reraise":
                # If it's the reraise helper in the compat module, drop it
                newFrames.pop(newFrames.index(frame))

        if twoFrames == syncCase:
            newFrames = newFrames[2:]
        elif twoFrames == asyncCase:
            newFrames = newFrames[3:]
        elif (firstMethod, firstFile) == ("_runCallbacks", "defer"):
            newFrames = newFrames[1:]

        if not newFrames:
            # The method fails before getting called, probably an argument
            # problem
            return newFrames

        last = newFrames[-1]
        if (
            last[0].startswith("fail")
            and os.path.splitext(os.path.basename(last[1]))[0] == "_synctest"
        ):
            newFrames = newFrames[:-1]

        return newFrames
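
    # Worked example (a sketch; frame tuples abbreviated to their first two
    # elements, (funcName, fileName)).  A synchronous SynchronousTestCase
    # failure arrives as
    #
    #     [("_run", ".../_synctest.py"), ("runWithWarningsSuppressed", ".../util.py"),
    #      ("test_foo", ".../test_mod.py"), ("fail", ".../_synctest.py")]
    #
    # and is trimmed to just the test's own frame:
    #
    #     [("test_foo", ".../test_mod.py")]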

    def _formatFailureTraceback(self, fail):
        if isinstance(fail, str):
            return fail.rstrip() + "\n"
        fail.frames, frames = self._trimFrames(fail.frames), fail.frames
        result = fail.getTraceback(detail=self.tbformat, elideFrameworkCode=True)
        fail.frames = frames
        return result

    def _groupResults(self, results, formatter):
        """
        Group tests together based on their results.

        @param results: An iterable of tuples of two or more elements.  The
            first element of each tuple is a test case.  The remaining
            elements describe the outcome of that test case.

        @param formatter: A callable which turns a test case result into a
            string.  The elements after the first of the tuples in
            C{results} will be passed as positional arguments to
            C{formatter}.

        @return: A C{list} of two-tuples.  The first element of each tuple
            is a unique string describing one result from at least one of
            the test cases in C{results}.  The second element is a list of
            the test cases which had that result.
        """
        groups = OrderedDict()
        for content in results:
            case = content[0]
            outcome = content[1:]
            key = formatter(*outcome)
            groups.setdefault(key, []).append(case)
        return list(groups.items())
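
    # Illustrative grouping (hypothetical tests sharing a skip reason):
    #
    #     _groupResults(
    #         [(testA, "no network"), (testB, "no network"), (testC, "win32 only")],
    #         lambda reason: reason,
    #     )
    #     # => [("no network", [testA, testB]), ("win32 only", [testC])]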

    def _printResults(self, flavor, errors, formatter):
        """
        Print a group of errors to the stream.

        @param flavor: A string indicating the kind of error (e.g. 'TODO').
        @param errors: A list of errors, often L{failure.Failure}s, but
            sometimes 'todo' errors.
        @param formatter: A callable that knows how to format the errors.
        """
        for reason, cases in self._groupResults(errors, formatter):
            self._writeln(self._doubleSeparator)
            self._writeln(flavor)
            self._write(reason)
            self._writeln("")
            for case in cases:
                self._writeln(case.id())

    def _printExpectedFailure(self, error, todo):
        return "Reason: {!r}\n{}".format(
            todo.reason, self._formatFailureTraceback(error)
        )

    def _printUnexpectedSuccess(self, todo):
        ret = f"Reason: {todo.reason!r}\n"
        if todo.errors:
            ret += "Expected errors: {}\n".format(", ".join(todo.errors))
        return ret

    def _printErrors(self):
        """
        Print all of the non-success results to the stream in full.
        """
        self._write("\n")
        self._printResults("[SKIPPED]", self.skips, lambda x: "%s\n" % x)
        self._printResults("[TODO]", self.expectedFailures, self._printExpectedFailure)
        self._printResults("[FAIL]", self.failures, self._formatFailureTraceback)
        self._printResults("[ERROR]", self.errors, self._formatFailureTraceback)
        self._printResults(
            "[SUCCESS!?!]", self.unexpectedSuccesses, self._printUnexpectedSuccess
        )

    def _getSummary(self):
        """
        Return a formatted count of test status results.
        """
        summaries = []
        for stat in (
            "skips",
            "expectedFailures",
            "failures",
            "errors",
            "unexpectedSuccesses",
        ):
            num = len(getattr(self, stat))
            if num:
                summaries.append("%s=%d" % (stat, num))
        if self.successes:
            summaries.append("successes=%d" % (self.successes,))
        summary = (summaries and " (" + ", ".join(summaries) + ")") or ""
        return summary
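
    # Example of the produced string (fabricated counts): with one skip, two
    # failures, and three successes this returns
    # " (skips=1, failures=2, successes=3)"; with nothing recorded, "".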

    def _printSummary(self):
        """
        Print a line summarising the test results to the stream.
        """
        summary = self._getSummary()
        if self.wasSuccessful():
            status = "PASSED"
        else:
            status = "FAILED"
        self._write("%s%s\n", status, summary)

    def done(self):
        """
        Summarize the result of the test run.

        The summary includes a report of all of the errors, todos, skips and
        so forth that occurred during the run.  It also includes the number of
        tests that were run and how long it took to run them (not including
        load time).

        Expects that C{_printErrors}, C{_writeln}, C{_write}, C{_printSummary}
        and C{_separator} are all implemented.
        """
        if self._publisher is not None:
            self._publisher.removeObserver(self._observeWarnings)
        self._printErrors()
        self._writeln(self._separator)
        if self._startTime is not None:
            self._writeln(
                "Ran %d tests in %.3fs", self.testsRun, time.time() - self._startTime
            )
        self._write("\n")
        self._printSummary()


class MinimalReporter(Reporter):
    """
    A minimalist reporter that prints only a summary of the test result, in
    the form of (timeTaken, #tests, #tests, #errors, #failures, #skips).
    """

    def _printErrors(self):
        """
        Don't print a detailed summary of errors.  We only care about the
        counts.
        """

    def _printSummary(self):
        """
        Print out a one-line summary of the form:
        '%(runtime) %(number_of_tests) %(number_of_tests) %(num_errors)
        %(num_failures) %(num_skips)'
        """
        numTests = self.testsRun
        if self._startTime is not None:
            timing = self._getTime() - self._startTime
        else:
            timing = 0
        t = (
            timing,
            numTests,
            numTests,
            len(self.errors),
            len(self.failures),
            len(self.skips),
        )
        self._writeln(" ".join(map(str, t)))
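
    # Example output line (fabricated numbers; the runtime is printed with
    # full float precision): a 1.2 second run of 7 tests with one error and
    # no failures or skips prints roughly:
    #
    #     1.2 7 7 1 0 0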


class TextReporter(Reporter):
    """
    Simple reporter that prints a single character for each test as it runs,
    along with the standard Trial summary text.
    """

    def addSuccess(self, test):
        super().addSuccess(test)
        self._write(".")

    def addError(self, *args):
        super().addError(*args)
        self._write("E")

    def addFailure(self, *args):
        super().addFailure(*args)
        self._write("F")

    def addSkip(self, *args):
        super().addSkip(*args)
        self._write("S")

    def addExpectedFailure(self, *args):
        super().addExpectedFailure(*args)
        self._write("T")

    def addUnexpectedSuccess(self, *args):
        super().addUnexpectedSuccess(*args)
        self._write("!")


class VerboseTextReporter(Reporter):
    """
    A verbose reporter that prints the name of each test as it is running.

    Each line is printed with the name of the test, followed by the result of
    that test.
    """

    # This is actually the bwverbose option
    def startTest(self, tm):
        self._write("%s ... ", tm.id())
        super().startTest(tm)

    def addSuccess(self, test):
        super().addSuccess(test)
        self._write("[OK]")

    def addError(self, *args):
        super().addError(*args)
        self._write("[ERROR]")

    def addFailure(self, *args):
        super().addFailure(*args)
        self._write("[FAILURE]")

    def addSkip(self, *args):
        super().addSkip(*args)
        self._write("[SKIPPED]")

    def addExpectedFailure(self, *args):
        super().addExpectedFailure(*args)
        self._write("[TODO]")

    def addUnexpectedSuccess(self, *args):
        super().addUnexpectedSuccess(*args)
        self._write("[SUCCESS!?!]")

    def stopTest(self, test):
        super().stopTest(test)
        self._write("\n")


class TimingTextReporter(VerboseTextReporter):
    """
    Prints out each test as it is running, followed by the time taken for each
    test to run.
    """

    def stopTest(self, method):
        """
        Mark the test as stopped, and write the time it took to run the test
        to the stream.
        """
        super().stopTest(method)
        self._write("(%.03f secs)\n" % self._lastTime)


class _AnsiColorizer:
    """
    A colorizer is an object that loosely wraps around a stream, allowing
    callers to write text to the stream in a particular color.

    Colorizer classes must implement C{supported()} and C{write(text, color)}.
    """

    _colors = dict(
        black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37
    )

    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        """
        A class method that returns True if the current platform supports
        coloring terminal output using this method.  Returns False otherwise.
        """
        if not stream.isatty():
            return False  # auto color only on TTYs
        try:
            import curses
        except ImportError:
            return False
        else:
            try:
                try:
                    return curses.tigetnum("colors") > 2
                except curses.error:
                    curses.setupterm()
                    return curses.tigetnum("colors") > 2
            except BaseException:
                # guess false in case of error
                return False

    def write(self, text, color):
        """
        Write the given text to the stream in the given color.

        @param text: Text to be written to the stream.
        @param color: A string label for a color.  e.g. 'red', 'white'.
        """
        color = self._colors[color]
        self.stream.write(f"\x1b[{color};1m{text}\x1b[0m")
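
    # For example (illustrative), ``write("OK", "green")`` emits
    # ``"\x1b[32;1mOK\x1b[0m"``: SGR code 32 selects green, ``;1`` selects
    # bold/bright, and ``\x1b[0m`` resets the attributes afterwards.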


class _Win32Colorizer:
    """
    See _AnsiColorizer docstring.
    """

    def __init__(self, stream):
        from win32console import (
            FOREGROUND_BLUE,
            FOREGROUND_GREEN,
            FOREGROUND_INTENSITY,
            FOREGROUND_RED,
            STD_OUTPUT_HANDLE,
            GetStdHandle,
        )

        red, green, blue, bold = (
            FOREGROUND_RED,
            FOREGROUND_GREEN,
            FOREGROUND_BLUE,
            FOREGROUND_INTENSITY,
        )
        self.stream = stream
        self.screenBuffer = GetStdHandle(STD_OUTPUT_HANDLE)
        self._colors = {
            "normal": red | green | blue,
            "red": red | bold,
            "green": green | bold,
            "blue": blue | bold,
            "yellow": red | green | bold,
            "magenta": red | blue | bold,
            "cyan": green | blue | bold,
            "white": red | green | blue | bold,
        }

    @classmethod
    def supported(cls, stream=sys.stdout):
        try:
            import win32console

            screenBuffer = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
        except ImportError:
            return False
        import pywintypes

        try:
            screenBuffer.SetConsoleTextAttribute(
                win32console.FOREGROUND_RED
                | win32console.FOREGROUND_GREEN
                | win32console.FOREGROUND_BLUE
            )
        except pywintypes.error:
            return False
        else:
            return True

    def write(self, text, color):
        color = self._colors[color]
        self.screenBuffer.SetConsoleTextAttribute(color)
        self.stream.write(text)
        self.screenBuffer.SetConsoleTextAttribute(self._colors["normal"])


class _NullColorizer:
    """
    See _AnsiColorizer docstring.
    """

    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        return True

    def write(self, text, color):
        self.stream.write(text)


@implementer(itrial.IReporter)
class SubunitReporter:
    """
    Reports test output via Subunit.

    @ivar _subunit: The subunit protocol client that we are wrapping.

    @ivar _successful: An internal variable, used to track whether we have
        received only successful results.

    @since: 10.0
    """

    testsRun = None

    def __init__(
        self, stream=sys.stdout, tbformat="default", realtime=False, publisher=None
    ):
        """
        Construct a L{SubunitReporter}.

        @param stream: A file-like object representing the stream to print
            output to.  Defaults to stdout.
        @param tbformat: The format for tracebacks.  Ignored, since subunit
            always uses Python's standard format.
        @param realtime: Whether or not to print exceptions in the middle
            of the test results.  Ignored, since subunit always does this.
        @param publisher: The log publisher which will be preserved for
            reporting events.  Ignored, as it's not relevant to subunit.
        """
        if TestProtocolClient is None:
            raise Exception("Subunit not available")
        self._subunit = TestProtocolClient(stream)
        self._successful = True

    def done(self):
        """
        Record that the entire test suite run is finished.

        We do nothing, since a summary clause is irrelevant to the subunit
        protocol.
        """

    @property
    def shouldStop(self):
        """
        Whether or not the test runner should stop running tests.
        """
        return self._subunit.shouldStop

    def stop(self):
        """
        Signal that the test runner should stop running tests.
        """
        return self._subunit.stop()

    def wasSuccessful(self):
        """
        Has the test run been successful so far?

        @return: C{True} if we have received no reports of errors or failures,
            C{False} otherwise.
        """
        # Subunit has a bug in its implementation of wasSuccessful, see
        # https://bugs.edge.launchpad.net/subunit/+bug/491090, so we can't
        # simply forward it on.
        return self._successful

    def startTest(self, test):
        """
        Record that C{test} has started.
        """
        return self._subunit.startTest(test)

    def stopTest(self, test):
        """
        Record that C{test} has completed.
        """
        return self._subunit.stopTest(test)

    def addSuccess(self, test):
        """
        Record that C{test} was successful.
        """
        return self._subunit.addSuccess(test)

    def addSkip(self, test, reason):
        """
        Record that C{test} was skipped for C{reason}.

        Some versions of subunit don't have support for addSkip.  In those
        cases, the skip is reported as a success.

        @param test: A unittest-compatible C{TestCase}.
        @param reason: The reason for it being skipped.  The C{str()} of this
            object will be included in the subunit output stream.
        """
        addSkip = getattr(self._subunit, "addSkip", None)
        if addSkip is None:
            self.addSuccess(test)
        else:
            self._subunit.addSkip(test, reason)

    def addError(self, test, err):
        """
        Record that C{test} failed with an unexpected error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addError(test, util.excInfoOrFailureToExcInfo(err))

    def addFailure(self, test, err):
        """
        Record that C{test} failed an assertion with the error C{err}.

        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addFailure(test, util.excInfoOrFailureToExcInfo(err))

    def addExpectedFailure(self, test, failure, todo=None):
        """
        Record an expected failure from a test.

        Some versions of subunit do not implement this.  For those versions,
        we record a success.
        """
        failure = util.excInfoOrFailureToExcInfo(failure)
        addExpectedFailure = getattr(self._subunit, "addExpectedFailure", None)
        if addExpectedFailure is None:
            self.addSuccess(test)
        else:
            addExpectedFailure(test, failure)

    def addUnexpectedSuccess(self, test, todo=None):
        """
        Record an unexpected success.

        Since subunit has no way of expressing this concept, we record a
        success on the subunit stream.
        """
        # Not represented in pyunit/subunit.
        self.addSuccess(test)
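
    # Hedged construction sketch: SubunitReporter requires the third-party
    # ``subunit`` package and writes a binary subunit protocol stream rather
    # than human-readable text (the file name below is illustrative):
    #
    #     if TestProtocolClient is not None:
    #         reporter = SubunitReporter(stream=open("results.subunit", "wb"))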


class TreeReporter(Reporter):
    """
    Print out the tests in the form of a tree.

    Tests are indented according to the module and class to which they
    belong.  Results are printed in ANSI color.
    """

    currentLine = ""
    indent = "  "
    columns = 79

    FAILURE = "red"
    ERROR = "red"
    TODO = "blue"
    SKIP = "blue"
    TODONE = "red"
    SUCCESS = "green"

    def __init__(self, stream=sys.stdout, *args, **kwargs):
        super().__init__(stream, *args, **kwargs)
        self._lastTest = []
        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
            if colorizer.supported(stream):
                self._colorizer = colorizer(stream)
                break

    def getDescription(self, test):
        """
        Return the name of the method which 'test' represents.  This is
        what gets displayed in the leaves of the tree.

        e.g. getDescription(TestCase('test_foo')) ==> test_foo
        """
        return test.id().split(".")[-1]

    def addSuccess(self, test):
        super().addSuccess(test)
        self.endLine("[OK]", self.SUCCESS)

    def addError(self, *args):
        super().addError(*args)
        self.endLine("[ERROR]", self.ERROR)

    def addFailure(self, *args):
        super().addFailure(*args)
        self.endLine("[FAIL]", self.FAILURE)

    def addSkip(self, *args):
        super().addSkip(*args)
        self.endLine("[SKIPPED]", self.SKIP)

    def addExpectedFailure(self, *args):
        super().addExpectedFailure(*args)
        self.endLine("[TODO]", self.TODO)

    def addUnexpectedSuccess(self, *args):
        super().addUnexpectedSuccess(*args)
        self.endLine("[SUCCESS!?!]", self.TODONE)

    def _write(self, format, *args):
        if args:
            format = format % args
        self.currentLine = format
        super()._write(self.currentLine)

    def _getPreludeSegments(self, testID):
        """
        Return a list of all non-leaf segments to display in the tree.

        Normally this is the module and class name.
        """
        segments = testID.split(".")[:-1]
        if len(segments) == 0:
            return segments
        segments = [
            seg for seg in (".".join(segments[:-1]), segments[-1]) if len(seg) > 0
        ]
        return segments

    def _testPrelude(self, testID):
        """
        Write the name of the test to the stream, indenting it appropriately.

        If the test is the first test in a new 'branch' of the tree, also
        write all of the parents in that branch.
        """
        segments = self._getPreludeSegments(testID)
        indentLevel = 0
        for seg in segments:
            if indentLevel < len(self._lastTest):
                if seg != self._lastTest[indentLevel]:
                    self._write(f"{self.indent * indentLevel}{seg}\n")
            else:
                self._write(f"{self.indent * indentLevel}{seg}\n")
            indentLevel += 1
        self._lastTest = segments
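
    # Illustrative tree for tests "mod.FooTests.test_a" and
    # "mod.FooTests.test_b" (names hypothetical, column padding approximate);
    # the module/class prelude lines are only re-written when the
    # corresponding segment changes:
    #
    #     mod
    #       FooTests
    #         test_a ...                                                  [OK]
    #         test_b ...                                                  [OK]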

    def cleanupErrors(self, errs):
        self._colorizer.write(" cleanup errors", self.ERROR)
        self.endLine("[ERROR]", self.ERROR)
        super().cleanupErrors(errs)

    def upDownError(self, method, error, warn, printStatus):
        self._colorizer.write(" %s" % method, self.ERROR)
        if printStatus:
            self.endLine("[ERROR]", self.ERROR)
        super().upDownError(method, error, warn, printStatus)

    def startTest(self, test):
        """
        Called when C{test} starts.  Writes the test's name to the stream
        using a tree format.
        """
        self._testPrelude(test.id())
        self._write(
            "%s%s ... "
            % (self.indent * (len(self._lastTest)), self.getDescription(test))
        )
        super().startTest(test)

    def endLine(self, message, color):
        """
        Print 'message' in the given color.

        @param message: A string message, usually '[OK]' or something similar.
        @param color: A string color, 'red', 'green' and so forth.
        """
        spaces = " " * (self.columns - len(self.currentLine) - len(message))
        super()._write(spaces)
        self._colorizer.write(message, color)
        super()._write("\n")

    def _printSummary(self):
        """
        Print a line summarising the test results to the stream, and color the
        status result.
        """
        summary = self._getSummary()
        if self.wasSuccessful():
            status = "PASSED"
            color = self.SUCCESS
        else:
            status = "FAILED"
            color = self.FAILURE
        self._colorizer.write(status, color)
        self._write("%s\n", summary)
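

# Minimal end-to-end sketch (assumptions: "mypkg.test_mod" is a hypothetical
# test module; under normal use trial constructs and drives the reporter for
# you):
#
#     suite = pyunit.defaultTestLoader.loadTestsFromName("mypkg.test_mod")
#     result = TreeReporter(stream=sys.stdout)
#     suite.run(result)
#     result.done()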