Fixed #20680 -- Deprecated django.utils.unittest.

Refs #19204.
Aymeric Augustin, 11 years ago
Parent commit: 7f264e02f4

django/utils/unittest/__init__.py (+4 -75)

@@ -1,80 +1,9 @@
-"""
-unittest2
+import warnings
 
-unittest2 is a backport of the new features added to the unittest testing
-framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
-
-To use unittest2 instead of unittest simply replace ``import unittest`` with
-``import unittest2``.
-
-
-Copyright (c) 1999-2003 Steve Purcell
-Copyright (c) 2003-2010 Python Software Foundation
-This module is free software, and you may redistribute it and/or modify
-it under the same terms as Python itself, so long as this copyright message
-and disclaimer are retained in their original form.
-
-IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
-SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
-THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
-THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE.  THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
-AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
-SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
-
-import sys
-
-# Django hackery to load the appropriate version of unittest
+warnings.warn("django.utils.unittest will be removed in Django 1.9.",
+    PendingDeprecationWarning)
 
 try:
-    # check the system path first
     from unittest2 import *
 except ImportError:
-    if sys.version_info >= (2,7):
-        # unittest2 features are native in Python 2.7
-        from unittest import *
-    else:
-        # otherwise use our bundled version
-        __all__ = ['TestResult', 'TestCase', 'TestSuite',
-                   'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
-                   'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
-                   'expectedFailure', 'TextTestResult', '__version__', 'collector']
-
-        __version__ = '0.5.1'
-
-        # Expose obsolete functions for backwards compatibility
-        __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
-
-
-        from django.utils.unittest.collector import collector
-        from django.utils.unittest.result import TestResult
-        from django.utils.unittest.case import \
-            TestCase, FunctionTestCase, SkipTest, skip, skipIf,\
-            skipUnless, expectedFailure
-
-        from django.utils.unittest.suite import BaseTestSuite, TestSuite
-        from django.utils.unittest.loader import \
-            TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,\
-            findTestCases
-
-        from django.utils.unittest.main import TestProgram, main, main_
-        from django.utils.unittest.runner import TextTestRunner, TextTestResult
-
-        try:
-            from django.utils.unittest.signals import\
-                installHandler, registerResult, removeResult, removeHandler
-        except ImportError:
-            # Compatibility with platforms that don't have the signal module
-            pass
-        else:
-            __all__.extend(['installHandler', 'registerResult', 'removeResult',
-                            'removeHandler'])
-
-        # deprecated
-        _TextTestResult = TextTestResult
-
-        __unittest = True
+    from unittest import *
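With this commit, importing from django.utils.unittest emits a PendingDeprecationWarning and then simply re-exports unittest2 (if installed) or the standard library's unittest. A minimal migration sketch for downstream test code, not part of the commit itself — the app and test names are hypothetical:

    # Before: from django.utils.unittest import TestCase  (now warns)
    # After: use the standard library directly.
    import unittest

    class MyAppTests(unittest.TestCase):
        def test_addition(self):
            self.assertEqual(1 + 1, 2)

    if __name__ == "__main__":
        unittest.main()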

django/utils/unittest/__main__.py (+0 -10)

@@ -1,10 +0,0 @@
-"""Main entry point"""
-
-import sys
-if sys.argv[0].endswith("__main__.py"):
-    sys.argv[0] = "unittest2"
-
-__unittest = True
-
-from django.utils.unittest.main import main_
-main_()
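The removed __main__ hook made the package runnable as python -m django.utils.unittest. The standard library exposes the same entry point; equivalent invocations, shown here as a sketch with a hypothetical dotted test path:

    # python -m unittest discover -s . -p "test*.py"    # recursive discovery
    # python -m unittest myapp.tests.MyAppTests         # run one test case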

django/utils/unittest/case.py (+0 -1076)

@@ -1,1076 +0,0 @@
-"""Test case implementation"""
-
-import sys
-import difflib
-import pprint
-import re
-import unittest
-import warnings
-
-from django.utils.unittest import result
-from django.utils.unittest.util import\
-    safe_repr, safe_str, strclass,\
-    unorderable_list_difference
-
-from django.utils.unittest.compatibility import wraps
-
-__unittest = True
-
-
-DIFF_OMITTED = ('\nDiff is %s characters long. '
-                 'Set self.maxDiff to None to see it.')
-
-class SkipTest(Exception):
-    """
-    Raise this exception in a test to skip it.
-
-    Usually you can use TestResult.skip() or one of the skipping decorators
-    instead of raising this directly.
-    """
-
-class _ExpectedFailure(Exception):
-    """
-    Raise this when a test is expected to fail.
-
-    This is an implementation detail.
-    """
-
-    def __init__(self, exc_info):
-        # can't use super because Python 2.4 exceptions are old style
-        Exception.__init__(self)
-        self.exc_info = exc_info
-
-class _UnexpectedSuccess(Exception):
-    """
-    The test was supposed to fail, but it didn't!
-    """
-
-def _id(obj):
-    return obj
-
-def skip(reason):
-    """
-    Unconditionally skip a test.
-    """
-    def decorator(test_item):
-        if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
-            @wraps(test_item)
-            def skip_wrapper(*args, **kwargs):
-                raise SkipTest(reason)
-            test_item = skip_wrapper
-
-        test_item.__unittest_skip__ = True
-        test_item.__unittest_skip_why__ = reason
-        return test_item
-    return decorator
-
-def skipIf(condition, reason):
-    """
-    Skip a test if the condition is true.
-    """
-    if condition:
-        return skip(reason)
-    return _id
-
-def skipUnless(condition, reason):
-    """
-    Skip a test unless the condition is true.
-    """
-    if not condition:
-        return skip(reason)
-    return _id
-
-
-def expectedFailure(func):
-    @wraps(func)
-    def wrapper(*args, **kwargs):
-        try:
-            func(*args, **kwargs)
-        except Exception:
-            raise _ExpectedFailure(sys.exc_info())
-        raise _UnexpectedSuccess
-    return wrapper
-
-
-class _AssertRaisesContext(object):
-    """A context manager used to implement TestCase.assertRaises* methods."""
-
-    def __init__(self, expected, test_case, expected_regexp=None):
-        self.expected = expected
-        self.failureException = test_case.failureException
-        self.expected_regexp = expected_regexp
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_value, tb):
-        if exc_type is None:
-            try:
-                exc_name = self.expected.__name__
-            except AttributeError:
-                exc_name = str(self.expected)
-            raise self.failureException(
-                "%s not raised" % (exc_name,))
-        if not issubclass(exc_type, self.expected):
-            # let unexpected exceptions pass through
-            return False
-        self.exception = exc_value # store for later retrieval
-        if self.expected_regexp is None:
-            return True
-
-        expected_regexp = self.expected_regexp
-        if isinstance(expected_regexp, basestring):
-            expected_regexp = re.compile(expected_regexp)
-        if not expected_regexp.search(str(exc_value)):
-            raise self.failureException('"%s" does not match "%s"' %
-                     (expected_regexp.pattern, str(exc_value)))
-        return True
-
-
-class _TypeEqualityDict(object):
-
-    def __init__(self, testcase):
-        self.testcase = testcase
-        self._store = {}
-
-    def __setitem__(self, key, value):
-        self._store[key] = value
-
-    def __getitem__(self, key):
-        value = self._store[key]
-        if isinstance(value, basestring):
-            return getattr(self.testcase, value)
-        return value
-
-    def get(self, key, default=None):
-        if key in self._store:
-            return self[key]
-        return default
-
-
-class TestCase(unittest.TestCase):
-    """A class whose instances are single test cases.
-
-    By default, the test code itself should be placed in a method named
-    'runTest'.
-
-    If the fixture may be used for many test cases, create as
-    many test methods as are needed. When instantiating such a TestCase
-    subclass, specify in the constructor arguments the name of the test method
-    that the instance is to execute.
-
-    Test authors should subclass TestCase for their own tests. Construction
-    and deconstruction of the test's environment ('fixture') can be
-    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
-
-    If it is necessary to override the __init__ method, the base class
-    __init__ method must always be called. It is important that subclasses
-    should not change the signature of their __init__ method, since instances
-    of the classes are instantiated automatically by parts of the framework
-    in order to be run.
-    """
-
-    # This attribute determines which exception will be raised when
-    # the instance's assertion methods fail; test methods raising this
-    # exception will be deemed to have 'failed' rather than 'errored'
-
-    failureException = AssertionError
-
-    # This attribute sets the maximum length of a diff in failure messages
-    # by assert methods using difflib. It is looked up as an instance attribute
-    # so can be configured by individual tests if required.
-
-    maxDiff = 80*8
-
-    # This attribute determines whether long messages (including repr of
-    # objects used in assert methods) will be printed on failure in *addition*
-    # to any explicit message passed.
-
-    longMessage = True
-
-    # Attribute used by TestSuite for classSetUp
-
-    _classSetupFailed = False
-
-    def __init__(self, methodName='runTest'):
-        """Create an instance of the class that will use the named test
-           method when executed. Raises a ValueError if the instance does
-           not have a method with the specified name.
-        """
-        self._testMethodName = methodName
-        self._resultForDoCleanups = None
-        try:
-            testMethod = getattr(self, methodName)
-        except AttributeError:
-            raise ValueError("no such test method in %s: %s" % \
-                  (self.__class__, methodName))
-        self._testMethodDoc = testMethod.__doc__
-        self._cleanups = []
-
-        # Map types to custom assertEqual functions that will compare
-        # instances of said type in more detail to generate a more useful
-        # error message.
-        self._type_equality_funcs = _TypeEqualityDict(self)
-        self.addTypeEqualityFunc(dict, 'assertDictEqual')
-        self.addTypeEqualityFunc(list, 'assertListEqual')
-        self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
-        self.addTypeEqualityFunc(set, 'assertSetEqual')
-        self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
-        self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
-
-    def addTypeEqualityFunc(self, typeobj, function):
-        """Add a type specific assertEqual style function to compare a type.
-
-        This method is for use by TestCase subclasses that need to register
-        their own type equality functions to provide nicer error messages.
-
-        Args:
-            typeobj: The data type to call this function on when both values
-                    are of the same type in assertEqual().
-            function: The callable taking two arguments and an optional
-                    msg= argument that raises self.failureException with a
-                    useful error message when the two arguments are not equal.
-        """
-        self._type_equality_funcs[typeobj] = function
-
-    def addCleanup(self, function, *args, **kwargs):
-        """Add a function, with arguments, to be called when the test is
-        completed. Functions added are called on a LIFO basis and are
-        called after tearDown on test failure or success.
-
-        Cleanup items are called even if setUp fails (unlike tearDown)."""
-        self._cleanups.append((function, args, kwargs))
-
-    @classmethod
-    def setUpClass(cls):
-        "Hook method for setting up class fixture before running tests in the class."
-
-    @classmethod
-    def tearDownClass(cls):
-        "Hook method for deconstructing the class fixture after running all tests in the class."
-
-    def countTestCases(self):
-        return 1
-
-    def defaultTestResult(self):
-        return result.TestResult()
-
-    def shortDescription(self):
-        """Returns a one-line description of the test, or None if no
-        description has been provided.
-
-        The default implementation of this method returns the first line of
-        the specified test method's docstring.
-        """
-        doc = self._testMethodDoc
-        return doc and doc.split("\n")[0].strip() or None
-
-
-    def id(self):
-        return "%s.%s" % (strclass(self.__class__), self._testMethodName)
-
-    def __eq__(self, other):
-        if type(self) is not type(other):
-            return NotImplemented
-
-        return self._testMethodName == other._testMethodName
-
-    def __ne__(self, other):
-        return not self == other
-
-    def __hash__(self):
-        return hash((type(self), self._testMethodName))
-
-    def __str__(self):
-        return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
-
-    def __repr__(self):
-        return "<%s testMethod=%s>" % \
-               (strclass(self.__class__), self._testMethodName)
-
-    def _addSkip(self, result, reason):
-        addSkip = getattr(result, 'addSkip', None)
-        if addSkip is not None:
-            addSkip(self, reason)
-        else:
-            warnings.warn("Use of a TestResult without an addSkip method is deprecated",
-                          DeprecationWarning, 2)
-            result.addSuccess(self)
-
-    def run(self, result=None):
-        orig_result = result
-        if result is None:
-            result = self.defaultTestResult()
-            startTestRun = getattr(result, 'startTestRun', None)
-            if startTestRun is not None:
-                startTestRun()
-
-        self._resultForDoCleanups = result
-        result.startTest(self)
-
-        testMethod = getattr(self, self._testMethodName)
-
-        if (getattr(self.__class__, "__unittest_skip__", False) or
-            getattr(testMethod, "__unittest_skip__", False)):
-            # If the class or method was skipped.
-            try:
-                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
-                            or getattr(testMethod, '__unittest_skip_why__', ''))
-                self._addSkip(result, skip_why)
-            finally:
-                result.stopTest(self)
-            return
-        try:
-            success = False
-            try:
-                self.setUp()
-            except SkipTest as e:
-                self._addSkip(result, str(e))
-            except Exception:
-                result.addError(self, sys.exc_info())
-            else:
-                try:
-                    testMethod()
-                except self.failureException:
-                    result.addFailure(self, sys.exc_info())
-                except _ExpectedFailure as e:
-                    addExpectedFailure = getattr(result, 'addExpectedFailure', None)
-                    if addExpectedFailure is not None:
-                        addExpectedFailure(self, e.exc_info)
-                    else:
-                        warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
-                                      DeprecationWarning)
-                        result.addSuccess(self)
-                except _UnexpectedSuccess:
-                    addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
-                    if addUnexpectedSuccess is not None:
-                        addUnexpectedSuccess(self)
-                    else:
-                        warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
-                                      DeprecationWarning)
-                        result.addFailure(self, sys.exc_info())
-                except SkipTest as e:
-                    self._addSkip(result, str(e))
-                except Exception:
-                    result.addError(self, sys.exc_info())
-                else:
-                    success = True
-
-                try:
-                    self.tearDown()
-                except Exception:
-                    result.addError(self, sys.exc_info())
-                    success = False
-
-            cleanUpSuccess = self.doCleanups()
-            success = success and cleanUpSuccess
-            if success:
-                result.addSuccess(self)
-        finally:
-            result.stopTest(self)
-            if orig_result is None:
-                stopTestRun = getattr(result, 'stopTestRun', None)
-                if stopTestRun is not None:
-                    stopTestRun()
-
-    def doCleanups(self):
-        """Execute all cleanup functions. Normally called for you after
-        tearDown."""
-        result = self._resultForDoCleanups
-        ok = True
-        while self._cleanups:
-            function, args, kwargs = self._cleanups.pop(-1)
-            try:
-                function(*args, **kwargs)
-            except Exception:
-                ok = False
-                result.addError(self, sys.exc_info())
-        return ok
-
-    def __call__(self, *args, **kwds):
-        return self.run(*args, **kwds)
-
-    def debug(self):
-        """Run the test without collecting errors in a TestResult"""
-        self.setUp()
-        getattr(self, self._testMethodName)()
-        self.tearDown()
-        while self._cleanups:
-            function, args, kwargs = self._cleanups.pop(-1)
-            function(*args, **kwargs)
-
-    def skipTest(self, reason):
-        """Skip this test."""
-        raise SkipTest(reason)
-
-    def fail(self, msg=None):
-        """Fail immediately, with the given message."""
-        raise self.failureException(msg)
-
-    def assertFalse(self, expr, msg=None):
-        "Fail the test if the expression is true."
-        if expr:
-            msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
-            raise self.failureException(msg)
-
-    def assertTrue(self, expr, msg=None):
-        """Fail the test unless the expression is true."""
-        if not expr:
-            msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
-            raise self.failureException(msg)
-
-    def _formatMessage(self, msg, standardMsg):
-        """Honour the longMessage attribute when generating failure messages.
-        If longMessage is False this means:
-        * Use only an explicit message if it is provided
-        * Otherwise use the standard message for the assert
-
-        If longMessage is True:
-        * Use the standard message
-        * If an explicit message is provided, plus ' : ' and the explicit message
-        """
-        if not self.longMessage:
-            return msg or standardMsg
-        if msg is None:
-            return standardMsg
-        try:
-            return '%s : %s' % (standardMsg, msg)
-        except UnicodeDecodeError:
-            return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
-
-
-    def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
-        """Fail unless an exception of class excClass is thrown
-           by callableObj when invoked with arguments args and keyword
-           arguments kwargs. If a different type of exception is
-           thrown, it will not be caught, and the test case will be
-           deemed to have suffered an error, exactly as for an
-           unexpected exception.
-
-           If called with callableObj omitted or None, will return a
-           context object used like this::
-
-                with self.assertRaises(SomeException):
-                    do_something()
-
-           The context manager keeps a reference to the exception as
-           the 'exception' attribute. This allows you to inspect the
-           exception after the assertion::
-
-               with self.assertRaises(SomeException) as cm:
-                   do_something()
-               the_exception = cm.exception
-               self.assertEqual(the_exception.error_code, 3)
-        """
-        if callableObj is None:
-            return _AssertRaisesContext(excClass, self)
-        try:
-            callableObj(*args, **kwargs)
-        except excClass:
-            return
-
-        if hasattr(excClass,'__name__'):
-            excName = excClass.__name__
-        else:
-            excName = str(excClass)
-        raise self.failureException("%s not raised" % excName)
-
-    def _getAssertEqualityFunc(self, first, second):
-        """Get a detailed comparison function for the types of the two args.
-
-        Returns: A callable accepting (first, second, msg=None) that will
-        raise a failure exception if first != second with a useful human
-        readable error message for those types.
-        """
-        #
-        # NOTE(gregory.p.smith): I considered isinstance(first, type(second))
-        # and vice versa.  I opted for the conservative approach in case
-        # subclasses are not intended to be compared in detail to their super
-        # class instances using a type equality func.  This means testing
-        # subtypes won't automagically use the detailed comparison.  Callers
-        # should use their type specific assertSpamEqual method to compare
-        # subclasses if the detailed comparison is desired and appropriate.
-        # See the discussion in http://bugs.python.org/issue2578.
-        #
-        if type(first) is type(second):
-            asserter = self._type_equality_funcs.get(type(first))
-            if asserter is not None:
-                return asserter
-
-        return self._baseAssertEqual
-
-    def _baseAssertEqual(self, first, second, msg=None):
-        """The default assertEqual implementation, not type specific."""
-        if not first == second:
-            standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
-            msg = self._formatMessage(msg, standardMsg)
-            raise self.failureException(msg)
-
-    def assertEqual(self, first, second, msg=None):
-        """Fail if the two objects are unequal as determined by the '=='
-           operator.
-        """
-        assertion_func = self._getAssertEqualityFunc(first, second)
-        assertion_func(first, second, msg=msg)
-
-    def assertNotEqual(self, first, second, msg=None):
-        """Fail if the two objects are equal as determined by the '=='
-           operator.
-        """
-        if not first != second:
-            msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
-                                                           safe_repr(second)))
-            raise self.failureException(msg)
-
-    def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
-        """Fail if the two objects are unequal as determined by their
-           difference rounded to the given number of decimal places
-           (default 7) and comparing to zero, or by comparing that the
-           between the two objects is more than the given delta.
-
-           Note that decimal places (from zero) are usually not the same
-           as significant digits (measured from the most signficant digit).
-
-           If the two objects compare equal then they will automatically
-           compare almost equal.
-        """
-        if first == second:
-            # shortcut
-            return
-        if delta is not None and places is not None:
-            raise TypeError("specify delta or places not both")
-
-        if delta is not None:
-            if abs(first - second) <= delta:
-                return
-
-            standardMsg = '%s != %s within %s delta' % (safe_repr(first),
-                                                        safe_repr(second),
-                                                        safe_repr(delta))
-        else:
-            if places is None:
-                places = 7
-
-            if round(abs(second-first), places) == 0:
-                return
-
-            standardMsg = '%s != %s within %r places' % (safe_repr(first),
-                                                          safe_repr(second),
-                                                          places)
-        msg = self._formatMessage(msg, standardMsg)
-        raise self.failureException(msg)
-
-    def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
-        """Fail if the two objects are equal as determined by their
-           difference rounded to the given number of decimal places
-           (default 7) and comparing to zero, or by comparing that the
-           between the two objects is less than the given delta.
-
-           Note that decimal places (from zero) are usually not the same
-           as significant digits (measured from the most signficant digit).
-
-           Objects that are equal automatically fail.
-        """
-        if delta is not None and places is not None:
-            raise TypeError("specify delta or places not both")
-        if delta is not None:
-            if not (first == second) and abs(first - second) > delta:
-                return
-            standardMsg = '%s == %s within %s delta' % (safe_repr(first),
-                                                        safe_repr(second),
-                                                        safe_repr(delta))
-        else:
-            if places is None:
-                places = 7
-            if not (first == second) and round(abs(second-first), places) != 0:
-                return
-            standardMsg = '%s == %s within %r places' % (safe_repr(first),
-                                                         safe_repr(second),
-                                                         places)
-
-        msg = self._formatMessage(msg, standardMsg)
-        raise self.failureException(msg)
-
-    # Synonyms for assertion methods
-
-    # The plurals are undocumented.  Keep them that way to discourage use.
-    # Do not add more.  Do not remove.
-    # Going through a deprecation cycle on these would annoy many people.
-    assertEquals = assertEqual
-    assertNotEquals = assertNotEqual
-    assertAlmostEquals = assertAlmostEqual
-    assertNotAlmostEquals = assertNotAlmostEqual
-    assert_ = assertTrue
-
-    # These fail* assertion method names are pending deprecation and will
-    # be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
-    def _deprecate(original_func):
-        def deprecated_func(*args, **kwargs):
-            warnings.warn(
-                ('Please use %s instead.' % original_func.__name__),
-                PendingDeprecationWarning, 2)
-            return original_func(*args, **kwargs)
-        return deprecated_func
-
-    failUnlessEqual = _deprecate(assertEqual)
-    failIfEqual = _deprecate(assertNotEqual)
-    failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
-    failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
-    failUnless = _deprecate(assertTrue)
-    failUnlessRaises = _deprecate(assertRaises)
-    failIf = _deprecate(assertFalse)
-
-    def assertSequenceEqual(self, seq1, seq2,
-                            msg=None, seq_type=None, max_diff=80*8):
-        """An equality assertion for ordered sequences (like lists and tuples).
-
-        For the purposes of this function, a valid ordered sequence type is one
-        which can be indexed, has a length, and has an equality operator.
-
-        Args:
-            seq1: The first sequence to compare.
-            seq2: The second sequence to compare.
-            seq_type: The expected datatype of the sequences, or None if no
-                    datatype should be enforced.
-            msg: Optional message to use on failure instead of a list of
-                    differences.
-            max_diff: Maximum size off the diff, larger diffs are not shown
-        """
-        if seq_type is not None:
-            seq_type_name = seq_type.__name__
-            if not isinstance(seq1, seq_type):
-                raise self.failureException('First sequence is not a %s: %s'
-                                            % (seq_type_name, safe_repr(seq1)))
-            if not isinstance(seq2, seq_type):
-                raise self.failureException('Second sequence is not a %s: %s'
-                                            % (seq_type_name, safe_repr(seq2)))
-        else:
-            seq_type_name = "sequence"
-
-        differing = None
-        try:
-            len1 = len(seq1)
-        except (TypeError, NotImplementedError):
-            differing = 'First %s has no length.    Non-sequence?' % (
-                    seq_type_name)
-
-        if differing is None:
-            try:
-                len2 = len(seq2)
-            except (TypeError, NotImplementedError):
-                differing = 'Second %s has no length.    Non-sequence?' % (
-                        seq_type_name)
-
-        if differing is None:
-            if seq1 == seq2:
-                return
-
-            seq1_repr = repr(seq1)
-            seq2_repr = repr(seq2)
-            if len(seq1_repr) > 30:
-                seq1_repr = seq1_repr[:30] + '...'
-            if len(seq2_repr) > 30:
-                seq2_repr = seq2_repr[:30] + '...'
-            elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
-            differing = '%ss differ: %s != %s\n' % elements
-
-            for i in xrange(min(len1, len2)):
-                try:
-                    item1 = seq1[i]
-                except (TypeError, IndexError, NotImplementedError):
-                    differing += ('\nUnable to index element %d of first %s\n' %
-                                 (i, seq_type_name))
-                    break
-
-                try:
-                    item2 = seq2[i]
-                except (TypeError, IndexError, NotImplementedError):
-                    differing += ('\nUnable to index element %d of second %s\n' %
-                                 (i, seq_type_name))
-                    break
-
-                if item1 != item2:
-                    differing += ('\nFirst differing element %d:\n%s\n%s\n' %
-                                 (i, item1, item2))
-                    break
-            else:
-                if (len1 == len2 and seq_type is None and
-                    type(seq1) != type(seq2)):
-                    # The sequences are the same, but have differing types.
-                    return
-
-            if len1 > len2:
-                differing += ('\nFirst %s contains %d additional '
-                             'elements.\n' % (seq_type_name, len1 - len2))
-                try:
-                    differing += ('First extra element %d:\n%s\n' %
-                                  (len2, seq1[len2]))
-                except (TypeError, IndexError, NotImplementedError):
-                    differing += ('Unable to index element %d '
-                                  'of first %s\n' % (len2, seq_type_name))
-            elif len1 < len2:
-                differing += ('\nSecond %s contains %d additional '
-                             'elements.\n' % (seq_type_name, len2 - len1))
-                try:
-                    differing += ('First extra element %d:\n%s\n' %
-                                  (len1, seq2[len1]))
-                except (TypeError, IndexError, NotImplementedError):
-                    differing += ('Unable to index element %d '
-                                  'of second %s\n' % (len1, seq_type_name))
-        standardMsg = differing
-        diffMsg = '\n' + '\n'.join(
-            difflib.ndiff(pprint.pformat(seq1).splitlines(),
-                          pprint.pformat(seq2).splitlines()))
-
-        standardMsg = self._truncateMessage(standardMsg, diffMsg)
-        msg = self._formatMessage(msg, standardMsg)
-        self.fail(msg)
-
-    def _truncateMessage(self, message, diff):
-        max_diff = self.maxDiff
-        if max_diff is None or len(diff) <= max_diff:
-            return message + diff
-        return message + (DIFF_OMITTED % len(diff))
-
-    def assertListEqual(self, list1, list2, msg=None):
-        """A list-specific equality assertion.
-
-        Args:
-            list1: The first list to compare.
-            list2: The second list to compare.
-            msg: Optional message to use on failure instead of a list of
-                    differences.
-
-        """
-        self.assertSequenceEqual(list1, list2, msg, seq_type=list)
-
-    def assertTupleEqual(self, tuple1, tuple2, msg=None):
-        """A tuple-specific equality assertion.
-
-        Args:
-            tuple1: The first tuple to compare.
-            tuple2: The second tuple to compare.
-            msg: Optional message to use on failure instead of a list of
-                    differences.
-        """
-        self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
-
-    def assertSetEqual(self, set1, set2, msg=None):
-        """A set-specific equality assertion.
-
-        Args:
-            set1: The first set to compare.
-            set2: The second set to compare.
-            msg: Optional message to use on failure instead of a list of
-                    differences.
-
-        assertSetEqual uses ducktyping to support
-        different types of sets, and is optimized for sets specifically
-        (parameters must support a difference method).
-        """
-        try:
-            difference1 = set1.difference(set2)
-        except TypeError as e:
-            self.fail('invalid type when attempting set difference: %s' % e)
-        except AttributeError as e:
-            self.fail('first argument does not support set difference: %s' % e)
-
-        try:
-            difference2 = set2.difference(set1)
-        except TypeError as e:
-            self.fail('invalid type when attempting set difference: %s' % e)
-        except AttributeError as e:
-            self.fail('second argument does not support set difference: %s' % e)
-
-        if not (difference1 or difference2):
-            return
-
-        lines = []
-        if difference1:
-            lines.append('Items in the first set but not the second:')
-            for item in difference1:
-                lines.append(repr(item))
-        if difference2:
-            lines.append('Items in the second set but not the first:')
-            for item in difference2:
-                lines.append(repr(item))
-
-        standardMsg = '\n'.join(lines)
-        self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertIn(self, member, container, msg=None):
-        """Just like self.assertTrue(a in b), but with a nicer default message."""
-        if member not in container:
-            standardMsg = '%s not found in %s' % (safe_repr(member),
-                                                   safe_repr(container))
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertNotIn(self, member, container, msg=None):
-        """Just like self.assertTrue(a not in b), but with a nicer default message."""
-        if member in container:
-            standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
-                                                            safe_repr(container))
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertIs(self, expr1, expr2, msg=None):
-        """Just like self.assertTrue(a is b), but with a nicer default message."""
-        if expr1 is not expr2:
-            standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertIsNot(self, expr1, expr2, msg=None):
-        """Just like self.assertTrue(a is not b), but with a nicer default message."""
-        if expr1 is expr2:
-            standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertDictEqual(self, d1, d2, msg=None):
-        self.assertTrue(isinstance(d1, dict), 'First argument is not a dictionary')
-        self.assertTrue(isinstance(d2, dict), 'Second argument is not a dictionary')
-
-        if d1 != d2:
-            standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
-            diff = ('\n' + '\n'.join(difflib.ndiff(
-                           pprint.pformat(d1).splitlines(),
-                           pprint.pformat(d2).splitlines())))
-            standardMsg = self._truncateMessage(standardMsg, diff)
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertDictContainsSubset(self, expected, actual, msg=None):
-        """Checks whether actual is a superset of expected."""
-        missing = []
-        mismatched = []
-        for key, value in expected.iteritems():
-            if key not in actual:
-                missing.append(key)
-            elif value != actual[key]:
-                mismatched.append('%s, expected: %s, actual: %s' %
-                                  (safe_repr(key), safe_repr(value),
-                                   safe_repr(actual[key])))
-
-        if not (missing or mismatched):
-            return
-
-        standardMsg = ''
-        if missing:
-            standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
-                                                    missing)
-        if mismatched:
-            if standardMsg:
-                standardMsg += '; '
-            standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
-
-        self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
-        """An unordered sequence specific comparison. It asserts that
-        expected_seq and actual_seq contain the same elements. It is
-        the equivalent of::
-
-            self.assertEqual(sorted(expected_seq), sorted(actual_seq))
-
-        Raises with an error message listing which elements of expected_seq
-        are missing from actual_seq and vice versa if any.
-
-        Asserts that each element has the same count in both sequences.
-        Example:
-            - [0, 1, 1] and [1, 0, 1] compare equal.
-            - [0, 0, 1] and [0, 1] compare unequal.
-        """
-        try:
-            expected = sorted(expected_seq)
-            actual = sorted(actual_seq)
-        except TypeError:
-            # Unsortable items (example: set(), complex(), ...)
-            expected = list(expected_seq)
-            actual = list(actual_seq)
-            missing, unexpected = unorderable_list_difference(
-                expected, actual, ignore_duplicate=False
-            )
-        else:
-            return self.assertSequenceEqual(expected, actual, msg=msg)
-
-        errors = []
-        if missing:
-            errors.append('Expected, but missing:\n    %s' %
-                           safe_repr(missing))
-        if unexpected:
-            errors.append('Unexpected, but present:\n    %s' %
-                           safe_repr(unexpected))
-        if errors:
-            standardMsg = '\n'.join(errors)
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertMultiLineEqual(self, first, second, msg=None):
-        """Assert that two multi-line strings are equal."""
-        self.assertTrue(isinstance(first, basestring), (
-                'First argument is not a string'))
-        self.assertTrue(isinstance(second, basestring), (
-                'Second argument is not a string'))
-
-        if first != second:
-            standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
-            diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
-                                                       second.splitlines(True)))
-            standardMsg = self._truncateMessage(standardMsg, diff)
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertLess(self, a, b, msg=None):
-        """Just like self.assertTrue(a < b), but with a nicer default message."""
-        if not a < b:
-            standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertLessEqual(self, a, b, msg=None):
-        """Just like self.assertTrue(a <= b), but with a nicer default message."""
-        if not a <= b:
-            standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertGreater(self, a, b, msg=None):
-        """Just like self.assertTrue(a > b), but with a nicer default message."""
-        if not a > b:
-            standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertGreaterEqual(self, a, b, msg=None):
-        """Just like self.assertTrue(a >= b), but with a nicer default message."""
-        if not a >= b:
-            standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertIsNone(self, obj, msg=None):
-        """Same as self.assertTrue(obj is None), with a nicer default message."""
-        if obj is not None:
-            standardMsg = '%s is not None' % (safe_repr(obj),)
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertIsNotNone(self, obj, msg=None):
-        """Included for symmetry with assertIsNone."""
-        if obj is None:
-            standardMsg = 'unexpectedly None'
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertIsInstance(self, obj, cls, msg=None):
-        """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
-        default message."""
-        if not isinstance(obj, cls):
-            standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertNotIsInstance(self, obj, cls, msg=None):
-        """Included for symmetry with assertIsInstance."""
-        if isinstance(obj, cls):
-            standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
-            self.fail(self._formatMessage(msg, standardMsg))
-
-    def assertRaisesRegexp(self, expected_exception, expected_regexp,
-                           callable_obj=None, *args, **kwargs):
-        """Asserts that the message in a raised exception matches a regexp.
-
-        Args:
-            expected_exception: Exception class expected to be raised.
-            expected_regexp: Regexp (re pattern object or string) expected
-                    to be found in error message.
-            callable_obj: Function to be called.
-            args: Extra args.
-            kwargs: Extra kwargs.
-        """
-        if callable_obj is None:
-            return _AssertRaisesContext(expected_exception, self, expected_regexp)
-        try:
-            callable_obj(*args, **kwargs)
-        except expected_exception as exc_value:
-            if isinstance(expected_regexp, basestring):
-                expected_regexp = re.compile(expected_regexp)
-            if not expected_regexp.search(str(exc_value)):
-                raise self.failureException('"%s" does not match "%s"' %
-                         (expected_regexp.pattern, str(exc_value)))
-        else:
-            if hasattr(expected_exception, '__name__'):
-                excName = expected_exception.__name__
-            else:
-                excName = str(expected_exception)
-            raise self.failureException("%s not raised" % excName)
-
-    def assertRegexpMatches(self, text, expected_regexp, msg=None):
-        """Fail the test unless the text matches the regular expression."""
-        if isinstance(expected_regexp, basestring):
-            expected_regexp = re.compile(expected_regexp)
-        if not expected_regexp.search(text):
-            msg = msg or "Regexp didn't match"
-            msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
-            raise self.failureException(msg)
-
-    def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
-        """Fail the test if the text matches the regular expression."""
-        if isinstance(unexpected_regexp, basestring):
-            unexpected_regexp = re.compile(unexpected_regexp)
-        match = unexpected_regexp.search(text)
-        if match:
-            msg = msg or "Regexp matched"
-            msg = '%s: %r matches %r in %r' % (msg,
-                                               text[match.start():match.end()],
-                                               unexpected_regexp.pattern,
-                                               text)
-            raise self.failureException(msg)
-
-class FunctionTestCase(TestCase):
-    """A test case that wraps a test function.
-
-    This is useful for slipping pre-existing test functions into the
-    unittest framework. Optionally, set-up and tidy-up functions can be
-    supplied. As with TestCase, the tidy-up ('tearDown') function will
-    always be called if the set-up ('setUp') function ran successfully.
-    """
-
-    def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
-        super(FunctionTestCase, self).__init__()
-        self._setUpFunc = setUp
-        self._tearDownFunc = tearDown
-        self._testFunc = testFunc
-        self._description = description
-
-    def setUp(self):
-        if self._setUpFunc is not None:
-            self._setUpFunc()
-
-    def tearDown(self):
-        if self._tearDownFunc is not None:
-            self._tearDownFunc()
-
-    def runTest(self):
-        self._testFunc()
-
-    def id(self):
-        return self._testFunc.__name__
-
-    def __eq__(self, other):
-        if not isinstance(other, self.__class__):
-            return NotImplemented
-
-        return self._setUpFunc == other._setUpFunc and \
-               self._tearDownFunc == other._tearDownFunc and \
-               self._testFunc == other._testFunc and \
-               self._description == other._description
-
-    def __ne__(self, other):
-        return not self == other
-
-    def __hash__(self):
-        return hash((type(self), self._setUpFunc, self._tearDownFunc,
-                     self._testFunc, self._description))
-
-    def __str__(self):
-        return "%s (%s)" % (strclass(self.__class__),
-                            self._testFunc.__name__)
-
-    def __repr__(self):
-        return "<%s testFunc=%s>" % (strclass(self.__class__),
-                                     self._testFunc)
-
-    def shortDescription(self):
-        if self._description is not None:
-            return self._description
-        doc = self._testFunc.__doc__
-        return doc and doc.split("\n")[0].strip() or None
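Everything deleted here — SkipTest, the skip decorators, and assertion helpers such as assertIn, assertIsNone and assertRaises used as a context manager — is available on unittest.TestCase in Python 2.7 and later, so existing tests keep working once they import from unittest. A small illustrative sketch, not part of the commit, with hypothetical names:

    import unittest

    class ContextManagerAsserts(unittest.TestCase):
        def test_raises_with_context(self):
            # assertRaises as a context manager, as documented in the removed case.py
            with self.assertRaises(ZeroDivisionError) as cm:
                1 / 0
            self.assertIsInstance(cm.exception, ZeroDivisionError)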

django/utils/unittest/collector.py (+0 -9)

@@ -1,9 +0,0 @@
-import os
-import sys
-from django.utils.unittest.loader import defaultTestLoader
-
-def collector():
-    # import __main__ triggers code re-execution
-    __main__ = sys.modules['__main__']
-    setupDir = os.path.abspath(os.path.dirname(__main__.__file__))
-    return defaultTestLoader.discover(setupDir)

django/utils/unittest/compatibility.py (+0 -64)

@@ -1,64 +0,0 @@
-import os
-import sys
-
-try:
-    from functools import wraps
-except ImportError:
-    # only needed for Python 2.4
-    def wraps(_):
-        def _wraps(func):
-            return func
-        return _wraps
-
-__unittest = True
-
-def _relpath_nt(path, start=os.path.curdir):
-    """Return a relative version of a path"""
-
-    if not path:
-        raise ValueError("no path specified")
-    start_list = os.path.abspath(start).split(os.path.sep)
-    path_list = os.path.abspath(path).split(os.path.sep)
-    if start_list[0].lower() != path_list[0].lower():
-        unc_path, rest = os.path.splitunc(path)
-        unc_start, rest = os.path.splitunc(start)
-        if bool(unc_path) ^ bool(unc_start):
-            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
-                                                                % (path, start))
-        else:
-            raise ValueError("path is on drive %s, start on drive %s"
-                                                % (path_list[0], start_list[0]))
-    # Work out how much of the filepath is shared by start and path.
-    for i in range(min(len(start_list), len(path_list))):
-        if start_list[i].lower() != path_list[i].lower():
-            break
-    else:
-        i += 1
-
-    rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
-    if not rel_list:
-        return os.path.curdir
-    return os.path.join(*rel_list)
-
-# default to posixpath definition
-def _relpath_posix(path, start=os.path.curdir):
-    """Return a relative version of a path"""
-
-    if not path:
-        raise ValueError("no path specified")
-
-    start_list = os.path.abspath(start).split(os.path.sep)
-    path_list = os.path.abspath(path).split(os.path.sep)
-
-    # Work out how much of the filepath is shared by start and path.
-    i = len(os.path.commonprefix([start_list, path_list]))
-
-    rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
-    if not rel_list:
-        return os.path.curdir
-    return os.path.join(*rel_list)
-
-if os.path is sys.modules.get('ntpath'):
-    relpath = _relpath_nt
-else:
-    relpath = _relpath_posix
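This compatibility shim backported functools.wraps and os.path.relpath for Python 2.4/2.5; both have shipped in the standard library since Python 2.5 and 2.6 respectively, so the module can go. A quick sketch of the stdlib equivalent (the paths are hypothetical):

    import os.path

    # Same result the backported relpath produced on POSIX systems:
    print(os.path.relpath("/srv/project/tests/test_models.py", "/srv/project"))
    # -> tests/test_models.py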

django/utils/unittest/loader.py (+0 -322)

@@ -1,322 +0,0 @@
-"""Loading unittests."""
-
-import os
-import re
-import sys
-import traceback
-import types
-import unittest
-
-from fnmatch import fnmatch
-
-from django.utils.unittest import case, suite
-
-try:
-    from os.path import relpath
-except ImportError:
-    from django.utils.unittest.compatibility import relpath
-
-__unittest = True
-
-
-def _CmpToKey(mycmp):
-    'Convert a cmp= function into a key= function'
-    class K(object):
-        def __init__(self, obj):
-            self.obj = obj
-        def __lt__(self, other):
-            return mycmp(self.obj, other.obj) == -1
-    return K
-
-
-# what about .pyc or .pyo (etc)
-# we would need to avoid loading the same tests multiple times
-# from '.py', '.pyc' *and* '.pyo'
-VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
-
-
-def _make_failed_import_test(name, suiteClass):
-    message = 'Failed to import test module: %s' % name
-    if hasattr(traceback, 'format_exc'):
-        # Python 2.3 compatibility
-        # format_exc returns two frames of discover.py as well
-        message += '\n%s' % traceback.format_exc()
-    return _make_failed_test('ModuleImportFailure', name, ImportError(message),
-                             suiteClass)
-
-def _make_failed_load_tests(name, exception, suiteClass):
-    return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
-
-def _make_failed_test(classname, methodname, exception, suiteClass):
-    def testFailure(self):
-        raise exception
-    attrs = {methodname: testFailure}
-    TestClass = type(classname, (case.TestCase,), attrs)
-    return suiteClass((TestClass(methodname),))
-
-
-class TestLoader(unittest.TestLoader):
-    """
-    This class is responsible for loading tests according to various criteria
-    and returning them wrapped in a TestSuite
-    """
-    testMethodPrefix = 'test'
-    sortTestMethodsUsing = cmp
-    suiteClass = suite.TestSuite
-    _top_level_dir = None
-
-    def loadTestsFromTestCase(self, testCaseClass):
-        """Return a suite of all tests cases contained in testCaseClass"""
-        if issubclass(testCaseClass, suite.TestSuite):
-            raise TypeError("Test cases should not be derived from TestSuite."
-                            " Maybe you meant to derive from TestCase?")
-        testCaseNames = self.getTestCaseNames(testCaseClass)
-        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
-            testCaseNames = ['runTest']
-        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
-        return loaded_suite
-
-    def loadTestsFromModule(self, module, use_load_tests=True):
-        """Return a suite of all tests cases contained in the given module"""
-        tests = []
-        for name in dir(module):
-            obj = getattr(module, name)
-            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
-                tests.append(self.loadTestsFromTestCase(obj))
-
-        load_tests = getattr(module, 'load_tests', None)
-        tests = self.suiteClass(tests)
-        if use_load_tests and load_tests is not None:
-            try:
-                return load_tests(self, tests, None)
-            except Exception as e:
-                return _make_failed_load_tests(module.__name__, e,
-                                               self.suiteClass)
-        return tests
-
-    def loadTestsFromName(self, name, module=None):
-        """Return a suite of all tests cases given a string specifier.
-
-        The name may resolve either to a module, a test case class, a
-        test method within a test case class, or a callable object which
-        returns a TestCase or TestSuite instance.
-
-        The method optionally resolves the names relative to a given module.
-        """
-        parts = name.split('.')
-        if module is None:
-            parts_copy = parts[:]
-            while parts_copy:
-                try:
-                    module = __import__('.'.join(parts_copy))
-                    break
-                except ImportError:
-                    del parts_copy[-1]
-                    if not parts_copy:
-                        raise
-            parts = parts[1:]
-        obj = module
-        for part in parts:
-            parent, obj = obj, getattr(obj, part)
-
-        if isinstance(obj, types.ModuleType):
-            return self.loadTestsFromModule(obj)
-        elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
-            return self.loadTestsFromTestCase(obj)
-        elif (isinstance(obj, types.UnboundMethodType) and
-              isinstance(parent, type) and
-              issubclass(parent, unittest.TestCase)):
-            return self.suiteClass([parent(obj.__name__)])
-        elif isinstance(obj, unittest.TestSuite):
-            return obj
-        elif hasattr(obj, '__call__'):
-            test = obj()
-            if isinstance(test, unittest.TestSuite):
-                return test
-            elif isinstance(test, unittest.TestCase):
-                return self.suiteClass([test])
-            else:
-                raise TypeError("calling %s returned %s, not a test" %
-                                (obj, test))
-        else:
-            raise TypeError("don't know how to make test from: %s" % obj)
-
-    def loadTestsFromNames(self, names, module=None):
-        """Return a suite of all tests cases found using the given sequence
-        of string specifiers. See 'loadTestsFromName()'.
-        """
-        suites = [self.loadTestsFromName(name, module) for name in names]
-        return self.suiteClass(suites)
-
-    def getTestCaseNames(self, testCaseClass):
-        """Return a sorted sequence of method names found within testCaseClass
-        """
-        def isTestMethod(attrname, testCaseClass=testCaseClass,
-                         prefix=self.testMethodPrefix):
-            return attrname.startswith(prefix) and \
-                hasattr(getattr(testCaseClass, attrname), '__call__')
-        testFnNames = filter(isTestMethod, dir(testCaseClass))
-        if self.sortTestMethodsUsing:
-            testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
-        return testFnNames
-
-    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
-        """Find and return all test modules from the specified start
-        directory, recursing into subdirectories to find them. Only test files
-        that match the pattern will be loaded. (Using shell style pattern
-        matching.)
-
-        All test modules must be importable from the top level of the project.
-        If the start directory is not the top level directory then the top
-        level directory must be specified separately.
-
-        If a test package name (directory with '__init__.py') matches the
-        pattern then the package will be checked for a 'load_tests' function. If
-        this exists then it will be called with loader, tests, pattern.
-
-        If load_tests exists then discovery does *not* recurse into the package;
-        load_tests is responsible for loading all tests in the package.
-
-        The pattern is deliberately not stored as a loader attribute so that
-        packages can continue discovery themselves. top_level_dir is stored so
-        load_tests does not need to pass this argument in to loader.discover().
-        """
-        set_implicit_top = False
-        if top_level_dir is None and self._top_level_dir is not None:
-            # make top_level_dir optional if called from load_tests in a package
-            top_level_dir = self._top_level_dir
-        elif top_level_dir is None:
-            set_implicit_top = True
-            top_level_dir = start_dir
-
-        top_level_dir = os.path.abspath(top_level_dir)
-
-        if not top_level_dir in sys.path:
-            # all test modules must be importable from the top level directory
-            # should we *unconditionally* put the start directory in first
-            # in sys.path to minimise likelihood of conflicts between installed
-            # modules and development versions?
-            sys.path.insert(0, top_level_dir)
-        self._top_level_dir = top_level_dir
-
-        is_not_importable = False
-        if os.path.isdir(os.path.abspath(start_dir)):
-            start_dir = os.path.abspath(start_dir)
-            if start_dir != top_level_dir:
-                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
-        else:
-            # support for discovery from dotted module names
-            try:
-                __import__(start_dir)
-            except ImportError:
-                is_not_importable = True
-            else:
-                the_module = sys.modules[start_dir]
-                top_part = start_dir.split('.')[0]
-                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
-                if set_implicit_top:
-                    self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
-                    sys.path.remove(top_level_dir)
-
-        if is_not_importable:
-            raise ImportError('Start directory is not importable: %r' % start_dir)
-
-        tests = list(self._find_tests(start_dir, pattern))
-        return self.suiteClass(tests)
-
-    def _get_name_from_path(self, path):
-        path = os.path.splitext(os.path.normpath(path))[0]
-
-        _relpath = relpath(path, self._top_level_dir)
-        assert not os.path.isabs(_relpath), "Path must be within the project"
-        assert not _relpath.startswith('..'), "Path must be within the project"
-
-        name = _relpath.replace(os.path.sep, '.')
-        return name
-
-    def _get_module_from_name(self, name):
-        __import__(name)
-        return sys.modules[name]
-
-    def _match_path(self, path, full_path, pattern):
-        # override this method to use alternative matching strategy
-        return fnmatch(path, pattern)
-
-    def _find_tests(self, start_dir, pattern):
-        """Used by discovery. Yields test suites it loads."""
-        paths = os.listdir(start_dir)
-
-        for path in paths:
-            full_path = os.path.join(start_dir, path)
-            if os.path.isfile(full_path):
-                if not VALID_MODULE_NAME.match(path):
-                    # valid Python identifiers only
-                    continue
-                if not self._match_path(path, full_path, pattern):
-                    continue
-                # if the test file matches, load it
-                name = self._get_name_from_path(full_path)
-                try:
-                    module = self._get_module_from_name(name)
-                except:
-                    yield _make_failed_import_test(name, self.suiteClass)
-                else:
-                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
-                    realpath = os.path.splitext(mod_file)[0]
-                    fullpath_noext = os.path.splitext(full_path)[0]
-                    if realpath.lower() != fullpath_noext.lower():
-                        module_dir = os.path.dirname(realpath)
-                        mod_name = os.path.splitext(os.path.basename(full_path))[0]
-                        expected_dir = os.path.dirname(full_path)
-                        msg = ("%r module incorrectly imported from %r. Expected %r. "
-                               "Is this module globally installed?")
-                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
-                    yield self.loadTestsFromModule(module)
-            elif os.path.isdir(full_path):
-                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
-                    continue
-
-                load_tests = None
-                tests = None
-                if fnmatch(path, pattern):
-                    # only check load_tests if the package directory itself matches the filter
-                    name = self._get_name_from_path(full_path)
-                    package = self._get_module_from_name(name)
-                    load_tests = getattr(package, 'load_tests', None)
-                    tests = self.loadTestsFromModule(package, use_load_tests=False)
-
-                if load_tests is None:
-                    if tests is not None:
-                        # tests loaded from package file
-                        yield tests
-                    # recurse into the package
-                    for test in self._find_tests(full_path, pattern):
-                        yield test
-                else:
-                    try:
-                        yield load_tests(self, tests, pattern)
-                    except Exception as e:
-                        yield _make_failed_load_tests(package.__name__, e,
-                                                      self.suiteClass)
-
-defaultTestLoader = TestLoader()
-
-
-def _makeLoader(prefix, sortUsing, suiteClass=None):
-    loader = TestLoader()
-    loader.sortTestMethodsUsing = sortUsing
-    loader.testMethodPrefix = prefix
-    if suiteClass:
-        loader.suiteClass = suiteClass
-    return loader
-
-def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
-    return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
-
-def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
-              suiteClass=suite.TestSuite):
-    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
-
-def findTestCases(module, prefix='test', sortUsing=cmp,
-                  suiteClass=suite.TestSuite):
-    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
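The loader deleted above duplicates the standard ``unittest.TestLoader`` API. A minimal sketch of the equivalent standard-library calls; the test class, names, and the commented-out discovery path are made up for illustration:

    import sys
    import unittest

    class SampleTests(unittest.TestCase):
        def test_addition(self):
            self.assertEqual(1 + 1, 2)

    loader = unittest.TestLoader()

    # getTestCaseNames() applies the "test" prefix and the configured sort
    # order, as in the implementation above.
    print(loader.getTestCaseNames(SampleTests))       # ['test_addition']

    # loadTestsFromName() resolves a dotted name, optionally relative to a
    # module -- here the module defining SampleTests.
    suite = loader.loadTestsFromName('SampleTests.test_addition',
                                     module=sys.modules[__name__])
    print(suite.countTestCases())                     # 1

    # discover() walks start_dir for files matching the pattern; the modules
    # must be importable from the top-level directory, as documented above.
    # (Commented out because the directory here is hypothetical.)
    # suite = loader.discover('tests', pattern='test*.py', top_level_dir='.')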

+ 0 - 241
django/utils/unittest/main.py

@@ -1,241 +0,0 @@
-"""Unittest main program"""
-
-import sys
-import os
-import types
-
-from django.utils.unittest import loader, runner
-try:
-    from django.utils.unittest.signals import installHandler
-except ImportError:
-    installHandler = None
-
-__unittest = True
-
-FAILFAST     = "  -f, --failfast   Stop on first failure\n"
-CATCHBREAK   = "  -c, --catch      Catch control-C and display results\n"
-BUFFEROUTPUT = "  -b, --buffer     Buffer stdout and stderr during test runs\n"
-
-USAGE_AS_MAIN = """\
-Usage: %(progName)s [options] [tests]
-
-Options:
-  -h, --help       Show this message
-  -v, --verbose    Verbose output
-  -q, --quiet      Minimal output
-%(failfast)s%(catchbreak)s%(buffer)s
-Examples:
-  %(progName)s test_module                       - run tests from test_module
-  %(progName)s test_module.TestClass             - run tests from
-                                                   test_module.TestClass
-  %(progName)s test_module.TestClass.test_method - run specified test method
-
-[tests] can be a list of any number of test modules, classes and test
-methods.
-
-Alternative Usage: %(progName)s discover [options]
-
-Options:
-  -v, --verbose    Verbose output
-%(failfast)s%(catchbreak)s%(buffer)s  -s directory     Directory to start discovery ('.' default)
-  -p pattern       Pattern to match test files ('test*.py' default)
-  -t directory     Top level directory of project (default to
-                   start directory)
-
-For test discovery all test modules must be importable from the top
-level directory of the project.
-"""
-
-USAGE_FROM_MODULE = """\
-Usage: %(progName)s [options] [test] [...]
-
-Options:
-  -h, --help       Show this message
-  -v, --verbose    Verbose output
-  -q, --quiet      Minimal output
-%(failfast)s%(catchbreak)s%(buffer)s
-Examples:
-  %(progName)s                               - run default set of tests
-  %(progName)s MyTestSuite                   - run suite 'MyTestSuite'
-  %(progName)s MyTestCase.testSomething      - run MyTestCase.testSomething
-  %(progName)s MyTestCase                    - run all 'test*' test methods
-                                               in MyTestCase
-"""
-
-
-class TestProgram(object):
-    """A command-line program that runs a set of tests; this is primarily
-       for making test modules conveniently executable.
-    """
-    USAGE = USAGE_FROM_MODULE
-
-    # defaults for testing
-    failfast = catchbreak = buffer = progName = None
-
-    def __init__(self, module='__main__', defaultTest=None,
-                 argv=None, testRunner=None,
-                 testLoader=loader.defaultTestLoader, exit=True,
-                 verbosity=1, failfast=None, catchbreak=None, buffer=None):
-        if isinstance(module, basestring):
-            self.module = __import__(module)
-            for part in module.split('.')[1:]:
-                self.module = getattr(self.module, part)
-        else:
-            self.module = module
-        if argv is None:
-            argv = sys.argv
-
-        self.exit = exit
-        self.verbosity = verbosity
-        self.failfast = failfast
-        self.catchbreak = catchbreak
-        self.buffer = buffer
-        self.defaultTest = defaultTest
-        self.testRunner = testRunner
-        self.testLoader = testLoader
-        self.progName = os.path.basename(argv[0])
-        self.parseArgs(argv)
-        self.runTests()
-
-    def usageExit(self, msg=None):
-        if msg:
-            print(msg)
-        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
-                 'buffer': ''}
-        if self.failfast != False:
-            usage['failfast'] = FAILFAST
-        if self.catchbreak != False and installHandler is not None:
-            usage['catchbreak'] = CATCHBREAK
-        if self.buffer != False:
-            usage['buffer'] = BUFFEROUTPUT
-        print(self.USAGE % usage)
-        sys.exit(2)
-
-    def parseArgs(self, argv):
-        if len(argv) > 1 and argv[1].lower() == 'discover':
-            self._do_discovery(argv[2:])
-            return
-
-        import getopt
-        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
-        try:
-            options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
-            for opt, value in options:
-                if opt in ('-h','-H','--help'):
-                    self.usageExit()
-                if opt in ('-q','--quiet'):
-                    self.verbosity = 0
-                if opt in ('-v','--verbose'):
-                    self.verbosity = 2
-                if opt in ('-f','--failfast'):
-                    if self.failfast is None:
-                        self.failfast = True
-                    # Should this raise an exception if -f is not valid?
-                if opt in ('-c','--catch'):
-                    if self.catchbreak is None and installHandler is not None:
-                        self.catchbreak = True
-                    # Should this raise an exception if -c is not valid?
-                if opt in ('-b','--buffer'):
-                    if self.buffer is None:
-                        self.buffer = True
-                    # Should this raise an exception if -b is not valid?
-            if len(args) == 0 and self.defaultTest is None:
-                # createTests will load tests from self.module
-                self.testNames = None
-            elif len(args) > 0:
-                self.testNames = args
-                if __name__ == '__main__':
-                    # to support python -m unittest ...
-                    self.module = None
-            else:
-                self.testNames = (self.defaultTest,)
-            self.createTests()
-        except getopt.error as msg:
-            self.usageExit(msg)
-
-    def createTests(self):
-        if self.testNames is None:
-            self.test = self.testLoader.loadTestsFromModule(self.module)
-        else:
-            self.test = self.testLoader.loadTestsFromNames(self.testNames,
-                                                           self.module)
-
-    def _do_discovery(self, argv, Loader=loader.TestLoader):
-        # handle command line args for test discovery
-        self.progName = '%s discover' % self.progName
-        import optparse
-        parser = optparse.OptionParser()
-        parser.prog = self.progName
-        parser.add_option('-v', '--verbose', dest='verbose', default=False,
-                          help='Verbose output', action='store_true')
-        if self.failfast != False:
-            parser.add_option('-f', '--failfast', dest='failfast', default=False,
-                              help='Stop on first fail or error',
-                              action='store_true')
-        if self.catchbreak != False and installHandler is not None:
-            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
-                              help='Catch ctrl-C and display results so far',
-                              action='store_true')
-        if self.buffer != False:
-            parser.add_option('-b', '--buffer', dest='buffer', default=False,
-                              help='Buffer stdout and stderr during tests',
-                              action='store_true')
-        parser.add_option('-s', '--start-directory', dest='start', default='.',
-                          help="Directory to start discovery ('.' default)")
-        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
-                          help="Pattern to match tests ('test*.py' default)")
-        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
-                          help='Top level directory of project (defaults to start directory)')
-
-        options, args = parser.parse_args(argv)
-        if len(args) > 3:
-            self.usageExit()
-
-        for name, value in zip(('start', 'pattern', 'top'), args):
-            setattr(options, name, value)
-
-        # only set options from the parsing here
-        # if they weren't set explicitly in the constructor
-        if self.failfast is None:
-            self.failfast = options.failfast
-        if self.catchbreak is None and installHandler is not None:
-            self.catchbreak = options.catchbreak
-        if self.buffer is None:
-            self.buffer = options.buffer
-
-        if options.verbose:
-            self.verbosity = 2
-
-        start_dir = options.start
-        pattern = options.pattern
-        top_level_dir = options.top
-
-        loader = Loader()
-        self.test = loader.discover(start_dir, pattern, top_level_dir)
-
-    def runTests(self):
-        if self.catchbreak:
-            installHandler()
-        if self.testRunner is None:
-            self.testRunner = runner.TextTestRunner
-        if isinstance(self.testRunner, (type, types.ClassType)):
-            try:
-                testRunner = self.testRunner(verbosity=self.verbosity,
-                                             failfast=self.failfast,
-                                             buffer=self.buffer)
-            except TypeError:
-                # didn't accept the verbosity, buffer or failfast arguments
-                testRunner = self.testRunner()
-        else:
-            # it is assumed to be a TestRunner instance
-            testRunner = self.testRunner
-        self.result = testRunner.run(self.test)
-        if self.exit:
-            sys.exit(not self.result.wasSuccessful())
-
-main = TestProgram
-
-def main_():
-    TestProgram.USAGE = USAGE_AS_MAIN
-    main(module=None)
-
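The removed ``TestProgram`` mirrors ``unittest.main()``, which accepts as keyword arguments the same options parsed by hand above. A minimal sketch with a made-up test case:

    import unittest

    class SmokeTests(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)

    if __name__ == '__main__':
        # The switches from the usage text above (-v/-q, -f, -c, -b) map to
        # keyword arguments; discovery is available on the command line as
        # "python -m unittest discover -s . -p 'test*.py'".
        unittest.main(verbosity=2, failfast=True, buffer=True, catchbreak=True)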

+ 0 - 183
django/utils/unittest/result.py

@@ -1,183 +0,0 @@
-"""Test result object"""
-
-import sys
-import traceback
-import unittest
-
-from StringIO import StringIO
-
-from django.utils.unittest import util
-from django.utils.unittest.compatibility import wraps
-
-__unittest = True
-
-def failfast(method):
-    @wraps(method)
-    def inner(self, *args, **kw):
-        if getattr(self, 'failfast', False):
-            self.stop()
-        return method(self, *args, **kw)
-    return inner
-
-
-STDOUT_LINE = '\nStdout:\n%s'
-STDERR_LINE = '\nStderr:\n%s'
-
-class TestResult(unittest.TestResult):
-    """Holder for test result information.
-
-    Test results are automatically managed by the TestCase and TestSuite
-    classes, and do not need to be explicitly manipulated by writers of tests.
-
-    Each instance holds the total number of tests run, and collections of
-    failures and errors that occurred among those test runs. The collections
-    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
-    formatted traceback of the error that occurred.
-    """
-    _previousTestClass = None
-    _moduleSetUpFailed = False
-
-    def __init__(self):
-        self.failfast = False
-        self.failures = []
-        self.errors = []
-        self.testsRun = 0
-        self.skipped = []
-        self.expectedFailures = []
-        self.unexpectedSuccesses = []
-        self.shouldStop = False
-        self.buffer = False
-        self._stdout_buffer = None
-        self._stderr_buffer = None
-        self._original_stdout = sys.stdout
-        self._original_stderr = sys.stderr
-        self._mirrorOutput = False
-
-    def startTest(self, test):
-        "Called when the given test is about to be run"
-        self.testsRun += 1
-        self._mirrorOutput = False
-        if self.buffer:
-            if self._stderr_buffer is None:
-                self._stderr_buffer = StringIO()
-                self._stdout_buffer = StringIO()
-            sys.stdout = self._stdout_buffer
-            sys.stderr = self._stderr_buffer
-
-    def startTestRun(self):
-        """Called once before any tests are executed.
-
-        See startTest for a method called before each test.
-        """
-
-    def stopTest(self, test):
-        """Called when the given test has been run"""
-        if self.buffer:
-            if self._mirrorOutput:
-                output = sys.stdout.getvalue()
-                error = sys.stderr.getvalue()
-                if output:
-                    if not output.endswith('\n'):
-                        output += '\n'
-                    self._original_stdout.write(STDOUT_LINE % output)
-                if error:
-                    if not error.endswith('\n'):
-                        error += '\n'
-                    self._original_stderr.write(STDERR_LINE % error)
-
-            sys.stdout = self._original_stdout
-            sys.stderr = self._original_stderr
-            self._stdout_buffer.seek(0)
-            self._stdout_buffer.truncate()
-            self._stderr_buffer.seek(0)
-            self._stderr_buffer.truncate()
-        self._mirrorOutput = False
-
-
-    def stopTestRun(self):
-        """Called once after all tests are executed.
-
-        See stopTest for a method called after each test.
-        """
-
-    @failfast
-    def addError(self, test, err):
-        """Called when an error has occurred. 'err' is a tuple of values as
-        returned by sys.exc_info().
-        """
-        self.errors.append((test, self._exc_info_to_string(err, test)))
-        self._mirrorOutput = True
-
-    @failfast
-    def addFailure(self, test, err):
-        """Called when an error has occurred. 'err' is a tuple of values as
-        returned by sys.exc_info()."""
-        self.failures.append((test, self._exc_info_to_string(err, test)))
-        self._mirrorOutput = True
-
-    def addSuccess(self, test):
-        "Called when a test has completed successfully"
-        pass
-
-    def addSkip(self, test, reason):
-        """Called when a test is skipped."""
-        self.skipped.append((test, reason))
-
-    def addExpectedFailure(self, test, err):
-        """Called when an expected failure/error occured."""
-        self.expectedFailures.append(
-            (test, self._exc_info_to_string(err, test)))
-
-    @failfast
-    def addUnexpectedSuccess(self, test):
-        """Called when a test was expected to fail, but succeed."""
-        self.unexpectedSuccesses.append(test)
-
-    def wasSuccessful(self):
-        "Tells whether or not this result was a success"
-        return (len(self.failures) + len(self.errors) == 0)
-
-    def stop(self):
-        "Indicates that the tests should be aborted"
-        self.shouldStop = True
-
-    def _exc_info_to_string(self, err, test):
-        """Converts a sys.exc_info()-style tuple of values into a string."""
-        exctype, value, tb = err
-        # Skip test runner traceback levels
-        while tb and self._is_relevant_tb_level(tb):
-            tb = tb.tb_next
-        if exctype is test.failureException:
-            # Skip assert*() traceback levels
-            length = self._count_relevant_tb_levels(tb)
-            msgLines = traceback.format_exception(exctype, value, tb, length)
-        else:
-            msgLines = traceback.format_exception(exctype, value, tb)
-
-        if self.buffer:
-            output = sys.stdout.getvalue()
-            error = sys.stderr.getvalue()
-            if output:
-                if not output.endswith('\n'):
-                    output += '\n'
-                msgLines.append(STDOUT_LINE % output)
-            if error:
-                if not error.endswith('\n'):
-                    error += '\n'
-                msgLines.append(STDERR_LINE % error)
-        return ''.join(msgLines)
-
-    def _is_relevant_tb_level(self, tb):
-        return '__unittest' in tb.tb_frame.f_globals
-
-    def _count_relevant_tb_levels(self, tb):
-        length = 0
-        while tb and not self._is_relevant_tb_level(tb):
-            length += 1
-            tb = tb.tb_next
-        return length
-
-    def __repr__(self):
-        return "<%s run=%i errors=%i failures=%i>" % \
-               (util.strclass(self.__class__), self.testsRun, len(self.errors),
-                len(self.failures))
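``unittest.TestResult`` exposes the same flags and collections as the class deleted above; a small sketch of driving it directly, with a made-up test case:

    import unittest

    class ExampleTests(unittest.TestCase):
        def test_passes(self):
            self.assertEqual(2 * 2, 4)

        def test_fails(self):
            self.assertEqual(2 * 2, 5)

    result = unittest.TestResult()
    result.failfast = False     # the flag checked by the @failfast decorator
    result.buffer = True        # capture stdout/stderr per test, as above

    suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTests)
    suite.run(result)

    print(result.testsRun)            # 2
    print(len(result.failures))       # 1 -- (test, formatted traceback) pairs
    print(result.wasSuccessful())     # False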

+ 0 - 206
django/utils/unittest/runner.py

@@ -1,206 +0,0 @@
-"""Running tests"""
-
-import sys
-import time
-import unittest
-
-from django.utils.unittest import result
-
-try:
-    from django.utils.unittest.signals import registerResult
-except ImportError:
-    def registerResult(_):
-        pass
-
-__unittest = True
-
-
-class _WritelnDecorator(object):
-    """Used to decorate file-like objects with a handy 'writeln' method"""
-    def __init__(self,stream):
-        self.stream = stream
-
-    def __getattr__(self, attr):
-        if attr in ('stream', '__getstate__'):
-            raise AttributeError(attr)
-        return getattr(self.stream,attr)
-
-    def writeln(self, arg=None):
-        if arg:
-            self.write(arg)
-        self.write('\n') # text-mode streams translate to \r\n if needed
-
-
-class TextTestResult(result.TestResult):
-    """A test result class that can print formatted text results to a stream.
-
-    Used by TextTestRunner.
-    """
-    separator1 = '=' * 70
-    separator2 = '-' * 70
-
-    def __init__(self, stream, descriptions, verbosity):
-        super(TextTestResult, self).__init__()
-        self.stream = stream
-        self.showAll = verbosity > 1
-        self.dots = verbosity == 1
-        self.descriptions = descriptions
-
-    def getDescription(self, test):
-        doc_first_line = test.shortDescription()
-        if self.descriptions and doc_first_line:
-            return '\n'.join((str(test), doc_first_line))
-        else:
-            return str(test)
-
-    def startTest(self, test):
-        super(TextTestResult, self).startTest(test)
-        if self.showAll:
-            self.stream.write(self.getDescription(test))
-            self.stream.write(" ... ")
-            self.stream.flush()
-
-    def addSuccess(self, test):
-        super(TextTestResult, self).addSuccess(test)
-        if self.showAll:
-            self.stream.writeln("ok")
-        elif self.dots:
-            self.stream.write('.')
-            self.stream.flush()
-
-    def addError(self, test, err):
-        super(TextTestResult, self).addError(test, err)
-        if self.showAll:
-            self.stream.writeln("ERROR")
-        elif self.dots:
-            self.stream.write('E')
-            self.stream.flush()
-
-    def addFailure(self, test, err):
-        super(TextTestResult, self).addFailure(test, err)
-        if self.showAll:
-            self.stream.writeln("FAIL")
-        elif self.dots:
-            self.stream.write('F')
-            self.stream.flush()
-
-    def addSkip(self, test, reason):
-        super(TextTestResult, self).addSkip(test, reason)
-        if self.showAll:
-            self.stream.writeln("skipped %r" % (reason,))
-        elif self.dots:
-            self.stream.write("s")
-            self.stream.flush()
-
-    def addExpectedFailure(self, test, err):
-        super(TextTestResult, self).addExpectedFailure(test, err)
-        if self.showAll:
-            self.stream.writeln("expected failure")
-        elif self.dots:
-            self.stream.write("x")
-            self.stream.flush()
-
-    def addUnexpectedSuccess(self, test):
-        super(TextTestResult, self).addUnexpectedSuccess(test)
-        if self.showAll:
-            self.stream.writeln("unexpected success")
-        elif self.dots:
-            self.stream.write("u")
-            self.stream.flush()
-
-    def printErrors(self):
-        if self.dots or self.showAll:
-            self.stream.writeln()
-        self.printErrorList('ERROR', self.errors)
-        self.printErrorList('FAIL', self.failures)
-
-    def printErrorList(self, flavour, errors):
-        for test, err in errors:
-            self.stream.writeln(self.separator1)
-            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
-            self.stream.writeln(self.separator2)
-            self.stream.writeln("%s" % err)
-
-    def stopTestRun(self):
-        super(TextTestResult, self).stopTestRun()
-        self.printErrors()
-
-
-class TextTestRunner(unittest.TextTestRunner):
-    """A test runner class that displays results in textual form.
-
-    It prints out the names of tests as they are run, errors as they
-    occur, and a summary of the results at the end of the test run.
-    """
-    resultclass = TextTestResult
-
-    def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
-                    failfast=False, buffer=False, resultclass=None):
-        self.stream = _WritelnDecorator(stream)
-        self.descriptions = descriptions
-        self.verbosity = verbosity
-        self.failfast = failfast
-        self.buffer = buffer
-        if resultclass is not None:
-            self.resultclass = resultclass
-
-    def _makeResult(self):
-        return self.resultclass(self.stream, self.descriptions, self.verbosity)
-
-    def run(self, test):
-        "Run the given test case or test suite."
-        result = self._makeResult()
-        result.failfast = self.failfast
-        result.buffer = self.buffer
-        registerResult(result)
-
-        startTime = time.time()
-        startTestRun = getattr(result, 'startTestRun', None)
-        if startTestRun is not None:
-            startTestRun()
-        try:
-            test(result)
-        finally:
-            stopTestRun = getattr(result, 'stopTestRun', None)
-            if stopTestRun is not None:
-                stopTestRun()
-            else:
-                result.printErrors()
-        stopTime = time.time()
-        timeTaken = stopTime - startTime
-        if hasattr(result, 'separator2'):
-            self.stream.writeln(result.separator2)
-        run = result.testsRun
-        self.stream.writeln("Ran %d test%s in %.3fs" %
-                            (run, run != 1 and "s" or "", timeTaken))
-        self.stream.writeln()
-
-        expectedFails = unexpectedSuccesses = skipped = 0
-        try:
-            results = map(len, (result.expectedFailures,
-                                result.unexpectedSuccesses,
-                                result.skipped))
-            expectedFails, unexpectedSuccesses, skipped = results
-        except AttributeError:
-            pass
-        infos = []
-        if not result.wasSuccessful():
-            self.stream.write("FAILED")
-            failed, errored = map(len, (result.failures, result.errors))
-            if failed:
-                infos.append("failures=%d" % failed)
-            if errored:
-                infos.append("errors=%d" % errored)
-        else:
-            self.stream.write("OK")
-        if skipped:
-            infos.append("skipped=%d" % skipped)
-        if expectedFails:
-            infos.append("expected failures=%d" % expectedFails)
-        if unexpectedSuccesses:
-            infos.append("unexpected successes=%d" % unexpectedSuccesses)
-        if infos:
-            self.stream.writeln(" (%s)" % (", ".join(infos),))
-        else:
-            self.stream.write("\n")
-        return result
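The standard ``unittest.TextTestRunner`` takes the same constructor arguments as the removed class; a sketch with an illustrative test case:

    import sys
    import unittest

    class ExampleTests(unittest.TestCase):
        def test_membership(self):
            self.assertIn('a', 'abc')

    suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTests)

    runner = unittest.TextTestRunner(stream=sys.stderr, descriptions=True,
                                     verbosity=2, failfast=False, buffer=False)
    result = runner.run(suite)        # prints the per-test and summary lines
    print(result.wasSuccessful())     # True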

+ 0 - 57
django/utils/unittest/signals.py

@@ -1,57 +0,0 @@
-import signal
-import weakref
-
-from django.utils.unittest.compatibility import wraps
-
-__unittest = True
-
-
-class _InterruptHandler(object):
-    def __init__(self, default_handler):
-        self.called = False
-        self.default_handler = default_handler
-
-    def __call__(self, signum, frame):
-        installed_handler = signal.getsignal(signal.SIGINT)
-        if installed_handler is not self:
-            # if we aren't the installed handler, then delegate immediately
-            # to the default handler
-            self.default_handler(signum, frame)
-
-        if self.called:
-            self.default_handler(signum, frame)
-        self.called = True
-        for result in _results.keys():
-            result.stop()
-
-_results = weakref.WeakKeyDictionary()
-def registerResult(result):
-    _results[result] = 1
-
-def removeResult(result):
-    return bool(_results.pop(result, None))
-
-_interrupt_handler = None
-def installHandler():
-    global _interrupt_handler
-    if _interrupt_handler is None:
-        default_handler = signal.getsignal(signal.SIGINT)
-        _interrupt_handler = _InterruptHandler(default_handler)
-        signal.signal(signal.SIGINT, _interrupt_handler)
-
-
-def removeHandler(method=None):
-    if method is not None:
-        @wraps(method)
-        def inner(*args, **kwargs):
-            initial = signal.getsignal(signal.SIGINT)
-            removeHandler()
-            try:
-                return method(*args, **kwargs)
-            finally:
-                signal.signal(signal.SIGINT, initial)
-        return inner
-
-    global _interrupt_handler
-    if _interrupt_handler is not None:
-        signal.signal(signal.SIGINT, _interrupt_handler.default_handler)
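The same Ctrl-C handling exists in the standard library as module-level functions; a sketch of the intended call sequence (the test run itself is elided):

    import unittest

    result = unittest.TestResult()

    # The first Ctrl-C asks every registered result to stop gracefully; a
    # second one falls through to the previously installed SIGINT handler.
    unittest.installHandler()
    unittest.registerResult(result)
    try:
        pass  # run tests against `result` here
    finally:
        unittest.removeResult(result)
        unittest.removeHandler()

    # removeHandler() also works as a decorator to restore the default
    # SIGINT behaviour around a single function.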

+ 0 - 287
django/utils/unittest/suite.py

@@ -1,287 +0,0 @@
-"""TestSuite"""
-
-import sys
-import unittest
-from django.utils.unittest import case, util
-
-__unittest = True
-
-
-class BaseTestSuite(unittest.TestSuite):
-    """A simple test suite that doesn't provide class or module shared fixtures.
-    """
-    def __init__(self, tests=()):
-        self._tests = []
-        self.addTests(tests)
-
-    def __repr__(self):
-        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
-
-    def __eq__(self, other):
-        if not isinstance(other, self.__class__):
-            return NotImplemented
-        return list(self) == list(other)
-
-    def __ne__(self, other):
-        return not self == other
-
-    # Can't guarantee hash invariant, so flag as unhashable
-    __hash__ = None
-
-    def __iter__(self):
-        return iter(self._tests)
-
-    def countTestCases(self):
-        cases = 0
-        for test in self:
-            cases += test.countTestCases()
-        return cases
-
-    def addTest(self, test):
-        # sanity checks
-        if not hasattr(test, '__call__'):
-            raise TypeError("%r is not callable" % (test,))
-        if isinstance(test, type) and issubclass(test,
-                                                 (case.TestCase, TestSuite)):
-            raise TypeError("TestCases and TestSuites must be instantiated "
-                            "before passing them to addTest()")
-        self._tests.append(test)
-
-    def addTests(self, tests):
-        if isinstance(tests, basestring):
-            raise TypeError("tests must be an iterable of tests, not a string")
-        for test in tests:
-            self.addTest(test)
-
-    def run(self, result):
-        for test in self:
-            if result.shouldStop:
-                break
-            test(result)
-        return result
-
-    def __call__(self, *args, **kwds):
-        return self.run(*args, **kwds)
-
-    def debug(self):
-        """Run the tests without collecting errors in a TestResult"""
-        for test in self:
-            test.debug()
-
-
-class TestSuite(BaseTestSuite):
-    """A test suite is a composite test consisting of a number of TestCases.
-
-    For use, create an instance of TestSuite, then add test case instances.
-    When all tests have been added, the suite can be passed to a test
-    runner, such as TextTestRunner. It will run the individual test cases
-    in the order in which they were added, aggregating the results. When
-    subclassing, do not forget to call the base class constructor.
-    """
-
-
-    def run(self, result):
-        self._wrapped_run(result)
-        self._tearDownPreviousClass(None, result)
-        self._handleModuleTearDown(result)
-        return result
-
-    def debug(self):
-        """Run the tests without collecting errors in a TestResult"""
-        debug = _DebugResult()
-        self._wrapped_run(debug, True)
-        self._tearDownPreviousClass(None, debug)
-        self._handleModuleTearDown(debug)
-
-    ################################
-    # private methods
-    def _wrapped_run(self, result, debug=False):
-        for test in self:
-            if result.shouldStop:
-                break
-
-            if _isnotsuite(test):
-                self._tearDownPreviousClass(test, result)
-                self._handleModuleFixture(test, result)
-                self._handleClassSetUp(test, result)
-                result._previousTestClass = test.__class__
-
-                if (getattr(test.__class__, '_classSetupFailed', False) or
-                    getattr(result, '_moduleSetUpFailed', False)):
-                    continue
-
-            if hasattr(test, '_wrapped_run'):
-                test._wrapped_run(result, debug)
-            elif not debug:
-                test(result)
-            else:
-                test.debug()
-
-    def _handleClassSetUp(self, test, result):
-        previousClass = getattr(result, '_previousTestClass', None)
-        currentClass = test.__class__
-        if currentClass == previousClass:
-            return
-        if result._moduleSetUpFailed:
-            return
-        if getattr(currentClass, "__unittest_skip__", False):
-            return
-
-        try:
-            currentClass._classSetupFailed = False
-        except TypeError:
-            # test may actually be a function
-            # so its class will be a builtin-type
-            pass
-
-        setUpClass = getattr(currentClass, 'setUpClass', None)
-        if setUpClass is not None:
-            try:
-                setUpClass()
-            except Exception as e:
-                if isinstance(result, _DebugResult):
-                    raise
-                currentClass._classSetupFailed = True
-                className = util.strclass(currentClass)
-                errorName = 'setUpClass (%s)' % className
-                self._addClassOrModuleLevelException(result, e, errorName)
-
-    def _get_previous_module(self, result):
-        previousModule = None
-        previousClass = getattr(result, '_previousTestClass', None)
-        if previousClass is not None:
-            previousModule = previousClass.__module__
-        return previousModule
-
-
-    def _handleModuleFixture(self, test, result):
-        previousModule = self._get_previous_module(result)
-        currentModule = test.__class__.__module__
-        if currentModule == previousModule:
-            return
-
-        self._handleModuleTearDown(result)
-
-
-        result._moduleSetUpFailed = False
-        try:
-            module = sys.modules[currentModule]
-        except KeyError:
-            return
-        setUpModule = getattr(module, 'setUpModule', None)
-        if setUpModule is not None:
-            try:
-                setUpModule()
-            except Exception as e:
-                if isinstance(result, _DebugResult):
-                    raise
-                result._moduleSetUpFailed = True
-                errorName = 'setUpModule (%s)' % currentModule
-                self._addClassOrModuleLevelException(result, e, errorName)
-
-    def _addClassOrModuleLevelException(self, result, exception, errorName):
-        error = _ErrorHolder(errorName)
-        addSkip = getattr(result, 'addSkip', None)
-        if addSkip is not None and isinstance(exception, case.SkipTest):
-            addSkip(error, str(exception))
-        else:
-            result.addError(error, sys.exc_info())
-
-    def _handleModuleTearDown(self, result):
-        previousModule = self._get_previous_module(result)
-        if previousModule is None:
-            return
-        if result._moduleSetUpFailed:
-            return
-
-        try:
-            module = sys.modules[previousModule]
-        except KeyError:
-            return
-
-        tearDownModule = getattr(module, 'tearDownModule', None)
-        if tearDownModule is not None:
-            try:
-                tearDownModule()
-            except Exception as e:
-                if isinstance(result, _DebugResult):
-                    raise
-                errorName = 'tearDownModule (%s)' % previousModule
-                self._addClassOrModuleLevelException(result, e, errorName)
-
-    def _tearDownPreviousClass(self, test, result):
-        previousClass = getattr(result, '_previousTestClass', None)
-        currentClass = test.__class__
-        if currentClass == previousClass:
-            return
-        if getattr(previousClass, '_classSetupFailed', False):
-            return
-        if getattr(result, '_moduleSetUpFailed', False):
-            return
-        if getattr(previousClass, "__unittest_skip__", False):
-            return
-
-        tearDownClass = getattr(previousClass, 'tearDownClass', None)
-        if tearDownClass is not None:
-            try:
-                tearDownClass()
-            except Exception as e:
-                if isinstance(result, _DebugResult):
-                    raise
-                className = util.strclass(previousClass)
-                errorName = 'tearDownClass (%s)' % className
-                self._addClassOrModuleLevelException(result, e, errorName)
-
-
-class _ErrorHolder(object):
-    """
-    Placeholder for a TestCase inside a result. As far as a TestResult
-    is concerned, this looks exactly like a unit test. Used to insert
-    arbitrary errors into a test suite run.
-    """
-    # Inspired by the ErrorHolder from Twisted:
-    # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
-
-    # attribute used by TestResult._exc_info_to_string
-    failureException = None
-
-    def __init__(self, description):
-        self.description = description
-
-    def id(self):
-        return self.description
-
-    def shortDescription(self):
-        return None
-
-    def __repr__(self):
-        return "<ErrorHolder description=%r>" % (self.description,)
-
-    def __str__(self):
-        return self.id()
-
-    def run(self, result):
-        # could call result.addError(...) - but this test-like object
-        # shouldn't be run anyway
-        pass
-
-    def __call__(self, result):
-        return self.run(result)
-
-    def countTestCases(self):
-        return 0
-
-def _isnotsuite(test):
-    "A crude way to tell apart testcases and suites with duck-typing"
-    try:
-        iter(test)
-    except TypeError:
-        return True
-    return False
-
-
-class _DebugResult(object):
-    "Used by the TestSuite to hold previous class when running in debug."
-    _previousTestClass = None
-    _moduleSetUpFailed = False
-    shouldStop = False
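The class- and module-level fixture machinery deleted above is what runs ``setUpClass``/``tearDownClass`` (and ``setUpModule``/``tearDownModule``) in the standard library as well; a small sketch in which the shared resource is just a stand-in object:

    import unittest

    class ResourceTests(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            # Invoked once per class by the suite, as _handleClassSetUp() shows.
            cls.resource = object()

        @classmethod
        def tearDownClass(cls):
            cls.resource = None

        def test_resource_available(self):
            self.assertIsNotNone(self.resource)

    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(ResourceTests))
    unittest.TextTestRunner(verbosity=0).run(suite)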

+ 0 - 99
django/utils/unittest/util.py

@@ -1,99 +0,0 @@
-"""Various utility functions."""
-
-__unittest = True
-
-
-_MAX_LENGTH = 80
-def safe_repr(obj, short=False):
-    try:
-        result = repr(obj)
-    except Exception:
-        result = object.__repr__(obj)
-    if not short or len(result) < _MAX_LENGTH:
-        return result
-    return result[:_MAX_LENGTH] + ' [truncated]...'
-
-def safe_str(obj):
-    try:
-        return str(obj)
-    except Exception:
-        return object.__str__(obj)
-
-def strclass(cls):
-    return "%s.%s" % (cls.__module__, cls.__name__)
-
-def sorted_list_difference(expected, actual):
-    """Finds elements in only one or the other of two, sorted input lists.
-
-    Returns a two-element tuple of lists.    The first list contains those
-    elements in the "expected" list but not in the "actual" list, and the
-    second contains those elements in the "actual" list but not in the
-    "expected" list.    Duplicate elements in either input list are ignored.
-    """
-    i = j = 0
-    missing = []
-    unexpected = []
-    while True:
-        try:
-            e = expected[i]
-            a = actual[j]
-            if e < a:
-                missing.append(e)
-                i += 1
-                while expected[i] == e:
-                    i += 1
-            elif e > a:
-                unexpected.append(a)
-                j += 1
-                while actual[j] == a:
-                    j += 1
-            else:
-                i += 1
-                try:
-                    while expected[i] == e:
-                        i += 1
-                finally:
-                    j += 1
-                    while actual[j] == a:
-                        j += 1
-        except IndexError:
-            missing.extend(expected[i:])
-            unexpected.extend(actual[j:])
-            break
-    return missing, unexpected
-
-def unorderable_list_difference(expected, actual, ignore_duplicate=False):
-    """Same behavior as sorted_list_difference but
-    for lists of unorderable items (like dicts).
-
-    As it does a linear search per item (remove) it
-    has O(n*n) performance.
-    """
-    missing = []
-    unexpected = []
-    while expected:
-        item = expected.pop()
-        try:
-            actual.remove(item)
-        except ValueError:
-            missing.append(item)
-        if ignore_duplicate:
-            for lst in expected, actual:
-                try:
-                    while True:
-                        lst.remove(item)
-                except ValueError:
-                    pass
-    if ignore_duplicate:
-        while actual:
-            item = actual.pop()
-            unexpected.append(item)
-            try:
-                while True:
-                    actual.remove(item)
-            except ValueError:
-                pass
-        return missing, unexpected
-
-    # anything left in actual is unexpected
-    return missing, actual
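Helpers of this kind back the unordered-collection assertions in the standard library, where the public entry point is ``assertItemsEqual`` on Python 2.7 (renamed ``assertCountEqual`` on Python 3). A sketch that works on either version:

    import unittest

    class CollectionTests(unittest.TestCase):
        def test_same_elements_any_order(self):
            # Unordered, duplicate-aware comparison -- the kind of check
            # sorted_list_difference()/unorderable_list_difference() support.
            compare = getattr(self, 'assertCountEqual', None) or self.assertItemsEqual
            compare([1, 2, 2, 3], [3, 2, 1, 2])

    if __name__ == '__main__':
        unittest.main()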

+ 5 - 0
docs/internals/deprecation.txt

@@ -407,6 +407,11 @@ these changes.
 * The ``Model._meta.get_(add|change|delete)_permission`` methods will
   be removed.
 
+1.9
+---
+
+* ``django.utils.unittest`` will be removed.
+
 2.0
 ---
 

+ 41 - 0
docs/releases/1.7.txt

@@ -0,0 +1,41 @@
+============================================
+Django 1.7 release notes - UNDER DEVELOPMENT
+============================================
+
+Welcome to Django 1.7!
+
+These release notes cover the `new features`_, as well as some `backwards
+incompatible changes`_ you'll want to be aware of when upgrading from Django
+1.6 or older versions. We've also dropped some features, which are detailed in
+:doc:`our deprecation plan </internals/deprecation>`, and we've `begun the
+deprecation process for some features`_.
+
+.. _`new features`: `What's new in Django 1.7`_
+.. _`backwards incompatible changes`: `Backwards incompatible changes in 1.7`_
+.. _`begun the deprecation process for some features`: `Features deprecated in 1.7`_
+
+What's new in Django 1.7
+========================
+
+Backwards incompatible changes in 1.7
+=====================================
+
+.. warning::
+
+    In addition to the changes outlined in this section, be sure to review the
+    :doc:`deprecation plan </internals/deprecation>` for any features that
+    have been removed. If you haven't updated your code within the
+    deprecation timeline for a given feature, its removal may appear as a
+    backwards incompatible change.
+
+Features deprecated in 1.7
+==========================
+
+``django.utils.unittest``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``django.utils.unittest`` provided uniform access to the ``unittest2`` library
+on all Python versions. Since ``unittest2`` became the standard library's
+:mod:`unittest` module in Python 2.7, and Django 1.7 drops support for older
+Python versions, this module is no longer useful and has been deprecated. Use
+:mod:`unittest` instead.
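The migration this note asks for is a one-line import change; a sketch with an illustrative test case:

    # Before (issues a PendingDeprecationWarning on import in Django 1.7):
    #   from django.utils import unittest

    # After -- the standard library module is a drop-in replacement:
    import unittest

    class RegressionTests(unittest.TestCase):
        def test_unchanged_behaviour(self):
            self.assertEqual(sum([1, 2, 3]), 6)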

+ 21 - 229
docs/topics/testing/_images/django_unittest_classes_hierarchy.graffle

@@ -7,14 +7,14 @@
 	<key>ApplicationVersion</key>
 	<array>
 		<string>com.omnigroup.OmniGrafflePro</string>
-		<string>139.16.0.171715</string>
+		<string>139.15.0.171074</string>
 	</array>
 	<key>AutoAdjust</key>
 	<true/>
 	<key>BackgroundGraphic</key>
 	<dict>
 		<key>Bounds</key>
-		<string>{{0, 0}, {559.28997802734375, 782.8900146484375}}</string>
+		<string>{{0, 0}, {559, 783}}</string>
 		<key>Class</key>
 		<string>SolidGraphic</string>
 		<key>ID</key>
@@ -96,53 +96,6 @@
 				<integer>2</integer>
 			</dict>
 		</dict>
-		<dict>
-			<key>Class</key>
-			<string>LineGraphic</string>
-			<key>Head</key>
-			<dict>
-				<key>ID</key>
-				<integer>12</integer>
-				<key>Info</key>
-				<integer>1</integer>
-			</dict>
-			<key>ID</key>
-			<integer>27</integer>
-			<key>OrthogonalBarAutomatic</key>
-			<true/>
-			<key>OrthogonalBarPoint</key>
-			<string>{0, 0}</string>
-			<key>OrthogonalBarPosition</key>
-			<real>-1</real>
-			<key>Points</key>
-			<array>
-				<string>{135, 270}</string>
-				<string>{369, 225}</string>
-			</array>
-			<key>Style</key>
-			<dict>
-				<key>stroke</key>
-				<dict>
-					<key>HeadArrow</key>
-					<string>UMLInheritance</string>
-					<key>HeadScale</key>
-					<real>0.79999995231628418</real>
-					<key>Legacy</key>
-					<true/>
-					<key>LineType</key>
-					<integer>2</integer>
-					<key>TailArrow</key>
-					<string>0</string>
-				</dict>
-			</dict>
-			<key>Tail</key>
-			<dict>
-				<key>ID</key>
-				<integer>26</integer>
-				<key>Position</key>
-				<real>0.5</real>
-			</dict>
-		</dict>
 		<dict>
 			<key>Class</key>
 			<string>LineGraphic</string>
@@ -162,7 +115,7 @@
 			<key>Points</key>
 			<array>
 				<string>{135, 315}</string>
-				<string>{135, 225}</string>
+				<string>{135, 261}</string>
 			</array>
 			<key>Style</key>
 			<dict>
@@ -274,144 +227,7 @@
 		</dict>
 		<dict>
 			<key>Bounds</key>
-			<string>{{378, 252}, {81, 27}}</string>
-			<key>Class</key>
-			<string>ShapedGraphic</string>
-			<key>FontInfo</key>
-			<dict>
-				<key>Font</key>
-				<string>Helvetica</string>
-				<key>Size</key>
-				<real>12</real>
-			</dict>
-			<key>ID</key>
-			<integer>22</integer>
-			<key>Shape</key>
-			<string>NoteShape</string>
-			<key>Style</key>
-			<dict>
-				<key>stroke</key>
-				<dict>
-					<key>Color</key>
-					<dict>
-						<key>b</key>
-						<string>0</string>
-						<key>g</key>
-						<string>0.501961</string>
-						<key>r</key>
-						<string>0</string>
-					</dict>
-				</dict>
-			</dict>
-			<key>Text</key>
-			<dict>
-				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
-\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
-{\colortbl;\red255\green255\blue255;\red0\green128\blue0;}
-\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
-
-\f0\i\fs24 \cf2 Python &lt; 2.7}</string>
-				<key>VerticalPad</key>
-				<integer>0</integer>
-			</dict>
-			<key>TextRelativeArea</key>
-			<string>{{0, 0}, {1, 1}}</string>
-		</dict>
-		<dict>
-			<key>Bounds</key>
-			<string>{{45, 252}, {81, 27}}</string>
-			<key>Class</key>
-			<string>ShapedGraphic</string>
-			<key>FontInfo</key>
-			<dict>
-				<key>Font</key>
-				<string>Helvetica</string>
-				<key>Size</key>
-				<real>12</real>
-			</dict>
-			<key>ID</key>
-			<integer>20</integer>
-			<key>Shape</key>
-			<string>NoteShape</string>
-			<key>Style</key>
-			<dict>
-				<key>stroke</key>
-				<dict>
-					<key>Color</key>
-					<dict>
-						<key>b</key>
-						<string>0</string>
-						<key>g</key>
-						<string>0.501961</string>
-						<key>r</key>
-						<string>0</string>
-					</dict>
-				</dict>
-			</dict>
-			<key>Text</key>
-			<dict>
-				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
-\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
-{\colortbl;\red255\green255\blue255;\red0\green128\blue0;}
-\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
-
-\f0\i\fs24 \cf2 Python \uc0\u8805  2.7}</string>
-				<key>VerticalPad</key>
-				<integer>0</integer>
-			</dict>
-		</dict>
-		<dict>
-			<key>Bounds</key>
-			<string>{{288, 198}, {162, 27}}</string>
-			<key>Class</key>
-			<string>ShapedGraphic</string>
-			<key>ID</key>
-			<integer>12</integer>
-			<key>Magnets</key>
-			<array>
-				<string>{0, 1}</string>
-				<string>{0, -1}</string>
-				<string>{1, 0}</string>
-				<string>{-1, 0}</string>
-			</array>
-			<key>Shape</key>
-			<string>Rectangle</string>
-			<key>Style</key>
-			<dict>
-				<key>fill</key>
-				<dict>
-					<key>FillType</key>
-					<integer>2</integer>
-					<key>GradientAngle</key>
-					<real>90</real>
-					<key>GradientColor</key>
-					<dict>
-						<key>w</key>
-						<string>0.666667</string>
-					</dict>
-				</dict>
-				<key>stroke</key>
-				<dict>
-					<key>CornerRadius</key>
-					<real>5</real>
-				</dict>
-			</dict>
-			<key>Text</key>
-			<dict>
-				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
-\cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
-{\colortbl;\red255\green255\blue255;}
-\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
-
-\f0\fs24 \cf0 TestCase}</string>
-			</dict>
-		</dict>
-		<dict>
-			<key>Bounds</key>
-			<string>{{54, 198}, {162, 27}}</string>
+			<string>{{54, 234}, {162, 27}}</string>
 			<key>Class</key>
 			<string>ShapedGraphic</string>
 			<key>ID</key>
@@ -448,7 +264,7 @@
 			<key>Text</key>
 			<dict>
 				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
+				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf390
 \cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
 {\colortbl;\red255\green255\blue255;}
 \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
@@ -495,7 +311,7 @@
 			<key>Text</key>
 			<dict>
 				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
+				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf390
 \cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
 {\colortbl;\red255\green255\blue255;}
 \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
@@ -542,7 +358,7 @@
 			<key>Text</key>
 			<dict>
 				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
+				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf390
 \cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
 {\colortbl;\red255\green255\blue255;}
 \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
@@ -589,7 +405,7 @@
 			<key>Text</key>
 			<dict>
 				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
+				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf390
 \cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
 {\colortbl;\red255\green255\blue255;}
 \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
@@ -636,7 +452,7 @@
 			<key>Text</key>
 			<dict>
 				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
+				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf390
 \cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
 {\colortbl;\red255\green255\blue255;}
 \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc
@@ -660,7 +476,7 @@
 				<key>Align</key>
 				<integer>2</integer>
 				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
+				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf390
 \cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
 {\colortbl;\red255\green255\blue255;}
 \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qr
@@ -672,7 +488,7 @@
 		</dict>
 		<dict>
 			<key>Bounds</key>
-			<string>{{18, 153}, {225, 90}}</string>
+			<string>{{18, 216}, {468, 63}}</string>
 			<key>Class</key>
 			<string>ShapedGraphic</string>
 			<key>ID</key>
@@ -686,40 +502,16 @@
 				<key>Align</key>
 				<integer>2</integer>
 				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
-\cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
+				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf390
+\cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier-Oblique;\f1\fmodern\fcharset0 Courier;}
 {\colortbl;\red255\green255\blue255;}
 \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qr
 
-\f0\fs24 \cf0 django.utils.unittest\
-= unittest (standard library)}</string>
-			</dict>
-			<key>TextPlacement</key>
-			<integer>0</integer>
-		</dict>
-		<dict>
-			<key>Bounds</key>
-			<string>{{261, 153}, {225, 90}}</string>
-			<key>Class</key>
-			<string>ShapedGraphic</string>
-			<key>ID</key>
-			<integer>19</integer>
-			<key>Shape</key>
-			<string>Rectangle</string>
-			<key>Style</key>
-			<dict/>
-			<key>Text</key>
-			<dict>
-				<key>Align</key>
-				<integer>2</integer>
-				<key>Text</key>
-				<string>{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf340
-\cocoascreenfonts1{\fonttbl\f0\fmodern\fcharset0 Courier;}
-{\colortbl;\red255\green255\blue255;}
-\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qr
+\f0\i\fs24 \cf0 standard library\
 
-\f0\fs24 \cf0 django.utils.unittest\
-= unittest2 (bundled copy)}</string>
+\f1\i0 \
+\
+unittest}</string>
 			</dict>
 			<key>TextPlacement</key>
 			<integer>0</integer>
@@ -777,7 +569,7 @@
 	<key>MasterSheets</key>
 	<array/>
 	<key>ModificationDate</key>
-	<string>2012-12-16 19:08:28 +0000</string>
+	<string>2013-07-01 11:48:06 +0000</string>
 	<key>Modifier</key>
 	<string>Aymeric Augustin</string>
 	<key>NotesVisible</key>
@@ -808,7 +600,7 @@
 		<key>NSPaperSize</key>
 		<array>
 			<string>size</string>
-			<string>{595.28997802734375, 841.8900146484375}</string>
+			<string>{595, 842}</string>
 		</array>
 		<key>NSPrintReverseOrientation</key>
 		<array>
@@ -853,7 +645,7 @@
 		<key>ExpandedCanvases</key>
 		<array/>
 		<key>Frame</key>
-		<string>{{9, 4}, {694, 874}}</string>
+		<string>{{613, 284}, {694, 774}}</string>
 		<key>ListView</key>
 		<true/>
 		<key>OutlineWidth</key>
@@ -867,7 +659,7 @@
 		<key>SidebarWidth</key>
 		<integer>120</integer>
 		<key>VisibleRegion</key>
-		<string>{{0, 0}, {559, 735}}</string>
+		<string>{{0, 0}, {545, 620}}</string>
 		<key>Zoom</key>
 		<real>1</real>
 		<key>ZoomValues</key>

BIN
docs/topics/testing/_images/django_unittest_classes_hierarchy.pdf


File diff suppressed because it is too large
+ 0 - 0
docs/topics/testing/_images/django_unittest_classes_hierarchy.svg


+ 9 - 24
docs/topics/testing/overview.txt

@@ -21,27 +21,15 @@ module defines tests using a class-based approach.
 
 .. admonition:: unittest2
 
-    Python 2.7 introduced some major changes to the ``unittest`` library,
-    adding some extremely useful features. To ensure that every Django
-    project can benefit from these new features, Django ships with a
-    copy of unittest2_, a copy of Python 2.7's ``unittest``, backported for
-    Python 2.6 compatibility.
-
-    To access this library, Django provides the
-    ``django.utils.unittest`` module alias. If you are using Python
-    2.7, or you have installed ``unittest2`` locally, Django will map the alias
-    to it. Otherwise, Django will use its own bundled version of ``unittest2``.
-
-    To use this alias, simply use::
-
-        from django.utils import unittest
+    .. deprecated:: 1.7
 
-    wherever you would have historically used::
-
-        import unittest
+    Python 2.7 introduced some major changes to the ``unittest`` library,
+    adding some extremely useful features. To ensure that every Django project
+    could benefit from these new features, Django used to ship with a copy of
+    Python 2.7's ``unittest`` backported for Python 2.6 compatibility.
 
-    If you want to continue to use the legacy ``unittest`` library, you can --
-    you just won't get any of the nice new ``unittest2`` features.
+    Since Django no longer supports Python versions older than 2.7,
+    ``django.utils.unittest`` is deprecated. Simply use ``unittest``.
 
 .. _unittest2: http://pypi.python.org/pypi/unittest2
 
@@ -849,13 +837,10 @@ Normal Python unit test classes extend a base class of
 .. figure:: _images/django_unittest_classes_hierarchy.*
    :alt: Hierarchy of Django unit testing classes (TestCase subclasses)
    :width: 508
-   :height: 391
+   :height: 328
 
    Hierarchy of Django unit testing classes
 
-Regardless of the version of Python you're using, if you've installed
-``unittest2``, ``django.utils.unittest`` will point to that library.
-
 SimpleTestCase
 ~~~~~~~~~~~~~~
 
@@ -905,7 +890,7 @@ features like:
 then you should use :class:`~django.test.TransactionTestCase` or
 :class:`~django.test.TestCase` instead.
 
-``SimpleTestCase`` inherits from ``django.utils.unittest.TestCase``.
+``SimpleTestCase`` inherits from ``unittest.TestCase``.
 
 TransactionTestCase
 ~~~~~~~~~~~~~~~~~~~

Some files were not shown because too many files changed in this diff