
Fixed #24118 -- Added --debug-sql option for tests.

Added a --debug-sql option to the test command and runtests.py which
outputs the SQL logged during failing tests. When combined with
--verbosity=2, it also outputs the SQL for passing tests.
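
For example, as documented below, the option can be used against Django's
own basic tests:

    $ ./runtests.py basic --debug-sql --verbosity=2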

Thanks to Berker, Tim, Markus, Shai, Josh and Anssi for review and
discussion.
Author: Marc Tamlyn (10 years ago)
Commit: b5c1a85b50

django/test/runner.py (+62, -5)

@@ -1,4 +1,5 @@
 from importlib import import_module
+import logging
 import os
 import unittest
 from unittest import TestSuite, defaultTestLoader
@@ -8,6 +9,47 @@ from django.core.exceptions import ImproperlyConfigured
 from django.test import SimpleTestCase, TestCase
 from django.test.utils import setup_test_environment, teardown_test_environment
 from django.utils.datastructures import OrderedSet
+from django.utils.six import StringIO
+
+
+class DebugSQLTextTestResult(unittest.TextTestResult):
+    def __init__(self, stream, descriptions, verbosity):
+        self.logger = logging.getLogger('django.db.backends')
+        self.logger.setLevel(logging.DEBUG)
+        super(DebugSQLTextTestResult, self).__init__(stream, descriptions, verbosity)
+
+    def startTest(self, test):
+        self.debug_sql_stream = StringIO()
+        self.handler = logging.StreamHandler(self.debug_sql_stream)
+        self.logger.addHandler(self.handler)
+        super(DebugSQLTextTestResult, self).startTest(test)
+
+    def stopTest(self, test):
+        super(DebugSQLTextTestResult, self).stopTest(test)
+        self.logger.removeHandler(self.handler)
+        if self.showAll:
+            self.debug_sql_stream.seek(0)
+            self.stream.write(self.debug_sql_stream.read())
+            self.stream.writeln(self.separator2)
+
+    def addError(self, test, err):
+        super(DebugSQLTextTestResult, self).addError(test, err)
+        self.debug_sql_stream.seek(0)
+        self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
+
+    def addFailure(self, test, err):
+        super(DebugSQLTextTestResult, self).addFailure(test, err)
+        self.debug_sql_stream.seek(0)
+        self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
+
+    def printErrorList(self, flavour, errors):
+        for test, err, sql_debug in errors:
+            self.stream.writeln(self.separator1)
+            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
+            self.stream.writeln(self.separator2)
+            self.stream.writeln("%s" % err)
+            self.stream.writeln(self.separator2)
+            self.stream.writeln("%s" % sql_debug)
 
 
 class DiscoverRunner(object):
@@ -20,9 +62,9 @@ class DiscoverRunner(object):
     test_loader = defaultTestLoader
     reorder_by = (TestCase, SimpleTestCase)
 
-    def __init__(self, pattern=None, top_level=None,
-                 verbosity=1, interactive=True, failfast=False, keepdb=False, reverse=False,
-                 **kwargs):
+    def __init__(self, pattern=None, top_level=None, verbosity=1,
+                 interactive=True, failfast=False, keepdb=False,
+                 reverse=False, debug_sql=False, **kwargs):
 
         self.pattern = pattern
         self.top_level = top_level
@@ -32,6 +74,7 @@ class DiscoverRunner(object):
         self.failfast = failfast
         self.keepdb = keepdb
         self.reverse = reverse
+        self.debug_sql = debug_sql
 
     @classmethod
     def add_arguments(cls, parser):
@@ -47,6 +90,9 @@ class DiscoverRunner(object):
         parser.add_argument('-r', '--reverse', action='store_true', dest='reverse',
             default=False,
             help='Reverses test cases order.')
+        parser.add_argument('-d', '--debug-sql', action='store_true', dest='debug_sql',
+            default=False,
+            help='Prints logged SQL queries on failure.')
 
     def setup_test_environment(self, **kwargs):
         setup_test_environment()
@@ -115,12 +161,20 @@ class DiscoverRunner(object):
         return reorder_suite(suite, self.reorder_by, self.reverse)
 
     def setup_databases(self, **kwargs):
-        return setup_databases(self.verbosity, self.interactive, self.keepdb, **kwargs)
+        return setup_databases(
+            self.verbosity, self.interactive, self.keepdb, self.debug_sql,
+            **kwargs
+        )
+
+    def get_resultclass(self):
+        return DebugSQLTextTestResult if self.debug_sql else None
 
     def run_suite(self, suite, **kwargs):
+        resultclass = self.get_resultclass()
         return self.test_runner(
             verbosity=self.verbosity,
             failfast=self.failfast,
+            resultclass=resultclass,
         ).run(suite)
 
     def teardown_databases(self, old_config, **kwargs):
@@ -266,7 +320,7 @@ def partition_suite(suite, classes, bins, reverse=False):
                 bins[-1].add(test)
 
 
-def setup_databases(verbosity, interactive, keepdb=False, **kwargs):
+def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, **kwargs):
     from django.db import connections, DEFAULT_DB_ALIAS
 
     # First pass -- work out which databases actually need to be created,
@@ -326,4 +380,7 @@ def setup_databases(verbosity, interactive, keepdb=False, **kwargs):
         connections[alias].settings_dict['NAME'] = (
             connections[mirror_alias].settings_dict['NAME'])
 
+    if debug_sql:
+        for alias in connections:
+            connections[alias].force_debug_cursor = True
     return old_names, mirrors
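
The capture above is plain stdlib logging plumbing: a ``StreamHandler``
backed by an in-memory buffer is attached to the ``django.db.backends``
logger for the duration of each test, while ``force_debug_cursor`` (set in
``setup_databases``) is what makes the database backends emit query logs in
the first place. A minimal standalone sketch of the same pattern, outside
Django; the logger name and the logged message are illustrative only:

.. code-block:: python

    import logging
    from io import StringIO

    logger = logging.getLogger('django.db.backends')
    logger.setLevel(logging.DEBUG)

    def run_with_captured_logging(func):
        # Attach a temporary handler, run the callable, detach, and
        # return everything the logger emitted in between.
        buf = StringIO()
        handler = logging.StreamHandler(buf)
        logger.addHandler(handler)
        try:
            func()
        finally:
            logger.removeHandler(handler)
        return buf.getvalue()

    # With force_debug_cursor enabled, Django's backends would emit real
    # queries here; for this sketch we log a fake one ourselves.
    captured = run_with_captured_logging(lambda: logger.debug("SELECT 1; args=()"))
    print(captured)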

docs/internals/contributing/writing-code/unit-tests.txt (+13, -5)

@@ -286,7 +286,7 @@ For example, suppose that the failing test that works on its own is
 
 .. code-block:: bash
 
-   $ ./runtests.py --bisect basic.tests.ModelTest.test_eq
+    $ ./runtests.py --bisect basic.tests.ModelTest.test_eq
 
 will try to determine a test that interferes with the given one. First, the
 test is run with the first half of the test suite. If a failure occurs, the
@@ -302,7 +302,7 @@ failure. So:
 
 .. code-block:: bash
 
-   $ ./runtests.py --pair basic.tests.ModelTest.test_eq
+    $ ./runtests.py --pair basic.tests.ModelTest.test_eq
 
 will pair ``test_eq`` with every test label.
 
@@ -313,7 +313,7 @@ the first one:
 
 .. code-block:: bash
 
-   $ ./runtests.py --pair basic.tests.ModelTest.test_eq queries transactions
+    $ ./runtests.py --pair basic.tests.ModelTest.test_eq queries transactions
 
 You can also try running any set of tests in reverse using the ``--reverse``
 option in order to verify that executing tests in a different order does not
@@ -321,8 +321,16 @@ cause any trouble:
 
 .. code-block:: bash
 
-   $ ./runtests.py basic --reverse
+    $ ./runtests.py basic --reverse
+
+If you wish to examine the SQL being run in failing tests, you can turn on
+:ref:`SQL logging <django-db-logger>` using the ``--debug-sql`` option. If you
+combine this with ``--verbosity=2``, all SQL queries will be output.
+
+.. code-block:: bash
+
+    $ ./runtests.py basic --debug-sql
 
 .. versionadded:: 1.8
 
-    The ``--reverse`` option was added.
+    The ``--reverse`` and ``--debug-sql`` options were added.

docs/ref/django-admin.txt (+8, -0)

@@ -1449,6 +1449,14 @@ This may help in debugging tests that aren't properly isolated and have side
 effects. :ref:`Grouping by test class <order-of-tests>` is preserved when using
 this option.
 
+.. django-admin-option:: --debug-sql
+
+.. versionadded:: 1.8
+
+The ``--debug-sql`` option can be used to enable :ref:`SQL logging
+<django-db-logger>` for failing tests. If :djadminopt:`--verbosity` is ``2``,
+then queries in passing tests are also output.
+
 testserver <fixture fixture ...>
 --------------------------------
 

docs/releases/1.8.txt (+3, -2)

@@ -625,8 +625,9 @@ Tests
   allows you to test that two JSON fragments are not equal.
 
 * Added options to the :djadmin:`test` command to preserve the test database
-  (:djadminopt:`--keepdb`) and to run the test cases in reverse order
-  (:djadminopt:`--reverse`).
+  (:djadminopt:`--keepdb`), to run the test cases in reverse order
+  (:djadminopt:`--reverse`), and to enable SQL logging for failing tests
+  (:djadminopt:`--debug-sql`).
 
 * Added the :attr:`~django.test.Response.resolver_match` attribute to test
   client responses.

docs/topics/logging.txt (+2, -0)

@@ -439,6 +439,8 @@ Messages to this logger have the following extra context:
 * ``request``: The request object that generated the logging
   message.
 
+.. _django-db-logger:
+
 ``django.db.backends``
 ~~~~~~~~~~~~~~~~~~~~~~
 

docs/topics/testing/advanced.txt (+7, -2)

@@ -355,7 +355,7 @@ behavior. This class defines the ``run_tests()`` entry point, plus a
 selection of other methods that are used by ``run_tests()`` to set up,
 execute and tear down the test suite.
 
-.. class:: DiscoverRunner(pattern='test*.py', top_level=None, verbosity=1, interactive=True, failfast=False, keepdb=False, reverse=False, **kwargs)
+.. class:: DiscoverRunner(pattern='test*.py', top_level=None, verbosity=1, interactive=True, failfast=False, keepdb=False, reverse=False, debug_sql=False, **kwargs)
 
     ``DiscoverRunner`` will search for tests in any file matching ``pattern``.
 
@@ -386,6 +386,11 @@ execute and tear down the test suite.
     and have side effects. :ref:`Grouping by test class <order-of-tests>` is
     preserved when using this option.
 
+    If ``debug_sql`` is ``True``, failing test cases will output SQL queries
+    logged to the :ref:`django.db.backends logger <django-db-logger>` as well
+    as the traceback. If ``verbosity`` is ``2``, then queries in all tests are
+    output.
+
     Django may, from time to time, extend the capabilities of the test runner
     by adding new arguments. The ``**kwargs`` declaration allows for this
     expansion. If you subclass ``DiscoverRunner`` or write your own test
@@ -402,7 +407,7 @@ execute and tear down the test suite.
         subclassed test runner to add options to the list of command-line
         options that the :djadmin:`test` command could use.
 
-        The ``keepdb`` and the ``reverse`` arguments were added.
+        The ``keepdb``, ``reverse``, and ``debug_sql`` arguments were added.
 
 Attributes
 ~~~~~~~~~~
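
Programmatic use mirrors the new test file (tests/test_runner/test_debug_sql.py,
below): pass ``debug_sql=True`` when constructing the runner. A minimal
sketch, assuming already-configured settings and a placeholder
``myapp.tests`` label:

.. code-block:: python

    from django.test.runner import DiscoverRunner

    # debug_sql=True routes logged SQL into the failure output; with
    # verbosity=2 the queries from passing tests are printed as well.
    runner = DiscoverRunner(debug_sql=True, verbosity=2)
    failures = runner.run_tests(['myapp.tests'])  # placeholder test label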

tests/runtests.py (+7, -2)

@@ -217,7 +217,7 @@ def teardown(state):
         setattr(settings, key, value)
 
 
-def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels):
+def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
     state = setup(verbosity, test_labels)
     extra_tests = []
 
@@ -232,6 +232,7 @@ def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels)
         failfast=failfast,
         keepdb=keepdb,
         reverse=reverse,
+        debug_sql=debug_sql,
     )
     # Catch warnings thrown in test DB setup -- remove in Django 1.9
     with warnings.catch_warnings():
@@ -386,6 +387,9 @@ if __name__ == "__main__":
     parser.add_argument(
         '--selenium', action='store_true', dest='selenium', default=False,
         help='Run the Selenium tests as well (if Selenium is installed)')
+    parser.add_argument(
+        '--debug-sql', action='store_true', dest='debug_sql', default=False,
+        help='Turn on the SQL query logger within tests')
     options = parser.parse_args()
 
     # mock is a required dependency
@@ -421,6 +425,7 @@ if __name__ == "__main__":
     else:
         failures = django_tests(options.verbosity, options.interactive,
                                 options.failfast, options.keepdb,
-                                options.reverse, options.modules)
+                                options.reverse, options.modules,
+                                options.debug_sql)
         if failures:
             sys.exit(bool(failures))

tests/test_runner/test_debug_sql.py (+102, -0)

@@ -0,0 +1,102 @@
+import unittest
+
+from django.db import connection
+from django.test import TestCase
+from django.test.runner import DiscoverRunner
+from django.utils import six
+
+from .models import Person
+
+
+@unittest.skipUnless(connection.vendor == 'sqlite', 'Only run on sqlite so we can check output SQL.')
+class TestDebugSQL(unittest.TestCase):
+
+    class PassingTest(TestCase):
+        def runTest(self):
+            Person.objects.filter(first_name='pass').count()
+
+    class FailingTest(TestCase):
+        def runTest(self):
+            Person.objects.filter(first_name='fail').count()
+            self.fail()
+
+    class ErrorTest(TestCase):
+        def runTest(self):
+            Person.objects.filter(first_name='error').count()
+            raise Exception
+
+    def _test_output(self, verbosity):
+        runner = DiscoverRunner(debug_sql=True, verbosity=0)
+        suite = runner.test_suite()
+        suite.addTest(self.FailingTest())
+        suite.addTest(self.ErrorTest())
+        suite.addTest(self.PassingTest())
+        old_config = runner.setup_databases()
+        stream = six.StringIO()
+        resultclass = runner.get_resultclass()
+        runner.test_runner(
+            verbosity=verbosity,
+            stream=stream,
+            resultclass=resultclass,
+        ).run(suite)
+        runner.teardown_databases(old_config)
+
+        stream.seek(0)
+        return stream.read()
+
+    def test_output_normal(self):
+        full_output = self._test_output(1)
+        for output in self.expected_outputs:
+            self.assertIn(output, full_output)
+        for output in self.verbose_expected_outputs:
+            self.assertNotIn(output, full_output)
+
+    def test_output_verbose(self):
+        full_output = self._test_output(2)
+        for output in self.expected_outputs:
+            self.assertIn(output, full_output)
+        for output in self.verbose_expected_outputs:
+            self.assertIn(output, full_output)
+
+    if six.PY3:
+        expected_outputs = [
+            ('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
+                '''FROM "test_runner_person" WHERE '''
+                '''"test_runner_person"."first_name" = %s' '''
+                '''- PARAMS = ('*', 'error');'''),
+            ('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
+                '''FROM "test_runner_person" WHERE '''
+                '''"test_runner_person"."first_name" = %s' '''
+                '''- PARAMS = ('*', 'fail');'''),
+        ]
+    else:
+        expected_outputs = [
+            ('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
+                '''FROM "test_runner_person" WHERE '''
+                '''"test_runner_person"."first_name" = %s' '''
+                '''- PARAMS = (u'*', u'error');'''),
+            ('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
+                '''FROM "test_runner_person" WHERE '''
+                '''"test_runner_person"."first_name" = %s' '''
+                '''- PARAMS = (u'*', u'fail');'''),
+        ]
+
+    verbose_expected_outputs = [
+        'runTest (test_runner.test_debug_sql.FailingTest) ... FAIL',
+        'runTest (test_runner.test_debug_sql.ErrorTest) ... ERROR',
+        'runTest (test_runner.test_debug_sql.PassingTest) ... ok',
+    ]
+    if six.PY3:
+        verbose_expected_outputs += [
+            ('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
+                '''FROM "test_runner_person" WHERE '''
+                '''"test_runner_person"."first_name" = %s' '''
+                '''- PARAMS = ('*', 'pass');'''),
+        ]
+    else:
+        verbose_expected_outputs += [
+            ('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
+                '''FROM "test_runner_person" WHERE '''
+                '''"test_runner_person"."first_name" = %s' '''
+                '''- PARAMS = (u'*', u'pass');'''),
+        ]