diff --git a/python/helpers/coveragepy/coverage/__init__.py b/python/helpers/coveragepy/coverage/__init__.py index 193b7a107ebd..192239926b7f 100644 --- a/python/helpers/coveragepy/coverage/__init__.py +++ b/python/helpers/coveragepy/coverage/__init__.py @@ -1,3 +1,6 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """Code coverage measurement for Python. Ned Batchelder @@ -5,73 +8,16 @@ http://nedbatchelder.com/code/coverage """ -from coverage.version import __version__, __url__ +from coverage.version import __version__, __url__, version_info -from coverage.control import coverage, process_startup +from coverage.control import Coverage, process_startup from coverage.data import CoverageData -from coverage.cmdline import main, CoverageScript from coverage.misc import CoverageException +from coverage.plugin import CoveragePlugin, FileTracer, FileReporter +from coverage.pytracer import PyTracer -# Module-level functions. The original API to this module was based on -# functions defined directly in the module, with a singleton of the coverage() -# class. That design hampered programmability, so the current api uses -# explicitly-created coverage objects. But for backward compatibility, here we -# define the top-level functions to create the singleton when they are first -# called. - -# Singleton object for use with module-level functions. The singleton is -# created as needed when one of the module-level functions is called. -_the_coverage = None - -def _singleton_method(name): - """Return a function to the `name` method on a singleton `coverage` object. - - The singleton object is created the first time one of these functions is - called. - - """ - # Disable pylint msg W0612, because a bunch of variables look unused, but - # they're accessed via locals(). 
- # pylint: disable=W0612 - - def wrapper(*args, **kwargs): - """Singleton wrapper around a coverage method.""" - global _the_coverage - if not _the_coverage: - _the_coverage = coverage(auto_data=True) - return getattr(_the_coverage, name)(*args, **kwargs) - - import inspect - meth = getattr(coverage, name) - args, varargs, kw, defaults = inspect.getargspec(meth) - argspec = inspect.formatargspec(args[1:], varargs, kw, defaults) - docstring = meth.__doc__ - wrapper.__doc__ = ("""\ - A first-use-singleton wrapper around coverage.%(name)s. - - This wrapper is provided for backward compatibility with legacy code. - New code should use coverage.%(name)s directly. - - %(name)s%(argspec)s: - - %(docstring)s - """ % locals() - ) - - return wrapper - - -# Define the module-level functions. -use_cache = _singleton_method('use_cache') -start = _singleton_method('start') -stop = _singleton_method('stop') -erase = _singleton_method('erase') -exclude = _singleton_method('exclude') -analysis = _singleton_method('analysis') -analysis2 = _singleton_method('analysis2') -report = _singleton_method('report') -annotate = _singleton_method('annotate') - +# Backward compatibility. +coverage = Coverage # On Windows, we encode and decode deep enough that something goes wrong and # the encodings.utf_8 module is loaded and then unloaded, I don't know why. @@ -87,34 +33,3 @@ try: del sys.modules['coverage.coverage'] except KeyError: pass - - -# COPYRIGHT AND LICENSE -# -# Copyright 2001 Gareth Rees. All rights reserved. -# Copyright 2004-2013 Ned Batchelder. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# 2. 
Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS -# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -# DAMAGE. 
diff --git a/python/helpers/coveragepy/coverage/__main__.py b/python/helpers/coveragepy/coverage/__main__.py index 55e0d259e04a..35ab87a56bf4 100644 --- a/python/helpers/coveragepy/coverage/__main__.py +++ b/python/helpers/coveragepy/coverage/__main__.py @@ -1,4 +1,8 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """Coverage.py's main entry point.""" + import sys from coverage.cmdline import main sys.exit(main()) diff --git a/python/helpers/coveragepy/coverage/annotate.py b/python/helpers/coveragepy/coverage/annotate.py index 5c396784445c..4060450fffb3 100644 --- a/python/helpers/coveragepy/coverage/annotate.py +++ b/python/helpers/coveragepy/coverage/annotate.py @@ -1,10 +1,19 @@ -"""Source file annotation for Coverage.""" +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt -import os, re +"""Source file annotation for coverage.py.""" -from coverage.backward import sorted # pylint: disable=W0622 +import io +import os +import re + +from coverage.files import flat_rootname +from coverage.misc import isolate_module from coverage.report import Reporter +os = isolate_module(os) + + class AnnotateReporter(Reporter): """Generate annotated source files showing line coverage. @@ -42,61 +51,53 @@ class AnnotateReporter(Reporter): """ self.report_files(self.annotate_file, morfs, directory) - def annotate_file(self, cu, analysis): + def annotate_file(self, fr, analysis): """Annotate a single file. - `cu` is the CodeUnit for the file to annotate. + `fr` is the FileReporter for the file to annotate. 
""" - if not cu.relative: - return - - filename = cu.filename - source = cu.source_file() - if self.directory: - dest_file = os.path.join(self.directory, cu.flat_rootname()) - dest_file += ".py,cover" - else: - dest_file = filename + ",cover" - dest = open(dest_file, 'w') - statements = sorted(analysis.statements) missing = sorted(analysis.missing) excluded = sorted(analysis.excluded) - lineno = 0 - i = 0 - j = 0 - covered = True - while True: - line = source.readline() - if line == '': - break - lineno += 1 - while i < len(statements) and statements[i] < lineno: - i += 1 - while j < len(missing) and missing[j] < lineno: - j += 1 - if i < len(statements) and statements[i] == lineno: - covered = j >= len(missing) or missing[j] > lineno - if self.blank_re.match(line): - dest.write(' ') - elif self.else_re.match(line): - # Special logic for lines containing only 'else:'. - if i >= len(statements) and j >= len(missing): - dest.write('! ') - elif i >= len(statements) or j >= len(missing): - dest.write('> ') - elif statements[i] == missing[j]: - dest.write('! ') + if self.directory: + dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) + if dest_file.endswith("_py"): + dest_file = dest_file[:-3] + ".py" + dest_file += ",cover" + else: + dest_file = fr.filename + ",cover" + + with io.open(dest_file, 'w', encoding='utf8') as dest: + i = 0 + j = 0 + covered = True + source = fr.source() + for lineno, line in enumerate(source.splitlines(True), start=1): + while i < len(statements) and statements[i] < lineno: + i += 1 + while j < len(missing) and missing[j] < lineno: + j += 1 + if i < len(statements) and statements[i] == lineno: + covered = j >= len(missing) or missing[j] > lineno + if self.blank_re.match(line): + dest.write(u' ') + elif self.else_re.match(line): + # Special logic for lines containing only 'else:'. + if i >= len(statements) and j >= len(missing): + dest.write(u'! 
') + elif i >= len(statements) or j >= len(missing): + dest.write(u'> ') + elif statements[i] == missing[j]: + dest.write(u'! ') + else: + dest.write(u'> ') + elif lineno in excluded: + dest.write(u'- ') + elif covered: + dest.write(u'> ') else: - dest.write('> ') - elif lineno in excluded: - dest.write('- ') - elif covered: - dest.write('> ') - else: - dest.write('! ') - dest.write(line) - source.close() - dest.close() + dest.write(u'! ') + + dest.write(line) diff --git a/python/helpers/coveragepy/coverage/backunittest.py b/python/helpers/coveragepy/coverage/backunittest.py new file mode 100644 index 000000000000..09574ccb61d1 --- /dev/null +++ b/python/helpers/coveragepy/coverage/backunittest.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Implementations of unittest features from the future.""" + +# Use unittest2 if it's available, otherwise unittest. This gives us +# back-ported features for 2.6. +try: + import unittest2 as unittest +except ImportError: + import unittest + + +def unittest_has(method): + """Does `unittest.TestCase` have `method` defined?""" + return hasattr(unittest.TestCase, method) + + +class TestCase(unittest.TestCase): + """Just like unittest.TestCase, but with assert methods added. + + Designed to be compatible with 3.1 unittest. Methods are only defined if + `unittest` doesn't have them. + + """ + # pylint: disable=missing-docstring + + # Many Pythons have this method defined. But PyPy3 has a bug with it + # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our + # own implementation that works everywhere, at least for the ways we're + # calling it. 
+ def assertCountEqual(self, s1, s2): + """Assert these have the same elements, regardless of order.""" + self.assertEqual(sorted(s1), sorted(s2)) + + if not unittest_has('assertRaisesRegex'): + def assertRaisesRegex(self, *args, **kwargs): + return self.assertRaisesRegexp(*args, **kwargs) + + if not unittest_has('assertRegex'): + def assertRegex(self, *args, **kwargs): + return self.assertRegexpMatches(*args, **kwargs) diff --git a/python/helpers/coveragepy/coverage/backward.py b/python/helpers/coveragepy/coverage/backward.py index 7d2685459782..700c3ebd1c0a 100644 --- a/python/helpers/coveragepy/coverage/backward.py +++ b/python/helpers/coveragepy/coverage/backward.py @@ -1,60 +1,29 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """Add things to old Pythons so I can pretend they are newer.""" -# This file does lots of tricky stuff, so disable a bunch of lintisms. -# pylint: disable=F0401,W0611,W0622 -# F0401: Unable to import blah -# W0611: Unused import blah -# W0622: Redefining built-in blah +# This file does lots of tricky stuff, so disable a bunch of pylint warnings. +# pylint: disable=redefined-builtin +# pylint: disable=unused-import +# pxlint: disable=no-name-in-module -import os, re, sys +import sys -# Python 2.3 doesn't have `set` -try: - set = set # new in 2.4 -except NameError: - from sets import Set as set +from coverage import env -# Python 2.3 doesn't have `sorted`. -try: - sorted = sorted -except NameError: - def sorted(iterable): - """A 2.3-compatible implementation of `sorted`.""" - lst = list(iterable) - lst.sort() - return lst -# Python 2.3 doesn't have `reversed`. 
-try: - reversed = reversed -except NameError: - def reversed(iterable): - """A 2.3-compatible implementation of `reversed`.""" - lst = list(iterable) - return lst[::-1] - -# rpartition is new in 2.5 -try: - "".rpartition -except AttributeError: - def rpartition(s, sep): - """Implement s.rpartition(sep) for old Pythons.""" - i = s.rfind(sep) - if i == -1: - return ('', '', s) - else: - return (s[:i], sep, s[i+len(sep):]) -else: - def rpartition(s, sep): - """A common interface for new Pythons.""" - return s.rpartition(sep) - -# Pythons 2 and 3 differ on where to get StringIO +# Pythons 2 and 3 differ on where to get StringIO. try: from cStringIO import StringIO - BytesIO = StringIO except ImportError: - from io import StringIO, BytesIO + from io import StringIO + +# In py3, ConfigParser was renamed to the more-standard configparser +try: + import configparser +except ImportError: + import ConfigParser as configparser # What's a string called? try: @@ -62,6 +31,12 @@ try: except NameError: string_class = str +# What's a Unicode string called? +try: + unicode_class = unicode +except NameError: + unicode_class = str + # Where do pickles come from? try: import cPickle as pickle @@ -72,7 +47,16 @@ except ImportError: try: range = xrange except NameError: - range = range + range = range # pylint: disable=redefined-variable-type + +# shlex.quote is new, but there's an undocumented implementation in "pipes", +# who knew!? +try: + from shlex import quote as shlex_quote +except ImportError: + # Useful function, available under a different (undocumented) name + # in Python versions earlier than 3.3. + from pipes import quote as shlex_quote # A function to iterate listlessly over a dict's items. 
try: @@ -86,71 +70,32 @@ else: """Produce the items from dict `d`.""" return d.iteritems() -# Exec is a statement in Py2, a function in Py3 -if sys.version_info >= (3, 0): - def exec_code_object(code, global_map): - """A wrapper around exec().""" - exec(code, global_map) +# Getting the `next` function from an iterator is different in 2 and 3. +try: + iter([]).next +except AttributeError: + def iternext(seq): + """Get the `next` function for iterating over `seq`.""" + return iter(seq).__next__ else: - # OK, this is pretty gross. In Py2, exec was a statement, but that will - # be a syntax error if we try to put it in a Py3 file, even if it is never - # executed. So hide it inside an evaluated string literal instead. - eval( - compile( - "def exec_code_object(code, global_map):\n" - " exec code in global_map\n", - "", "exec" - ) - ) - -# Reading Python source and interpreting the coding comment is a big deal. -if sys.version_info >= (3, 0): - # Python 3.2 provides `tokenize.open`, the best way to open source files. - import tokenize - try: - open_source = tokenize.open # pylint: disable=E1101 - except AttributeError: - from io import TextIOWrapper - detect_encoding = tokenize.detect_encoding # pylint: disable=E1101 - # Copied from the 3.2 stdlib: - def open_source(fname): - """Open a file in read only mode using the encoding detected by - detect_encoding(). 
- """ - buffer = open(fname, 'rb') - encoding, _ = detect_encoding(buffer.readline) - buffer.seek(0) - text = TextIOWrapper(buffer, encoding, line_buffering=True) - text.mode = 'r' - return text -else: - def open_source(fname): - """Open a source file the best way.""" - return open(fname, "rU") - + def iternext(seq): + """Get the `next` function for iterating over `seq`.""" + return iter(seq).next # Python 3.x is picky about bytes and strings, so provide methods to # get them right, and make them no-ops in 2.x -if sys.version_info >= (3, 0): +if env.PY3: def to_bytes(s): """Convert string `s` to bytes.""" return s.encode('utf8') - def to_string(b): - """Convert bytes `b` to a string.""" - return b.decode('utf8') - def binary_bytes(byte_values): """Produce a byte string with the ints from `byte_values`.""" return bytes(byte_values) - def byte_to_int(byte_value): - """Turn an element of a bytes object into an int.""" - return byte_value - def bytes_to_ints(bytes_value): """Turn a bytes object into a sequence of ints.""" - # In Py3, iterating bytes gives ints. + # In Python 3, iterating bytes gives ints. return bytes_value else: @@ -158,27 +103,70 @@ else: """Convert string `s` to bytes (no-op in 2.x).""" return s - def to_string(b): - """Convert bytes `b` to a string (no-op in 2.x).""" - return b - def binary_bytes(byte_values): """Produce a byte string with the ints from `byte_values`.""" - return "".join([chr(b) for b in byte_values]) - - def byte_to_int(byte_value): - """Turn an element of a bytes object into an int.""" - return ord(byte_value) + return "".join(chr(b) for b in byte_values) def bytes_to_ints(bytes_value): """Turn a bytes object into a sequence of ints.""" for byte in bytes_value: yield ord(byte) -# Md5 is available in different places. 
+ try: - import hashlib - md5 = hashlib.md5 + # In Python 2.x, the builtins were in __builtin__ + BUILTINS = sys.modules['__builtin__'] +except KeyError: + # In Python 3.x, they're in builtins + BUILTINS = sys.modules['builtins'] + + +# imp was deprecated in Python 3.3 +try: + import importlib + import importlib.util + imp = None except ImportError: - import md5 - md5 = md5.new + importlib = None + +# We only want to use importlib if it has everything we need. +try: + importlib_util_find_spec = importlib.util.find_spec +except Exception: + import imp + importlib_util_find_spec = None + +# What is the .pyc magic number for this version of Python? +try: + PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER +except AttributeError: + PYC_MAGIC_NUMBER = imp.get_magic() + + +def import_local_file(modname, modfile=None): + """Import a local file as a module. + + Opens a file in the current directory named `modname`.py, imports it + as `modname`, and returns the module object. `modfile` is the file to + import if it isn't in the current directory. 
+ + """ + try: + from importlib.machinery import SourceFileLoader + except ImportError: + SourceFileLoader = None + + if modfile is None: + modfile = modname + '.py' + if SourceFileLoader: + mod = SourceFileLoader(modname, modfile).load_module() + else: + for suff in imp.get_suffixes(): # pragma: part covered + if suff[0] == '.py': + break + + with open(modfile, 'r') as f: + # pylint: disable=undefined-loop-variable + mod = imp.load_module(modname, f, modfile, suff) + + return mod diff --git a/python/helpers/coveragepy/coverage/bytecode.py b/python/helpers/coveragepy/coverage/bytecode.py index 85360638528e..d823c67c9200 100644 --- a/python/helpers/coveragepy/coverage/bytecode.py +++ b/python/helpers/coveragepy/coverage/bytecode.py @@ -1,62 +1,9 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """Bytecode manipulation for coverage.py""" -import opcode, types - -from coverage.backward import byte_to_int - -class ByteCode(object): - """A single bytecode.""" - def __init__(self): - # The offset of this bytecode in the code object. - self.offset = -1 - - # The opcode, defined in the `opcode` module. - self.op = -1 - - # The argument, a small integer, whose meaning depends on the opcode. - self.arg = -1 - - # The offset in the code object of the next bytecode. - self.next_offset = -1 - - # The offset to jump to. - self.jump_to = -1 - - -class ByteCodes(object): - """Iterator over byte codes in `code`. - - Returns `ByteCode` objects. 
- - """ - # pylint: disable=R0924 - def __init__(self, code): - self.code = code - - def __getitem__(self, i): - return byte_to_int(self.code[i]) - - def __iter__(self): - offset = 0 - while offset < len(self.code): - bc = ByteCode() - bc.op = self[offset] - bc.offset = offset - - next_offset = offset+1 - if bc.op >= opcode.HAVE_ARGUMENT: - bc.arg = self[offset+1] + 256*self[offset+2] - next_offset += 2 - - label = -1 - if bc.op in opcode.hasjrel: - label = next_offset + bc.arg - elif bc.op in opcode.hasjabs: - label = bc.arg - bc.jump_to = label - - bc.next_offset = offset = next_offset - yield bc +import types class CodeObjects(object): diff --git a/python/helpers/coveragepy/coverage/cmdline.py b/python/helpers/coveragepy/coverage/cmdline.py index ea112a8b8f2d..09e8232313cd 100644 --- a/python/helpers/coveragepy/coverage/cmdline.py +++ b/python/helpers/coveragepy/coverage/cmdline.py @@ -1,114 +1,142 @@ -"""Command-line support for Coverage.""" +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt -import optparse, os, sys, time, traceback +"""Command-line support for coverage.py.""" -from coverage.backward import sorted # pylint: disable=W0622 +import glob +import optparse +import os.path +import sys +import textwrap +import traceback + +from coverage import env +from coverage.collector import CTracer from coverage.execfile import run_python_file, run_python_module from coverage.misc import CoverageException, ExceptionDuringRun, NoSource -from coverage.debug import info_formatter +from coverage.debug import info_formatter, info_header class Opts(object): """A namespace class for individual options we'll build parsers from.""" append = optparse.make_option( - '-a', '--append', action='store_false', dest="erase_first", - help="Append coverage data to .coverage, otherwise it is started " - "clean with each run." 
- ) + '-a', '--append', action='store_true', + help="Append coverage data to .coverage, otherwise it starts clean each time.", + ) branch = optparse.make_option( '', '--branch', action='store_true', - help="Measure branch coverage in addition to statement coverage." - ) + help="Measure branch coverage in addition to statement coverage.", + ) + CONCURRENCY_CHOICES = [ + "thread", "gevent", "greenlet", "eventlet", "multiprocessing", + ] + concurrency = optparse.make_option( + '', '--concurrency', action='store', metavar="LIB", + choices=CONCURRENCY_CHOICES, + help=( + "Properly measure code using a concurrency library. " + "Valid values are: %s." + ) % ", ".join(CONCURRENCY_CHOICES), + ) debug = optparse.make_option( '', '--debug', action='store', metavar="OPTS", - help="Debug options, separated by commas" - ) + help="Debug options, separated by commas", + ) directory = optparse.make_option( '-d', '--directory', action='store', metavar="DIR", - help="Write the output files to DIR." - ) + help="Write the output files to DIR.", + ) fail_under = optparse.make_option( '', '--fail-under', action='store', metavar="MIN", type="int", - help="Exit with a status of 2 if the total coverage is less than MIN." - ) + help="Exit with a status of 2 if the total coverage is less than MIN.", + ) help = optparse.make_option( '-h', '--help', action='store_true', - help="Get help on this command." - ) + help="Get help on this command.", + ) ignore_errors = optparse.make_option( '-i', '--ignore-errors', action='store_true', - help="Ignore errors while reading source files." - ) + help="Ignore errors while reading source files.", + ) include = optparse.make_option( '', '--include', action='store', metavar="PAT1,PAT2,...", - help="Include files only when their filename path matches one of " - "these patterns. Usually needs quoting on the command line." - ) + help=( + "Include only files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." 
+ ), + ) pylib = optparse.make_option( '-L', '--pylib', action='store_true', - help="Measure coverage even inside the Python installed library, " - "which isn't done by default." - ) + help=( + "Measure coverage even inside the Python installed library, " + "which isn't done by default." + ), + ) show_missing = optparse.make_option( '-m', '--show-missing', action='store_true', - help="Show line numbers of statements in each module that weren't " - "executed." - ) - old_omit = optparse.make_option( - '-o', '--omit', action='store', - metavar="PAT1,PAT2,...", - help="Omit files when their filename matches one of these patterns. " - "Usually needs quoting on the command line." - ) + help="Show line numbers of statements in each module that weren't executed.", + ) + skip_covered = optparse.make_option( + '--skip-covered', action='store_true', + help="Skip files with 100% coverage.", + ) omit = optparse.make_option( '', '--omit', action='store', metavar="PAT1,PAT2,...", - help="Omit files when their filename matches one of these patterns. " - "Usually needs quoting on the command line." - ) + help=( + "Omit files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." + ), + ) output_xml = optparse.make_option( '-o', '', action='store', dest="outfile", metavar="OUTFILE", - help="Write the XML report to this file. Defaults to 'coverage.xml'" - ) + help="Write the XML report to this file. Defaults to 'coverage.xml'", + ) parallel_mode = optparse.make_option( '-p', '--parallel-mode', action='store_true', - help="Append the machine name, process id and random number to the " - ".coverage data file name to simplify collecting data from " - "many processes." - ) + help=( + "Append the machine name, process id and random number to the " + ".coverage data file name to simplify collecting data from " + "many processes." 
+ ), + ) module = optparse.make_option( '-m', '--module', action='store_true', - help=" is an importable Python module, not a script path, " - "to be run as 'python -m' would run it." - ) + help=( + " is an importable Python module, not a script path, " + "to be run as 'python -m' would run it." + ), + ) rcfile = optparse.make_option( '', '--rcfile', action='store', - help="Specify configuration file. Defaults to '.coveragerc'" - ) + help="Specify configuration file. Defaults to '.coveragerc'", + ) source = optparse.make_option( '', '--source', action='store', metavar="SRC1,SRC2,...", - help="A list of packages or directories of code to be measured." - ) + help="A list of packages or directories of code to be measured.", + ) timid = optparse.make_option( '', '--timid', action='store_true', - help="Use a simpler but slower trace method. Try this if you get " - "seemingly impossible results!" - ) + help=( + "Use a simpler but slower trace method. Try this if you get " + "seemingly impossible results!" + ), + ) title = optparse.make_option( '', '--title', action='store', metavar="TITLE", - help="A text string to use as the title on the HTML." - ) + help="A text string to use as the title on the HTML.", + ) version = optparse.make_option( '', '--version', action='store_true', - help="Display version information and exit." - ) + help="Display version information and exit.", + ) class CoverageOptionParser(optparse.OptionParser, object): - """Base OptionParser for coverage. + """Base OptionParser for coverage.py. Problems don't exit the program. Defaults are initialized for all options. 
@@ -120,24 +148,26 @@ class CoverageOptionParser(optparse.OptionParser, object): add_help_option=False, *args, **kwargs ) self.set_defaults( - actions=[], + action=None, + append=None, branch=None, + concurrency=None, debug=None, directory=None, fail_under=None, help=None, ignore_errors=None, include=None, + module=None, omit=None, parallel_mode=None, - module=None, pylib=None, rcfile=True, show_missing=None, + skip_covered=None, source=None, timid=None, title=None, - erase_first=None, version=None, ) @@ -152,7 +182,7 @@ class CoverageOptionParser(optparse.OptionParser, object): """Used to stop the optparse error handler ending the process.""" pass - def parse_args(self, args=None, options=None): + def parse_args_ok(self, args=None, options=None): """Call optparse.parse_args, but return a triple: (ok, options, args) @@ -171,189 +201,187 @@ class CoverageOptionParser(optparse.OptionParser, object): raise self.OptionParserError -class ClassicOptionParser(CoverageOptionParser): - """Command-line parser for coverage.py classic arguments.""" +class GlobalOptionParser(CoverageOptionParser): + """Command-line parser for coverage.py global option arguments.""" def __init__(self): - super(ClassicOptionParser, self).__init__() - - self.add_action('-a', '--annotate', 'annotate') - self.add_action('-b', '--html', 'html') - self.add_action('-c', '--combine', 'combine') - self.add_action('-e', '--erase', 'erase') - self.add_action('-r', '--report', 'report') - self.add_action('-x', '--execute', 'execute') + super(GlobalOptionParser, self).__init__() self.add_options([ - Opts.directory, Opts.help, - Opts.ignore_errors, - Opts.pylib, - Opts.show_missing, - Opts.old_omit, - Opts.parallel_mode, - Opts.timid, Opts.version, ]) - def add_action(self, dash, dashdash, action_code): - """Add a specialized option that is the action to execute.""" - option = self.add_option(dash, dashdash, action='callback', - callback=self._append_action - ) - option.action_code = action_code - - def 
_append_action(self, option, opt_unused, value_unused, parser): - """Callback for an option that adds to the `actions` list.""" - parser.values.actions.append(option.action_code) - class CmdOptionParser(CoverageOptionParser): """Parse one of the new-style commands for coverage.py.""" - def __init__(self, action, options=None, defaults=None, usage=None, - cmd=None, description=None - ): - """Create an OptionParser for a coverage command. + def __init__(self, action, options, defaults=None, usage=None, description=None): + """Create an OptionParser for a coverage.py command. - `action` is the slug to put into `options.actions`. + `action` is the slug to put into `options.action`. `options` is a list of Option's for the command. `defaults` is a dict of default value for options. `usage` is the usage string to display in help. - `cmd` is the command name, if different than `action`. `description` is the description of the command, for the help text. """ if usage: usage = "%prog " + usage super(CmdOptionParser, self).__init__( - prog="coverage %s" % (cmd or action), usage=usage, description=description, ) - self.set_defaults(actions=[action], **(defaults or {})) - if options: - self.add_options(options) - self.cmd = cmd or action + self.set_defaults(action=action, **(defaults or {})) + self.add_options(options) + self.cmd = action def __eq__(self, other): # A convenience equality, so that I can put strings in unit test # results, and they will compare equal to objects. return (other == "" % self.cmd) + def get_prog_name(self): + """Override of an undocumented function in optparse.OptionParser.""" + program_name = super(CmdOptionParser, self).get_prog_name() + + # Include the sub-command for this parser as part of the command. 
+ return "%(command)s %(subcommand)s" % {'command': program_name, 'subcommand': self.cmd} + + GLOBAL_ARGS = [ - Opts.rcfile, + Opts.debug, Opts.help, + Opts.rcfile, ] CMDS = { - 'annotate': CmdOptionParser("annotate", + 'annotate': CmdOptionParser( + "annotate", [ Opts.directory, Opts.ignore_errors, - Opts.omit, Opts.include, + Opts.omit, ] + GLOBAL_ARGS, - usage = "[options] [modules]", - description = "Make annotated copies of the given files, marking " - "statements that are executed with > and statements that are " - "missed with !." + usage="[options] [modules]", + description=( + "Make annotated copies of the given files, marking statements that are executed " + "with > and statements that are missed with !." ), + ), - 'combine': CmdOptionParser("combine", GLOBAL_ARGS, - usage = " ", - description = "Combine data from multiple coverage files collected " + 'combine': CmdOptionParser( + "combine", + [ + Opts.append, + ] + GLOBAL_ARGS, + usage="[options] ... ", + description=( + "Combine data from multiple coverage files collected " "with 'run -p'. The combined results are written to a single " - "file representing the union of the data." + "file representing the union of the data. The positional " + "arguments are data files or directories containing data files. " + "If no paths are provided, data files in the default data file's " + "directory are combined." ), + ), - 'debug': CmdOptionParser("debug", GLOBAL_ARGS, - usage = "", - description = "Display information on the internals of coverage.py, " + 'debug': CmdOptionParser( + "debug", GLOBAL_ARGS, + usage="", + description=( + "Display information on the internals of coverage.py, " "for diagnosing problems. " "Topics are 'data' to show a summary of the collected data, " "or 'sys' to show installation information." ), + ), - 'erase': CmdOptionParser("erase", GLOBAL_ARGS, - usage = " ", - description = "Erase previously collected coverage data." 
- ), + 'erase': CmdOptionParser( + "erase", GLOBAL_ARGS, + description="Erase previously collected coverage data.", + ), - 'help': CmdOptionParser("help", GLOBAL_ARGS, - usage = "[command]", - description = "Describe how to use coverage.py" - ), + 'help': CmdOptionParser( + "help", GLOBAL_ARGS, + usage="[command]", + description="Describe how to use coverage.py", + ), - 'html': CmdOptionParser("html", + 'html': CmdOptionParser( + "html", [ Opts.directory, Opts.fail_under, Opts.ignore_errors, - Opts.omit, Opts.include, + Opts.omit, Opts.title, ] + GLOBAL_ARGS, - usage = "[options] [modules]", - description = "Create an HTML report of the coverage of the files. " + usage="[options] [modules]", + description=( + "Create an HTML report of the coverage of the files. " "Each file gets its own page, with the source decorated to show " "executed, excluded, and missed lines." ), + ), - 'report': CmdOptionParser("report", + 'report': CmdOptionParser( + "report", [ Opts.fail_under, Opts.ignore_errors, - Opts.omit, Opts.include, + Opts.omit, Opts.show_missing, + Opts.skip_covered, ] + GLOBAL_ARGS, - usage = "[options] [modules]", - description = "Report coverage statistics on modules." - ), + usage="[options] [modules]", + description="Report coverage statistics on modules." + ), - 'run': CmdOptionParser("execute", + 'run': CmdOptionParser( + "run", [ Opts.append, Opts.branch, - Opts.debug, + Opts.concurrency, + Opts.include, + Opts.module, + Opts.omit, Opts.pylib, Opts.parallel_mode, - Opts.module, - Opts.timid, Opts.source, - Opts.omit, - Opts.include, + Opts.timid, ] + GLOBAL_ARGS, - defaults = {'erase_first': True}, - cmd = "run", - usage = "[options] [program options]", - description = "Run a Python program, measuring code execution." - ), + usage="[options] [program options]", + description="Run a Python program, measuring code execution." 
+ ), - 'xml': CmdOptionParser("xml", + 'xml': CmdOptionParser( + "xml", [ Opts.fail_under, Opts.ignore_errors, - Opts.omit, Opts.include, + Opts.omit, Opts.output_xml, ] + GLOBAL_ARGS, - cmd = "xml", - usage = "[options] [modules]", - description = "Generate an XML report of coverage results." - ), - } + usage="[options] [modules]", + description="Generate an XML report of coverage results." + ), +} OK, ERR, FAIL_UNDER = 0, 1, 2 class CoverageScript(object): - """The command-line interface to Coverage.""" + """The command-line interface to coverage.py.""" def __init__(self, _covpkg=None, _run_python_file=None, - _run_python_module=None, _help_fn=None): + _run_python_module=None, _help_fn=None, _path_exists=None): # _covpkg is for dependency injection, so we can test this code. if _covpkg: self.covpkg = _covpkg @@ -365,12 +393,26 @@ class CoverageScript(object): self.run_python_file = _run_python_file or run_python_file self.run_python_module = _run_python_module or run_python_module self.help_fn = _help_fn or self.help - self.classic = False + self.path_exists = _path_exists or os.path.exists + self.global_option = False self.coverage = None + self.program_name = os.path.basename(sys.argv[0]) + if self.program_name == '__main__.py': + self.program_name = 'coverage' + if env.WINDOWS: + # entry_points={'console_scripts':...} on Windows makes files + # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These + # invoke coverage-script.py, coverage3-script.py, and + # coverage-3.5-script.py. argv[0] is the .py file, but we want to + # get back to the original form. + auto_suffix = "-script.py" + if self.program_name.endswith(auto_suffix): + self.program_name = self.program_name[:-len(auto_suffix)] + def command_line(self, argv): - """The bulk of the command line interface to Coverage. + """The bulk of the command line interface to coverage.py. `argv` is the argument list to process. 
@@ -382,11 +424,11 @@ class CoverageScript(object): self.help_fn(topic='minimum_help') return OK - # The command syntax we parse depends on the first argument. Classic - # syntax always starts with an option. - self.classic = argv[0].startswith('-') - if self.classic: - parser = ClassicOptionParser() + # The command syntax we parse depends on the first argument. Global + # switch syntax always starts with an option. + self.global_option = argv[0].startswith('-') + if self.global_option: + parser = GlobalOptionParser() else: parser = CMDS.get(argv[0]) if not parser: @@ -395,7 +437,7 @@ class CoverageScript(object): argv = argv[1:] parser.help_fn = self.help_fn - ok, options, args = parser.parse_args(argv) + ok, options, args = parser.parse_args_ok(argv) if not ok: return ERR @@ -403,9 +445,9 @@ class CoverageScript(object): if self.do_help(options, args, parser): return OK - # Check for conflicts and problems in the options. - if not self.args_ok(options, args): - return ERR + # We need to be able to import from the current directory, because + # plugins may try to, for example, to read Django settings. + sys.path[0] = '' # Listify the list options. source = unshell_list(options.source) @@ -415,74 +457,101 @@ class CoverageScript(object): # Do something. 
self.coverage = self.covpkg.coverage( - data_suffix = options.parallel_mode, - cover_pylib = options.pylib, - timid = options.timid, - branch = options.branch, - config_file = options.rcfile, - source = source, - omit = omit, - include = include, - debug = debug, + data_suffix=options.parallel_mode, + cover_pylib=options.pylib, + timid=options.timid, + branch=options.branch, + config_file=options.rcfile, + source=source, + omit=omit, + include=include, + debug=debug, + concurrency=options.concurrency, ) - if 'debug' in options.actions: + if options.action == "debug": return self.do_debug(args) - if 'erase' in options.actions or options.erase_first: + elif options.action == "erase": self.coverage.erase() - else: - self.coverage.load() + return OK - if 'execute' in options.actions: - self.do_execute(options, args) + elif options.action == "run": + return self.do_run(options, args) - if 'combine' in options.actions: - self.coverage.combine() + elif options.action == "combine": + if options.append: + self.coverage.load() + data_dirs = args or None + self.coverage.combine(data_dirs) self.coverage.save() + return OK # Remaining actions are reporting, with some common options. 
report_args = dict( - morfs = args, - ignore_errors = options.ignore_errors, - omit = omit, - include = include, + morfs=unglob_args(args), + ignore_errors=options.ignore_errors, + omit=omit, + include=include, ) - if 'report' in options.actions: + self.coverage.load() + + total = None + if options.action == "report": total = self.coverage.report( - show_missing=options.show_missing, **report_args) - if 'annotate' in options.actions: + show_missing=options.show_missing, + skip_covered=options.skip_covered, **report_args) + elif options.action == "annotate": self.coverage.annotate( directory=options.directory, **report_args) - if 'html' in options.actions: + elif options.action == "html": total = self.coverage.html_report( directory=options.directory, title=options.title, **report_args) - if 'xml' in options.actions: + elif options.action == "xml": outfile = options.outfile total = self.coverage.xml_report(outfile=outfile, **report_args) - if options.fail_under is not None: - if total >= options.fail_under: - return OK - else: - return FAIL_UNDER - else: - return OK + if total is not None: + # Apply the command line fail-under options, and then use the config + # value, so we can get fail_under from the config file. + if options.fail_under is not None: + self.coverage.set_option("report:fail_under", options.fail_under) + + if self.coverage.get_option("report:fail_under"): + # Total needs to be rounded, but don't want to report 100 + # unless it is really 100. + if 99 < total < 100: + total = 99 + else: + total = round(total) + + if total >= self.coverage.get_option("report:fail_under"): + return OK + else: + return FAIL_UNDER + + return OK def help(self, error=None, topic=None, parser=None): """Display an error message, or the named topic.""" assert error or topic or parser if error: print(error) - print("Use 'coverage help' for help.") + print("Use '%s help' for help." 
% (self.program_name,)) elif parser: print(parser.format_help().strip()) else: - help_msg = HELP_TOPICS.get(topic, '').strip() + help_params = dict(self.covpkg.__dict__) + help_params['program_name'] = self.program_name + if CTracer is not None: + help_params['extension_modifier'] = 'with C extension' + else: + help_params['extension_modifier'] = 'without C extension' + help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip() if help_msg: - print(help_msg % self.covpkg.__dict__) + print(help_msg.format(**help_params)) else: print("Don't know topic %r" % topic) @@ -494,13 +563,13 @@ class CoverageScript(object): """ # Handle help. if options.help: - if self.classic: + if self.global_option: self.help_fn(topic='help') else: self.help_fn(parser=parser) return True - if "help" in options.actions: + if options.action == "help": if args: for a in args: parser = CMDS.get(a) @@ -519,98 +588,97 @@ class CoverageScript(object): return False - def args_ok(self, options, args): - """Check for conflicts and problems in the options. - - Returns True if everything is ok, or False if not. - - """ - for i in ['erase', 'execute']: - for j in ['annotate', 'html', 'report', 'combine']: - if (i in options.actions) and (j in options.actions): - self.help_fn("You can't specify the '%s' and '%s' " - "options at the same time." % (i, j)) - return False - - if not options.actions: - self.help_fn( - "You must specify at least one of -e, -x, -c, -r, -a, or -b." 
- ) - return False - args_allowed = ( - 'execute' in options.actions or - 'annotate' in options.actions or - 'html' in options.actions or - 'debug' in options.actions or - 'report' in options.actions or - 'xml' in options.actions - ) - if not args_allowed and args: - self.help_fn("Unexpected arguments: %s" % " ".join(args)) - return False - - if 'execute' in options.actions and not args: - self.help_fn("Nothing to do.") - return False - - return True - - def do_execute(self, options, args): + def do_run(self, options, args): """Implementation of 'coverage run'.""" - # Set the first path element properly. - old_path0 = sys.path[0] + if not args: + self.help_fn("Nothing to do.") + return ERR + + if options.append and self.coverage.get_option("run:parallel"): + self.help_fn("Can't append to data files in parallel mode.") + return ERR + + if options.concurrency == "multiprocessing": + # Can't set other run-affecting command line options with + # multiprocessing. + for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']: + # As it happens, all of these options have no default, meaning + # they will be None if they have not been specified. + if getattr(options, opt_name) is not None: + self.help_fn( + "Options affecting multiprocessing must be specified " + "in a configuration file." + ) + return ERR + + if not self.coverage.get_option("run:parallel"): + if not options.append: + self.coverage.erase() # Run the script. 
self.coverage.start() code_ran = True try: - try: - if options.module: - sys.path[0] = '' - self.run_python_module(args[0], args) - else: - filename = args[0] - sys.path[0] = os.path.abspath(os.path.dirname(filename)) - self.run_python_file(filename, args) - except NoSource: - code_ran = False - raise + if options.module: + self.run_python_module(args[0], args) + else: + filename = args[0] + self.run_python_file(filename, args) + except NoSource: + code_ran = False + raise finally: self.coverage.stop() if code_ran: + if options.append: + data_file = self.coverage.get_option("run:data_file") + if self.path_exists(data_file): + self.coverage.combine(data_paths=[data_file]) self.coverage.save() - # Restore the old path - sys.path[0] = old_path0 + return OK def do_debug(self, args): """Implementation of 'coverage debug'.""" if not args: - self.help_fn("What information would you like: data, sys?") + self.help_fn("What information would you like: config, data, sys?") return ERR + for info in args: if info == 'sys': - print("-- sys ----------------------------------------") - for line in info_formatter(self.coverage.sysinfo()): + sys_info = self.coverage.sys_info() + print(info_header("sys")) + for line in info_formatter(sys_info): print(" %s" % line) elif info == 'data': - print("-- data ---------------------------------------") self.coverage.load() - print("path: %s" % self.coverage.data.filename) - print("has_arcs: %r" % self.coverage.data.has_arcs()) - summary = self.coverage.data.summary(fullpath=True) - if summary: + data = self.coverage.data + print(info_header("data")) + print("path: %s" % self.coverage.data_files.filename) + if data: + print("has_arcs: %r" % data.has_arcs()) + summary = data.line_counts(fullpath=True) filenames = sorted(summary.keys()) print("\n%d files:" % len(filenames)) for f in filenames: - print("%s: %d lines" % (f, summary[f])) + line = "%s: %d lines" % (f, summary[f]) + plugin = data.file_tracer(f) + if plugin: + line += " [%s]" % plugin 
+ print(line) else: print("No data collected") + elif info == 'config': + print(info_header("config")) + config_info = self.coverage.config.__dict__.items() + for line in info_formatter(config_info): + print(" %s" % line) else: self.help_fn("Don't know what you mean by %r" % info) return ERR + return OK @@ -618,98 +686,63 @@ def unshell_list(s): """Turn a command-line argument into a list.""" if not s: return None - if sys.platform == 'win32': - # When running coverage as coverage.exe, some of the behavior + if env.WINDOWS: + # When running coverage.py as coverage.exe, some of the behavior # of the shell is emulated: wildcards are expanded into a list of - # filenames. So you have to single-quote patterns on the command + # file names. So you have to single-quote patterns on the command # line, but (not) helpfully, the single quotes are included in the # argument, so we have to strip them off here. s = s.strip("'") return s.split(',') +def unglob_args(args): + """Interpret shell wildcards for platforms that need it.""" + if env.WINDOWS: + globbed = [] + for arg in args: + if '?' in arg or '*' in arg: + globbed.extend(glob.glob(arg)) + else: + globbed.append(arg) + args = globbed + return args + + HELP_TOPICS = { -# ------------------------- -'classic': -r"""Coverage.py version %(__version__)s -Measure, collect, and report on code coverage in Python programs. + 'help': """\ + Coverage.py, version {__version__} {extension_modifier} + Measure, collect, and report on code coverage in Python programs. -Usage: + usage: {program_name} [options] [args] -coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] - Execute the module, passing the given command-line arguments, collecting - coverage data. With the -p option, include the machine name and process - id in the .coverage file name. With -L, measure coverage even inside the - Python installed library, which isn't done by default. With --timid, use a - simpler but slower trace method. 
+ Commands: + annotate Annotate source files with execution information. + combine Combine a number of data files. + erase Erase previously collected coverage data. + help Get help on using coverage.py. + html Create an HTML report. + report Report coverage stats on modules. + run Run a Python program and measure code execution. + xml Create an XML report of coverage results. -coverage -e - Erase collected coverage data. + Use "{program_name} help " for detailed help on any command. + For full documentation, see {__url__} + """, -coverage -c - Combine data from multiple coverage files (as created by -p option above) - and store it into a single file representing the union of the coverage. + 'minimum_help': """\ + Code coverage for Python. Use '{program_name} help' for help. + """, -coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] - Report on the statement coverage for the given files. With the -m - option, show line numbers of the statements that weren't executed. - -coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...] - Create an HTML report of the coverage of the given files. Each file gets - its own page, with the file listing decorated to show executed, excluded, - and missed lines. - -coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...] - Make annotated copies of the given files, marking statements that - are executed with > and statements that are missed with !. - --d DIR - Write output files for -b or -a to this directory. - --i Ignore errors while reporting or annotating. - --o DIR,... - Omit reporting or annotating files when their filename path starts with - a directory listed in the omit list. - e.g. coverage -i -r -o c:\python25,lib\enthought\traits - -Coverage data is saved in the file .coverage by default. Set the -COVERAGE_FILE environment variable to save it somewhere else. -""", -# ------------------------- -'help': """\ -Coverage.py, version %(__version__)s -Measure, collect, and report on code coverage in Python programs. 
- -usage: coverage [options] [args] - -Commands: - annotate Annotate source files with execution information. - combine Combine a number of data files. - erase Erase previously collected coverage data. - help Get help on using coverage.py. - html Create an HTML report. - report Report coverage stats on modules. - run Run a Python program and measure code execution. - xml Create an XML report of coverage results. - -Use "coverage help " for detailed help on any command. -Use "coverage help classic" for help on older command syntax. -For more information, see %(__url__)s -""", -# ------------------------- -'minimum_help': """\ -Code coverage for Python. Use 'coverage help' for help. -""", -# ------------------------- -'version': """\ -Coverage.py, version %(__version__)s. %(__url__)s -""", + 'version': """\ + Coverage.py, version {__version__} {extension_modifier} + Documentation at {__url__} + """, } def main(argv=None): - """The main entry point to Coverage. + """The main entry point to coverage.py. This is installed as the script entry point. @@ -717,26 +750,19 @@ def main(argv=None): if argv is None: argv = sys.argv[1:] try: - start = time.clock() status = CoverageScript().command_line(argv) - end = time.clock() - if 0: - print("time: %.3fs" % (end - start)) - except ExceptionDuringRun: + except ExceptionDuringRun as err: # An exception was caught while running the product code. The # sys.exc_info() return tuple is packed into an ExceptionDuringRun # exception. - _, err, _ = sys.exc_info() traceback.print_exception(*err.args) status = ERR - except CoverageException: + except CoverageException as err: # A controlled error inside coverage.py: print the message to the user. - _, err, _ = sys.exc_info() print(err) status = ERR - except SystemExit: + except SystemExit as err: # The user called `sys.exit()`. Exit with their argument, if any. 
- _, err, _ = sys.exc_info() if err.args: status = err.args[0] else: diff --git a/python/helpers/coveragepy/coverage/codeunit.py b/python/helpers/coveragepy/coverage/codeunit.py deleted file mode 100644 index ca1ae5c56d6b..000000000000 --- a/python/helpers/coveragepy/coverage/codeunit.py +++ /dev/null @@ -1,145 +0,0 @@ -"""Code unit (module) handling for Coverage.""" - -import glob, os - -from coverage.backward import open_source, string_class, StringIO -from coverage.misc import CoverageException - - -def code_unit_factory(morfs, file_locator): - """Construct a list of CodeUnits from polymorphic inputs. - - `morfs` is a module or a filename, or a list of same. - - `file_locator` is a FileLocator that can help resolve filenames. - - Returns a list of CodeUnit objects. - - """ - # Be sure we have a list. - if not isinstance(morfs, (list, tuple)): - morfs = [morfs] - - # On Windows, the shell doesn't expand wildcards. Do it here. - globbed = [] - for morf in morfs: - if isinstance(morf, string_class) and ('?' in morf or '*' in morf): - globbed.extend(glob.glob(morf)) - else: - globbed.append(morf) - morfs = globbed - - code_units = [CodeUnit(morf, file_locator) for morf in morfs] - - return code_units - - -class CodeUnit(object): - """Code unit: a filename or module. - - Instance attributes: - - `name` is a human-readable name for this code unit. - `filename` is the os path from which we can read the source. - `relative` is a boolean. - - """ - def __init__(self, morf, file_locator): - self.file_locator = file_locator - - if hasattr(morf, '__file__'): - f = morf.__file__ - else: - f = morf - # .pyc files should always refer to a .py instead. 
- if f.endswith('.pyc') or f.endswith('.pyo'): - f = f[:-1] - elif f.endswith('$py.class'): # Jython - f = f[:-9] + ".py" - self.filename = self.file_locator.canonical_filename(f) - - if hasattr(morf, '__name__'): - n = modname = morf.__name__ - self.relative = True - else: - n = os.path.splitext(morf)[0] - rel = self.file_locator.relative_filename(n) - if os.path.isabs(n): - self.relative = (rel != n) - else: - self.relative = True - n = rel - modname = None - self.name = n - self.modname = modname - - def __repr__(self): - return "" % (self.name, self.filename) - - # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all - # of them defined. - - def __lt__(self, other): - return self.name < other.name - def __le__(self, other): - return self.name <= other.name - def __eq__(self, other): - return self.name == other.name - def __ne__(self, other): - return self.name != other.name - def __gt__(self, other): - return self.name > other.name - def __ge__(self, other): - return self.name >= other.name - - def flat_rootname(self): - """A base for a flat filename to correspond to this code unit. - - Useful for writing files about the code where you want all the files in - the same directory, but need to differentiate same-named files from - different directories. - - For example, the file a/b/c.py might return 'a_b_c' - - """ - if self.modname: - return self.modname.replace('.', '_') - else: - root = os.path.splitdrive(self.name)[1] - return root.replace('\\', '_').replace('/', '_').replace('.', '_') - - def source_file(self): - """Return an open file for reading the source of the code unit.""" - if os.path.exists(self.filename): - # A regular text file: open it. - return open_source(self.filename) - - # Maybe it's in a zip file? - source = self.file_locator.get_zip_data(self.filename) - if source is not None: - return StringIO(source) - - # Couldn't find source. - raise CoverageException( - "No source for code '%s'." 
% self.filename - ) - - def should_be_python(self): - """Does it seem like this file should contain Python? - - This is used to decide if a file reported as part of the exection of - a program was really likely to have contained Python in the first - place. - - """ - # Get the file extension. - _, ext = os.path.splitext(self.filename) - - # Anything named *.py* should be Python. - if ext.startswith('.py'): - return True - # A file with no extension should be Python. - if not ext: - return True - # Everything else is probably not Python. - return False diff --git a/python/helpers/coveragepy/coverage/collector.py b/python/helpers/coveragepy/coverage/collector.py index 8ba7d87cd4e0..3e28b3b149f4 100644 --- a/python/helpers/coveragepy/coverage/collector.py +++ b/python/helpers/coveragepy/coverage/collector.py @@ -1,152 +1,47 @@ -"""Raw data collector for Coverage.""" +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +import os +import sys + +from coverage import env +from coverage.backward import iitems +from coverage.files import abs_file +from coverage.misc import CoverageException, isolate_module +from coverage.pytracer import PyTracer + +os = isolate_module(os) -import os, sys, threading try: # Use the C extension code when we can, for speed. - from coverage.tracer import CTracer # pylint: disable=F0401,E0611 + from coverage.tracer import CTracer, CFileDisposition except ImportError: # Couldn't import the C extension, maybe it isn't built. if os.getenv('COVERAGE_TEST_TRACER') == 'c': - # During testing, we use the COVERAGE_TEST_TRACER env var to indicate - # that we've fiddled with the environment to test this fallback code. - # If we thought we had a C tracer, but couldn't import it, then exit - # quickly and clearly instead of dribbling confusing errors. 
I'm using - # sys.exit here instead of an exception because an exception here - # causes all sorts of other noise in unittest. - sys.stderr.write( - "*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n" - ) + # During testing, we use the COVERAGE_TEST_TRACER environment variable + # to indicate that we've fiddled with the environment to test this + # fallback code. If we thought we had a C tracer, but couldn't import + # it, then exit quickly and clearly instead of dribbling confusing + # errors. I'm using sys.exit here instead of an exception because an + # exception here causes all sorts of other noise in unittest. + sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n") sys.exit(1) CTracer = None -class PyTracer(object): - """Python implementation of the raw data tracer.""" +class FileDisposition(object): + """A simple value type for recording what to do with a file.""" + pass - # Because of poor implementations of trace-function-manipulating tools, - # the Python trace function must be kept very simple. In particular, there - # must be only one function ever set as the trace function, both through - # sys.settrace, and as the return value from the trace function. Put - # another way, the trace function must always return itself. It cannot - # swap in other functions, or return None to avoid tracing a particular - # frame. - # - # The trace manipulator that introduced this restriction is DecoratorTools, - # which sets a trace function, and then later restores the pre-existing one - # by calling sys.settrace with a function it found in the current frame. - # - # Systems that use DecoratorTools (or similar trace manipulations) must use - # PyTracer to get accurate results. The command-line --timid argument is - # used to force the use of this tracer. 
- def __init__(self): - self.data = None - self.should_trace = None - self.should_trace_cache = None - self.warn = None - self.cur_file_data = None - self.last_line = 0 - self.data_stack = [] - self.last_exc_back = None - self.last_exc_firstlineno = 0 - self.arcs = False - self.thread = None - self.stopped = False - - def _trace(self, frame, event, arg_unused): - """The trace function passed to sys.settrace.""" - - if self.stopped: - return - - if 0: - sys.stderr.write("trace event: %s %r @%d\n" % ( - event, frame.f_code.co_filename, frame.f_lineno - )) - - if self.last_exc_back: - if frame == self.last_exc_back: - # Someone forgot a return event. - if self.arcs and self.cur_file_data: - pair = (self.last_line, -self.last_exc_firstlineno) - self.cur_file_data[pair] = None - self.cur_file_data, self.last_line = self.data_stack.pop() - self.last_exc_back = None - - if event == 'call': - # Entering a new function context. Decide if we should trace - # in this file. - self.data_stack.append((self.cur_file_data, self.last_line)) - filename = frame.f_code.co_filename - if filename not in self.should_trace_cache: - tracename = self.should_trace(filename, frame) - self.should_trace_cache[filename] = tracename - else: - tracename = self.should_trace_cache[filename] - #print("called, stack is %d deep, tracename is %r" % ( - # len(self.data_stack), tracename)) - if tracename: - if tracename not in self.data: - self.data[tracename] = {} - self.cur_file_data = self.data[tracename] - else: - self.cur_file_data = None - # Set the last_line to -1 because the next arc will be entering a - # code block, indicated by (-1, n). - self.last_line = -1 - elif event == 'line': - # Record an executed line. 
- if self.cur_file_data is not None: - if self.arcs: - #print("lin", self.last_line, frame.f_lineno) - self.cur_file_data[(self.last_line, frame.f_lineno)] = None - else: - #print("lin", frame.f_lineno) - self.cur_file_data[frame.f_lineno] = None - self.last_line = frame.f_lineno - elif event == 'return': - if self.arcs and self.cur_file_data: - first = frame.f_code.co_firstlineno - self.cur_file_data[(self.last_line, -first)] = None - # Leaving this function, pop the filename stack. - self.cur_file_data, self.last_line = self.data_stack.pop() - #print("returned, stack is %d deep" % (len(self.data_stack))) - elif event == 'exception': - #print("exc", self.last_line, frame.f_lineno) - self.last_exc_back = frame.f_back - self.last_exc_firstlineno = frame.f_code.co_firstlineno - return self._trace - - def start(self): - """Start this Tracer. - - Return a Python function suitable for use with sys.settrace(). - - """ - self.thread = threading.currentThread() - sys.settrace(self._trace) - return self._trace - - def stop(self): - """Stop this Tracer.""" - self.stopped = True - if self.thread != threading.currentThread(): - # Called on a different thread than started us: we can't unhook - # ourseves, but we've set the flag that we should stop, so we won't - # do any more tracing. - return - - if hasattr(sys, "gettrace") and self.warn: - if sys.gettrace() != self._trace: - msg = "Trace function changed, measurement is likely wrong: %r" - self.warn(msg % (sys.gettrace(),)) - #print("Stopping tracer on %s" % threading.current_thread().ident) - sys.settrace(None) - - def get_stats(self): - """Return a dictionary of statistics, or None.""" - return None +def should_start_context(frame): + """Who-Tests-What hack: Determine whether this frame begins a new who-context.""" + fn_name = frame.f_code.co_name + if fn_name.startswith("test"): + return fn_name class Collector(object): @@ -170,12 +65,17 @@ class Collector(object): # the top, and resumed when they become the top again. 
_collectors = [] - def __init__(self, should_trace, timid, branch, warn): + # The concurrency settings we support here. + SUPPORTED_CONCURRENCIES = set(["greenlet", "eventlet", "gevent", "thread"]) + + def __init__(self, should_trace, check_include, timid, branch, warn, concurrency): """Create a collector. - `should_trace` is a function, taking a filename, and returning a - canonicalized filename, or None depending on whether the file should - be traced or not. + `should_trace` is a function, taking a file name, and returning a + `coverage.FileDisposition object`. + + `check_include` is a function taking a file name and a frame. It returns + a boolean: True if the file should be traced, False if not. If `timid` is true, then a slower simpler trace function will be used. This is important for some environments where manipulation of @@ -189,10 +89,55 @@ class Collector(object): `warn` is a warning function, taking a single string message argument, to be used if a warning needs to be issued. + `concurrency` is a list of strings indicating the concurrency libraries + in use. Valid values are "greenlet", "eventlet", "gevent", or "thread" + (the default). Of these four values, only one can be supplied. Other + values are ignored. + """ self.should_trace = should_trace + self.check_include = check_include self.warn = warn self.branch = branch + self.threading = None + + self.concur_id_func = None + + # We can handle a few concurrency options here, but only one at a time. 
+ these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency) + if len(these_concurrencies) > 1: + raise CoverageException("Conflicting concurrency settings: %s" % concurrency) + self.concurrency = these_concurrencies.pop() if these_concurrencies else '' + + try: + if self.concurrency == "greenlet": + import greenlet + self.concur_id_func = greenlet.getcurrent + elif self.concurrency == "eventlet": + import eventlet.greenthread # pylint: disable=import-error,useless-suppression + self.concur_id_func = eventlet.greenthread.getcurrent + elif self.concurrency == "gevent": + import gevent # pylint: disable=import-error,useless-suppression + self.concur_id_func = gevent.getcurrent + elif self.concurrency == "thread" or not self.concurrency: + # It's important to import threading only if we need it. If + # it's imported early, and the program being measured uses + # gevent, then gevent's monkey-patching won't work properly. + import threading + self.threading = threading + else: + raise CoverageException("Don't understand concurrency=%s" % concurrency) + except ImportError: + raise CoverageException( + "Couldn't trace with concurrency=%s, the module isn't installed." % ( + self.concurrency, + ) + ) + + # Who-Tests-What is just a hack at the moment, so turn it on with an + # environment variable. + self.wtw = int(os.getenv('COVERAGE_WTW', 0)) + self.reset() if timid: @@ -203,8 +148,15 @@ class Collector(object): # trace function. 
self._trace_class = CTracer or PyTracer + if self._trace_class is CTracer: + self.file_disposition_class = CFileDisposition + self.supports_plugins = True + else: + self.file_disposition_class = FileDisposition + self.supports_plugins = False + def __repr__(self): - return "<Collector at 0x%x>" % id(self) + return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name()) def tracer_name(self): """Return the class name of the tracer we're using.""" @@ -212,14 +164,46 @@ class Collector(object): def reset(self): """Clear collected data, and prepare to collect more.""" - # A dictionary mapping filenames to dicts with linenumber keys, - # or mapping filenames to dicts with linenumber pairs as keys. + # A dictionary mapping file names to dicts with line number keys (if not + # branch coverage), or mapping file names to dicts with line number + # pairs as keys (if branch coverage). self.data = {} - # A cache of the results from should_trace, the decision about whether - # to trace execution in a file. A dict of filename to (filename or - # None). - self.should_trace_cache = {} + # A dict mapping contexts to data dictionaries. + self.contexts = {} + self.contexts[None] = self.data + + # A dictionary mapping file names to file tracer plugin names that will + # handle them. + self.file_tracers = {} + + # The .should_trace_cache attribute is a cache from file names to + # coverage.FileDisposition objects, or None. When a file is first + # considered for tracing, a FileDisposition is obtained from + # Coverage.should_trace. Its .trace attribute indicates whether the + # file should be traced or not. If it should be, a plugin with dynamic + # file names can decide not to trace it based on the dynamic file name + # being excluded by the inclusion rules, in which case the + # FileDisposition will be replaced by None in the cache. + if env.PYPY: + import __pypy__ # pylint: disable=import-error + # Alex Gaynor said: + # should_trace_cache is a strictly growing key: once a key is in + # it, it never changes.
Further, the keys used to access it are + # generally constant, given sufficient context. That is to say, at + # any given point _trace() is called, pypy is able to know the key. + # This is because the key is determined by the physical source code + # line, and that's invariant with the call site. + # + # This property of a dict with immutable keys, combined with + # call-site-constant keys is a match for PyPy's module dict, + # which is optimized for such workloads. + # + # This gives a 20% benefit on the workload described at + # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage + self.should_trace_cache = __pypy__.newdict("module") + else: + self.should_trace_cache = {} # Our active Tracers. self.tracers = [] @@ -228,12 +212,35 @@ class Collector(object): """Start a new Tracer object, and store it in self.tracers.""" tracer = self._trace_class() tracer.data = self.data - tracer.arcs = self.branch + tracer.trace_arcs = self.branch tracer.should_trace = self.should_trace tracer.should_trace_cache = self.should_trace_cache tracer.warn = self.warn + + if hasattr(tracer, 'concur_id_func'): + tracer.concur_id_func = self.concur_id_func + elif self.concur_id_func: + raise CoverageException( + "Can't support concurrency=%s with %s, only threads are supported" % ( + self.concurrency, self.tracer_name(), + ) + ) + + if hasattr(tracer, 'file_tracers'): + tracer.file_tracers = self.file_tracers + if hasattr(tracer, 'threading'): + tracer.threading = self.threading + if hasattr(tracer, 'check_include'): + tracer.check_include = self.check_include + if self.wtw: + if hasattr(tracer, 'should_start_context'): + tracer.should_start_context = should_start_context + if hasattr(tracer, 'switch_context'): + tracer.switch_context = self.switch_context + fn = tracer.start() self.tracers.append(tracer) + return fn # The trace function has to be set individually on each thread before @@ -242,16 +249,16 @@ class Collector(object): # install this as a 
trace function, and the first time it's called, it does # the real trace installation. - def _installation_trace(self, frame_unused, event_unused, arg_unused): + def _installation_trace(self, frame, event, arg): """Called on new threads, installs the real tracer.""" - # Remove ourselves as the trace function + # Remove ourselves as the trace function. sys.settrace(None) # Install the real tracer. fn = self._start_tracer() # Invoke the real trace function with the current event, to be sure # not to lose an event. if fn: - fn = fn(frame_unused, event_unused, arg_unused) + fn = fn(frame, event, arg) # Return the new trace function to continue tracing in this scope. return fn @@ -259,39 +266,47 @@ class Collector(object): """Start collecting trace information.""" if self._collectors: self._collectors[-1].pause() - self._collectors.append(self) - #print("Started: %r" % self._collectors, file=sys.stderr) - # Check to see whether we had a fullcoverage tracer installed. + # Check to see whether we had a fullcoverage tracer installed. If so, + # get the stack frames it stashed away for us. traces0 = [] - if hasattr(sys, "gettrace"): - fn0 = sys.gettrace() - if fn0: - tracer0 = getattr(fn0, '__self__', None) - if tracer0: - traces0 = getattr(tracer0, 'traces', []) + fn0 = sys.gettrace() + if fn0: + tracer0 = getattr(fn0, '__self__', None) + if tracer0: + traces0 = getattr(tracer0, 'traces', []) - # Install the tracer on this thread. - fn = self._start_tracer() + try: + # Install the tracer on this thread. + fn = self._start_tracer() + except: + if self._collectors: + self._collectors[-1].resume() + raise + # If _start_tracer succeeded, then we add ourselves to the global + # stack of collectors. + self._collectors.append(self) + + # Replay all the events from fullcoverage into the new trace function. 
for args in traces0: (frame, event, arg), lineno = args try: fn(frame, event, arg, lineno=lineno) except TypeError: - raise Exception( - "fullcoverage must be run with the C trace function." - ) + raise Exception("fullcoverage must be run with the C trace function.") # Install our installation tracer in threading, to jump start other # threads. - threading.settrace(self._installation_trace) + if self.threading: + self.threading.settrace(self._installation_trace) def stop(self): """Stop collecting trace information.""" - #print >>sys.stderr, "Stopping: %r" % self._collectors assert self._collectors - assert self._collectors[-1] is self + assert self._collectors[-1] is self, ( + "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1]) + ) self.pause() self.tracers = [] @@ -310,44 +325,48 @@ class Collector(object): if stats: print("\nCoverage.py tracer stats:") for k in sorted(stats.keys()): - print("%16s: %s" % (k, stats[k])) - threading.settrace(None) + print("%20s: %s" % (k, stats[k])) + if self.threading: + self.threading.settrace(None) def resume(self): """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() - threading.settrace(self._installation_trace) + if self.threading: + self.threading.settrace(self._installation_trace) + else: + self._start_tracer() - def get_line_data(self): - """Return the line data collected. + def switch_context(self, new_context): + """Who-Tests-What hack: switch to a new who-context.""" + # Make a new data dict, or find the existing one, and switch all the + # tracers to use it. + data = self.contexts.setdefault(new_context, {}) + for tracer in self.tracers: + tracer.data = data - Data is { filename: { lineno: None, ...}, ...} + def save_data(self, covdata): + """Save the collected data to a `CoverageData`. + + Also resets the collector. 
""" + def abs_file_dict(d): + """Return a dict like d, but with keys modified by `abs_file`.""" + return dict((abs_file(k), v) for k, v in iitems(d)) + if self.branch: - # If we were measuring branches, then we have to re-build the dict - # to show line data. - line_data = {} - for f, arcs in self.data.items(): - line_data[f] = ldf = {} - for l1, _ in list(arcs.keys()): - if l1: - ldf[l1] = None - return line_data + covdata.add_arcs(abs_file_dict(self.data)) else: - return self.data + covdata.add_lines(abs_file_dict(self.data)) + covdata.add_file_tracers(abs_file_dict(self.file_tracers)) - def get_arc_data(self): - """Return the arc data collected. + if self.wtw: + # Just a hack, so just hack it. + import pprint + out_file = "coverage_wtw_{:06}.py".format(os.getpid()) + with open(out_file, "w") as wtw_out: + pprint.pprint(self.contexts, wtw_out) - Data is { filename: { (l1, l2): None, ...}, ...} - - Note that no data is collected or returned if the Collector wasn't - created with `branch` true. 
- - """ - if self.branch: - return self.data - else: - return {} + self.reset() diff --git a/python/helpers/coveragepy/coverage/config.py b/python/helpers/coveragepy/coverage/config.py index 87318ff12452..d6f5af0a6f5a 100644 --- a/python/helpers/coveragepy/coverage/config.py +++ b/python/helpers/coveragepy/coverage/config.py @@ -1,31 +1,68 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """Config file for coverage.py""" -import os, re, sys -from coverage.backward import string_class, iitems +import collections +import os +import re +import sys -# In py3, # ConfigParser was renamed to the more-standard configparser -try: - import configparser # pylint: disable=F0401 -except ImportError: - import ConfigParser as configparser +from coverage.backward import configparser, iitems, string_class +from coverage.misc import contract, CoverageException, isolate_module + +os = isolate_module(os) class HandyConfigParser(configparser.RawConfigParser): """Our specialization of ConfigParser.""" + def __init__(self, section_prefix): + configparser.RawConfigParser.__init__(self) + self.section_prefix = section_prefix + def read(self, filename): - """Read a filename as UTF-8 configuration data.""" + """Read a file name as UTF-8 configuration data.""" kwargs = {} if sys.version_info >= (3, 2): kwargs['encoding'] = "utf-8" return configparser.RawConfigParser.read(self, filename, **kwargs) - def get(self, *args, **kwargs): - v = configparser.RawConfigParser.get(self, *args, **kwargs) + def has_option(self, section, option): + section = self.section_prefix + section + return configparser.RawConfigParser.has_option(self, section, option) + + def has_section(self, section): + section = self.section_prefix + section + return configparser.RawConfigParser.has_section(self, section) + + def options(self, section): + section = self.section_prefix + section + return 
configparser.RawConfigParser.options(self, section) + + def get_section(self, section): + """Get the contents of a section, as a dictionary.""" + d = {} + for opt in self.options(section): + d[opt] = self.get(section, opt) + return d + + def get(self, section, *args, **kwargs): + """Get a value, replacing environment variables also. + + The arguments are the same as `RawConfigParser.get`, but in the found + value, ``$WORD`` or ``${WORD}`` are replaced by the value of the + environment variable ``WORD``. + + Returns the finished value. + + """ + section = self.section_prefix + section + v = configparser.RawConfigParser.get(self, section, *args, **kwargs) def dollar_replace(m): """Called for each $replacement.""" # Only one of the groups will have matched, just get its text. - word = [w for w in m.groups() if w is not None][0] + word = next(w for w in m.groups() if w is not None) # pragma: part covered if word == "$": return "$" else: @@ -59,28 +96,39 @@ class HandyConfigParser(configparser.RawConfigParser): values.append(value) return values - def getlinelist(self, section, option): - """Read a list of full-line strings. + def getregexlist(self, section, option): + """Read a list of full-line regexes. The value of `section` and `option` is treated as a newline-separated - list of strings. Each value is stripped of whitespace. + list of regexes. Each value is stripped of whitespace. Returns the list of strings. """ - value_list = self.get(section, option) - return list(filter(None, value_list.split('\n'))) + line_list = self.get(section, option) + value_list = [] + for value in line_list.splitlines(): + value = value.strip() + try: + re.compile(value) + except re.error as e: + raise CoverageException( + "Invalid [%s].%s value %r: %s" % (section, option, value, e) + ) + if value: + value_list.append(value) + return value_list -# The default line exclusion regexes +# The default line exclusion regexes. 
DEFAULT_EXCLUDE = [ - '(?i)# *pragma[: ]*no *cover', - ] + r'(?i)#\s*pragma[:\s]?\s*no\s*cover', +] # The default partial branch regexes, to be modified by the user. DEFAULT_PARTIAL = [ - '(?i)# *pragma[: ]*no *branch', - ] + r'(?i)#\s*pragma[:\s]?\s*no\s*branch', +] # The default partial branch regexes, based on Python semantics. # These are any Python branching constructs that can't actually execute all @@ -88,7 +136,7 @@ DEFAULT_PARTIAL = [ DEFAULT_PARTIAL_ALWAYS = [ 'while (True|1|False|0):', 'if (True|1|False|0):', - ] +] class CoverageConfig(object): @@ -106,44 +154,44 @@ class CoverageConfig(object): # Defaults for [run] self.branch = False + self.concurrency = None self.cover_pylib = False self.data_file = ".coverage" - self.parallel = False - self.timid = False - self.source = None self.debug = [] + self.note = None + self.parallel = False + self.plugins = [] + self.source = None + self.timid = False # Defaults for [report] self.exclude_list = DEFAULT_EXCLUDE[:] + self.fail_under = 0 self.ignore_errors = False self.include = None self.omit = None - self.partial_list = DEFAULT_PARTIAL[:] self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] + self.partial_list = DEFAULT_PARTIAL[:] self.precision = 0 self.show_missing = False + self.skip_covered = False # Defaults for [html] - self.html_dir = "htmlcov" self.extra_css = None + self.html_dir = "htmlcov" self.html_title = "Coverage report" # Defaults for [xml] self.xml_output = "coverage.xml" + self.xml_package_depth = 99 # Defaults for [paths] self.paths = {} - def from_environment(self, env_var): - """Read configuration from the `env_var` environment variable.""" - # Timidity: for nose users, read an environment variable. This is a - # cheap hack, since the rest of the command line arguments aren't - # recognized, but it solves some users' problems. 
- env = os.environ.get(env_var, '') - if env: - self.timid = ('--timid' in env) + # Options for plugins + self.plugin_options = {} - MUST_BE_LIST = ["omit", "include", "debug"] + MUST_BE_LIST = ["omit", "include", "debug", "plugins", "concurrency"] def from_args(self, **kwargs): """Read config values from `kwargs`.""" @@ -153,61 +201,167 @@ class CoverageConfig(object): v = [v] setattr(self, k, v) - def from_file(self, filename): + @contract(filename=str) + def from_file(self, filename, section_prefix=""): """Read configuration from a .rc file. `filename` is a file name to read. + Returns True or False, whether the file could be read. + """ self.attempted_config_files.append(filename) - cp = HandyConfigParser() - files_read = cp.read(filename) - if files_read is not None: # return value changed in 2.4 - self.config_files.extend(files_read) + cp = HandyConfigParser(section_prefix) + try: + files_read = cp.read(filename) + except configparser.Error as err: + raise CoverageException("Couldn't read config file %s: %s" % (filename, err)) + if not files_read: + return False + self.config_files.extend(files_read) + + try: + for option_spec in self.CONFIG_FILE_OPTIONS: + self._set_attr_from_config_option(cp, *option_spec) + except ValueError as err: + raise CoverageException("Couldn't read config file %s: %s" % (filename, err)) + + # Check that there are no unrecognized options. 
+ all_options = collections.defaultdict(set) for option_spec in self.CONFIG_FILE_OPTIONS: - self.set_attr_from_config_option(cp, *option_spec) + section, option = option_spec[1].split(":") + all_options[section].add(option) + + for section, options in iitems(all_options): + if cp.has_section(section): + for unknown in set(cp.options(section)) - options: + if section_prefix: + section = section_prefix + section + raise CoverageException( + "Unrecognized option '[%s] %s=' in config file %s" % ( + section, unknown, filename + ) + ) # [paths] is special if cp.has_section('paths'): for option in cp.options('paths'): self.paths[option] = cp.getlist('paths', option) + # plugins can have options + for plugin in self.plugins: + if cp.has_section(plugin): + self.plugin_options[plugin] = cp.get_section(plugin) + + return True + CONFIG_FILE_OPTIONS = [ + # These are *args for _set_attr_from_config_option: + # (attr, where, type_="") + # + # attr is the attribute to set on the CoverageConfig object. + # where is the section:name to read from the configuration file. + # type_ is the optional type to apply, by using .getTYPE to read the + # configuration value from the file. 
+ # [run] ('branch', 'run:branch', 'boolean'), + ('concurrency', 'run:concurrency', 'list'), ('cover_pylib', 'run:cover_pylib', 'boolean'), ('data_file', 'run:data_file'), ('debug', 'run:debug', 'list'), ('include', 'run:include', 'list'), + ('note', 'run:note'), ('omit', 'run:omit', 'list'), ('parallel', 'run:parallel', 'boolean'), + ('plugins', 'run:plugins', 'list'), ('source', 'run:source', 'list'), ('timid', 'run:timid', 'boolean'), # [report] - ('exclude_list', 'report:exclude_lines', 'linelist'), + ('exclude_list', 'report:exclude_lines', 'regexlist'), + ('fail_under', 'report:fail_under', 'int'), ('ignore_errors', 'report:ignore_errors', 'boolean'), ('include', 'report:include', 'list'), ('omit', 'report:omit', 'list'), - ('partial_list', 'report:partial_branches', 'linelist'), - ('partial_always_list', 'report:partial_branches_always', 'linelist'), + ('partial_always_list', 'report:partial_branches_always', 'regexlist'), + ('partial_list', 'report:partial_branches', 'regexlist'), ('precision', 'report:precision', 'int'), ('show_missing', 'report:show_missing', 'boolean'), + ('skip_covered', 'report:skip_covered', 'boolean'), + ('sort', 'report:sort'), # [html] - ('html_dir', 'html:directory'), ('extra_css', 'html:extra_css'), + ('html_dir', 'html:directory'), ('html_title', 'html:title'), # [xml] ('xml_output', 'xml:output'), - ] + ('xml_package_depth', 'xml:package_depth', 'int'), + ] - def set_attr_from_config_option(self, cp, attr, where, type_=''): + def _set_attr_from_config_option(self, cp, attr, where, type_=''): """Set an attribute on self if it exists in the ConfigParser.""" section, option = where.split(":") if cp.has_option(section, option): - method = getattr(cp, 'get'+type_) + method = getattr(cp, 'get' + type_) setattr(self, attr, method(section, option)) + + def get_plugin_options(self, plugin): + """Get a dictionary of options for the plugin named `plugin`.""" + return self.plugin_options.get(plugin, {}) + + def set_option(self, 
option_name, value): + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + setattr(self, attr, value) + return + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + self.plugin_options.setdefault(plugin_name, {})[key] = value + return + + # If we get here, we didn't find the option. + raise CoverageException("No such option: %r" % option_name) + + def get_option(self, option_name): + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + return getattr(self, attr) + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + return self.plugin_options.get(plugin_name, {}).get(key) + + # If we get here, we didn't find the option. 
+ raise CoverageException("No such option: %r" % option_name) diff --git a/python/helpers/coveragepy/coverage/control.py b/python/helpers/coveragepy/coverage/control.py index f75a3dda5b1b..d3e6708563c0 100644 --- a/python/helpers/coveragepy/coverage/control.py +++ b/python/helpers/coveragepy/coverage/control.py @@ -1,49 +1,67 @@ -"""Core control stuff for Coverage.""" +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt -import atexit, os, random, socket, sys +"""Core control stuff for coverage.py.""" +import atexit +import inspect +import os +import platform +import re +import sys +import traceback + +from coverage import env, files from coverage.annotate import AnnotateReporter -from coverage.backward import string_class, iitems, sorted # pylint: disable=W0622 -from coverage.codeunit import code_unit_factory, CodeUnit +from coverage.backward import string_class, iitems from coverage.collector import Collector from coverage.config import CoverageConfig -from coverage.data import CoverageData +from coverage.data import CoverageData, CoverageDataFiles from coverage.debug import DebugControl -from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher +from coverage.files import TreeMatcher, FnmatchMatcher from coverage.files import PathAliases, find_python_files, prep_patterns +from coverage.files import ModuleMatcher, abs_file from coverage.html import HtmlReporter from coverage.misc import CoverageException, bool_or_none, join_regex -from coverage.misc import file_be_gone +from coverage.misc import file_be_gone, isolate_module +from coverage.multiproc import patch_multiprocessing +from coverage.plugin import FileReporter +from coverage.plugin_support import Plugins +from coverage.python import PythonFileReporter from coverage.results import Analysis, Numbers from coverage.summary import SummaryReporter from coverage.xmlreport import XmlReporter +os = 
isolate_module(os) + # Pypy has some unusual stuff in the "stdlib". Consider those locations # when deciding where the stdlib is. try: - import _structseq # pylint: disable=F0401 + import _structseq except ImportError: _structseq = None -class coverage(object): +class Coverage(object): """Programmatic access to coverage.py. To use:: - from coverage import coverage + from coverage import Coverage - cov = coverage() + cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory='covhtml') """ - def __init__(self, data_file=None, data_suffix=None, cover_pylib=None, - auto_data=False, timid=None, branch=None, config_file=True, - source=None, omit=None, include=None, debug=None, - debug_file=None): + def __init__( + self, data_file=None, data_suffix=None, cover_pylib=None, + auto_data=False, timid=None, branch=None, config_file=True, + source=None, omit=None, include=None, debug=None, + concurrency=None, + ): """ `data_file` is the base name of the data file to use, defaulting to ".coverage". `data_suffix` is appended (with a dot) to `data_file` to @@ -65,324 +83,271 @@ class coverage(object): If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. - `config_file` determines what config file to read. If it is a string, - it is the name of the config file to read. If it is True, then a - standard file is read (".coveragerc"). If it is False, then no file is - read. + `config_file` determines what configuration file to read: + + * If it is ".coveragerc", it is interpreted as if it were True, + for backward compatibility. + + * If it is a string, it is the name of the file to read. If the + file can't be read, it is an error. + + * If it is True, then a few standard files names are tried + (".coveragerc", "setup.cfg"). It is not an error for these files + to not be found. + + * If it is False, then no configuration file is read. `source` is a list of file paths or package names. 
Only code located in the trees indicated by the file paths or package names will be measured. - `include` and `omit` are lists of filename patterns. Files that match + `include` and `omit` are lists of file name patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is - desired. `debug_file` is the file to write debug messages to, - defaulting to stderr. + desired. + + `concurrency` is a string indicating the concurrency library being used + in the measured code. Without this, coverage.py will get incorrect + results if these libraries are in use. Valid strings are "greenlet", + "eventlet", "gevent", "multiprocessing", or "thread" (the default). + This can also be a list of these strings. + + .. versionadded:: 4.0 + The `concurrency` parameter. + + .. versionadded:: 4.2 + The `concurrency` parameter can now be a list of strings. """ - from coverage import __version__ - - # A record of all the warnings that have been issued. - self._warnings = [] - # Build our configuration from a number of sources: # 1: defaults: self.config = CoverageConfig() - # 2: from the coveragerc file: + # 2: from the rcfile, .coveragerc or setup.cfg file: if config_file: - if config_file is True: + # pylint: disable=redefined-variable-type + did_read_rc = False + # Some API users were specifying ".coveragerc" to mean the same as + # True, so make it so. 
+ if config_file == ".coveragerc": + config_file = True + specified_file = (config_file is not True) + if not specified_file: config_file = ".coveragerc" - try: - self.config.from_file(config_file) - except ValueError: - _, err, _ = sys.exc_info() - raise CoverageException( - "Couldn't read config file %s: %s" % (config_file, err) - ) + self.config_file = config_file + + did_read_rc = self.config.from_file(config_file) + + if not did_read_rc: + if specified_file: + raise CoverageException( + "Couldn't read '%s' as a config file" % config_file + ) + self.config.from_file("setup.cfg", section_prefix="coverage:") # 3: from environment variables: - self.config.from_environment('COVERAGE_OPTIONS') env_data_file = os.environ.get('COVERAGE_FILE') if env_data_file: self.config.data_file = env_data_file + debugs = os.environ.get('COVERAGE_DEBUG') + if debugs: + self.config.debug.extend(debugs.split(",")) # 4: from constructor arguments: self.config.from_args( data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, omit=omit, include=include, debug=debug, + concurrency=concurrency, ) - # Create and configure the debugging controller. - self.debug = DebugControl(self.config.debug, debug_file or sys.stderr) + self._debug_file = None + self._auto_data = auto_data + self._data_suffix = data_suffix - self.auto_data = auto_data + # The matchers for _should_trace. + self.source_match = None + self.source_pkgs_match = None + self.pylib_match = self.cover_match = None + self.include_match = self.omit_match = None - # _exclude_re is a dict mapping exclusion list names to compiled + # Is it ok for no data to be collected? + self._warn_no_data = True + self._warn_unimported_source = True + + # A record of all the warnings that have been issued. + self._warnings = [] + + # Other instance attributes, set later. 
+ self.omit = self.include = self.source = None + self.source_pkgs = None + self.data = self.data_files = self.collector = None + self.plugins = None + self.pylib_dirs = self.cover_dirs = None + self.data_suffix = self.run_suffix = None + self._exclude_re = None + self.debug = None + + # State machine variables: + # Have we initialized everything? + self._inited = False + # Have we started collecting and not stopped it? + self._started = False + # Have we measured some data and not harvested it? + self._measured = False + + # If we have sub-process measurement happening automatically, then we + # want any explicit creation of a Coverage object to mean, this process + # is already coverage-aware, so don't auto-measure it. By now, the + # auto-creation of a Coverage object has already happened. But we can + # find it and tell it not to save its data. + if not env.METACOV: + _prevent_sub_process_measurement() + + def _init(self): + """Set all the initial state. + + This is called by the public methods to initialize state. This lets us + construct a :class:`Coverage` object, then tweak its state before this + function is called. + + """ + if self._inited: + return + + # Create and configure the debugging controller. COVERAGE_DEBUG_FILE + # is an environment variable, the name of a file to append debug logs + # to. + if self._debug_file is None: + debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE") + if debug_file_name: + self._debug_file = open(debug_file_name, "a") + else: + self._debug_file = sys.stderr + self.debug = DebugControl(self.config.debug, self._debug_file) + + # Load plugins + self.plugins = Plugins.load_plugins(self.config.plugins, self.config, self.debug) + + # _exclude_re is a dict that maps exclusion list names to compiled # regexes. self._exclude_re = {} self._exclude_regex_stale() - self.file_locator = FileLocator() + files.set_relative_directory() # The source argument can be directories or package names. 
self.source = [] self.source_pkgs = [] for src in self.config.source or []: if os.path.exists(src): - self.source.append(self.file_locator.canonical_filename(src)) + self.source.append(files.canonical_filename(src)) else: self.source_pkgs.append(src) self.omit = prep_patterns(self.config.omit) self.include = prep_patterns(self.config.include) + concurrency = self.config.concurrency or [] + if "multiprocessing" in concurrency: + patch_multiprocessing(rcfile=self.config_file) + #concurrency = None + # Multi-processing uses parallel for the subprocesses, so also use + # it for the main process. + self.config.parallel = True + self.collector = Collector( - self._should_trace, timid=self.config.timid, - branch=self.config.branch, warn=self._warn + should_trace=self._should_trace, + check_include=self._check_include_omit_etc, + timid=self.config.timid, + branch=self.config.branch, + warn=self._warn, + concurrency=concurrency, ) + # Early warning if we aren't going to be able to support plugins. + if self.plugins.file_tracers and not self.collector.supports_plugins: + self._warn( + "Plugin file tracers (%s) aren't supported with %s" % ( + ", ".join( + plugin._coverage_plugin_name + for plugin in self.plugins.file_tracers + ), + self.collector.tracer_name(), + ) + ) + for plugin in self.plugins.file_tracers: + plugin._coverage_enabled = False + # Suffixes are a bit tricky. We want to use the data suffix only when # collecting data, not when combining data. So we save it as # `self.run_suffix` now, and promote it to `self.data_suffix` if we # find that we are collecting data later. 
- if data_suffix or self.config.parallel: - if not isinstance(data_suffix, string_class): + if self._data_suffix or self.config.parallel: + if not isinstance(self._data_suffix, string_class): # if data_suffix=True, use .machinename.pid.random - data_suffix = True + self._data_suffix = True else: - data_suffix = None + self._data_suffix = None self.data_suffix = None - self.run_suffix = data_suffix + self.run_suffix = self._data_suffix # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. - self.data = CoverageData( - basename=self.config.data_file, - collector="coverage v%s" % __version__, - debug=self.debug, - ) + self.data = CoverageData(debug=self.debug) + self.data_files = CoverageDataFiles(basename=self.config.data_file, warn=self._warn) - # The dirs for files considered "installed with the interpreter". - self.pylib_dirs = [] + # The directories for files considered "installed with the interpreter". + self.pylib_dirs = set() if not self.config.cover_pylib: # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. - for m in (atexit, os, random, socket, _structseq): + for m in (atexit, inspect, os, platform, re, _structseq, traceback): if m is not None and hasattr(m, "__file__"): - m_dir = self._canonical_dir(m) - if m_dir not in self.pylib_dirs: - self.pylib_dirs.append(m_dir) + self.pylib_dirs.add(self._canonical_dir(m)) + if _structseq and not hasattr(_structseq, '__file__'): + # PyPy 2.4 has no __file__ in the builtin modules, but the code + # objects still have the file names. So dig into one to find + # the path to exclude. 
+ structseq_new = _structseq.structseq_new + try: + structseq_file = structseq_new.func_code.co_filename + except AttributeError: + structseq_file = structseq_new.__code__.co_filename + self.pylib_dirs.add(self._canonical_dir(structseq_file)) - # To avoid tracing the coverage code itself, we skip anything located - # where we are. - self.cover_dir = self._canonical_dir(__file__) - - # The matchers for _should_trace. - self.source_match = None - self.pylib_match = self.cover_match = None - self.include_match = self.omit_match = None + # To avoid tracing the coverage.py code itself, we skip anything + # located where we are. + self.cover_dirs = [self._canonical_dir(__file__)] + if env.TESTING: + # When testing, we use PyContracts, which should be considered + # part of coverage.py, and it uses six. Exclude those directories + # just as we exclude ourselves. + import contracts + import six + for mod in [contracts, six]: + self.cover_dirs.append(self._canonical_dir(mod)) # Set the reporting precision. Numbers.set_precision(self.config.precision) - # Is it ok for no data to be collected? - self._warn_no_data = True - self._warn_unimported_source = True - - # State machine variables: - # Have we started collecting and not stopped it? - self._started = False - # Have we measured some data and not harvested it? - self._measured = False - atexit.register(self._atexit) - def _canonical_dir(self, morf): - """Return the canonical directory of the module or file `morf`.""" - return os.path.split(CodeUnit(morf, self.file_locator).filename)[0] - - def _source_for_file(self, filename): - """Return the source file for `filename`.""" - if not filename.endswith(".py"): - if filename[-4:-1] == ".py": - filename = filename[:-1] - elif filename.endswith("$py.class"): # jython - filename = filename[:-9] + ".py" - return filename - - def _should_trace_with_reason(self, filename, frame): - """Decide whether to trace execution in `filename`, with a reason. 
- - This function is called from the trace function. As each new file name - is encountered, this function determines whether it is traced or not. - - Returns a pair of values: the first indicates whether the file should - be traced: it's a canonicalized filename if it should be traced, None - if it should not. The second value is a string, the resason for the - decision. - - """ - if not filename: - # Empty string is pretty useless - return None, "empty string isn't a filename" - - if filename.startswith('<'): - # Lots of non-file execution is represented with artificial - # filenames like "", "", or - # "". Don't ever trace these executions, since we - # can't do anything with the data later anyway. - return None, "not a real filename" - - self._check_for_packages() - - # Compiled Python files have two filenames: frame.f_code.co_filename is - # the filename at the time the .pyc was compiled. The second name is - # __file__, which is where the .pyc was actually loaded from. Since - # .pyc files can be moved after compilation (for example, by being - # installed), we look for __file__ in the frame and prefer it to the - # co_filename value. - dunder_file = frame.f_globals.get('__file__') - if dunder_file: - filename = self._source_for_file(dunder_file) - - # Jython reports the .class file to the tracer, use the source file. - if filename.endswith("$py.class"): - filename = filename[:-9] + ".py" - - canonical = self.file_locator.canonical_filename(filename) - - # If the user specified source or include, then that's authoritative - # about the outer bound of what to measure and we don't have to apply - # any canned exclusions. If they didn't, then we have to exclude the - # stdlib and coverage.py directories. 
- if self.source_match: - if not self.source_match.match(canonical): - return None, "falls outside the --source trees" - elif self.include_match: - if not self.include_match.match(canonical): - return None, "falls outside the --include trees" - else: - # If we aren't supposed to trace installed code, then check if this - # is near the Python standard library and skip it if so. - if self.pylib_match and self.pylib_match.match(canonical): - return None, "is in the stdlib" - - # We exclude the coverage code itself, since a little of it will be - # measured otherwise. - if self.cover_match and self.cover_match.match(canonical): - return None, "is part of coverage.py" - - # Check the file against the omit pattern. - if self.omit_match and self.omit_match.match(canonical): - return None, "is inside an --omit pattern" - - return canonical, "because we love you" - - def _should_trace(self, filename, frame): - """Decide whether to trace execution in `filename`. - - Calls `_should_trace_with_reason`, and returns just the decision. - - """ - canonical, reason = self._should_trace_with_reason(filename, frame) - if self.debug.should('trace'): - if not canonical: - msg = "Not tracing %r: %s" % (filename, reason) - else: - msg = "Tracing %r" % (filename,) - self.debug.write(msg) - return canonical - - def _warn(self, msg): - """Use `msg` as a warning.""" - self._warnings.append(msg) - sys.stderr.write("Coverage.py warning: %s\n" % msg) - - def _check_for_packages(self): - """Update the source_match matcher with latest imported packages.""" - # Our self.source_pkgs attribute is a list of package names we want to - # measure. Each time through here, we see if we've imported any of - # them yet. If so, we add its file to source_match, and we don't have - # to look for that package any more. 
- if self.source_pkgs: - found = [] - for pkg in self.source_pkgs: - try: - mod = sys.modules[pkg] - except KeyError: - continue - - found.append(pkg) - - try: - pkg_file = mod.__file__ - except AttributeError: - pkg_file = None - else: - d, f = os.path.split(pkg_file) - if f.startswith('__init__'): - # This is actually a package, return the directory. - pkg_file = d - else: - pkg_file = self._source_for_file(pkg_file) - pkg_file = self.file_locator.canonical_filename(pkg_file) - if not os.path.exists(pkg_file): - pkg_file = None - - if pkg_file: - self.source.append(pkg_file) - self.source_match.add(pkg_file) - else: - self._warn("Module %s has no Python source." % pkg) - - for pkg in found: - self.source_pkgs.remove(pkg) - - def use_cache(self, usecache): - """Control the use of a data file (incorrectly called a cache). - - `usecache` is true or false, whether to read and write data on disk. - - """ - self.data.usefile(usecache) - - def load(self): - """Load previously-collected coverage data from the data file.""" - self.collector.reset() - self.data.read() - - def start(self): - """Start measuring code coverage. - - Coverage measurement actually occurs in functions called after `start` - is invoked. Statements in the same scope as `start` won't be measured. - - Once you invoke `start`, you must also call `stop` eventually, or your - process might not shut down cleanly. - - """ - if self.run_suffix: - # Calling start() means we're running code, so use the run_suffix - # as the data_suffix when we eventually save the data. 
- self.data_suffix = self.run_suffix - if self.auto_data: - self.load() + self._inited = True # Create the matchers we need for _should_trace if self.source or self.source_pkgs: self.source_match = TreeMatcher(self.source) + self.source_pkgs_match = ModuleMatcher(self.source_pkgs) else: - if self.cover_dir: - self.cover_match = TreeMatcher([self.cover_dir]) + if self.cover_dirs: + self.cover_match = TreeMatcher(self.cover_dirs) if self.pylib_dirs: self.pylib_match = TreeMatcher(self.pylib_dirs) if self.include: @@ -391,14 +356,349 @@ class coverage(object): self.omit_match = FnmatchMatcher(self.omit) # The user may want to debug things, show info if desired. + wrote_any = False if self.debug.should('config'): - self.debug.write("Configuration values:") config_info = sorted(self.config.__dict__.items()) - self.debug.write_formatted_info(config_info) + self.debug.write_formatted_info("config", config_info) + wrote_any = True if self.debug.should('sys'): - self.debug.write("Debugging info:") - self.debug.write_formatted_info(self.sysinfo()) + self.debug.write_formatted_info("sys", self.sys_info()) + for plugin in self.plugins: + header = "sys: " + plugin._coverage_plugin_name + info = plugin.sys_info() + self.debug.write_formatted_info(header, info) + wrote_any = True + + if wrote_any: + self.debug.write_formatted_info("end", ()) + + def _canonical_dir(self, morf): + """Return the canonical directory of the module or file `morf`.""" + morf_filename = PythonFileReporter(morf, self).filename + return os.path.split(morf_filename)[0] + + def _source_for_file(self, filename): + """Return the source file for `filename`. + + Given a file name being traced, return the best guess as to the source + file to attribute it to. + + """ + if filename.endswith(".py"): + # .py files are themselves source files. + return filename + + elif filename.endswith((".pyc", ".pyo")): + # Bytecode files probably have source files near them. 
+ py_filename = filename[:-1] + if os.path.exists(py_filename): + # Found a .py file, use that. + return py_filename + if env.WINDOWS: + # On Windows, it could be a .pyw file. + pyw_filename = py_filename + "w" + if os.path.exists(pyw_filename): + return pyw_filename + # Didn't find source, but it's probably the .py file we want. + return py_filename + + elif filename.endswith("$py.class"): + # Jython is easy to guess. + return filename[:-9] + ".py" + + # No idea, just use the file name as-is. + return filename + + def _name_for_module(self, module_globals, filename): + """Get the name of the module for a set of globals and file name. + + For configurability's sake, we allow __main__ modules to be matched by + their importable name. + + If loaded via runpy (aka -m), we can usually recover the "original" + full dotted module name, otherwise, we resort to interpreting the + file name to get the module's name. In the case that the module name + can't be determined, None is returned. + + """ + dunder_name = module_globals.get('__name__', None) + + if isinstance(dunder_name, str) and dunder_name != '__main__': + # This is the usual case: an imported module. + return dunder_name + + loader = module_globals.get('__loader__', None) + for attrname in ('fullname', 'name'): # attribute renamed in py3.2 + if hasattr(loader, attrname): + fullname = getattr(loader, attrname) + else: + continue + + if isinstance(fullname, str) and fullname != '__main__': + # Module loaded via: runpy -m + return fullname + + # Script as first argument to Python command line. + inspectedname = inspect.getmodulename(filename) + if inspectedname is not None: + return inspectedname + else: + return dunder_name + + def _should_trace_internal(self, filename, frame): + """Decide whether to trace execution in `filename`, with a reason. + + This function is called from the trace function. As each new file name + is encountered, this function determines whether it is traced or not. 
+ + Returns a FileDisposition object. + + """ + original_filename = filename + disp = _disposition_init(self.collector.file_disposition_class, filename) + + def nope(disp, reason): + """Simple helper to make it easy to return NO.""" + disp.trace = False + disp.reason = reason + return disp + + # Compiled Python files have two file names: frame.f_code.co_filename is + # the file name at the time the .pyc was compiled. The second name is + # __file__, which is where the .pyc was actually loaded from. Since + # .pyc files can be moved after compilation (for example, by being + # installed), we look for __file__ in the frame and prefer it to the + # co_filename value. + dunder_file = frame.f_globals.get('__file__') + if dunder_file: + filename = self._source_for_file(dunder_file) + if original_filename and not original_filename.startswith('<'): + orig = os.path.basename(original_filename) + if orig != os.path.basename(filename): + # Files shouldn't be renamed when moved. This happens when + # exec'ing code. If it seems like something is wrong with + # the frame's file name, then just use the original. + filename = original_filename + + if not filename: + # Empty string is pretty useless. + return nope(disp, "empty string isn't a file name") + + if filename.startswith('memory:'): + return nope(disp, "memory isn't traceable") + + if filename.startswith('<'): + # Lots of non-file execution is represented with artificial + # file names like "", "", or + # "". Don't ever trace these executions, since we + # can't do anything with the data later anyway. + return nope(disp, "not a real file name") + + # pyexpat does a dumb thing, calling the trace function explicitly from + # C code with a C file name. + if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename): + return nope(disp, "pyexpat lies about itself") + + # Jython reports the .class file to the tracer, use the source file. 
+ if filename.endswith("$py.class"): + filename = filename[:-9] + ".py" + + canonical = files.canonical_filename(filename) + disp.canonical_filename = canonical + + # Try the plugins, see if they have an opinion about the file. + plugin = None + for plugin in self.plugins.file_tracers: + if not plugin._coverage_enabled: + continue + + try: + file_tracer = plugin.file_tracer(canonical) + if file_tracer is not None: + file_tracer._coverage_plugin = plugin + disp.trace = True + disp.file_tracer = file_tracer + if file_tracer.has_dynamic_source_filename(): + disp.has_dynamic_filename = True + else: + disp.source_filename = files.canonical_filename( + file_tracer.source_filename() + ) + break + except Exception: + self._warn( + "Disabling plugin %r due to an exception:" % ( + plugin._coverage_plugin_name + ) + ) + traceback.print_exc() + plugin._coverage_enabled = False + continue + else: + # No plugin wanted it: it's Python. + disp.trace = True + disp.source_filename = canonical + + if not disp.has_dynamic_filename: + if not disp.source_filename: + raise CoverageException( + "Plugin %r didn't set source_filename for %r" % + (plugin, disp.original_filename) + ) + reason = self._check_include_omit_etc_internal( + disp.source_filename, frame, + ) + if reason: + nope(disp, reason) + + return disp + + def _check_include_omit_etc_internal(self, filename, frame): + """Check a file name against the include, omit, etc, rules. + + Returns a string or None. String means, don't trace, and is the reason + why. None means no reason found to not trace. + + """ + modulename = self._name_for_module(frame.f_globals, filename) + + # If the user specified source or include, then that's authoritative + # about the outer bound of what to measure and we don't have to apply + # any canned exclusions. If they didn't, then we have to exclude the + # stdlib and coverage.py directories. 
+ if self.source_match: + if self.source_pkgs_match.match(modulename): + if modulename in self.source_pkgs: + self.source_pkgs.remove(modulename) + return None # There's no reason to skip this file. + + if not self.source_match.match(filename): + return "falls outside the --source trees" + elif self.include_match: + if not self.include_match.match(filename): + return "falls outside the --include trees" + else: + # If we aren't supposed to trace installed code, then check if this + # is near the Python standard library and skip it if so. + if self.pylib_match and self.pylib_match.match(filename): + return "is in the stdlib" + + # We exclude the coverage.py code itself, since a little of it + # will be measured otherwise. + if self.cover_match and self.cover_match.match(filename): + return "is part of coverage.py" + + # Check the file against the omit pattern. + if self.omit_match and self.omit_match.match(filename): + return "is inside an --omit pattern" + + # No reason found to skip this file. + return None + + def _should_trace(self, filename, frame): + """Decide whether to trace execution in `filename`. + + Calls `_should_trace_internal`, and returns the FileDisposition. + + """ + disp = self._should_trace_internal(filename, frame) + if self.debug.should('trace'): + self.debug.write(_disposition_debug_msg(disp)) + return disp + + def _check_include_omit_etc(self, filename, frame): + """Check a file name against the include/omit/etc, rules, verbosely. + + Returns a boolean: True if the file should be traced, False if not. 
+ + """ + reason = self._check_include_omit_etc_internal(filename, frame) + if self.debug.should('trace'): + if not reason: + msg = "Including %r" % (filename,) + else: + msg = "Not including %r: %s" % (filename, reason) + self.debug.write(msg) + + return not reason + + def _warn(self, msg): + """Use `msg` as a warning.""" + self._warnings.append(msg) + if self.debug.should('pid'): + msg = "[%d] %s" % (os.getpid(), msg) + sys.stderr.write("Coverage.py warning: %s\n" % msg) + + def get_option(self, option_name): + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + .. versionadded:: 4.0 + + """ + return self.config.get_option(option_name) + + def set_option(self, option_name, value): + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with ``"run:branch"``. + + `value` is the new value for the option. This should be a Python + value where appropriate. For example, use True for booleans, not the + string ``"True"``. + + As an example, calling:: + + cov.set_option("run:branch", True) + + has the same effect as this configuration file:: + + [run] + branch = True + + .. versionadded:: 4.0 + + """ + self.config.set_option(option_name, value) + + def use_cache(self, usecache): + """Obsolete method.""" + self._init() + if not usecache: + self._warn("use_cache(False) is no longer supported.") + + def load(self): + """Load previously-collected coverage data from the data file.""" + self._init() + self.collector.reset() + self.data_files.read(self.data) + + def start(self): + """Start measuring code coverage. 
+ + Coverage measurement actually occurs in functions called after + :meth:`start` is invoked. Statements in the same scope as + :meth:`start` won't be measured. + + Once you invoke :meth:`start`, you must also call :meth:`stop` + eventually, or your process might not shut down cleanly. + + """ + self._init() + if self.run_suffix: + # Calling start() means we're running code, so use the run_suffix + # as the data_suffix when we eventually save the data. + self.data_suffix = self.run_suffix + if self._auto_data: + self.load() self.collector.start() self._started = True @@ -406,14 +706,15 @@ class coverage(object): def stop(self): """Stop measuring code coverage.""" + if self._started: + self.collector.stop() self._started = False - self.collector.stop() def _atexit(self): """Clean up on process shutdown.""" if self._started: self.stop() - if self.auto_data: + if self._auto_data: self.save() def erase(self): @@ -423,11 +724,14 @@ class coverage(object): discarding the data file. """ + self._init() self.collector.reset() self.data.erase() + self.data_files.erase(parallel=self.config.parallel) def clear_exclude(self, which='exclude'): """Clear the exclude list.""" + self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() @@ -446,6 +750,7 @@ class coverage(object): is marked for special treatment during reporting. """ + self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() @@ -464,79 +769,86 @@ class coverage(object): def get_exclude_list(self, which='exclude'): """Return a list of excluded regex patterns. - `which` indicates which list is desired. See `exclude` for the lists - that are available, and their meaning. + `which` indicates which list is desired. See :meth:`exclude` for the + lists that are available, and their meaning. 
""" + self._init() return getattr(self.config, which + "_list") def save(self): """Save the collected coverage data to the data file.""" - data_suffix = self.data_suffix - if data_suffix is True: - # If data_suffix was a simple true value, then make a suffix with - # plenty of distinguishing information. We do this here in - # `save()` at the last minute so that the pid will be correct even - # if the process forks. - extra = "" - if _TEST_NAME_FILE: - f = open(_TEST_NAME_FILE) - test_name = f.read() - f.close() - extra = "." + test_name - data_suffix = "%s%s.%s.%06d" % ( - socket.gethostname(), extra, os.getpid(), - random.randint(0, 999999) - ) + self._init() + self.get_data() + self.data_files.write(self.data, suffix=self.data_suffix) - self._harvest_data() - self.data.write(suffix=data_suffix) - - def combine(self): + def combine(self, data_paths=None): """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. + `data_paths` is a list of files or directories from which data should + be combined. If no list is passed, then the data files from the + directory indicated by the current data file (probably the current + directory) will be combined. + + .. versionadded:: 4.0 + The `data_paths` parameter. + """ + self._init() + self.get_data() + aliases = None if self.config.paths: - aliases = PathAliases(self.file_locator) + aliases = PathAliases() for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) - self.data.combine_parallel_data(aliases=aliases) - def _harvest_data(self): + self.data_files.combine_parallel_data(self.data, aliases=aliases, data_paths=data_paths) + + def get_data(self): """Get the collected data and reset the collector. Also warn about various problems collecting data. 
- """ - if not self._measured: - return + Returns a :class:`coverage.CoverageData`, the collected coverage data. - self.data.add_line_data(self.collector.get_line_data()) - self.data.add_arc_data(self.collector.get_arc_data()) - self.collector.reset() + .. versionadded:: 4.0 + + """ + self._init() + if not self._measured: + return self.data + + self.collector.save_data(self.data) # If there are still entries in the source_pkgs list, then we never # encountered those packages. if self._warn_unimported_source: for pkg in self.source_pkgs: - self._warn("Module %s was never imported." % pkg) + if pkg not in sys.modules: + self._warn("Module %s was never imported." % pkg) + elif not ( + hasattr(sys.modules[pkg], '__file__') and + os.path.exists(sys.modules[pkg].__file__) + ): + self._warn("Module %s has no Python source." % pkg) + else: + self._warn("Module %s was previously imported, but not measured." % pkg) # Find out if we got any data. - summary = self.data.summary() - if not summary and self._warn_no_data: + if not self.data and self._warn_no_data: self._warn("No data was collected.") # Find files that were never executed at all. for src in self.source: for py_file in find_python_files(src): - py_file = self.file_locator.canonical_filename(py_file) + py_file = files.canonical_filename(py_file) if self.omit_match and self.omit_match.match(py_file): # Turns out this file was omitted, so don't pull it back @@ -545,7 +857,11 @@ class coverage(object): self.data.touch_file(py_file) + if self.config.note: + self.data.add_run_info(note=self.config.note) + self._measured = False + return self.data # Backward compatibility with version 1. def analysis(self, morf): @@ -556,10 +872,10 @@ class coverage(object): def analysis2(self, morf): """Analyze a module. - `morf` is a module or a filename. It will be analyzed to determine + `morf` is a module or a file name. It will be analyzed to determine its coverage statistics. 
The return value is a 5-tuple: - * The filename for the module. + * The file name for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from @@ -570,6 +886,7 @@ class coverage(object): coverage data. """ + self._init() analysis = self._analyze(morf) return ( analysis.filename, @@ -585,38 +902,92 @@ class coverage(object): Returns an `Analysis` object. """ - self._harvest_data() - if not isinstance(it, CodeUnit): - it = code_unit_factory(it, self.file_locator)[0] + self.get_data() + if not isinstance(it, FileReporter): + it = self._get_file_reporter(it) - return Analysis(self, it) + return Analysis(self.data, it) - def report(self, morfs=None, show_missing=True, ignore_errors=None, - file=None, # pylint: disable=W0622 - omit=None, include=None - ): + def _get_file_reporter(self, morf): + """Get a FileReporter for a module or file name.""" + plugin = None + file_reporter = "python" + + if isinstance(morf, string_class): + abs_morf = abs_file(morf) + plugin_name = self.data.file_tracer(abs_morf) + if plugin_name: + plugin = self.plugins.get(plugin_name) + + if plugin: + file_reporter = plugin.file_reporter(abs_morf) + if file_reporter is None: + raise CoverageException( + "Plugin %r did not provide a file reporter for %r." % ( + plugin._coverage_plugin_name, morf + ) + ) + + if file_reporter == "python": + # pylint: disable=redefined-variable-type + file_reporter = PythonFileReporter(morf, self) + + return file_reporter + + def _get_file_reporters(self, morfs=None): + """Get a list of FileReporters for a list of modules or file names. + + For each module or file name in `morfs`, find a FileReporter. Return + the list of FileReporters. + + If `morfs` is a single module or file name, this returns a list of one + FileReporter. If `morfs` is empty or None, then the list of all files + measured is used to find the FileReporters. 
+ + """ + if not morfs: + morfs = self.data.measured_files() + + # Be sure we have a list. + if not isinstance(morfs, (list, tuple)): + morfs = [morfs] + + file_reporters = [] + for morf in morfs: + file_reporter = self._get_file_reporter(morf) + file_reporters.append(file_reporter) + + return file_reporters + + def report( + self, morfs=None, show_missing=None, ignore_errors=None, + file=None, # pylint: disable=redefined-builtin + omit=None, include=None, skip_covered=None, + ): """Write a summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. - `include` is a list of filename patterns. Modules whose filenames - match those patterns will be included in the report. Modules matching - `omit` will not be included in the report. + `include` is a list of file name patterns. Files that match will be + included in the report. Files matching `omit` will not be included in + the report. Returns a float, the total percentage covered. """ - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, - show_missing=show_missing, + show_missing=show_missing, skip_covered=skip_covered, ) reporter = SummaryReporter(self, self.config) return reporter.report(morfs, outfile=file) - def annotate(self, morfs=None, directory=None, ignore_errors=None, - omit=None, include=None): + def annotate( + self, morfs=None, directory=None, ignore_errors=None, + omit=None, include=None, + ): """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new @@ -624,10 +995,10 @@ class coverage(object): marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". - See `coverage.report()` for other arguments. + See :meth:`report` for other arguments. 
""" - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include ) @@ -648,12 +1019,12 @@ class coverage(object): `title` is a text string (not HTML) to use as the title of the HTML report. - See `coverage.report()` for other arguments. + See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, html_dir=directory, extra_css=extra_css, html_title=title, @@ -661,8 +1032,10 @@ class coverage(object): reporter = HtmlReporter(self, self.config) return reporter.report(morfs) - def xml_report(self, morfs=None, outfile=None, ignore_errors=None, - omit=None, include=None): + def xml_report( + self, morfs=None, outfile=None, ignore_errors=None, + omit=None, include=None, + ): """Generate an XML report of coverage results. The report is compatible with Cobertura reports. @@ -670,12 +1043,12 @@ class coverage(object): Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. - See `coverage.report()` for other arguments. + See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ - self._harvest_data() + self.get_data() self.config.from_args( ignore_errors=ignore_errors, omit=omit, include=include, xml_output=outfile, @@ -686,69 +1059,115 @@ class coverage(object): if self.config.xml_output == '-': outfile = sys.stdout else: - outfile = open(self.config.xml_output, "w") + # Ensure that the output directory is created; done here + # because this report pre-opens the output file. + # HTMLReport does this using the Report plumbing because + # its task is more complex, being multiple files. 
+ output_dir = os.path.dirname(self.config.xml_output) + if output_dir and not os.path.isdir(output_dir): + os.makedirs(output_dir) + open_kwargs = {} + if env.PY3: + open_kwargs['encoding'] = 'utf8' + outfile = open(self.config.xml_output, "w", **open_kwargs) file_to_close = outfile try: - try: - reporter = XmlReporter(self, self.config) - return reporter.report(morfs, outfile=outfile) - except CoverageException: - delete_file = True - raise + reporter = XmlReporter(self, self.config) + return reporter.report(morfs, outfile=outfile) + except CoverageException: + delete_file = True + raise finally: if file_to_close: file_to_close.close() if delete_file: file_be_gone(self.config.xml_output) - def sysinfo(self): + def sys_info(self): """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod - import platform, re - try: - implementation = platform.python_implementation() - except AttributeError: - implementation = "unknown" + self._init() + + ft_plugins = [] + for ft in self.plugins.file_tracers: + ft_name = ft._coverage_plugin_name + if not ft._coverage_enabled: + ft_name += " (disabled)" + ft_plugins.append(ft_name) info = [ ('version', covmod.__version__), ('coverage', covmod.__file__), - ('cover_dir', self.cover_dir), + ('cover_dirs', self.cover_dirs), ('pylib_dirs', self.pylib_dirs), ('tracer', self.collector.tracer_name()), + ('plugins.file_tracers', ft_plugins), ('config_files', self.config.attempted_config_files), ('configs_read', self.config.config_files), - ('data_path', self.data.filename), + ('data_path', self.data_files.filename), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), - ('implementation', implementation), + ('implementation', platform.python_implementation()), ('executable', sys.executable), ('cwd', os.getcwd()), ('path', sys.path), - ('environment', sorted([ - ("%s = %s" % (k, v)) for k, v in iitems(os.environ) - if re.search(r"^COV|^PY", k) - ])), + ('environment', sorted( + 
("%s = %s" % (k, v)) + for k, v in iitems(os.environ) + if k.startswith(("COV", "PY")) + )), ('command_line', " ".join(getattr(sys, 'argv', ['???']))), ] - if self.source_match: - info.append(('source_match', self.source_match.info())) - if self.include_match: - info.append(('include_match', self.include_match.info())) - if self.omit_match: - info.append(('omit_match', self.omit_match.info())) - if self.cover_match: - info.append(('cover_match', self.cover_match.info())) - if self.pylib_match: - info.append(('pylib_match', self.pylib_match.info())) + + matcher_names = [ + 'source_match', 'source_pkgs_match', + 'include_match', 'omit_match', + 'cover_match', 'pylib_match', + ] + + for matcher_name in matcher_names: + matcher = getattr(self, matcher_name) + if matcher: + matcher_info = matcher.info() + else: + matcher_info = '-none-' + info.append((matcher_name, matcher_info)) return info +# FileDisposition "methods": FileDisposition is a pure value object, so it can +# be implemented in either C or Python. Acting on them is done with these +# functions. + +def _disposition_init(cls, original_filename): + """Construct and initialize a new FileDisposition object.""" + disp = cls() + disp.original_filename = original_filename + disp.canonical_filename = original_filename + disp.source_filename = None + disp.trace = False + disp.reason = "" + disp.file_tracer = None + disp.has_dynamic_filename = False + return disp + + +def _disposition_debug_msg(disp): + """Make a nice debug message of what the FileDisposition is doing.""" + if disp.trace: + msg = "Tracing %r" % (disp.original_filename,) + if disp.file_tracer: + msg += ": will be traced by %r" % disp.file_tracer + else: + msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason) + return msg + + def process_startup(): - """Call this at Python startup to perhaps measure coverage. + """Call this at Python start-up to perhaps measure coverage. 
If the environment variable COVERAGE_PROCESS_START is defined, coverage measurement is started. The value of the variable is the config file @@ -766,14 +1185,41 @@ def process_startup(): import coverage; coverage.process_startup() + Returns the :class:`Coverage` instance that was started, or None if it was + not started by this call. + """ cps = os.environ.get("COVERAGE_PROCESS_START") - if cps: - cov = coverage(config_file=cps, auto_data=True) - cov.start() - cov._warn_no_data = False - cov._warn_unimported_source = False + if not cps: + # No request for coverage, nothing to do. + return None + + # This function can be called more than once in a process. This happens + # because some virtualenv configurations make the same directory visible + # twice in sys.path. This means that the .pth file will be found twice, + # and executed twice, executing this function twice. We set a global + # flag (an attribute on this function) to indicate that coverage.py has + # already been started, so we can avoid doing it twice. + # + # https://bitbucket.org/ned/coveragepy/issue/340/keyerror-subpy has more + # details. + + if hasattr(process_startup, "coverage"): + # We've annotated this function before, so we must have already + # started coverage.py in this process. Nothing to do. + return None + + cov = Coverage(config_file=cps, auto_data=True) + process_startup.coverage = cov + cov.start() + cov._warn_no_data = False + cov._warn_unimported_source = False + + return cov -# A hack for debugging testing in subprocesses. 
-_TEST_NAME_FILE = "" #"/tmp/covtest.txt" +def _prevent_sub_process_measurement(): + """Stop any subprocess auto-measurement from writing data.""" + auto_created_coverage = getattr(process_startup, "coverage", None) + if auto_created_coverage is not None: + auto_created_coverage._auto_data = False diff --git a/python/helpers/coveragepy/coverage/data.py b/python/helpers/coveragepy/coverage/data.py index fb88c5b1e638..60e104d962ad 100644 --- a/python/helpers/coveragepy/coverage/data.py +++ b/python/helpers/coveragepy/coverage/data.py @@ -1,81 +1,643 @@ -"""Coverage data for Coverage.""" +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt +"""Coverage data for coverage.py.""" + +import glob +import itertools +import json +import optparse import os +import os.path +import random +import re +import socket -from coverage.backward import iitems, pickle, sorted # pylint: disable=W0622 +from coverage import env +from coverage.backward import iitems, string_class +from coverage.debug import _TEST_NAME_FILE from coverage.files import PathAliases -from coverage.misc import file_be_gone +from coverage.misc import CoverageException, file_be_gone, isolate_module + +os = isolate_module(os) class CoverageData(object): """Manages collected coverage data, including file storage. - The data file format is a pickled dict, with these keys: + This class is the public supported API to the data coverage.py collects + during program execution. It includes information about what code was + executed. It does not include information from the analysis phase, to + determine what lines could have been executed, or what lines were not + executed. - * collector: a string identifying the collecting software + .. note:: - * lines: a dict mapping filenames to sorted lists of line numbers - executed: - { 'file1': [17,23,45], 'file2': [1,2,3], ... } + The file format is not documented or guaranteed. 
It will change in + the future, in possibly complicated ways. Do not read coverage.py + data files directly. Use this API to avoid disruption. - * arcs: a dict mapping filenames to sorted lists of line number pairs: - { 'file1': [(17,23), (17,25), (25,26)], ... } + There are a number of kinds of data that can be collected: + + * **lines**: the line numbers of source lines that were executed. + These are always available. + + * **arcs**: pairs of source and destination line numbers for transitions + between source lines. These are only available if branch coverage was + used. + + * **file tracer names**: the module names of the file tracer plugins that + handled each file in the data. + + * **run information**: information about the program execution. This is + written during "coverage run", and then accumulated during "coverage + combine". + + Lines, arcs, and file tracer names are stored for each source file. File + names in this API are case-sensitive, even on platforms with + case-insensitive file systems. + + To read a coverage.py data file, use :meth:`read_file`, or + :meth:`read_fileobj` if you have an already-opened file. You can then + access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`, + or :meth:`file_tracer`. Run information is available with + :meth:`run_infos`. + + The :meth:`has_arcs` method indicates whether arc data is available. You + can get a list of the files in the data with :meth:`measured_files`. + A summary of the line data is available from :meth:`line_counts`. As with + most Python containers, you can determine if there is any data at all by + using this object as a boolean value. + + + Most data files will be created by coverage.py itself, but you can use + methods here to create data files if you like. The :meth:`add_lines`, + :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways + that are convenient for coverage.py. The :meth:`add_run_info` method adds + key-value pairs to the run information. 
+ + To add a file without any measured data, use :meth:`touch_file`. + + You write to a named file with :meth:`write_file`, or to an already opened + file with :meth:`write_fileobj`. + + You can clear the data in memory with :meth:`erase`. Two data collections + can be combined by using :meth:`update` on one :class:`CoverageData`, + passing it the other. """ - def __init__(self, basename=None, collector=None, debug=None): + # The data file format is JSON, with these keys: + # + # * lines: a dict mapping file names to lists of line numbers + # executed:: + # + # { "file1": [17,23,45], "file2": [1,2,3], ... } + # + # * arcs: a dict mapping file names to lists of line number pairs:: + # + # { "file1": [[17,23], [17,25], [25,26]], ... } + # + # * file_tracers: a dict mapping file names to plugin names:: + # + # { "file1": "django.coverage", ... } + # + # * runs: a list of dicts of information about the coverage.py runs + # contributing to the data:: + # + # [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ] + # + # Only one of `lines` or `arcs` will be present: with branch coverage, data + # is stored as arcs. Without branch coverage, it is stored as lines. The + # line data is easily recovered from the arcs: it is all the first elements + # of the pairs that are greater than zero. + + def __init__(self, debug=None): """Create a CoverageData. - `basename` is the name of the file to use for storing data. - - `collector` is a string describing the coverage measurement software. - `debug` is a `DebugControl` object for writing debug messages. """ - self.collector = collector or 'unknown' - self.debug = debug - - self.use_file = True - - # Construct the filename that will be used for data file storage, if we - # ever do any file storage. 
- self.filename = basename or ".coverage" - self.filename = os.path.abspath(self.filename) + self._debug = debug # A map from canonical Python source file name to a dictionary in # which there's an entry for each line number that has been # executed: # - # { - # 'filename1.py': { 12: None, 47: None, ... }, - # ... - # } + # { 'filename1.py': [12, 47, 1001], ... } # - self.lines = {} + self._lines = None # A map from canonical Python source file name to a dictionary with an # entry for each pair of line numbers forming an arc: # - # { - # 'filename1.py': { (12,14): None, (47,48): None, ... }, - # ... - # } + # { 'filename1.py': [(12,14), (47,48), ... ], ... } # - self.arcs = {} + self._arcs = None - def usefile(self, use_file=True): - """Set whether or not to use a disk file for data.""" - self.use_file = use_file + # A map from canonical source file name to a plugin module name: + # + # { 'filename1.py': 'django.coverage', ... } + # + self._file_tracers = {} - def read(self): - """Read coverage data from the coverage data file (if it exists).""" - if self.use_file: - self.lines, self.arcs = self._read_file(self.filename) + # A list of dicts of information about the coverage.py runs. + self._runs = [] + + def __repr__(self): + return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format( + klass=self.__class__.__name__, + lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)), + arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)), + tracers="{{{0}}}".format(len(self._file_tracers)), + runs="[{0}]".format(len(self._runs)), + ) + + ## + ## Reading data + ## + + def has_arcs(self): + """Does this data have arcs? + + Arc data is only available if branch coverage was used during + collection. + + Returns a boolean. + + """ + return self._has_arcs() + + def lines(self, filename): + """Get the list of lines executed for a file. + + If the file was not measured, returns None. 
A file might be measured, + and have no lines executed, in which case an empty list is returned. + + If the file was executed, returns a list of integers, the line numbers + executed in the file. The list is in no particular order. + + """ + if self._arcs is not None: + arcs = self._arcs.get(filename) + if arcs is not None: + all_lines = itertools.chain.from_iterable(arcs) + return list(set(l for l in all_lines if l > 0)) + elif self._lines is not None: + return self._lines.get(filename) + return None + + def arcs(self, filename): + """Get the list of arcs executed for a file. + + If the file was not measured, returns None. A file might be measured, + and have no arcs executed, in which case an empty list is returned. + + If the file was executed, returns a list of 2-tuples of integers. Each + pair is a starting line number and an ending line number for a + transition from one line to another. The list is in no particular + order. + + Negative numbers have special meaning. If the starting line number is + -N, it represents an entry to the code object that starts at line N. + If the ending ling number is -N, it's an exit from the code object that + starts at line N. + + """ + if self._arcs is not None: + if filename in self._arcs: + return self._arcs[filename] + return None + + def file_tracer(self, filename): + """Get the plugin name of the file tracer for a file. + + Returns the name of the plugin that handles this file. If the file was + measured, but didn't use a plugin, then "" is returned. If the file + was not measured, then None is returned. + + """ + # Because the vast majority of files involve no plugin, we don't store + # them explicitly in self._file_tracers. Check the measured data + # instead to see if it was a known file with no plugin. + if filename in (self._arcs or self._lines or {}): + return self._file_tracers.get(filename, "") + return None + + def run_infos(self): + """Return the list of dicts of run information. 
+ + For data collected during a single run, this will be a one-element + list. If data has been combined, there will be one element for each + original data file. + + """ + return self._runs + + def measured_files(self): + """A list of all files that had been measured.""" + return list(self._arcs or self._lines or {}) + + def line_counts(self, fullpath=False): + """Return a dict summarizing the line coverage data. + + Keys are based on the file names, and values are the number of executed + lines. If `fullpath` is true, then the keys are the full pathnames of + the files, otherwise they are the basenames of the files. + + Returns a dict mapping file names to counts of lines. + + """ + summ = {} + if fullpath: + filename_fn = lambda f: f else: - self.lines, self.arcs = {}, {} + filename_fn = os.path.basename + for filename in self.measured_files(): + summ[filename_fn(filename)] = len(self.lines(filename)) + return summ - def write(self, suffix=None): + def __nonzero__(self): + return bool(self._lines or self._arcs) + + __bool__ = __nonzero__ + + def read_fileobj(self, file_obj): + """Read the coverage data from the given file object. + + Should only be used on an empty CoverageData object. 
+ + """ + data = self._read_raw_data(file_obj) + + self._lines = self._arcs = None + + if 'lines' in data: + self._lines = data['lines'] + if 'arcs' in data: + self._arcs = dict( + (fname, [tuple(pair) for pair in arcs]) + for fname, arcs in iitems(data['arcs']) + ) + self._file_tracers = data.get('file_tracers', {}) + self._runs = data.get('runs', []) + + self._validate() + + def read_file(self, filename): + """Read the coverage data from `filename` into this object.""" + if self._debug and self._debug.should('dataio'): + self._debug.write("Reading data from %r" % (filename,)) + try: + with self._open_for_reading(filename) as f: + self.read_fileobj(f) + except Exception as exc: + raise CoverageException( + "Couldn't read data from '%s': %s: %s" % ( + filename, exc.__class__.__name__, exc, + ) + ) + + _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!" + + @classmethod + def _open_for_reading(cls, filename): + """Open a file appropriately for reading data.""" + return open(filename, "r") + + @classmethod + def _read_raw_data(cls, file_obj): + """Read the raw data from a file object.""" + go_away = file_obj.read(len(cls._GO_AWAY)) + if go_away != cls._GO_AWAY: + raise CoverageException("Doesn't seem to be a coverage.py data file") + return json.load(file_obj) + + @classmethod + def _read_raw_data_file(cls, filename): + """Read the raw data from a file, for debugging.""" + with cls._open_for_reading(filename) as f: + return cls._read_raw_data(f) + + ## + ## Writing data + ## + + def add_lines(self, line_data): + """Add measured line data. + + `line_data` is a dictionary mapping file names to dictionaries:: + + { filename: { lineno: None, ... 
}, ...} + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding lines: %d files, %d lines total" % ( + len(line_data), sum(len(lines) for lines in line_data.values()) + )) + if self._has_arcs(): + raise CoverageException("Can't add lines to existing arc data") + + if self._lines is None: + self._lines = {} + for filename, linenos in iitems(line_data): + if filename in self._lines: + new_linenos = set(self._lines[filename]) + new_linenos.update(linenos) + linenos = new_linenos + self._lines[filename] = list(linenos) + + self._validate() + + def add_arcs(self, arc_data): + """Add measured arc data. + + `arc_data` is a dictionary mapping file names to dictionaries:: + + { filename: { (l1,l2): None, ... }, ...} + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding arcs: %d files, %d arcs total" % ( + len(arc_data), sum(len(arcs) for arcs in arc_data.values()) + )) + if self._has_lines(): + raise CoverageException("Can't add arcs to existing line data") + + if self._arcs is None: + self._arcs = {} + for filename, arcs in iitems(arc_data): + if filename in self._arcs: + new_arcs = set(self._arcs[filename]) + new_arcs.update(arcs) + arcs = new_arcs + self._arcs[filename] = list(arcs) + + self._validate() + + def add_file_tracers(self, file_tracers): + """Add per-file plugin information. + + `file_tracers` is { filename: plugin_name, ... 
} + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) + + existing_files = self._arcs or self._lines or {} + for filename, plugin_name in iitems(file_tracers): + if filename not in existing_files: + raise CoverageException( + "Can't add file tracer data for unmeasured file '%s'" % (filename,) + ) + existing_plugin = self._file_tracers.get(filename) + if existing_plugin is not None and plugin_name != existing_plugin: + raise CoverageException( + "Conflicting file tracer name for '%s': %r vs %r" % ( + filename, existing_plugin, plugin_name, + ) + ) + self._file_tracers[filename] = plugin_name + + self._validate() + + def add_run_info(self, **kwargs): + """Add information about the run. + + Keywords are arbitrary, and are stored in the run dictionary. Values + must be JSON serializable. You may use this function more than once, + but repeated keywords overwrite each other. + + """ + if self._debug and self._debug.should('dataop'): + self._debug.write("Adding run info: %r" % (kwargs,)) + if not self._runs: + self._runs = [{}] + self._runs[0].update(kwargs) + self._validate() + + def touch_file(self, filename): + """Ensure that `filename` appears in the data, empty if needed.""" + if self._debug and self._debug.should('dataop'): + self._debug.write("Touching %r" % (filename,)) + if not self._has_arcs() and not self._has_lines(): + raise CoverageException("Can't touch files in an empty CoverageData") + + if self._has_arcs(): + where = self._arcs + else: + where = self._lines + where.setdefault(filename, []) + + self._validate() + + def write_fileobj(self, file_obj): + """Write the coverage data to `file_obj`.""" + + # Create the file data. 
+ file_data = {} + + if self._has_arcs(): + file_data['arcs'] = self._arcs + + if self._has_lines(): + file_data['lines'] = self._lines + + if self._file_tracers: + file_data['file_tracers'] = self._file_tracers + + if self._runs: + file_data['runs'] = self._runs + + # Write the data to the file. + file_obj.write(self._GO_AWAY) + json.dump(file_data, file_obj) + + def write_file(self, filename): + """Write the coverage data to `filename`.""" + if self._debug and self._debug.should('dataio'): + self._debug.write("Writing data to %r" % (filename,)) + with open(filename, 'w') as fdata: + self.write_fileobj(fdata) + + def erase(self): + """Erase the data in this object.""" + self._lines = None + self._arcs = None + self._file_tracers = {} + self._runs = [] + self._validate() + + def update(self, other_data, aliases=None): + """Update this data with data from another `CoverageData`. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. + + """ + if self._has_lines() and other_data._has_arcs(): + raise CoverageException("Can't combine arc data with line data") + if self._has_arcs() and other_data._has_lines(): + raise CoverageException("Can't combine line data with arc data") + + aliases = aliases or PathAliases() + + # _file_tracers: only have a string, so they have to agree. + # Have to do these first, so that our examination of self._arcs and + # self._lines won't be confused by data updated from other_data. + for filename in other_data.measured_files(): + other_plugin = other_data.file_tracer(filename) + filename = aliases.map(filename) + this_plugin = self.file_tracer(filename) + if this_plugin is None: + if other_plugin: + self._file_tracers[filename] = other_plugin + elif this_plugin != other_plugin: + raise CoverageException( + "Conflicting file tracer name for '%s': %r vs %r" % ( + filename, this_plugin, other_plugin, + ) + ) + + # _runs: add the new runs to these runs. 
+ self._runs.extend(other_data._runs) + + # _lines: merge dicts. + if other_data._has_lines(): + if self._lines is None: + self._lines = {} + for filename, file_lines in iitems(other_data._lines): + filename = aliases.map(filename) + if filename in self._lines: + lines = set(self._lines[filename]) + lines.update(file_lines) + file_lines = list(lines) + self._lines[filename] = file_lines + + # _arcs: merge dicts. + if other_data._has_arcs(): + if self._arcs is None: + self._arcs = {} + for filename, file_arcs in iitems(other_data._arcs): + filename = aliases.map(filename) + if filename in self._arcs: + arcs = set(self._arcs[filename]) + arcs.update(file_arcs) + file_arcs = list(arcs) + self._arcs[filename] = file_arcs + + self._validate() + + ## + ## Miscellaneous + ## + + def _validate(self): + """If we are in paranoid mode, validate that everything is right.""" + if env.TESTING: + self._validate_invariants() + + def _validate_invariants(self): + """Validate internal invariants.""" + # Only one of _lines or _arcs should exist. + assert not(self._has_lines() and self._has_arcs()), ( + "Shouldn't have both _lines and _arcs" + ) + + # _lines should be a dict of lists of ints. + if self._has_lines(): + for fname, lines in iitems(self._lines): + assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,) + assert all(isinstance(x, int) for x in lines), ( + "_lines[%r] shouldn't be %r" % (fname, lines) + ) + + # _arcs should be a dict of lists of pairs of ints. + if self._has_arcs(): + for fname, arcs in iitems(self._arcs): + assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,) + assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), ( + "_arcs[%r] shouldn't be %r" % (fname, arcs) + ) + + # _file_tracers should have only non-empty strings as values. 
+ for fname, plugin in iitems(self._file_tracers): + assert isinstance(fname, string_class), ( + "Key in _file_tracers shouldn't be %r" % (fname,) + ) + assert plugin and isinstance(plugin, string_class), ( + "_file_tracers[%r] shoudn't be %r" % (fname, plugin) + ) + + # _runs should be a list of dicts. + for val in self._runs: + assert isinstance(val, dict) + for key in val: + assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,) + + def add_to_hash(self, filename, hasher): + """Contribute `filename`'s data to the `hasher`. + + `hasher` is a `coverage.misc.Hasher` instance to be updated with + the file's data. It should only get the results data, not the run + data. + + """ + if self._has_arcs(): + hasher.update(sorted(self.arcs(filename) or [])) + else: + hasher.update(sorted(self.lines(filename) or [])) + hasher.update(self.file_tracer(filename)) + + ## + ## Internal + ## + + def _has_lines(self): + """Do we have data in self._lines?""" + return self._lines is not None + + def _has_arcs(self): + """Do we have data in self._arcs?""" + return self._arcs is not None + + +class CoverageDataFiles(object): + """Manage the use of coverage data files.""" + + def __init__(self, basename=None, warn=None): + """Create a CoverageDataFiles to manage data files. + + `warn` is the warning function to use. + + `basename` is the name of the file to use for storing data. + + """ + self.warn = warn + # Construct the file name that will be used for data storage. + self.filename = os.path.abspath(basename or ".coverage") + + def erase(self, parallel=False): + """Erase the data from the file storage. + + If `parallel` is true, then also deletes data files created from the + basename by parallel-mode. 
+ + """ + file_be_gone(self.filename) + if parallel: + data_dir, local = os.path.split(self.filename) + localdot = local + '.*' + pattern = os.path.join(os.path.abspath(data_dir), localdot) + for filename in glob.glob(pattern): + file_be_gone(filename) + + def read(self, data): + """Read the coverage data.""" + if os.path.exists(self.filename): + data.read_file(self.filename) + + def write(self, data, suffix=None): """Write the collected coverage data to a file. `suffix` is a suffix to append to the base file name. This can be used @@ -84,98 +646,27 @@ class CoverageData(object): the suffix. """ - if self.use_file: - filename = self.filename - if suffix: - filename += "." + suffix - self.write_file(filename) - - def erase(self): - """Erase the data, both in this object, and from its file storage.""" - if self.use_file: - if self.filename: - file_be_gone(self.filename) - self.lines = {} - self.arcs = {} - - def line_data(self): - """Return the map from filenames to lists of line numbers executed.""" - return dict( - [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)] + filename = self.filename + if suffix is True: + # If data_suffix was a simple true value, then make a suffix with + # plenty of distinguishing information. We do this here in + # `save()` at the last minute so that the pid will be correct even + # if the process forks. + extra = "" + if _TEST_NAME_FILE: # pragma: debugging + with open(_TEST_NAME_FILE) as f: + test_name = f.read() + extra = "." + test_name + suffix = "%s%s.%s.%06d" % ( + socket.gethostname(), extra, os.getpid(), + random.randint(0, 999999) ) - def arc_data(self): - """Return the map from filenames to lists of line number pairs.""" - return dict( - [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)] - ) + if suffix: + filename += "." + suffix + data.write_file(filename) - def write_file(self, filename): - """Write the coverage data to `filename`.""" - - # Create the file data. 
- data = {} - - data['lines'] = self.line_data() - arcs = self.arc_data() - if arcs: - data['arcs'] = arcs - - if self.collector: - data['collector'] = self.collector - - if self.debug and self.debug.should('dataio'): - self.debug.write("Writing data to %r" % (filename,)) - - # Write the pickle to the file. - fdata = open(filename, 'wb') - try: - pickle.dump(data, fdata, 2) - finally: - fdata.close() - - def read_file(self, filename): - """Read the coverage data from `filename`.""" - self.lines, self.arcs = self._read_file(filename) - - def raw_data(self, filename): - """Return the raw pickled data from `filename`.""" - if self.debug and self.debug.should('dataio'): - self.debug.write("Reading data from %r" % (filename,)) - fdata = open(filename, 'rb') - try: - data = pickle.load(fdata) - finally: - fdata.close() - return data - - def _read_file(self, filename): - """Return the stored coverage data from the given file. - - Returns two values, suitable for assigning to `self.lines` and - `self.arcs`. - - """ - lines = {} - arcs = {} - try: - data = self.raw_data(filename) - if isinstance(data, dict): - # Unpack the 'lines' item. - lines = dict([ - (f, dict.fromkeys(linenos, None)) - for f, linenos in iitems(data.get('lines', {})) - ]) - # Unpack the 'arcs' item. - arcs = dict([ - (f, dict.fromkeys(arcpairs, None)) - for f, arcpairs in iitems(data.get('arcs', {})) - ]) - except Exception: - pass - return lines, arcs - - def combine_parallel_data(self, aliases=None): + def combine_parallel_data(self, data, aliases=None, data_paths=None): """Combine a number of data files together. Treat `self.filename` as a file prefix, and combine the data from all @@ -184,95 +675,94 @@ class CoverageData(object): If `aliases` is provided, it's a `PathAliases` object that is used to re-map paths to match the local machine's. + If `data_paths` is provided, it is a list of directories or files to + combine. 
Directories are searched for files that start with + `self.filename` plus dot as a prefix, and those files are combined. + + If `data_paths` is not provided, then the directory portion of + `self.filename` is used as the directory to search for data files. + + Every data file found and combined is then deleted from disk. If a file + cannot be read, a warning will be issued, and the file will not be + deleted. + """ - aliases = aliases or PathAliases() + # Because of the os.path.abspath in the constructor, data_dir will + # never be an empty string. data_dir, local = os.path.split(self.filename) - localdot = local + '.' - for f in os.listdir(data_dir or '.'): - if f.startswith(localdot): - full_path = os.path.join(data_dir, f) - new_lines, new_arcs = self._read_file(full_path) - for filename, file_data in iitems(new_lines): - filename = aliases.map(filename) - self.lines.setdefault(filename, {}).update(file_data) - for filename, file_data in iitems(new_arcs): - filename = aliases.map(filename) - self.arcs.setdefault(filename, {}).update(file_data) - if f != local: - os.remove(full_path) + localdot = local + '.*' - def add_line_data(self, line_data): - """Add executed line data. + data_paths = data_paths or [data_dir] + files_to_combine = [] + for p in data_paths: + if os.path.isfile(p): + files_to_combine.append(os.path.abspath(p)) + elif os.path.isdir(p): + pattern = os.path.join(os.path.abspath(p), localdot) + files_to_combine.extend(glob.glob(pattern)) + else: + raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,)) - `line_data` is { filename: { lineno: None, ... }, ...} + for f in files_to_combine: + new_data = CoverageData() + try: + new_data.read_file(f) + except CoverageException as exc: + if self.warn: + # The CoverageException has the file name in it, so just + # use the message as the warning. 
+ self.warn(str(exc)) + else: + data.update(new_data, aliases=aliases) + file_be_gone(f) - """ - for filename, linenos in iitems(line_data): - self.lines.setdefault(filename, {}).update(linenos) - def add_arc_data(self, arc_data): - """Add measured arc data. +def canonicalize_json_data(data): + """Canonicalize our JSON data so it can be compared.""" + for fname, lines in iitems(data.get('lines', {})): + data['lines'][fname] = sorted(lines) + for fname, arcs in iitems(data.get('arcs', {})): + data['arcs'][fname] = sorted(arcs) - `arc_data` is { filename: { (l1,l2): None, ... }, ...} - """ - for filename, arcs in iitems(arc_data): - self.arcs.setdefault(filename, {}).update(arcs) +def pretty_data(data): + """Format data as JSON, but as nicely as possible. - def touch_file(self, filename): - """Ensure that `filename` appears in the data, empty if needed.""" - self.lines.setdefault(filename, {}) + Returns a string. - def measured_files(self): - """A list of all files that had been measured.""" - return list(self.lines.keys()) + """ + # Start with a basic JSON dump. + out = json.dumps(data, indent=4, sort_keys=True) + # But pairs of numbers shouldn't be split across lines... + out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out) + # Trailing spaces mess with tests, get rid of them. + out = re.sub(r"(?m)\s+$", "", out) + return out - def executed_lines(self, filename): - """A map containing all the line numbers executed in `filename`. - If `filename` hasn't been collected at all (because it wasn't executed) - then return an empty map. +def debug_main(args): + """Dump the raw data from data files. 
- """ - return self.lines.get(filename) or {} + Run this as:: - def executed_arcs(self, filename): - """A map containing all the arcs executed in `filename`.""" - return self.arcs.get(filename) or {} + $ python -m coverage.data [FILE] - def add_to_hash(self, filename, hasher): - """Contribute `filename`'s data to the Md5Hash `hasher`.""" - hasher.update(self.executed_lines(filename)) - hasher.update(self.executed_arcs(filename)) + """ + parser = optparse.OptionParser() + parser.add_option( + "-c", "--canonical", action="store_true", + help="Sort data into a canonical order", + ) + options, args = parser.parse_args(args) - def summary(self, fullpath=False): - """Return a dict summarizing the coverage data. - - Keys are based on the filenames, and values are the number of executed - lines. If `fullpath` is true, then the keys are the full pathnames of - the files, otherwise they are the basenames of the files. - - """ - summ = {} - if fullpath: - filename_fn = lambda f: f - else: - filename_fn = os.path.basename - for filename, lines in iitems(self.lines): - summ[filename_fn(filename)] = len(lines) - return summ - - def has_arcs(self): - """Does this data have arcs?""" - return bool(self.arcs) + for filename in (args or [".coverage"]): + print("--- {0} ------------------------------".format(filename)) + data = CoverageData._read_raw_data_file(filename) + if options.canonical: + canonicalize_json_data(data) + print(pretty_data(data)) if __name__ == '__main__': - # Ad-hoc: show the raw data in a data file. 
- import pprint, sys - covdata = CoverageData() - if sys.argv[1:]: - fname = sys.argv[1] - else: - fname = covdata.filename - pprint.pprint(covdata.raw_data(fname)) + import sys + debug_main(sys.argv[1:]) diff --git a/python/helpers/coveragepy/coverage/debug.py b/python/helpers/coveragepy/coverage/debug.py index 104f3b1d0a43..8ed664ce2ce7 100644 --- a/python/helpers/coveragepy/coverage/debug.py +++ b/python/helpers/coveragepy/coverage/debug.py @@ -1,6 +1,15 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """Control of and utilities for debugging.""" +import inspect import os +import sys + +from coverage.misc import isolate_module + +os = isolate_module(os) # When debugging, it can be helpful to force some options, especially when @@ -8,6 +17,9 @@ import os # This is a list of forced debugging options. FORCED_DEBUG = [] +# A hack for debugging testing in sub-processes. +_TEST_NAME_FILE = "" # "/tmp/covtest.txt" + class DebugControl(object): """Control and output for debugging.""" @@ -17,6 +29,9 @@ class DebugControl(object): self.options = options self.output = output + def __repr__(self): + return "" % (self.options, self.output) + def should(self, option): """Decide whether to output debug information in category `option`.""" return (option in self.options or option in FORCED_DEBUG) @@ -26,14 +41,22 @@ class DebugControl(object): if self.should('pid'): msg = "pid %5d: %s" % (os.getpid(), msg) self.output.write(msg+"\n") + if self.should('callers'): + dump_stack_frames(out=self.output) self.output.flush() - def write_formatted_info(self, info): + def write_formatted_info(self, header, info): """Write a sequence of (label,data) pairs nicely.""" + self.write(info_header(header)) for line in info_formatter(info): self.write(" %s" % line) +def info_header(label): + """Make a nice header string.""" + return "--{0:-<60s}".format(" "+label+" ") + + def 
info_formatter(info): """Produce a sequence of formatted lines from info. @@ -41,14 +64,51 @@ def info_formatter(info): nicely formatted, ready to print. """ - label_len = max([len(l) for l, _d in info]) + info = list(info) + if not info: + return + label_len = max(len(l) for l, _d in info) for label, data in info: if data == []: data = "-none-" - if isinstance(data, (list, tuple)): + if isinstance(data, (list, set, tuple)): prefix = "%*s:" % (label_len, label) for e in data: yield "%*s %s" % (label_len+1, prefix, e) prefix = "" else: yield "%*s: %s" % (label_len, label, data) + + +def short_stack(limit=None): # pragma: debugging + """Return a string summarizing the call stack. + + The string is multi-line, with one line per stack frame. Each line shows + the function name, the file name, and the line number: + + ... + start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95 + import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81 + import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159 + ... + + `limit` is the number of frames to include, defaulting to all of them. 
+ + """ + stack = inspect.stack()[limit:0:-1] + return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack) + + +def dump_stack_frames(limit=None, out=None): # pragma: debugging + """Print a summary of the stack to stdout, or some place else.""" + out = out or sys.stdout + out.write(short_stack(limit=limit)) + out.write("\n") + + +def log(msg, stack=False): # pragma: debugging + """Write a log message as forcefully as possible.""" + with open("/tmp/covlog.txt", "a") as f: + f.write("{pid}: {msg}\n".format(pid=os.getpid(), msg=msg)) + if stack: + dump_stack_frames(out=f) diff --git a/python/helpers/coveragepy/coverage/env.py b/python/helpers/coveragepy/coverage/env.py new file mode 100644 index 000000000000..4cd02c04fa3a --- /dev/null +++ b/python/helpers/coveragepy/coverage/env.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + +"""Determine facts about the environment.""" + +import os +import sys + +# Operating systems. +WINDOWS = sys.platform == "win32" +LINUX = sys.platform == "linux2" + +# Python implementations. +PYPY = '__pypy__' in sys.builtin_module_names + +# Python versions. +PYVERSION = sys.version_info +PY2 = PYVERSION < (3, 0) +PY3 = PYVERSION >= (3, 0) + +# Coverage.py specifics. + +# Are we using the C-implemented trace function? +C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c' + +# Are we coverage-measuring ourselves? +METACOV = os.getenv('COVERAGE_COVERAGE', '') != '' + +# Are we running our test suite? +# Even when running tests, you can use COVERAGE_TESTING=0 to disable the +# test-specific behavior like contracts. 
+TESTING = os.getenv('COVERAGE_TESTING', '') == 'True' diff --git a/python/helpers/coveragepy/coverage/execfile.py b/python/helpers/coveragepy/coverage/execfile.py index f6ebdf79bb9e..3e20a527d931 100644 --- a/python/helpers/coveragepy/coverage/execfile.py +++ b/python/helpers/coveragepy/coverage/execfile.py @@ -1,41 +1,73 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """Execute files of Python code.""" -import imp, marshal, os, sys +import marshal +import os +import sys +import types -from coverage.backward import exec_code_object, open_source -from coverage.misc import ExceptionDuringRun, NoCode, NoSource +from coverage.backward import BUILTINS +from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec +from coverage.misc import ExceptionDuringRun, NoCode, NoSource, isolate_module +from coverage.phystokens import compile_unicode +from coverage.python import get_python_source + +os = isolate_module(os) -try: - # In Py 2.x, the builtins were in __builtin__ - BUILTINS = sys.modules['__builtin__'] -except KeyError: - # In Py 3.x, they're in builtins - BUILTINS = sys.modules['builtins'] - - -def rsplit1(s, sep): - """The same as s.rsplit(sep, 1), but works in 2.3""" - parts = s.split(sep) - return sep.join(parts[:-1]), parts[-1] - - -def run_python_module(modulename, args): - """Run a python module, as though with ``python -m name args...``. - - `modulename` is the name of the module, possibly a dot-separated name. - `args` is the argument array to present as sys.argv, including the first - element naming the module being executed. +class DummyLoader(object): + """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. 
+ Currently only implements the .fullname attribute """ - openfile = None - glo, loc = globals(), locals() - try: + def __init__(self, fullname, *_args): + self.fullname = fullname + + +if importlib_util_find_spec: + def find_module(modulename): + """Find the module named `modulename`. + + Returns the file path of the module, and the name of the enclosing + package. + """ + try: + spec = importlib_util_find_spec(modulename) + except ImportError as err: + raise NoSource(str(err)) + if not spec: + raise NoSource("No module named %r" % (modulename,)) + pathname = spec.origin + packagename = spec.name + if pathname.endswith("__init__.py") and not modulename.endswith("__init__"): + mod_main = modulename + ".__main__" + spec = importlib_util_find_spec(mod_main) + if not spec: + raise NoSource( + "No module named %s; " + "%r is a package and cannot be directly executed" + % (mod_main, modulename) + ) + pathname = spec.origin + packagename = spec.name + packagename = packagename.rpartition(".")[0] + return pathname, packagename +else: + def find_module(modulename): + """Find the module named `modulename`. + + Returns the file path of the module, and the name of the enclosing + package. + """ + openfile = None + glo, loc = globals(), locals() try: # Search for the module - inside its parent package, if any - using # standard import mechanics. if '.' 
in modulename: - packagename, name = rsplit1(modulename, '.') + packagename, name = modulename.rsplit('.', 1) package = __import__(packagename, glo, loc, ['__path__']) searchpath = package.__path__ else: @@ -57,51 +89,92 @@ def run_python_module(modulename, args): package = __import__(packagename, glo, loc, ['__path__']) searchpath = package.__path__ openfile, pathname, _ = imp.find_module(name, searchpath) - except ImportError: - _, err, _ = sys.exc_info() + except ImportError as err: raise NoSource(str(err)) - finally: - if openfile: - openfile.close() + finally: + if openfile: + openfile.close() + + return pathname, packagename + + +def run_python_module(modulename, args): + """Run a Python module, as though with ``python -m name args...``. + + `modulename` is the name of the module, possibly a dot-separated name. + `args` is the argument array to present as sys.argv, including the first + element naming the module being executed. + + """ + pathname, packagename = find_module(modulename) - # Finally, hand the file off to run_python_file for execution. pathname = os.path.abspath(pathname) args[0] = pathname - run_python_file(pathname, args, package=packagename) + run_python_file(pathname, args, package=packagename, modulename=modulename, path0="") -def run_python_file(filename, args, package=None): - """Run a python file as if it were the main program on the command line. +def run_python_file(filename, args, package=None, modulename=None, path0=None): + """Run a Python file as if it were the main program on the command line. `filename` is the path to the file to execute, it need not be a .py file. `args` is the argument array to present as sys.argv, including the first element naming the file being executed. `package` is the name of the enclosing package, if any. + `modulename` is the name of the module the file was run as. + + `path0` is the value to put into sys.path[0]. If it's None, then this + function will decide on a value. 
+ """ + if modulename is None and sys.version_info >= (3, 3): + modulename = '__main__' + # Create a module to serve as __main__ old_main_mod = sys.modules['__main__'] - main_mod = imp.new_module('__main__') + main_mod = types.ModuleType('__main__') sys.modules['__main__'] = main_mod main_mod.__file__ = filename if package: main_mod.__package__ = package + if modulename: + main_mod.__loader__ = DummyLoader(modulename) + main_mod.__builtins__ = BUILTINS # Set sys.argv properly. old_argv = sys.argv sys.argv = args + if os.path.isdir(filename): + # Running a directory means running the __main__.py file in that + # directory. + my_path0 = filename + + for ext in [".py", ".pyc", ".pyo"]: + try_filename = os.path.join(filename, "__main__" + ext) + if os.path.exists(try_filename): + filename = try_filename + break + else: + raise NoSource("Can't find '__main__' module in '%s'" % filename) + else: + my_path0 = os.path.abspath(os.path.dirname(filename)) + + # Set sys.path correctly. + old_path0 = sys.path[0] + sys.path[0] = path0 if path0 is not None else my_path0 + try: # Make a code object somehow. - if filename.endswith(".pyc") or filename.endswith(".pyo"): + if filename.endswith((".pyc", ".pyo")): code = make_code_from_pyc(filename) else: code = make_code_from_py(filename) # Execute the code object. try: - exec_code_object(code, main_mod.__dict__) + exec(code, main_mod.__dict__) except SystemExit: # The user called sys.exit(). Just pass it along to the upper # layers, where it will be handled. @@ -109,37 +182,34 @@ def run_python_file(filename, args, package=None): except: # Something went wrong while executing the user code. # Get the exc_info, and pack them into an exception that we can - # throw up to the outer loop. We peel two layers off the traceback + # throw up to the outer loop. We peel one layer off the traceback # so that the coverage.py code doesn't appear in the final printed # traceback. 
typ, err, tb = sys.exc_info() - raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next) - finally: - # Restore the old __main__ - sys.modules['__main__'] = old_main_mod - # Restore the old argv and path + # PyPy3 weirdness. If I don't access __context__, then somehow it + # is non-None when the exception is reported at the upper layer, + # and a nested exception is shown to the user. This getattr fixes + # it somehow? https://bitbucket.org/pypy/pypy/issue/1903 + getattr(err, '__context__', None) + + raise ExceptionDuringRun(typ, err, tb.tb_next) + finally: + # Restore the old __main__, argv, and path. + sys.modules['__main__'] = old_main_mod sys.argv = old_argv + sys.path[0] = old_path0 + def make_code_from_py(filename): """Get source from `filename` and make a code object of it.""" # Open the source file. try: - source_file = open_source(filename) - except IOError: - raise NoSource("No file to run: %r" % filename) - - try: - source = source_file.read() - finally: - source_file.close() - - # We have the source. `compile` still needs the last line to be clean, - # so make sure it is, then compile a code object from it. - if not source or source[-1] != '\n': - source += '\n' - code = compile(source, filename, "exec") + source = get_python_source(filename) + except (IOError, NoSource): + raise NoSource("No file to run: '%s'" % filename) + code = compile_unicode(source, filename, "exec") return code @@ -148,13 +218,13 @@ def make_code_from_pyc(filename): try: fpyc = open(filename, "rb") except IOError: - raise NoCode("No file to run: %r" % filename) + raise NoCode("No file to run: '%s'" % filename) - try: + with fpyc: # First four bytes are a version-specific magic number. It has to # match or we won't run the file. magic = fpyc.read(4) - if magic != imp.get_magic(): + if magic != PYC_MAGIC_NUMBER: raise NoCode("Bad magic number in .pyc file") # Skip the junk in the header that we don't need. 
@@ -165,7 +235,5 @@ def make_code_from_pyc(filename): # The rest of the file is the code object we want. code = marshal.load(fpyc) - finally: - fpyc.close() return code diff --git a/python/helpers/coveragepy/coverage/files.py b/python/helpers/coveragepy/coverage/files.py index 464535a81653..44997d12c6bc 100644 --- a/python/helpers/coveragepy/coverage/files.py +++ b/python/helpers/coveragepy/coverage/files.py @@ -1,125 +1,163 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt + """File wrangling.""" -from coverage.backward import to_string -from coverage.misc import CoverageException -import fnmatch, os, os.path, re, sys -import ntpath, posixpath +import fnmatch +import ntpath +import os +import os.path +import posixpath +import re +import sys -class FileLocator(object): - """Understand how filenames work.""" +from coverage import env +from coverage.backward import unicode_class +from coverage.misc import contract, CoverageException, join_regex, isolate_module - def __init__(self): - # The absolute path to our current directory. - self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep) - # Cache of results of calling the canonical_filename() method, to - # avoid duplicating work. - self.canonical_filename_cache = {} +os = isolate_module(os) - def relative_filename(self, filename): - """Return the relative form of `filename`. - The filename will be relative to the current directory when the - `FileLocator` was constructed. +def set_relative_directory(): + """Set the directory that `relative_filename` will be relative to.""" + global RELATIVE_DIR, CANONICAL_FILENAME_CACHE - """ - fnorm = os.path.normcase(filename) - if fnorm.startswith(self.relative_dir): - filename = filename[len(self.relative_dir):] - return filename + # The absolute path to our current directory. 
+ RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep) - def canonical_filename(self, filename): - """Return a canonical filename for `filename`. + # Cache of results of calling the canonical_filename() method, to + # avoid duplicating work. + CANONICAL_FILENAME_CACHE = {} - An absolute path with no redundant components and normalized case. - """ - if filename not in self.canonical_filename_cache: - if not os.path.isabs(filename): - for path in [os.curdir] + sys.path: - if path is None: - continue - f = os.path.join(path, filename) - if os.path.exists(f): - filename = f - break - cf = abs_file(filename) - self.canonical_filename_cache[filename] = cf - return self.canonical_filename_cache[filename] +def relative_directory(): + """Return the directory that `relative_filename` is relative to.""" + return RELATIVE_DIR - def get_zip_data(self, filename): - """Get data from `filename` if it is a zip file path. - Returns the string data read from the zip file, or None if no zip file - could be found or `filename` isn't in it. The data returned will be - an empty string if the file is empty. +@contract(returns='unicode') +def relative_filename(filename): + """Return the relative form of `filename`. - """ - import zipimport - markers = ['.zip'+os.sep, '.egg'+os.sep] - for marker in markers: - if marker in filename: - parts = filename.split(marker) - try: - zi = zipimport.zipimporter(parts[0]+marker[:-1]) - except zipimport.ZipImportError: + The file name will be relative to the current directory when the + `set_relative_directory` was called. + + """ + fnorm = os.path.normcase(filename) + if fnorm.startswith(RELATIVE_DIR): + filename = filename[len(RELATIVE_DIR):] + return unicode_filename(filename) + + +@contract(returns='unicode') +def canonical_filename(filename): + """Return a canonical file name for `filename`. + + An absolute path with no redundant components and normalized case. 
+ + """ + if filename not in CANONICAL_FILENAME_CACHE: + if not os.path.isabs(filename): + for path in [os.curdir] + sys.path: + if path is None: continue - try: - data = zi.get_data(parts[1]) - except IOError: - continue - return to_string(data) - return None + f = os.path.join(path, filename) + if os.path.exists(f): + filename = f + break + cf = abs_file(filename) + CANONICAL_FILENAME_CACHE[filename] = cf + return CANONICAL_FILENAME_CACHE[filename] -if sys.platform == 'win32': +def flat_rootname(filename): + """A base for a flat file name to correspond to this file. + + Useful for writing files about the code where you want all the files in + the same directory, but need to differentiate same-named files from + different directories. + + For example, the file a/b/c.py will return 'a_b_c_py' + + """ + name = ntpath.splitdrive(filename)[1] + return re.sub(r"[\\/.:]", "_", name) + + +if env.WINDOWS: + + _ACTUAL_PATH_CACHE = {} + _ACTUAL_PATH_LIST_CACHE = {} def actual_path(path): """Get the actual path of `path`, including the correct case.""" - if path in actual_path.cache: - return actual_path.cache[path] + if env.PY2 and isinstance(path, unicode_class): + path = path.encode(sys.getfilesystemencoding()) + if path in _ACTUAL_PATH_CACHE: + return _ACTUAL_PATH_CACHE[path] head, tail = os.path.split(path) if not tail: - actpath = head + # This means head is the drive spec: normalize it. 
+ actpath = head.upper() elif not head: actpath = tail else: head = actual_path(head) - if head in actual_path.list_cache: - files = actual_path.list_cache[head] + if head in _ACTUAL_PATH_LIST_CACHE: + files = _ACTUAL_PATH_LIST_CACHE[head] else: try: files = os.listdir(head) except OSError: files = [] - actual_path.list_cache[head] = files + _ACTUAL_PATH_LIST_CACHE[head] = files normtail = os.path.normcase(tail) for f in files: if os.path.normcase(f) == normtail: tail = f break actpath = os.path.join(head, tail) - actual_path.cache[path] = actpath + _ACTUAL_PATH_CACHE[path] = actpath return actpath - actual_path.cache = {} - actual_path.list_cache = {} - else: def actual_path(filename): """The actual path for non-Windows platforms.""" return filename +if env.PY2: + @contract(returns='unicode') + def unicode_filename(filename): + """Return a Unicode version of `filename`.""" + if isinstance(filename, str): + encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() + filename = filename.decode(encoding, "replace") + return filename +else: + @contract(filename='unicode', returns='unicode') + def unicode_filename(filename): + """Return a Unicode version of `filename`.""" + return filename + + +@contract(returns='unicode') def abs_file(filename): """Return the absolute normalized form of `filename`.""" path = os.path.expandvars(os.path.expanduser(filename)) path = os.path.abspath(os.path.realpath(path)) path = actual_path(path) + path = unicode_filename(path) return path +RELATIVE_DIR = None +CANONICAL_FILENAME_CACHE = None +set_relative_directory() + + def isabs_anywhere(filename): """Is `filename` an absolute path on any OS?""" return ntpath.isabs(filename) or posixpath.isabs(filename) @@ -137,7 +175,7 @@ def prep_patterns(patterns): """ prepped = [] for p in patterns or []: - if p.startswith("*") or p.startswith("?"): + if p.startswith(("*", "?")): prepped.append(p) else: prepped.append(abs_file(p)) @@ -147,7 +185,7 @@ def prep_patterns(patterns): class 
TreeMatcher(object): """A matcher for files in a tree.""" def __init__(self, directories): - self.dirs = directories[:] + self.dirs = list(directories) def __repr__(self): return "" % self.dirs @@ -156,10 +194,6 @@ class TreeMatcher(object): """A list of strings for displaying when dumping state.""" return self.dirs - def add(self, directory): - """Add another directory to the list we match for.""" - self.dirs.append(directory) - def match(self, fpath): """Does `fpath` indicate a file in one of our trees?""" for d in self.dirs: @@ -173,10 +207,49 @@ class TreeMatcher(object): return False +class ModuleMatcher(object): + """A matcher for modules in a tree.""" + def __init__(self, module_names): + self.modules = list(module_names) + + def __repr__(self): + return "" % (self.modules) + + def info(self): + """A list of strings for displaying when dumping state.""" + return self.modules + + def match(self, module_name): + """Does `module_name` indicate a module in one of our packages?""" + if not module_name: + return False + + for m in self.modules: + if module_name.startswith(m): + if module_name == m: + return True + if module_name[len(m)] == '.': + # This is a module in the package + return True + + return False + + class FnmatchMatcher(object): - """A matcher for files by filename pattern.""" + """A matcher for files by file name pattern.""" def __init__(self, pats): self.pats = pats[:] + # fnmatch is platform-specific. On Windows, it does the Windows thing + # of treating / and \ as equivalent. But on other platforms, we need to + # take care of that ourselves. + fnpats = (fnmatch.translate(p) for p in pats) + fnpats = (p.replace(r"\/", r"[\\/]") for p in fnpats) + if env.WINDOWS: + # Windows is also case-insensitive. BTW: the regex docs say that + # flags like (?i) have to be at the beginning, but fnmatch puts + # them at the end, and having two there seems to work fine. 
+ fnpats = (p + "(?i)" for p in fnpats) + self.re = re.compile(join_regex(fnpats)) def __repr__(self): return "" % self.pats @@ -186,11 +259,8 @@ class FnmatchMatcher(object): return self.pats def match(self, fpath): - """Does `fpath` match one of our filename patterns?""" - for pat in self.pats: - if fnmatch.fnmatch(fpath, pat): - return True - return False + """Does `fpath` match one of our file name patterns?""" + return self.re.match(fpath) is not None def sep(s): @@ -213,12 +283,9 @@ class PathAliases(object): A `PathAliases` object tracks a list of pattern/result pairs, and can map a path through those aliases to produce a unified path. - `locator` is a FileLocator that is used to canonicalize the results. - """ - def __init__(self, locator=None): + def __init__(self): self.aliases = [] - self.locator = locator def add(self, pattern, result): """Add the `pattern`/`result` pair to the list of aliases. @@ -245,11 +312,10 @@ class PathAliases(object): pattern = abs_file(pattern) pattern += pattern_sep - # Make a regex from the pattern. fnmatch always adds a \Z or $ to + # Make a regex from the pattern. fnmatch always adds a \Z to # match the whole string, which we don't want. regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(') - if regex_pat.endswith("$"): - regex_pat = regex_pat[:-1] + # We want */a/b.py to match on Windows too, so change slash to match # either separator. regex_pat = regex_pat.replace(r"\/", r"[\\/]") @@ -272,6 +338,10 @@ class PathAliases(object): The separator style in the result is made to match that of the result in the alias. + Returns the mapped path. If a mapping has happened, this is a + canonical path. If no mapping has happened, it is the original value + of `path` unchanged. 
+ """ for regex, result, pattern_sep, result_sep in self.aliases: m = regex.match(path) @@ -279,8 +349,7 @@ class PathAliases(object): new = path.replace(m.group(0), result) if pattern_sep != result_sep: new = new.replace(pattern_sep, result_sep) - if self.locator: - new = self.locator.canonical_filename(new) + new = canonical_filename(new) return new return path @@ -291,7 +360,7 @@ def find_python_files(dirname): To be importable, the files have to be in a directory with a __init__.py, except for `dirname` itself, which isn't required to have one. The assumption is that `dirname` was specified directly, so the user knows - best, but subdirectories are checked for a __init__.py to be sure we only + best, but sub-directories are checked for a __init__.py to be sure we only find the importable files. """ diff --git a/python/helpers/coveragepy/coverage/fullcoverage/encodings.py b/python/helpers/coveragepy/coverage/fullcoverage/encodings.py deleted file mode 100644 index 6a258d6710c4..000000000000 --- a/python/helpers/coveragepy/coverage/fullcoverage/encodings.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Imposter encodings module that installs a coverage-style tracer. - -This is NOT the encodings module; it is an imposter that sets up tracing -instrumentation and then replaces itself with the real encodings module. - -If the directory that holds this file is placed first in the PYTHONPATH when -using "coverage" to run Python's tests, then this file will become the very -first module imported by the internals of Python 3. It installs a -coverage-compatible trace function that can watch Standard Library modules -execute from the very earliest stages of Python's own boot process. This fixes -a problem with coverage - that it starts too late to trace the coverage of many -of the most fundamental modules in the Standard Library. - -""" - -import sys - -class FullCoverageTracer(object): - def __init__(self): - # `traces` is a list of trace events. 
Frames are tricky: the same - # frame object is used for a whole scope, with new line numbers - # written into it. So in one scope, all the frame objects are the - # same object, and will eventually all will point to the last line - # executed. So we keep the line numbers alongside the frames. - # The list looks like: - # - # traces = [ - # ((frame, event, arg), lineno), ... - # ] - # - self.traces = [] - - def fullcoverage_trace(self, *args): - frame, event, arg = args - self.traces.append((args, frame.f_lineno)) - return self.fullcoverage_trace - -sys.settrace(FullCoverageTracer().fullcoverage_trace) - -# In coverage/files.py is actual_filename(), which uses glob.glob. I don't -# understand why, but that use of glob borks everything if fullcoverage is in -# effect. So here we make an ugly hail-mary pass to switch off glob.glob over -# there. This means when using fullcoverage, Windows path names will not be -# their actual case. - -#sys.fullcoverage = True - -# Finally, remove our own directory from sys.path; remove ourselves from -# sys.modules; and re-import "encodings", which will be the real package -# this time. Note that the delete from sys.modules dictionary has to -# happen last, since all of the symbols in this module will become None -# at that exact moment, including "sys". 
- -parentdir = max(filter(__file__.startswith, sys.path), key=len) -sys.path.remove(parentdir) -del sys.modules['encodings'] -import encodings diff --git a/python/helpers/coveragepy/coverage/html.py b/python/helpers/coveragepy/coverage/html.py index 5242236c1ed9..e5b1db2a01dc 100644 --- a/python/helpers/coveragepy/coverage/html.py +++ b/python/helpers/coveragepy/coverage/html.py @@ -1,15 +1,24 @@ -"""HTML reporting for Coverage.""" +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt -import os, re, shutil, sys +"""HTML reporting for coverage.py.""" + +import datetime +import json +import os +import shutil import coverage -from coverage.backward import pickle -from coverage.misc import CoverageException, Hasher -from coverage.phystokens import source_token_lines, source_encoding +from coverage import env +from coverage.backward import iitems +from coverage.files import flat_rootname +from coverage.misc import CoverageException, Hasher, isolate_module from coverage.report import Reporter from coverage.results import Numbers from coverage.templite import Templite +os = isolate_module(os) + # Static files are looked for in a list of places. STATIC_PATH = [ @@ -20,6 +29,7 @@ STATIC_PATH = [ os.path.join(os.path.dirname(__file__), "htmlfiles"), ] + def data_filename(fname, pkgdir=""): """Return the path to a data file of ours. @@ -27,69 +37,84 @@ def data_filename(fname, pkgdir=""): is returned. Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir` - is provided, at that subdirectory. + is provided, at that sub-directory. 
""" + tried = [] for static_dir in STATIC_PATH: static_filename = os.path.join(static_dir, fname) if os.path.exists(static_filename): return static_filename + else: + tried.append(static_filename) if pkgdir: static_filename = os.path.join(static_dir, pkgdir, fname) if os.path.exists(static_filename): return static_filename - raise CoverageException("Couldn't find static file %r" % fname) + else: + tried.append(static_filename) + raise CoverageException( + "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried) + ) -def data(fname): +def read_data(fname): """Return the contents of a data file of ours.""" - data_file = open(data_filename(fname)) - try: + with open(data_filename(fname)) as data_file: return data_file.read() - finally: - data_file.close() + + +def write_html(fname, html): + """Write `html` to `fname`, properly encoded.""" + with open(fname, "wb") as fout: + fout.write(html.encode('ascii', 'xmlcharrefreplace')) class HtmlReporter(Reporter): """HTML reporting.""" - # These files will be copied from the htmlfiles dir to the output dir. + # These files will be copied from the htmlfiles directory to the output + # directory. 
STATIC_FILES = [ - ("style.css", ""), - ("jquery.min.js", "jquery"), - ("jquery.hotkeys.js", "jquery-hotkeys"), - ("jquery.isonscreen.js", "jquery-isonscreen"), - ("jquery.tablesorter.min.js", "jquery-tablesorter"), - ("coverage_html.js", ""), - ("keybd_closed.png", ""), - ("keybd_open.png", ""), - ] + ("style.css", ""), + ("jquery.min.js", "jquery"), + ("jquery.debounce.min.js", "jquery-debounce"), + ("jquery.hotkeys.js", "jquery-hotkeys"), + ("jquery.isonscreen.js", "jquery-isonscreen"), + ("jquery.tablesorter.min.js", "jquery-tablesorter"), + ("coverage_html.js", ""), + ("keybd_closed.png", ""), + ("keybd_open.png", ""), + ] def __init__(self, cov, config): super(HtmlReporter, self).__init__(cov, config) self.directory = None + title = self.config.html_title + if env.PY2: + title = title.decode("utf8") self.template_globals = { 'escape': escape, - 'title': self.config.html_title, + 'pair': pair, + 'title': title, '__url__': coverage.__url__, '__version__': coverage.__version__, - } - self.source_tmpl = Templite( - data("pyfile.html"), self.template_globals - ) + } + self.source_tmpl = Templite(read_data("pyfile.html"), self.template_globals) self.coverage = cov self.files = [] - self.arcs = self.coverage.data.has_arcs() + self.has_arcs = self.coverage.data.has_arcs() self.status = HtmlStatus() self.extra_css = None self.totals = Numbers() + self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') def report(self, morfs): """Generate an HTML report for `morfs`. - `morfs` is a list of modules or filenames. + `morfs` is a list of modules or file names. """ assert self.config.html_dir, "must give a directory for html reporting" @@ -100,7 +125,7 @@ class HtmlReporter(Reporter): # Check that this run used the same settings as the last run. 
m = Hasher() m.update(self.config) - these_settings = m.digest() + these_settings = m.hexdigest() if self.status.settings_hash() != these_settings: self.status.reset() self.status.set_settings_hash(these_settings) @@ -119,8 +144,7 @@ class HtmlReporter(Reporter): self.index_file() self.make_local_static_report_files() - - return self.totals.pc_covered + return self.totals.n_statements and self.totals.pc_covered def make_local_static_report_files(self): """Make local instances of static files for HTML report.""" @@ -129,63 +153,43 @@ class HtmlReporter(Reporter): shutil.copyfile( data_filename(static, pkgdir), os.path.join(self.directory, static) - ) + ) # The user may have extra CSS they want copied. if self.extra_css: shutil.copyfile( self.config.extra_css, os.path.join(self.directory, self.extra_css) - ) + ) - def write_html(self, fname, html): - """Write `html` to `fname`, properly encoded.""" - fout = open(fname, "wb") - try: - fout.write(html.encode('ascii', 'xmlcharrefreplace')) - finally: - fout.close() - - def file_hash(self, source, cu): + def file_hash(self, source, fr): """Compute a hash that changes if the file needs to be re-reported.""" m = Hasher() m.update(source) - self.coverage.data.add_to_hash(cu.filename, m) - return m.digest() + self.coverage.data.add_to_hash(fr.filename, m) + return m.hexdigest() - def html_file(self, cu, analysis): + def html_file(self, fr, analysis): """Generate an HTML file for one source file.""" - source_file = cu.source_file() - try: - source = source_file.read() - finally: - source_file.close() + source = fr.source() # Find out if the file on disk is already correct. 
- flat_rootname = cu.flat_rootname() - this_hash = self.file_hash(source, cu) - that_hash = self.status.file_hash(flat_rootname) + rootname = flat_rootname(fr.relative_filename()) + this_hash = self.file_hash(source.encode('utf-8'), fr) + that_hash = self.status.file_hash(rootname) if this_hash == that_hash: # Nothing has changed to require the file to be reported again. - self.files.append(self.status.index_info(flat_rootname)) + self.files.append(self.status.index_info(rootname)) return - self.status.set_file_hash(flat_rootname, this_hash) - - # If need be, determine the encoding of the source file. We use it - # later to properly write the HTML. - if sys.version_info < (3, 0): - encoding = source_encoding(source) - # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it. - if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf": - source = source[3:] - encoding = "utf-8" + self.status.set_file_hash(rootname, this_hash) # Get the numbers for this file. nums = analysis.numbers - if self.arcs: + if self.has_arcs: missing_branch_arcs = analysis.missing_branch_arcs() + arcs_executed = analysis.arcs_executed() # These classes determine which lines are highlighted by default. c_run = "run hide_run" @@ -195,35 +199,44 @@ class HtmlReporter(Reporter): lines = [] - for lineno, line in enumerate(source_token_lines(source)): - lineno += 1 # 1-based line numbers. + for lineno, line in enumerate(fr.source_token_lines(), start=1): # Figure out how to mark this line. 
line_class = [] annotate_html = "" - annotate_title = "" + annotate_long = "" if lineno in analysis.statements: line_class.append("stm") if lineno in analysis.excluded: line_class.append(c_exc) elif lineno in analysis.missing: line_class.append(c_mis) - elif self.arcs and lineno in missing_branch_arcs: + elif self.has_arcs and lineno in missing_branch_arcs: line_class.append(c_par) - annlines = [] + shorts = [] + longs = [] for b in missing_branch_arcs[lineno]: if b < 0: - annlines.append("exit") + shorts.append("exit") else: - annlines.append(str(b)) - annotate_html = "   ".join(annlines) - if len(annlines) > 1: - annotate_title = "no jumps to these line numbers" - elif len(annlines) == 1: - annotate_title = "no jump to this line number" + shorts.append(b) + longs.append(fr.missing_arc_description(lineno, b, arcs_executed)) + # 202F is NARROW NO-BREAK SPACE. + # 219B is RIGHTWARDS ARROW WITH STROKE. + short_fmt = "%s ↛ %s" + annotate_html = ",   ".join(short_fmt % (lineno, d) for d in shorts) + + if len(longs) == 1: + annotate_long = longs[0] + else: + annotate_long = "%d missed branches: %s" % ( + len(longs), + ", ".join("%d) %s" % (num, ann_long) + for num, ann_long in enumerate(longs, start=1)), + ) elif lineno in analysis.statements: line_class.append(c_run) - # Build the HTML for the line + # Build the HTML for the line. html = [] for tok_type, tok_text in line: if tok_type == "ws": @@ -231,61 +244,59 @@ class HtmlReporter(Reporter): else: tok_html = escape(tok_text) or ' ' html.append( - "%s" % (tok_type, tok_html) - ) + '%s' % (tok_type, tok_html) + ) lines.append({ 'html': ''.join(html), 'number': lineno, 'class': ' '.join(line_class) or "pln", 'annotate': annotate_html, - 'annotate_title': annotate_title, + 'annotate_long': annotate_long, }) # Write the HTML page for this file. 
- html = spaceless(self.source_tmpl.render({ - 'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run, - 'arcs': self.arcs, 'extra_css': self.extra_css, - 'cu': cu, 'nums': nums, 'lines': lines, - })) + html = self.source_tmpl.render({ + 'c_exc': c_exc, + 'c_mis': c_mis, + 'c_par': c_par, + 'c_run': c_run, + 'has_arcs': self.has_arcs, + 'extra_css': self.extra_css, + 'fr': fr, + 'nums': nums, + 'lines': lines, + 'time_stamp': self.time_stamp, + }) - if sys.version_info < (3, 0): - html = html.decode(encoding) - - html_filename = flat_rootname + ".html" + html_filename = rootname + ".html" html_path = os.path.join(self.directory, html_filename) - self.write_html(html_path, html) + write_html(html_path, html) # Save this file's information for the index file. index_info = { 'nums': nums, 'html_filename': html_filename, - 'name': cu.name, - } + 'relative_filename': fr.relative_filename(), + } self.files.append(index_info) - self.status.set_index_info(flat_rootname, index_info) + self.status.set_index_info(rootname, index_info) def index_file(self): """Write the index.html file for this report.""" - index_tmpl = Templite( - data("index.html"), self.template_globals - ) + index_tmpl = Templite(read_data("index.html"), self.template_globals) - self.totals = sum([f['nums'] for f in self.files]) + self.totals = sum(f['nums'] for f in self.files) html = index_tmpl.render({ - 'arcs': self.arcs, + 'has_arcs': self.has_arcs, 'extra_css': self.extra_css, 'files': self.files, 'totals': self.totals, + 'time_stamp': self.time_stamp, }) - if sys.version_info < (3, 0): - html = html.decode("utf-8") - self.write_html( - os.path.join(self.directory, "index.html"), - html - ) + write_html(os.path.join(self.directory, "index.html"), html) # Write the latest hashes for next time. 
self.status.write(self.directory) @@ -294,9 +305,37 @@ class HtmlReporter(Reporter): class HtmlStatus(object): """The status information we keep to support incremental reporting.""" - STATUS_FILE = "status.dat" + STATUS_FILE = "status.json" STATUS_FORMAT = 1 + # pylint: disable=wrong-spelling-in-comment,useless-suppression + # The data looks like: + # + # { + # 'format': 1, + # 'settings': '540ee119c15d52a68a53fe6f0897346d', + # 'version': '4.0a1', + # 'files': { + # 'cogapp___init__': { + # 'hash': 'e45581a5b48f879f301c0f30bf77a50c', + # 'index': { + # 'html_filename': 'cogapp___init__.html', + # 'name': 'cogapp/__init__', + # 'nums': , + # } + # }, + # ... + # 'cogapp_whiteutils': { + # 'hash': '8504bb427fc488c4176809ded0277d51', + # 'index': { + # 'html_filename': 'cogapp_whiteutils.html', + # 'name': 'cogapp/whiteutils', + # 'nums': , + # } + # }, + # }, + # } + def __init__(self): self.reset() @@ -310,11 +349,8 @@ class HtmlStatus(object): usable = False try: status_file = os.path.join(directory, self.STATUS_FILE) - fstatus = open(status_file, "rb") - try: - status = pickle.load(fstatus) - finally: - fstatus.close() + with open(status_file, "r") as fstatus: + status = json.load(fstatus) except (IOError, ValueError): usable = False else: @@ -325,7 +361,10 @@ class HtmlStatus(object): usable = False if usable: - self.files = status['files'] + self.files = {} + for filename, fileinfo in iitems(status['files']): + fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums']) + self.files[filename] = fileinfo self.settings = status['settings'] else: self.reset() @@ -333,17 +372,26 @@ class HtmlStatus(object): def write(self, directory): """Write the current status to `directory`.""" status_file = os.path.join(directory, self.STATUS_FILE) + files = {} + for filename, fileinfo in iitems(self.files): + fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args() + files[filename] = fileinfo + status = { 'format': self.STATUS_FORMAT, 'version': 
coverage.__version__, 'settings': self.settings, - 'files': self.files, - } - fout = open(status_file, "wb") - try: - pickle.dump(status, fout) - finally: - fout.close() + 'files': files, + } + with open(status_file, "w") as fout: + json.dump(status, fout) + + # Older versions of ShiningPanda look for the old name, status.dat. + # Accomodate them if we are running under Jenkins. + # https://issues.jenkins-ci.org/browse/JENKINS-28428 + if "JENKINS_URL" in os.environ: + with open(os.path.join(directory, "status.dat"), "w") as dat: + dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n") def settings_hash(self): """Get the hash of the coverage.py settings.""" @@ -373,24 +421,15 @@ class HtmlStatus(object): # Helpers for templates and generating HTML def escape(t): - """HTML-escape the text in `t`.""" - return (t - # Convert HTML special chars into HTML entities. - .replace("&", "&").replace("<", "<").replace(">", ">") - .replace("'", "'").replace('"', """) - # Convert runs of spaces: "......" -> " . . ." - .replace(" ", "  ") - # To deal with odd-length runs, convert the final pair of spaces - # so that "....." -> " .  ." - .replace(" ", "  ") - ) + """HTML-escape the text in `t`. -def spaceless(html): - """Squeeze out some annoying extra space from an HTML string. - - Nicely-formatted templates mean lots of extra space in the result. - Get rid of some. + This is only suitable for HTML text, not attributes. """ - html = re.sub(r">\s+

\n

0) { + no_rows.hide(); + } + table.show(); + + } + else { + // Filter table items by value. + var hidden = 0; + var shown = 0; + + // Hide / show elements. + $.each(table_row_names, function () { + var element = $(this).parents("tr"); + + if ($(this).text().indexOf(filter_value) === -1) { + // hide + element.addClass("hidden"); + hidden++; + } + else { + // show + element.removeClass("hidden"); + shown++; + } + }); + + // Show placeholder if no rows will be displayed. + if (no_rows.length > 0) { + if (shown === 0) { + // Show placeholder, hide table. + no_rows.show(); + table.hide(); + } + else { + // Hide placeholder, show table. + no_rows.hide(); + table.show(); + } + } + + // Manage dynamic header: + if (hidden > 0) { + // Calculate new dynamic sum values based on visible rows. + for (var column = 2; column < 20; column++) { + // Calculate summed value. + var cells = table_rows.find('td:nth-child(' + column + ')'); + if (!cells.length) { + // No more columns...! + break; + } + + var sum = 0, numer = 0, denom = 0; + $.each(cells.filter(':visible'), function () { + var ratio = $(this).data("ratio"); + if (ratio) { + var splitted = ratio.split(" "); + numer += parseInt(splitted[0], 10); + denom += parseInt(splitted[1], 10); + } + else { + sum += parseInt(this.innerHTML, 10); + } + }); + + // Get footer cell element. + var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')'); + + // Set value into dynamic footer cell element. + if (cells[0].innerHTML.indexOf('%') > -1) { + // Percentage columns use the numerator and denominator, + // and adapt to the number of decimal places. + var match = /\.([0-9]+)/.exec(cells[0].innerHTML); + var places = 0; + if (match) { + places = match[1].length; + } + var pct = numer * 100 / denom; + footer_cell.text(pct.toFixed(places) + '%'); + } + else { + footer_cell.text(sum); + } + } + + // Hide standard footer, show dynamic footer. 
+ table_footer.addClass("hidden"); + table_dynamic_footer.removeClass("hidden"); + } + else { + // Show standard footer, hide dynamic footer. + table_footer.removeClass("hidden"); + table_dynamic_footer.addClass("hidden"); + } + } + })); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). + $("#filter").trigger("change"); +}; + // Loaded on index.html coverage.index_ready = function ($) { // Look for a cookie containing previous sort settings: @@ -95,6 +227,7 @@ coverage.index_ready = function ($) { coverage.assign_shortkeys(); coverage.wire_up_help_panel(); + coverage.wire_up_filter(); // Watch for page unload events so we can save the final sort settings: $(window).unload(function () { @@ -129,6 +262,11 @@ coverage.pyfile_ready = function ($) { coverage.assign_shortkeys(); coverage.wire_up_help_panel(); + + coverage.init_scroll_markers(); + + // Rebuild scroll markers after window high changing + $(window).resize(coverage.resize_scroll_markers); }; coverage.toggle_lines = function (btn, cls) { @@ -187,12 +325,13 @@ coverage.to_next_chunk = function () { // Find the start of the next colored chunk. 
var probe = c.sel_end; + var color, probe_line; while (true) { - var probe_line = c.line_elt(probe); + probe_line = c.line_elt(probe); if (probe_line.length === 0) { return; } - var color = probe_line.css("background-color"); + color = probe_line.css("background-color"); if (!c.is_transparent(color)) { break; } @@ -374,3 +513,72 @@ coverage.scroll_window = function (to_pos) { coverage.finish_scrolling = function () { $("html,body").stop(true, true); }; + +coverage.init_scroll_markers = function () { + var c = coverage; + // Init some variables + c.lines_len = $('td.text p').length; + c.body_h = $('body').height(); + c.header_h = $('div#header').height(); + c.missed_lines = $('td.text p.mis, td.text p.par'); + + // Build html + c.resize_scroll_markers(); +}; + +coverage.resize_scroll_markers = function () { + var c = coverage, + min_line_height = 3, + max_line_height = 10, + visible_window_h = $(window).height(); + + $('#scroll_marker').remove(); + // Don't build markers if the window has no scroll bar. + if (c.body_h <= visible_window_h) { + return; + } + + $("body").append("

 
"); + var scroll_marker = $('#scroll_marker'), + marker_scale = scroll_marker.height() / c.body_h, + line_height = scroll_marker.height() / c.lines_len; + + // Line height must be between the extremes. + if (line_height > min_line_height) { + if (line_height > max_line_height) { + line_height = max_line_height; + } + } + else { + line_height = min_line_height; + } + + var previous_line = -99, + last_mark, + last_top; + + c.missed_lines.each(function () { + var line_top = Math.round($(this).offset().top * marker_scale), + id_name = $(this).attr('id'), + line_number = parseInt(id_name.substring(1, id_name.length)); + + if (line_number === previous_line + 1) { + // If this solid missed block just make previous mark higher. + last_mark.css({ + 'height': line_top + line_height - last_top + }); + } + else { + // Add colored line in scroll_marker block. + scroll_marker.append('
'); + last_mark = $('#m' + line_number); + last_mark.css({ + 'height': line_height, + 'top': line_top + }); + last_top = line_top; + } + + previous_line = line_number; + }); +}; diff --git a/python/helpers/coveragepy/coverage/htmlfiles/index.html b/python/helpers/coveragepy/coverage/htmlfiles/index.html index c831823dd239..ee2deab0b627 100644 --- a/python/helpers/coveragepy/coverage/htmlfiles/index.html +++ b/python/helpers/coveragepy/coverage/htmlfiles/index.html @@ -1,101 +1,115 @@ - +{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #} +{# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt #} + + - + {{ title|escape }} - + {% if extra_css %} - + {% endif %} - - - - - + + + + + - + -