Mirror of https://gitflic.ru/project/openide/openide.git (synced 2026-01-05 01:50:56 +07:00)
Update bundled coverage.py to version 4.2.0
@@ -1,3 +1,6 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Code coverage measurement for Python.

Ned Batchelder
@@ -5,73 +8,16 @@ http://nedbatchelder.com/code/coverage

"""

from coverage.version import __version__, __url__
from coverage.version import __version__, __url__, version_info

from coverage.control import coverage, process_startup
from coverage.control import Coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
from coverage.pytracer import PyTracer

# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability, so the current api uses
# explicitly-created coverage objects. But for backward compatibility, here we
# define the top-level functions to create the singleton when they are first
# called.

# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None

def _singleton_method(name):
    """Return a function to the `name` method on a singleton `coverage` object.

    The singleton object is created the first time one of these functions is
    called.

    """
    # Disable pylint msg W0612, because a bunch of variables look unused, but
    # they're accessed via locals().
    # pylint: disable=W0612

    def wrapper(*args, **kwargs):
        """Singleton wrapper around a coverage method."""
        global _the_coverage
        if not _the_coverage:
            _the_coverage = coverage(auto_data=True)
        return getattr(_the_coverage, name)(*args, **kwargs)

    import inspect
    meth = getattr(coverage, name)
    args, varargs, kw, defaults = inspect.getargspec(meth)
    argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
    docstring = meth.__doc__
    wrapper.__doc__ = ("""\
        A first-use-singleton wrapper around coverage.%(name)s.

        This wrapper is provided for backward compatibility with legacy code.
        New code should use coverage.%(name)s directly.

        %(name)s%(argspec)s:

        %(docstring)s
        """ % locals()
    )

    return wrapper


# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')

# Backward compatibility.
coverage = Coverage
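
Editor's note: the hunk above removes the implicit module-level singleton in
favor of explicitly created objects; `coverage = Coverage` survives only as a
backward-compatible alias. A minimal usage sketch of the replacement API,
assuming coverage.py 4.x (`run_my_code` is a hypothetical stand-in for the
code being measured):

    import coverage

    cov = coverage.Coverage()   # explicit object instead of coverage.start()
    cov.start()
    run_my_code()               # hypothetical: the code under measurement
    cov.stop()
    cov.save()
    cov.report()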

# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
@@ -87,34 +33,3 @@ try:
    del sys.modules['coverage.coverage']
except KeyError:
    pass


# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2013 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.

@@ -1,4 +1,8 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Coverage.py's main entry point."""

import sys
from coverage.cmdline import main
sys.exit(main())

@@ -1,10 +1,19 @@
"""Source file annotation for Coverage."""
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

import os, re
"""Source file annotation for coverage.py."""

from coverage.backward import sorted # pylint: disable=W0622
import io
import os
import re

from coverage.files import flat_rootname
from coverage.misc import isolate_module
from coverage.report import Reporter

os = isolate_module(os)


class AnnotateReporter(Reporter):
    """Generate annotated source files showing line coverage.

@@ -42,37 +51,30 @@ class AnnotateReporter(Reporter):
        """
        self.report_files(self.annotate_file, morfs, directory)

    def annotate_file(self, cu, analysis):
    def annotate_file(self, fr, analysis):
        """Annotate a single file.

        `cu` is the CodeUnit for the file to annotate.
        `fr` is the FileReporter for the file to annotate.

        """
        if not cu.relative:
            return

        filename = cu.filename
        source = cu.source_file()
        if self.directory:
            dest_file = os.path.join(self.directory, cu.flat_rootname())
            dest_file += ".py,cover"
        else:
            dest_file = filename + ",cover"
        dest = open(dest_file, 'w')

        statements = sorted(analysis.statements)
        missing = sorted(analysis.missing)
        excluded = sorted(analysis.excluded)

        lineno = 0
        if self.directory:
            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
            if dest_file.endswith("_py"):
                dest_file = dest_file[:-3] + ".py"
            dest_file += ",cover"
        else:
            dest_file = fr.filename + ",cover"

        with io.open(dest_file, 'w', encoding='utf8') as dest:
            i = 0
            j = 0
            covered = True
            while True:
                line = source.readline()
                if line == '':
                    break
                lineno += 1
            source = fr.source()
            for lineno, line in enumerate(source.splitlines(True), start=1):
                while i < len(statements) and statements[i] < lineno:
                    i += 1
                while j < len(missing) and missing[j] < lineno:
@@ -80,23 +82,22 @@ class AnnotateReporter(Reporter):
                if i < len(statements) and statements[i] == lineno:
                    covered = j >= len(missing) or missing[j] > lineno
                if self.blank_re.match(line):
                    dest.write(' ')
                    dest.write(u' ')
                elif self.else_re.match(line):
                    # Special logic for lines containing only 'else:'.
                    if i >= len(statements) and j >= len(missing):
                        dest.write('! ')
                        dest.write(u'! ')
                    elif i >= len(statements) or j >= len(missing):
                        dest.write('> ')
                        dest.write(u'> ')
                    elif statements[i] == missing[j]:
                        dest.write('! ')
                        dest.write(u'! ')
                    else:
                        dest.write('> ')
                        dest.write(u'> ')
                elif lineno in excluded:
                    dest.write('- ')
                    dest.write(u'- ')
                elif covered:
                    dest.write('> ')
                    dest.write(u'> ')
                else:
                    dest.write('! ')
                    dest.write(u'! ')

                dest.write(line)
        source.close()
        dest.close()

python/helpers/coveragepy/coverage/backunittest.py (new file, 42 lines)
@@ -0,0 +1,42 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Implementations of unittest features from the future."""

# Use unittest2 if it's available, otherwise unittest. This gives us
# back-ported features for 2.6.
try:
    import unittest2 as unittest
except ImportError:
    import unittest


def unittest_has(method):
    """Does `unittest.TestCase` have `method` defined?"""
    return hasattr(unittest.TestCase, method)


class TestCase(unittest.TestCase):
    """Just like unittest.TestCase, but with assert methods added.

    Designed to be compatible with 3.1 unittest. Methods are only defined if
    `unittest` doesn't have them.

    """
    # pylint: disable=missing-docstring

    # Many Pythons have this method defined. But PyPy3 has a bug with it
    # somehow (https://bitbucket.org/pypy/pypy/issues/2092), so always use our
    # own implementation that works everywhere, at least for the ways we're
    # calling it.
    def assertCountEqual(self, s1, s2):
        """Assert these have the same elements, regardless of order."""
        self.assertEqual(sorted(s1), sorted(s2))

    if not unittest_has('assertRaisesRegex'):
        def assertRaisesRegex(self, *args, **kwargs):
            return self.assertRaisesRegexp(*args, **kwargs)

    if not unittest_has('assertRegex'):
        def assertRegex(self, *args, **kwargs):
            return self.assertRegexpMatches(*args, **kwargs)
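
Editor's note: the conditional `def` statements above work because a Python
class body is ordinary code executed at class-creation time. A minimal sketch
of the same feature-detection idiom, assuming Python 3:

    import unittest

    class CompatTestCase(unittest.TestCase):
        # Define the modern name only if this unittest lacks it; otherwise
        # the inherited implementation is used unchanged.
        if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
            def assertRaisesRegex(self, *args, **kwargs):
                return self.assertRaisesRegexp(*args, **kwargs)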
@@ -1,60 +1,29 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Add things to old Pythons so I can pretend they are newer."""

# This file does lots of tricky stuff, so disable a bunch of lintisms.
# pylint: disable=F0401,W0611,W0622
# F0401: Unable to import blah
# W0611: Unused import blah
# W0622: Redefining built-in blah
# This file does lots of tricky stuff, so disable a bunch of pylint warnings.
# pylint: disable=redefined-builtin
# pylint: disable=unused-import
# pxlint: disable=no-name-in-module

import os, re, sys
import sys

# Python 2.3 doesn't have `set`
try:
    set = set # new in 2.4
except NameError:
    from sets import Set as set
from coverage import env

# Python 2.3 doesn't have `sorted`.
try:
    sorted = sorted
except NameError:
    def sorted(iterable):
        """A 2.3-compatible implementation of `sorted`."""
        lst = list(iterable)
        lst.sort()
        return lst

# Python 2.3 doesn't have `reversed`.
try:
    reversed = reversed
except NameError:
    def reversed(iterable):
        """A 2.3-compatible implementation of `reversed`."""
        lst = list(iterable)
        return lst[::-1]

# rpartition is new in 2.5
try:
    "".rpartition
except AttributeError:
    def rpartition(s, sep):
        """Implement s.rpartition(sep) for old Pythons."""
        i = s.rfind(sep)
        if i == -1:
            return ('', '', s)
        else:
            return (s[:i], sep, s[i+len(sep):])
else:
    def rpartition(s, sep):
        """A common interface for new Pythons."""
        return s.rpartition(sep)

# Pythons 2 and 3 differ on where to get StringIO
# Pythons 2 and 3 differ on where to get StringIO.
try:
    from cStringIO import StringIO
    BytesIO = StringIO
except ImportError:
    from io import StringIO, BytesIO
    from io import StringIO

# In py3, ConfigParser was renamed to the more-standard configparser
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

# What's a string called?
try:
@@ -62,6 +31,12 @@ try:
except NameError:
    string_class = str

# What's a Unicode string called?
try:
    unicode_class = unicode
except NameError:
    unicode_class = str

# Where do pickles come from?
try:
    import cPickle as pickle
@@ -72,7 +47,16 @@ except ImportError:
try:
    range = xrange
except NameError:
    range = range
    range = range # pylint: disable=redefined-variable-type

# shlex.quote is new, but there's an undocumented implementation in "pipes",
# who knew!?
try:
    from shlex import quote as shlex_quote
except ImportError:
    # Useful function, available under a different (undocumented) name
    # in Python versions earlier than 3.3.
    from pipes import quote as shlex_quote

# A function to iterate listlessly over a dict's items.
try:
@@ -86,71 +70,32 @@ else:
        """Produce the items from dict `d`."""
        return d.iteritems()

# Exec is a statement in Py2, a function in Py3
if sys.version_info >= (3, 0):
    def exec_code_object(code, global_map):
        """A wrapper around exec()."""
        exec(code, global_map)
else:
    # OK, this is pretty gross. In Py2, exec was a statement, but that will
    # be a syntax error if we try to put it in a Py3 file, even if it is never
    # executed. So hide it inside an evaluated string literal instead.
    eval(
        compile(
            "def exec_code_object(code, global_map):\n"
            "    exec code in global_map\n",
            "<exec_function>", "exec"
        )
    )

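Editor's note: the eval(compile(...)) trick above exists because the Python 2
`exec` statement is a syntax error to a Python 3 parser, even in a branch that
never runs; building the function from a string defers parsing to runtime,
where only a Python 2 interpreter ever sees it. A minimal sketch, assuming it
runs on Python 2:

    src = ("def exec_code_object(code, global_map):\n"
           "    exec code in global_map\n")
    eval(compile(src, "<exec_function>", "exec"))
    # exec_code_object is now bound at module level and callable as usual.
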
# Reading Python source and interpreting the coding comment is a big deal.
if sys.version_info >= (3, 0):
    # Python 3.2 provides `tokenize.open`, the best way to open source files.
    import tokenize
# Getting the `next` function from an iterator is different in 2 and 3.
try:
    open_source = tokenize.open # pylint: disable=E1101
    iter([]).next
except AttributeError:
    from io import TextIOWrapper
    detect_encoding = tokenize.detect_encoding # pylint: disable=E1101
    # Copied from the 3.2 stdlib:
    def open_source(fname):
        """Open a file in read only mode using the encoding detected by
        detect_encoding().
        """
        buffer = open(fname, 'rb')
        encoding, _ = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    def iternext(seq):
        """Get the `next` function for iterating over `seq`."""
        return iter(seq).__next__
else:
    def open_source(fname):
        """Open a source file the best way."""
        return open(fname, "rU")

    def iternext(seq):
        """Get the `next` function for iterating over `seq`."""
        return iter(seq).next

# Python 3.x is picky about bytes and strings, so provide methods to
# get them right, and make them no-ops in 2.x
if sys.version_info >= (3, 0):
if env.PY3:
    def to_bytes(s):
        """Convert string `s` to bytes."""
        return s.encode('utf8')

    def to_string(b):
        """Convert bytes `b` to a string."""
        return b.decode('utf8')

    def binary_bytes(byte_values):
        """Produce a byte string with the ints from `byte_values`."""
        return bytes(byte_values)

    def byte_to_int(byte_value):
        """Turn an element of a bytes object into an int."""
        return byte_value

    def bytes_to_ints(bytes_value):
        """Turn a bytes object into a sequence of ints."""
        # In Py3, iterating bytes gives ints.
        # In Python 3, iterating bytes gives ints.
        return bytes_value

else:
@@ -158,27 +103,70 @@ else:
        """Convert string `s` to bytes (no-op in 2.x)."""
        return s

    def to_string(b):
        """Convert bytes `b` to a string (no-op in 2.x)."""
        return b

    def binary_bytes(byte_values):
        """Produce a byte string with the ints from `byte_values`."""
        return "".join([chr(b) for b in byte_values])

    def byte_to_int(byte_value):
        """Turn an element of a bytes object into an int."""
        return ord(byte_value)
        return "".join(chr(b) for b in byte_values)

    def bytes_to_ints(bytes_value):
        """Turn a bytes object into a sequence of ints."""
        for byte in bytes_value:
            yield ord(byte)

# Md5 is available in different places.

try:
    import hashlib
    md5 = hashlib.md5
    # In Python 2.x, the builtins were in __builtin__
    BUILTINS = sys.modules['__builtin__']
except KeyError:
    # In Python 3.x, they're in builtins
    BUILTINS = sys.modules['builtins']


# imp was deprecated in Python 3.3
try:
    import importlib
    import importlib.util
    imp = None
except ImportError:
    import md5
    md5 = md5.new
    importlib = None

# We only want to use importlib if it has everything we need.
try:
    importlib_util_find_spec = importlib.util.find_spec
except Exception:
    import imp
    importlib_util_find_spec = None

# What is the .pyc magic number for this version of Python?
try:
    PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
except AttributeError:
    PYC_MAGIC_NUMBER = imp.get_magic()


def import_local_file(modname, modfile=None):
    """Import a local file as a module.

    Opens a file in the current directory named `modname`.py, imports it
    as `modname`, and returns the module object. `modfile` is the file to
    import if it isn't in the current directory.

    """
    try:
        from importlib.machinery import SourceFileLoader
    except ImportError:
        SourceFileLoader = None

    if modfile is None:
        modfile = modname + '.py'
    if SourceFileLoader:
        mod = SourceFileLoader(modname, modfile).load_module()
    else:
        for suff in imp.get_suffixes(): # pragma: part covered
            if suff[0] == '.py':
                break

        with open(modfile, 'r') as f:
            # pylint: disable=undefined-loop-variable
            mod = imp.load_module(modname, f, modfile, suff)

    return mod

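Editor's note: a hypothetical use of `import_local_file` above, importing
./myscript.py under the module name "myscript":

    mod = import_local_file('myscript')   # assumes myscript.py is in the cwd
    print(mod.__name__)                   # -> 'myscript'
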
@@ -1,62 +1,9 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Bytecode manipulation for coverage.py"""

import opcode, types

from coverage.backward import byte_to_int

class ByteCode(object):
    """A single bytecode."""
    def __init__(self):
        # The offset of this bytecode in the code object.
        self.offset = -1

        # The opcode, defined in the `opcode` module.
        self.op = -1

        # The argument, a small integer, whose meaning depends on the opcode.
        self.arg = -1

        # The offset in the code object of the next bytecode.
        self.next_offset = -1

        # The offset to jump to.
        self.jump_to = -1


class ByteCodes(object):
    """Iterator over byte codes in `code`.

    Returns `ByteCode` objects.

    """
    # pylint: disable=R0924
    def __init__(self, code):
        self.code = code

    def __getitem__(self, i):
        return byte_to_int(self.code[i])

    def __iter__(self):
        offset = 0
        while offset < len(self.code):
            bc = ByteCode()
            bc.op = self[offset]
            bc.offset = offset

            next_offset = offset+1
            if bc.op >= opcode.HAVE_ARGUMENT:
                bc.arg = self[offset+1] + 256*self[offset+2]
                next_offset += 2

            label = -1
            if bc.op in opcode.hasjrel:
                label = next_offset + bc.arg
            elif bc.op in opcode.hasjabs:
                label = bc.arg
            bc.jump_to = label

            bc.next_offset = offset = next_offset
            yield bc
import types


class CodeObjects(object):

@@ -1,114 +1,142 @@
"""Command-line support for Coverage."""
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

import optparse, os, sys, time, traceback
"""Command-line support for coverage.py."""

from coverage.backward import sorted # pylint: disable=W0622
import glob
import optparse
import os.path
import sys
import textwrap
import traceback

from coverage import env
from coverage.collector import CTracer
from coverage.execfile import run_python_file, run_python_module
from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
from coverage.debug import info_formatter
from coverage.debug import info_formatter, info_header


class Opts(object):
    """A namespace class for individual options we'll build parsers from."""

    append = optparse.make_option(
        '-a', '--append', action='store_false', dest="erase_first",
        help="Append coverage data to .coverage, otherwise it is started "
            "clean with each run."
        '-a', '--append', action='store_true',
        help="Append coverage data to .coverage, otherwise it starts clean each time.",
    )
    branch = optparse.make_option(
        '', '--branch', action='store_true',
        help="Measure branch coverage in addition to statement coverage."
        help="Measure branch coverage in addition to statement coverage.",
    )
    CONCURRENCY_CHOICES = [
        "thread", "gevent", "greenlet", "eventlet", "multiprocessing",
    ]
    concurrency = optparse.make_option(
        '', '--concurrency', action='store', metavar="LIB",
        choices=CONCURRENCY_CHOICES,
        help=(
            "Properly measure code using a concurrency library. "
            "Valid values are: %s."
        ) % ", ".join(CONCURRENCY_CHOICES),
    )
    debug = optparse.make_option(
        '', '--debug', action='store', metavar="OPTS",
        help="Debug options, separated by commas"
        help="Debug options, separated by commas",
    )
    directory = optparse.make_option(
        '-d', '--directory', action='store', metavar="DIR",
        help="Write the output files to DIR."
        help="Write the output files to DIR.",
    )
    fail_under = optparse.make_option(
        '', '--fail-under', action='store', metavar="MIN", type="int",
        help="Exit with a status of 2 if the total coverage is less than MIN."
        help="Exit with a status of 2 if the total coverage is less than MIN.",
    )
    help = optparse.make_option(
        '-h', '--help', action='store_true',
        help="Get help on this command."
        help="Get help on this command.",
    )
    ignore_errors = optparse.make_option(
        '-i', '--ignore-errors', action='store_true',
        help="Ignore errors while reading source files."
        help="Ignore errors while reading source files.",
    )
    include = optparse.make_option(
        '', '--include', action='store',
        metavar="PAT1,PAT2,...",
        help="Include files only when their filename path matches one of "
            "these patterns. Usually needs quoting on the command line."
        help=(
            "Include only files whose paths match one of these patterns. "
            "Accepts shell-style wildcards, which must be quoted."
        ),
    )
    pylib = optparse.make_option(
        '-L', '--pylib', action='store_true',
        help="Measure coverage even inside the Python installed library, "
        help=(
            "Measure coverage even inside the Python installed library, "
            "which isn't done by default."
        ),
    )
    show_missing = optparse.make_option(
        '-m', '--show-missing', action='store_true',
        help="Show line numbers of statements in each module that weren't "
            "executed."
        help="Show line numbers of statements in each module that weren't executed.",
    )
    old_omit = optparse.make_option(
        '-o', '--omit', action='store',
        metavar="PAT1,PAT2,...",
        help="Omit files when their filename matches one of these patterns. "
            "Usually needs quoting on the command line."
    skip_covered = optparse.make_option(
        '--skip-covered', action='store_true',
        help="Skip files with 100% coverage.",
    )
    omit = optparse.make_option(
        '', '--omit', action='store',
        metavar="PAT1,PAT2,...",
        help="Omit files when their filename matches one of these patterns. "
            "Usually needs quoting on the command line."
        help=(
            "Omit files whose paths match one of these patterns. "
            "Accepts shell-style wildcards, which must be quoted."
        ),
    )
    output_xml = optparse.make_option(
        '-o', '', action='store', dest="outfile",
        metavar="OUTFILE",
        help="Write the XML report to this file. Defaults to 'coverage.xml'"
        help="Write the XML report to this file. Defaults to 'coverage.xml'",
    )
    parallel_mode = optparse.make_option(
        '-p', '--parallel-mode', action='store_true',
        help="Append the machine name, process id and random number to the "
        help=(
            "Append the machine name, process id and random number to the "
            ".coverage data file name to simplify collecting data from "
            "many processes."
        ),
    )
    module = optparse.make_option(
        '-m', '--module', action='store_true',
        help="<pyfile> is an importable Python module, not a script path, "
        help=(
            "<pyfile> is an importable Python module, not a script path, "
            "to be run as 'python -m' would run it."
        ),
    )
    rcfile = optparse.make_option(
        '', '--rcfile', action='store',
        help="Specify configuration file. Defaults to '.coveragerc'"
        help="Specify configuration file. Defaults to '.coveragerc'",
    )
    source = optparse.make_option(
        '', '--source', action='store', metavar="SRC1,SRC2,...",
        help="A list of packages or directories of code to be measured."
        help="A list of packages or directories of code to be measured.",
    )
    timid = optparse.make_option(
        '', '--timid', action='store_true',
        help="Use a simpler but slower trace method. Try this if you get "
        help=(
            "Use a simpler but slower trace method. Try this if you get "
            "seemingly impossible results!"
        ),
    )
    title = optparse.make_option(
        '', '--title', action='store', metavar="TITLE",
        help="A text string to use as the title on the HTML."
        help="A text string to use as the title on the HTML.",
    )
    version = optparse.make_option(
        '', '--version', action='store_true',
        help="Display version information and exit."
        help="Display version information and exit.",
    )


class CoverageOptionParser(optparse.OptionParser, object):
    """Base OptionParser for coverage.
    """Base OptionParser for coverage.py.

    Problems don't exit the program.
    Defaults are initialized for all options.
@@ -120,24 +148,26 @@ class CoverageOptionParser(optparse.OptionParser, object):
            add_help_option=False, *args, **kwargs
        )
        self.set_defaults(
            actions=[],
            action=None,
            append=None,
            branch=None,
            concurrency=None,
            debug=None,
            directory=None,
            fail_under=None,
            help=None,
            ignore_errors=None,
            include=None,
            module=None,
            omit=None,
            parallel_mode=None,
            module=None,
            pylib=None,
            rcfile=True,
            show_missing=None,
            skip_covered=None,
            source=None,
            timid=None,
            title=None,
            erase_first=None,
            version=None,
        )

@@ -152,7 +182,7 @@ class CoverageOptionParser(optparse.OptionParser, object):
        """Used to stop the optparse error handler ending the process."""
        pass

    def parse_args(self, args=None, options=None):
    def parse_args_ok(self, args=None, options=None):
        """Call optparse.parse_args, but return a triple:

        (ok, options, args)
@@ -171,175 +201,173 @@ class CoverageOptionParser(optparse.OptionParser, object):
            raise self.OptionParserError


class ClassicOptionParser(CoverageOptionParser):
    """Command-line parser for coverage.py classic arguments."""
class GlobalOptionParser(CoverageOptionParser):
    """Command-line parser for coverage.py global option arguments."""

    def __init__(self):
        super(ClassicOptionParser, self).__init__()

        self.add_action('-a', '--annotate', 'annotate')
        self.add_action('-b', '--html', 'html')
        self.add_action('-c', '--combine', 'combine')
        self.add_action('-e', '--erase', 'erase')
        self.add_action('-r', '--report', 'report')
        self.add_action('-x', '--execute', 'execute')
        super(GlobalOptionParser, self).__init__()

        self.add_options([
            Opts.directory,
            Opts.help,
            Opts.ignore_errors,
            Opts.pylib,
            Opts.show_missing,
            Opts.old_omit,
            Opts.parallel_mode,
            Opts.timid,
            Opts.version,
        ])

    def add_action(self, dash, dashdash, action_code):
        """Add a specialized option that is the action to execute."""
        option = self.add_option(dash, dashdash, action='callback',
            callback=self._append_action
        )
        option.action_code = action_code

    def _append_action(self, option, opt_unused, value_unused, parser):
        """Callback for an option that adds to the `actions` list."""
        parser.values.actions.append(option.action_code)


class CmdOptionParser(CoverageOptionParser):
    """Parse one of the new-style commands for coverage.py."""

    def __init__(self, action, options=None, defaults=None, usage=None,
            cmd=None, description=None
        ):
        """Create an OptionParser for a coverage command.
    def __init__(self, action, options, defaults=None, usage=None, description=None):
        """Create an OptionParser for a coverage.py command.

        `action` is the slug to put into `options.actions`.
        `action` is the slug to put into `options.action`.
        `options` is a list of Option's for the command.
        `defaults` is a dict of default value for options.
        `usage` is the usage string to display in help.
        `cmd` is the command name, if different than `action`.
        `description` is the description of the command, for the help text.

        """
        if usage:
            usage = "%prog " + usage
        super(CmdOptionParser, self).__init__(
            prog="coverage %s" % (cmd or action),
            usage=usage,
            description=description,
        )
        self.set_defaults(actions=[action], **(defaults or {}))
        if options:
        self.set_defaults(action=action, **(defaults or {}))
        self.add_options(options)
        self.cmd = cmd or action
        self.cmd = action

    def __eq__(self, other):
        # A convenience equality, so that I can put strings in unit test
        # results, and they will compare equal to objects.
        return (other == "<CmdOptionParser:%s>" % self.cmd)

    def get_prog_name(self):
        """Override of an undocumented function in optparse.OptionParser."""
        program_name = super(CmdOptionParser, self).get_prog_name()

        # Include the sub-command for this parser as part of the command.
        return "%(command)s %(subcommand)s" % {'command': program_name, 'subcommand': self.cmd}


GLOBAL_ARGS = [
    Opts.rcfile,
    Opts.debug,
    Opts.help,
    Opts.rcfile,
]

CMDS = {
    'annotate': CmdOptionParser("annotate",
    'annotate': CmdOptionParser(
        "annotate",
        [
            Opts.directory,
            Opts.ignore_errors,
            Opts.omit,
            Opts.include,
            Opts.omit,
        ] + GLOBAL_ARGS,
        usage="[options] [modules]",
        description = "Make annotated copies of the given files, marking "
            "statements that are executed with > and statements that are "
            "missed with !."
        description=(
            "Make annotated copies of the given files, marking statements that are executed "
            "with > and statements that are missed with !."
        ),
    ),

    'combine': CmdOptionParser("combine", GLOBAL_ARGS,
        usage = " ",
        description = "Combine data from multiple coverage files collected "
    'combine': CmdOptionParser(
        "combine",
        [
            Opts.append,
        ] + GLOBAL_ARGS,
        usage="[options] <path1> <path2> ... <pathN>",
        description=(
            "Combine data from multiple coverage files collected "
            "with 'run -p'. The combined results are written to a single "
            "file representing the union of the data."
            "file representing the union of the data. The positional "
            "arguments are data files or directories containing data files. "
            "If no paths are provided, data files in the default data file's "
            "directory are combined."
        ),
    ),

    'debug': CmdOptionParser("debug", GLOBAL_ARGS,
    'debug': CmdOptionParser(
        "debug", GLOBAL_ARGS,
        usage="<topic>",
        description = "Display information on the internals of coverage.py, "
        description=(
            "Display information on the internals of coverage.py, "
            "for diagnosing problems. "
            "Topics are 'data' to show a summary of the collected data, "
            "or 'sys' to show installation information."
        ),

    'erase': CmdOptionParser("erase", GLOBAL_ARGS,
        usage = " ",
        description = "Erase previously collected coverage data."
    ),

    'help': CmdOptionParser("help", GLOBAL_ARGS,
    'erase': CmdOptionParser(
        "erase", GLOBAL_ARGS,
        description="Erase previously collected coverage data.",
    ),

    'help': CmdOptionParser(
        "help", GLOBAL_ARGS,
        usage="[command]",
        description = "Describe how to use coverage.py"
        description="Describe how to use coverage.py",
    ),

    'html': CmdOptionParser("html",
    'html': CmdOptionParser(
        "html",
        [
            Opts.directory,
            Opts.fail_under,
            Opts.ignore_errors,
            Opts.omit,
            Opts.include,
            Opts.omit,
            Opts.title,
        ] + GLOBAL_ARGS,
        usage="[options] [modules]",
        description = "Create an HTML report of the coverage of the files. "
        description=(
            "Create an HTML report of the coverage of the files. "
            "Each file gets its own page, with the source decorated to show "
            "executed, excluded, and missed lines."
        ),
    ),

    'report': CmdOptionParser("report",
    'report': CmdOptionParser(
        "report",
        [
            Opts.fail_under,
            Opts.ignore_errors,
            Opts.omit,
            Opts.include,
            Opts.omit,
            Opts.show_missing,
            Opts.skip_covered,
        ] + GLOBAL_ARGS,
        usage="[options] [modules]",
        description="Report coverage statistics on modules."
    ),

    'run': CmdOptionParser("execute",
    'run': CmdOptionParser(
        "run",
        [
            Opts.append,
            Opts.branch,
            Opts.debug,
            Opts.concurrency,
            Opts.include,
            Opts.module,
            Opts.omit,
            Opts.pylib,
            Opts.parallel_mode,
            Opts.module,
            Opts.timid,
            Opts.source,
            Opts.omit,
            Opts.include,
            Opts.timid,
        ] + GLOBAL_ARGS,
        defaults = {'erase_first': True},
        cmd = "run",
        usage="[options] <pyfile> [program options]",
        description="Run a Python program, measuring code execution."
    ),

    'xml': CmdOptionParser("xml",
    'xml': CmdOptionParser(
        "xml",
        [
            Opts.fail_under,
            Opts.ignore_errors,
            Opts.omit,
            Opts.include,
            Opts.omit,
            Opts.output_xml,
        ] + GLOBAL_ARGS,
        cmd = "xml",
        usage="[options] [modules]",
        description="Generate an XML report of coverage results."
    ),
@@ -350,10 +378,10 @@ OK, ERR, FAIL_UNDER = 0, 1, 2


class CoverageScript(object):
    """The command-line interface to Coverage."""
    """The command-line interface to coverage.py."""

    def __init__(self, _covpkg=None, _run_python_file=None,
                 _run_python_module=None, _help_fn=None):
                 _run_python_module=None, _help_fn=None, _path_exists=None):
        # _covpkg is for dependency injection, so we can test this code.
        if _covpkg:
            self.covpkg = _covpkg
@@ -365,12 +393,26 @@ class CoverageScript(object):
        self.run_python_file = _run_python_file or run_python_file
        self.run_python_module = _run_python_module or run_python_module
        self.help_fn = _help_fn or self.help
        self.classic = False
        self.path_exists = _path_exists or os.path.exists
        self.global_option = False

        self.coverage = None

        self.program_name = os.path.basename(sys.argv[0])
        if self.program_name == '__main__.py':
            self.program_name = 'coverage'
        if env.WINDOWS:
            # entry_points={'console_scripts':...} on Windows makes files
            # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
            # invoke coverage-script.py, coverage3-script.py, and
            # coverage-3.5-script.py. argv[0] is the .py file, but we want to
            # get back to the original form.
            auto_suffix = "-script.py"
            if self.program_name.endswith(auto_suffix):
                self.program_name = self.program_name[:-len(auto_suffix)]

    def command_line(self, argv):
        """The bulk of the command line interface to Coverage.
        """The bulk of the command line interface to coverage.py.

        `argv` is the argument list to process.

@@ -382,11 +424,11 @@ class CoverageScript(object):
            self.help_fn(topic='minimum_help')
            return OK

        # The command syntax we parse depends on the first argument. Classic
        # syntax always starts with an option.
        self.classic = argv[0].startswith('-')
        if self.classic:
            parser = ClassicOptionParser()
        # The command syntax we parse depends on the first argument. Global
        # switch syntax always starts with an option.
        self.global_option = argv[0].startswith('-')
        if self.global_option:
            parser = GlobalOptionParser()
        else:
            parser = CMDS.get(argv[0])
            if not parser:
@@ -395,7 +437,7 @@ class CoverageScript(object):
            argv = argv[1:]

        parser.help_fn = self.help_fn
        ok, options, args = parser.parse_args(argv)
        ok, options, args = parser.parse_args_ok(argv)
        if not ok:
            return ERR

@@ -403,9 +445,9 @@ class CoverageScript(object):
        if self.do_help(options, args, parser):
            return OK

        # Check for conflicts and problems in the options.
        if not self.args_ok(options, args):
            return ERR
        # We need to be able to import from the current directory, because
        # plugins may try to, for example, to read Django settings.
        sys.path[0] = ''

        # Listify the list options.
        source = unshell_list(options.source)
@@ -424,51 +466,72 @@ class CoverageScript(object):
            omit=omit,
            include=include,
            debug=debug,
            concurrency=options.concurrency,
        )

        if 'debug' in options.actions:
        if options.action == "debug":
            return self.do_debug(args)

        if 'erase' in options.actions or options.erase_first:
        elif options.action == "erase":
            self.coverage.erase()
        else:
            return OK

        elif options.action == "run":
            return self.do_run(options, args)

        elif options.action == "combine":
            if options.append:
                self.coverage.load()

        if 'execute' in options.actions:
            self.do_execute(options, args)

        if 'combine' in options.actions:
            self.coverage.combine()
            data_dirs = args or None
            self.coverage.combine(data_dirs)
            self.coverage.save()
            return OK

        # Remaining actions are reporting, with some common options.
        report_args = dict(
            morfs = args,
            morfs=unglob_args(args),
            ignore_errors=options.ignore_errors,
            omit=omit,
            include=include,
        )

        if 'report' in options.actions:
        self.coverage.load()

        total = None
        if options.action == "report":
            total = self.coverage.report(
                show_missing=options.show_missing, **report_args)
        if 'annotate' in options.actions:
                show_missing=options.show_missing,
                skip_covered=options.skip_covered, **report_args)
        elif options.action == "annotate":
            self.coverage.annotate(
                directory=options.directory, **report_args)
        if 'html' in options.actions:
        elif options.action == "html":
            total = self.coverage.html_report(
                directory=options.directory, title=options.title,
                **report_args)
        if 'xml' in options.actions:
        elif options.action == "xml":
            outfile = options.outfile
            total = self.coverage.xml_report(outfile=outfile, **report_args)

        if total is not None:
            # Apply the command line fail-under options, and then use the config
            # value, so we can get fail_under from the config file.
            if options.fail_under is not None:
            if total >= options.fail_under:
                self.coverage.set_option("report:fail_under", options.fail_under)

            if self.coverage.get_option("report:fail_under"):
                # Total needs to be rounded, but don't want to report 100
                # unless it is really 100.
                if 99 < total < 100:
                    total = 99
                else:
                    total = round(total)

                if total >= self.coverage.get_option("report:fail_under"):
                    return OK
                else:
                    return FAIL_UNDER
        else:

        return OK
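
Editor's note: the rounding above is deliberately asymmetric: a total of 99.7
must not be reported as a passing 100. A worked sketch of the same rule:

    def clamped_total(total):
        # Don't report 100 unless it is really 100.
        if 99 < total < 100:
            return 99
        return round(total)

    assert clamped_total(99.7) == 99      # plain round() would give 100
    assert clamped_total(100.0) == 100
    assert clamped_total(87.4) == 87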

    def help(self, error=None, topic=None, parser=None):
@@ -476,13 +539,19 @@ class CoverageScript(object):
        assert error or topic or parser
        if error:
            print(error)
            print("Use 'coverage help' for help.")
            print("Use '%s help' for help." % (self.program_name,))
        elif parser:
            print(parser.format_help().strip())
        else:
            help_msg = HELP_TOPICS.get(topic, '').strip()
            help_params = dict(self.covpkg.__dict__)
            help_params['program_name'] = self.program_name
            if CTracer is not None:
                help_params['extension_modifier'] = 'with C extension'
            else:
                help_params['extension_modifier'] = 'without C extension'
            help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
            if help_msg:
                print(help_msg % self.covpkg.__dict__)
                print(help_msg.format(**help_params))
            else:
                print("Don't know topic %r" % topic)

@@ -494,13 +563,13 @@ class CoverageScript(object):
        """
        # Handle help.
        if options.help:
            if self.classic:
            if self.global_option:
                self.help_fn(topic='help')
            else:
                self.help_fn(parser=parser)
            return True

        if "help" in options.actions:
        if options.action == "help":
            if args:
                for a in args:
                    parser = CMDS.get(a)
@@ -519,59 +588,42 @@ class CoverageScript(object):

        return False

    def args_ok(self, options, args):
        """Check for conflicts and problems in the options.

        Returns True if everything is ok, or False if not.

        """
        for i in ['erase', 'execute']:
            for j in ['annotate', 'html', 'report', 'combine']:
                if (i in options.actions) and (j in options.actions):
                    self.help_fn("You can't specify the '%s' and '%s' "
                        "options at the same time." % (i, j))
                    return False

        if not options.actions:
            self.help_fn(
                "You must specify at least one of -e, -x, -c, -r, -a, or -b."
            )
            return False
        args_allowed = (
            'execute' in options.actions or
            'annotate' in options.actions or
            'html' in options.actions or
            'debug' in options.actions or
            'report' in options.actions or
            'xml' in options.actions
        )
        if not args_allowed and args:
            self.help_fn("Unexpected arguments: %s" % " ".join(args))
            return False

        if 'execute' in options.actions and not args:
            self.help_fn("Nothing to do.")
            return False

        return True

    def do_execute(self, options, args):
    def do_run(self, options, args):
        """Implementation of 'coverage run'."""

        # Set the first path element properly.
        old_path0 = sys.path[0]
        if not args:
            self.help_fn("Nothing to do.")
            return ERR

        if options.append and self.coverage.get_option("run:parallel"):
            self.help_fn("Can't append to data files in parallel mode.")
            return ERR

        if options.concurrency == "multiprocessing":
            # Can't set other run-affecting command line options with
            # multiprocessing.
            for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
                # As it happens, all of these options have no default, meaning
                # they will be None if they have not been specified.
                if getattr(options, opt_name) is not None:
                    self.help_fn(
                        "Options affecting multiprocessing must be specified "
                        "in a configuration file."
                    )
                    return ERR

        if not self.coverage.get_option("run:parallel"):
            if not options.append:
                self.coverage.erase()

        # Run the script.
        self.coverage.start()
        code_ran = True
        try:
            try:
                if options.module:
                    sys.path[0] = ''
                    self.run_python_module(args[0], args)
                else:
                    filename = args[0]
                    sys.path[0] = os.path.abspath(os.path.dirname(filename))
                    self.run_python_file(filename, args)
            except NoSource:
                code_ran = False
@@ -579,38 +631,54 @@ class CoverageScript(object):
        finally:
            self.coverage.stop()
            if code_ran:
                if options.append:
                    data_file = self.coverage.get_option("run:data_file")
                    if self.path_exists(data_file):
                        self.coverage.combine(data_paths=[data_file])
                self.coverage.save()

        # Restore the old path
        sys.path[0] = old_path0
        return OK

    def do_debug(self, args):
        """Implementation of 'coverage debug'."""

        if not args:
            self.help_fn("What information would you like: data, sys?")
            self.help_fn("What information would you like: config, data, sys?")
            return ERR

        for info in args:
            if info == 'sys':
                print("-- sys ----------------------------------------")
                for line in info_formatter(self.coverage.sysinfo()):
                sys_info = self.coverage.sys_info()
                print(info_header("sys"))
                for line in info_formatter(sys_info):
                    print(" %s" % line)
            elif info == 'data':
                print("-- data ---------------------------------------")
                self.coverage.load()
                print("path: %s" % self.coverage.data.filename)
                print("has_arcs: %r" % self.coverage.data.has_arcs())
                summary = self.coverage.data.summary(fullpath=True)
                if summary:
                data = self.coverage.data
                print(info_header("data"))
                print("path: %s" % self.coverage.data_files.filename)
                if data:
                    print("has_arcs: %r" % data.has_arcs())
                    summary = data.line_counts(fullpath=True)
                    filenames = sorted(summary.keys())
                    print("\n%d files:" % len(filenames))
                    for f in filenames:
                        print("%s: %d lines" % (f, summary[f]))
                        line = "%s: %d lines" % (f, summary[f])
                        plugin = data.file_tracer(f)
                        if plugin:
                            line += " [%s]" % plugin
                        print(line)
                else:
                    print("No data collected")
            elif info == 'config':
                print(info_header("config"))
                config_info = self.coverage.config.__dict__.items()
                for line in info_formatter(config_info):
                    print(" %s" % line)
            else:
                self.help_fn("Don't know what you mean by %r" % info)
                return ERR

        return OK


@@ -618,8 +686,8 @@ def unshell_list(s):
    """Turn a command-line argument into a list."""
    if not s:
        return None
    if sys.platform == 'win32':
        # When running coverage as coverage.exe, some of the behavior
    if env.WINDOWS:
        # When running coverage.py as coverage.exe, some of the behavior
        # of the shell is emulated: wildcards are expanded into a list of
        # file names. So you have to single-quote patterns on the command
        # line, but (not) helpfully, the single quotes are included in the
@@ -628,60 +696,25 @@ def unshell_list(s):
    return s.split(',')


def unglob_args(args):
    """Interpret shell wildcards for platforms that need it."""
    if env.WINDOWS:
        globbed = []
        for arg in args:
            if '?' in arg or '*' in arg:
                globbed.extend(glob.glob(arg))
            else:
                globbed.append(arg)
        args = globbed
    return args


HELP_TOPICS = {
    # -------------------------
    'classic':
r"""Coverage.py version %(__version__)s
Measure, collect, and report on code coverage in Python programs.

Usage:

coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
    Execute the module, passing the given command-line arguments, collecting
    coverage data. With the -p option, include the machine name and process
    id in the .coverage file name. With -L, measure coverage even inside the
    Python installed library, which isn't done by default. With --timid, use a
    simpler but slower trace method.

coverage -e
    Erase collected coverage data.

coverage -c
    Combine data from multiple coverage files (as created by -p option above)
    and store it into a single file representing the union of the coverage.

coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
    Report on the statement coverage for the given files. With the -m
    option, show line numbers of the statements that weren't executed.

coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...]
    Create an HTML report of the coverage of the given files. Each file gets
    its own page, with the file listing decorated to show executed, excluded,
    and missed lines.

coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...]
    Make annotated copies of the given files, marking statements that
    are executed with > and statements that are missed with !.

-d DIR
    Write output files for -b or -a to this directory.

-i  Ignore errors while reporting or annotating.

-o DIR,...
    Omit reporting or annotating files when their filename path starts with
    a directory listed in the omit list.
    e.g. coverage -i -r -o c:\python25,lib\enthought\traits

Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else.
""",
    # -------------------------
    'help': """\
Coverage.py, version %(__version__)s
Coverage.py, version {__version__} {extension_modifier}
Measure, collect, and report on code coverage in Python programs.

usage: coverage <command> [options] [args]
usage: {program_name} <command> [options] [args]

Commands:
    annotate    Annotate source files with execution information.
@@ -693,23 +726,23 @@ Commands:
    run         Run a Python program and measure code execution.
    xml         Create an XML report of coverage results.

Use "coverage help <command>" for detailed help on any command.
Use "coverage help classic" for help on older command syntax.
For more information, see %(__url__)s
Use "{program_name} help <command>" for detailed help on any command.
For full documentation, see {__url__}
""",
    # -------------------------
    'minimum_help': """\
Code coverage for Python. Use 'coverage help' for help.
Code coverage for Python. Use '{program_name} help' for help.
""",
    # -------------------------
    'version': """\
Coverage.py, version %(__version__)s. %(__url__)s
Coverage.py, version {__version__} {extension_modifier}
Documentation at {__url__}
""",
}


def main(argv=None):
    """The main entry point to Coverage.
    """The main entry point to coverage.py.

    This is installed as the script entry point.

@@ -717,26 +750,19 @@ def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    try:
        start = time.clock()
        status = CoverageScript().command_line(argv)
        end = time.clock()
        if 0:
            print("time: %.3fs" % (end - start))
    except ExceptionDuringRun:
    except ExceptionDuringRun as err:
        # An exception was caught while running the product code. The
        # sys.exc_info() return tuple is packed into an ExceptionDuringRun
        # exception.
        _, err, _ = sys.exc_info()
        traceback.print_exception(*err.args)
        status = ERR
    except CoverageException:
    except CoverageException as err:
        # A controlled error inside coverage.py: print the message to the user.
        _, err, _ = sys.exc_info()
        print(err)
        status = ERR
    except SystemExit:
    except SystemExit as err:
        # The user called `sys.exit()`. Exit with their argument, if any.
        _, err, _ = sys.exc_info()
        if err.args:
            status = err.args[0]
        else:

@@ -1,145 +0,0 @@
|
||||
"""Code unit (module) handling for Coverage."""
|
||||
|
||||
import glob, os
|
||||
|
||||
from coverage.backward import open_source, string_class, StringIO
|
||||
from coverage.misc import CoverageException
|
||||
|
||||
|
||||
def code_unit_factory(morfs, file_locator):
|
||||
"""Construct a list of CodeUnits from polymorphic inputs.
|
||||
|
||||
`morfs` is a module or a filename, or a list of same.
|
||||
|
||||
`file_locator` is a FileLocator that can help resolve filenames.
|
||||
|
||||
Returns a list of CodeUnit objects.
|
||||
|
||||
"""
|
||||
# Be sure we have a list.
|
||||
if not isinstance(morfs, (list, tuple)):
|
||||
morfs = [morfs]
|
||||
|
||||
# On Windows, the shell doesn't expand wildcards. Do it here.
|
||||
globbed = []
|
||||
for morf in morfs:
|
||||
if isinstance(morf, string_class) and ('?' in morf or '*' in morf):
|
||||
globbed.extend(glob.glob(morf))
|
||||
else:
|
||||
globbed.append(morf)
|
||||
morfs = globbed
|
||||
|
||||
code_units = [CodeUnit(morf, file_locator) for morf in morfs]
|
||||
|
||||
return code_units
|
||||
|
||||
|
||||
class CodeUnit(object):
|
||||
"""Code unit: a filename or module.
|
||||
|
||||
Instance attributes:
|
||||
|
||||
`name` is a human-readable name for this code unit.
|
||||
`filename` is the os path from which we can read the source.
|
||||
`relative` is a boolean.
|
||||
|
||||
"""
|
||||
def __init__(self, morf, file_locator):
|
||||
self.file_locator = file_locator
|
||||
|
||||
if hasattr(morf, '__file__'):
|
||||
f = morf.__file__
|
||||
else:
|
||||
f = morf
|
||||
# .pyc files should always refer to a .py instead.
|
||||
if f.endswith('.pyc') or f.endswith('.pyo'):
|
||||
f = f[:-1]
|
||||
elif f.endswith('$py.class'): # Jython
|
||||
f = f[:-9] + ".py"
|
||||
self.filename = self.file_locator.canonical_filename(f)
|
||||
|
||||
if hasattr(morf, '__name__'):
|
||||
n = modname = morf.__name__
|
||||
self.relative = True
|
||||
else:
|
||||
n = os.path.splitext(morf)[0]
|
||||
rel = self.file_locator.relative_filename(n)
|
||||
if os.path.isabs(n):
|
||||
self.relative = (rel != n)
|
||||
else:
|
||||
self.relative = True
|
||||
n = rel
|
||||
modname = None
|
||||
self.name = n
|
||||
self.modname = modname
|
||||
|
||||
def __repr__(self):
|
||||
return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename)
|
||||
|
||||
# Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
|
||||
# of them defined.
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.name < other.name
|
||||
def __le__(self, other):
|
||||
return self.name <= other.name
|
||||
def __eq__(self, other):
|
||||
return self.name == other.name
|
||||
def __ne__(self, other):
|
||||
return self.name != other.name
|
||||
def __gt__(self, other):
|
||||
return self.name > other.name
|
||||
def __ge__(self, other):
|
||||
return self.name >= other.name
|
||||
|
||||
def flat_rootname(self):
|
||||
"""A base for a flat filename to correspond to this code unit.
|
||||
|
||||
Useful for writing files about the code where you want all the files in
|
||||
the same directory, but need to differentiate same-named files from
|
||||
different directories.
|
||||
|
||||
For example, the file a/b/c.py might return 'a_b_c'
|
||||
|
||||
"""
|
||||
if self.modname:
|
||||
return self.modname.replace('.', '_')
|
||||
else:
|
||||
root = os.path.splitdrive(self.name)[1]
|
||||
return root.replace('\\', '_').replace('/', '_').replace('.', '_')
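# A minimal standalone sketch of the flat_rootname() transformation
# documented above; the input path "a/b/c" is hypothetical.
import os.path as _osp
_root = _osp.splitdrive("a/b/c")[1]
assert _root.replace("\\", "_").replace("/", "_").replace(".", "_") == "a_b_c"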

def source_file(self):
"""Return an open file for reading the source of the code unit."""
if os.path.exists(self.filename):
# A regular text file: open it.
return open_source(self.filename)

# Maybe it's in a zip file?
source = self.file_locator.get_zip_data(self.filename)
if source is not None:
return StringIO(source)

# Couldn't find source.
raise CoverageException(
"No source for code '%s'." % self.filename
)

def should_be_python(self):
"""Does it seem like this file should contain Python?

This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.

"""
# Get the file extension.
_, ext = os.path.splitext(self.filename)

# Anything named *.py* should be Python.
if ext.startswith('.py'):
return True
# A file with no extension should be Python.
if not ext:
return True
# Everything else is probably not Python.
return False
@@ -1,152 +1,47 @@
"""Raw data collector for Coverage."""
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Raw data collector for coverage.py."""

import os
import sys

from coverage import env
from coverage.backward import iitems
from coverage.files import abs_file
from coverage.misc import CoverageException, isolate_module
from coverage.pytracer import PyTracer

os = isolate_module(os)

import os, sys, threading

try:
# Use the C extension code when we can, for speed.
from coverage.tracer import CTracer # pylint: disable=F0401,E0611
from coverage.tracer import CTracer, CFileDisposition
except ImportError:
# Couldn't import the C extension, maybe it isn't built.
if os.getenv('COVERAGE_TEST_TRACER') == 'c':
# During testing, we use the COVERAGE_TEST_TRACER env var to indicate
# that we've fiddled with the environment to test this fallback code.
# If we thought we had a C tracer, but couldn't import it, then exit
# quickly and clearly instead of dribbling confusing errors. I'm using
# sys.exit here instead of an exception because an exception here
# causes all sorts of other noise in unittest.
sys.stderr.write(
"*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n"
)
# During testing, we use the COVERAGE_TEST_TRACER environment variable
# to indicate that we've fiddled with the environment to test this
# fallback code. If we thought we had a C tracer, but couldn't import
# it, then exit quickly and clearly instead of dribbling confusing
# errors. I'm using sys.exit here instead of an exception because an
# exception here causes all sorts of other noise in unittest.
sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
sys.exit(1)
CTracer = None


class PyTracer(object):
"""Python implementation of the raw data tracer."""
class FileDisposition(object):
"""A simple value type for recording what to do with a file."""
pass

# Because of poor implementations of trace-function-manipulating tools,
# the Python trace function must be kept very simple. In particular, there
# must be only one function ever set as the trace function, both through
# sys.settrace, and as the return value from the trace function. Put
# another way, the trace function must always return itself. It cannot
# swap in other functions, or return None to avoid tracing a particular
# frame.
#
# The trace manipulator that introduced this restriction is DecoratorTools,
# which sets a trace function, and then later restores the pre-existing one
# by calling sys.settrace with a function it found in the current frame.
#
# Systems that use DecoratorTools (or similar trace manipulations) must use
# PyTracer to get accurate results. The command-line --timid argument is
# used to force the use of this tracer.
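# A minimal standalone sketch of the "always return yourself" rule
# described in the comment above; the function name is hypothetical.
import sys

def _demo_trace(frame, event, arg):
    # A well-behaved trace function returns itself for every event,
    # never None and never a different callable.
    return _demo_trace

sys.settrace(_demo_trace)
sys.settrace(None)  # uninstall again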

def __init__(self):
self.data = None
self.should_trace = None
self.should_trace_cache = None
self.warn = None
self.cur_file_data = None
self.last_line = 0
self.data_stack = []
self.last_exc_back = None
self.last_exc_firstlineno = 0
self.arcs = False
self.thread = None
self.stopped = False

def _trace(self, frame, event, arg_unused):
"""The trace function passed to sys.settrace."""

if self.stopped:
return

if 0:
sys.stderr.write("trace event: %s %r @%d\n" % (
event, frame.f_code.co_filename, frame.f_lineno
))

if self.last_exc_back:
if frame == self.last_exc_back:
# Someone forgot a return event.
if self.arcs and self.cur_file_data:
pair = (self.last_line, -self.last_exc_firstlineno)
self.cur_file_data[pair] = None
self.cur_file_data, self.last_line = self.data_stack.pop()
self.last_exc_back = None

if event == 'call':
# Entering a new function context. Decide if we should trace
# in this file.
self.data_stack.append((self.cur_file_data, self.last_line))
filename = frame.f_code.co_filename
if filename not in self.should_trace_cache:
tracename = self.should_trace(filename, frame)
self.should_trace_cache[filename] = tracename
else:
tracename = self.should_trace_cache[filename]
#print("called, stack is %d deep, tracename is %r" % (
# len(self.data_stack), tracename))
if tracename:
if tracename not in self.data:
self.data[tracename] = {}
self.cur_file_data = self.data[tracename]
else:
self.cur_file_data = None
# Set the last_line to -1 because the next arc will be entering a
# code block, indicated by (-1, n).
self.last_line = -1
elif event == 'line':
# Record an executed line.
if self.cur_file_data is not None:
if self.arcs:
#print("lin", self.last_line, frame.f_lineno)
self.cur_file_data[(self.last_line, frame.f_lineno)] = None
else:
#print("lin", frame.f_lineno)
self.cur_file_data[frame.f_lineno] = None
self.last_line = frame.f_lineno
elif event == 'return':
if self.arcs and self.cur_file_data:
first = frame.f_code.co_firstlineno
self.cur_file_data[(self.last_line, -first)] = None
# Leaving this function, pop the filename stack.
self.cur_file_data, self.last_line = self.data_stack.pop()
#print("returned, stack is %d deep" % (len(self.data_stack)))
elif event == 'exception':
#print("exc", self.last_line, frame.f_lineno)
self.last_exc_back = frame.f_back
self.last_exc_firstlineno = frame.f_code.co_firstlineno
return self._trace

def start(self):
"""Start this Tracer.

Return a Python function suitable for use with sys.settrace().

"""
self.thread = threading.currentThread()
sys.settrace(self._trace)
return self._trace

def stop(self):
"""Stop this Tracer."""
self.stopped = True
if self.thread != threading.currentThread():
# Called on a different thread than started us: we can't unhook
# ourselves, but we've set the flag that we should stop, so we won't
# do any more tracing.
return

if hasattr(sys, "gettrace") and self.warn:
if sys.gettrace() != self._trace:
msg = "Trace function changed, measurement is likely wrong: %r"
self.warn(msg % (sys.gettrace(),))
#print("Stopping tracer on %s" % threading.current_thread().ident)
sys.settrace(None)

def get_stats(self):
"""Return a dictionary of statistics, or None."""
return None
def should_start_context(frame):
"""Who-Tests-What hack: Determine whether this frame begins a new who-context."""
fn_name = frame.f_code.co_name
if fn_name.startswith("test"):
return fn_name


class Collector(object):
@@ -170,12 +65,17 @@ class Collector(object):
# the top, and resumed when they become the top again.
_collectors = []

def __init__(self, should_trace, timid, branch, warn):
# The concurrency settings we support here.
SUPPORTED_CONCURRENCIES = set(["greenlet", "eventlet", "gevent", "thread"])

def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
"""Create a collector.

`should_trace` is a function, taking a file name, and returning a
canonicalized filename, or None depending on whether the file should
be traced or not.
`coverage.FileDisposition object`.

`check_include` is a function taking a file name and a frame. It returns
a boolean: True if the file should be traced, False if not.

If `timid` is true, then a slower simpler trace function will be
used. This is important for some environments where manipulation of
@@ -189,10 +89,55 @@ class Collector(object):
`warn` is a warning function, taking a single string message argument,
to be used if a warning needs to be issued.

`concurrency` is a list of strings indicating the concurrency libraries
in use. Valid values are "greenlet", "eventlet", "gevent", or "thread"
(the default). Of these four values, only one can be supplied. Other
values are ignored.

"""
self.should_trace = should_trace
self.check_include = check_include
self.warn = warn
self.branch = branch
self.threading = None

self.concur_id_func = None

# We can handle a few concurrency options here, but only one at a time.
these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency)
if len(these_concurrencies) > 1:
raise CoverageException("Conflicting concurrency settings: %s" % concurrency)
self.concurrency = these_concurrencies.pop() if these_concurrencies else ''

try:
if self.concurrency == "greenlet":
import greenlet
self.concur_id_func = greenlet.getcurrent
elif self.concurrency == "eventlet":
import eventlet.greenthread # pylint: disable=import-error,useless-suppression
self.concur_id_func = eventlet.greenthread.getcurrent
elif self.concurrency == "gevent":
import gevent # pylint: disable=import-error,useless-suppression
self.concur_id_func = gevent.getcurrent
elif self.concurrency == "thread" or not self.concurrency:
# It's important to import threading only if we need it. If
# it's imported early, and the program being measured uses
# gevent, then gevent's monkey-patching won't work properly.
import threading
self.threading = threading
else:
raise CoverageException("Don't understand concurrency=%s" % concurrency)
except ImportError:
raise CoverageException(
"Couldn't trace with concurrency=%s, the module isn't installed." % (
self.concurrency,
)
)

# Who-Tests-What is just a hack at the moment, so turn it on with an
# environment variable.
self.wtw = int(os.getenv('COVERAGE_WTW', 0))

self.reset()

if timid:
@@ -203,8 +148,15 @@ class Collector(object):
# trace function.
self._trace_class = CTracer or PyTracer

if self._trace_class is CTracer:
self.file_disposition_class = CFileDisposition
self.supports_plugins = True
else:
self.file_disposition_class = FileDisposition
self.supports_plugins = False

def __repr__(self):
return "<Collector at 0x%x>" % id(self)
return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())

def tracer_name(self):
"""Return the class name of the tracer we're using."""
@@ -212,13 +164,45 @@

def reset(self):
"""Clear collected data, and prepare to collect more."""
# A dictionary mapping filenames to dicts with linenumber keys,
# or mapping filenames to dicts with linenumber pairs as keys.
# A dictionary mapping file names to dicts with line number keys (if not
# branch coverage), or mapping file names to dicts with line number
# pairs as keys (if branch coverage).
self.data = {}

# A cache of the results from should_trace, the decision about whether
# to trace execution in a file. A dict of filename to (filename or
# None).
# A dict mapping contexts to data dictionaries.
self.contexts = {}
self.contexts[None] = self.data

# A dictionary mapping file names to file tracer plugin names that will
# handle them.
self.file_tracers = {}

# The .should_trace_cache attribute is a cache from file names to
# coverage.FileDisposition objects, or None. When a file is first
# considered for tracing, a FileDisposition is obtained from
# Coverage.should_trace. Its .trace attribute indicates whether the
# file should be traced or not. If it should be, a plugin with dynamic
# file names can decide not to trace it based on the dynamic file name
# being excluded by the inclusion rules, in which case the
# FileDisposition will be replaced by None in the cache.
if env.PYPY:
import __pypy__ # pylint: disable=import-error
# Alex Gaynor said:
# should_trace_cache is a strictly growing key: once a key is in
# it, it never changes. Further, the keys used to access it are
# generally constant, given sufficient context. That is to say, at
# any given point _trace() is called, pypy is able to know the key.
# This is because the key is determined by the physical source code
# line, and that's invariant with the call site.
#
# This property of a dict with immutable keys, combined with
# call-site-constant keys is a match for PyPy's module dict,
# which is optimized for such workloads.
#
# This gives a 20% benefit on the workload described at
# https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
self.should_trace_cache = __pypy__.newdict("module")
else:
self.should_trace_cache = {}

# Our active Tracers.
@@ -228,12 +212,35 @@
"""Start a new Tracer object, and store it in self.tracers."""
tracer = self._trace_class()
tracer.data = self.data
tracer.arcs = self.branch
tracer.trace_arcs = self.branch
tracer.should_trace = self.should_trace
tracer.should_trace_cache = self.should_trace_cache
tracer.warn = self.warn

if hasattr(tracer, 'concur_id_func'):
tracer.concur_id_func = self.concur_id_func
elif self.concur_id_func:
raise CoverageException(
"Can't support concurrency=%s with %s, only threads are supported" % (
self.concurrency, self.tracer_name(),
)
)

if hasattr(tracer, 'file_tracers'):
tracer.file_tracers = self.file_tracers
if hasattr(tracer, 'threading'):
tracer.threading = self.threading
if hasattr(tracer, 'check_include'):
tracer.check_include = self.check_include
if self.wtw:
if hasattr(tracer, 'should_start_context'):
tracer.should_start_context = should_start_context
if hasattr(tracer, 'switch_context'):
tracer.switch_context = self.switch_context

fn = tracer.start()
self.tracers.append(tracer)

return fn

# The trace function has to be set individually on each thread before
@@ -242,16 +249,16 @@
# install this as a trace function, and the first time it's called, it does
# the real trace installation.

def _installation_trace(self, frame_unused, event_unused, arg_unused):
def _installation_trace(self, frame, event, arg):
"""Called on new threads, installs the real tracer."""
# Remove ourselves as the trace function
# Remove ourselves as the trace function.
sys.settrace(None)
# Install the real tracer.
fn = self._start_tracer()
# Invoke the real trace function with the current event, to be sure
# not to lose an event.
if fn:
fn = fn(frame_unused, event_unused, arg_unused)
fn = fn(frame, event, arg)
# Return the new trace function to continue tracing in this scope.
return fn
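# A minimal standalone sketch of the bootstrap pattern above: a
# one-shot installer handed to threading.settrace() so each new thread
# swaps in the real tracer on its first trace event. All names here
# are hypothetical.
import sys
import threading

def _real_trace(frame, event, arg):
    return _real_trace

def _installer(frame, event, arg):
    sys.settrace(_real_trace)               # install the real tracer
    return _real_trace(frame, event, arg)   # replay this event so it isn't lost

threading.settrace(_installer)
threading.settrace(None)  # undo again, for the sake of the sketch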

@@ -259,39 +266,47 @@
"""Start collecting trace information."""
if self._collectors:
self._collectors[-1].pause()
self._collectors.append(self)
#print("Started: %r" % self._collectors, file=sys.stderr)

# Check to see whether we had a fullcoverage tracer installed.
# Check to see whether we had a fullcoverage tracer installed. If so,
# get the stack frames it stashed away for us.
traces0 = []
if hasattr(sys, "gettrace"):
fn0 = sys.gettrace()
if fn0:
tracer0 = getattr(fn0, '__self__', None)
if tracer0:
traces0 = getattr(tracer0, 'traces', [])

try:
# Install the tracer on this thread.
fn = self._start_tracer()
except:
if self._collectors:
self._collectors[-1].resume()
raise

# If _start_tracer succeeded, then we add ourselves to the global
# stack of collectors.
self._collectors.append(self)

# Replay all the events from fullcoverage into the new trace function.
for args in traces0:
(frame, event, arg), lineno = args
try:
fn(frame, event, arg, lineno=lineno)
except TypeError:
raise Exception(
"fullcoverage must be run with the C trace function."
)
raise Exception("fullcoverage must be run with the C trace function.")

# Install our installation tracer in threading, to jump start other
# threads.
threading.settrace(self._installation_trace)
if self.threading:
self.threading.settrace(self._installation_trace)

def stop(self):
"""Stop collecting trace information."""
#print >>sys.stderr, "Stopping: %r" % self._collectors
assert self._collectors
assert self._collectors[-1] is self
assert self._collectors[-1] is self, (
"Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
)

self.pause()
self.tracers = []
@@ -310,44 +325,48 @@
if stats:
print("\nCoverage.py tracer stats:")
for k in sorted(stats.keys()):
print("%16s: %s" % (k, stats[k]))
threading.settrace(None)
print("%20s: %s" % (k, stats[k]))
if self.threading:
self.threading.settrace(None)

def resume(self):
"""Resume tracing after a `pause`."""
for tracer in self.tracers:
tracer.start()
threading.settrace(self._installation_trace)
if self.threading:
self.threading.settrace(self._installation_trace)
else:
self._start_tracer()

def get_line_data(self):
"""Return the line data collected.
def switch_context(self, new_context):
"""Who-Tests-What hack: switch to a new who-context."""
# Make a new data dict, or find the existing one, and switch all the
# tracers to use it.
data = self.contexts.setdefault(new_context, {})
for tracer in self.tracers:
tracer.data = data

Data is { filename: { lineno: None, ...}, ...}
def save_data(self, covdata):
"""Save the collected data to a `CoverageData`.

Also resets the collector.

"""
def abs_file_dict(d):
"""Return a dict like d, but with keys modified by `abs_file`."""
return dict((abs_file(k), v) for k, v in iitems(d))

if self.branch:
# If we were measuring branches, then we have to re-build the dict
# to show line data.
line_data = {}
for f, arcs in self.data.items():
line_data[f] = ldf = {}
for l1, _ in list(arcs.keys()):
if l1:
ldf[l1] = None
return line_data
covdata.add_arcs(abs_file_dict(self.data))
else:
return self.data
covdata.add_lines(abs_file_dict(self.data))
covdata.add_file_tracers(abs_file_dict(self.file_tracers))

def get_arc_data(self):
"""Return the arc data collected.
if self.wtw:
# Just a hack, so just hack it.
import pprint
out_file = "coverage_wtw_{:06}.py".format(os.getpid())
with open(out_file, "w") as wtw_out:
pprint.pprint(self.contexts, wtw_out)

Data is { filename: { (l1, l2): None, ...}, ...}

Note that no data is collected or returned if the Collector wasn't
created with `branch` true.

"""
if self.branch:
return self.data
else:
return {}
self.reset()

@@ -1,18 +1,26 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Config file for coverage.py"""

import os, re, sys
from coverage.backward import string_class, iitems
import collections
import os
import re
import sys

# In py3, ConfigParser was renamed to the more-standard configparser
try:
import configparser # pylint: disable=F0401
except ImportError:
import ConfigParser as configparser
from coverage.backward import configparser, iitems, string_class
from coverage.misc import contract, CoverageException, isolate_module

os = isolate_module(os)


class HandyConfigParser(configparser.RawConfigParser):
"""Our specialization of ConfigParser."""

def __init__(self, section_prefix):
configparser.RawConfigParser.__init__(self)
self.section_prefix = section_prefix

def read(self, filename):
"""Read a file name as UTF-8 configuration data."""
kwargs = {}
@@ -20,12 +28,41 @@ class HandyConfigParser(configparser.RawConfigParser):
kwargs['encoding'] = "utf-8"
return configparser.RawConfigParser.read(self, filename, **kwargs)

def get(self, *args, **kwargs):
v = configparser.RawConfigParser.get(self, *args, **kwargs)
def has_option(self, section, option):
section = self.section_prefix + section
return configparser.RawConfigParser.has_option(self, section, option)

def has_section(self, section):
section = self.section_prefix + section
return configparser.RawConfigParser.has_section(self, section)

def options(self, section):
section = self.section_prefix + section
return configparser.RawConfigParser.options(self, section)

def get_section(self, section):
"""Get the contents of a section, as a dictionary."""
d = {}
for opt in self.options(section):
d[opt] = self.get(section, opt)
return d

def get(self, section, *args, **kwargs):
"""Get a value, replacing environment variables also.

The arguments are the same as `RawConfigParser.get`, but in the found
value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
environment variable ``WORD``.

Returns the finished value.

"""
section = self.section_prefix + section
v = configparser.RawConfigParser.get(self, section, *args, **kwargs)
def dollar_replace(m):
"""Called for each $replacement."""
# Only one of the groups will have matched, just get its text.
word = [w for w in m.groups() if w is not None][0]
word = next(w for w in m.groups() if w is not None) # pragma: part covered
if word == "$":
return "$"
else:
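# What the $WORD / ${WORD} substitution documented in get() above
# means for a user's config file -- a hypothetical example:
#
#     [run]
#     data_file = ${COVERAGE_HOME}/.coverage
#
# With the environment variable COVERAGE_HOME set to /tmp, get() would
# return "/tmp/.coverage"; per the branch above, a literal dollar sign
# can be written as $$.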
@@ -59,27 +96,38 @@ class HandyConfigParser(configparser.RawConfigParser):
values.append(value)
return values

def getlinelist(self, section, option):
"""Read a list of full-line strings.
def getregexlist(self, section, option):
"""Read a list of full-line regexes.

The value of `section` and `option` is treated as a newline-separated
list of strings. Each value is stripped of whitespace.
list of regexes. Each value is stripped of whitespace.

Returns the list of strings.

"""
value_list = self.get(section, option)
return list(filter(None, value_list.split('\n')))
line_list = self.get(section, option)
value_list = []
for value in line_list.splitlines():
value = value.strip()
try:
re.compile(value)
except re.error as e:
raise CoverageException(
"Invalid [%s].%s value %r: %s" % (section, option, value, e)
)
if value:
value_list.append(value)
return value_list
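# A hypothetical [report] section that getregexlist() above would
# accept -- each non-blank line must compile as a regex, otherwise a
# CoverageException names the offending value:
#
#     [report]
#     exclude_lines =
#         pragma: no cover
#         def __repr__
#         raise NotImplementedError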


# The default line exclusion regexes
# The default line exclusion regexes.
DEFAULT_EXCLUDE = [
'(?i)# *pragma[: ]*no *cover',
r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
]
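# A quick standalone check of the new default exclusion regex above,
# against a hypothetical source line:
import re
assert re.search(r'(?i)#\s*pragma[:\s]?\s*no\s*cover', "x = 1  # pragma: no cover")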

# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
'(?i)# *pragma[: ]*no *branch',
r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
]

# The default partial branch regexes, based on Python semantics.
@@ -106,44 +154,44 @@ class CoverageConfig(object):

# Defaults for [run]
self.branch = False
self.concurrency = None
self.cover_pylib = False
self.data_file = ".coverage"
self.parallel = False
self.timid = False
self.source = None
self.debug = []
self.note = None
self.parallel = False
self.plugins = []
self.source = None
self.timid = False

# Defaults for [report]
self.exclude_list = DEFAULT_EXCLUDE[:]
self.fail_under = 0
self.ignore_errors = False
self.include = None
self.omit = None
self.partial_list = DEFAULT_PARTIAL[:]
self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
self.partial_list = DEFAULT_PARTIAL[:]
self.precision = 0
self.show_missing = False
self.skip_covered = False

# Defaults for [html]
self.html_dir = "htmlcov"
self.extra_css = None
self.html_dir = "htmlcov"
self.html_title = "Coverage report"

# Defaults for [xml]
self.xml_output = "coverage.xml"
self.xml_package_depth = 99

# Defaults for [paths]
self.paths = {}

def from_environment(self, env_var):
"""Read configuration from the `env_var` environment variable."""
# Timidity: for nose users, read an environment variable. This is a
# cheap hack, since the rest of the command line arguments aren't
# recognized, but it solves some users' problems.
env = os.environ.get(env_var, '')
if env:
self.timid = ('--timid' in env)
# Options for plugins
self.plugin_options = {}

MUST_BE_LIST = ["omit", "include", "debug"]
MUST_BE_LIST = ["omit", "include", "debug", "plugins", "concurrency"]

def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
@@ -153,61 +201,167 @@ class CoverageConfig(object):
v = [v]
setattr(self, k, v)

def from_file(self, filename):
@contract(filename=str)
def from_file(self, filename, section_prefix=""):
"""Read configuration from a .rc file.

`filename` is a file name to read.

Returns True or False, whether the file could be read.

"""
self.attempted_config_files.append(filename)

cp = HandyConfigParser()
cp = HandyConfigParser(section_prefix)
try:
files_read = cp.read(filename)
if files_read is not None: # return value changed in 2.4
except configparser.Error as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
if not files_read:
return False

self.config_files.extend(files_read)

try:
for option_spec in self.CONFIG_FILE_OPTIONS:
self.set_attr_from_config_option(cp, *option_spec)
self._set_attr_from_config_option(cp, *option_spec)
except ValueError as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))

# Check that there are no unrecognized options.
all_options = collections.defaultdict(set)
for option_spec in self.CONFIG_FILE_OPTIONS:
section, option = option_spec[1].split(":")
all_options[section].add(option)

for section, options in iitems(all_options):
if cp.has_section(section):
for unknown in set(cp.options(section)) - options:
if section_prefix:
section = section_prefix + section
raise CoverageException(
"Unrecognized option '[%s] %s=' in config file %s" % (
section, unknown, filename
)
)

# [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
self.paths[option] = cp.getlist('paths', option)

# plugins can have options
for plugin in self.plugins:
if cp.has_section(plugin):
self.plugin_options[plugin] = cp.get_section(plugin)

return True
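# A hypothetical call showing what section_prefix enables: reading
# coverage settings embedded in a shared file whose sections carry a
# "coverage:" prefix (e.g. [coverage:run]); `config` is assumed to be
# a CoverageConfig instance:
#
#     config.from_file("setup.cfg", section_prefix="coverage:")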

CONFIG_FILE_OPTIONS = [
# These are *args for _set_attr_from_config_option:
# (attr, where, type_="")
#
# attr is the attribute to set on the CoverageConfig object.
# where is the section:name to read from the configuration file.
# type_ is the optional type to apply, by using .getTYPE to read the
# configuration value from the file.

# [run]
('branch', 'run:branch', 'boolean'),
('concurrency', 'run:concurrency', 'list'),
('cover_pylib', 'run:cover_pylib', 'boolean'),
('data_file', 'run:data_file'),
('debug', 'run:debug', 'list'),
('include', 'run:include', 'list'),
('note', 'run:note'),
('omit', 'run:omit', 'list'),
('parallel', 'run:parallel', 'boolean'),
('plugins', 'run:plugins', 'list'),
('source', 'run:source', 'list'),
('timid', 'run:timid', 'boolean'),

# [report]
('exclude_list', 'report:exclude_lines', 'linelist'),
('exclude_list', 'report:exclude_lines', 'regexlist'),
('fail_under', 'report:fail_under', 'int'),
('ignore_errors', 'report:ignore_errors', 'boolean'),
('include', 'report:include', 'list'),
('omit', 'report:omit', 'list'),
('partial_list', 'report:partial_branches', 'linelist'),
('partial_always_list', 'report:partial_branches_always', 'linelist'),
('partial_always_list', 'report:partial_branches_always', 'regexlist'),
('partial_list', 'report:partial_branches', 'regexlist'),
('precision', 'report:precision', 'int'),
('show_missing', 'report:show_missing', 'boolean'),
('skip_covered', 'report:skip_covered', 'boolean'),
('sort', 'report:sort'),

# [html]
('html_dir', 'html:directory'),
('extra_css', 'html:extra_css'),
('html_dir', 'html:directory'),
('html_title', 'html:title'),

# [xml]
('xml_output', 'xml:output'),
('xml_package_depth', 'xml:package_depth', 'int'),
]
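# A hypothetical .coveragerc exercising several of the option specs
# listed above (each section:name pair maps onto the table):
#
#     [run]
#     branch = True
#     source =
#         mypkg
#
#     [report]
#     fail_under = 90
#     show_missing = True
#
#     [html]
#     directory = htmlcov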

def set_attr_from_config_option(self, cp, attr, where, type_=''):
def _set_attr_from_config_option(self, cp, attr, where, type_=''):
"""Set an attribute on self if it exists in the ConfigParser."""
section, option = where.split(":")
if cp.has_option(section, option):
method = getattr(cp, 'get' + type_)
setattr(self, attr, method(section, option))

def get_plugin_options(self, plugin):
"""Get a dictionary of options for the plugin named `plugin`."""
return self.plugin_options.get(plugin, {})

def set_option(self, option_name, value):
"""Set an option in the configuration.

`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.

`value` is the new value for the option.

"""

# Check all the hard-coded options.
for option_spec in self.CONFIG_FILE_OPTIONS:
attr, where = option_spec[:2]
if where == option_name:
setattr(self, attr, value)
return

# See if it's a plugin option.
plugin_name, _, key = option_name.partition(":")
if key and plugin_name in self.plugins:
self.plugin_options.setdefault(plugin_name, {})[key] = value
return

# If we get here, we didn't find the option.
raise CoverageException("No such option: %r" % option_name)

def get_option(self, option_name):
"""Get an option from the configuration.

`option_name` is a colon-separated string indicating the section and
option name. For example, the ``branch`` option in the ``[run]``
section of the config file would be indicated with `"run:branch"`.

Returns the value of the option.

"""

# Check all the hard-coded options.
for option_spec in self.CONFIG_FILE_OPTIONS:
attr, where = option_spec[:2]
if where == option_name:
return getattr(self, attr)

# See if it's a plugin option.
plugin_name, _, key = option_name.partition(":")
if key and plugin_name in self.plugins:
return self.plugin_options.get(plugin_name, {}).get(key)

# If we get here, we didn't find the option.
raise CoverageException("No such option: %r" % option_name)
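# Hypothetical usage of the two option accessors above, given a
# CoverageConfig instance named `config`:
#
#     config.set_option("run:branch", True)
#     assert config.get_option("run:branch") is True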

File diff suppressed because it is too large
@@ -1,81 +1,643 @@
"""Coverage data for Coverage."""
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Coverage data for coverage.py."""

import glob
import itertools
import json
import optparse
import os
import os.path
import random
import re
import socket

from coverage.backward import iitems, pickle, sorted # pylint: disable=W0622
from coverage import env
from coverage.backward import iitems, string_class
from coverage.debug import _TEST_NAME_FILE
from coverage.files import PathAliases
from coverage.misc import file_be_gone
from coverage.misc import CoverageException, file_be_gone, isolate_module

os = isolate_module(os)


class CoverageData(object):
"""Manages collected coverage data, including file storage.

The data file format is a pickled dict, with these keys:
This class is the public supported API to the data coverage.py collects
during program execution. It includes information about what code was
executed. It does not include information from the analysis phase, to
determine what lines could have been executed, or what lines were not
executed.

* collector: a string identifying the collecting software
.. note::

* lines: a dict mapping filenames to sorted lists of line numbers
executed:
{ 'file1': [17,23,45], 'file2': [1,2,3], ... }
The file format is not documented or guaranteed. It will change in
the future, in possibly complicated ways. Do not read coverage.py
data files directly. Use this API to avoid disruption.

* arcs: a dict mapping filenames to sorted lists of line number pairs:
{ 'file1': [(17,23), (17,25), (25,26)], ... }
There are a number of kinds of data that can be collected:

* **lines**: the line numbers of source lines that were executed.
These are always available.

* **arcs**: pairs of source and destination line numbers for transitions
between source lines. These are only available if branch coverage was
used.

* **file tracer names**: the module names of the file tracer plugins that
handled each file in the data.

* **run information**: information about the program execution. This is
written during "coverage run", and then accumulated during "coverage
combine".

Lines, arcs, and file tracer names are stored for each source file. File
names in this API are case-sensitive, even on platforms with
case-insensitive file systems.

To read a coverage.py data file, use :meth:`read_file`, or
:meth:`read_fileobj` if you have an already-opened file. You can then
access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
or :meth:`file_tracer`. Run information is available with
:meth:`run_infos`.

The :meth:`has_arcs` method indicates whether arc data is available. You
can get a list of the files in the data with :meth:`measured_files`.
A summary of the line data is available from :meth:`line_counts`. As with
most Python containers, you can determine if there is any data at all by
using this object as a boolean value.


Most data files will be created by coverage.py itself, but you can use
methods here to create data files if you like. The :meth:`add_lines`,
:meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
that are convenient for coverage.py. The :meth:`add_run_info` method adds
key-value pairs to the run information.

To add a file without any measured data, use :meth:`touch_file`.

You write to a named file with :meth:`write_file`, or to an already opened
file with :meth:`write_fileobj`.

You can clear the data in memory with :meth:`erase`. Two data collections
can be combined by using :meth:`update` on one :class:`CoverageData`,
passing it the other.

"""

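# A minimal sketch of the documented reading API; the data file path
# is hypothetical:
#
#     from coverage.data import CoverageData
#     data = CoverageData()
#     data.read_file(".coverage")
#     for fname in data.measured_files():
#         print(fname, data.lines(fname), data.file_tracer(fname))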
def __init__(self, basename=None, collector=None, debug=None):
# The data file format is JSON, with these keys:
#
# * lines: a dict mapping file names to lists of line numbers
# executed::
#
# { "file1": [17,23,45], "file2": [1,2,3], ... }
#
# * arcs: a dict mapping file names to lists of line number pairs::
#
# { "file1": [[17,23], [17,25], [25,26]], ... }
#
# * file_tracers: a dict mapping file names to plugin names::
#
# { "file1": "django.coverage", ... }
#
# * runs: a list of dicts of information about the coverage.py runs
# contributing to the data::
#
# [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
#
# Only one of `lines` or `arcs` will be present: with branch coverage, data
# is stored as arcs. Without branch coverage, it is stored as lines. The
# line data is easily recovered from the arcs: it is all the first elements
# of the pairs that are greater than zero.
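# A standalone sketch of that recovery, using hypothetical arc data:
_example_arcs = {"file1": [(-1, 17), (17, 23), (23, -1)]}
_recovered_lines = dict(
    (fname, sorted(set(a for a, _ in pairs if a > 0)))
    for fname, pairs in _example_arcs.items()
)
assert _recovered_lines == {"file1": [17, 23]}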

def __init__(self, debug=None):
"""Create a CoverageData.

`basename` is the name of the file to use for storing data.

`collector` is a string describing the coverage measurement software.

`debug` is a `DebugControl` object for writing debug messages.

"""
self.collector = collector or 'unknown'
self.debug = debug

self.use_file = True

# Construct the filename that will be used for data file storage, if we
# ever do any file storage.
self.filename = basename or ".coverage"
self.filename = os.path.abspath(self.filename)
self._debug = debug

# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed:
#
# {
# 'filename1.py': { 12: None, 47: None, ... },
# ...
# }
# { 'filename1.py': [12, 47, 1001], ... }
#
self.lines = {}
self._lines = None

# A map from canonical Python source file name to a dictionary with an
# entry for each pair of line numbers forming an arc:
#
# {
# 'filename1.py': { (12,14): None, (47,48): None, ... },
# ...
# }
# { 'filename1.py': [(12,14), (47,48), ... ], ... }
#
self.arcs = {}
self._arcs = None

def usefile(self, use_file=True):
"""Set whether or not to use a disk file for data."""
self.use_file = use_file
# A map from canonical source file name to a plugin module name:
#
# { 'filename1.py': 'django.coverage', ... }
#
self._file_tracers = {}

def read(self):
"""Read coverage data from the coverage data file (if it exists)."""
if self.use_file:
self.lines, self.arcs = self._read_file(self.filename)
# A list of dicts of information about the coverage.py runs.
self._runs = []

def __repr__(self):
return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
klass=self.__class__.__name__,
lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
tracers="{{{0}}}".format(len(self._file_tracers)),
runs="[{0}]".format(len(self._runs)),
)

##
## Reading data
##

def has_arcs(self):
"""Does this data have arcs?

Arc data is only available if branch coverage was used during
collection.

Returns a boolean.

"""
return self._has_arcs()

def lines(self, filename):
"""Get the list of lines executed for a file.

If the file was not measured, returns None. A file might be measured,
and have no lines executed, in which case an empty list is returned.

If the file was executed, returns a list of integers, the line numbers
executed in the file. The list is in no particular order.

"""
if self._arcs is not None:
arcs = self._arcs.get(filename)
if arcs is not None:
all_lines = itertools.chain.from_iterable(arcs)
return list(set(l for l in all_lines if l > 0))
elif self._lines is not None:
return self._lines.get(filename)
return None

def arcs(self, filename):
"""Get the list of arcs executed for a file.

If the file was not measured, returns None. A file might be measured,
and have no arcs executed, in which case an empty list is returned.

If the file was executed, returns a list of 2-tuples of integers. Each
pair is a starting line number and an ending line number for a
transition from one line to another. The list is in no particular
order.

Negative numbers have special meaning. If the starting line number is
-N, it represents an entry to the code object that starts at line N.
If the ending line number is -N, it's an exit from the code object that
starts at line N.

"""
if self._arcs is not None:
if filename in self._arcs:
return self._arcs[filename]
return None

def file_tracer(self, filename):
"""Get the plugin name of the file tracer for a file.

Returns the name of the plugin that handles this file. If the file was
measured, but didn't use a plugin, then "" is returned. If the file
was not measured, then None is returned.

"""
# Because the vast majority of files involve no plugin, we don't store
# them explicitly in self._file_tracers. Check the measured data
# instead to see if it was a known file with no plugin.
if filename in (self._arcs or self._lines or {}):
return self._file_tracers.get(filename, "")
return None

def run_infos(self):
"""Return the list of dicts of run information.

For data collected during a single run, this will be a one-element
list. If data has been combined, there will be one element for each
original data file.

"""
return self._runs

def measured_files(self):
"""A list of all files that had been measured."""
return list(self._arcs or self._lines or {})

def line_counts(self, fullpath=False):
"""Return a dict summarizing the line coverage data.

Keys are based on the file names, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.

Returns a dict mapping file names to counts of lines.

"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
self.lines, self.arcs = {}, {}
filename_fn = os.path.basename
for filename in self.measured_files():
summ[filename_fn(filename)] = len(self.lines(filename))
return summ

def write(self, suffix=None):
def __nonzero__(self):
return bool(self._lines or self._arcs)

__bool__ = __nonzero__

def read_fileobj(self, file_obj):
"""Read the coverage data from the given file object.

Should only be used on an empty CoverageData object.

"""
data = self._read_raw_data(file_obj)

self._lines = self._arcs = None

if 'lines' in data:
self._lines = data['lines']
if 'arcs' in data:
self._arcs = dict(
(fname, [tuple(pair) for pair in arcs])
for fname, arcs in iitems(data['arcs'])
)
self._file_tracers = data.get('file_tracers', {})
self._runs = data.get('runs', [])

self._validate()

def read_file(self, filename):
"""Read the coverage data from `filename` into this object."""
if self._debug and self._debug.should('dataio'):
self._debug.write("Reading data from %r" % (filename,))
try:
with self._open_for_reading(filename) as f:
self.read_fileobj(f)
except Exception as exc:
raise CoverageException(
"Couldn't read data from '%s': %s: %s" % (
filename, exc.__class__.__name__, exc,
)
)

_GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"

@classmethod
def _open_for_reading(cls, filename):
"""Open a file appropriately for reading data."""
return open(filename, "r")

@classmethod
def _read_raw_data(cls, file_obj):
"""Read the raw data from a file object."""
go_away = file_obj.read(len(cls._GO_AWAY))
if go_away != cls._GO_AWAY:
raise CoverageException("Doesn't seem to be a coverage.py data file")
return json.load(file_obj)

@classmethod
def _read_raw_data_file(cls, filename):
"""Read the raw data from a file, for debugging."""
with cls._open_for_reading(filename) as f:
return cls._read_raw_data(f)
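# The on-disk layout the readers above expect -- the sentinel string
# followed immediately by the JSON document (a private format, per the
# class docstring; the contents here are hypothetical):
#
#     !coverage.py: This is a private format, don't read it directly!{"lines":{"file1.py":[1,2,3]}}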

##
## Writing data
##

def add_lines(self, line_data):
"""Add measured line data.

`line_data` is a dictionary mapping file names to dictionaries::

{ filename: { lineno: None, ... }, ...}

"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding lines: %d files, %d lines total" % (
len(line_data), sum(len(lines) for lines in line_data.values())
))
if self._has_arcs():
raise CoverageException("Can't add lines to existing arc data")

if self._lines is None:
self._lines = {}
for filename, linenos in iitems(line_data):
if filename in self._lines:
new_linenos = set(self._lines[filename])
new_linenos.update(linenos)
linenos = new_linenos
self._lines[filename] = list(linenos)

self._validate()
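# Hypothetical usage of add_lines() with the documented input shape:
#
#     data = CoverageData()
#     data.add_lines({"prog.py": {1: None, 2: None, 17: None}})
#     assert sorted(data.lines("prog.py")) == [1, 2, 17]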
|
||||
|
||||
def add_arcs(self, arc_data):
|
||||
"""Add measured arc data.
|
||||
|
||||
`arc_data` is a dictionary mapping file names to dictionaries::
|
||||
|
||||
{ filename: { (l1,l2): None, ... }, ...}
|
||||
|
||||
"""
|
||||
if self._debug and self._debug.should('dataop'):
|
||||
self._debug.write("Adding arcs: %d files, %d arcs total" % (
|
||||
len(arc_data), sum(len(arcs) for arcs in arc_data.values())
|
||||
))
|
||||
if self._has_lines():
|
||||
raise CoverageException("Can't add arcs to existing line data")
|
||||
|
||||
if self._arcs is None:
|
||||
self._arcs = {}
|
||||
for filename, arcs in iitems(arc_data):
|
||||
if filename in self._arcs:
|
||||
new_arcs = set(self._arcs[filename])
|
||||
new_arcs.update(arcs)
|
||||
arcs = new_arcs
|
||||
self._arcs[filename] = list(arcs)
|
||||
|
||||
self._validate()
|
||||
|
||||
def add_file_tracers(self, file_tracers):
|
||||
"""Add per-file plugin information.
|
||||
|
||||
`file_tracers` is { filename: plugin_name, ... }
|
||||
|
||||
"""
|
||||
if self._debug and self._debug.should('dataop'):
|
||||
self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
|
||||
|
||||
existing_files = self._arcs or self._lines or {}
|
||||
for filename, plugin_name in iitems(file_tracers):
|
||||
if filename not in existing_files:
|
||||
raise CoverageException(
|
||||
"Can't add file tracer data for unmeasured file '%s'" % (filename,)
|
||||
)
|
||||
existing_plugin = self._file_tracers.get(filename)
|
||||
if existing_plugin is not None and plugin_name != existing_plugin:
|
||||
raise CoverageException(
|
||||
"Conflicting file tracer name for '%s': %r vs %r" % (
|
||||
filename, existing_plugin, plugin_name,
|
||||
)
|
||||
)
|
||||
self._file_tracers[filename] = plugin_name
|
||||
|
||||
self._validate()
|
||||
|
||||
def add_run_info(self, **kwargs):
|
||||
"""Add information about the run.
|
||||
|
||||
Keywords are arbitrary, and are stored in the run dictionary. Values
|
||||
must be JSON serializable. You may use this function more than once,
|
||||
but repeated keywords overwrite each other.
|
||||
|
||||
"""
|
||||
if self._debug and self._debug.should('dataop'):
|
||||
self._debug.write("Adding run info: %r" % (kwargs,))
|
||||
if not self._runs:
|
||||
self._runs = [{}]
|
||||
self._runs[0].update(kwargs)
|
||||
self._validate()

    def touch_file(self, filename):
        """Ensure that `filename` appears in the data, empty if needed."""
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Touching %r" % (filename,))
        if not self._has_arcs() and not self._has_lines():
            raise CoverageException("Can't touch files in an empty CoverageData")

        if self._has_arcs():
            where = self._arcs
        else:
            where = self._lines
        where.setdefault(filename, [])

        self._validate()

    def write_fileobj(self, file_obj):
        """Write the coverage data to `file_obj`."""

        # Create the file data.
        file_data = {}

        if self._has_arcs():
            file_data['arcs'] = self._arcs

        if self._has_lines():
            file_data['lines'] = self._lines

        if self._file_tracers:
            file_data['file_tracers'] = self._file_tracers

        if self._runs:
            file_data['runs'] = self._runs

        # Write the data to the file.
        file_obj.write(self._GO_AWAY)
        json.dump(file_data, file_obj)

    def write_file(self, filename):
        """Write the coverage data to `filename`."""
        if self._debug and self._debug.should('dataio'):
            self._debug.write("Writing data to %r" % (filename,))
        with open(filename, 'w') as fdata:
            self.write_fileobj(fdata)
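
[Editor's note: the file written above is a JSON document preceded by the `_GO_AWAY` warning prefix, which is defined elsewhere in data.py and contains no JSON. A sketch of reading it back outside coverage.py, assuming only that the prefix holds no '{' character:]

    import json

    def read_raw_coverage(path):
        # Skip the non-JSON warning prefix that write_fileobj() emits,
        # then parse the remainder as ordinary JSON.
        with open(path) as f:
            text = f.read()
        return json.loads(text[text.index('{'):])

    raw = read_raw_coverage('.coverage')
    print(sorted(raw.get('lines', {})))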

    def erase(self):
        """Erase the data in this object."""
        self._lines = None
        self._arcs = None
        self._file_tracers = {}
        self._runs = []
        self._validate()

    def update(self, other_data, aliases=None):
        """Update this data with data from another `CoverageData`.

        If `aliases` is provided, it's a `PathAliases` object that is used to
        re-map paths to match the local machine's.

        """
        if self._has_lines() and other_data._has_arcs():
            raise CoverageException("Can't combine arc data with line data")
        if self._has_arcs() and other_data._has_lines():
            raise CoverageException("Can't combine line data with arc data")

        aliases = aliases or PathAliases()

        # _file_tracers: only have a string, so they have to agree.
        # Have to do these first, so that our examination of self._arcs and
        # self._lines won't be confused by data updated from other_data.
        for filename in other_data.measured_files():
            other_plugin = other_data.file_tracer(filename)
            filename = aliases.map(filename)
            this_plugin = self.file_tracer(filename)
            if this_plugin is None:
                if other_plugin:
                    self._file_tracers[filename] = other_plugin
            elif this_plugin != other_plugin:
                raise CoverageException(
                    "Conflicting file tracer name for '%s': %r vs %r" % (
                        filename, this_plugin, other_plugin,
                    )
                )

        # _runs: add the new runs to these runs.
        self._runs.extend(other_data._runs)

        # _lines: merge dicts.
        if other_data._has_lines():
            if self._lines is None:
                self._lines = {}
            for filename, file_lines in iitems(other_data._lines):
                filename = aliases.map(filename)
                if filename in self._lines:
                    lines = set(self._lines[filename])
                    lines.update(file_lines)
                    file_lines = list(lines)
                self._lines[filename] = file_lines

        # _arcs: merge dicts.
        if other_data._has_arcs():
            if self._arcs is None:
                self._arcs = {}
            for filename, file_arcs in iitems(other_data._arcs):
                filename = aliases.map(filename)
                if filename in self._arcs:
                    arcs = set(self._arcs[filename])
                    arcs.update(file_arcs)
                    file_arcs = list(arcs)
                self._arcs[filename] = file_arcs

        self._validate()
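
[Editor's note: a sketch of the merge above with path re-mapping; not part of the diff. The paths are invented, and add_lines() is assumed as the line-data counterpart of add_arcs().]

    from coverage.data import CoverageData
    from coverage.files import PathAliases

    local = CoverageData()
    remote = CoverageData()
    remote.add_lines({'/jenkins/build/1234/src/mod.py': {1: None, 3: None}})

    aliases = PathAliases()
    aliases.add('/jenkins/build/*/src', './src')
    local.update(remote, aliases=aliases)  # remote paths re-mapped onto ./src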

    ##
    ## Miscellaneous
    ##

    def _validate(self):
        """If we are in paranoid mode, validate that everything is right."""
        if env.TESTING:
            self._validate_invariants()

    def _validate_invariants(self):
        """Validate internal invariants."""
        # Only one of _lines or _arcs should exist.
        assert not(self._has_lines() and self._has_arcs()), (
            "Shouldn't have both _lines and _arcs"
        )

        # _lines should be a dict of lists of ints.
        if self._has_lines():
            for fname, lines in iitems(self._lines):
                assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
                assert all(isinstance(x, int) for x in lines), (
                    "_lines[%r] shouldn't be %r" % (fname, lines)
                )

        # _arcs should be a dict of lists of pairs of ints.
        if self._has_arcs():
            for fname, arcs in iitems(self._arcs):
                assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
                assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
                    "_arcs[%r] shouldn't be %r" % (fname, arcs)
                )

        # _file_tracers should have only non-empty strings as values.
        for fname, plugin in iitems(self._file_tracers):
            assert isinstance(fname, string_class), (
                "Key in _file_tracers shouldn't be %r" % (fname,)
            )
            assert plugin and isinstance(plugin, string_class), (
                "_file_tracers[%r] shouldn't be %r" % (fname, plugin)
            )

        # _runs should be a list of dicts.
        for val in self._runs:
            assert isinstance(val, dict)
            for key in val:
                assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)

    def add_to_hash(self, filename, hasher):
        """Contribute `filename`'s data to the `hasher`.

        `hasher` is a `coverage.misc.Hasher` instance to be updated with
        the file's data. It should only get the results data, not the run
        data.

        """
        if self._has_arcs():
            hasher.update(sorted(self.arcs(filename) or []))
        else:
            hasher.update(sorted(self.lines(filename) or []))
        hasher.update(self.file_tracer(filename))

    ##
    ## Internal
    ##

    def _has_lines(self):
        """Do we have data in self._lines?"""
        return self._lines is not None

    def _has_arcs(self):
        """Do we have data in self._arcs?"""
        return self._arcs is not None


class CoverageDataFiles(object):
    """Manage the use of coverage data files."""

    def __init__(self, basename=None, warn=None):
        """Create a CoverageDataFiles to manage data files.

        `warn` is the warning function to use.

        `basename` is the name of the file to use for storing data.

        """
        self.warn = warn
        # Construct the file name that will be used for data storage.
        self.filename = os.path.abspath(basename or ".coverage")

    def erase(self, parallel=False):
        """Erase the data from the file storage.

        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.

        """
        file_be_gone(self.filename)
        if parallel:
            data_dir, local = os.path.split(self.filename)
            localdot = local + '.*'
            pattern = os.path.join(os.path.abspath(data_dir), localdot)
            for filename in glob.glob(pattern):
                file_be_gone(filename)
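
[Editor's note: the parallel-mode files removed by the glob above carry the suffix built later in `write()`; a hypothetical directory listing, with host, pid, and random digits invented:]

    .coverage
    .coverage.ci-worker.12345.382716
    .coverage.ci-worker.12346.917403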

    def read(self, data):
        """Read the coverage data."""
        if os.path.exists(self.filename):
            data.read_file(self.filename)

    def write(self, data, suffix=None):
        """Write the collected coverage data to a file.

        `suffix` is a suffix to append to the base file name. This can be used
@@ -84,98 +646,27 @@ class CoverageData(object):
        the suffix.

        """
        if self.use_file:
        filename = self.filename
        if suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information. We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            extra = ""
            if _TEST_NAME_FILE:  # pragma: debugging
                with open(_TEST_NAME_FILE) as f:
                    test_name = f.read()
                extra = "." + test_name
            suffix = "%s%s.%s.%06d" % (
                socket.gethostname(), extra, os.getpid(),
                random.randint(0, 999999)
            )

        if suffix:
            filename += "." + suffix
            self.write_file(filename)
        data.write_file(filename)

    def erase(self):
        """Erase the data, both in this object, and from its file storage."""
        if self.use_file:
            if self.filename:
                file_be_gone(self.filename)
        self.lines = {}
        self.arcs = {}

    def line_data(self):
        """Return the map from filenames to lists of line numbers executed."""
        return dict(
            [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
        )

    def arc_data(self):
        """Return the map from filenames to lists of line number pairs."""
        return dict(
            [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
        )

    def write_file(self, filename):
        """Write the coverage data to `filename`."""

        # Create the file data.
        data = {}

        data['lines'] = self.line_data()
        arcs = self.arc_data()
        if arcs:
            data['arcs'] = arcs

        if self.collector:
            data['collector'] = self.collector

        if self.debug and self.debug.should('dataio'):
            self.debug.write("Writing data to %r" % (filename,))

        # Write the pickle to the file.
        fdata = open(filename, 'wb')
        try:
            pickle.dump(data, fdata, 2)
        finally:
            fdata.close()

    def read_file(self, filename):
        """Read the coverage data from `filename`."""
        self.lines, self.arcs = self._read_file(filename)

    def raw_data(self, filename):
        """Return the raw pickled data from `filename`."""
        if self.debug and self.debug.should('dataio'):
            self.debug.write("Reading data from %r" % (filename,))
        fdata = open(filename, 'rb')
        try:
            data = pickle.load(fdata)
        finally:
            fdata.close()
        return data

    def _read_file(self, filename):
        """Return the stored coverage data from the given file.

        Returns two values, suitable for assigning to `self.lines` and
        `self.arcs`.

        """
        lines = {}
        arcs = {}
        try:
            data = self.raw_data(filename)
            if isinstance(data, dict):
                # Unpack the 'lines' item.
                lines = dict([
                    (f, dict.fromkeys(linenos, None))
                    for f, linenos in iitems(data.get('lines', {}))
                ])
                # Unpack the 'arcs' item.
                arcs = dict([
                    (f, dict.fromkeys(arcpairs, None))
                    for f, arcpairs in iitems(data.get('arcs', {}))
                ])
        except Exception:
            pass
        return lines, arcs

    def combine_parallel_data(self, aliases=None):
    def combine_parallel_data(self, data, aliases=None, data_paths=None):
        """Combine a number of data files together.

        Treat `self.filename` as a file prefix, and combine the data from all
@@ -184,95 +675,94 @@ class CoverageData(object):
        If `aliases` is provided, it's a `PathAliases` object that is used to
        re-map paths to match the local machine's.

        If `data_paths` is provided, it is a list of directories or files to
        combine. Directories are searched for files that start with
        `self.filename` plus dot as a prefix, and those files are combined.

        If `data_paths` is not provided, then the directory portion of
        `self.filename` is used as the directory to search for data files.

        Every data file found and combined is then deleted from disk. If a file
        cannot be read, a warning will be issued, and the file will not be
        deleted.

        """
        aliases = aliases or PathAliases()
        # Because of the os.path.abspath in the constructor, data_dir will
        # never be an empty string.
        data_dir, local = os.path.split(self.filename)
        localdot = local + '.'
        for f in os.listdir(data_dir or '.'):
            if f.startswith(localdot):
                full_path = os.path.join(data_dir, f)
                new_lines, new_arcs = self._read_file(full_path)
                for filename, file_data in iitems(new_lines):
                    filename = aliases.map(filename)
                    self.lines.setdefault(filename, {}).update(file_data)
                for filename, file_data in iitems(new_arcs):
                    filename = aliases.map(filename)
                    self.arcs.setdefault(filename, {}).update(file_data)
                if f != local:
                    os.remove(full_path)
        localdot = local + '.*'

    def add_line_data(self, line_data):
        """Add executed line data.

        `line_data` is { filename: { lineno: None, ... }, ...}

        """
        for filename, linenos in iitems(line_data):
            self.lines.setdefault(filename, {}).update(linenos)

    def add_arc_data(self, arc_data):
        """Add measured arc data.

        `arc_data` is { filename: { (l1,l2): None, ... }, ...}

        """
        for filename, arcs in iitems(arc_data):
            self.arcs.setdefault(filename, {}).update(arcs)

    def touch_file(self, filename):
        """Ensure that `filename` appears in the data, empty if needed."""
        self.lines.setdefault(filename, {})

    def measured_files(self):
        """A list of all files that had been measured."""
        return list(self.lines.keys())

    def executed_lines(self, filename):
        """A map containing all the line numbers executed in `filename`.

        If `filename` hasn't been collected at all (because it wasn't executed)
        then return an empty map.

        """
        return self.lines.get(filename) or {}

    def executed_arcs(self, filename):
        """A map containing all the arcs executed in `filename`."""
        return self.arcs.get(filename) or {}

    def add_to_hash(self, filename, hasher):
        """Contribute `filename`'s data to the Md5Hash `hasher`."""
        hasher.update(self.executed_lines(filename))
        hasher.update(self.executed_arcs(filename))

    def summary(self, fullpath=False):
        """Return a dict summarizing the coverage data.

        Keys are based on the filenames, and values are the number of executed
        lines. If `fullpath` is true, then the keys are the full pathnames of
        the files, otherwise they are the basenames of the files.

        """
        summ = {}
        if fullpath:
            filename_fn = lambda f: f
        data_paths = data_paths or [data_dir]
        files_to_combine = []
        for p in data_paths:
            if os.path.isfile(p):
                files_to_combine.append(os.path.abspath(p))
            elif os.path.isdir(p):
                pattern = os.path.join(os.path.abspath(p), localdot)
                files_to_combine.extend(glob.glob(pattern))
            else:
            filename_fn = os.path.basename
        for filename, lines in iitems(self.lines):
            summ[filename_fn(filename)] = len(lines)
        return summ
                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))

    def has_arcs(self):
        """Does this data have arcs?"""
        return bool(self.arcs)
        for f in files_to_combine:
            new_data = CoverageData()
            try:
                new_data.read_file(f)
            except CoverageException as exc:
                if self.warn:
                    # The CoverageException has the file name in it, so just
                    # use the message as the warning.
                    self.warn(str(exc))
            else:
                data.update(new_data, aliases=aliases)
                file_be_gone(f)
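
[Editor's note: the old pickle-based code is interleaved above because the side-by-side diff view was flattened. A sketch of driving the new combine path, using only names from this diff; directory names invented:]

    data_files = CoverageDataFiles(basename='.coverage')
    data = CoverageData()
    # Gather .coverage.* files from two result directories, merge them into
    # `data`, and delete each file that was read successfully.
    data_files.combine_parallel_data(data, data_paths=['run1', 'run2'])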


def canonicalize_json_data(data):
    """Canonicalize our JSON data so it can be compared."""
    for fname, lines in iitems(data.get('lines', {})):
        data['lines'][fname] = sorted(lines)
    for fname, arcs in iitems(data.get('arcs', {})):
        data['arcs'][fname] = sorted(arcs)


def pretty_data(data):
    """Format data as JSON, but as nicely as possible.

    Returns a string.

    """
    # Start with a basic JSON dump.
    out = json.dumps(data, indent=4, sort_keys=True)
    # But pairs of numbers shouldn't be split across lines...
    out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
    # Trailing spaces mess with tests, get rid of them.
    out = re.sub(r"(?m)\s+$", "", out)
    return out
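
[Editor's note: not part of the diff; a quick demonstration of the substitution above.]

    import json
    import re

    out = json.dumps({"arcs": {"f.py": [[1, 2]]}}, indent=4, sort_keys=True)
    out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
    print(out)  # the arc prints as "[1, 2]" on one line instead of three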


def debug_main(args):
    """Dump the raw data from data files.

    Run this as::

        $ python -m coverage.data [FILE]

    """
    parser = optparse.OptionParser()
    parser.add_option(
        "-c", "--canonical", action="store_true",
        help="Sort data into a canonical order",
    )
    options, args = parser.parse_args(args)

    for filename in (args or [".coverage"]):
        print("--- {0} ------------------------------".format(filename))
        data = CoverageData._read_raw_data_file(filename)
        if options.canonical:
            canonicalize_json_data(data)
        print(pretty_data(data))


if __name__ == '__main__':
    # Ad-hoc: show the raw data in a data file.
    import pprint, sys
    covdata = CoverageData()
    if sys.argv[1:]:
        fname = sys.argv[1]
    else:
        fname = covdata.filename
    pprint.pprint(covdata.raw_data(fname))
    import sys
    debug_main(sys.argv[1:])

@@ -1,6 +1,15 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Control of and utilities for debugging."""

import inspect
import os
import sys

from coverage.misc import isolate_module

os = isolate_module(os)


# When debugging, it can be helpful to force some options, especially when
@@ -8,6 +17,9 @@ import os
# This is a list of forced debugging options.
FORCED_DEBUG = []

# A hack for debugging testing in sub-processes.
_TEST_NAME_FILE = ""  # "/tmp/covtest.txt"


class DebugControl(object):
    """Control and output for debugging."""
@@ -17,6 +29,9 @@ class DebugControl(object):
        self.options = options
        self.output = output

    def __repr__(self):
        return "<DebugControl options=%r output=%r>" % (self.options, self.output)

    def should(self, option):
        """Decide whether to output debug information in category `option`."""
        return (option in self.options or option in FORCED_DEBUG)
@@ -26,14 +41,22 @@ class DebugControl(object):
        if self.should('pid'):
            msg = "pid %5d: %s" % (os.getpid(), msg)
        self.output.write(msg+"\n")
        if self.should('callers'):
            dump_stack_frames(out=self.output)
        self.output.flush()

    def write_formatted_info(self, info):
    def write_formatted_info(self, header, info):
        """Write a sequence of (label,data) pairs nicely."""
        self.write(info_header(header))
        for line in info_formatter(info):
            self.write(" %s" % line)


def info_header(label):
    """Make a nice header string."""
    return "--{0:-<60s}".format(" "+label+" ")


def info_formatter(info):
    """Produce a sequence of formatted lines from info.

@@ -41,14 +64,51 @@ def info_formatter(info):
    nicely formatted, ready to print.

    """
    label_len = max([len(l) for l, _d in info])
    info = list(info)
    if not info:
        return
    label_len = max(len(l) for l, _d in info)
    for label, data in info:
        if data == []:
            data = "-none-"
        if isinstance(data, (list, tuple)):
        if isinstance(data, (list, set, tuple)):
            prefix = "%*s:" % (label_len, label)
            for e in data:
                yield "%*s %s" % (label_len+1, prefix, e)
                prefix = ""
        else:
            yield "%*s: %s" % (label_len, label, data)
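
[Editor's note: not part of the diff; a quick look at the generator above. Labels are invented; list values get the label once and are aligned under it.]

    for line in info_formatter([("version", "4.2"), ("plugins", ["a", "b"])]):
        print(" %s" % line)
    #  version: 4.2
    #  plugins: a
    #           b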


def short_stack(limit=None):  # pragma: debugging
    """Return a string summarizing the call stack.

    The string is multi-line, with one line per stack frame. Each line shows
    the function name, the file name, and the line number:

        ...
        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
        ...

    `limit` is the number of frames to include, defaulting to all of them.

    """
    stack = inspect.stack()[limit:0:-1]
    return "\n".join("%30s : %s @%d" % (t[3], t[1], t[2]) for t in stack)


def dump_stack_frames(limit=None, out=None):  # pragma: debugging
    """Print a summary of the stack to stdout, or some place else."""
    out = out or sys.stdout
    out.write(short_stack(limit=limit))
    out.write("\n")


def log(msg, stack=False):  # pragma: debugging
    """Write a log message as forcefully as possible."""
    with open("/tmp/covlog.txt", "a") as f:
        f.write("{pid}: {msg}\n".format(pid=os.getpid(), msg=msg))
        if stack:
            dump_stack_frames(out=f)

 32  python/helpers/coveragepy/coverage/env.py  Normal file
@@ -0,0 +1,32 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Determine facts about the environment."""

import os
import sys

# Operating systems.
WINDOWS = sys.platform == "win32"
LINUX = sys.platform == "linux2"

# Python implementations.
PYPY = '__pypy__' in sys.builtin_module_names

# Python versions.
PYVERSION = sys.version_info
PY2 = PYVERSION < (3, 0)
PY3 = PYVERSION >= (3, 0)

# Coverage.py specifics.

# Are we using the C-implemented trace function?
C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'

# Are we coverage-measuring ourselves?
METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''

# Are we running our test suite?
# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
# test-specific behavior like contracts.
TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
@@ -1,41 +1,73 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Execute files of Python code."""

import imp, marshal, os, sys
import marshal
import os
import sys
import types

from coverage.backward import exec_code_object, open_source
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
from coverage.backward import BUILTINS
from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
from coverage.misc import ExceptionDuringRun, NoCode, NoSource, isolate_module
from coverage.phystokens import compile_unicode
from coverage.python import get_python_source

os = isolate_module(os)


class DummyLoader(object):
    """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.

    Currently only implements the .fullname attribute
    """
    def __init__(self, fullname, *_args):
        self.fullname = fullname


if importlib_util_find_spec:
    def find_module(modulename):
        """Find the module named `modulename`.

        Returns the file path of the module, and the name of the enclosing
        package.
        """
        try:
            # In Py 2.x, the builtins were in __builtin__
            BUILTINS = sys.modules['__builtin__']
        except KeyError:
            # In Py 3.x, they're in builtins
            BUILTINS = sys.modules['builtins']


def rsplit1(s, sep):
    """The same as s.rsplit(sep, 1), but works in 2.3"""
    parts = s.split(sep)
    return sep.join(parts[:-1]), parts[-1]


def run_python_module(modulename, args):
    """Run a python module, as though with ``python -m name args...``.

    `modulename` is the name of the module, possibly a dot-separated name.
    `args` is the argument array to present as sys.argv, including the first
    element naming the module being executed.
            spec = importlib_util_find_spec(modulename)
        except ImportError as err:
            raise NoSource(str(err))
        if not spec:
            raise NoSource("No module named %r" % (modulename,))
        pathname = spec.origin
        packagename = spec.name
        if pathname.endswith("__init__.py") and not modulename.endswith("__init__"):
            mod_main = modulename + ".__main__"
            spec = importlib_util_find_spec(mod_main)
            if not spec:
                raise NoSource(
                    "No module named %s; "
                    "%r is a package and cannot be directly executed"
                    % (mod_main, modulename)
                )
            pathname = spec.origin
            packagename = spec.name
        packagename = packagename.rpartition(".")[0]
        return pathname, packagename
else:
    def find_module(modulename):
        """Find the module named `modulename`.

        Returns the file path of the module, and the name of the enclosing
        package.
        """
        openfile = None
        glo, loc = globals(), locals()
        try:
            try:
                # Search for the module - inside its parent package, if any - using
                # standard import mechanics.
                if '.' in modulename:
                    packagename, name = rsplit1(modulename, '.')
                    packagename, name = modulename.rsplit('.', 1)
                    package = __import__(packagename, glo, loc, ['__path__'])
                    searchpath = package.__path__
                else:
@@ -57,51 +89,92 @@ def run_python_module(modulename, args):
                package = __import__(packagename, glo, loc, ['__path__'])
                searchpath = package.__path__
                openfile, pathname, _ = imp.find_module(name, searchpath)
            except ImportError:
                _, err, _ = sys.exc_info()
            except ImportError as err:
                raise NoSource(str(err))
        finally:
            if openfile:
                openfile.close()

        # Finally, hand the file off to run_python_file for execution.
        return pathname, packagename


def run_python_module(modulename, args):
    """Run a Python module, as though with ``python -m name args...``.

    `modulename` is the name of the module, possibly a dot-separated name.
    `args` is the argument array to present as sys.argv, including the first
    element naming the module being executed.

    """
    pathname, packagename = find_module(modulename)

    pathname = os.path.abspath(pathname)
    args[0] = pathname
    run_python_file(pathname, args, package=packagename)
    run_python_file(pathname, args, package=packagename, modulename=modulename, path0="")


def run_python_file(filename, args, package=None):
    """Run a python file as if it were the main program on the command line.
def run_python_file(filename, args, package=None, modulename=None, path0=None):
    """Run a Python file as if it were the main program on the command line.

    `filename` is the path to the file to execute, it need not be a .py file.
    `args` is the argument array to present as sys.argv, including the first
    element naming the file being executed. `package` is the name of the
    enclosing package, if any.

    `modulename` is the name of the module the file was run as.

    `path0` is the value to put into sys.path[0]. If it's None, then this
    function will decide on a value.

    """
    if modulename is None and sys.version_info >= (3, 3):
        modulename = '__main__'

    # Create a module to serve as __main__
    old_main_mod = sys.modules['__main__']
    main_mod = imp.new_module('__main__')
    main_mod = types.ModuleType('__main__')
    sys.modules['__main__'] = main_mod
    main_mod.__file__ = filename
    if package:
        main_mod.__package__ = package
    if modulename:
        main_mod.__loader__ = DummyLoader(modulename)

    main_mod.__builtins__ = BUILTINS

    # Set sys.argv properly.
    old_argv = sys.argv
    sys.argv = args

    if os.path.isdir(filename):
        # Running a directory means running the __main__.py file in that
        # directory.
        my_path0 = filename

        for ext in [".py", ".pyc", ".pyo"]:
            try_filename = os.path.join(filename, "__main__" + ext)
            if os.path.exists(try_filename):
                filename = try_filename
                break
        else:
            raise NoSource("Can't find '__main__' module in '%s'" % filename)
    else:
        my_path0 = os.path.abspath(os.path.dirname(filename))

    # Set sys.path correctly.
    old_path0 = sys.path[0]
    sys.path[0] = path0 if path0 is not None else my_path0

    try:
        # Make a code object somehow.
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
        if filename.endswith((".pyc", ".pyo")):
            code = make_code_from_pyc(filename)
        else:
            code = make_code_from_py(filename)

        # Execute the code object.
        try:
            exec_code_object(code, main_mod.__dict__)
            exec(code, main_mod.__dict__)
        except SystemExit:
            # The user called sys.exit(). Just pass it along to the upper
            # layers, where it will be handled.
@@ -109,37 +182,34 @@ def run_python_file(filename, args, package=None):
        except:
            # Something went wrong while executing the user code.
            # Get the exc_info, and pack them into an exception that we can
            # throw up to the outer loop. We peel two layers off the traceback
            # throw up to the outer loop. We peel one layer off the traceback
            # so that the coverage.py code doesn't appear in the final printed
            # traceback.
            typ, err, tb = sys.exc_info()
            raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
    finally:
        # Restore the old __main__
        sys.modules['__main__'] = old_main_mod

        # Restore the old argv and path
            # PyPy3 weirdness. If I don't access __context__, then somehow it
            # is non-None when the exception is reported at the upper layer,
            # and a nested exception is shown to the user. This getattr fixes
            # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
            getattr(err, '__context__', None)

            raise ExceptionDuringRun(typ, err, tb.tb_next)
    finally:
        # Restore the old __main__, argv, and path.
        sys.modules['__main__'] = old_main_mod
        sys.argv = old_argv
        sys.path[0] = old_path0
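
[Editor's note: a hypothetical invocation of the runner above; the script path is invented.]

    from coverage.execfile import run_python_file

    # Runs script.py as if invoked as: python script.py --verbose
    run_python_file('script.py', ['script.py', '--verbose'])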


def make_code_from_py(filename):
    """Get source from `filename` and make a code object of it."""
    # Open the source file.
    try:
        source_file = open_source(filename)
    except IOError:
        raise NoSource("No file to run: %r" % filename)

    try:
        source = source_file.read()
    finally:
        source_file.close()

    # We have the source. `compile` still needs the last line to be clean,
    # so make sure it is, then compile a code object from it.
    if not source or source[-1] != '\n':
        source += '\n'
    code = compile(source, filename, "exec")
        source = get_python_source(filename)
    except (IOError, NoSource):
        raise NoSource("No file to run: '%s'" % filename)

    code = compile_unicode(source, filename, "exec")
    return code


@@ -148,13 +218,13 @@ def make_code_from_pyc(filename):
    try:
        fpyc = open(filename, "rb")
    except IOError:
        raise NoCode("No file to run: %r" % filename)
        raise NoCode("No file to run: '%s'" % filename)

    try:
    with fpyc:
        # First four bytes are a version-specific magic number. It has to
        # match or we won't run the file.
        magic = fpyc.read(4)
        if magic != imp.get_magic():
        if magic != PYC_MAGIC_NUMBER:
            raise NoCode("Bad magic number in .pyc file")

        # Skip the junk in the header that we don't need.
@@ -165,7 +235,5 @@ def make_code_from_pyc(filename):

        # The rest of the file is the code object we want.
        code = marshal.load(fpyc)
    finally:
        fpyc.close()

    return code

@@ -1,40 +1,63 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""File wrangling."""

from coverage.backward import to_string
from coverage.misc import CoverageException
import fnmatch, os, os.path, re, sys
import ntpath, posixpath
import fnmatch
import ntpath
import os
import os.path
import posixpath
import re
import sys

class FileLocator(object):
    """Understand how filenames work."""
from coverage import env
from coverage.backward import unicode_class
from coverage.misc import contract, CoverageException, join_regex, isolate_module


os = isolate_module(os)


def set_relative_directory():
    """Set the directory that `relative_filename` will be relative to."""
    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE

    def __init__(self):
        # The absolute path to our current directory.
        self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep)
    RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)

    # Cache of results of calling the canonical_filename() method, to
    # avoid duplicating work.
        self.canonical_filename_cache = {}
    CANONICAL_FILENAME_CACHE = {}

    def relative_filename(self, filename):

def relative_directory():
    """Return the directory that `relative_filename` is relative to."""
    return RELATIVE_DIR


@contract(returns='unicode')
def relative_filename(filename):
    """Return the relative form of `filename`.

    The file name will be relative to the current directory when the
    `FileLocator` was constructed.
    `set_relative_directory` was called.

    """
    fnorm = os.path.normcase(filename)
    if fnorm.startswith(self.relative_dir):
        filename = filename[len(self.relative_dir):]
        return filename
    if fnorm.startswith(RELATIVE_DIR):
        filename = filename[len(RELATIVE_DIR):]
    return unicode_filename(filename)

    def canonical_filename(self, filename):

@contract(returns='unicode')
def canonical_filename(filename):
    """Return a canonical file name for `filename`.

    An absolute path with no redundant components and normalized case.

    """
    if filename not in self.canonical_filename_cache:
    if filename not in CANONICAL_FILENAME_CACHE:
        if not os.path.isabs(filename):
            for path in [os.curdir] + sys.path:
                if path is None:
@@ -44,82 +67,97 @@ class FileLocator(object):
                    filename = f
                    break
        cf = abs_file(filename)
        self.canonical_filename_cache[filename] = cf
    return self.canonical_filename_cache[filename]
        CANONICAL_FILENAME_CACHE[filename] = cf
    return CANONICAL_FILENAME_CACHE[filename]

    def get_zip_data(self, filename):
        """Get data from `filename` if it is a zip file path.

        Returns the string data read from the zip file, or None if no zip file
        could be found or `filename` isn't in it. The data returned will be
        an empty string if the file is empty.
def flat_rootname(filename):
    """A base for a flat file name to correspond to this file.

    Useful for writing files about the code where you want all the files in
    the same directory, but need to differentiate same-named files from
    different directories.

    For example, the file a/b/c.py will return 'a_b_c_py'

    """
        import zipimport
        markers = ['.zip'+os.sep, '.egg'+os.sep]
        for marker in markers:
            if marker in filename:
                parts = filename.split(marker)
                try:
                    zi = zipimport.zipimporter(parts[0]+marker[:-1])
                except zipimport.ZipImportError:
                    continue
                try:
                    data = zi.get_data(parts[1])
                except IOError:
                    continue
                return to_string(data)
        return None
    name = ntpath.splitdrive(filename)[1]
    return re.sub(r"[\\/.:]", "_", name)
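
[Editor's note: a doctest-style check of the helper above; inputs invented.]

    >>> flat_rootname('a/b/c.py')
    'a_b_c_py'
    >>> flat_rootname(r'C:\proj\src\c.py')
    '_proj_src_c_py'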


if sys.platform == 'win32':
if env.WINDOWS:

    _ACTUAL_PATH_CACHE = {}
    _ACTUAL_PATH_LIST_CACHE = {}

    def actual_path(path):
        """Get the actual path of `path`, including the correct case."""
        if path in actual_path.cache:
            return actual_path.cache[path]
        if env.PY2 and isinstance(path, unicode_class):
            path = path.encode(sys.getfilesystemencoding())
        if path in _ACTUAL_PATH_CACHE:
            return _ACTUAL_PATH_CACHE[path]

        head, tail = os.path.split(path)
        if not tail:
            actpath = head
            # This means head is the drive spec: normalize it.
            actpath = head.upper()
        elif not head:
            actpath = tail
        else:
            head = actual_path(head)
            if head in actual_path.list_cache:
                files = actual_path.list_cache[head]
            if head in _ACTUAL_PATH_LIST_CACHE:
                files = _ACTUAL_PATH_LIST_CACHE[head]
            else:
                try:
                    files = os.listdir(head)
                except OSError:
                    files = []
                actual_path.list_cache[head] = files
                _ACTUAL_PATH_LIST_CACHE[head] = files
            normtail = os.path.normcase(tail)
            for f in files:
                if os.path.normcase(f) == normtail:
                    tail = f
                    break
            actpath = os.path.join(head, tail)
        actual_path.cache[path] = actpath
        _ACTUAL_PATH_CACHE[path] = actpath
        return actpath

    actual_path.cache = {}
    actual_path.list_cache = {}

else:
    def actual_path(filename):
        """The actual path for non-Windows platforms."""
        return filename


if env.PY2:
    @contract(returns='unicode')
    def unicode_filename(filename):
        """Return a Unicode version of `filename`."""
        if isinstance(filename, str):
            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
            filename = filename.decode(encoding, "replace")
        return filename
else:
    @contract(filename='unicode', returns='unicode')
    def unicode_filename(filename):
        """Return a Unicode version of `filename`."""
        return filename


@contract(returns='unicode')
def abs_file(filename):
    """Return the absolute normalized form of `filename`."""
    path = os.path.expandvars(os.path.expanduser(filename))
    path = os.path.abspath(os.path.realpath(path))
    path = actual_path(path)
    path = unicode_filename(path)
    return path


RELATIVE_DIR = None
CANONICAL_FILENAME_CACHE = None
set_relative_directory()


def isabs_anywhere(filename):
    """Is `filename` an absolute path on any OS?"""
    return ntpath.isabs(filename) or posixpath.isabs(filename)
@@ -137,7 +175,7 @@ def prep_patterns(patterns):
    """
    prepped = []
    for p in patterns or []:
        if p.startswith("*") or p.startswith("?"):
        if p.startswith(("*", "?")):
            prepped.append(p)
        else:
            prepped.append(abs_file(p))
@@ -147,7 +185,7 @@ def prep_patterns(patterns):
class TreeMatcher(object):
    """A matcher for files in a tree."""
    def __init__(self, directories):
        self.dirs = directories[:]
        self.dirs = list(directories)

    def __repr__(self):
        return "<TreeMatcher %r>" % self.dirs
@@ -156,10 +194,6 @@ class TreeMatcher(object):
        """A list of strings for displaying when dumping state."""
        return self.dirs

    def add(self, directory):
        """Add another directory to the list we match for."""
        self.dirs.append(directory)

    def match(self, fpath):
        """Does `fpath` indicate a file in one of our trees?"""
        for d in self.dirs:
@@ -173,10 +207,49 @@ class TreeMatcher(object):
        return False


class ModuleMatcher(object):
    """A matcher for modules in a tree."""
    def __init__(self, module_names):
        self.modules = list(module_names)

    def __repr__(self):
        return "<ModuleMatcher %r>" % (self.modules)

    def info(self):
        """A list of strings for displaying when dumping state."""
        return self.modules

    def match(self, module_name):
        """Does `module_name` indicate a module in one of our packages?"""
        if not module_name:
            return False

        for m in self.modules:
            if module_name.startswith(m):
                if module_name == m:
                    return True
                if module_name[len(m)] == '.':
                    # This is a module in the package
                    return True

        return False


class FnmatchMatcher(object):
    """A matcher for files by file name pattern."""
    def __init__(self, pats):
        self.pats = pats[:]
        # fnmatch is platform-specific. On Windows, it does the Windows thing
        # of treating / and \ as equivalent. But on other platforms, we need to
        # take care of that ourselves.
        fnpats = (fnmatch.translate(p) for p in pats)
        fnpats = (p.replace(r"\/", r"[\\/]") for p in fnpats)
        if env.WINDOWS:
            # Windows is also case-insensitive. BTW: the regex docs say that
            # flags like (?i) have to be at the beginning, but fnmatch puts
            # them at the end, and having two there seems to work fine.
            fnpats = (p + "(?i)" for p in fnpats)
        self.re = re.compile(join_regex(fnpats))

    def __repr__(self):
        return "<FnmatchMatcher %r>" % self.pats
@@ -187,10 +260,7 @@ class FnmatchMatcher(object):

    def match(self, fpath):
        """Does `fpath` match one of our file name patterns?"""
        for pat in self.pats:
            if fnmatch.fnmatch(fpath, pat):
                return True
        return False
        return self.re.match(fpath) is not None
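
[Editor's note: matcher behavior sketched with invented names; not part of the diff.]

    mm = ModuleMatcher(['pkg', 'tools.build'])
    mm.match('pkg.sub.mod')   # True: a module inside the 'pkg' package
    mm.match('pkgextra')      # False: a name prefix alone is not enough

    fm = FnmatchMatcher(['*/tests/*.py'])
    fm.match('/proj/tests/test_x.py')   # True, via the compiled regex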


def sep(s):
@@ -213,12 +283,9 @@ class PathAliases(object):
    A `PathAliases` object tracks a list of pattern/result pairs, and can
    map a path through those aliases to produce a unified path.

    `locator` is a FileLocator that is used to canonicalize the results.

    """
    def __init__(self, locator=None):
    def __init__(self):
        self.aliases = []
        self.locator = locator

    def add(self, pattern, result):
        """Add the `pattern`/`result` pair to the list of aliases.
@@ -245,11 +312,10 @@ class PathAliases(object):
        pattern = abs_file(pattern)
        pattern += pattern_sep

        # Make a regex from the pattern. fnmatch always adds a \Z or $ to
        # Make a regex from the pattern. fnmatch always adds a \Z to
        # match the whole string, which we don't want.
        regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
        if regex_pat.endswith("$"):
            regex_pat = regex_pat[:-1]

        # We want */a/b.py to match on Windows too, so change slash to match
        # either separator.
        regex_pat = regex_pat.replace(r"\/", r"[\\/]")
@@ -272,6 +338,10 @@ class PathAliases(object):
        The separator style in the result is made to match that of the result
        in the alias.

        Returns the mapped path. If a mapping has happened, this is a
        canonical path. If no mapping has happened, it is the original value
        of `path` unchanged.

        """
        for regex, result, pattern_sep, result_sep in self.aliases:
            m = regex.match(path)
@@ -279,8 +349,7 @@ class PathAliases(object):
                new = path.replace(m.group(0), result)
                if pattern_sep != result_sep:
                    new = new.replace(pattern_sep, result_sep)
                if self.locator:
                    new = self.locator.canonical_filename(new)
                new = canonical_filename(new)
                return new
        return path
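
[Editor's note: the mapping in action, with example paths only.]

    aliases = PathAliases()
    aliases.add('/jenkins/build/*/src', './src')
    aliases.map('/jenkins/build/1234/src/mod.py')  # canonical form of ./src/mod.py
    aliases.map('/somewhere/else/mod.py')          # returned unchanged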

@@ -291,7 +360,7 @@ def find_python_files(dirname):
    To be importable, the files have to be in a directory with a __init__.py,
    except for `dirname` itself, which isn't required to have one. The
    assumption is that `dirname` was specified directly, so the user knows
    best, but subdirectories are checked for a __init__.py to be sure we only
    best, but sub-directories are checked for a __init__.py to be sure we only
    find the importable files.

    """

@@ -1,57 +0,0 @@
"""Imposter encodings module that installs a coverage-style tracer.

This is NOT the encodings module; it is an imposter that sets up tracing
instrumentation and then replaces itself with the real encodings module.

If the directory that holds this file is placed first in the PYTHONPATH when
using "coverage" to run Python's tests, then this file will become the very
first module imported by the internals of Python 3. It installs a
coverage-compatible trace function that can watch Standard Library modules
execute from the very earliest stages of Python's own boot process. This fixes
a problem with coverage - that it starts too late to trace the coverage of many
of the most fundamental modules in the Standard Library.

"""

import sys

class FullCoverageTracer(object):
    def __init__(self):
        # `traces` is a list of trace events. Frames are tricky: the same
        # frame object is used for a whole scope, with new line numbers
        # written into it. So in one scope, all the frame objects are the
        # same object, and will eventually all point to the last line
        # executed. So we keep the line numbers alongside the frames.
        # The list looks like:
        #
        # traces = [
        #     ((frame, event, arg), lineno), ...
        # ]
        #
        self.traces = []

    def fullcoverage_trace(self, *args):
        frame, event, arg = args
        self.traces.append((args, frame.f_lineno))
        return self.fullcoverage_trace

sys.settrace(FullCoverageTracer().fullcoverage_trace)

# In coverage/files.py is actual_filename(), which uses glob.glob. I don't
# understand why, but that use of glob borks everything if fullcoverage is in
# effect. So here we make an ugly hail-mary pass to switch off glob.glob over
# there. This means when using fullcoverage, Windows path names will not be
# their actual case.

#sys.fullcoverage = True

# Finally, remove our own directory from sys.path; remove ourselves from
# sys.modules; and re-import "encodings", which will be the real package
# this time. Note that the delete from sys.modules dictionary has to
# happen last, since all of the symbols in this module will become None
# at that exact moment, including "sys".

parentdir = max(filter(__file__.startswith, sys.path), key=len)
sys.path.remove(parentdir)
del sys.modules['encodings']
import encodings
@@ -1,15 +1,24 @@
"""HTML reporting for Coverage."""
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

import os, re, shutil, sys
"""HTML reporting for coverage.py."""

import datetime
import json
import os
import shutil

import coverage
from coverage.backward import pickle
from coverage.misc import CoverageException, Hasher
from coverage.phystokens import source_token_lines, source_encoding
from coverage import env
from coverage.backward import iitems
from coverage.files import flat_rootname
from coverage.misc import CoverageException, Hasher, isolate_module
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.templite import Templite

os = isolate_module(os)


# Static files are looked for in a list of places.
STATIC_PATH = [
@@ -20,6 +29,7 @@ STATIC_PATH = [
    os.path.join(os.path.dirname(__file__), "htmlfiles"),
]


def data_filename(fname, pkgdir=""):
    """Return the path to a data file of ours.

@@ -27,36 +37,48 @@ def data_filename(fname, pkgdir=""):
    is returned.

    Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
    is provided, at that subdirectory.
    is provided, at that sub-directory.

    """
    tried = []
    for static_dir in STATIC_PATH:
        static_filename = os.path.join(static_dir, fname)
        if os.path.exists(static_filename):
            return static_filename
        else:
            tried.append(static_filename)
        if pkgdir:
            static_filename = os.path.join(static_dir, pkgdir, fname)
            if os.path.exists(static_filename):
                return static_filename
    raise CoverageException("Couldn't find static file %r" % fname)
            else:
                tried.append(static_filename)
    raise CoverageException(
        "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
    )


def data(fname):
def read_data(fname):
    """Return the contents of a data file of ours."""
    data_file = open(data_filename(fname))
    try:
    with open(data_filename(fname)) as data_file:
        return data_file.read()
    finally:
        data_file.close()


def write_html(fname, html):
    """Write `html` to `fname`, properly encoded."""
    with open(fname, "wb") as fout:
        fout.write(html.encode('ascii', 'xmlcharrefreplace'))


class HtmlReporter(Reporter):
    """HTML reporting."""

    # These files will be copied from the htmlfiles dir to the output dir.
    # These files will be copied from the htmlfiles directory to the output
    # directory.
    STATIC_FILES = [
        ("style.css", ""),
        ("jquery.min.js", "jquery"),
        ("jquery.debounce.min.js", "jquery-debounce"),
        ("jquery.hotkeys.js", "jquery-hotkeys"),
        ("jquery.isonscreen.js", "jquery-isonscreen"),
        ("jquery.tablesorter.min.js", "jquery-tablesorter"),
@@ -68,23 +90,26 @@ class HtmlReporter(Reporter):
    def __init__(self, cov, config):
        super(HtmlReporter, self).__init__(cov, config)
        self.directory = None
        title = self.config.html_title
        if env.PY2:
            title = title.decode("utf8")
        self.template_globals = {
            'escape': escape,
            'title': self.config.html_title,
            'pair': pair,
            'title': title,
            '__url__': coverage.__url__,
            '__version__': coverage.__version__,
        }
        self.source_tmpl = Templite(
            data("pyfile.html"), self.template_globals
        )
        self.source_tmpl = Templite(read_data("pyfile.html"), self.template_globals)

        self.coverage = cov

        self.files = []
        self.arcs = self.coverage.data.has_arcs()
        self.has_arcs = self.coverage.data.has_arcs()
        self.status = HtmlStatus()
        self.extra_css = None
        self.totals = Numbers()
        self.time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')

    def report(self, morfs):
        """Generate an HTML report for `morfs`.
@@ -100,7 +125,7 @@ class HtmlReporter(Reporter):
        # Check that this run used the same settings as the last run.
        m = Hasher()
        m.update(self.config)
        these_settings = m.digest()
        these_settings = m.hexdigest()
        if self.status.settings_hash() != these_settings:
            self.status.reset()
            self.status.set_settings_hash(these_settings)
@@ -119,8 +144,7 @@ class HtmlReporter(Reporter):
        self.index_file()

        self.make_local_static_report_files()

        return self.totals.pc_covered
        return self.totals.n_statements and self.totals.pc_covered

    def make_local_static_report_files(self):
        """Make local instances of static files for HTML report."""
@@ -138,54 +162,34 @@ class HtmlReporter(Reporter):
            os.path.join(self.directory, self.extra_css)
        )

    def write_html(self, fname, html):
        """Write `html` to `fname`, properly encoded."""
        fout = open(fname, "wb")
        try:
            fout.write(html.encode('ascii', 'xmlcharrefreplace'))
        finally:
            fout.close()

    def file_hash(self, source, cu):
    def file_hash(self, source, fr):
        """Compute a hash that changes if the file needs to be re-reported."""
        m = Hasher()
        m.update(source)
        self.coverage.data.add_to_hash(cu.filename, m)
        return m.digest()
        self.coverage.data.add_to_hash(fr.filename, m)
        return m.hexdigest()

    def html_file(self, cu, analysis):
    def html_file(self, fr, analysis):
        """Generate an HTML file for one source file."""
        source_file = cu.source_file()
        try:
            source = source_file.read()
        finally:
            source_file.close()
        source = fr.source()

        # Find out if the file on disk is already correct.
        flat_rootname = cu.flat_rootname()
        this_hash = self.file_hash(source, cu)
        that_hash = self.status.file_hash(flat_rootname)
        rootname = flat_rootname(fr.relative_filename())
        this_hash = self.file_hash(source.encode('utf-8'), fr)
        that_hash = self.status.file_hash(rootname)
        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            self.files.append(self.status.index_info(flat_rootname))
            self.files.append(self.status.index_info(rootname))
            return

        self.status.set_file_hash(flat_rootname, this_hash)

        # If need be, determine the encoding of the source file. We use it
        # later to properly write the HTML.
        if sys.version_info < (3, 0):
            encoding = source_encoding(source)
            # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
            if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
                source = source[3:]
                encoding = "utf-8"
        self.status.set_file_hash(rootname, this_hash)

        # Get the numbers for this file.
        nums = analysis.numbers

        if self.arcs:
        if self.has_arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()
            arcs_executed = analysis.arcs_executed()

        # These classes determine which lines are highlighted by default.
        c_run = "run hide_run"
|
||||
@@ -195,35 +199,44 @@ class HtmlReporter(Reporter):
|
||||
|
||||
lines = []
|
||||
|
||||
for lineno, line in enumerate(source_token_lines(source)):
|
||||
lineno += 1 # 1-based line numbers.
|
||||
for lineno, line in enumerate(fr.source_token_lines(), start=1):
|
||||
# Figure out how to mark this line.
|
||||
line_class = []
|
||||
annotate_html = ""
|
||||
annotate_title = ""
|
||||
annotate_long = ""
|
||||
if lineno in analysis.statements:
|
||||
line_class.append("stm")
|
||||
if lineno in analysis.excluded:
|
||||
line_class.append(c_exc)
|
||||
elif lineno in analysis.missing:
|
||||
line_class.append(c_mis)
|
||||
elif self.arcs and lineno in missing_branch_arcs:
|
||||
elif self.has_arcs and lineno in missing_branch_arcs:
|
||||
line_class.append(c_par)
|
||||
annlines = []
|
||||
shorts = []
|
||||
longs = []
|
||||
for b in missing_branch_arcs[lineno]:
|
||||
if b < 0:
|
||||
annlines.append("exit")
|
||||
shorts.append("exit")
|
||||
else:
|
||||
annlines.append(str(b))
|
||||
annotate_html = " ".join(annlines)
|
||||
if len(annlines) > 1:
|
||||
annotate_title = "no jumps to these line numbers"
|
||||
elif len(annlines) == 1:
|
||||
annotate_title = "no jump to this line number"
|
||||
shorts.append(b)
|
||||
longs.append(fr.missing_arc_description(lineno, b, arcs_executed))
|
||||
# 202F is NARROW NO-BREAK SPACE.
|
||||
# 219B is RIGHTWARDS ARROW WITH STROKE.
|
||||
short_fmt = "%s ↛ %s"
|
||||
annotate_html = ", ".join(short_fmt % (lineno, d) for d in shorts)
|
||||
|
||||
if len(longs) == 1:
|
||||
annotate_long = longs[0]
|
||||
else:
|
||||
annotate_long = "%d missed branches: %s" % (
|
||||
len(longs),
|
||||
", ".join("%d) %s" % (num, ann_long)
|
||||
for num, ann_long in enumerate(longs, start=1)),
|
||||
)
|
||||
elif lineno in analysis.statements:
|
||||
line_class.append(c_run)
|
||||
|
||||
# Build the HTML for the line
|
||||
# Build the HTML for the line.
|
||||
html = []
|
||||
for tok_type, tok_text in line:
|
||||
if tok_type == "ws":
|
||||
@@ -231,7 +244,7 @@ class HtmlReporter(Reporter):
|
||||
else:
|
||||
tok_html = escape(tok_text) or '&nbsp;'
|
||||
html.append(
|
||||
"<span class='%s'>%s</span>" % (tok_type, tok_html)
|
||||
'<span class="%s">%s</span>' % (tok_type, tok_html)
|
||||
)
|
||||
|
||||
lines.append({
|
||||
@@ -239,53 +252,51 @@ class HtmlReporter(Reporter):
|
||||
'number': lineno,
|
||||
'class': ' '.join(line_class) or "pln",
|
||||
'annotate': annotate_html,
|
||||
'annotate_title': annotate_title,
|
||||
'annotate_long': annotate_long,
|
||||
})
|
||||
|
||||
# Write the HTML page for this file.
|
||||
html = spaceless(self.source_tmpl.render({
|
||||
'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
|
||||
'arcs': self.arcs, 'extra_css': self.extra_css,
|
||||
'cu': cu, 'nums': nums, 'lines': lines,
|
||||
}))
|
||||
html = self.source_tmpl.render({
|
||||
'c_exc': c_exc,
|
||||
'c_mis': c_mis,
|
||||
'c_par': c_par,
|
||||
'c_run': c_run,
|
||||
'has_arcs': self.has_arcs,
|
||||
'extra_css': self.extra_css,
|
||||
'fr': fr,
|
||||
'nums': nums,
|
||||
'lines': lines,
|
||||
'time_stamp': self.time_stamp,
|
||||
})
|
||||
|
||||
if sys.version_info < (3, 0):
|
||||
html = html.decode(encoding)
|
||||
|
||||
html_filename = flat_rootname + ".html"
|
||||
html_filename = rootname + ".html"
|
||||
html_path = os.path.join(self.directory, html_filename)
|
||||
self.write_html(html_path, html)
|
||||
write_html(html_path, html)
|
||||
|
||||
# Save this file's information for the index file.
|
||||
index_info = {
|
||||
'nums': nums,
|
||||
'html_filename': html_filename,
|
||||
'name': cu.name,
|
||||
'relative_filename': fr.relative_filename(),
|
||||
}
|
||||
self.files.append(index_info)
|
||||
self.status.set_index_info(flat_rootname, index_info)
|
||||
self.status.set_index_info(rootname, index_info)
|
||||
|
||||
def index_file(self):
|
||||
"""Write the index.html file for this report."""
|
||||
index_tmpl = Templite(
|
||||
data("index.html"), self.template_globals
|
||||
)
|
||||
index_tmpl = Templite(read_data("index.html"), self.template_globals)
|
||||
|
||||
self.totals = sum([f['nums'] for f in self.files])
|
||||
self.totals = sum(f['nums'] for f in self.files)
|
||||
|
||||
html = index_tmpl.render({
|
||||
'arcs': self.arcs,
|
||||
'has_arcs': self.has_arcs,
|
||||
'extra_css': self.extra_css,
|
||||
'files': self.files,
|
||||
'totals': self.totals,
|
||||
'time_stamp': self.time_stamp,
|
||||
})
|
||||
|
||||
if sys.version_info < (3, 0):
|
||||
html = html.decode("utf-8")
|
||||
self.write_html(
|
||||
os.path.join(self.directory, "index.html"),
|
||||
html
|
||||
)
|
||||
write_html(os.path.join(self.directory, "index.html"), html)
|
||||
|
||||
# Write the latest hashes for next time.
|
||||
self.status.write(self.directory)
|
||||
@@ -294,9 +305,37 @@ class HtmlReporter(Reporter):
|
||||
class HtmlStatus(object):
|
||||
"""The status information we keep to support incremental reporting."""
|
||||
|
||||
STATUS_FILE = "status.dat"
|
||||
STATUS_FILE = "status.json"
|
||||
STATUS_FORMAT = 1
|
||||
|
||||
# pylint: disable=wrong-spelling-in-comment,useless-suppression
|
||||
# The data looks like:
|
||||
#
|
||||
# {
|
||||
# 'format': 1,
|
||||
# 'settings': '540ee119c15d52a68a53fe6f0897346d',
|
||||
# 'version': '4.0a1',
|
||||
# 'files': {
|
||||
# 'cogapp___init__': {
|
||||
# 'hash': 'e45581a5b48f879f301c0f30bf77a50c',
|
||||
# 'index': {
|
||||
# 'html_filename': 'cogapp___init__.html',
|
||||
# 'name': 'cogapp/__init__',
|
||||
# 'nums': <coverage.results.Numbers object at 0x10ab7ed0>,
|
||||
# }
|
||||
# },
|
||||
# ...
|
||||
# 'cogapp_whiteutils': {
|
||||
# 'hash': '8504bb427fc488c4176809ded0277d51',
|
||||
# 'index': {
|
||||
# 'html_filename': 'cogapp_whiteutils.html',
|
||||
# 'name': 'cogapp/whiteutils',
|
||||
# 'nums': <coverage.results.Numbers object at 0x10ab7d90>,
|
||||
# }
|
||||
# },
|
||||
# },
|
||||
# }
|
||||
|
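The 'nums' values are objects, so the read() and write() methods below round-trip them through their constructor arguments rather than storing them in JSON directly. A minimal sketch of that pattern, using a hypothetical two-field stand-in for coverage.results.Numbers (the real class has more fields):

import json

class Numbers(object):
    """Hypothetical stand-in: coverage.results.Numbers has more fields."""
    def __init__(self, n_statements=0, n_missing=0):
        self.n_statements = n_statements
        self.n_missing = n_missing

    def init_args(self):
        # The argument list that recreates this object, JSON-friendly.
        return [self.n_statements, self.n_missing]

nums = Numbers(10, 2)
text = json.dumps({'nums': nums.init_args()})   # written out as a plain list
restored = Numbers(*json.loads(text)['nums'])   # rebuilt on the way back in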
||||
def __init__(self):
|
||||
self.reset()
|
||||
|
||||
@@ -310,11 +349,8 @@ class HtmlStatus(object):
|
||||
usable = False
|
||||
try:
|
||||
status_file = os.path.join(directory, self.STATUS_FILE)
|
||||
fstatus = open(status_file, "rb")
|
||||
try:
|
||||
status = pickle.load(fstatus)
|
||||
finally:
|
||||
fstatus.close()
|
||||
with open(status_file, "r") as fstatus:
|
||||
status = json.load(fstatus)
|
||||
except (IOError, ValueError):
|
||||
usable = False
|
||||
else:
|
||||
@@ -325,7 +361,10 @@ class HtmlStatus(object):
|
||||
usable = False
|
||||
|
||||
if usable:
|
||||
self.files = status['files']
|
||||
self.files = {}
|
||||
for filename, fileinfo in iitems(status['files']):
|
||||
fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
|
||||
self.files[filename] = fileinfo
|
||||
self.settings = status['settings']
|
||||
else:
|
||||
self.reset()
|
||||
@@ -333,17 +372,26 @@ class HtmlStatus(object):
|
||||
def write(self, directory):
|
||||
"""Write the current status to `directory`."""
|
||||
status_file = os.path.join(directory, self.STATUS_FILE)
|
||||
files = {}
|
||||
for filename, fileinfo in iitems(self.files):
|
||||
fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
|
||||
files[filename] = fileinfo
|
||||
|
||||
status = {
|
||||
'format': self.STATUS_FORMAT,
|
||||
'version': coverage.__version__,
|
||||
'settings': self.settings,
|
||||
'files': self.files,
|
||||
'files': files,
|
||||
}
|
||||
fout = open(status_file, "wb")
|
||||
try:
|
||||
pickle.dump(status, fout)
|
||||
finally:
|
||||
fout.close()
|
||||
with open(status_file, "w") as fout:
|
||||
json.dump(status, fout)
|
||||
|
||||
# Older versions of ShiningPanda look for the old name, status.dat.
|
||||
# Accommodate them if we are running under Jenkins.
|
||||
# https://issues.jenkins-ci.org/browse/JENKINS-28428
|
||||
if "JENKINS_URL" in os.environ:
|
||||
with open(os.path.join(directory, "status.dat"), "w") as dat:
|
||||
dat.write("https://issues.jenkins-ci.org/browse/JENKINS-28428\n")
|
||||
|
||||
def settings_hash(self):
|
||||
"""Get the hash of the coverage.py settings."""
|
||||
@@ -373,24 +421,15 @@ class HtmlStatus(object):
|
||||
# Helpers for templates and generating HTML
|
||||
|
||||
def escape(t):
|
||||
"""HTML-escape the text in `t`."""
|
||||
return (t
|
||||
# Convert HTML special chars into HTML entities.
|
||||
.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
|
||||
.replace("'", "&#39;").replace('"', "&quot;")
|
||||
# Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
|
||||
.replace("  ", "&nbsp; ")
|
||||
# To deal with odd-length runs, convert the final pair of spaces
|
||||
# so that "....." -> "&nbsp;.&nbsp;&nbsp;."
|
||||
.replace("  ", " &nbsp;")
|
||||
)
|
||||
"""HTML-escape the text in `t`.
|
||||
|
||||
def spaceless(html):
|
||||
"""Squeeze out some annoying extra space from an HTML string.
|
||||
|
||||
Nicely-formatted templates mean lots of extra space in the result.
|
||||
Get rid of some.
|
||||
This is only suitable for HTML text, not attributes.
|
||||
|
||||
"""
|
||||
html = re.sub(r">\s+<p ", ">\n<p ", html)
|
||||
return html
|
||||
# Convert HTML special chars into HTML entities.
|
||||
return t.replace("&", "&").replace("<", "<")
|
||||
|
||||
|
||||
def pair(ratio):
|
||||
"""Format a pair of numbers so JavaScript can read them in an attribute."""
|
||||
return "%s %s" % ratio
|
||||
|
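For example (assuming the pair() above), a covered/total ratio becomes a space-separated attribute value, which the filter code in coverage_html.js splits back apart with ratio.split(" "):

print(pair((45, 60)))   # "45 60", emitted as data-ratio="45 60"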
||||
@@ -1,10 +1,13 @@
|
||||
// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
// For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
|
||||
|
||||
// Coverage.py HTML report browser code.
|
||||
/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
|
||||
/*global coverage: true, document, window, $ */
|
||||
|
||||
coverage = {};
|
||||
|
||||
// Find all the elements with shortkey_* class, and use them to assign a shotrtcut key.
|
||||
// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
|
||||
coverage.assign_shortkeys = function () {
|
||||
$("*[class*='shortkey_']").each(function (i, e) {
|
||||
$.each($(e).attr("class").split(" "), function (i, c) {
|
||||
@@ -35,6 +38,135 @@ coverage.wire_up_help_panel = function () {
|
||||
});
|
||||
};
|
||||
|
||||
// Create the events for the filter box.
|
||||
coverage.wire_up_filter = function () {
|
||||
// Cache elements.
|
||||
var table = $("table.index");
|
||||
var table_rows = table.find("tbody tr");
|
||||
var table_row_names = table_rows.find("td.name a");
|
||||
var no_rows = $("#no_rows");
|
||||
|
||||
// Create a duplicate table footer that we can modify with dynamic summed values.
|
||||
var table_footer = $("table.index tfoot tr");
|
||||
var table_dynamic_footer = table_footer.clone();
|
||||
table_dynamic_footer.attr('class', 'total_dynamic hidden');
|
||||
table_footer.after(table_dynamic_footer);
|
||||
|
||||
// Observe filter keyevents.
|
||||
$("#filter").on("keyup change", $.debounce(150, function (event) {
|
||||
var filter_value = $(this).val();
|
||||
|
||||
if (filter_value === "") {
|
||||
// Filter box is empty, remove all filtering.
|
||||
table_rows.removeClass("hidden");
|
||||
|
||||
// Show standard footer, hide dynamic footer.
|
||||
table_footer.removeClass("hidden");
|
||||
table_dynamic_footer.addClass("hidden");
|
||||
|
||||
// Hide placeholder, show table.
|
||||
if (no_rows.length > 0) {
|
||||
no_rows.hide();
|
||||
}
|
||||
table.show();
|
||||
|
||||
}
|
||||
else {
|
||||
// Filter table items by value.
|
||||
var hidden = 0;
|
||||
var shown = 0;
|
||||
|
||||
// Hide / show elements.
|
||||
$.each(table_row_names, function () {
|
||||
var element = $(this).parents("tr");
|
||||
|
||||
if ($(this).text().indexOf(filter_value) === -1) {
|
||||
// hide
|
||||
element.addClass("hidden");
|
||||
hidden++;
|
||||
}
|
||||
else {
|
||||
// show
|
||||
element.removeClass("hidden");
|
||||
shown++;
|
||||
}
|
||||
});
|
||||
|
||||
// Show placeholder if no rows will be displayed.
|
||||
if (no_rows.length > 0) {
|
||||
if (shown === 0) {
|
||||
// Show placeholder, hide table.
|
||||
no_rows.show();
|
||||
table.hide();
|
||||
}
|
||||
else {
|
||||
// Hide placeholder, show table.
|
||||
no_rows.hide();
|
||||
table.show();
|
||||
}
|
||||
}
|
||||
|
||||
// Manage dynamic header:
|
||||
if (hidden > 0) {
|
||||
// Calculate new dynamic sum values based on visible rows.
|
||||
for (var column = 2; column < 20; column++) {
|
||||
// Calculate summed value.
|
||||
var cells = table_rows.find('td:nth-child(' + column + ')');
|
||||
if (!cells.length) {
|
||||
// No more columns...!
|
||||
break;
|
||||
}
|
||||
|
||||
var sum = 0, numer = 0, denom = 0;
|
||||
$.each(cells.filter(':visible'), function () {
|
||||
var ratio = $(this).data("ratio");
|
||||
if (ratio) {
|
||||
var splitted = ratio.split(" ");
|
||||
numer += parseInt(splitted[0], 10);
|
||||
denom += parseInt(splitted[1], 10);
|
||||
}
|
||||
else {
|
||||
sum += parseInt(this.innerHTML, 10);
|
||||
}
|
||||
});
|
||||
|
||||
// Get footer cell element.
|
||||
var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')');
|
||||
|
||||
// Set value into dynamic footer cell element.
|
||||
if (cells[0].innerHTML.indexOf('%') > -1) {
|
||||
// Percentage columns use the numerator and denominator,
|
||||
// and adapt to the number of decimal places.
|
||||
var match = /\.([0-9]+)/.exec(cells[0].innerHTML);
|
||||
var places = 0;
|
||||
if (match) {
|
||||
places = match[1].length;
|
||||
}
|
||||
var pct = numer * 100 / denom;
|
||||
footer_cell.text(pct.toFixed(places) + '%');
|
||||
}
|
||||
else {
|
||||
footer_cell.text(sum);
|
||||
}
|
||||
}
|
||||
|
||||
// Hide standard footer, show dynamic footer.
|
||||
table_footer.addClass("hidden");
|
||||
table_dynamic_footer.removeClass("hidden");
|
||||
}
|
||||
else {
|
||||
// Show standard footer, hide dynamic footer.
|
||||
table_footer.removeClass("hidden");
|
||||
table_dynamic_footer.addClass("hidden");
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
// Trigger change event on setup, to force filter on page refresh
|
||||
// (filter value may still be present).
|
||||
$("#filter").trigger("change");
|
||||
};
|
||||
|
||||
// Loaded on index.html
|
||||
coverage.index_ready = function ($) {
|
||||
// Look for a cookie containing previous sort settings:
|
||||
@@ -95,6 +227,7 @@ coverage.index_ready = function ($) {
|
||||
|
||||
coverage.assign_shortkeys();
|
||||
coverage.wire_up_help_panel();
|
||||
coverage.wire_up_filter();
|
||||
|
||||
// Watch for page unload events so we can save the final sort settings:
|
||||
$(window).unload(function () {
|
||||
@@ -129,6 +262,11 @@ coverage.pyfile_ready = function ($) {
|
||||
|
||||
coverage.assign_shortkeys();
|
||||
coverage.wire_up_help_panel();
|
||||
|
||||
coverage.init_scroll_markers();
|
||||
|
||||
// Rebuild the scroll markers when the window height changes.
|
||||
$(window).resize(coverage.resize_scroll_markers);
|
||||
};
|
||||
|
||||
coverage.toggle_lines = function (btn, cls) {
|
||||
@@ -187,12 +325,13 @@ coverage.to_next_chunk = function () {
|
||||
|
||||
// Find the start of the next colored chunk.
|
||||
var probe = c.sel_end;
|
||||
var color, probe_line;
|
||||
while (true) {
|
||||
var probe_line = c.line_elt(probe);
|
||||
probe_line = c.line_elt(probe);
|
||||
if (probe_line.length === 0) {
|
||||
return;
|
||||
}
|
||||
var color = probe_line.css("background-color");
|
||||
color = probe_line.css("background-color");
|
||||
if (!c.is_transparent(color)) {
|
||||
break;
|
||||
}
|
||||
@@ -374,3 +513,72 @@ coverage.scroll_window = function (to_pos) {
|
||||
coverage.finish_scrolling = function () {
|
||||
$("html,body").stop(true, true);
|
||||
};
|
||||
|
||||
coverage.init_scroll_markers = function () {
|
||||
var c = coverage;
|
||||
// Init some variables
|
||||
c.lines_len = $('td.text p').length;
|
||||
c.body_h = $('body').height();
|
||||
c.header_h = $('div#header').height();
|
||||
c.missed_lines = $('td.text p.mis, td.text p.par');
|
||||
|
||||
// Build html
|
||||
c.resize_scroll_markers();
|
||||
};
|
||||
|
||||
coverage.resize_scroll_markers = function () {
|
||||
var c = coverage,
|
||||
min_line_height = 3,
|
||||
max_line_height = 10,
|
||||
visible_window_h = $(window).height();
|
||||
|
||||
$('#scroll_marker').remove();
|
||||
// Don't build markers if the window has no scroll bar.
|
||||
if (c.body_h <= visible_window_h) {
|
||||
return;
|
||||
}
|
||||
|
||||
$("body").append("<div id='scroll_marker'> </div>");
|
||||
var scroll_marker = $('#scroll_marker'),
|
||||
marker_scale = scroll_marker.height() / c.body_h,
|
||||
line_height = scroll_marker.height() / c.lines_len;
|
||||
|
||||
// Line height must be between the extremes.
|
||||
if (line_height > min_line_height) {
|
||||
if (line_height > max_line_height) {
|
||||
line_height = max_line_height;
|
||||
}
|
||||
}
|
||||
else {
|
||||
line_height = min_line_height;
|
||||
}
|
||||
|
||||
var previous_line = -99,
|
||||
last_mark,
|
||||
last_top;
|
||||
|
||||
c.missed_lines.each(function () {
|
||||
var line_top = Math.round($(this).offset().top * marker_scale),
|
||||
id_name = $(this).attr('id'),
|
||||
line_number = parseInt(id_name.substring(1, id_name.length));
|
||||
|
||||
if (line_number === previous_line + 1) {
|
||||
// If this line continues a solid block of missed lines, just make the previous mark taller.
|
||||
last_mark.css({
|
||||
'height': line_top + line_height - last_top
|
||||
});
|
||||
}
|
||||
else {
|
||||
// Add colored line in scroll_marker block.
|
||||
scroll_marker.append('<div id="m' + line_number + '" class="marker"></div>');
|
||||
last_mark = $('#m' + line_number);
|
||||
last_mark.css({
|
||||
'height': line_height,
|
||||
'top': line_top
|
||||
});
|
||||
last_top = line_top;
|
||||
}
|
||||
|
||||
previous_line = line_number;
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,101 +1,115 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
|
||||
{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
|
||||
{# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt #}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
|
||||
<title>{{ title|escape }}</title>
|
||||
<link rel='stylesheet' href='style.css' type='text/css'>
|
||||
<link rel="stylesheet" href="style.css" type="text/css">
|
||||
{% if extra_css %}
|
||||
<link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
|
||||
<link rel="stylesheet" href="{{ extra_css }}" type="text/css">
|
||||
{% endif %}
|
||||
<script type='text/javascript' src='jquery.min.js'></script>
|
||||
<script type='text/javascript' src='jquery.tablesorter.min.js'></script>
|
||||
<script type='text/javascript' src='jquery.hotkeys.js'></script>
|
||||
<script type='text/javascript' src='coverage_html.js'></script>
|
||||
<script type='text/javascript' charset='utf-8'>
|
||||
<script type="text/javascript" src="jquery.min.js"></script>
|
||||
<script type="text/javascript" src="jquery.debounce.min.js"></script>
|
||||
<script type="text/javascript" src="jquery.tablesorter.min.js"></script>
|
||||
<script type="text/javascript" src="jquery.hotkeys.js"></script>
|
||||
<script type="text/javascript" src="coverage_html.js"></script>
|
||||
<script type="text/javascript">
|
||||
jQuery(document).ready(coverage.index_ready);
|
||||
</script>
|
||||
</head>
|
||||
<body id='indexfile'>
|
||||
<body class="indexfile">
|
||||
|
||||
<div id='header'>
|
||||
<div class='content'>
|
||||
<div id="header">
|
||||
<div class="content">
|
||||
<h1>{{ title|escape }}:
|
||||
<span class='pc_cov'>{{totals.pc_covered_str}}%</span>
|
||||
<span class="pc_cov">{{totals.pc_covered_str}}%</span>
|
||||
</h1>
|
||||
<img id='keyboard_icon' src='keybd_closed.png'>
|
||||
|
||||
<img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
|
||||
|
||||
<form id="filter_container">
|
||||
<input id="filter" type="text" value="" placeholder="filter..." />
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class='help_panel'>
|
||||
<img id='panel_icon' src='keybd_open.png'>
|
||||
<p class='legend'>Hot-keys on this page</p>
|
||||
<div class="help_panel">
|
||||
<img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
|
||||
<p class="legend">Hot-keys on this page</p>
|
||||
<div>
|
||||
<p class='keyhelp'>
|
||||
<span class='key'>n</span>
|
||||
<span class='key'>s</span>
|
||||
<span class='key'>m</span>
|
||||
<span class='key'>x</span>
|
||||
{% if arcs %}
|
||||
<span class='key'>b</span>
|
||||
<span class='key'>p</span>
|
||||
<p class="keyhelp">
|
||||
<span class="key">n</span>
|
||||
<span class="key">s</span>
|
||||
<span class="key">m</span>
|
||||
<span class="key">x</span>
|
||||
{% if has_arcs %}
|
||||
<span class="key">b</span>
|
||||
<span class="key">p</span>
|
||||
{% endif %}
|
||||
<span class='key'>c</span> change column sorting
|
||||
<span class="key">c</span> change column sorting
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id='index'>
|
||||
<table class='index'>
|
||||
<div id="index">
|
||||
<table class="index">
|
||||
<thead>
|
||||
{# The title='' attr doesn't work in Safari. #}
|
||||
<tr class='tablehead' title='Click to sort'>
|
||||
<th class='name left headerSortDown shortkey_n'>Module</th>
|
||||
<th class='shortkey_s'>statements</th>
|
||||
<th class='shortkey_m'>missing</th>
|
||||
<th class='shortkey_x'>excluded</th>
|
||||
{% if arcs %}
|
||||
<th class='shortkey_b'>branches</th>
|
||||
<th class='shortkey_p'>partial</th>
|
||||
{# The title="" attr doesn"t work in Safari. #}
|
||||
<tr class="tablehead" title="Click to sort">
|
||||
<th class="name left headerSortDown shortkey_n">Module</th>
|
||||
<th class="shortkey_s">statements</th>
|
||||
<th class="shortkey_m">missing</th>
|
||||
<th class="shortkey_x">excluded</th>
|
||||
{% if has_arcs %}
|
||||
<th class="shortkey_b">branches</th>
|
||||
<th class="shortkey_p">partial</th>
|
||||
{% endif %}
|
||||
<th class='right shortkey_c'>coverage</th>
|
||||
<th class="right shortkey_c">coverage</th>
|
||||
</tr>
|
||||
</thead>
|
||||
{# HTML syntax requires thead, tfoot, tbody #}
|
||||
<tfoot>
|
||||
<tr class='total'>
|
||||
<td class='name left'>Total</td>
|
||||
<tr class="total">
|
||||
<td class="name left">Total</td>
|
||||
<td>{{totals.n_statements}}</td>
|
||||
<td>{{totals.n_missing}}</td>
|
||||
<td>{{totals.n_excluded}}</td>
|
||||
{% if arcs %}
|
||||
{% if has_arcs %}
|
||||
<td>{{totals.n_branches}}</td>
|
||||
<td>{{totals.n_partial_branches}}</td>
|
||||
{% endif %}
|
||||
<td class='right'>{{totals.pc_covered_str}}%</td>
|
||||
<td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
|
||||
</tr>
|
||||
</tfoot>
|
||||
<tbody>
|
||||
{% for file in files %}
|
||||
<tr class='file'>
|
||||
<td class='name left'><a href='{{file.html_filename}}'>{{file.name}}</a></td>
|
||||
<tr class="file">
|
||||
<td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
|
||||
<td>{{file.nums.n_statements}}</td>
|
||||
<td>{{file.nums.n_missing}}</td>
|
||||
<td>{{file.nums.n_excluded}}</td>
|
||||
{% if arcs %}
|
||||
{% if has_arcs %}
|
||||
<td>{{file.nums.n_branches}}</td>
|
||||
<td>{{file.nums.n_partial_branches}}</td>
|
||||
{% endif %}
|
||||
<td class='right'>{{file.nums.pc_covered_str}}%</td>
|
||||
<td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
<p id="no_rows">
|
||||
No items found using the specified filter.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div id='footer'>
|
||||
<div class='content'>
|
||||
<div id="footer">
|
||||
<div class="content">
|
||||
<p>
|
||||
<a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a>
|
||||
<a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
|
||||
created at {{ time_stamp }}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
9
python/helpers/coveragepy/coverage/htmlfiles/jquery.debounce.min.js
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
/*
|
||||
* jQuery throttle / debounce - v1.1 - 3/7/2010
|
||||
* http://benalman.com/projects/jquery-throttle-debounce-plugin/
|
||||
*
|
||||
* Copyright (c) 2010 "Cowboy" Ben Alman
|
||||
* Dual licensed under the MIT and GPL licenses.
|
||||
* http://benalman.com/about/license/
|
||||
*/
|
||||
(function(b,c){var $=b.jQuery||b.Cowboy||(b.Cowboy={}),a;$.throttle=a=function(e,f,j,i){var h,d=0;if(typeof f!=="boolean"){i=j;j=f;f=c}function g(){var o=this,m=+new Date()-d,n=arguments;function l(){d=+new Date();j.apply(o,n)}function k(){h=c}if(i&&!h){l()}h&&clearTimeout(h);if(i===c&&m>e){l()}else{if(f!==true){h=setTimeout(i?k:l,i===c?e-m:e)}}}if($.guid){g.guid=j.guid=j.guid||$.guid++}return g};$.debounce=function(d,e,f){return f===c?a(d,e,false):a(d,f,e!==false)}})(this);
|
||||
File diff suppressed because one or more lines are too long
Binary file not shown.
|
Before: 152 B | After: 112 B
Binary file not shown.
|
Before: 141 B | After: 112 B
@@ -1,87 +1,102 @@
|
||||
<!doctype html PUBLIC "-//W3C//DTD html 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
|
||||
{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
|
||||
{# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt #}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
|
||||
{# IE8 rounds line-height incorrectly, and adding this emulateIE7 line makes it right! #}
|
||||
{# http://social.msdn.microsoft.com/Forums/en-US/iewebdevelopment/thread/7684445e-f080-4d8f-8529-132763348e21 #}
|
||||
<meta http-equiv='X-UA-Compatible' content='IE=emulateIE7' />
|
||||
<title>Coverage for {{cu.name|escape}}: {{nums.pc_covered_str}}%</title>
|
||||
<link rel='stylesheet' href='style.css' type='text/css'>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=emulateIE7" />
|
||||
<title>Coverage for {{fr.relative_filename|escape}}: {{nums.pc_covered_str}}%</title>
|
||||
<link rel="stylesheet" href="style.css" type="text/css">
|
||||
{% if extra_css %}
|
||||
<link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
|
||||
<link rel="stylesheet" href="{{ extra_css }}" type="text/css">
|
||||
{% endif %}
|
||||
<script type='text/javascript' src='jquery.min.js'></script>
|
||||
<script type='text/javascript' src='jquery.hotkeys.js'></script>
|
||||
<script type='text/javascript' src='jquery.isonscreen.js'></script>
|
||||
<script type='text/javascript' src='coverage_html.js'></script>
|
||||
<script type='text/javascript' charset='utf-8'>
|
||||
<script type="text/javascript" src="jquery.min.js"></script>
|
||||
<script type="text/javascript" src="jquery.hotkeys.js"></script>
|
||||
<script type="text/javascript" src="jquery.isonscreen.js"></script>
|
||||
<script type="text/javascript" src="coverage_html.js"></script>
|
||||
<script type="text/javascript">
|
||||
jQuery(document).ready(coverage.pyfile_ready);
|
||||
</script>
|
||||
</head>
|
||||
<body id='pyfile'>
|
||||
<body class="pyfile">
|
||||
|
||||
<div id='header'>
|
||||
<div class='content'>
|
||||
<h1>Coverage for <b>{{cu.name|escape}}</b> :
|
||||
<span class='pc_cov'>{{nums.pc_covered_str}}%</span>
|
||||
<div id="header">
|
||||
<div class="content">
|
||||
<h1>Coverage for <b>{{fr.relative_filename|escape}}</b> :
|
||||
<span class="pc_cov">{{nums.pc_covered_str}}%</span>
|
||||
</h1>
|
||||
<img id='keyboard_icon' src='keybd_closed.png'>
|
||||
<h2 class='stats'>
|
||||
|
||||
<img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
|
||||
|
||||
<h2 class="stats">
|
||||
{{nums.n_statements}} statements
|
||||
<span class='{{c_run}} shortkey_r button_toggle_run'>{{nums.n_executed}} run</span>
|
||||
<span class='{{c_mis}} shortkey_m button_toggle_mis'>{{nums.n_missing}} missing</span>
|
||||
<span class='{{c_exc}} shortkey_x button_toggle_exc'>{{nums.n_excluded}} excluded</span>
|
||||
{% if arcs %}
|
||||
<span class='{{c_par}} shortkey_p button_toggle_par'>{{nums.n_partial_branches}} partial</span>
|
||||
<span class="{{c_run}} shortkey_r button_toggle_run">{{nums.n_executed}} run</span>
|
||||
<span class="{{c_mis}} shortkey_m button_toggle_mis">{{nums.n_missing}} missing</span>
|
||||
<span class="{{c_exc}} shortkey_x button_toggle_exc">{{nums.n_excluded}} excluded</span>
|
||||
|
||||
{% if has_arcs %}
|
||||
<span class="{{c_par}} shortkey_p button_toggle_par">{{nums.n_partial_branches}} partial</span>
|
||||
{% endif %}
|
||||
</h2>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class='help_panel'>
|
||||
<img id='panel_icon' src='keybd_open.png'>
|
||||
<p class='legend'>Hot-keys on this page</p>
|
||||
<div class="help_panel">
|
||||
<img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
|
||||
<p class="legend">Hot-keys on this page</p>
|
||||
<div>
|
||||
<p class='keyhelp'>
|
||||
<span class='key'>r</span>
|
||||
<span class='key'>m</span>
|
||||
<span class='key'>x</span>
|
||||
<span class='key'>p</span> toggle line displays
|
||||
<p class="keyhelp">
|
||||
<span class="key">r</span>
|
||||
<span class="key">m</span>
|
||||
<span class="key">x</span>
|
||||
<span class="key">p</span> toggle line displays
|
||||
</p>
|
||||
<p class='keyhelp'>
|
||||
<span class='key'>j</span>
|
||||
<span class='key'>k</span> next/prev highlighted chunk
|
||||
<p class="keyhelp">
|
||||
<span class="key">j</span>
|
||||
<span class="key">k</span> next/prev highlighted chunk
|
||||
</p>
|
||||
<p class='keyhelp'>
|
||||
<span class='key'>0</span> (zero) top of page
|
||||
<p class="keyhelp">
|
||||
<span class="key">0</span> (zero) top of page
|
||||
</p>
|
||||
<p class='keyhelp'>
|
||||
<span class='key'>1</span> (one) first highlighted chunk
|
||||
<p class="keyhelp">
|
||||
<span class="key">1</span> (one) first highlighted chunk
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id='source'>
|
||||
<table cellspacing='0' cellpadding='0'>
|
||||
<div id="source">
|
||||
<table>
|
||||
<tr>
|
||||
<td class='linenos' valign='top'>
|
||||
{% for line in lines %}
|
||||
<p id='n{{line.number}}' class='{{line.class}}'><a href='#n{{line.number}}'>{{line.number}}</a></p>
|
||||
<td class="linenos">
|
||||
{% for line in lines -%}
|
||||
<p id="n{{line.number}}" class="{{line.class}}"><a href="#n{{line.number}}">{{line.number}}</a></p>
|
||||
{% endfor %}
|
||||
</td>
|
||||
<td class='text' valign='top'>
|
||||
{% for line in lines %}
|
||||
<p id='t{{line.number}}' class='{{line.class}}'>{% if line.annotate %}<span class='annotate' title='{{line.annotate_title}}'>{{line.annotate}}</span>{% endif %}{{line.html}}<span class='strut'>&nbsp;</span></p>
|
||||
<td class="text">
|
||||
{# These are the source lines, which are very sensitive to whitespace. -#}
|
||||
{# The `{ # - # }` below are comments which slurp up the following space. -#}
|
||||
{% for line in lines -%}
|
||||
<p id="t{{line.number}}" class="{{line.class}}">{#-#}
|
||||
{% if line.annotate -%}
|
||||
<span class="annotate short">{{line.annotate}}</span>{#-#}
|
||||
<span class="annotate long">{{line.annotate_long}}</span>{#-#}
|
||||
{% endif -%}
|
||||
{{line.html}}<span class="strut"> </span>{#-#}
|
||||
</p>
|
||||
{% endfor %}
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div id='footer'>
|
||||
<div class='content'>
|
||||
<div id="footer">
|
||||
<div class="content">
|
||||
<p>
|
||||
<a class='nav' href='index.html'>« index</a> <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a>
|
||||
<a class="nav" href="index.html">« index</a> <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
|
||||
created at {{ time_stamp }}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
/* CSS styles for Coverage. */
|
||||
/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt */
|
||||
|
||||
/* CSS styles for coverage.py. */
|
||||
|
||||
/* Page-wide styles */
|
||||
html, body, h1, h2, h3, p, td, th {
|
||||
html, body, h1, h2, h3, p, table, td, th {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
border: 0;
|
||||
@@ -31,6 +35,17 @@ p {
|
||||
table {
|
||||
border-collapse: collapse;
|
||||
}
|
||||
td {
|
||||
vertical-align: top;
|
||||
}
|
||||
table tr.hidden {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
p#no_rows {
|
||||
display: none;
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
a.nav {
|
||||
text-decoration: none;
|
||||
@@ -50,14 +65,14 @@ a.nav:hover {
|
||||
|
||||
#source {
|
||||
padding: 1em;
|
||||
font-family: "courier new", monospace;
|
||||
font-family: Consolas, "Liberation Mono", Menlo, Courier, monospace;
|
||||
}
|
||||
|
||||
#indexfile #footer {
|
||||
.indexfile #footer {
|
||||
margin: 1em 3em;
|
||||
}
|
||||
|
||||
#pyfile #footer {
|
||||
.pyfile #footer {
|
||||
margin: 1em 1em;
|
||||
}
|
||||
|
||||
@@ -80,6 +95,16 @@ a.nav:hover {
|
||||
|
||||
h1 {
|
||||
font-size: 1.25em;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
#filter_container {
|
||||
display: inline-block;
|
||||
float: right;
|
||||
margin: 0 2em 0 0;
|
||||
}
|
||||
#filter_container input {
|
||||
width: 10em;
|
||||
}
|
||||
|
||||
h2.stats {
|
||||
@@ -130,22 +155,23 @@ h2.stats {
|
||||
/* Help panel */
|
||||
#keyboard_icon {
|
||||
float: right;
|
||||
margin: 5px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.help_panel {
|
||||
position: absolute;
|
||||
background: #ffc;
|
||||
background: #ffffcc;
|
||||
padding: .5em;
|
||||
border: 1px solid #883;
|
||||
display: none;
|
||||
}
|
||||
|
||||
#indexfile .help_panel {
|
||||
.indexfile .help_panel {
|
||||
width: 20em; height: 4em;
|
||||
}
|
||||
|
||||
#pyfile .help_panel {
|
||||
.pyfile .help_panel {
|
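A quick check of the slimmed-down behavior, assuming the escape() above; only & and < need converting now, since the rendered source sits in pre-formatted text:

print(escape(u"if a < b & c:"))   # if a &lt; b &amp; c: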
||||
width: 16em; height: 8em;
|
||||
}
|
||||
|
||||
@@ -201,7 +227,8 @@ td.text {
|
||||
margin: 0;
|
||||
padding: 0 0 0 .5em;
|
||||
border-left: 2px solid #ffffff;
|
||||
white-space: nowrap;
|
||||
white-space: pre;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.text p.mis {
|
||||
@@ -227,7 +254,6 @@ td.text {
|
||||
|
||||
.text span.annotate {
|
||||
font-family: georgia;
|
||||
font-style: italic;
|
||||
color: #666;
|
||||
float: right;
|
||||
padding-right: .5em;
|
||||
@@ -235,6 +261,27 @@ td.text {
|
||||
.text p.hide_par span.annotate {
|
||||
display: none;
|
||||
}
|
||||
.text span.annotate.long {
|
||||
display: none;
|
||||
}
|
||||
.text p:hover span.annotate.long {
|
||||
display: block;
|
||||
max-width: 50%;
|
||||
white-space: normal;
|
||||
float: right;
|
||||
position: absolute;
|
||||
top: 1.75em;
|
||||
right: 1em;
|
||||
width: 30em;
|
||||
height: auto;
|
||||
color: #333;
|
||||
background: #ffffcc;
|
||||
border: 1px solid #888;
|
||||
padding: .25em .5em;
|
||||
z-index: 999;
|
||||
border-radius: .2em;
|
||||
box-shadow: #cccccc .2em .2em .2em;
|
||||
}
|
||||
|
||||
/* Syntax coloring */
|
||||
.text .com {
|
||||
@@ -275,6 +322,14 @@ td.text {
|
||||
}
|
||||
#index th.headerSortDown, #index th.headerSortUp {
|
||||
border-bottom: 1px solid #000;
|
||||
white-space: nowrap;
|
||||
background: #eee;
|
||||
}
|
||||
#index th.headerSortDown:after {
|
||||
content: " ↓";
|
||||
}
|
||||
#index th.headerSortUp:after {
|
||||
content: " ↑";
|
||||
}
|
||||
#index td.name, #index th.name {
|
||||
text-align: left;
|
||||
@@ -284,13 +339,11 @@ td.text {
|
||||
text-decoration: none;
|
||||
color: #000;
|
||||
}
|
||||
#index td.name a:hover {
|
||||
text-decoration: underline;
|
||||
color: #000;
|
||||
#index tr.total,
|
||||
#index tr.total_dynamic {
|
||||
}
|
||||
#index tr.total {
|
||||
}
|
||||
#index tr.total td {
|
||||
#index tr.total td,
|
||||
#index tr.total_dynamic td {
|
||||
font-weight: bold;
|
||||
border-top: 1px solid #ccc;
|
||||
border-bottom: none;
|
||||
@@ -298,3 +351,25 @@ td.text {
|
||||
#index tr.file:hover {
|
||||
background: #eeeeee;
|
||||
}
|
||||
#index tr.file:hover td.name {
|
||||
text-decoration: underline;
|
||||
color: #000;
|
||||
}
|
||||
|
||||
/* scroll marker styles */
|
||||
#scroll_marker {
|
||||
position: fixed;
|
||||
right: 0;
|
||||
top: 0;
|
||||
width: 16px;
|
||||
height: 100%;
|
||||
background: white;
|
||||
border-left: 1px solid #eee;
|
||||
}
|
||||
|
||||
#scroll_marker .marker {
|
||||
background: #eedddd;
|
||||
position: absolute;
|
||||
min-height: 3px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
@@ -1,12 +1,72 @@
|
||||
"""Miscellaneous stuff for Coverage."""
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
|
||||
|
||||
"""Miscellaneous stuff for coverage.py."""
|
||||
|
||||
import errno
|
||||
import hashlib
|
||||
import inspect
|
||||
import locale
|
||||
import os
|
||||
import sys
|
||||
import types
|
||||
|
||||
from coverage.backward import md5, sorted # pylint: disable=W0622
|
||||
from coverage.backward import string_class, to_bytes
|
||||
from coverage import env
|
||||
from coverage.backward import string_class, to_bytes, unicode_class
|
||||
|
||||
ISOLATED_MODULES = {}
|
||||
|
||||
|
||||
def isolate_module(mod):
|
||||
"""Copy a module so that we are isolated from aggressive mocking.
|
||||
|
||||
If a test suite mocks os.path.exists (for example), and then we need to use
|
||||
it during the test, everything will get tangled up if we use their mock.
|
||||
Making a copy of the module when we import it will isolate coverage.py from
|
||||
those complications.
|
||||
"""
|
||||
if mod not in ISOLATED_MODULES:
|
||||
new_mod = types.ModuleType(mod.__name__)
|
||||
ISOLATED_MODULES[mod] = new_mod
|
||||
for name in dir(mod):
|
||||
value = getattr(mod, name)
|
||||
if isinstance(value, types.ModuleType):
|
||||
value = isolate_module(value)
|
||||
setattr(new_mod, name, value)
|
||||
return ISOLATED_MODULES[mod]
|
||||
|
||||
os = isolate_module(os)
|
||||
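Roughly how that isolation plays out, assuming the isolate_module() above (the mock here is hypothetical):

import os as real_os

safe_os = isolate_module(real_os)
original_exists = real_os.path.exists
real_os.path.exists = lambda p: False    # a test suite's aggressive mock
try:
    print(safe_os.path.exists("."))      # True: the isolated copy is untouched
finally:
    real_os.path.exists = original_exists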
|
||||
|
||||
# Use PyContracts for assertion testing on parameters and returns, but only if
|
||||
# we are running our own test suite.
|
||||
if env.TESTING:
|
||||
from contracts import contract # pylint: disable=unused-import
|
||||
from contracts import new_contract as raw_new_contract
|
||||
|
||||
def new_contract(*args, **kwargs):
|
||||
"""A proxy for contracts.new_contract that doesn't mind happening twice."""
|
||||
try:
|
||||
return raw_new_contract(*args, **kwargs)
|
||||
except ValueError:
|
||||
# During meta-coverage, this module is imported twice, and
|
||||
# PyContracts doesn't like redefining contracts. It's OK.
|
||||
pass
|
||||
|
||||
# Define contract words that PyContract doesn't have.
|
||||
new_contract('bytes', lambda v: isinstance(v, bytes))
|
||||
if env.PY3:
|
||||
new_contract('unicode', lambda v: isinstance(v, unicode_class))
|
||||
else: # pragma: not covered
|
||||
# We aren't using real PyContracts, so just define a no-op decorator as a
|
||||
# stunt double.
|
||||
def contract(**unused):
|
||||
"""Dummy no-op implementation of `contract`."""
|
||||
return lambda func: func
|
||||
|
||||
def new_contract(*args_unused, **kwargs_unused):
|
||||
"""Dummy no-op implementation of `new_contract`."""
|
||||
pass
|
||||
|
||||
|
||||
def nice_pair(pair):
|
||||
@@ -42,7 +102,7 @@ def format_lines(statements, lines):
|
||||
lines = sorted(lines)
|
||||
while i < len(statements) and j < len(lines):
|
||||
if statements[i] == lines[j]:
|
||||
if start == None:
|
||||
if start is None:
|
||||
start = lines[j]
|
||||
end = lines[j]
|
||||
j += 1
|
||||
@@ -56,25 +116,25 @@ def format_lines(statements, lines):
|
||||
return ret
|
||||
|
||||
|
||||
def short_stack():
|
||||
"""Return a string summarizing the call stack."""
|
||||
stack = inspect.stack()[:0:-1]
|
||||
return "\n".join(["%30s : %s @%d" % (t[3],t[1],t[2]) for t in stack])
|
||||
|
||||
|
||||
def expensive(fn):
|
||||
"""A decorator to cache the result of an expensive operation.
|
||||
"""A decorator to indicate that a method shouldn't be called more than once.
|
||||
|
||||
Only applies to methods with no arguments.
|
||||
Normally, this does nothing. During testing, this raises an exception if
|
||||
called more than once.
|
||||
|
||||
"""
|
||||
attr = "_cache_" + fn.__name__
|
||||
if env.TESTING:
|
||||
attr = "_once_" + fn.__name__
|
||||
|
||||
def _wrapped(self):
|
||||
"""Inner fn that checks the cache."""
|
||||
if not hasattr(self, attr):
|
||||
setattr(self, attr, fn(self))
|
||||
return getattr(self, attr)
|
||||
"""Inner function that checks the cache."""
|
||||
if hasattr(self, attr):
|
||||
raise Exception("Shouldn't have called %s more than once" % fn.__name__)
|
||||
setattr(self, attr, True)
|
||||
return fn(self)
|
||||
return _wrapped
|
||||
else:
|
||||
return fn
|
||||
|
||||
|
||||
def bool_or_none(b):
|
||||
@@ -87,34 +147,42 @@ def bool_or_none(b):
|
||||
|
||||
def join_regex(regexes):
|
||||
"""Combine a list of regexes into one that matches any of them."""
|
||||
if len(regexes) > 1:
|
||||
return "|".join(["(%s)" % r for r in regexes])
|
||||
elif regexes:
|
||||
return regexes[0]
|
||||
else:
|
||||
return ""
|
||||
return "|".join("(?:%s)" % r for r in regexes)
|
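The switch to non-capturing (?:...) groups keeps any groups inside the callers' patterns numbered as written; a small sketch with made-up patterns:

import re

def join_regex(regexes):
    # Same one-liner as above.
    return "|".join("(?:%s)" % r for r in regexes)

combined = join_regex([r"#\s*pragma: no cover", r"raise NotImplementedError"])
assert re.search(combined, "x = 1  # pragma: no cover")
assert not re.search(combined, "x = 1")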
||||
|
||||
|
||||
def file_be_gone(path):
|
||||
"""Remove a file, and don't get annoyed if it doesn't exist."""
|
||||
try:
|
||||
os.remove(path)
|
||||
except OSError:
|
||||
_, e, _ = sys.exc_info()
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
|
||||
def output_encoding(outfile=None):
|
||||
"""Determine the encoding to use for output written to `outfile` or stdout."""
|
||||
if outfile is None:
|
||||
outfile = sys.stdout
|
||||
encoding = (
|
||||
getattr(outfile, "encoding", None) or
|
||||
getattr(sys.__stdout__, "encoding", None) or
|
||||
locale.getpreferredencoding()
|
||||
)
|
||||
return encoding
|
||||
|
||||
|
||||
class Hasher(object):
|
||||
"""Hashes Python data into md5."""
|
||||
def __init__(self):
|
||||
self.md5 = md5()
|
||||
self.md5 = hashlib.md5()
|
||||
|
||||
def update(self, v):
|
||||
"""Add `v` to the hash, recursively if needed."""
|
||||
self.md5.update(to_bytes(str(type(v))))
|
||||
if isinstance(v, string_class):
|
||||
self.md5.update(to_bytes(v))
|
||||
elif isinstance(v, bytes):
|
||||
self.md5.update(v)
|
||||
elif v is None:
|
||||
pass
|
||||
elif isinstance(v, (int, float)):
|
||||
@@ -137,27 +205,58 @@ class Hasher(object):
|
||||
self.update(k)
|
||||
self.update(a)
|
||||
|
||||
def digest(self):
|
||||
"""Retrieve the digest of the hash."""
|
||||
return self.md5.digest()
|
||||
def hexdigest(self):
|
||||
"""Retrieve the hex digest of the hash."""
|
||||
return self.md5.hexdigest()
|
||||
|
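Since update() folds str(type(v)) into the digest before the value itself, equal-looking values of different types hash differently; for instance (assuming the Hasher above):

h1, h2 = Hasher(), Hasher()
h1.update("1")
h2.update(1)
assert h1.hexdigest() != h2.hexdigest()   # the type tag keeps them apart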
||||
|
||||
def _needs_to_implement(that, func_name):
|
||||
"""Helper to raise NotImplementedError in interface stubs."""
|
||||
if hasattr(that, "_coverage_plugin_name"):
|
||||
thing = "Plugin"
|
||||
name = that._coverage_plugin_name
|
||||
else:
|
||||
thing = "Class"
|
||||
klass = that.__class__
|
||||
name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
|
||||
|
||||
raise NotImplementedError(
|
||||
"{thing} {name!r} needs to implement {func_name}()".format(
|
||||
thing=thing, name=name, func_name=func_name
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class SimpleRepr(object):
|
||||
"""A mixin implementing a simple __repr__."""
|
||||
def __repr__(self):
|
||||
return "<{klass} @{id:x} {attrs}>".format(
|
||||
klass=self.__class__.__name__,
|
||||
id=id(self) & 0xFFFFFF,
|
||||
attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
|
||||
)
|
||||
|
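For example, mixed into a hypothetical class (the @id portion varies from run to run):

class Point(SimpleRepr):
    def __init__(self, x, y):
        self.x, self.y = x, y

print(repr(Point(1, 2)))   # something like <Point @5a3f10 x=1 y=2>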
||||
|
||||
class CoverageException(Exception):
|
||||
"""An exception specific to Coverage."""
|
||||
"""An exception specific to coverage.py."""
|
||||
pass
|
||||
|
||||
|
||||
class NoSource(CoverageException):
|
||||
"""We couldn't find the source for a module."""
|
||||
pass
|
||||
|
||||
|
||||
class NoCode(NoSource):
|
||||
"""We couldn't find any code at all."""
|
||||
pass
|
||||
|
||||
|
||||
class NotPython(CoverageException):
|
||||
"""A source file turned out not to be parsable Python."""
|
||||
pass
|
||||
|
||||
|
||||
class ExceptionDuringRun(CoverageException):
|
||||
"""An exception happened while running customer code.
|
||||
|
||||
|
||||
99
python/helpers/coveragepy/coverage/multiproc.py
Normal file
@@ -0,0 +1,99 @@
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
|
||||
|
||||
"""Monkey-patching to add multiprocessing support for coverage.py"""
|
||||
|
||||
import multiprocessing
|
||||
import multiprocessing.process
|
||||
import os
|
||||
import sys
|
||||
|
||||
from coverage.misc import contract
|
||||
|
||||
# An attribute that will be set on the module to indicate that it has been
|
||||
# monkey-patched.
|
||||
PATCHED_MARKER = "_coverage$patched"
|
||||
|
||||
# The environment variable that specifies the rcfile for subprocesses.
|
||||
COVERAGE_RCFILE_ENV = "_COVERAGE_RCFILE"
|
||||
|
||||
|
||||
if sys.version_info >= (3, 4):
|
||||
OriginalProcess = multiprocessing.process.BaseProcess
|
||||
else:
|
||||
OriginalProcess = multiprocessing.Process
|
||||
|
||||
original_bootstrap = OriginalProcess._bootstrap
|
||||
|
||||
class ProcessWithCoverage(OriginalProcess):
|
||||
"""A replacement for multiprocess.Process that starts coverage."""
|
||||
|
||||
def _bootstrap(self):
|
||||
"""Wrapper around _bootstrap to start coverage."""
|
||||
from coverage import Coverage # avoid circular import
|
||||
rcfile = os.environ[COVERAGE_RCFILE_ENV]
|
||||
cov = Coverage(data_suffix=True, config_file=rcfile)
|
||||
cov.start()
|
||||
try:
|
||||
return original_bootstrap(self)
|
||||
finally:
|
||||
cov.stop()
|
||||
cov.save()
|
||||
|
||||
|
||||
class Stowaway(object):
|
||||
"""An object to pickle, so when it is unpickled, it can apply the monkey-patch."""
|
||||
def __init__(self, rcfile):
|
||||
self.rcfile = rcfile
|
||||
|
||||
def __getstate__(self):
|
||||
return {'rcfile': self.rcfile}
|
||||
|
||||
def __setstate__(self, state):
|
||||
patch_multiprocessing(state['rcfile'])
|
||||
|
||||
|
||||
@contract(rcfile=str)
|
||||
def patch_multiprocessing(rcfile):
|
||||
"""Monkey-patch the multiprocessing module.
|
||||
|
||||
This enables coverage measurement of processes started by multiprocessing.
|
||||
This involves aggressive monkey-patching.
|
||||
|
||||
`rcfile` is the path to the rcfile being used.
|
||||
|
||||
"""
|
||||
|
||||
if hasattr(multiprocessing, PATCHED_MARKER):
|
||||
return
|
||||
|
||||
if sys.version_info >= (3, 4):
|
||||
OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap
|
||||
else:
|
||||
multiprocessing.Process = ProcessWithCoverage
|
||||
|
||||
# Set the value in ProcessWithCoverage that will be pickled into the child
|
||||
# process.
|
||||
os.environ[COVERAGE_RCFILE_ENV] = rcfile
|
||||
|
||||
# When spawning processes rather than forking them, we have no state in the
|
||||
# new process. We sneak in there with a Stowaway: we stuff one of our own
|
||||
# objects into the data that gets pickled and sent to the sub-process. When
|
||||
# the Stowaway is unpickled, its __setstate__ method is called, which
|
||||
# re-applies the monkey-patch.
|
||||
# Windows only spawns, so this is needed to keep Windows working.
|
||||
try:
|
||||
from multiprocessing import spawn
|
||||
original_get_preparation_data = spawn.get_preparation_data
|
||||
except (ImportError, AttributeError):
|
||||
pass
|
||||
else:
|
||||
def get_preparation_data_with_stowaway(name):
|
||||
"""Get the original preparation data, and also insert our stowaway."""
|
||||
d = original_get_preparation_data(name)
|
||||
d['stowaway'] = Stowaway(rcfile)
|
||||
return d
|
||||
|
||||
spawn.get_preparation_data = get_preparation_data_with_stowaway
|
||||
|
||||
setattr(multiprocessing, PATCHED_MARKER, True)
|
||||
File diff suppressed because it is too large
@@ -1,8 +1,18 @@
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
|
||||
|
||||
"""Better tokenizing for coverage.py."""
|
||||
|
||||
import codecs, keyword, re, sys, token, tokenize
|
||||
from coverage.backward import set # pylint: disable=W0622
|
||||
from coverage.parser import generate_tokens
|
||||
import codecs
|
||||
import keyword
|
||||
import re
|
||||
import sys
|
||||
import token
|
||||
import tokenize
|
||||
|
||||
from coverage import env
|
||||
from coverage.backward import iternext
|
||||
from coverage.misc import contract
|
||||
|
||||
|
||||
def phys_tokens(toks):
|
||||
@@ -43,7 +53,7 @@ def phys_tokens(toks):
|
||||
inject_backslash = False
|
||||
elif ttype == token.STRING:
|
||||
if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
|
||||
# It's a multiline string and the first line ends with
|
||||
# It's a multi-line string and the first line ends with
|
||||
# a backslash, so we don't need to inject another.
|
||||
inject_backslash = False
|
||||
if inject_backslash:
|
||||
@@ -61,6 +71,7 @@ def phys_tokens(toks):
|
||||
last_lineno = elineno
|
||||
|
||||
|
||||
@contract(source='unicode')
|
||||
def source_token_lines(source):
|
||||
"""Generate a series of lines, one for each line in `source`.
|
||||
|
||||
@@ -76,11 +87,14 @@ def source_token_lines(source):
|
||||
is indistinguishable from a final line with a newline.
|
||||
|
||||
"""
|
||||
|
||||
ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
|
||||
line = []
|
||||
col = 0
|
||||
|
||||
source = source.expandtabs(8).replace('\r\n', '\n')
|
||||
tokgen = generate_tokens(source)
|
||||
|
||||
for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
|
||||
mark_start = True
|
||||
for part in re.split('(\n)', ttext):
|
||||
@@ -95,7 +109,7 @@ def source_token_lines(source):
|
||||
mark_end = False
|
||||
else:
|
||||
if mark_start and scol > col:
|
||||
line.append(("ws", " " * (scol - col)))
|
||||
line.append(("ws", u" " * (scol - col)))
|
||||
mark_start = False
|
||||
tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
|
||||
if ttype == token.NAME and keyword.iskeyword(ttext):
|
||||
@@ -109,22 +123,52 @@ def source_token_lines(source):
|
||||
if line:
|
||||
yield line
|
||||
|
||||
def source_encoding(source):
|
||||
"""Determine the encoding for `source` (a string), according to PEP 263.
|
||||
|
||||
class CachedTokenizer(object):
|
||||
"""A one-element cache around tokenize.generate_tokens.
|
||||
|
||||
When reporting, coverage.py tokenizes files twice, once to find the
|
||||
structure of the file, and once to syntax-color it. Tokenizing is
|
||||
expensive, and easily cached.
|
||||
|
||||
This is a one-element cache so that our twice-in-a-row tokenizing doesn't
|
||||
actually tokenize twice.
|
||||
|
||||
"""
|
||||
def __init__(self):
|
||||
self.last_text = None
|
||||
self.last_tokens = None
|
||||
|
||||
@contract(text='unicode')
|
||||
def generate_tokens(self, text):
|
||||
"""A stand-in for `tokenize.generate_tokens`."""
|
||||
if text != self.last_text:
|
||||
self.last_text = text
|
||||
readline = iternext(text.splitlines(True))
|
||||
self.last_tokens = list(tokenize.generate_tokens(readline))
|
||||
return self.last_tokens
|
||||
|
||||
# Create our generate_tokens cache as a callable replacement function.
|
||||
generate_tokens = CachedTokenizer().generate_tokens
|
||||
|
||||
|
||||
COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)
|
||||
|
||||
@contract(source='bytes')
|
||||
def _source_encoding_py2(source):
|
||||
"""Determine the encoding for `source`, according to PEP 263.
|
||||
|
||||
`source` is a byte string, the text of the program.
|
||||
|
||||
Returns a string, the name of the encoding.
|
||||
|
||||
"""
|
||||
# Note: this function should never be called on Python 3, since py3 has
|
||||
# built-in tools to do this.
|
||||
assert sys.version_info < (3, 0)
|
||||
|
||||
# This is mostly code adapted from Py3.2's tokenize module.
|
||||
|
||||
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
|
||||
assert isinstance(source, bytes)
|
||||
|
||||
# Do this so the detect_encode code we copied will work.
|
||||
readline = iter(source.splitlines(True)).next
|
||||
readline = iternext(source.splitlines(True))
|
||||
|
||||
# This is mostly code adapted from Py3.2's tokenize module.
|
||||
|
||||
def _get_normal_name(orig_enc):
|
||||
"""Imitates get_normal_name in tokenizer.c."""
|
||||
@@ -137,18 +181,13 @@ def source_encoding(source):
|
||||
return orig_enc
|
||||
|
||||
# From detect_encode():
|
||||
# It detects the encoding from the presence of a utf-8 bom or an encoding
|
||||
# cookie as specified in pep-0263. If both a bom and a cookie are present,
|
||||
# It detects the encoding from the presence of a UTF-8 BOM or an encoding
|
||||
# cookie as specified in PEP-0263. If both a BOM and a cookie are present,
|
||||
# but disagree, a SyntaxError will be raised. If the encoding cookie is an
|
||||
# invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
|
||||
# invalid charset, raise a SyntaxError. Note that if a UTF-8 BOM is found,
|
||||
# 'utf-8-sig' is returned.
|
||||
|
||||
# If no encoding is specified, then the default will be returned. The
|
||||
# default varied with version.
|
||||
|
||||
if sys.version_info <= (2, 4):
|
||||
default = 'iso-8859-1'
|
||||
else:
|
||||
# If no encoding is specified, then the default will be returned.
|
||||
default = 'ascii'
|
||||
|
||||
bom_found = False
|
||||
@@ -168,21 +207,21 @@ def source_encoding(source):
|
||||
except UnicodeDecodeError:
|
||||
return None
|
||||
|
||||
matches = cookie_re.findall(line_string)
|
||||
matches = COOKIE_RE.findall(line_string)
|
||||
if not matches:
|
||||
return None
|
||||
encoding = _get_normal_name(matches[0])
|
||||
try:
|
||||
codec = codecs.lookup(encoding)
|
||||
except LookupError:
|
||||
# This behaviour mimics the Python interpreter
|
||||
# This behavior mimics the Python interpreter
|
||||
raise SyntaxError("unknown encoding: " + encoding)
|
||||
|
||||
if bom_found:
|
||||
# codecs in 2.3 were raw tuples of functions, assume the best.
|
||||
codec_name = getattr(codec, 'name', encoding)
|
||||
if codec_name != 'utf-8':
|
||||
# This behaviour mimics the Python interpreter
|
||||
# This behavior mimics the Python interpreter
|
||||
raise SyntaxError('encoding problem: utf-8')
|
||||
encoding += '-sig'
|
||||
return encoding
|
||||
@@ -208,3 +247,48 @@ def source_encoding(source):
|
||||
return encoding
|
||||
|
||||
return default
|
||||
|
||||
|
||||
@contract(source='bytes')
|
||||
def _source_encoding_py3(source):
|
||||
"""Determine the encoding for `source`, according to PEP 263.
|
||||
|
||||
`source` is a byte string: the text of the program.
|
||||
|
||||
Returns a string, the name of the encoding.
|
||||
|
||||
"""
|
||||
readline = iternext(source.splitlines(True))
|
||||
return tokenize.detect_encoding(readline)[0]
|
||||
|
||||
|
||||
if env.PY3:
|
||||
source_encoding = _source_encoding_py3
|
||||
else:
|
||||
source_encoding = _source_encoding_py2
|
||||
|
||||
|
||||
@contract(source='unicode')
|
||||
def compile_unicode(source, filename, mode):
|
||||
"""Just like the `compile` builtin, but works on any Unicode string.
|
||||
|
||||
Python 2's compile() builtin has a stupid restriction: if the source string
|
||||
is Unicode, then it may not have an encoding declaration in it. Why not?
|
||||
Who knows! It also decodes to utf8, and then tries to interpret those utf8
|
||||
bytes according to the encoding declaration. Why? Who knows!
|
||||
|
||||
This function neuters the coding declaration, and compiles it.
|
||||
|
||||
"""
|
||||
source = neuter_encoding_declaration(source)
|
||||
if env.PY2 and isinstance(filename, unicode):
|
||||
filename = filename.encode(sys.getfilesystemencoding(), "replace")
|
||||
code = compile(source, filename, mode)
|
||||
return code
|
||||
|
||||
|
||||
@contract(source='unicode', returns='unicode')
|
||||
def neuter_encoding_declaration(source):
|
||||
"""Return `source`, with any encoding declaration neutered."""
|
||||
source = COOKIE_RE.sub("# (deleted declaration)", source, count=2)
|
||||
return source
|
||||
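Because the replacement is itself a comment on the same line, line numbers in the compiled code stay aligned with the original source; for example (assuming the function above):

src = u"# coding: utf-8\nx = 1\n"
print(neuter_encoding_declaration(src))
# Prints:
# # (deleted declaration)
# x = 1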
|
||||
47
python/helpers/coveragepy/coverage/pickle2json.py
Normal file
@@ -0,0 +1,47 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Convert pickle to JSON for coverage.py."""

from coverage.backward import pickle
from coverage.data import CoverageData


def pickle_read_raw_data(cls_unused, file_obj):
    """Replacement for CoverageData._read_raw_data."""
    return pickle.load(file_obj)


def pickle2json(infile, outfile):
    """Convert a coverage.py 3.x pickle data file to a 4.x JSON data file."""
    try:
        old_read_raw_data = CoverageData._read_raw_data
        CoverageData._read_raw_data = pickle_read_raw_data

        covdata = CoverageData()

        with open(infile, 'rb') as inf:
            covdata.read_fileobj(inf)

        covdata.write_file(outfile)
    finally:
        CoverageData._read_raw_data = old_read_raw_data


if __name__ == "__main__":
    from optparse import OptionParser

    parser = OptionParser(usage="usage: %s [options]" % __file__)
    parser.description = "Convert .coverage files from pickle to JSON format"
    parser.add_option(
        "-i", "--input-file", action="store", default=".coverage",
        help="Name of input file. Default .coverage",
    )
    parser.add_option(
        "-o", "--output-file", action="store", default=".coverage",
        help="Name of output file. Default .coverage",
    )

    (options, args) = parser.parse_args()

    pickle2json(options.input_file, options.output_file)

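The converter can also be driven from Python rather than the command line; a
minimal sketch, with hypothetical file names::

    from coverage.pickle2json import pickle2json

    pickle2json(".coverage", ".coverage")            # convert in place
    pickle2json("old/.coverage", "new/.coverage")    # or write elsewhere
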
python/helpers/coveragepy/coverage/plugin.py (new file, 396 lines)
@@ -0,0 +1,396 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Plugin interfaces for coverage.py"""

from coverage import files
from coverage.misc import contract, _needs_to_implement


class CoveragePlugin(object):
    """Base class for coverage.py plugins.

    To write a coverage.py plugin, create a module with a subclass of
    :class:`CoveragePlugin`. You will override methods in your class to
    participate in various aspects of coverage.py's processing.

    Currently the only plugin type is a file tracer, for implementing
    measurement support for non-Python files. File tracer plugins implement
    the :meth:`file_tracer` method to claim files and the :meth:`file_reporter`
    method to report on those files.

    Any plugin can optionally implement :meth:`sys_info` to provide debugging
    information about its operation.

    Coverage.py will store its own information on your plugin object, using
    attributes whose names start with ``_coverage_``. Don't be startled.

    To register your plugin, define a function called `coverage_init` in your
    module::

        def coverage_init(reg, options):
            reg.add_file_tracer(MyPlugin())

    You use the `reg` parameter passed to your `coverage_init` function to
    register your plugin object. It has one method, `add_file_tracer`, which
    takes a newly created instance of your plugin.

    If your plugin takes options, the `options` parameter is a dictionary of
    your plugin's options from the coverage.py configuration file. Use them
    however you want to configure your object before registering it.

    """

    def file_tracer(self, filename):        # pylint: disable=unused-argument
        """Get a :class:`FileTracer` object for a file.

        Every Python source file is offered to the plugin to give it a chance
        to take responsibility for tracing the file. If your plugin can handle
        the file, then return a :class:`FileTracer` object. Otherwise return
        None.

        There is no way to register your plugin for particular files. Instead,
        this method is invoked for all files, and the plugin decides whether it
        can trace the file or not. Be prepared for `filename` to refer to all
        kinds of files that have nothing to do with your plugin.

        The file name will be a Python file being executed. There are two
        broad categories of behavior for a plugin, depending on the kind of
        files your plugin supports:

        * Static file names: each of your original source files has been
          converted into a distinct Python file. Your plugin is invoked with
          the Python file name, and it maps it back to its original source
          file.

        * Dynamic file names: all of your source files are executed by the same
          Python file. In this case, your plugin implements
          :meth:`FileTracer.dynamic_source_filename` to provide the actual
          source file for each execution frame.

        `filename` is a string, the path to the file being considered. This is
        the absolute real path to the file. If you are comparing to other
        paths, be sure to take this into account.

        Returns a :class:`FileTracer` object to use to trace `filename`, or
        None if this plugin cannot trace this file.

        """
        return None

    def file_reporter(self, filename):      # pylint: disable=unused-argument
        """Get the :class:`FileReporter` class to use for a file.

        This will only be invoked if :meth:`file_tracer` returned non-None for
        `filename`. It's an error to return None from this method.

        Returns a :class:`FileReporter` object to use to report on `filename`.

        """
        _needs_to_implement(self, "file_reporter")

    def sys_info(self):
        """Get a list of information useful for debugging.

        This method will be invoked for ``--debug=sys``. Your
        plugin can return any information it wants to be displayed.

        Returns a list of pairs: `[(name, value), ...]`.

        """
        return []


class FileTracer(object):
    """Support needed for files during the execution phase.

    You may construct this object from :meth:`CoveragePlugin.file_tracer` any
    way you like. A natural choice would be to pass the file name given to
    `file_tracer`.

    `FileTracer` objects should only be created in the
    :meth:`CoveragePlugin.file_tracer` method.

    See :ref:`howitworks` for details of the different coverage.py phases.

    """

    def source_filename(self):
        """The source file name for this file.

        This may be any file name you like. A key responsibility of a plugin
        is to own the mapping from Python execution back to whatever source
        file name was originally the source of the code.

        See :meth:`CoveragePlugin.file_tracer` for details about static and
        dynamic file names.

        Returns the file name to credit with this execution.

        """
        _needs_to_implement(self, "source_filename")

    def has_dynamic_source_filename(self):
        """Does this FileTracer have dynamic source file names?

        FileTracers can provide dynamically determined file names by
        implementing :meth:`dynamic_source_filename`. Invoking that function
        is expensive. To determine whether to invoke it, coverage.py uses the
        result of this function to know if it needs to bother invoking
        :meth:`dynamic_source_filename`.

        See :meth:`CoveragePlugin.file_tracer` for details about static and
        dynamic file names.

        Returns True if :meth:`dynamic_source_filename` should be called to get
        dynamic source file names.

        """
        return False

    def dynamic_source_filename(self, filename, frame):     # pylint: disable=unused-argument
        """Get a dynamically computed source file name.

        Some plugins need to compute the source file name dynamically for each
        frame.

        This function will not be invoked if
        :meth:`has_dynamic_source_filename` returns False.

        Returns the source file name for this frame, or None if this frame
        shouldn't be measured.

        """
        return None

    def line_number_range(self, frame):
        """Get the range of source line numbers for a given call frame.

        The call frame is examined, and the source line number in the original
        file is returned. The return value is a pair of numbers, the starting
        line number and the ending line number, both inclusive. For example,
        returning (5, 7) means that lines 5, 6, and 7 should be considered
        executed.

        This function might decide that the frame doesn't indicate any lines
        from the source file were executed. Return (-1, -1) in this case to
        tell coverage.py that no lines should be recorded for this frame.

        """
        lineno = frame.f_lineno
        return lineno, lineno


class FileReporter(object):
    """Support needed for files during the analysis and reporting phases.

    See :ref:`howitworks` for details of the different coverage.py phases.

    `FileReporter` objects should only be created in the
    :meth:`CoveragePlugin.file_reporter` method.

    There are many methods here, but only :meth:`lines` is required, to provide
    the set of executable lines in the file.

    """

    def __init__(self, filename):
        """Simple initialization of a `FileReporter`.

        The `filename` argument is the path to the file being reported. This
        will be available as the `.filename` attribute on the object. Other
        method implementations on this base class rely on this attribute.

        """
        self.filename = filename

    def __repr__(self):
        return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)

    def relative_filename(self):
        """Get the relative file name for this file.

        This file path will be displayed in reports. The default
        implementation will supply the actual project-relative file path. You
        only need to supply this method if you have an unusual syntax for file
        paths.

        """
        return files.relative_filename(self.filename)

    @contract(returns='unicode')
    def source(self):
        """Get the source for the file.

        Returns a Unicode string.

        The base implementation simply reads the `self.filename` file and
        decodes it as UTF8. Override this method if your file isn't readable
        as a text file, or if you need other encoding support.

        """
        with open(self.filename, "rb") as f:
            return f.read().decode("utf8")

    def lines(self):
        """Get the executable lines in this file.

        Your plugin must determine which lines in the file were possibly
        executable. This method returns a set of those line numbers.

        Returns a set of line numbers.

        """
        _needs_to_implement(self, "lines")

    def excluded_lines(self):
        """Get the excluded executable lines in this file.

        Your plugin can use any method it likes to allow the user to exclude
        executable lines from consideration.

        Returns a set of line numbers.

        The base implementation returns the empty set.

        """
        return set()

    def translate_lines(self, lines):
        """Translate recorded lines into reported lines.

        Some file formats will want to report lines slightly differently than
        they are recorded. For example, Python records the last line of a
        multi-line statement, but reports are nicer if they mention the first
        line.

        Your plugin can optionally define this method to perform these kinds of
        adjustment.

        `lines` is a sequence of integers, the recorded line numbers.

        Returns a set of integers, the adjusted line numbers.

        The base implementation returns the numbers unchanged.

        """
        return set(lines)

    def arcs(self):
        """Get the executable arcs in this file.

        To support branch coverage, your plugin needs to be able to indicate
        possible execution paths, as a set of line number pairs. Each pair is
        a `(prev, next)` pair indicating that execution can transition from the
        `prev` line number to the `next` line number.

        Returns a set of pairs of line numbers. The default implementation
        returns an empty set.

        """
        return set()

    def no_branch_lines(self):
        """Get the lines excused from branch coverage in this file.

        Your plugin can use any method it likes to allow the user to exclude
        lines from consideration of branch coverage.

        Returns a set of line numbers.

        The base implementation returns the empty set.

        """
        return set()

    def translate_arcs(self, arcs):
        """Translate recorded arcs into reported arcs.

        Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
        line number pairs.

        Returns a set of line number pairs.

        The default implementation returns `arcs` unchanged.

        """
        return arcs

    def exit_counts(self):
        """Get a count of exits from each line.

        To determine which lines are branches, coverage.py looks for lines that
        have more than one exit. This function creates a dict mapping each
        executable line number to a count of how many exits it has.

        To be honest, this feels wrong, and should be refactored. Let me know
        if you attempt to implement this method in your plugin...

        """
        return {}

    def missing_arc_description(self, start, end, executed_arcs=None):     # pylint: disable=unused-argument
        """Provide an English sentence describing a missing arc.

        The `start` and `end` arguments are the line numbers of the missing
        arc. Negative numbers indicate entering or exiting code objects.

        The `executed_arcs` argument is a set of line number pairs, the arcs
        that were executed in this file.

        By default, this simply returns the string "Line {start} didn't jump
        to line {end}".

        """
        return "Line {start} didn't jump to line {end}".format(start=start, end=end)

    def source_token_lines(self):
        """Generate a series of tokenized lines, one for each line in `source`.

        These tokens are used for syntax-colored reports.

        Each line is a list of pairs, each pair is a token::

            [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

        Each pair has a token class, and the token text. The token classes
        are:

        * ``'com'``: a comment
        * ``'key'``: a keyword
        * ``'nam'``: a name, or identifier
        * ``'num'``: a number
        * ``'op'``: an operator
        * ``'str'``: a string literal
        * ``'txt'``: some other kind of text

        If you concatenate all the token texts, and then join them with
        newlines, you should have your original source back.

        The default implementation simply returns each line tagged as
        ``'txt'``.

        """
        for line in self.source().splitlines():
            yield [('txt', line)]

    # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
    # of them defined.

    def __eq__(self, other):
        return isinstance(other, FileReporter) and self.filename == other.filename

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        return self.filename < other.filename

    def __le__(self, other):
        return self.filename <= other.filename

    def __gt__(self, other):
        return self.filename > other.filename

    def __ge__(self, other):
        return self.filename >= other.filename

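Putting these interfaces together, a minimal file-tracer plugin might look
like the following sketch. Everything here (the `.tmpl` suffix, the class and
module names) is hypothetical; only the `CoveragePlugin`, `FileTracer`,
`FileReporter`, and `coverage_init` hooks come from the API above::

    import coverage.plugin

    class TemplateTracer(coverage.plugin.FileTracer):
        def __init__(self, filename):
            self.filename = filename

        def source_filename(self):
            return self.filename

    class TemplateReporter(coverage.plugin.FileReporter):
        def lines(self):
            # Pretend every non-blank line is executable.
            return set(
                i for i, line in enumerate(self.source().splitlines(), start=1)
                if line.strip()
            )

    class TemplatePlugin(coverage.plugin.CoveragePlugin):
        def file_tracer(self, filename):
            if filename.endswith(".tmpl"):    # claim only our template files
                return TemplateTracer(filename)
            return None

        def file_reporter(self, filename):
            return TemplateReporter(filename)

    def coverage_init(reg, options):
        reg.add_file_tracer(TemplatePlugin())
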
python/helpers/coveragepy/coverage/plugin_support.py (new file, 247 lines)
@@ -0,0 +1,247 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Support for plugins."""

import os
import os.path
import sys

from coverage.misc import CoverageException, isolate_module
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter

os = isolate_module(os)


class Plugins(object):
    """The currently loaded collection of coverage.py plugins."""

    def __init__(self):
        self.order = []
        self.names = {}
        self.file_tracers = []

        self.current_module = None
        self.debug = None

    @classmethod
    def load_plugins(cls, modules, config, debug=None):
        """Load plugins from `modules`.

        Returns a list of loaded and configured plugins.

        """
        plugins = cls()
        plugins.debug = debug

        for module in modules:
            plugins.current_module = module
            __import__(module)
            mod = sys.modules[module]

            coverage_init = getattr(mod, "coverage_init", None)
            if not coverage_init:
                raise CoverageException(
                    "Plugin module %r didn't define a coverage_init function" % module
                )

            options = config.get_plugin_options(module)
            coverage_init(plugins, options)

        plugins.current_module = None
        return plugins

    def add_file_tracer(self, plugin):
        """Add a file tracer plugin.

        `plugin` is an instance of a third-party plugin class. It must
        implement the :meth:`CoveragePlugin.file_tracer` method.

        """
        self._add_plugin(plugin, self.file_tracers)

    def add_noop(self, plugin):
        """Add a plugin that does nothing.

        This is only useful for testing the plugin support.

        """
        self._add_plugin(plugin, None)

    def _add_plugin(self, plugin, specialized):
        """Add a plugin object.

        `plugin` is a :class:`CoveragePlugin` instance to add. `specialized`
        is a list to append the plugin to.

        """
        plugin_name = "%s.%s" % (self.current_module, plugin.__class__.__name__)
        if self.debug and self.debug.should('plugin'):
            self.debug.write("Loaded plugin %r: %r" % (self.current_module, plugin))
            labelled = LabelledDebug("plugin %r" % (self.current_module,), self.debug)
            plugin = DebugPluginWrapper(plugin, labelled)

        # pylint: disable=attribute-defined-outside-init
        plugin._coverage_plugin_name = plugin_name
        plugin._coverage_enabled = True
        self.order.append(plugin)
        self.names[plugin_name] = plugin
        if specialized is not None:
            specialized.append(plugin)

    def __nonzero__(self):
        return bool(self.order)

    __bool__ = __nonzero__

    def __iter__(self):
        return iter(self.order)

    def get(self, plugin_name):
        """Return a plugin by name."""
        return self.names[plugin_name]


class LabelledDebug(object):
    """A Debug writer, but with labels for prepending to the messages."""

    def __init__(self, label, debug, prev_labels=()):
        self.labels = list(prev_labels) + [label]
        self.debug = debug

    def add_label(self, label):
        """Add a label to the writer, and return a new `LabelledDebug`."""
        return LabelledDebug(label, self.debug, self.labels)

    def message_prefix(self):
        """The prefix to use on messages, combining the labels."""
        prefixes = self.labels + ['']
        return ":\n".join(" "*i+label for i, label in enumerate(prefixes))

    def write(self, message):
        """Write `message`, but with the labels prepended."""
        self.debug.write("%s%s" % (self.message_prefix(), message))


class DebugPluginWrapper(CoveragePlugin):
    """Wrap a plugin, and use debug to report on what it's doing."""

    def __init__(self, plugin, debug):
        super(DebugPluginWrapper, self).__init__()
        self.plugin = plugin
        self.debug = debug

    def file_tracer(self, filename):
        tracer = self.plugin.file_tracer(filename)
        self.debug.write("file_tracer(%r) --> %r" % (filename, tracer))
        if tracer:
            debug = self.debug.add_label("file %r" % (filename,))
            tracer = DebugFileTracerWrapper(tracer, debug)
        return tracer

    def file_reporter(self, filename):
        reporter = self.plugin.file_reporter(filename)
        self.debug.write("file_reporter(%r) --> %r" % (filename, reporter))
        if reporter:
            debug = self.debug.add_label("file %r" % (filename,))
            reporter = DebugFileReporterWrapper(filename, reporter, debug)
        return reporter

    def sys_info(self):
        return self.plugin.sys_info()


class DebugFileTracerWrapper(FileTracer):
    """A debugging `FileTracer`."""

    def __init__(self, tracer, debug):
        self.tracer = tracer
        self.debug = debug

    def _show_frame(self, frame):
        """A short string identifying a frame, for debug messages."""
        return "%s@%d" % (
            os.path.basename(frame.f_code.co_filename),
            frame.f_lineno,
        )

    def source_filename(self):
        sfilename = self.tracer.source_filename()
        self.debug.write("source_filename() --> %r" % (sfilename,))
        return sfilename

    def has_dynamic_source_filename(self):
        has = self.tracer.has_dynamic_source_filename()
        self.debug.write("has_dynamic_source_filename() --> %r" % (has,))
        return has

    def dynamic_source_filename(self, filename, frame):
        dyn = self.tracer.dynamic_source_filename(filename, frame)
        self.debug.write("dynamic_source_filename(%r, %s) --> %r" % (
            filename, self._show_frame(frame), dyn,
        ))
        return dyn

    def line_number_range(self, frame):
        pair = self.tracer.line_number_range(frame)
        self.debug.write("line_number_range(%s) --> %r" % (self._show_frame(frame), pair))
        return pair


class DebugFileReporterWrapper(FileReporter):
    """A debugging `FileReporter`."""

    def __init__(self, filename, reporter, debug):
        super(DebugFileReporterWrapper, self).__init__(filename)
        self.reporter = reporter
        self.debug = debug

    def relative_filename(self):
        ret = self.reporter.relative_filename()
        self.debug.write("relative_filename() --> %r" % (ret,))
        return ret

    def lines(self):
        ret = self.reporter.lines()
        self.debug.write("lines() --> %r" % (ret,))
        return ret

    def excluded_lines(self):
        ret = self.reporter.excluded_lines()
        self.debug.write("excluded_lines() --> %r" % (ret,))
        return ret

    def translate_lines(self, lines):
        ret = self.reporter.translate_lines(lines)
        self.debug.write("translate_lines(%r) --> %r" % (lines, ret))
        return ret

    def translate_arcs(self, arcs):
        ret = self.reporter.translate_arcs(arcs)
        self.debug.write("translate_arcs(%r) --> %r" % (arcs, ret))
        return ret

    def no_branch_lines(self):
        ret = self.reporter.no_branch_lines()
        self.debug.write("no_branch_lines() --> %r" % (ret,))
        return ret

    def exit_counts(self):
        ret = self.reporter.exit_counts()
        self.debug.write("exit_counts() --> %r" % (ret,))
        return ret

    def arcs(self):
        ret = self.reporter.arcs()
        self.debug.write("arcs() --> %r" % (ret,))
        return ret

    def source(self):
        ret = self.reporter.source()
        self.debug.write("source() --> %d chars" % (len(ret),))
        return ret

    def source_token_lines(self):
        ret = list(self.reporter.source_token_lines())
        self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
        return ret

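The effect of the label stack is easiest to see in isolation. With the
single-space indent used by `message_prefix`, nesting two labels produces
output like the comment below (the stub writer and the message are invented)::

    class StubDebug(object):
        """A stand-in for coverage.py's debug writer."""
        def write(self, message):
            print(message)

    from coverage.plugin_support import LabelledDebug

    d = LabelledDebug("plugin 'mymod.TemplatePlugin'", StubDebug())
    d.add_label("file 'page.tmpl'").write("lines() --> set([1, 2, 5])")
    # plugin 'mymod.TemplatePlugin':
    #  file 'page.tmpl':
    #   lines() --> set([1, 2, 5])
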
python/helpers/coveragepy/coverage/python.py (new file, 207 lines)
@@ -0,0 +1,207 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Python source expertise for coverage.py"""

import os.path
import types
import zipimport

from coverage import env, files
from coverage.misc import (
    contract, CoverageException, expensive, NoSource, join_regex, isolate_module,
)
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter

os = isolate_module(os)


@contract(returns='bytes')
def read_python_source(filename):
    """Read the Python source text from `filename`.

    Returns bytes.

    """
    with open(filename, "rb") as f:
        return f.read().replace(b"\r\n", b"\n").replace(b"\r", b"\n")


@contract(returns='unicode')
def get_python_source(filename):
    """Return the source code, as unicode."""
    base, ext = os.path.splitext(filename)
    if ext == ".py" and env.WINDOWS:
        exts = [".py", ".pyw"]
    else:
        exts = [ext]

    for ext in exts:
        try_filename = base + ext
        if os.path.exists(try_filename):
            # A regular text file: open it.
            source = read_python_source(try_filename)
            break

        # Maybe it's in a zip file?
        source = get_zip_bytes(try_filename)
        if source is not None:
            break
    else:
        # Couldn't find source.
        exc_msg = "No source for code: '%s'.\n" % (filename,)
        exc_msg += "Aborting report output, consider using -i."
        raise NoSource(exc_msg)

    # Replace \f because of http://bugs.python.org/issue19035
    source = source.replace(b'\f', b' ')
    source = source.decode(source_encoding(source), "replace")

    # Python code should always end with a line with a newline.
    if source and source[-1] != '\n':
        source += '\n'

    return source


@contract(returns='bytes|None')
def get_zip_bytes(filename):
    """Get data from `filename` if it is a zip file path.

    Returns the bytestring data read from the zip file, or None if no zip file
    could be found or `filename` isn't in it. The data returned will be
    an empty string if the file is empty.

    """
    markers = ['.zip'+os.sep, '.egg'+os.sep]
    for marker in markers:
        if marker in filename:
            parts = filename.split(marker)
            try:
                zi = zipimport.zipimporter(parts[0]+marker[:-1])
            except zipimport.ZipImportError:
                continue
            try:
                data = zi.get_data(parts[1])
            except IOError:
                continue
            return data
    return None


class PythonFileReporter(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf, coverage=None):
        self.coverage = coverage

        if hasattr(morf, '__file__'):
            filename = morf.__file__
        elif isinstance(morf, types.ModuleType):
            # A module should have had .__file__, otherwise we can't use it.
            # This could be a PEP-420 namespace package.
            raise CoverageException("Module {0} has no file".format(morf))
        else:
            filename = morf

        filename = files.unicode_filename(filename)

        # .pyc files should always refer to a .py instead.
        if filename.endswith(('.pyc', '.pyo')):
            filename = filename[:-1]
        elif filename.endswith('$py.class'):   # Jython
            filename = filename[:-9] + ".py"

        super(PythonFileReporter, self).__init__(files.canonical_filename(filename))

        if hasattr(morf, '__name__'):
            name = morf.__name__
            name = name.replace(".", os.sep) + ".py"
            name = files.unicode_filename(name)
        else:
            name = files.relative_filename(filename)
        self.relname = name

        self._source = None
        self._parser = None
        self._statements = None
        self._excluded = None

    @contract(returns='unicode')
    def relative_filename(self):
        return self.relname

    @property
    def parser(self):
        """Lazily create a :class:`PythonParser`."""
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex('exclude'),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self):
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def excluded_lines(self):
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines):
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs):
        return self.parser.translate_arcs(arcs)

    @expensive
    def no_branch_lines(self):
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list),
            join_regex(self.coverage.config.partial_always_list)
        )
        return no_branch

    @expensive
    def arcs(self):
        return self.parser.arcs()

    @expensive
    def exit_counts(self):
        return self.parser.exit_counts()

    def missing_arc_description(self, start, end, executed_arcs=None):
        return self.parser.missing_arc_description(start, end, executed_arcs)

    @contract(returns='unicode')
    def source(self):
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self):
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.

        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith('.py'):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self):
        return source_token_lines(self.source())

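The marker logic in `get_zip_bytes` simply carves the path at the first
`.zip/` or `.egg/` component. For example (path invented, POSIX separators
assumed)::

    import os

    filename = "/site-packages/mylib.egg/mylib/mod.py"
    marker = '.egg' + os.sep
    parts = filename.split(marker)
    zip_path = parts[0] + marker[:-1]    # "/site-packages/mylib.egg"
    member = parts[1]                    # "mylib/mod.py"
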
python/helpers/coveragepy/coverage/pytracer.py (new file, 155 lines)
@@ -0,0 +1,155 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Raw data collector for coverage.py."""

import dis
import sys

from coverage import env

# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
YIELD_VALUE = dis.opmap['YIELD_VALUE']
if env.PY2:
    YIELD_VALUE = chr(YIELD_VALUE)


class PyTracer(object):
    """Python implementation of the raw data tracer."""

    # Because of poor implementations of trace-function-manipulating tools,
    # the Python trace function must be kept very simple. In particular, there
    # must be only one function ever set as the trace function, both through
    # sys.settrace, and as the return value from the trace function. Put
    # another way, the trace function must always return itself. It cannot
    # swap in other functions, or return None to avoid tracing a particular
    # frame.
    #
    # The trace manipulator that introduced this restriction is DecoratorTools,
    # which sets a trace function, and then later restores the pre-existing one
    # by calling sys.settrace with a function it found in the current frame.
    #
    # Systems that use DecoratorTools (or similar trace manipulations) must use
    # PyTracer to get accurate results. The command-line --timid argument is
    # used to force the use of this tracer.

    def __init__(self):
        # Attributes set from the collector:
        self.data = None
        self.trace_arcs = False
        self.should_trace = None
        self.should_trace_cache = None
        self.warn = None
        # The threading module to use, if any.
        self.threading = None

        self.cur_file_dict = []
        self.last_line = [0]

        self.data_stack = []
        self.last_exc_back = None
        self.last_exc_firstlineno = 0
        self.thread = None
        self.stopped = False

    def __repr__(self):
        return "<PyTracer at 0x{0:0x}: {1} lines in {2} files>".format(
            id(self),
            sum(len(v) for v in self.data.values()),
            len(self.data),
        )

    def _trace(self, frame, event, arg_unused):
        """The trace function passed to sys.settrace."""

        if self.stopped:
            return

        if self.last_exc_back:
            if frame == self.last_exc_back:
                # Someone forgot a return event.
                if self.trace_arcs and self.cur_file_dict:
                    pair = (self.last_line, -self.last_exc_firstlineno)
                    self.cur_file_dict[pair] = None
                self.cur_file_dict, self.last_line = self.data_stack.pop()
            self.last_exc_back = None

        if event == 'call':
            # Entering a new function context. Decide if we should trace
            # in this file.
            self.data_stack.append((self.cur_file_dict, self.last_line))
            filename = frame.f_code.co_filename
            disp = self.should_trace_cache.get(filename)
            if disp is None:
                disp = self.should_trace(filename, frame)
                self.should_trace_cache[filename] = disp

            self.cur_file_dict = None
            if disp.trace:
                tracename = disp.source_filename
                if tracename not in self.data:
                    self.data[tracename] = {}
                self.cur_file_dict = self.data[tracename]
            # The call event is really a "start frame" event, and happens for
            # function calls and re-entering generators. The f_lasti field is
            # -1 for calls, and a real offset for generators. Use <0 as the
            # line number for calls, and the real line number for generators.
            if frame.f_lasti < 0:
                self.last_line = -frame.f_code.co_firstlineno
            else:
                self.last_line = frame.f_lineno
        elif event == 'line':
            # Record an executed line.
            if self.cur_file_dict is not None:
                lineno = frame.f_lineno
                if self.trace_arcs:
                    self.cur_file_dict[(self.last_line, lineno)] = None
                else:
                    self.cur_file_dict[lineno] = None
                self.last_line = lineno
        elif event == 'return':
            if self.trace_arcs and self.cur_file_dict:
                # Record an arc leaving the function, but beware that a
                # "return" event might just mean yielding from a generator.
                bytecode = frame.f_code.co_code[frame.f_lasti]
                if bytecode != YIELD_VALUE:
                    first = frame.f_code.co_firstlineno
                    self.cur_file_dict[(self.last_line, -first)] = None
            # Leaving this function, pop the filename stack.
            self.cur_file_dict, self.last_line = self.data_stack.pop()
        elif event == 'exception':
            self.last_exc_back = frame.f_back
            self.last_exc_firstlineno = frame.f_code.co_firstlineno
        return self._trace

    def start(self):
        """Start this Tracer.

        Return a Python function suitable for use with sys.settrace().

        """
        if self.threading:
            self.thread = self.threading.currentThread()
        sys.settrace(self._trace)
        self.stopped = False
        return self._trace

    def stop(self):
        """Stop this Tracer."""
        self.stopped = True
        if self.threading and self.thread.ident != self.threading.currentThread().ident:
            # Called on a different thread than started us: we can't unhook
            # ourselves, but we've set the flag that we should stop, so we
            # won't do any more tracing.
            return

        if self.warn:
            if sys.gettrace() != self._trace:
                msg = "Trace function changed, measurement is likely wrong: %r"
                self.warn(msg % (sys.gettrace(),))

        sys.settrace(None)

    def get_stats(self):
        """Return a dictionary of statistics, or None."""
        return None

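The heart of PyTracer is the `sys.settrace` protocol used in `_trace`: always
return the trace function itself, and record something on each 'line' event.
A stripped-down line collector using the same protocol (the traced function is
invented for the demo)::

    import sys

    executed = set()

    def tracer(frame, event, arg):
        if event == 'line':
            executed.add((frame.f_code.co_filename, frame.f_lineno))
        return tracer      # the trace function must always return itself

    def demo():
        total = 0
        for i in range(3):
            total += i
        return total

    sys.settrace(tracer)
    demo()
    sys.settrace(None)
    print(sorted(executed))
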
@@ -1,9 +1,16 @@
"""Reporter foundation for Coverage."""
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Reporter foundation for coverage.py."""

import os
import warnings

from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import CoverageException, NoSource, NotPython, isolate_module

os = isolate_module(os)

import fnmatch, os
from coverage.codeunit import code_unit_factory
from coverage.files import prep_patterns
from coverage.misc import CoverageException, NoSource, NotPython

class Reporter(object):
    """A base class for all reporters."""
@@ -18,45 +25,45 @@ class Reporter(object):
        self.coverage = coverage
        self.config = config

        # The code units to report on. Set by find_code_units.
        self.code_units = []

        # The directory into which to place the report, used by some derived
        # classes.
        self.directory = None

    def find_code_units(self, morfs):
        """Find the code units we'll report on.
        # Our method find_file_reporters used to set an attribute that other
        # code could read. That's been refactored away, but some third parties
        # were using that attribute. We'll continue to support it in a noisy
        # way for now.
        self._file_reporters = []

    @property
    def file_reporters(self):
        """Keep .file_reporters working for private-grabbing tools."""
        warnings.warn(
            "Report.file_reporters will no longer be available in Coverage.py 4.2",
            DeprecationWarning,
        )
        return self._file_reporters

    def find_file_reporters(self, morfs):
        """Find the FileReporters we'll report on.

        `morfs` is a list of modules or file names.

        Returns a list of FileReporters.

        """
        morfs = morfs or self.coverage.data.measured_files()
        file_locator = self.coverage.file_locator
        self.code_units = code_unit_factory(morfs, file_locator)
        reporters = self.coverage._get_file_reporters(morfs)

        if self.config.include:
            patterns = prep_patterns(self.config.include)
            filtered = []
            for cu in self.code_units:
                for pattern in patterns:
                    if fnmatch.fnmatch(cu.filename, pattern):
                        filtered.append(cu)
                        break
            self.code_units = filtered
            matcher = FnmatchMatcher(prep_patterns(self.config.include))
            reporters = [fr for fr in reporters if matcher.match(fr.filename)]

        if self.config.omit:
            patterns = prep_patterns(self.config.omit)
            filtered = []
            for cu in self.code_units:
                for pattern in patterns:
                    if fnmatch.fnmatch(cu.filename, pattern):
                        break
                else:
                    filtered.append(cu)
            self.code_units = filtered
            matcher = FnmatchMatcher(prep_patterns(self.config.omit))
            reporters = [fr for fr in reporters if not matcher.match(fr.filename)]

        self.code_units.sort()
        self._file_reporters = sorted(reporters)
        return self._file_reporters

    def report_files(self, report_fn, morfs, directory=None):
        """Run a reporting function on a number of morfs.
@@ -64,29 +71,34 @@ class Reporter(object):
        `report_fn` is called for each relative morf in `morfs`. It is called
        as::

            report_fn(code_unit, analysis)
            report_fn(file_reporter, analysis)

        where `code_unit` is the `CodeUnit` for the morf, and `analysis` is
        the `Analysis` for the morf.
        where `file_reporter` is the `FileReporter` for the morf, and
        `analysis` is the `Analysis` for the morf.

        """
        self.find_code_units(morfs)
        file_reporters = self.find_file_reporters(morfs)

        if not self.code_units:
        if not file_reporters:
            raise CoverageException("No data to report.")

        self.directory = directory
        if self.directory and not os.path.exists(self.directory):
            os.makedirs(self.directory)

        for cu in self.code_units:
        for fr in file_reporters:
            try:
                report_fn(cu, self.coverage._analyze(cu))
                report_fn(fr, self.coverage._analyze(fr))
            except NoSource:
                if not self.config.ignore_errors:
                    raise
            except NotPython:
                # Only report errors for .py files, and only if we didn't
                # explicitly suppress those errors.
                if cu.should_be_python() and not self.config.ignore_errors:
                # NotPython is only raised by PythonFileReporter, which has a
                # should_be_python() method.
                if fr.should_be_python():
                    if self.config.ignore_errors:
                        self.coverage._warn("Could not parse Python file {0}".format(fr.filename))
                    else:
                        raise

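The include/omit rewrite in this hunk replaces two hand-rolled fnmatch loops
with `FnmatchMatcher`, but the semantics are unchanged: keep files matching
any include pattern, then drop files matching any omit pattern. In miniature
(file names and patterns invented)::

    import fnmatch

    names = ["pkg/a.py", "pkg/tests/test_a.py", "other/b.py"]
    include = ["pkg/*"]
    omit = ["*/tests/*"]

    def matches(name, patterns):
        return any(fnmatch.fnmatch(name, p) for p in patterns)

    names = [n for n in names if matches(n, include)]
    names = [n for n in names if not matches(n, omit)]
    print(names)    # ['pkg/a.py']
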
@@ -1,47 +1,42 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Results of coverage measurement."""

import os
import collections

from coverage.backward import iitems, set, sorted    # pylint: disable=W0622
from coverage.misc import format_lines, join_regex, NoSource
from coverage.parser import CodeParser
from coverage.backward import iitems
from coverage.misc import format_lines, SimpleRepr


class Analysis(object):
    """The results of analyzing a code unit."""
    """The results of analyzing a FileReporter."""

    def __init__(self, cov, code_unit):
        self.coverage = cov
        self.code_unit = code_unit

        self.filename = self.code_unit.filename
        actual_filename, source = self.find_source(self.filename)

        self.parser = CodeParser(
            text=source, filename=actual_filename,
            exclude=self.coverage._exclude_regex('exclude')
        )
        self.statements, self.excluded = self.parser.parse_source()
    def __init__(self, data, file_reporter):
        self.data = data
        self.file_reporter = file_reporter
        self.filename = self.file_reporter.filename
        self.statements = self.file_reporter.lines()
        self.excluded = self.file_reporter.excluded_lines()

        # Identify missing statements.
        executed = self.coverage.data.executed_lines(self.filename)
        exec1 = self.parser.first_lines(executed)
        self.missing = self.statements - exec1
        executed = self.data.lines(self.filename) or []
        executed = self.file_reporter.translate_lines(executed)
        self.missing = self.statements - executed

        if self.coverage.data.has_arcs():
            self.no_branch = self.parser.lines_matching(
                join_regex(self.coverage.config.partial_list),
                join_regex(self.coverage.config.partial_always_list)
            )
        if self.data.has_arcs():
            self._arc_possibilities = sorted(self.file_reporter.arcs())
            self.exit_counts = self.file_reporter.exit_counts()
            self.no_branch = self.file_reporter.no_branch_lines()
            n_branches = self.total_branches()
            mba = self.missing_branch_arcs()
            n_partial_branches = sum(
                [len(v) for k,v in iitems(mba) if k not in self.missing]
            )
            n_missing_branches = sum([len(v) for k,v in iitems(mba)])
            n_partial_branches = sum(len(v) for k,v in iitems(mba) if k not in self.missing)
            n_missing_branches = sum(len(v) for k,v in iitems(mba))
        else:
            n_branches = n_partial_branches = n_missing_branches = 0
            self._arc_possibilities = []
            self.exit_counts = {}
            self.no_branch = set()
            n_branches = n_partial_branches = n_missing_branches = 0

        self.numbers = Numbers(
            n_files=1,
@@ -53,44 +48,6 @@ class Analysis(object):
            n_missing_branches=n_missing_branches,
        )

    def find_source(self, filename):
        """Find the source for `filename`.

        Returns two values: the actual filename, and the source.

        The source returned depends on which of these cases holds:

        * The filename seems to be a non-source file: returns None

        * The filename is a source file, and actually exists: returns None.

        * The filename is a source file, and is in a zip file or egg:
          returns the source.

        * The filename is a source file, but couldn't be found: raises
          `NoSource`.

        """
        source = None

        base, ext = os.path.splitext(filename)
        TRY_EXTS = {
            '.py':  ['.py', '.pyw'],
            '.pyw': ['.pyw'],
        }
        try_exts = TRY_EXTS.get(ext)
        if not try_exts:
            return filename, None

        for try_ext in try_exts:
            try_filename = base + try_ext
            if os.path.exists(try_filename):
                return try_filename, None
            source = self.coverage.file_locator.get_zip_data(try_filename)
            if source:
                return try_filename, source
        raise NoSource("No source for code: '%s'" % filename)

    def missing_formatted(self):
        """The missing line numbers, formatted nicely.

@@ -101,31 +58,47 @@ class Analysis(object):

    def has_arcs(self):
        """Were arcs measured in this result?"""
        return self.coverage.data.has_arcs()
        return self.data.has_arcs()

    def arc_possibilities(self):
        """Returns a sorted list of the arcs in the code."""
        arcs = self.parser.arcs()
        return arcs
        return self._arc_possibilities

    def arcs_executed(self):
        """Returns a sorted list of the arcs actually executed in the code."""
        executed = self.coverage.data.executed_arcs(self.filename)
        m2fl = self.parser.first_line
        executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed]
        executed = self.data.arcs(self.filename) or []
        executed = self.file_reporter.translate_arcs(executed)
        return sorted(executed)

    def arcs_missing(self):
        """Returns a sorted list of the arcs in the code not executed."""
        possible = self.arc_possibilities()
        executed = self.arcs_executed()
        missing = [
        missing = (
            p for p in possible
            if p not in executed
            and p[0] not in self.no_branch
        ]
        )
        return sorted(missing)

    def arcs_missing_formatted(self):
        """The missing branch arcs, formatted nicely.

        Returns a string like "1->2, 1->3, 16->20". Omits any mention of
        branches from missing lines, so if line 17 is missing, then 17->18
        won't be included.

        """
        arcs = self.missing_branch_arcs()
        missing = self.missing
        line_exits = sorted(iitems(arcs))
        pairs = []
        for line, exits in line_exits:
            for ex in sorted(exits):
                if line not in missing:
                    pairs.append("%d->%s" % (line, (ex if ex > 0 else "exit")))
        return ', '.join(pairs)

    def arcs_unpredicted(self):
        """Returns a sorted list of the executed arcs missing from the code."""
        possible = self.arc_possibilities()
@@ -133,22 +106,23 @@ class Analysis(object):
        # Exclude arcs here which connect a line to itself. They can occur
        # in executed data in some cases. This is where they can cause
        # trouble, and here is where it's the least burden to remove them.
        unpredicted = [
        # Also, generators can somehow cause arcs from "enter" to "exit", so
        # make sure we have at least one positive value.
        unpredicted = (
            e for e in executed
            if e not in possible
            and e[0] != e[1]
        ]
            and (e[0] > 0 or e[1] > 0)
        )
        return sorted(unpredicted)

    def branch_lines(self):
        """Returns a list of line numbers that have more than one exit."""
        exit_counts = self.parser.exit_counts()
        return [l1 for l1,count in iitems(exit_counts) if count > 1]
        return [l1 for l1,count in iitems(self.exit_counts) if count > 1]

    def total_branches(self):
        """How many total branches are there?"""
        exit_counts = self.parser.exit_counts()
        return sum([count for count in exit_counts.values() if count > 1])
        return sum(count for count in self.exit_counts.values() if count > 1)

    def missing_branch_arcs(self):
        """Return arcs that weren't executed from branch lines.
@@ -158,11 +132,9 @@ class Analysis(object):
        """
        missing = self.arcs_missing()
        branch_lines = set(self.branch_lines())
        mba = {}
        mba = collections.defaultdict(list)
        for l1, l2 in missing:
            if l1 in branch_lines:
                if l1 not in mba:
                    mba[l1] = []
                mba[l1].append(l2)
        return mba

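The `missing_branch_arcs` change above is the standard grouping idiom:
`collections.defaultdict(list)` removes the "create the list on first use"
dance. With invented arcs::

    import collections

    missing = [(3, 4), (3, 8), (10, 11)]
    branch_lines = {3, 10}

    mba = collections.defaultdict(list)
    for l1, l2 in missing:
        if l1 in branch_lines:
            mba[l1].append(l2)
    print(dict(mba))    # {3: [4, 8], 10: [11]}
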
@@ -173,11 +145,10 @@ class Analysis(object):
        (total_exits, taken_exits).
        """

        exit_counts = self.parser.exit_counts()
        missing_arcs = self.missing_branch_arcs()
        stats = {}
        for lnum in self.branch_lines():
            exits = exit_counts[lnum]
            exits = self.exit_counts[lnum]
            try:
                missing = len(missing_arcs[lnum])
            except KeyError:
@@ -186,7 +157,7 @@ class Analysis(object):
        return stats


class Numbers(object):
class Numbers(SimpleRepr):
    """The numerical results of measuring coverage.

    This holds the basic statistics from `Analysis`, and is used to roll
@@ -210,35 +181,43 @@ class Numbers(object):
        self.n_partial_branches = n_partial_branches
        self.n_missing_branches = n_missing_branches

    def init_args(self):
        """Return a list for __init__(*args) to recreate this object."""
        return [
            self.n_files, self.n_statements, self.n_excluded, self.n_missing,
            self.n_branches, self.n_partial_branches, self.n_missing_branches,
        ]

    @classmethod
    def set_precision(cls, precision):
        """Set the number of decimal places used to report percentages."""
        assert 0 <= precision < 10
        cls._precision = precision
        cls._near0 = 1.0 / 10**precision
        cls._near100 = 100.0 - cls._near0
    set_precision = classmethod(set_precision)

    def _get_n_executed(self):
    @property
    def n_executed(self):
        """Returns the number of executed statements."""
        return self.n_statements - self.n_missing
    n_executed = property(_get_n_executed)

    def _get_n_executed_branches(self):
    @property
    def n_executed_branches(self):
        """Returns the number of executed branches."""
        return self.n_branches - self.n_missing_branches
    n_executed_branches = property(_get_n_executed_branches)

    def _get_pc_covered(self):
    @property
    def pc_covered(self):
        """Returns a single percentage value for coverage."""
        if self.n_statements > 0:
            pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) /
                      (self.n_statements + self.n_branches))
            numerator, denominator = self.ratio_covered
            pc_cov = (100.0 * numerator) / denominator
        else:
            pc_cov = 100.0
        return pc_cov
    pc_covered = property(_get_pc_covered)

    def _get_pc_covered_str(self):
    @property
    def pc_covered_str(self):
        """Returns the percent covered, as a string, without a percent sign.

        Note that "0" is only returned when the value is truly zero, and "100"
@@ -254,15 +233,21 @@ class Numbers(object):
        else:
            pc = round(pc, self._precision)
        return "%.*f" % (self._precision, pc)
    pc_covered_str = property(_get_pc_covered_str)

    @classmethod
    def pc_str_width(cls):
        """How many characters wide can pc_covered_str be?"""
        width = 3   # "100"
        if cls._precision > 0:
            width += 1 + cls._precision
        return width
    pc_str_width = classmethod(pc_str_width)

    @property
    def ratio_covered(self):
        """Return a numerator and denominator for the coverage ratio."""
        numerator = self.n_executed + self.n_executed_branches
        denominator = self.n_statements + self.n_branches
        return numerator, denominator

    def __add__(self, other):
        nums = Numbers()

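After the refactor, `pc_covered` is just the `ratio_covered` pair expressed as
a percentage. Numerically, with invented counts::

    n_statements, n_missing = 100, 15
    n_branches, n_missing_branches = 20, 4

    n_executed = n_statements - n_missing                     # 85
    n_executed_branches = n_branches - n_missing_branches     # 16

    numerator = n_executed + n_executed_branches              # 101
    denominator = n_statements + n_branches                   # 120
    print((100.0 * numerator) / denominator)                  # 84.166...
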
@@ -1,10 +1,14 @@
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
|
||||
|
||||
"""Summary reporting"""
|
||||
|
||||
import sys
|
||||
|
||||
from coverage import env
|
||||
from coverage.report import Reporter
|
||||
from coverage.results import Numbers
from coverage.misc import NotPython
from coverage.misc import NotPython, CoverageException, output_encoding


class SummaryReporter(Reporter):
@@ -17,70 +21,125 @@ class SummaryReporter(Reporter):
def report(self, morfs, outfile=None):
"""Writes a report summarizing coverage statistics per module.

`outfile` is a file object to write the summary to.
`outfile` is a file object to write the summary to. It must be opened
for native strings (bytes on Python 2, Unicode on Python 3).

"""
self.find_code_units(morfs)
file_reporters = self.find_file_reporters(morfs)

# Prepare the formatting strings
max_name = max([len(cu.name) for cu in self.code_units] + [5])
fmt_name = "%%- %ds " % max_name
fmt_err = "%s %s: %s\n"
header = (fmt_name % "Name") + " Stmts Miss"
fmt_coverage = fmt_name + "%6d %6d"
# Prepare the formatting strings, header, and column sorting.
max_name = max([len(fr.relative_filename()) for fr in file_reporters] + [5])
fmt_name = u"%%- %ds " % max_name
fmt_err = u"%s %s: %s"
fmt_skip_covered = u"\n%s file%s skipped due to complete coverage."

header = (fmt_name % "Name") + u" Stmts Miss"
fmt_coverage = fmt_name + u"%6d %6d"
if self.branches:
header += " Branch BrMiss"
fmt_coverage += " %6d %6d"
header += u" Branch BrPart"
fmt_coverage += u" %6d %6d"
width100 = Numbers.pc_str_width()
header += "%*s" % (width100+4, "Cover")
fmt_coverage += "%%%ds%%%%" % (width100+3,)
header += u"%*s" % (width100+4, "Cover")
fmt_coverage += u"%%%ds%%%%" % (width100+3,)
if self.config.show_missing:
header += " Missing"
fmt_coverage += " %s"
rule = "-" * len(header) + "\n"
header += "\n"
fmt_coverage += "\n"
header += u" Missing"
fmt_coverage += u" %s"
rule = u"-" * len(header)

if not outfile:
column_order = dict(name=0, stmts=1, miss=2, cover=-1)
if self.branches:
column_order.update(dict(branch=3, brpart=4))

if outfile is None:
outfile = sys.stdout

def writeout(line):
"""Write a line to the output, adding a newline."""
if env.PY2:
line = line.encode(output_encoding())
outfile.write(line.rstrip())
outfile.write("\n")

# Write the header
outfile.write(header)
outfile.write(rule)
writeout(header)
writeout(rule)

# `lines` is a list of pairs, (line text, line values). The line text
# is a string that will be printed, and line values is a tuple of
# sortable values.
lines = []

total = Numbers()
skipped_count = 0

for cu in self.code_units:
for fr in file_reporters:
try:
analysis = self.coverage._analyze(cu)
analysis = self.coverage._analyze(fr)
nums = analysis.numbers
args = (cu.name, nums.n_statements, nums.n_missing)
total += nums

if self.config.skip_covered:
# Don't report on 100% files.
no_missing_lines = (nums.n_missing == 0)
no_missing_branches = (nums.n_partial_branches == 0)
if no_missing_lines and no_missing_branches:
skipped_count += 1
continue

args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
if self.branches:
args += (nums.n_branches, nums.n_missing_branches)
args += (nums.n_branches, nums.n_partial_branches)
args += (nums.pc_covered_str,)
if self.config.show_missing:
args += (analysis.missing_formatted(),)
outfile.write(fmt_coverage % args)
total += nums
except KeyboardInterrupt: # pragma: not covered
raise
except:
missing_fmtd = analysis.missing_formatted()
if self.branches:
branches_fmtd = analysis.arcs_missing_formatted()
if branches_fmtd:
if missing_fmtd:
missing_fmtd += ", "
missing_fmtd += branches_fmtd
args += (missing_fmtd,)
text = fmt_coverage % args
# Add numeric percent coverage so that sorting makes sense.
args += (nums.pc_covered,)
lines.append((text, args))
except Exception:
report_it = not self.config.ignore_errors
if report_it:
typ, msg = sys.exc_info()[:2]
if typ is NotPython and not cu.should_be_python():
# NotPython is only raised by PythonFileReporter, which has a
# should_be_python() method.
if typ is NotPython and not fr.should_be_python():
report_it = False
if report_it:
outfile.write(fmt_err % (cu.name, typ.__name__, msg))
writeout(fmt_err % (fr.relative_filename(), typ.__name__, msg))

# Sort the lines and write them out.
if getattr(self.config, 'sort', None):
position = column_order.get(self.config.sort.lower())
if position is None:
raise CoverageException("Invalid sorting option: {0!r}".format(self.config.sort))
lines.sort(key=lambda l: (l[1][position], l[0]))

for line in lines:
writeout(line[0])

# Write a TOTAL line if we had more than one file.
if total.n_files > 1:
outfile.write(rule)
writeout(rule)
args = ("TOTAL", total.n_statements, total.n_missing)
if self.branches:
args += (total.n_branches, total.n_missing_branches)
args += (total.n_branches, total.n_partial_branches)
args += (total.pc_covered_str,)
if self.config.show_missing:
args += ("",)
outfile.write(fmt_coverage % args)
writeout(fmt_coverage % args)

return total.pc_covered
# Write other final lines.
if not total.n_files and not skipped_count:
raise CoverageException("No data to report.")

if self.config.skip_covered and skipped_count:
writeout(fmt_skip_covered % (skipped_count, 's' if skipped_count > 1 else ''))

return total.n_statements and total.pc_covered

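A minimal sketch of the new column sorting, with hypothetical rows that are not part of the diff: each report row is kept as (text, sortable values), and `column_order` maps the `sort` option to an index into those values.

    column_order = dict(name=0, stmts=1, miss=2, cover=-1)
    lines = [
        ("mod_a.py 10 2 80%", ("mod_a.py", 10, 2, 80.0)),
        ("mod_b.py 5 0 100%", ("mod_b.py", 5, 0, 100.0)),
    ]
    position = column_order["miss"]
    lines.sort(key=lambda l: (l[1][position], l[0]))  # mod_b.py sorts first
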
@@ -1,10 +1,28 @@
"""A simple Python template renderer, for a nano-subset of Django syntax."""
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""A simple Python template renderer, for a nano-subset of Django syntax.

For a detailed discussion of this code, see this chapter from 500 Lines:
http://aosabook.org/en/500L/a-template-engine.html

"""

# Coincidentally named the same as http://code.activestate.com/recipes/496702/

import re

from coverage.backward import set # pylint: disable=W0622
from coverage import env


class TempliteSyntaxError(ValueError):
"""Raised when a template has a syntax error."""
pass


class TempliteValueError(ValueError):
"""Raised when an expression won't evaluate in a template."""
pass


class CodeBuilder(object):
@@ -12,42 +30,45 @@ class CodeBuilder(object):

def __init__(self, indent=0):
self.code = []
self.indent_amount = indent
self.indent_level = indent

def __str__(self):
return "".join(str(c) for c in self.code)

def add_line(self, line):
"""Add a line of source to the code.

Don't include indentations or newlines.
Indentation and newline will be added for you, don't provide them.

"""
self.code.append(" " * self.indent_amount)
self.code.append(line)
self.code.append("\n")
self.code.extend([" " * self.indent_level, line, "\n"])

def add_section(self):
"""Add a section, a sub-CodeBuilder."""
sect = CodeBuilder(self.indent_amount)
self.code.append(sect)
return sect
section = CodeBuilder(self.indent_level)
self.code.append(section)
return section

INDENT_STEP = 4 # PEP8 says so!

def indent(self):
"""Increase the current indent for following lines."""
self.indent_amount += 4
self.indent_level += self.INDENT_STEP

def dedent(self):
"""Decrease the current indent for following lines."""
self.indent_amount -= 4
self.indent_level -= self.INDENT_STEP

def __str__(self):
return "".join([str(c) for c in self.code])

def get_function(self, fn_name):
"""Compile the code, and return the function `fn_name`."""
assert self.indent_amount == 0
g = {}
code_text = str(self)
exec(code_text, g)
return g[fn_name]
def get_globals(self):
"""Execute the code, and return a dict of globals it defines."""
# A check that the caller really finished all the blocks they started.
assert self.indent_level == 0
# Get the Python source as a single string.
python_source = str(self)
# Execute the source, defining globals, and return them.
global_namespace = {}
exec(python_source, global_namespace)
return global_namespace


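A small usage sketch of the reworked CodeBuilder, inferred from the diff above (behavior assumed, not part of the commit): lines accumulate at the current indent level, and get_globals() exec's the assembled source and hands back the names it defined.

    builder = CodeBuilder()
    builder.add_line("def double(x):")
    builder.indent()
    builder.add_line("return x * 2")
    builder.dedent()
    double = builder.get_globals()['double']
    assert double(21) == 42
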
class Templite(object):
@@ -55,7 +76,7 @@ class Templite(object):

Supported constructs are extended variable access::

{{var.modifer.modifier|filter|filter}}
{{var.modifier.modifier|filter|filter}}

loops::

@@ -69,8 +90,24 @@ class Templite(object):

{# This will be ignored #}

Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
which will collapse the whitespace following the tag.

Construct a Templite with the template text, then use `render` against a
dictionary context to create a finished string.
dictionary context to create a finished string::

templite = Templite('''
<h1>Hello {{name|upper}}!</h1>
{% for topic in topics %}
<p>You are interested in {{topic}}.</p>
{% endfor %}
''',
{'upper': str.upper},
)
text = templite.render({
'name': "Ned",
'topics': ['Python', 'Geometry', 'Juggling'],
})

"""
def __init__(self, text, *contexts):
@@ -80,110 +117,151 @@ class Templite(object):
These are good for filters and global values.

"""
self.text = text
self.context = {}
for context in contexts:
self.context.update(context)

self.all_vars = set()
self.loop_vars = set()

# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
code = CodeBuilder()

code.add_line("def render(ctx, dot):")
code.add_line("def render_function(context, do_dots):")
code.indent()
vars_code = code.add_section()
self.all_vars = set()
self.loop_vars = set()
code.add_line("result = []")
code.add_line("a = result.append")
code.add_line("e = result.extend")
code.add_line("s = str")
code.add_line("append_result = result.append")
code.add_line("extend_result = result.extend")
if env.PY2:
code.add_line("to_str = unicode")
else:
code.add_line("to_str = str")

buffered = []

def flush_output():
"""Force `buffered` to the code builder."""
if len(buffered) == 1:
code.add_line("a(%s)" % buffered[0])
code.add_line("append_result(%s)" % buffered[0])
elif len(buffered) > 1:
code.add_line("e([%s])" % ",".join(buffered))
code.add_line("extend_result([%s])" % ", ".join(buffered))
del buffered[:]

# Split the text to form a list of tokens.
toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)

ops_stack = []
for tok in toks:
if tok.startswith('{{'):
# An expression to evaluate.
buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip()))
elif tok.startswith('{#'):

# Split the text to form a list of tokens.
tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)

squash = False

for token in tokens:
if token.startswith('{'):
start, end = 2, -2
squash = (token[-3] == '-')
if squash:
end = -3

if token.startswith('{#'):
# Comment: ignore it and move on.
continue
elif tok.startswith('{%'):
elif token.startswith('{{'):
# An expression to evaluate.
expr = self._expr_code(token[start:end].strip())
buffered.append("to_str(%s)" % expr)
else:
# token.startswith('{%')
# Action tag: split into words and parse further.
flush_output()
words = tok[2:-2].strip().split()

words = token[start:end].strip().split()
if words[0] == 'if':
# An if statement: evaluate the expression to determine if.
assert len(words) == 2
if len(words) != 2:
self._syntax_error("Don't understand if", token)
ops_stack.append('if')
code.add_line("if %s:" % self.expr_code(words[1]))
code.add_line("if %s:" % self._expr_code(words[1]))
code.indent()
elif words[0] == 'for':
# A loop: iterate over expression result.
assert len(words) == 4 and words[2] == 'in'
if len(words) != 4 or words[2] != 'in':
self._syntax_error("Don't understand for", token)
ops_stack.append('for')
self.loop_vars.add(words[1])
self._variable(words[1], self.loop_vars)
code.add_line(
"for c_%s in %s:" % (
words[1],
self.expr_code(words[3])
self._expr_code(words[3])
)
)
code.indent()
elif words[0].startswith('end'):
# Endsomething. Pop the ops stack
# Endsomething. Pop the ops stack.
if len(words) != 1:
self._syntax_error("Don't understand end", token)
end_what = words[0][3:]
if ops_stack[-1] != end_what:
raise SyntaxError("Mismatched end tag: %r" % end_what)
ops_stack.pop()
if not ops_stack:
self._syntax_error("Too many ends", token)
start_what = ops_stack.pop()
if start_what != end_what:
self._syntax_error("Mismatched end tag", end_what)
code.dedent()
else:
raise SyntaxError("Don't understand tag: %r" % words[0])
self._syntax_error("Don't understand tag", words[0])
else:
# Literal content. If it isn't empty, output it.
if tok:
buffered.append("%r" % tok)
if squash:
token = token.lstrip()
if token:
buffered.append(repr(token))

if ops_stack:
self._syntax_error("Unmatched action tag", ops_stack[-1])

flush_output()

for var_name in self.all_vars - self.loop_vars:
vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name))
vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))

if ops_stack:
raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1])

code.add_line("return ''.join(result)")
code.add_line('return "".join(result)')
code.dedent()
self.render_function = code.get_function('render')
self._render_function = code.get_globals()['render_function']

def expr_code(self, expr):
def _expr_code(self, expr):
"""Generate a Python expression for `expr`."""
if "|" in expr:
pipes = expr.split("|")
code = self.expr_code(pipes[0])
code = self._expr_code(pipes[0])
for func in pipes[1:]:
self.all_vars.add(func)
self._variable(func, self.all_vars)
code = "c_%s(%s)" % (func, code)
elif "." in expr:
dots = expr.split(".")
code = self.expr_code(dots[0])
args = [repr(d) for d in dots[1:]]
code = "dot(%s, %s)" % (code, ", ".join(args))
code = self._expr_code(dots[0])
args = ", ".join(repr(d) for d in dots[1:])
code = "do_dots(%s, %s)" % (code, args)
else:
self.all_vars.add(expr)
self._variable(expr, self.all_vars)
code = "c_%s" % expr
return code

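For reference, the kinds of Python expressions _expr_code produces, derived by hand from the branches above (illustrative only):

    # "name"        -> "c_name"
    # "name|upper"  -> "c_upper(c_name)"
    # "post.title"  -> "do_dots(c_post, 'title')"
    # "a.b|f|g"     -> "c_g(c_f(do_dots(c_a, 'b')))"
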
def _syntax_error(self, msg, thing):
"""Raise a syntax error using `msg`, and showing `thing`."""
raise TempliteSyntaxError("%s: %r" % (msg, thing))

def _variable(self, name, vars_set):
"""Track that `name` is used as a variable.

Adds the name to `vars_set`, a set of variable names.

Raises a syntax error if `name` is not a valid name.

"""
if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
self._syntax_error("Not a valid name", name)
vars_set.add(name)

def render(self, context=None):
"""Render this template by applying it to `context`.

@@ -191,18 +269,23 @@ class Templite(object):

"""
# Make the complete context we'll use.
ctx = dict(self.context)
render_context = dict(self.context)
if context:
ctx.update(context)
return self.render_function(ctx, self.do_dots)
render_context.update(context)
return self._render_function(render_context, self._do_dots)

def do_dots(self, value, *dots):
"""Evaluate dotted expressions at runtime."""
def _do_dots(self, value, *dots):
"""Evaluate dotted expressions at run-time."""
for dot in dots:
try:
value = getattr(value, dot)
except AttributeError:
try:
value = value[dot]
if hasattr(value, '__call__'):
except (TypeError, KeyError):
raise TempliteValueError(
"Couldn't evaluate %r.%s" % (value, dot)
)
if callable(value):
value = value()
return value

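A quick render sketch matching the class docstring (hedged; the exact output assumes the tokenizing behavior shown above):

    t = Templite("Hello {{name|upper}}!", {'upper': str.upper})
    assert t.render({'name': "Ned"}) == "Hello NED!"
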
@@ -1,730 +0,0 @@
/* C-based Tracer for Coverage. */

#include "Python.h"
#include "compile.h" /* in 2.3, this wasn't part of Python.h */
#include "eval.h" /* or this. */
#include "structmember.h"
#include "frameobject.h"

/* Compile-time debugging helpers */
#undef WHAT_LOG /* Define to log the WHAT params in the trace function. */
#undef TRACE_LOG /* Define to log our bookkeeping. */
#undef COLLECT_STATS /* Collect counters: stats are printed when tracer is stopped. */

#if COLLECT_STATS
#define STATS(x) x
#else
#define STATS(x)
#endif

/* Py 2.x and 3.x compatibility */

#ifndef Py_TYPE
#define Py_TYPE(o) (((PyObject*)(o))->ob_type)
#endif

#if PY_MAJOR_VERSION >= 3

#define MyText_Type PyUnicode_Type
#define MyText_Check(o) PyUnicode_Check(o)
#define MyText_AS_BYTES(o) PyUnicode_AsASCIIString(o)
#define MyText_AS_STRING(o) PyBytes_AS_STRING(o)
#define MyInt_FromLong(l) PyLong_FromLong(l)

#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0)

#else

#define MyText_Type PyString_Type
#define MyText_Check(o) PyString_Check(o)
#define MyText_AS_BYTES(o) (Py_INCREF(o), o)
#define MyText_AS_STRING(o) PyString_AS_STRING(o)
#define MyInt_FromLong(l) PyInt_FromLong(l)

#define MyType_HEAD_INIT PyObject_HEAD_INIT(NULL) 0,

#endif /* Py3k */

/* The values returned to indicate ok or error. */
#define RET_OK 0
#define RET_ERROR -1

/* An entry on the data stack. For each call frame, we need to record the
dictionary to capture data, and the last line number executed in that
frame.
*/
typedef struct {
PyObject * file_data; /* PyMem_Malloc'ed, a borrowed ref. */
int last_line;
} DataStackEntry;

/* The CTracer type. */

typedef struct {
PyObject_HEAD

/* Python objects manipulated directly by the Collector class. */
PyObject * should_trace;
PyObject * warn;
PyObject * data;
PyObject * should_trace_cache;
PyObject * arcs;

/* Has the tracer been started? */
int started;
/* Are we tracing arcs, or just lines? */
int tracing_arcs;

/*
The data stack is a stack of dictionaries. Each dictionary collects
data for a single source file. The data stack parallels the call stack:
each call pushes the new frame's file data onto the data stack, and each
return pops file data off.

The file data is a dictionary whose form depends on the tracing options.
If tracing arcs, the keys are line number pairs. If not tracing arcs,
the keys are line numbers. In both cases, the value is irrelevant
(None).
*/
/* The index of the last-used entry in data_stack. */
int depth;
/* The file data at each level, or NULL if not recording. */
DataStackEntry * data_stack;
int data_stack_alloc; /* number of entries allocated at data_stack. */

/* The current file_data dictionary. Borrowed. */
PyObject * cur_file_data;

/* The line number of the last line recorded, for tracing arcs.
-1 means there was no previous line, as when entering a code object.
*/
int last_line;

/* The parent frame for the last exception event, to fix missing returns. */
PyFrameObject * last_exc_back;
int last_exc_firstlineno;

#if COLLECT_STATS
struct {
unsigned int calls;
unsigned int lines;
unsigned int returns;
unsigned int exceptions;
unsigned int others;
unsigned int new_files;
unsigned int missed_returns;
unsigned int stack_reallocs;
unsigned int errors;
} stats;
#endif /* COLLECT_STATS */
} CTracer;

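The comments above describe the data stack and the shape of the recorded data; a hypothetical Python model of that bookkeeping, not the C implementation:

    data_stack = []                      # parallels the call stack
    cur_file_data, last_line = None, -1  # state for the current frame

    # CALL event: push the enclosing frame's state.
    data_stack.append((cur_file_data, last_line))
    # RETURN event: pop it back.
    cur_file_data, last_line = data_stack.pop()

    # What gets recorded, depending on the tracing mode:
    file_data = {}
    file_data[42] = None        # tracing lines: key is the line number
    file_data[(41, 42)] = None  # tracing arcs: key is (last_line, this_line)
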
#define STACK_DELTA 100

static int
CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
{
#if COLLECT_STATS
self->stats.calls = 0;
self->stats.lines = 0;
self->stats.returns = 0;
self->stats.exceptions = 0;
self->stats.others = 0;
self->stats.new_files = 0;
self->stats.missed_returns = 0;
self->stats.stack_reallocs = 0;
self->stats.errors = 0;
#endif /* COLLECT_STATS */

self->should_trace = NULL;
self->warn = NULL;
self->data = NULL;
self->should_trace_cache = NULL;
self->arcs = NULL;

self->started = 0;
self->tracing_arcs = 0;

self->depth = -1;
self->data_stack = PyMem_Malloc(STACK_DELTA*sizeof(DataStackEntry));
if (self->data_stack == NULL) {
STATS( self->stats.errors++; )
PyErr_NoMemory();
return RET_ERROR;
}
self->data_stack_alloc = STACK_DELTA;

self->cur_file_data = NULL;
self->last_line = -1;

self->last_exc_back = NULL;

return RET_OK;
}

static void
CTracer_dealloc(CTracer *self)
{
if (self->started) {
PyEval_SetTrace(NULL, NULL);
}

Py_XDECREF(self->should_trace);
Py_XDECREF(self->warn);
Py_XDECREF(self->data);
Py_XDECREF(self->should_trace_cache);

PyMem_Free(self->data_stack);

Py_TYPE(self)->tp_free((PyObject*)self);
}

#if TRACE_LOG
static const char *
indent(int n)
{
static const char * spaces =
" "
" "
" "
" "
;
return spaces + strlen(spaces) - n*2;
}

static int logging = 0;
/* Set these constants to be a file substring and line number to start logging. */
static const char * start_file = "tests/views";
static int start_line = 27;

static void
showlog(int depth, int lineno, PyObject * filename, const char * msg)
{
if (logging) {
printf("%s%3d ", indent(depth), depth);
if (lineno) {
printf("%4d", lineno);
}
else {
printf(" ");
}
if (filename) {
PyObject *ascii = MyText_AS_BYTES(filename);
printf(" %s", MyText_AS_STRING(ascii));
Py_DECREF(ascii);
}
if (msg) {
printf(" %s", msg);
}
printf("\n");
}
}

#define SHOWLOG(a,b,c,d) showlog(a,b,c,d)
#else
#define SHOWLOG(a,b,c,d)
#endif /* TRACE_LOG */

#if WHAT_LOG
static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "};
#endif

/* Record a pair of integers in self->cur_file_data. */
static int
CTracer_record_pair(CTracer *self, int l1, int l2)
{
int ret = RET_OK;

PyObject * t = Py_BuildValue("(ii)", l1, l2);
if (t != NULL) {
if (PyDict_SetItem(self->cur_file_data, t, Py_None) < 0) {
STATS( self->stats.errors++; )
ret = RET_ERROR;
}
Py_DECREF(t);
}
else {
STATS( self->stats.errors++; )
ret = RET_ERROR;
}
return ret;
}

/*
 * The Trace Function
 */
static int
CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused)
{
int ret = RET_OK;
PyObject * filename = NULL;
PyObject * tracename = NULL;
#if WHAT_LOG || TRACE_LOG
PyObject * ascii = NULL;
#endif

#if WHAT_LOG
if (what <= sizeof(what_sym)/sizeof(const char *)) {
ascii = MyText_AS_BYTES(frame->f_code->co_filename);
printf("trace: %s @ %s %d\n", what_sym[what], MyText_AS_STRING(ascii), frame->f_lineno);
Py_DECREF(ascii);
}
#endif

#if TRACE_LOG
ascii = MyText_AS_BYTES(frame->f_code->co_filename);
if (strstr(MyText_AS_STRING(ascii), start_file) && frame->f_lineno == start_line) {
logging = 1;
}
Py_DECREF(ascii);
#endif

/* See below for details on missing-return detection. */
if (self->last_exc_back) {
if (frame == self->last_exc_back) {
/* Looks like someone forgot to send a return event. We'll clear
the exception state and do the RETURN code here. Notice that the
frame we have in hand here is not the correct frame for the RETURN,
that frame is gone. Our handling for RETURN doesn't need the
actual frame, but we do log it, so that will look a little off if
you're looking at the detailed log.

If someday we need to examine the frame when doing RETURN, then
we'll need to keep more of the missed frame's state.
*/
STATS( self->stats.missed_returns++; )
if (self->depth >= 0) {
if (self->tracing_arcs && self->cur_file_data) {
if (CTracer_record_pair(self, self->last_line, -self->last_exc_firstlineno) < 0) {
return RET_ERROR;
}
}
SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "missedreturn");
self->cur_file_data = self->data_stack[self->depth].file_data;
self->last_line = self->data_stack[self->depth].last_line;
self->depth--;
}
}
self->last_exc_back = NULL;
}


switch (what) {
case PyTrace_CALL: /* 0 */
STATS( self->stats.calls++; )
/* Grow the stack. */
self->depth++;
if (self->depth >= self->data_stack_alloc) {
STATS( self->stats.stack_reallocs++; )
/* We've outgrown our data_stack array: make it bigger. */
int bigger = self->data_stack_alloc + STACK_DELTA;
DataStackEntry * bigger_data_stack = PyMem_Realloc(self->data_stack, bigger * sizeof(DataStackEntry));
if (bigger_data_stack == NULL) {
STATS( self->stats.errors++; )
PyErr_NoMemory();
self->depth--;
return RET_ERROR;
}
self->data_stack = bigger_data_stack;
self->data_stack_alloc = bigger;
}

/* Push the current state on the stack. */
self->data_stack[self->depth].file_data = self->cur_file_data;
self->data_stack[self->depth].last_line = self->last_line;

/* Check if we should trace this line. */
filename = frame->f_code->co_filename;
tracename = PyDict_GetItem(self->should_trace_cache, filename);
if (tracename == NULL) {
STATS( self->stats.new_files++; )
/* We've never considered this file before. */
/* Ask should_trace about it. */
PyObject * args = Py_BuildValue("(OO)", filename, frame);
tracename = PyObject_Call(self->should_trace, args, NULL);
Py_DECREF(args);
if (tracename == NULL) {
/* An error occurred inside should_trace. */
STATS( self->stats.errors++; )
return RET_ERROR;
}
if (PyDict_SetItem(self->should_trace_cache, filename, tracename) < 0) {
STATS( self->stats.errors++; )
return RET_ERROR;
}
}
else {
Py_INCREF(tracename);
}

/* If tracename is a string, then we're supposed to trace. */
if (MyText_Check(tracename)) {
PyObject * file_data = PyDict_GetItem(self->data, tracename);
if (file_data == NULL) {
file_data = PyDict_New();
if (file_data == NULL) {
STATS( self->stats.errors++; )
return RET_ERROR;
}
ret = PyDict_SetItem(self->data, tracename, file_data);
Py_DECREF(file_data);
if (ret < 0) {
STATS( self->stats.errors++; )
return RET_ERROR;
}
}
self->cur_file_data = file_data;
/* Make the frame right in case settrace(gettrace()) happens. */
Py_INCREF(self);
frame->f_trace = (PyObject*)self;
SHOWLOG(self->depth, frame->f_lineno, filename, "traced");
}
else {
self->cur_file_data = NULL;
SHOWLOG(self->depth, frame->f_lineno, filename, "skipped");
}

Py_DECREF(tracename);

self->last_line = -1;
break;

case PyTrace_RETURN: /* 3 */
STATS( self->stats.returns++; )
/* A near-copy of this code is above in the missing-return handler. */
if (self->depth >= 0) {
if (self->tracing_arcs && self->cur_file_data) {
int first = frame->f_code->co_firstlineno;
if (CTracer_record_pair(self, self->last_line, -first) < 0) {
return RET_ERROR;
}
}

SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "return");
self->cur_file_data = self->data_stack[self->depth].file_data;
self->last_line = self->data_stack[self->depth].last_line;
self->depth--;
}
break;

case PyTrace_LINE: /* 2 */
STATS( self->stats.lines++; )
if (self->depth >= 0) {
SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "line");
if (self->cur_file_data) {
/* We're tracing in this frame: record something. */
if (self->tracing_arcs) {
/* Tracing arcs: key is (last_line,this_line). */
if (CTracer_record_pair(self, self->last_line, frame->f_lineno) < 0) {
return RET_ERROR;
}
}
else {
/* Tracing lines: key is simply this_line. */
PyObject * this_line = MyInt_FromLong(frame->f_lineno);
if (this_line == NULL) {
STATS( self->stats.errors++; )
return RET_ERROR;
}
ret = PyDict_SetItem(self->cur_file_data, this_line, Py_None);
Py_DECREF(this_line);
if (ret < 0) {
STATS( self->stats.errors++; )
return RET_ERROR;
}
}
}
self->last_line = frame->f_lineno;
}
break;

case PyTrace_EXCEPTION:
/* Some code (Python 2.3, and pyexpat anywhere) fires an exception event
without a return event. To detect that, we'll keep a copy of the
parent frame for an exception event. If the next event is in that
frame, then we must have returned without a return event. We can
synthesize the missing event then.

Python itself fixed this problem in 2.4. Pyexpat still has the bug.
I've reported the problem with pyexpat as http://bugs.python.org/issue6359 .
If it gets fixed, this code should still work properly. Maybe some day
the bug will be fixed everywhere coverage.py is supported, and we can
remove this missing-return detection.

More about this fix: http://nedbatchelder.com/blog/200907/a_nasty_little_bug.html
*/
STATS( self->stats.exceptions++; )
self->last_exc_back = frame->f_back;
self->last_exc_firstlineno = frame->f_code->co_firstlineno;
break;

default:
STATS( self->stats.others++; )
break;
}

return RET_OK;
}

/*
 * Python has two ways to set the trace function: sys.settrace(fn), which
 * takes a Python callable, and PyEval_SetTrace(func, obj), which takes
 * a C function and a Python object. The way these work together is that
 * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the
 * Python callable as the object in PyEval_SetTrace. So sys.gettrace()
 * simply returns the Python object used as the second argument to
 * PyEval_SetTrace. So sys.gettrace() will return our self parameter, which
 * means it must be callable to be used in sys.settrace().
 *
 * So we make our self callable, equivalent to invoking our trace function.
 *
 * To help with the process of replaying stored frames, this function has an
 * optional keyword argument:
 *
 * def CTracer_call(frame, event, arg, lineno=0)
 *
 * If provided, the lineno argument is used as the line number, and the
 * frame's f_lineno member is ignored.
 */
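The equivalence described above, sketched in Python (a hypothetical pure-Python stand-in, not the C tracer): sys.settrace accepts any callable that returns the next trace function, so an object whose __call__ returns itself behaves like CTracer does.

    import sys

    class Tracer(object):
        def __call__(self, frame, event, arg, lineno=0):
            # record something about frame/event here...
            return self  # keep tracing; sys.gettrace() returns this object

    tracer = Tracer()
    sys.settrace(tracer)
    assert sys.gettrace() is tracer
    sys.settrace(None)
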
static PyObject *
CTracer_call(CTracer *self, PyObject *args, PyObject *kwds)
{
PyFrameObject *frame;
PyObject *what_str;
PyObject *arg;
int lineno = 0;
int what;
int orig_lineno;
PyObject *ret = NULL;

static char *what_names[] = {
"call", "exception", "line", "return",
"c_call", "c_exception", "c_return",
NULL
};

#if WHAT_LOG
printf("pytrace\n");
#endif

static char *kwlist[] = {"frame", "event", "arg", "lineno", NULL};

if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist,
&PyFrame_Type, &frame, &MyText_Type, &what_str, &arg, &lineno)) {
goto done;
}

/* In Python, the what argument is a string, we need to find an int
for the C function. */
for (what = 0; what_names[what]; what++) {
PyObject *ascii = MyText_AS_BYTES(what_str);
int should_break = !strcmp(MyText_AS_STRING(ascii), what_names[what]);
Py_DECREF(ascii);
if (should_break) {
break;
}
}

/* Save off the frame's lineno, and use the forced one, if provided. */
orig_lineno = frame->f_lineno;
if (lineno > 0) {
frame->f_lineno = lineno;
}

/* Invoke the C function, and return ourselves. */
if (CTracer_trace(self, frame, what, arg) == RET_OK) {
Py_INCREF(self);
ret = (PyObject *)self;
}

/* Clean up. */
frame->f_lineno = orig_lineno;

done:
return ret;
}

static PyObject *
CTracer_start(CTracer *self, PyObject *args_unused)
{
PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
self->started = 1;
self->tracing_arcs = self->arcs && PyObject_IsTrue(self->arcs);
self->last_line = -1;

/* start() returns a trace function usable with sys.settrace() */
Py_INCREF(self);
return (PyObject *)self;
}

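How the Python-side collector might drive this type, a hedged sketch using only the members and methods defined here (the should_trace callback is a hypothetical stand-in that traces everything):

    tracer = CTracer()
    tracer.data = {}                 # filled with {filename: {line_or_arc: None}}
    tracer.should_trace_cache = {}
    tracer.arcs = False
    tracer.should_trace = lambda filename, frame: filename
    tracer.start()
    # ... run the code under measurement ...
    tracer.stop()
    print(tracer.get_stats())        # None unless compiled with COLLECT_STATS
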
static PyObject *
CTracer_stop(CTracer *self, PyObject *args_unused)
{
if (self->started) {
PyEval_SetTrace(NULL, NULL);
self->started = 0;
}

return Py_BuildValue("");
}

static PyObject *
CTracer_get_stats(CTracer *self)
{
#if COLLECT_STATS
return Py_BuildValue(
"{sI,sI,sI,sI,sI,sI,sI,sI,si,sI}",
"calls", self->stats.calls,
"lines", self->stats.lines,
"returns", self->stats.returns,
"exceptions", self->stats.exceptions,
"others", self->stats.others,
"new_files", self->stats.new_files,
"missed_returns", self->stats.missed_returns,
"stack_reallocs", self->stats.stack_reallocs,
"stack_alloc", self->data_stack_alloc,
"errors", self->stats.errors
);
#else
return Py_BuildValue("");
#endif /* COLLECT_STATS */
}

static PyMemberDef
CTracer_members[] = {
{ "should_trace", T_OBJECT, offsetof(CTracer, should_trace), 0,
PyDoc_STR("Function indicating whether to trace a file.") },

{ "warn", T_OBJECT, offsetof(CTracer, warn), 0,
PyDoc_STR("Function for issuing warnings.") },

{ "data", T_OBJECT, offsetof(CTracer, data), 0,
PyDoc_STR("The raw dictionary of trace data.") },

{ "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
PyDoc_STR("Dictionary caching should_trace results.") },

{ "arcs", T_OBJECT, offsetof(CTracer, arcs), 0,
PyDoc_STR("Should we trace arcs, or just lines?") },

{ NULL }
};

static PyMethodDef
CTracer_methods[] = {
{ "start", (PyCFunction) CTracer_start, METH_VARARGS,
PyDoc_STR("Start the tracer") },

{ "stop", (PyCFunction) CTracer_stop, METH_VARARGS,
PyDoc_STR("Stop the tracer") },

{ "get_stats", (PyCFunction) CTracer_get_stats, METH_VARARGS,
PyDoc_STR("Get statistics about the tracing") },

{ NULL }
};

static PyTypeObject
CTracerType = {
MyType_HEAD_INIT
"coverage.CTracer", /*tp_name*/
sizeof(CTracer), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)CTracer_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash */
(ternaryfunc)CTracer_call, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
"CTracer objects", /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
CTracer_methods, /* tp_methods */
CTracer_members, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
(initproc)CTracer_init, /* tp_init */
0, /* tp_alloc */
0, /* tp_new */
};

/* Module definition */

#define MODULE_DOC PyDoc_STR("Fast coverage tracer.")

#if PY_MAJOR_VERSION >= 3

static PyModuleDef
moduledef = {
PyModuleDef_HEAD_INIT,
"coverage.tracer",
MODULE_DOC,
-1,
NULL, /* methods */
NULL,
NULL, /* traverse */
NULL, /* clear */
NULL
};


PyObject *
PyInit_tracer(void)
{
PyObject * mod = PyModule_Create(&moduledef);
if (mod == NULL) {
return NULL;
}

CTracerType.tp_new = PyType_GenericNew;
if (PyType_Ready(&CTracerType) < 0) {
Py_DECREF(mod);
return NULL;
}

Py_INCREF(&CTracerType);
PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);

return mod;
}

#else

void
inittracer(void)
{
PyObject * mod;

mod = Py_InitModule3("coverage.tracer", NULL, MODULE_DOC);
if (mod == NULL) {
return;
}

CTracerType.tp_new = PyType_GenericNew;
if (PyType_Ready(&CTracerType) < 0) {
return;
}

Py_INCREF(&CTracerType);
PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
}

#endif /* Py3k */
@@ -1,9 +1,33 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""The version and URL for coverage.py"""
# This file is exec'ed in setup.py, don't import anything!

__version__ = "3.7.1" # see detailed history in CHANGES.txt
# Same semantics as sys.version_info.
version_info = (4, 2, 0, 'final', 0)

__url__ = "http://nedbatchelder.com/code/coverage"
if max(__version__).isalpha():

def _make_version(major, minor, micro, releaselevel, serial):
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
version = "%d.%d" % (major, minor)
if micro:
version += ".%d" % (micro,)
if releaselevel != 'final':
short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
version += "%s%d" % (short, serial)
return version


def _make_url(major, minor, micro, releaselevel, serial):
"""Make the URL people should start at for this version of coverage.py."""
url = "https://coverage.readthedocs.io"
if releaselevel != 'final':
# For pre-releases, use a version-specific URL.
__url__ += "/" + __version__
url += "/en/coverage-" + _make_version(major, minor, micro, releaselevel, serial)
return url


__version__ = _make_version(*version_info)
__url__ = _make_url(*version_info)

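Spot-checking _make_version and _make_url, with results derived by hand from the definitions above:

    _make_version(4, 2, 0, 'final', 0)  # -> "4.2"   (micro 0 is omitted)
    _make_version(4, 2, 1, 'beta', 3)   # -> "4.2.1b3"
    _make_url(4, 2, 0, 'final', 0)      # -> "https://coverage.readthedocs.io"
    _make_url(4, 2, 0, 'beta', 1)       # -> ".../en/coverage-4.2b1"
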
@@ -1,15 +1,36 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""XML reporting for coverage.py"""

import os, sys, time
import os
import os.path
import sys
import time
import xml.dom.minidom

from coverage import __url__, __version__
from coverage.backward import sorted, rpartition # pylint: disable=W0622
from coverage import env
from coverage import __url__, __version__, files
from coverage.backward import iitems
from coverage.misc import isolate_module
from coverage.report import Reporter

os = isolate_module(os)


DTD_URL = (
'https://raw.githubusercontent.com/cobertura/web/'
'f0366e5e2cf18f111cbd61fc34ef720a6584ba02'
'/htdocs/xml/coverage-03.dtd'
)


def rate(hit, num):
"""Return the fraction of `hit`/`num`, as a string."""
return "%.4g" % (float(hit) / (num or 1.0))
if num == 0:
return "1"
else:
return "%.4g" % (float(hit) / num)


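The rewritten rate() in action, with values computed from the definition above:

    rate(1, 3)  # -> "0.3333"
    rate(2, 2)  # -> "1"
    rate(0, 0)  # -> "1"  (no branches counts as fully covered)
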
class XmlReporter(Reporter):
@@ -18,9 +39,14 @@ class XmlReporter(Reporter):
def __init__(self, coverage, config):
super(XmlReporter, self).__init__(coverage, config)

self.packages = None
self.source_paths = set()
if config.source:
for src in config.source:
if os.path.exists(src):
self.source_paths.add(files.canonical_filename(src))
self.packages = {}
self.xml_out = None
self.arcs = coverage.data.has_arcs()
self.has_arcs = coverage.data.has_arcs()

def report(self, morfs, outfile=None):
"""Generate a Cobertura-compatible XML report for `morfs`.
@@ -35,11 +61,7 @@ class XmlReporter(Reporter):

# Create the DOM that will store the data.
impl = xml.dom.minidom.getDOMImplementation()
docType = impl.createDocumentType(
"coverage", None,
"http://cobertura.sourceforge.net/xml/coverage-03.dtd"
)
self.xml_out = impl.createDocument(None, "coverage", docType)
self.xml_out = impl.createDocument(None, "coverage", None)

# Write header stuff.
xcoverage = self.xml_out.documentElement
@@ -48,29 +70,43 @@ class XmlReporter(Reporter):
xcoverage.appendChild(self.xml_out.createComment(
" Generated by coverage.py: %s " % __url__
))
xpackages = self.xml_out.createElement("packages")
xcoverage.appendChild(xpackages)
xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL))

# Call xml_file for each file in the data.
self.packages = {}
self.report_files(self.xml_file, morfs)

xsources = self.xml_out.createElement("sources")
xcoverage.appendChild(xsources)

# Populate the XML DOM with the source info.
for path in sorted(self.source_paths):
xsource = self.xml_out.createElement("source")
xsources.appendChild(xsource)
txt = self.xml_out.createTextNode(path)
xsource.appendChild(txt)

lnum_tot, lhits_tot = 0, 0
bnum_tot, bhits_tot = 0, 0

xpackages = self.xml_out.createElement("packages")
xcoverage.appendChild(xpackages)

# Populate the XML DOM with the package info.
for pkg_name in sorted(self.packages.keys()):
pkg_data = self.packages[pkg_name]
for pkg_name, pkg_data in sorted(iitems(self.packages)):
class_elts, lhits, lnum, bhits, bnum = pkg_data
xpackage = self.xml_out.createElement("package")
xpackages.appendChild(xpackage)
xclasses = self.xml_out.createElement("classes")
xpackage.appendChild(xclasses)
for class_name in sorted(class_elts.keys()):
xclasses.appendChild(class_elts[class_name])
for _, class_elt in sorted(iitems(class_elts)):
xclasses.appendChild(class_elt)
xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
xpackage.setAttribute("line-rate", rate(lhits, lnum))
xpackage.setAttribute("branch-rate", rate(bhits, bnum))
if self.has_arcs:
branch_rate = rate(bhits, bnum)
else:
branch_rate = "0"
xpackage.setAttribute("branch-rate", branch_rate)
xpackage.setAttribute("complexity", "0")

lnum_tot += lnum
@@ -79,10 +115,17 @@ class XmlReporter(Reporter):
bhits_tot += bhits

xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
if self.has_arcs:
branch_rate = rate(bhits_tot, bnum_tot)
else:
branch_rate = "0"
xcoverage.setAttribute("branch-rate", branch_rate)

# Use the DOM to write the output file.
outfile.write(self.xml_out.toprettyxml())
out = self.xml_out.toprettyxml()
if env.PY2:
out = out.encode("utf8")
outfile.write(out)

# Return the total percentage.
denom = lnum_tot + bnum_tot
@@ -92,14 +135,25 @@ class XmlReporter(Reporter):
pct = 100.0 * (lhits_tot + bhits_tot) / denom
return pct

def xml_file(self, cu, analysis):
def xml_file(self, fr, analysis):
"""Add to the XML report for a single file."""

# Create the 'lines' and 'package' XML elements, which
# are populated later. Note that a package == a directory.
package_name = rpartition(cu.name, ".")[0]
className = cu.name
filename = fr.filename.replace("\\", "/")
for source_path in self.source_paths:
if filename.startswith(source_path.replace("\\", "/") + "/"):
rel_name = filename[len(source_path)+1:]
break
else:
rel_name = fr.relative_filename()

dirname = os.path.dirname(rel_name) or "."
dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
package_name = dirname.replace("/", ".")

if rel_name != fr.filename:
self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))
package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])

xclass = self.xml_out.createElement("class")
@@ -109,12 +163,12 @@ class XmlReporter(Reporter):
xlines = self.xml_out.createElement("lines")
xclass.appendChild(xlines)

xclass.setAttribute("name", className)
filename = cu.file_locator.relative_filename(cu.filename)
xclass.setAttribute("filename", filename.replace("\\", "/"))
xclass.setAttribute("name", os.path.relpath(rel_name, dirname))
xclass.setAttribute("filename", fr.relative_filename().replace("\\", "/"))
xclass.setAttribute("complexity", "0")

branch_stats = analysis.branch_stats()
missing_branch_arcs = analysis.missing_branch_arcs()

# For each statement, create an XML 'line' element.
for line in sorted(analysis.statements):
@@ -125,21 +179,25 @@ class XmlReporter(Reporter):
# executed? If so, that should be recorded here.
xline.setAttribute("hits", str(int(line not in analysis.missing)))

if self.arcs:
if self.has_arcs:
if line in branch_stats:
total, taken = branch_stats[line]
xline.setAttribute("branch", "true")
xline.setAttribute("condition-coverage",
xline.setAttribute(
"condition-coverage",
"%d%% (%d/%d)" % (100*taken/total, taken, total)
)
if line in missing_branch_arcs:
annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
xline.setAttribute("missing-branches", ",".join(annlines))
xlines.appendChild(xline)

class_lines = len(analysis.statements)
class_hits = class_lines - len(analysis.missing)

if self.arcs:
class_branches = sum([t for t,k in branch_stats.values()])
missing_branches = sum([t-k for t,k in branch_stats.values()])
if self.has_arcs:
class_branches = sum(t for t, k in branch_stats.values())
missing_branches = sum(t - k for t, k in branch_stats.values())
class_br_hits = class_branches - missing_branches
else:
class_branches = 0.0
@@ -147,8 +205,13 @@ class XmlReporter(Reporter):

# Finalize the statistics that are collected in the XML DOM.
xclass.setAttribute("line-rate", rate(class_hits, class_lines))
xclass.setAttribute("branch-rate", rate(class_br_hits, class_branches))
package[0][className] = xclass
if self.has_arcs:
branch_rate = rate(class_br_hits, class_branches)
else:
branch_rate = "0"
xclass.setAttribute("branch-rate", branch_rate)

package[0][rel_name] = xclass
package[1] += class_hits
package[2] += class_lines
package[3] += class_br_hits

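Worked examples of the new logic, hand-derived with hypothetical file names: with a sufficiently large xml_package_depth, a file reported as "coverage/backward.py" gives dirname "coverage" and package_name "coverage"; and a line whose branch_stats entry is total=2, taken=1 is annotated as:

    "%d%% (%d/%d)" % (100 * 1 / 2, 1, 2)  # -> "50% (1/2)"
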