diff --git a/README.md b/README.md
index 2b1490d..1974a6e 100644
--- a/README.md
+++ b/README.md
@@ -73,6 +73,12 @@ docstr-coverage some_project/src
 
 #### Options
 
+- _--destination=\<destination\>, -dst \<destination\>_ - Set the results output destination (default stdout)
+  - stdout - Write the report to standard output.
+  - file - Save the report to a file.
+- _--format=\<format\>, -frm \<format\>_ - Set the output format (default text)
+  - text - Output as plain text.
+  - markdown - Output as Markdown.
 - _--skip-magic, -m_ - Ignore all magic methods (except `__init__`)
 - _--skip-init, -i_ - Ignore all `__init__` methods
 - _--skip-file-doc, -f_ - Ignore module docstrings (at the top of files)
diff --git a/docstr_coverage/cli.py b/docstr_coverage/cli.py
index a19cfb4..d2b4762 100644
--- a/docstr_coverage/cli.py
+++ b/docstr_coverage/cli.py
@@ -13,7 +13,7 @@ from docstr_coverage.config_file import set_config_defaults
 from docstr_coverage.coverage import analyze
 from docstr_coverage.ignore_config import IgnoreConfig
 
-from docstr_coverage.printers import LegacyPrinter
+from docstr_coverage.printers import LegacyPrinter, MarkdownPrinter
 
 
 def do_include_filepath(filepath: str, exclude_re: Optional["re.Pattern"]) -> bool:
@@ -261,6 +261,24 @@ def _assert_valid_key_value(k, v):
     default=".docstr_coverage",
     help="Deprecated. Use json config (--config / -C) instead",
 )
+@click.option(
+    "-dst",
+    "--destination",
+    type=click.Choice(["stdout", "file"]),
+    default="stdout",
+    help="Results output destination",
+    show_default=True,
+    metavar="DESTINATION",
+)
+@click.option(
+    "-frm",
+    "--format",
+    type=click.Choice(["text", "markdown"]),
+    default="text",
+    help="Format of output",
+    show_default=True,
+    metavar="FORMAT",
+)
 def execute(paths, **kwargs):
     """Measure docstring coverage for `PATHS`"""
 
@@ -328,7 +346,21 @@ def execute(paths, **kwargs):
     show_progress = not kwargs["percentage_only"]
     results = analyze(all_paths, ignore_config=ignore_config, show_progress=show_progress)
 
-    LegacyPrinter(verbosity=kwargs["verbose"], ignore_config=ignore_config).print(results)
+    report_format: str = kwargs["format"]
+    if report_format == "markdown":
+        printer = MarkdownPrinter(results, verbosity=kwargs["verbose"], ignore_config=ignore_config)
+    elif report_format == "text":
+        printer = LegacyPrinter(results, verbosity=kwargs["verbose"], ignore_config=ignore_config)
+    else:
+        raise SystemError("Unknown report format: {0}".format(report_format))
+
+    destination: str = kwargs["destination"]
+    if destination == "file":
+        printer.save_to_file()
+    elif destination == "stdout":
+        printer.print_to_stdout()
+    else:
+        raise SystemError("Unknown output type: {0}".format(destination))
 
     file_results, total_results = results.to_legacy()
 
diff --git a/docstr_coverage/coverage.py b/docstr_coverage/coverage.py
index 7cb3869..792f1e4 100644
--- a/docstr_coverage/coverage.py
+++ b/docstr_coverage/coverage.py
@@ -213,7 +213,7 @@ def get_docstring_coverage(
         ignore_names=ignore_names,
     )
     results = analyze(filenames, ignore_config)
-    LegacyPrinter(verbosity=verbose, ignore_config=ignore_config).print(results)
+    LegacyPrinter(results, verbosity=verbose, ignore_config=ignore_config).print_to_stdout()
     return results.to_legacy()
 
 
diff --git a/docstr_coverage/printers.py b/docstr_coverage/printers.py
index 8ef21e2..99b46a6 100644
--- a/docstr_coverage/printers.py
+++ b/docstr_coverage/printers.py
@@ -1,9 +1,17 @@
 """All logic used to print a recorded ResultCollection to stdout.
 Currently, this module is in BETA and its interface may change in future versions."""
 import logging
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
 
 from docstr_coverage.ignore_config import IgnoreConfig
-from docstr_coverage.result_collection import FileStatus
+from docstr_coverage.result_collection import (
+    AggregatedCount,
+    File,
+    FileStatus,
+    ResultCollection,
+)
 
 _GRADES = (
     ("AMAZING! Your docstrings are truly a wonder to behold!", 100),
@@ -22,128 +30,482 @@
 logging.basicConfig(level=logging.INFO, format="%(message)s")
 
 
-def print_line(line=""):
-    """Prints `line`
+@dataclass(frozen=True)
+class IgnoredNode:
+    """Data structure for a node that was ignored during the check."""
 
-    Parameters
-    ----------
-    line: String
-        The text to print"""
-    logger.info(line)
+    identifier: str
+    reason: str
 
 
-class LegacyPrinter:
-    """Printing functionality consistent with the original early-versions docstr-coverage outputs.
+@dataclass(frozen=True)
+class FileCoverageStat:
+    """Data structure with coverage info about a single file.
 
-    In future versions, the interface of this class will be refined and an abstract superclass
-    will be extracted. Thus, coding against the current interface will require refactorings with
-    future versions of docstr-coverage."""
+    For `verbosity` with value:
+    * `2` - Fields `coverage`, `found`, `missing`, `needed` and `path`.
+    * `3` - Fields with `verbosity` `2` and `nodes_without_docstring`.
+    * `4` - Fields with `verbosity` `3` and `is_empty`, `nodes_with_docstring`,
+    `ignored_nodes`
+    """
 
-    def __init__(self, verbosity: int, ignore_config: IgnoreConfig = IgnoreConfig()):
-        self.verbosity = verbosity
-        self.ignore_config = ignore_config
+    coverage: float
+    found: int
+    missing: int
+    needed: int
+    path: str
+    ignored_nodes: Optional[Tuple[IgnoredNode, ...]]
+    is_empty: Optional[bool]
+    nodes_with_docstring: Optional[Tuple[str, ...]]
+    nodes_without_docstring: Optional[Tuple[str, ...]]
 
-    def print(self, results):
-        """Prints a provided set of results to stdout.
 
-        Parameters
-        ----------
-        results: ResultCollection
-            The information about docstr presence to be printed to stdout."""
-        if self.verbosity >= 2:
-            self._print_file_statistics(results)
-        if self.verbosity >= 1:
-            self._print_overall_statistics(results)
+@dataclass(frozen=True)
+class OverallCoverageStat:
+    """Data structure with the overall coverage statistics."""
+
+    found: int
+    grade: str
+    is_skip_class_def: bool
+    is_skip_file_docstring: bool
+    is_skip_init: bool
+    is_skip_magic: bool
+    is_skip_private: bool
+    missing: int
+    needed: int
+    num_empty_files: int
+    num_files: int
+    total_coverage: float
+
+
+class Printer(ABC):
+    """Abstract base class for printing coverage results.
 
-    def _print_file_statistics(self, results):
-        """Prints the file specific information to stdout.
+    It provides the coverage results as data structures (`OverallCoverageStat`, `FileCoverageStat`
+    and `IgnoredNode`) and abstract methods for displaying the statistics and for saving them to
+    a file.
+    Child classes can use the `overall_coverage_stat` and `overall_files_coverage_stat`
+    attributes.
Depends of given `verbosity` some data can be `None`.""" + + def __init__( + self, + results: ResultCollection, + verbosity: int, + ignore_config: IgnoreConfig = IgnoreConfig(), + ): + """ Parameters ---------- results: ResultCollection - The information about docstr presence to be printed to stdout.""" - for file_path, file in results.files(): - if self.verbosity < 4 and file.count_aggregate().missing == 0: - # Don't print fully documented files - continue - - # File Header - print_line('\nFile: "{}"'.format(file_path)) - - # List of missing docstrings - if self.verbosity >= 3: - if file.status == FileStatus.EMPTY and self.verbosity > 3: - print_line(" - File is empty") - for expected_docstr in file._expected_docstrings: - if expected_docstr.has_docstring and self.verbosity > 3: - print_line( - " - Found docstring for `{0}`".format(expected_docstr.node_identifier) - ) - elif expected_docstr.ignore_reason and self.verbosity > 3: - print_line( - " - Ignored `{0}`: reason: `{1}`".format( - expected_docstr.node_identifier, expected_docstr.ignore_reason - ) + Coverage analyze results. + verbosity: int + Verbosity identifier. + ignore_config: IgnoreConfig + Config with ignoring setups. + """ + self.verbosity: int = verbosity + self.ignore_config: IgnoreConfig = ignore_config + self.results: ResultCollection = results + self.__overall_coverage_stat: Optional[Union[OverallCoverageStat, float]] = None + self.__overall_files_coverage_stat: Optional[List[FileCoverageStat]] = None + + @property + def overall_coverage_stat(self) -> Union[OverallCoverageStat, float]: + """Getting full coverage statistic. + + For `verbosity` with value: + * `0` - Only `total_coverage` value returning. + * `1` - All fields, except `files_info`. + * `2` - All fields.""" + if self.__overall_coverage_stat is None: + count: AggregatedCount = self.results.count_aggregate() + + if self.verbosity >= 1: + + self.__overall_coverage_stat = OverallCoverageStat( + found=count.found, + grade=next( + message + for message, grade_threshold in _GRADES + if grade_threshold <= count.coverage() + ), + is_skip_class_def=self.ignore_config.skip_class_def, + is_skip_file_docstring=self.ignore_config.skip_file_docstring, + is_skip_init=self.ignore_config.skip_init, + is_skip_magic=self.ignore_config.skip_magic, + is_skip_private=self.ignore_config.skip_private, + missing=count.missing, + needed=count.needed, + num_empty_files=count.num_empty_files, + num_files=count.num_files, + total_coverage=count.coverage(), + ) + + else: + self.__overall_coverage_stat = count.coverage() + + return self.__overall_coverage_stat + + @property + def overall_files_coverage_stat(self) -> Optional[List[FileCoverageStat]]: + """Getting coverage statistics for files. + + For `verbosity` with value: + * `2` - Fields `coverage`, `found`, `missing`, `needed` and `path`. + * `3` - Fields with `verbosity` `2` and `nodes_without_docstring`. 
+ * `4` - Fields with `verbosity` `3` and `is_empty`, `nodes_with_docstring`, + `ignored_nodes` + + Returns + ------- + List[FileCoverageStat] + Coverage info about all checked files.""" + if self.__overall_files_coverage_stat is None and self.verbosity >= 2: + overall_files_coverage_stat: List[FileCoverageStat] = [] + for file_path, file_info in self.results.files(): + + file_path: str + file_info: File + nodes_without_docstring: Optional[Tuple[str, ...]] + is_empty: Optional[bool] + nodes_with_docstring: Optional[Tuple[str, ...]] + ignored_nodes: Optional[Tuple[IgnoredNode, ...]] + + if self.verbosity >= 3: + nodes_without_docstring = tuple( + expected_docstring.node_identifier + for expected_docstring in file_info._expected_docstrings + if not expected_docstring.has_docstring + and not expected_docstring.ignore_reason + ) + else: + nodes_without_docstring = None + + if self.verbosity >= 4: + is_empty = file_info.status == FileStatus.EMPTY + nodes_with_docstring = tuple( + expected_docstring.node_identifier + for expected_docstring in file_info._expected_docstrings + if expected_docstring.has_docstring and not expected_docstring.ignore_reason + ) + ignored_nodes = tuple( + IgnoredNode( + identifier=expected_docstring.node_identifier, + reason=expected_docstring.ignore_reason, ) - elif not expected_docstr.has_docstring and not expected_docstr.ignore_reason: - if expected_docstr.node_identifier == "module docstring": - print_line(" - No module docstring") - else: - print_line( - " - No docstring for `{0}`".format(expected_docstr.node_identifier) - ) - - # Statistics - count = file.count_aggregate() - print_line( - " Needed: %s; Found: %s; Missing: %s; Coverage: %.1f%%" - % ( - count.needed, - count.found, - count.missing, - count.coverage(), - ), - ) - print_line() - print_line() + for expected_docstring in file_info._expected_docstrings + if expected_docstring.ignore_reason is not None + ) + else: + is_empty = None + nodes_with_docstring = None + ignored_nodes = None + + count = file_info.count_aggregate() + overall_files_coverage_stat.append( + FileCoverageStat( + coverage=count.coverage(), + found=count.found, + missing=count.missing, + needed=count.needed, + path=file_path, + ignored_nodes=ignored_nodes, + is_empty=is_empty, + nodes_with_docstring=nodes_with_docstring, + nodes_without_docstring=nodes_without_docstring, + ) + ) + self.__overall_files_coverage_stat = overall_files_coverage_stat + + return self.__overall_files_coverage_stat + + @abstractmethod + def print_to_stdout(self) -> None: + """Providing how to print coverage results.""" + pass - def _print_overall_statistics(self, results): - """Prints overall results (aggregated over all files) to stdout. + @abstractmethod + def save_to_file(self, path: Optional[str] = None) -> None: + """Providing how to save coverage results in file. 
Parameters ---------- - results: ResultCollection - The information about docstr presence to be printed to stdout.""" - count = results.count_aggregate() - - postfix = "" - if count.num_empty_files > 0: - postfix = " (%s files are empty)" % count.num_empty_files - if self.ignore_config.skip_magic: - postfix += " (skipped all non-init magic methods)" - if self.ignore_config.skip_file_docstring: - postfix += " (skipped file-level docstrings)" - if self.ignore_config.skip_init: - postfix += " (skipped __init__ methods)" - if self.ignore_config.skip_class_def: - postfix += " (skipped class definitions)" - if self.ignore_config.skip_private: - postfix += " (skipped private methods)" - - if count.num_files > 1: - print_line("Overall statistics for %s files%s:" % (count.num_files, postfix)) + path: Optional[str] + Path to file with coverage results. + """ + pass + + +class LegacyPrinter(Printer): + """Printer for legacy format.""" + + def print_to_stdout(self) -> None: + for line in self._generate_string().split("\n"): + logger.info(line) + + def save_to_file(self, path: Optional[str] = None) -> None: + if path is None: + path = "./coverage-results.txt" + with open(path, "w") as wf: + wf.write(self._generate_string()) + + def _generate_string(self) -> str: + final_string: str = "" + + if self.overall_files_coverage_stat is not None: + final_string += self._generate_file_stat_string() + final_string += "\n" + final_string += self._generate_overall_stat_string() + + return final_string + + def _generate_file_stat_string(self): + final_string: str = "" + for file_coverage_stat in self.overall_files_coverage_stat: + + file_string: str = 'File: "{0}"\n'.format(file_coverage_stat.path) + + if file_coverage_stat.is_empty is not None and file_coverage_stat.is_empty is True: + file_string += " - File is empty\n" + + if file_coverage_stat.nodes_with_docstring is not None: + for node_identifier in file_coverage_stat.nodes_with_docstring: + file_string += " - Found docstring for `{0}`\n".format( + node_identifier, + ) + + if file_coverage_stat.ignored_nodes is not None: + for ignored_node in file_coverage_stat.ignored_nodes: + file_string += " - Ignored `{0}`: reason: `{1}`\n".format( + ignored_node.identifier, + ignored_node.reason, + ) + + if file_coverage_stat.nodes_without_docstring is not None: + for node_identifier in file_coverage_stat.nodes_without_docstring: + if node_identifier == "module docstring": + file_string += " - No module docstring\n" + else: + file_string += " - No docstring for `{0}`\n".format(node_identifier) + + file_string += " Needed: %s; Found: %s; Missing: %s; Coverage: %.1f%%" % ( + file_coverage_stat.needed, + file_coverage_stat.found, + file_coverage_stat.missing, + file_coverage_stat.coverage, + ) + + final_string += "\n" + file_string + "\n" + + return final_string + "\n" + + def _generate_overall_stat_string(self) -> str: + if isinstance(self.overall_coverage_stat, float): + return str(self.overall_coverage_stat) + + prefix: str = "" + + if self.overall_coverage_stat.num_empty_files > 0: + prefix += " (%s files are empty)" % self.overall_coverage_stat.num_empty_files + + if self.overall_coverage_stat.is_skip_magic: + prefix += " (skipped all non-init magic methods)" + + if self.overall_coverage_stat.is_skip_file_docstring: + prefix += " (skipped file-level docstrings)" + + if self.overall_coverage_stat.is_skip_init: + prefix += " (skipped __init__ methods)" + + if self.overall_coverage_stat.is_skip_class_def: + prefix += " (skipped class definitions)" + + if 
self.overall_coverage_stat.is_skip_private: + prefix += " (skipped private methods)" + + final_string: str = "" + + if self.overall_coverage_stat.num_files > 1: + final_string += "Overall statistics for %s files%s:\n" % ( + self.overall_coverage_stat.num_files, + prefix, + ) else: - print_line("Overall statistics%s:" % postfix) + final_string += "Overall statistics%s:\n" % prefix - print_line( - "Needed: {} - Found: {} - Missing: {}".format( - count.needed, count.found, count.missing - ), + final_string += "Needed: {} - Found: {} - Missing: {}\n".format( + self.overall_coverage_stat.needed, + self.overall_coverage_stat.found, + self.overall_coverage_stat.missing, + ) + + final_string += "Total coverage: {:.1f}% - Grade: {}".format( + self.overall_coverage_stat.total_coverage, + self.overall_coverage_stat.grade, + ) + + return final_string + + +class MarkdownPrinter(LegacyPrinter): + """Printer for Markdown format.""" + + def save_to_file(self, path: Optional[str] = None) -> None: + if path is None: + path = "./coverage-results.md" + with open(path, "w") as wf: + wf.write(self._generate_string()) + + def _generate_file_stat_string(self) -> str: + final_string: str = "" + for file_coverage_stat in self.overall_files_coverage_stat: + + file_string: str = "**File**: `{0}`\n".format(file_coverage_stat.path) + + if file_coverage_stat.is_empty is not None and file_coverage_stat.is_empty is True: + file_string += "- File is empty\n" + + if file_coverage_stat.nodes_with_docstring is not None: + for node_identifier in file_coverage_stat.nodes_with_docstring: + file_string += "- Found docstring for `{0}`\n".format( + node_identifier, + ) + + if file_coverage_stat.ignored_nodes is not None: + for ignored_node in file_coverage_stat.ignored_nodes: + file_string += "- Ignored `{0}`: reason: `{1}`\n".format( + ignored_node.identifier, + ignored_node.reason, + ) + + if file_coverage_stat.nodes_without_docstring is not None: + for node_identifier in file_coverage_stat.nodes_without_docstring: + if node_identifier == "module docstring": + file_string += "- No module docstring\n" + else: + file_string += "- No docstring for `{0}`\n".format(node_identifier) + + file_string += "\n" + + file_string += self._generate_markdown_table( + ("Needed", "Found", "Missing", "Coverage"), + ( + ( + file_coverage_stat.needed, + file_coverage_stat.found, + file_coverage_stat.missing, + "{:.1f}%".format(file_coverage_stat.coverage), + ), + ), + ) + + if final_string == "": + final_string += file_string + "\n" + else: + final_string += "\n" + file_string + "\n" + + return final_string + "\n" + + def _generate_overall_stat_string(self) -> str: + if isinstance(self.overall_coverage_stat, float): + return str(self.overall_coverage_stat) + + final_string: str = "## Overall statistics\n" + + if self.overall_coverage_stat.num_files > 1: + final_string += "Files number: **{}**\n".format(self.overall_coverage_stat.num_files) + + final_string += "\n" + + final_string += "Total coverage: **{:.1f}%**\n".format( + self.overall_coverage_stat.total_coverage, ) - # Calculate Total Grade - grade = next( - message for message, grade_threshold in _GRADES if grade_threshold <= count.coverage() + final_string += "\n" + + final_string += "Grade: **{}**\n".format(self.overall_coverage_stat.grade) + + if self.overall_coverage_stat.num_empty_files > 0: + final_string += "- %s files are empty\n" % self.overall_coverage_stat.num_empty_files + + if self.overall_coverage_stat.is_skip_magic: + final_string += "- skipped all non-init magic methods\n" + + if 
self.overall_coverage_stat.is_skip_file_docstring: + final_string += "- skipped file-level docstrings\n" + + if self.overall_coverage_stat.is_skip_init: + final_string += "- skipped __init__ methods\n" + + if self.overall_coverage_stat.is_skip_class_def: + final_string += "- skipped class definitions\n" + + if self.overall_coverage_stat.is_skip_private: + final_string += "- skipped private methods\n" + + final_string += "\n" + + final_string += self._generate_markdown_table( + ("Needed", "Found", "Missing"), + ( + ( + self.overall_coverage_stat.needed, + self.overall_coverage_stat.found, + self.overall_coverage_stat.missing, + ), + ), ) - print_line("Total coverage: {:.1f}% - Grade: {}".format(count.coverage(), grade)) + return final_string + + def _generate_markdown_table( + self, + cols: Tuple[str, ...], + rows: Tuple[Tuple[Union[str, int, float]], ...], + ) -> str: + """Generate markdown table. + + Using: + >>> self._generate_markdown_table( + ... cols=("Needed", "Found", "Missing"), + ... vals=( + ... (10, 20, "65.5%"), + ... (30, 40, "99.9%") + ... ) + ... ) + | Needed | Found | Missing | + |---|---|---| + | 10 | 20 | 65.5% | + | 30 | 40 | 99.9% | + + Parameters + ---------- + cols: Tuple[str, ...] + Table columns + rows: Tuple[Tuple[Union[str, int, float]], ...] + Column values + + Returns + ------- + str + Generated table. + """ + if not all(len(v) == len(cols) for v in rows): + raise ValueError("Col num not equal to cols value") + final_string: str = "" + + for col in cols: + final_string += "| {} ".format(col) + final_string += "|\n" + + for _ in range(len(cols)): + final_string += "|---" + final_string += "|\n" + + for row in rows: + for value in row: + final_string += "| {} ".format(value) + final_string += "|" + + return final_string diff --git a/tests/test_coverage.py b/tests/test_coverage.py index 53ebdde..248fa27 100644 --- a/tests/test_coverage.py +++ b/tests/test_coverage.py @@ -6,7 +6,7 @@ from docstr_coverage import analyze from docstr_coverage.ignore_config import IgnoreConfig -from docstr_coverage.printers import _GRADES, LegacyPrinter +from docstr_coverage.printers import _GRADES, LegacyPrinter, MarkdownPrinter SAMPLES_DIRECTORY = os.path.join("tests", "sample_files", "subdir_a") EMPTY_FILE_PATH = os.path.join(SAMPLES_DIRECTORY, "empty_file.py") @@ -145,7 +145,8 @@ def test_should_report_when_no_docs_in_a_file(): [ ( [ - '\nFile: "tests/sample_files/subdir_a/empty_file.py"', + "", + 'File: "tests/sample_files/subdir_a/empty_file.py"', " - File is empty", " Needed: 0; Found: 0; Missing: 0; Coverage: 100.0%", "", @@ -157,10 +158,10 @@ def test_should_report_when_no_docs_in_a_file(): ) ], ) -def test_logging_empty_file(caplog, expected): +def test_legacy_printer_logging_empty_file(caplog, expected): with caplog.at_level(logging.DEBUG): result = analyze([EMPTY_FILE_PATH]) - LegacyPrinter(verbosity=4).print(result) + LegacyPrinter(result, verbosity=4).print_to_stdout() _file_results, _total_results = result.to_legacy() if platform.system() == "Windows": @@ -169,12 +170,121 @@ def test_logging_empty_file(caplog, expected): assert caplog.messages == expected +@pytest.mark.parametrize( + ["expected"], + [ + ( + [ + "\n", + 'File: "tests/sample_files/subdir_a/empty_file.py"\n', + " - File is empty\n", + " Needed: 0; Found: 0; Missing: 0; Coverage: 100.0%\n", + "\n", + "\n", + "Overall statistics (1 files are empty):\n", + "Needed: 0 - Found: 0 - Missing: 0\n", + "Total coverage: 100.0% - Grade: " + _GRADES[0][0], + ], + ) + ], +) +def 
test_legacy_save_to_file_printer_empty_file(tmpdir, expected): + path = tmpdir.join("coverage-result.txt") + result = analyze([EMPTY_FILE_PATH]) + LegacyPrinter(result, verbosity=4).save_to_file(path.strpath) + + lines = path.readlines() + if platform.system() == "Windows": + assert [m.replace("\\", "/") for m in lines] == expected + else: + assert lines == expected + + +@pytest.mark.parametrize( + ["expected"], + [ + ( + [ + "**File**: `tests/sample_files/subdir_a/empty_file.py`", + "- File is empty", + "", + "| Needed | Found | Missing | Coverage |", + "|---|---|---|---|", + "| 0 | 0 | 0 | 100.0% |", + "", + "", + "## Overall statistics", + "", + "Total coverage: **100.0%**", + "", + "Grade: **" + _GRADES[0][0] + "**", + "- 1 files are empty", + "", + "| Needed | Found | Missing |", + "|---|---|---|", + "| 0 | 0 | 0 |", + ], + ) + ], +) +def test_markdown_printer_logging_empty_file(caplog, expected): + with caplog.at_level(logging.DEBUG): + result = analyze([EMPTY_FILE_PATH]) + MarkdownPrinter(result, verbosity=4).print_to_stdout() + _file_results, _total_results = result.to_legacy() + + if platform.system() == "Windows": + assert [m.replace("\\", "/") for m in caplog.messages] == expected + else: + assert caplog.messages == expected + + +@pytest.mark.parametrize( + ["expected"], + [ + ( + [ + "**File**: `tests/sample_files/subdir_a/empty_file.py`\n", + "- File is empty\n", + "\n", + "| Needed | Found | Missing | Coverage |\n", + "|---|---|---|---|\n", + "| 0 | 0 | 0 | 100.0% |\n", + "\n", + "\n", + "## Overall statistics\n", + "\n", + "Total coverage: **100.0%**\n", + "\n", + "Grade: **" + _GRADES[0][0] + "**\n", + "- 1 files are empty\n", + "\n", + "| Needed | Found | Missing |\n", + "|---|---|---|\n", + "| 0 | 0 | 0 |", + ], + ) + ], +) +def test_markdown_save_to_file_printer_empty_file(tmpdir, expected): + path = tmpdir.join("coverage-result.md") + result = analyze([EMPTY_FILE_PATH]) + MarkdownPrinter(result, verbosity=4).save_to_file(path.strpath) + + lines = path.readlines() + if platform.system() == "Windows": + assert [m.replace("\\", "/") for m in lines] == expected + else: + assert lines == expected + + @pytest.mark.parametrize( ["expected", "verbose", "ignore_names"], [ ( [ - '\nFile: "tests/sample_files/subdir_a/partly_documented_file.py"', + "", + 'File: "tests/sample_files/subdir_a/partly_documented_file.py"', " - No module docstring", " - No docstring for `foo`", " - No docstring for `bar`", @@ -190,7 +300,8 @@ def test_logging_empty_file(caplog, expected): ), ( [ - '\nFile: "tests/sample_files/subdir_a/partly_documented_file.py"', + "", + 'File: "tests/sample_files/subdir_a/partly_documented_file.py"', " - No module docstring", " - No docstring for `FooBar.__init__`", " - No docstring for `foo`", @@ -207,7 +318,8 @@ def test_logging_empty_file(caplog, expected): ), ( [ - '\nFile: "tests/sample_files/subdir_a/partly_documented_file.py"', + "", + 'File: "tests/sample_files/subdir_a/partly_documented_file.py"', " Needed: 5; Found: 1; Missing: 4; Coverage: 20.0%", "", "", @@ -238,11 +350,133 @@ def test_logging_empty_file(caplog, expected): ), ], ) -def test_logging_partially_documented_file(caplog, expected, verbose, ignore_names): +def test_legacy_printer_logging_partially_documented_file(caplog, expected, verbose, ignore_names): + ignore_config = IgnoreConfig(ignore_names=ignore_names) + with caplog.at_level(logging.DEBUG): + result = analyze([PARTLY_DOCUMENTED_FILE_PATH], ignore_config=ignore_config) + LegacyPrinter(result, verbosity=verbose, 
ignore_config=ignore_config).print_to_stdout() + + if platform.system() == "Windows": + assert [m.replace("\\", "/") for m in caplog.messages] == expected + else: + assert caplog.messages == expected + + +@pytest.mark.parametrize( + ["expected", "verbose", "ignore_names"], + [ + ( + [ + "**File**: `tests/sample_files/subdir_a/partly_documented_file.py`", + "- No module docstring", + "- No docstring for `foo`", + "- No docstring for `bar`", + "", + "| Needed | Found | Missing | Coverage |", + "|---|---|---|---|", + "| 4 | 1 | 3 | 25.0% |", + "", + "", + "## Overall statistics", + "", + "Total coverage: **25.0%**", + "", + "Grade: **" + _GRADES[6][0] + "**", + "", + "| Needed | Found | Missing |", + "|---|---|---|", + "| 4 | 1 | 3 |", + ], + 3, + ([".*", "__.+__"],), + ), + ( + [ + "**File**: `tests/sample_files/subdir_a/partly_documented_file.py`", + "- No module docstring", + "- No docstring for `FooBar.__init__`", + "- No docstring for `foo`", + "- No docstring for `bar`", + "", + "| Needed | Found | Missing | Coverage |", + "|---|---|---|---|", + "| 5 | 1 | 4 | 20.0% |", + "", + "", + "## Overall statistics", + "", + "Total coverage: **20.0%**", + "", + "Grade: **" + _GRADES[7][0] + "**", + "", + "| Needed | Found | Missing |", + "|---|---|---|", + "| 5 | 1 | 4 |", + ], + 3, + (), + ), + ( + [ + "**File**: `tests/sample_files/subdir_a/partly_documented_file.py`", + "", + "| Needed | Found | Missing | Coverage |", + "|---|---|---|---|", + "| 5 | 1 | 4 | 20.0% |", + "", + "", + "## Overall statistics", + "", + "Total coverage: **20.0%**", + "", + "Grade: **" + _GRADES[7][0] + "**", + "", + "| Needed | Found | Missing |", + "|---|---|---|", + "| 5 | 1 | 4 |", + ], + 2, + (), + ), + ( + [ + "## Overall statistics", + "", + "Total coverage: **20.0%**", + "", + "Grade: **" + _GRADES[7][0] + "**", + "", + "| Needed | Found | Missing |", + "|---|---|---|", + "| 5 | 1 | 4 |", + ], + 1, + (), + ), + ( + [ + "## Overall statistics", + "", + "Total coverage: **0.0%**", + "", + "Grade: **" + _GRADES[9][0] + "**", + "", + "| Needed | Found | Missing |", + "|---|---|---|", + "| 1 | 0 | 1 |", + ], + 1, + ([".*", ".*"],), # ignore all, except module + ), + ], +) +def test_markdown_printer_logging_partially_documented_file( + caplog, expected, verbose, ignore_names +): ignore_config = IgnoreConfig(ignore_names=ignore_names) with caplog.at_level(logging.DEBUG): result = analyze([PARTLY_DOCUMENTED_FILE_PATH], ignore_config=ignore_config) - LegacyPrinter(verbosity=verbose, ignore_config=ignore_config).print(result) + MarkdownPrinter(result, verbosity=verbose, ignore_config=ignore_config).print_to_stdout() if platform.system() == "Windows": assert [m.replace("\\", "/") for m in caplog.messages] == expected
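Below is a brief usage sketch of the output options introduced in this diff. The flags, classes, and methods (`--format`, `--destination`, `MarkdownPrinter`, `print_to_stdout`, `save_to_file`) are taken from the changes above; the analyzed path `some_project/src/module.py` is only a placeholder.

```python
# Roughly equivalent to:
#   docstr-coverage some_project/src --format=markdown --destination=file
from docstr_coverage import analyze
from docstr_coverage.printers import MarkdownPrinter

# Analyze one or more Python files (placeholder path)
results = analyze(["some_project/src/module.py"])

# verbosity >= 2 adds the per-file tables to the report
printer = MarkdownPrinter(results, verbosity=3)
printer.print_to_stdout()                    # log the Markdown report
printer.save_to_file("coverage-results.md")  # or write it to a file (defaults to ./coverage-results.md)
```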