Mirror of https://github.com/TECHNOFAB11/bumpver.git (synced 2025-12-12 14:30:09 +01:00)
better error messages and fixups

commit f705164e75
parent 2f421daf16
7 changed files with 155 additions and 123 deletions
@@ -395,7 +395,7 @@ test:
 		--cov-report term \
 		--html=reports/pytest/index.html \
 		--junitxml reports/pytest.xml \
-		-k "$${PYTEST_FILTER}" \
+		-k "$${PYTEST_FILTER-$${FLTR}}" \
 		$(shell cd src/ && ls -1 */__init__.py | awk '{ sub(/\/__init__.py/, "", $$1); print "--cov "$$1 }') \
 		test/ src/;
 
@@ -515,7 +515,7 @@ devtest:
 		--capture=no \
 		--exitfirst \
 		--failed-first \
-		-k "$${PYTEST_FILTER}" \
+		-k "$${PYTEST_FILTER-$${FLTR}}" \
 		test/ src/;
 
 	@rm -rf "src/__pycache__";
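Both test targets pick up the same change: `$${PYTEST_FILTER}` becomes `$${PYTEST_FILTER-$${FLTR}}`. The doubled `$$` is just Make escaping, so the shell sees `${PYTEST_FILTER-${FLTR}}`, i.e. POSIX default expansion that falls back to `FLTR` (presumably a shorter alias variable) when `PYTEST_FILTER` is unset. A rough Python equivalent of that lookup, for illustration only and not part of the commit:

import os

# ${PYTEST_FILTER-${FLTR}}: use PYTEST_FILTER if it is set (even if empty),
# otherwise fall back to FLTR, otherwise the empty string.
pytest_filter = os.environ.get("PYTEST_FILTER", os.environ.get("FLTR", ""))
print(f'-k "{pytest_filter}"')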
@@ -148,8 +148,8 @@ def test(
     minor      : bool = False,
     patch      : bool = False,
     release_num: bool = False,
-    date       : typ.Optional[str] = None,
     pin_date   : bool = False,
+    date       : typ.Optional[str] = None,
 ) -> None:
     """Increment a version number for demo purposes."""
     _configure_logging(verbose=max(_VERBOSE, verbose))
@@ -179,6 +179,109 @@ def test(
     click.echo(f"PEP440 : {pep440_version}")
 
 
+def _grep_text(pattern: patterns.Pattern, text: str, color: bool) -> int:
+    match_count = 0
+    all_lines   = text.splitlines()
+    for match in pattern.regexp.finditer(text):
+        match_count += 1
+        match_start, match_end = match.span()
+
+        line_idx   = text[:match_start].count("\n")
+        line_start = text.rfind("\n", 0, match_start) + 1
+        line_end   = text.find("\n", match_end, -1)
+        if color:
+            matched_line = (
+                text[line_start:match_start]
+                + colorama.Style.BRIGHT
+                + text[match_start:match_end]
+                + colorama.Style.RESET_ALL
+                + text[match_end:line_end]
+            )
+        else:
+            matched_line = (
+                text[line_start:match_start]
+                + text[match_start:match_end]
+                + text[match_end:line_end]
+            )
+
+        lines_offset = max(0, line_idx - 1) + 1
+        lines        = all_lines[line_idx - 1 : line_idx + 2]
+
+        if line_idx == 0:
+            lines[0] = matched_line
+        else:
+            lines[1] = matched_line
+
+        for i, line in enumerate(lines):
+            print(f"{lines_offset + i:>4}: {line}")
+
+        print()
+    return match_count
+
+
+def _grep(
+    raw_pattern: str,
+    file_ios   : typ.Tuple[io.TextIOWrapper],
+    color      : bool,
+) -> None:
+    pattern = v2patterns.compile_pattern(raw_pattern)
+
+    match_count = 0
+    for file_io in file_ios:
+        text = file_io.read()
+
+        _match_count = _grep_text(pattern, text, color)
+
+        print()
+        print(f"Found {_match_count} match for pattern '{raw_pattern}' in {file_io.name}")
+        print()
+
+        match_count += _match_count
+
+    if match_count == 0 or _VERBOSE:
+        pyexpr_regex = regexfmt.pyexpr_regex(pattern.regexp.pattern)
+
+        print(f"# pycalver pattern: '{raw_pattern}'")
+        print("# " + regexfmt.regex101_url(pattern.regexp.pattern))
+        print(pyexpr_regex)
+        print()
+
+    if match_count == 0:
+        sys.exit(1)
+
+
+@cli.command()
+@click.option(
+    "-v",
+    "--verbose",
+    count=True,
+    help="Control log level. -vv for debug level.",
+)
+@click.argument("pattern")
+@click.argument('files', nargs=-1, type=click.File('r'))
+def grep(
+    pattern: str,
+    files  : typ.Tuple[io.TextIOWrapper],
+    verbose: int = 0,
+) -> None:
+    """Search file(s) for a version pattern."""
+    verbose = max(_VERBOSE, verbose)
+    _configure_logging(verbose)
+
+    raw_pattern = pattern  # use internal naming convention
+
+    isatty = getattr(sys.stdout, 'isatty', lambda: False)
+
+    if isatty():
+        colorama.init()
+        try:
+            _grep(raw_pattern, files, color=True)
+        finally:
+            colorama.deinit()
+    else:
+        _grep(raw_pattern, files, color=False)
+
+
 @cli.command()
 @click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.")
 @click.option(
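For reference, a minimal sketch of how the new grep subcommand could be exercised with click's test runner. The pattern and file name are made-up arguments, and `cli` is assumed to be the click group defined in the modified module (the `if __name__` block and `@cli.command()` decorators suggest this); this is illustrative only, not part of the commit:

from click.testing import CliRunner

# `cli` is the click group from the module this diff modifies; its import path
# is not shown in the diff, so treat this as a hypothetical usage sketch.
runner = CliRunner()
result = runner.invoke(cli, ["grep", "-vv", "vYYYY0M.BUILD[-RELEASE]", "setup.py"])
print(result.output)     # matched lines with surrounding context lines
print(result.exit_code)  # 1 if nothing matched (sys.exit(1) above), 0 otherwise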
@@ -254,7 +357,8 @@ def _incr(
     else:
         pattern = v2patterns.compile_pattern(raw_pattern)
 
-    logger.info(f"Using pattern {raw_pattern}/{pattern.regexp.pattern}")
+    logger.info("Using pattern " + raw_pattern)
+    logger.info("regex = " + regexfmt.pyexpr_regex(pattern.regexp.pattern))
 
     if has_v1_part:
         return v1version.incr(
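The extra log line gives the user something they can paste straight into a Python shell. Going by the `pyexpr_regex` helper shown further down in this diff (it returns `f"re.compile({repr(regex)})"`), the output is roughly as sketched below; the pattern string here is invented for illustration and is not a real compiled bumpver pattern:

# Illustrative only; mirrors logger.info("regex = " + regexfmt.pyexpr_regex(...))
# with a made-up regex string.
regex = r"v(?P<year_y>[1-9][0-9]{3})(?P<month>[0-9]{2})\.(?P<bid>[0-9]+)"
print("regex = " + f"re.compile({repr(regex)})")
# regex = re.compile('v(?P<year_y>[1-9][0-9]{3})(?P<month>[0-9]{2})\\.(?P<bid>[0-9]+)')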
@@ -495,108 +599,5 @@ def bump(
     _try_bump(cfg, new_version, commit_message, allow_dirty)
 
 
-def _grep_text(pattern: patterns.Pattern, text: str, color: bool) -> int:
-    match_count = 0
-    all_lines   = text.splitlines()
-    for match in pattern.regexp.finditer(text):
-        match_count += 1
-        match_start, match_end = match.span()
-
-        line_idx   = text[:match_start].count("\n")
-        line_start = text.rfind("\n", 0, match_start) + 1
-        line_end   = text.find("\n", match_end, -1)
-        if color:
-            matched_line = (
-                text[line_start:match_start]
-                + colorama.Style.BRIGHT
-                + text[match_start:match_end]
-                + colorama.Style.RESET_ALL
-                + text[match_end:line_end]
-            )
-        else:
-            matched_line = (
-                text[line_start:match_start]
-                + text[match_start:match_end]
-                + text[match_end:line_end]
-            )
-
-        lines_offset = max(0, line_idx - 1) + 1
-        lines        = all_lines[line_idx - 1 : line_idx + 2]
-
-        if line_idx == 0:
-            lines[0] = matched_line
-        else:
-            lines[1] = matched_line
-
-        for i, line in enumerate(lines):
-            print(f"{lines_offset + i:>4}: {line}")
-
-        print()
-    return match_count
-
-
-def _grep(
-    raw_pattern: str,
-    file_ios   : typ.Tuple[io.TextIOWrapper],
-    color      : bool,
-) -> None:
-    pattern = v2patterns.compile_pattern(raw_pattern)
-
-    match_count = 0
-    for file_io in file_ios:
-        text = file_io.read()
-
-        _match_count = _grep_text(pattern, text, color)
-
-        print()
-        print(f"Found {_match_count} match for pattern '{raw_pattern}' in {file_io.name}")
-        print()
-
-        match_count += _match_count
-
-    if match_count == 0 or _VERBOSE:
-        pyexpr_regex = regexfmt.pyexpr_regex(pattern.regexp.pattern)
-
-        print(f"# pycalver pattern: '{raw_pattern}'")
-        print("# " + regexfmt.regex101_url(pattern))
-        print(pyexpr_regex)
-        print()
-
-    if match_count == 0:
-        sys.exit(1)
-
-
-@cli.command()
-@click.option(
-    "-v",
-    "--verbose",
-    count=True,
-    help="Control log level. -vv for debug level.",
-)
-@click.argument("pattern")
-@click.argument('files', nargs=-1, type=click.File('r'))
-def grep(
-    pattern: str,
-    files  : typ.Tuple[io.TextIOWrapper],
-    verbose: int = 0,
-) -> None:
-    """Search files for a version pattern."""
-    verbose = max(_VERBOSE, verbose)
-    _configure_logging(verbose)
-
-    raw_pattern = pattern  # use internal naming convention
-
-    isatty = getattr(sys.stdout, 'isatty', lambda: False)
-
-    if isatty():
-        colorama.init()
-        try:
-            _grep(raw_pattern, files, color=True)
-        finally:
-            colorama.deinit()
-    else:
-        _grep(raw_pattern, files, color=False)
-
-
 if __name__ == '__main__':
     cli()
@@ -1,3 +1,8 @@
+# This file is part of the pycalver project
+# https://github.com/mbarkhau/pycalver
+#
+# Copyright (c) 2018-2020 Manuel Barkhau (mbarkhau@gmail.com) - MIT License
+# SPDX-License-Identifier: MIT
 import sys
 import typing as typ
 
@@ -1,8 +1,15 @@
+# This file is part of the pycalver project
+# https://github.com/mbarkhau/pycalver
+#
+# Copyright (c) 2018-2020 Manuel Barkhau (mbarkhau@gmail.com) - MIT License
+# SPDX-License-Identifier: MIT
 import re
+import logging
 import textwrap
 
 from . import pysix
-from . import patterns
+
+logger = logging.getLogger("pycalver.regexfmt")
 
 
 def format_regex(regex: str) -> str:
@@ -53,16 +60,16 @@ def pyexpr_regex(regex: str) -> str:
     return f"re.compile({repr(regex)})"
 
 
-def regex101_url(pattern: patterns.Pattern) -> str:
+def regex101_url(regex_pattern: str) -> str:
     try:
-        regex_text = format_regex(pattern.regexp.pattern)
+        regex_pattern = format_regex(regex_pattern)
     except re.error:
-        regex_text = pattern.regexp.pattern
+        logger.warning(f"Error formatting regex '{repr(regex_pattern)}'")
 
     return "".join(
         (
             "https://regex101.com/",
             "?flavor=python",
-            "&flags=gmx" "&regex=" + pysix.quote(regex_text),
+            "&flags=gmx" "&regex=" + pysix.quote(regex_pattern),
         )
     )
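After this change, callers pass the raw regex string (e.g. `pattern.regexp.pattern`) instead of the Pattern object, which keeps regexfmt free of the patterns import it previously needed. A self-contained sketch of the URL being built, assuming `urllib.parse.quote` behaves like the project's `pysix.quote` shim (an assumption; only the URL components come from the diff) and using a made-up regex string:

from urllib.parse import quote  # stand-in for pysix.quote, see note above

regex_pattern = r"v(?P<year_y>[1-9][0-9]{3})\.(?P<bid>[0-9]+)"  # illustrative only
url = (
    "https://regex101.com/"
    "?flavor=python"
    "&flags=gmx"
    "&regex=" + quote(regex_pattern)
)
print(url)  # link the error/grep output can point users at for debugging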
@@ -13,6 +13,7 @@ from . import parse
 from . import config
 from . import rewrite
 from . import version
+from . import regexfmt
 from . import v1version
 from .patterns import Pattern
 
@@ -39,7 +40,13 @@ def rewrite_lines(
     if non_matched_patterns:
         for nmp in non_matched_patterns:
             logger.error(f"No match for pattern '{nmp.raw_pattern}'")
-            logger.error(f"Pattern compiles to regex '{nmp.regexp.pattern}'")
+            msg = (
+                "\n# "
+                + regexfmt.regex101_url(nmp.regexp.pattern)
+                + "\nregex = "
+                + regexfmt.pyexpr_regex(nmp.regexp.pattern)
+            )
+            logger.error(msg)
         raise rewrite.NoPatternMatch("Invalid pattern(s)")
     else:
         return new_lines
@@ -13,6 +13,7 @@ from . import parse
 from . import config
 from . import rewrite
 from . import version
+from . import regexfmt
 from . import v2version
 from . import v2patterns
 from .patterns import Pattern
@@ -43,7 +44,14 @@ def rewrite_lines(
     if non_matched_patterns:
         for nmp in non_matched_patterns:
             logger.error(f"No match for pattern '{nmp.raw_pattern}'")
-            logger.error(f"Pattern compiles to regex '{nmp.regexp.pattern}'")
+            msg = (
+                "\n# "
+                + regexfmt.regex101_url(nmp.regexp.pattern)
+                + "\nregex = "
+                + regexfmt.pyexpr_regex(nmp.regexp.pattern)
+            )
+            logger.error(msg)
+            logger.error(msg)
         raise rewrite.NoPatternMatch("Invalid pattern(s)")
     else:
         return new_lines
@@ -148,8 +148,9 @@ V2_PART_PATTERN_CASES = [
 
 
 def _compile_part_re(pattern_str):
-    grouped_pattern_str = r"(?:" + pattern_str + r")"
-    return re.compile(grouped_pattern_str)
+    grouped_pattern_str = r"^(?:" + pattern_str + r")$"
+    # print("\n", grouped_pattern_str)
+    return re.compile(grouped_pattern_str, flags=re.MULTILINE)
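Anchoring the part patterns as `^(?:...)$` makes the tests reject partial matches, which is why several expected values in the cases further down change from a truncated match to None. A minimal, self-contained illustration of the difference (the pattern string here is a stand-in, not one of the project's PART_PATTERNS):

import re

pattern_str = r"v[0-9]{6}\.[0-9]{4}(?:-alpha)?"   # stand-in part pattern
loose  = re.compile(r"(?:" + pattern_str + r")")
strict = re.compile(r"^(?:" + pattern_str + r")$", flags=re.MULTILINE)

text = "v201712.0036-alpha0"
print(loose.match(text).group(0))  # "v201712.0036-alpha" -- silently matches a prefix
print(strict.match(text))          # None -- the trailing "0" now fails the whole case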
 
 
 @pytest.mark.parametrize("parts, testcase, expected", V2_PART_PATTERN_CASES)
@@ -157,7 +158,10 @@ def test_v2_part_patterns(parts, testcase, expected):
     for part in parts:
         part_re = _compile_part_re(v2patterns.PART_PATTERNS[part])
         match   = part_re.match(testcase)
-        assert (match is None and expected is None) or (match.group(0) == expected)
+        if match is None:
+            assert expected is None
+        else:
+            assert match.group(0) == expected
 
 
 @pytest.mark.parametrize("part_name", v2patterns.PART_PATTERNS.keys())
@@ -176,8 +180,8 @@ PATTERN_PART_CASES = [
     ("pep440_pycalver", "201712.0033-alpha"  , None),
     ("pycalver"       , "v201712.0034"       , "v201712.0034"),
     ("pycalver"       , "v201712.0035-alpha" , "v201712.0035-alpha"),
-    ("pycalver"       , "v201712.0036-alpha0", "v201712.0036-alpha"),
-    ("pycalver"       , "v201712.0037-pre"   , "v201712.0037"),
+    ("pycalver"       , "v201712.0036-alpha0", None),
+    ("pycalver"       , "v201712.0037-pre"   , None),  # pre not available for v1 patterns
     ("pycalver"       , "201712.38a0"        , None),
     ("pycalver"       , "201712.0039"        , None),
     ("semver"         , "1.23.456"           , "1.23.456"),
@@ -194,17 +198,17 @@ PATTERN_PART_CASES = [
     ("release", "-dev" , "-dev"),
     ("release", "-rc"  , "-rc"),
     ("release", "-post", "-post"),
-    ("release", "-pre" , ""),
-    ("release", "alpha", ""),
+    ("release", "-pre" , None),  # pre not available for v1 patterns
+    ("release", "alpha", None),  # missing dash "-" prefix
 ]
 
 
 @pytest.mark.parametrize("part_name, line, expected", PATTERN_PART_CASES)
 def test_v1_re_pattern_parts(part_name, line, expected):
     part_re = _compile_part_re(v1patterns.PART_PATTERNS[part_name])
-    result = part_re.search(line)
+    result = part_re.match(line)
     if result is None:
-        assert expected is None, (part_name, line)
+        assert expected is None, (part_name, line, result)
     else:
         result_val = result.group(0)
         assert result_val == expected, (part_name, line)