Fix small annoyances across different tests to clean up the log. #2135

Merged · 16 commits · Feb 15, 2025
Changes from all commits
62 changes: 39 additions & 23 deletions pint/delegates/formatter/_spec_helpers.py
@@ -1,11 +1,11 @@
 """
 pint.delegates.formatter._spec_helpers
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 Convenient functions to deal with format specifications.

 :copyright: 2022 by Pint Authors, see AUTHORS for more details.
 :license: BSD, see LICENSE for more details.
 """

 from __future__ import annotations
@@ -87,45 +87,61 @@ def remove_custom_flags(spec: str) -> str:
     return spec


+##########
+# This weird way of defining split format
+# is the only reasonable way I found to use
+# lru_cache in a function that might emit warnings
+# and do it every time.
+# TODO: simplify it when there are no warnings.
+
+
 @functools.lru_cache
-def split_format(
+def _split_format(
     spec: str, default: str, separate_format_defaults: bool = True
-) -> tuple[str, str]:
+) -> tuple[str, str, list[str]]:
     """Split format specification into magnitude and unit format."""
     mspec = remove_custom_flags(spec)
     uspec = extract_custom_flags(spec)

     default_mspec = remove_custom_flags(default)
     default_uspec = extract_custom_flags(default)

+    warns = []
     if separate_format_defaults in (False, None):
         # should we warn always or only if there was no explicit choice?
         # Given that we want to eventually remove the flag again, I'd say yes?
         if spec and separate_format_defaults is None:
             if not uspec and default_uspec:
-                warnings.warn(
-                    (
-                        "The given format spec does not contain a unit formatter."
-                        " Falling back to the builtin defaults, but in the future"
-                        " the unit formatter specified in the `default_format`"
-                        " attribute will be used instead."
-                    ),
-                    DeprecationWarning,
+                warns.append(
+                    "The given format spec does not contain a unit formatter."
+                    " Falling back to the builtin defaults, but in the future"
+                    " the unit formatter specified in the `default_format`"
+                    " attribute will be used instead."
                 )
             if not mspec and default_mspec:
-                warnings.warn(
-                    (
-                        "The given format spec does not contain a magnitude formatter."
-                        " Falling back to the builtin defaults, but in the future"
-                        " the magnitude formatter specified in the `default_format`"
-                        " attribute will be used instead."
-                    ),
-                    DeprecationWarning,
+                warns.append(
+                    "The given format spec does not contain a magnitude formatter."
+                    " Falling back to the builtin defaults, but in the future"
+                    " the magnitude formatter specified in the `default_format`"
+                    " attribute will be used instead."
                 )
         elif not spec:
             mspec, uspec = default_mspec, default_uspec
     else:
         mspec = mspec or default_mspec
         uspec = uspec or default_uspec

+    return mspec, uspec, warns
+
+
+def split_format(
+    spec: str, default: str, separate_format_defaults: bool = True
+) -> tuple[str, str]:
+    """Split format specification into magnitude and unit format."""
+
+    mspec, uspec, warns = _split_format(spec, default, separate_format_defaults)
+
+    for warn_msg in warns:
+        warnings.warn(warn_msg, DeprecationWarning)
+
     return mspec, uspec
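
For context on the refactor above: `functools.lru_cache` caches the return value, so a warning emitted inside the cached function fires only on the first call. Splitting the work lets the cached `_split_format` merely collect messages while the uncached `split_format` re-emits them on every call. A minimal standalone sketch of the pattern (hypothetical `parse`/`_parse` names, not pint API):

```python
import functools
import warnings


@functools.lru_cache
def _parse(spec: str) -> tuple[str, list[str]]:
    # Cached part: pure computation, warnings are only *collected*.
    warns = []
    if not spec:
        warns.append("empty spec, falling back to 'd'")
    return spec or "d", warns


def parse(spec: str) -> str:
    # Uncached wrapper: re-emits the collected warnings on every call.
    result, warns = _parse(spec)
    for msg in warns:
        warnings.warn(msg, DeprecationWarning)
    return result
```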
157 changes: 81 additions & 76 deletions pint/pint_eval.py
@@ -12,40 +12,24 @@
 import operator
 import token as tokenlib
 import tokenize
+from collections.abc import Iterable
 from io import BytesIO
 from tokenize import TokenInfo
-from typing import Any
-
-try:
-    from uncertainties import ufloat
-
-    HAS_UNCERTAINTIES = True
-except ImportError:
-    HAS_UNCERTAINTIES = False
-    ufloat = None
+from typing import Any, Callable, Generator, Generic, Iterator, TypeVar

+from .compat import HAS_UNCERTAINTIES, ufloat
 from .errors import DefinitionSyntaxError

-# For controlling order of operations
-_OP_PRIORITY = {
-    "+/-": 4,
-    "**": 3,
-    "^": 3,
-    "unary": 2,
-    "*": 1,
-    "": 1,  # operator for implicit ops
-    "//": 1,
-    "/": 1,
-    "%": 1,
-    "+": 0,
-    "-": 0,
-}
+S = TypeVar("S")

 if HAS_UNCERTAINTIES:
     _ufloat = ufloat  # type: ignore
 else:

-    def _ufloat(left, right):
-        if HAS_UNCERTAINTIES:
-            return ufloat(left, right)
-        raise TypeError("Could not import support for uncertainties")
+    def _ufloat(*args: Any, **kwargs: Any):
+        raise TypeError(
+            "Please install the uncertainties package to be able to parse quantities with uncertainty."
+        )


 def _power(left: Any, right: Any) -> Any:
@@ -63,46 +47,93 @@ def _power(left: Any, right: Any) -> Any:
     return operator.pow(left, right)


-# https://stackoverflow.com/a/1517965/1291237
-class tokens_with_lookahead:
-    def __init__(self, iter):
+UnaryOpT = Callable[
+    [
+        Any,
+    ],
+    Any,
+]
+BinaryOpT = Callable[[Any, Any], Any]
+
+_UNARY_OPERATOR_MAP: dict[str, UnaryOpT] = {"+": lambda x: x, "-": lambda x: x * -1}
+
+_BINARY_OPERATOR_MAP: dict[str, BinaryOpT] = {
+    "+/-": _ufloat,
+    "**": _power,
+    "*": operator.mul,
+    "": operator.mul,  # operator for implicit ops
+    "/": operator.truediv,
+    "+": operator.add,
+    "-": operator.sub,
+    "%": operator.mod,
+    "//": operator.floordiv,
+}
+
+# For controlling order of operations
+_OP_PRIORITY = {
+    "+/-": 4,
+    "**": 3,
+    "^": 3,
+    "unary": 2,
+    "*": 1,
+    "": 1,  # operator for implicit ops
+    "//": 1,
+    "/": 1,
+    "%": 1,
+    "+": 0,
+    "-": 0,
+}
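
As a quick illustration of how these tables are meant to be consumed (a sketch, assuming the names land in `pint.pint_eval` as shown in this diff): an operator token's string indexes into the map, and `_OP_PRIORITY` decides how tightly it binds, with the empty string standing for implicit multiplication such as `3 meter`.

```python
from pint.pint_eval import _BINARY_OPERATOR_MAP, _OP_PRIORITY

assert _BINARY_OPERATOR_MAP["**"](2, 3) == 8
assert _BINARY_OPERATOR_MAP[""](2, 3) == 6  # implicit product, e.g. "3 meter"
assert _OP_PRIORITY["**"] > _OP_PRIORITY["*"] > _OP_PRIORITY["+"]
```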


+class IteratorLookAhead(Generic[S]):
+    """An iterator with lookahead buffer.
+
+    Adapted: https://stackoverflow.com/a/1517965/1291237
+    """
+
+    def __init__(self, iter: Iterator[S]):
         self.iter = iter
-        self.buffer = []
+        self.buffer: list[S] = []

     def __iter__(self):
         return self

-    def __next__(self):
+    def __next__(self) -> S:
         if self.buffer:
             return self.buffer.pop(0)
         else:
             return self.iter.__next__()

-    def lookahead(self, n):
+    def lookahead(self, n: int) -> S:
         """Return an item n entries ahead in the iteration."""
         while n >= len(self.buffer):
             try:
                 self.buffer.append(self.iter.__next__())
             except StopIteration:
-                return None
+                raise ValueError("Cannot look ahead, out of range")
         return self.buffer[n]
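
A usage sketch of the class above (assuming it is importable from `pint.pint_eval`): `lookahead` peeks without consuming, buffered items replay in order, and peeking past the end now raises `ValueError` instead of silently returning `None`.

```python
from pint.pint_eval import IteratorLookAhead

it = IteratorLookAhead(iter([1, 2, 3]))
assert it.lookahead(0) == 1  # peek; nothing is consumed
assert it.lookahead(2) == 3
assert next(it) == 1         # buffered items replay in order
assert list(it) == [2, 3]

try:
    it.lookahead(0)          # exhausted: no longer returns None
except ValueError as exc:
    print(exc)               # Cannot look ahead, out of range
```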


-def _plain_tokenizer(input_string):
+def plain_tokenizer(input_string: str) -> Generator[TokenInfo, None, None]:
+    """Standard python tokenizer"""
     for tokinfo in tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline):
         if tokinfo.type != tokenlib.ENCODING:
             yield tokinfo
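
A sketch of what the renamed tokenizer yields (assuming the public `plain_tokenizer` name from this diff): it is Python's own `tokenize` with the leading `ENCODING` token filtered out.

```python
import token as tokenlib

from pint.pint_eval import plain_tokenizer

for tok in plain_tokenizer("3.5 meter"):
    print(tokenlib.tok_name[tok.type], repr(tok.string))
# Roughly: NUMBER '3.5', NAME 'meter', NEWLINE '', ENDMARKER ''
```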


-def uncertainty_tokenizer(input_string):
-    def _number_or_nan(token):
+def uncertainty_tokenizer(input_string: str) -> Generator[TokenInfo, None, None]:
+    """Tokenizer capable of parsing uncertainties as v+/-u and v±u"""
+
+    def _number_or_nan(token: TokenInfo) -> bool:
         if token.type == tokenlib.NUMBER or (
             token.type == tokenlib.NAME and token.string == "nan"
         ):
             return True
         return False

-    def _get_possible_e(toklist, e_index):
+    def _get_possible_e(
+        toklist: IteratorLookAhead[TokenInfo], e_index: int
+    ) -> TokenInfo | None:
         possible_e_token = toklist.lookahead(e_index)
         if (
             possible_e_token.string[0] == "e"
@@ -143,7 +174,7 @@ def _get_possible_e(toklist, e_index):
             possible_e = None
         return possible_e

-    def _apply_e_notation(mantissa, exponent):
+    def _apply_e_notation(mantissa: TokenInfo, exponent: TokenInfo) -> TokenInfo:
         if mantissa.string == "nan":
             return mantissa
         if float(mantissa.string) == 0.0:
@@ -156,7 +187,12 @@ def _apply_e_notation(mantissa, exponent):
             line=exponent.line,
         )

-    def _finalize_e(nominal_value, std_dev, toklist, possible_e):
+    def _finalize_e(
+        nominal_value: TokenInfo,
+        std_dev: TokenInfo,
+        toklist: IteratorLookAhead[TokenInfo],
+        possible_e: TokenInfo,
+    ) -> tuple[TokenInfo, TokenInfo]:
         nominal_value = _apply_e_notation(nominal_value, possible_e)
         std_dev = _apply_e_notation(std_dev, possible_e)
         next(toklist)  # consume 'e' and positive exponent value
@@ -178,8 +214,9 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
     # wading through all that vomit, just eliminate the problem
     # in the input by rewriting ± as +/-.
     input_string = input_string.replace("±", "+/-")
-    toklist = tokens_with_lookahead(_plain_tokenizer(input_string))
+    toklist = IteratorLookAhead(plain_tokenizer(input_string))
     for tokinfo in toklist:
+        assert tokinfo is not None
         line = tokinfo.line
         start = tokinfo.start
         if (
@@ -194,7 +231,7 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
                 end=toklist.lookahead(1).end,
                 line=line,
             )
-            for i in range(-1, 1):
+            for _ in range(-1, 1):
                 next(toklist)
             yield plus_minus_op
         elif (
@@ -280,31 +317,7 @@ def _finalize_e(nominal_value, std_dev, toklist, possible_e):
 if HAS_UNCERTAINTIES:
     tokenizer = uncertainty_tokenizer
 else:
-    tokenizer = _plain_tokenizer
-
-import typing
-
-UnaryOpT = typing.Callable[
-    [
-        Any,
-    ],
-    Any,
-]
-BinaryOpT = typing.Callable[[Any, Any], Any]
-
-_UNARY_OPERATOR_MAP: dict[str, UnaryOpT] = {"+": lambda x: x, "-": lambda x: x * -1}
-
-_BINARY_OPERATOR_MAP: dict[str, BinaryOpT] = {
-    "+/-": _ufloat,
-    "**": _power,
-    "*": operator.mul,
-    "": operator.mul,  # operator for implicit ops
-    "/": operator.truediv,
-    "+": operator.add,
-    "-": operator.sub,
-    "%": operator.mod,
-    "//": operator.floordiv,
-}
+    tokenizer = plain_tokenizer
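
A sketch of the fallback behaviour wired up here: when `uncertainties` is missing, the module falls back to `plain_tokenizer`, and the `+/-` entry of `_BINARY_OPERATOR_MAP` resolves to the new `_ufloat` stub, which raises the friendlier `TypeError` (names as in this diff).

```python
from pint.pint_eval import _BINARY_OPERATOR_MAP, HAS_UNCERTAINTIES

if HAS_UNCERTAINTIES:
    print(_BINARY_OPERATOR_MAP["+/-"](2.0, 0.1))  # 2.0+/-0.1
else:
    try:
        _BINARY_OPERATOR_MAP["+/-"](2.0, 0.1)
    except TypeError as exc:
        print(exc)  # Please install the uncertainties package ...
```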


 class EvalTreeNode:
@@ -344,12 +357,7 @@ def to_string(self) -> str:

     def evaluate(
         self,
-        define_op: typing.Callable[
-            [
-                Any,
-            ],
-            Any,
-        ],
+        define_op: UnaryOpT,
         bin_op: dict[str, BinaryOpT] | None = None,
         un_op: dict[str, UnaryOpT] | None = None,
     ):
@@ -395,9 +403,6 @@ def evaluate(
         return define_op(self.left)


-from collections.abc import Iterable
-
-
 def _build_eval_tree(
     tokens: list[TokenInfo],
     op_priority: dict[str, int],
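
End-to-end, the pieces above compose like this (a sketch, assuming `build_eval_tree` keeps its existing signature and that `evaluate` falls back to the module-level operator maps when `bin_op`/`un_op` are `None`):

```python
from pint.pint_eval import build_eval_tree, plain_tokenizer

tree = build_eval_tree(list(plain_tokenizer("2 ** 3 + 4")))
# Leaf nodes hold TokenInfo objects, so define_op converts token text.
result = tree.evaluate(lambda tok: float(tok.string))
assert result == 12.0
```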
20 changes: 16 additions & 4 deletions pint/testing.py
@@ -73,10 +73,16 @@ def assert_equal(first, second, msg: str | None = None) -> None:
     if isinstance(m1, ndarray) or isinstance(m2, ndarray):
         np.testing.assert_array_equal(m1, m2, err_msg=msg)
     elif not isinstance(m1, Number):
-        warnings.warn("In assert_equal, m1 is not a number ", UserWarning)
+        warnings.warn(
+            f"In assert_equal, m1 is not a number {first} ({m1}) vs. {second} ({m2}) ",
+            UserWarning,
+        )
         return
     elif not isinstance(m2, Number):
-        warnings.warn("In assert_equal, m2 is not a number ", UserWarning)
+        warnings.warn(
+            f"In assert_equal, m2 is not a number {first} ({m1}) vs. {second} ({m2}) ",
+            UserWarning,
+        )
         return
     elif math.isnan(m1):
         assert math.isnan(m2), msg
@@ -131,10 +137,16 @@ def assert_allclose(
     if isinstance(m1, ndarray) or isinstance(m2, ndarray):
         np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)
     elif not isinstance(m1, Number):
-        warnings.warn("In assert_equal, m1 is not a number ", UserWarning)
+        warnings.warn(
+            f"In assert_equal, m1 is not a number {first} ({m1}) vs. {second} ({m2}) ",
+            UserWarning,
+        )
         return
     elif not isinstance(m2, Number):
-        warnings.warn("In assert_equal, m2 is not a number ", UserWarning)
+        warnings.warn(
+            f"In assert_equal, m2 is not a number {first} ({m1}) vs. {second} ({m2}) ",
+            UserWarning,
+        )
         return
     elif math.isnan(m1):
         assert math.isnan(m2), msg
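
A sketch of the helpers in use (public `pint.testing` API): dimensionally compatible quantities are converted before their magnitudes are compared, and with this change a non-numeric magnitude now produces a `UserWarning` that names both operands instead of the bare "m1 is not a number".

```python
import pint
import pint.testing

ureg = pint.UnitRegistry()

# Units are converted before comparing magnitudes, so this passes:
pint.testing.assert_allclose(1.0 * ureg.m, 100.0 * ureg.cm, rtol=1e-6)

# Mismatched values raise AssertionError, with `msg` included in the report:
# pint.testing.assert_equal(1.0 * ureg.m, 2.0 * ureg.m, msg="lengths differ")
```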