diff --git a/altair/utils/_transformed_data.py b/altair/utils/_transformed_data.py index 3839a13d2..43d398575 100644 --- a/altair/utils/_transformed_data.py +++ b/altair/utils/_transformed_data.py @@ -213,8 +213,9 @@ def name_views( chart_names: list[str] = [] for subchart in subcharts: - for name in name_views(subchart, i=i + len(chart_names), exclude=exclude): - chart_names.append(name) + chart_names.extend( + name_views(subchart, i=i + len(chart_names), exclude=exclude) + ) return chart_names @@ -324,9 +325,7 @@ def get_datasets_for_scope(vega_spec: dict[str, Any], scope: Scope) -> list[str] group = get_group_mark_for_scope(vega_spec, scope) or {} # get datasets from group - datasets = [] - for dataset in group.get("data", []): - datasets.append(dataset["name"]) + datasets = [dataset["name"] for dataset in group.get("data", [])] # Add facet dataset facet_dataset = group.get("from", {}).get("facet", {}).get("name", None) diff --git a/altair/utils/core.py b/altair/utils/core.py index 7e8340324..472b9641a 100644 --- a/altair/utils/core.py +++ b/altair/utils/core.py @@ -23,7 +23,6 @@ overload, ) -import jsonschema import narwhals.stable.v1 as nw from narwhals.dependencies import get_polars, is_pandas_dataframe from narwhals.typing import IntoDataFrame @@ -871,13 +870,9 @@ def _wrap_in_channel(self, obj: Any, encoding: str, /): return [self._wrap_in_channel(el, encoding) for el in obj] if channel := self.name_to_channel.get(encoding): tp = channel["value" if "value" in obj else "field"] - try: - # Don't force validation here; some objects won't be valid until - # they're created in the context of a chart. - return tp.from_dict(obj, validate=False) - except jsonschema.ValidationError: - # our attempts at finding the correct class have failed - return obj + # Don't force validation here; some objects won't be valid until + # they're created in the context of a chart. + return tp.from_dict(obj, validate=False) else: warnings.warn(f"Unrecognized encoding channel {encoding!r}", stacklevel=1) return obj diff --git a/altair/utils/schemapi.py b/altair/utils/schemapi.py index 84f5be277..37335ee0d 100644 --- a/altair/utils/schemapi.py +++ b/altair/utils/schemapi.py @@ -3,51 +3,44 @@ from __future__ import annotations import contextlib -import copy import inspect import json +import operator import sys import textwrap -from collections import defaultdict -from functools import partial +from collections import defaultdict, deque +from functools import lru_cache, partial from importlib.metadata import version as importlib_version -from itertools import chain, zip_longest +from itertools import chain, groupby, islice, zip_longest from math import ceil from typing import ( TYPE_CHECKING, Any, Dict, - Final, Iterable, - Iterator, List, - Literal, + Mapping, Sequence, TypeVar, Union, cast, overload, ) -from typing_extensions import TypeAlias import jsonschema -import jsonschema.exceptions import jsonschema.validators import narwhals.stable.v1 as nw +from jsonschema import ValidationError from packaging.version import Version -# This leads to circular imports with the vegalite module. 
Currently, this works -# but be aware that when you access it in this script, the vegalite module might -# not yet be fully instantiated in case your code is being executed during import time -from altair import vegalite - if TYPE_CHECKING: from types import ModuleType - from typing import ClassVar + from typing import Callable, ClassVar, Final, Iterator, KeysView, Literal - from referencing import Registry + from jsonschema.protocols import Validator, _JsonParameter from altair.typing import ChartType + from altair.vegalite.v5.schema._typing import Map if sys.version_info >= (3, 13): from typing import TypeIs @@ -55,37 +48,81 @@ from typing_extensions import TypeIs if sys.version_info >= (3, 11): - from typing import Never, Self + from typing import LiteralString, Never, Self else: - from typing_extensions import Never, Self + from typing_extensions import LiteralString, Never, Self + if sys.version_info >= (3, 10): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias _OptionalModule: TypeAlias = "ModuleType | None" + _Errs: TypeAlias = Iterable[ValidationError] + _ErrsLazy: TypeAlias = Iterator[ValidationError] + _ErrsLazyGroup: TypeAlias = Iterator[_ErrsLazy] + _IntoLazyGroup: TypeAlias = Iterator["tuple[str, ValidationError]"] + _ValidatorKeyword: TypeAlias = Literal[ + "additionalProperties", + "enum", + "type", + "required", + "properties", + "anyOf", + "allOf", + "oneOf", + "ref", + "const", + ] + """Non-exhaustive listing of possible literals in ``ValidationError.validator``""" + +__all__ = [ + "Optional", # altair.utils + "SchemaBase", # altair.vegalite.v5.schema.core + "Undefined", # altair.utils + "UndefinedType", # altair.vegalite.v5.schema.core -> (side-effect relied on to propagate to alt.__init__) + "_is_valid", # altair.vegalite.v5.api + "_resolve_references", # tools.schemapi.utils -> tools.generate_schema_wrapper + "_subclasses", # altair.vegalite.v5.schema.core + "is_undefined", # altair.typing + "validate_jsonschema", # altair.utils.display + "with_property_setters", # altair.vegalite.v5.schema.channels +] -ValidationErrorList: TypeAlias = List[jsonschema.exceptions.ValidationError] -GroupedValidationErrors: TypeAlias = Dict[str, ValidationErrorList] - -# This URI is arbitrary and could be anything else. It just cannot be an empty -# string as we need to reference the schema registered in -# the referencing.Registry. _VEGA_LITE_ROOT_URI: Final = "urn:vega-lite-schema" +""" +Prefix added to each ``"$ref"``. + +This URI is arbitrary and could be anything else. + +It just cannot be an empty string as we need to reference the schema registered in +the ``referencing.Registry``. +""" + +_DEFAULT_DIALECT_URI: LiteralString = "http://json-schema.org/draft-07/schema#" +""" +Ideally, this would be parsed from the current Vega-Lite schema, and not hardcoded here. + +However, due to circular imports between this module and ``alt.vegalite``, +this information is not yet available as the latter is only *partially* loaded. + +The `draft version`_ which is used is unlikely to change often so it's ok to keep this. + +.. _draft version: + https://json-schema.org/understanding-json-schema/reference/schema#declaring-a-dialect +""" +# RELATED: tests/utils/test/schemapi.py/test_actual_json_schema_draft_is_same_as_hardcoded_default -# Ideally, jsonschema specification would be parsed from the current Vega-Lite -# schema instead of being hardcoded here as a default value. 
-# However, due to circular imports between this module and the altair.vegalite
-# modules, this information is not yet available at this point as altair.vegalite
-# is only partially loaded. The draft version which is used is unlikely to
-# change often so it's ok to keep this. There is also a test which validates
-# that this value is always the same as in the Vega-Lite schema.
-_DEFAULT_JSON_SCHEMA_DRAFT_URL: Final = "http://json-schema.org/draft-07/schema#"
-
-
-# If DEBUG_MODE is True, then schema objects are converted to dict and
-# validated at creation time. This slows things down, particularly for
-# larger specs, but leads to much more useful tracebacks for the user.
-# Individual schema classes can override this by setting the
-# class-level _class_is_valid_at_instantiation attribute to False
 DEBUG_MODE: bool = True
+"""
+If ``DEBUG_MODE``, then ``SchemaBase`` instances are converted to ``dict`` and validated at creation time.
+
+This slows things down, particularly for larger specs, but leads to much more
+useful tracebacks for the user.
-jsonschema_version_str = importlib_version("jsonschema")
+Individual schema classes can override with:
+
+    class Derived(SchemaBase):
+        _class_is_valid_at_instantiation: ClassVar[bool] = False
+"""


 def enable_debug_mode() -> None:
@@ -109,238 +146,406 @@ def debug_mode(arg: bool) -> Iterator[None]:
         DEBUG_MODE = original


-@overload
 def validate_jsonschema(
-    spec: Any,
-    schema: dict[str, Any],
-    rootschema: dict[str, Any] | None = ...,
-    *,
-    raise_error: Literal[True] = ...,
-) -> Never: ...
+    spec: _JsonParameter, schema: Map, rootschema: Map | None = None
+) -> None:
+    """
+    Validates ``spec`` against ``schema`` in the context of ``rootschema``.
+
+    Any ``ValidationError``(s) are deduplicated and prioritized, with
+    the remaining errors deemed relevant to the user.

-@overload
-def validate_jsonschema(
-    spec: Any,
-    schema: dict[str, Any],
-    rootschema: dict[str, Any] | None = ...,
-    *,
-    raise_error: Literal[False],
-) -> jsonschema.exceptions.ValidationError | None: ...
+    Notes
+    -----
+    - The first error is monkeypatched with a grouped iterator of all remaining errors
+    - ``SchemaValidationError`` utilizes the patched attribute, to craft a more helpful error message.
+    - However, this breaks typing
+
+    ``schema`` and ``rootschema`` are not validated but instead considered as valid.

-def validate_jsonschema(
-    spec,
-    schema: dict[str, Any],
-    rootschema: dict[str, Any] | None = None,
-    *,
-    raise_error: bool = True,
-) -> jsonschema.exceptions.ValidationError | None:
-    """
-    Validates the passed in spec against the schema in the context of the rootschema.
+    We don't use ``jsonschema.validate`` as this would validate the ``schema`` itself.
+    Instead, we pass the ``schema`` directly to the validator class.
+
+    This is done for two reasons:
+
+    1. The schema comes from Vega-Lite and is not based on the user
+       input, therefore there is no need to validate it in the first place.
+    2. The "uri-reference" format checker fails for some of the
+       references as URIs in "$ref" are not encoded, e.g.:
+
+        '#/definitions/ValueDefWithCondition<MarkPropFieldOrDatumDef,(Gradient|string|null)>'

-    If any errors are found, they are deduplicated and prioritized
-    and only the most relevant errors are kept. Errors are then either raised
-    or returned, depending on the value of `raise_error`.
+       would be a valid $ref in a Vega-Lite schema but it is not a valid
+       URI reference due to characters such as '<'.
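+
+    A minimal sketch of both outcomes, using a hypothetical toy schema
+    (not the real Vega-Lite one)::
+
+        schema = {"type": "object", "properties": {"x": {"type": "number"}}}
+        validate_jsonschema({"x": 1.5}, schema)     # returns None
+        validate_jsonschema({"x": "oops"}, schema)  # raises ValidationError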
""" - errors = _get_errors_from_spec(spec, schema, rootschema=rootschema) - if errors: - leaf_errors = _get_leaves_of_error_tree(errors) - grouped_errors = _group_errors_by_json_path(leaf_errors) - grouped_errors = _subset_to_most_specific_json_paths(grouped_errors) - grouped_errors = _deduplicate_errors(grouped_errors) - - # Nothing special about this first error but we need to choose one - # which can be raised - main_error: Any = next(iter(grouped_errors.values()))[0] - # All errors are then attached as a new attribute to ValidationError so that - # they can be used in SchemaValidationError to craft a more helpful - # error message. Setting a new attribute like this is not ideal as - # it then no longer matches the type ValidationError. It would be better - # to refactor this function to never raise but only return errors. - main_error._all_errors = grouped_errors - if raise_error: - raise main_error + it_errors = _validator(schema, rootschema).iter_errors(spec) + if first_error := next(it_errors, None): + groups = _group_tree_leaves(_rechain(first_error, it_errors)) + most_specific = _prune_subset_paths(groups) + deduplicated = _deduplicate_errors(most_specific) + dummy_error: Any + if dummy_error := next(deduplicated, None): + dummy_error._errors = _regroup(_rechain(dummy_error, deduplicated)) # type: ignore[attr-defined] + raise dummy_error else: - return main_error - else: - return None + msg = ( + f"Expected to find at least one error, but first error was `None`.\n\n" + f"spec: {spec!r}" + ) + raise NotImplementedError(msg) -def _get_errors_from_spec( - spec: dict[str, Any], - schema: dict[str, Any], - rootschema: dict[str, Any] | None = None, -) -> ValidationErrorList: +def _get_schema_dialect_uri(schema: Map, /) -> str: """ - Uses the relevant jsonschema validator to validate the passed in spec against the schema using the rootschema to resolve references. + Return value of `$schema`_. + + Defines which JSON Schema draft ``schema`` was written for. + + .. _$schema: + https://json-schema.org/understanding-json-schema/reference/schema#schema - The schema and rootschema themselves are not validated but instead considered as valid. """ - # We don't use jsonschema.validate as this would validate the schema itself. - # Instead, we pass the schema directly to the validator class. This is done for - # two reasons: The schema comes from Vega-Lite and is not based on the user - # input, therefore there is no need to validate it in the first place. Furthermore, - # the "uri-reference" format checker fails for some of the references as URIs in - # "$ref" are not encoded, - # e.g. '#/definitions/ValueDefWithCondition' would be a valid $ref in a Vega-Lite schema but - # it is not a valid URI reference due to the characters such as '<'. - - json_schema_draft_url = _get_json_schema_draft_url(rootschema or schema) - validator_cls = jsonschema.validators.validator_for( - {"$schema": json_schema_draft_url} - ) - validator_kwargs: dict[str, Any] = {} - if hasattr(validator_cls, "FORMAT_CHECKER"): - validator_kwargs["format_checker"] = validator_cls.FORMAT_CHECKER - - if _use_referencing_library(): - schema = _prepare_references_in_schema(schema) - validator_kwargs["registry"] = _get_referencing_registry( - rootschema or schema, json_schema_draft_url - ) + return schema.get("$schema", _DEFAULT_DIALECT_URI) + + +def _prepare_references(schema: Map, /) -> dict[str, Any]: + """ + Return a deep copy of ``schema`` w/ replaced uri(s). 
+
+    All encountered ``dict | list``(s) will be reconstructed
+    w/ ``_VEGA_LITE_ROOT_URI`` in front of all nested ``$ref`` values.
+
+    Notes
+    -----
+    ``copy.deepcopy`` is not needed as the iterator yields new objects.
+    """
+    # FIXME: The hottest function + it is recursive
+    # Should be done once per schema
+    return dict(_recurse_refs(schema))
+
+
+def _recurse_refs(m: Map, /) -> Iterator[tuple[str, Any]]:
+    """
+    Recurse through a schema, yielding fresh copies of mutable containers.
+
+    Adds ``_VEGA_LITE_ROOT_URI`` in front of all nested ``$ref`` values.
+    """
+    for k, v in m.items():
+        if k == "$ref":
+            yield k, f"{_VEGA_LITE_ROOT_URI}{v}"
+        elif isinstance(v, dict):
+            yield k, dict(_recurse_refs(v))
+        elif isinstance(v, list):
+            yield k, [dict(_recurse_refs(el)) if _is_dict(el) else el for el in v]
+        else:
+            yield k, v
+
+
+@lru_cache(maxsize=None)
+def _validator_for(uri: str, /) -> Callable[..., Validator]:
+    """
+    Retrieve the constructor for a `Validator`_ class appropriate for validating the given schema.
+
+    Parameters
+    ----------
+    uri
+        Address pointing to the `$schema`_.
+
+    .. _Validator:
+        https://python-jsonschema.readthedocs.io/en/stable/validate/#the-validator-protocol
+    .. _$schema:
+        https://json-schema.org/understanding-json-schema/reference/schema
+    """
+    tp: Callable[..., Validator] = jsonschema.validators.validator_for({"$schema": uri})
+    if hasattr(tp, "FORMAT_CHECKER"):
+        return partial(tp, format_checker=tp.FORMAT_CHECKER)
     else:
-        # No resolver is necessary if the schema is already the full schema
-        validator_kwargs["resolver"] = (
-            jsonschema.RefResolver.from_schema(rootschema)
-            if rootschema is not None
-            else None
+        return tp
+
+
+_HASH_ENCODER = json.JSONEncoder(sort_keys=True, separators=(",", ":"))
+
+if Version(importlib_version("jsonschema")) >= Version("4.18"):
+    from referencing import Registry
+    from referencing.jsonschema import specification_with as _specification_with
+
+    if TYPE_CHECKING:
+        from referencing import Specification
+        from referencing._core import Resolver
+
+    @lru_cache(maxsize=None)
+    def specification_with(dialect_id: str, /) -> Specification[Any]:
+        """
+        Retrieve the `Specification`_ with the given dialect identifier.
+
+        Wraps `specification_with`_, which returns one **immutable** object per
+        JSON Schema **dialect**.
+
+        Raises
+        ------
+        ``UnknownDialect``
+            if the given ``dialect_id`` isn't known
+
+        .. _Specification:
+            https://referencing.readthedocs.io/en/stable/api/#referencing.Specification
+        .. _specification_with:
+            https://referencing.readthedocs.io/en/stable/api/#referencing.jsonschema.specification_with
+        """
+        return _specification_with(dialect_id)
+
+    class _Registry:
+        """
+        A cache of `Registry`_ (s).
+
+        An instance named ``registry`` is used to wrap the `Registry`_ API,
+        with a managed cache.
+
+        See Also
+        --------
+        ``_Registry.__call__``
+
+        .. _Registry:
+            https://referencing.readthedocs.io/en/stable/api/#referencing.Registry
+        """
+
+        _cached: ClassVar[dict[tuple[str, str], Registry[Any]]] = {}
+
+        @staticmethod
+        def compute_key(root: Map, dialect_id: str, /) -> tuple[str, str]:
+            """
+            Generate a simple-minded hash to identify a registry.
+
+            Notes
+            -----
+            Why the strange hash?
+            - **All** generated schemas hit the ``"$ref"`` branch.
+            - ``api.Then`` hits the ``len(root) == 1`` branch w/ ``{"type": "object"}``.
+ - Final branch is only hit by mock schemas in: + - `tests/utils/test_core.py::test_infer_encoding_types` + - `tests/utils/test_schemapi.py` + """ + if "$ref" in root: + k1 = root["$ref"] + elif len(root) == 1: + k1 = "".join(f"{s!s}" for s in chain(*root.items())) + else: + k1 = _HASH_ENCODER.encode(root) + return k1, dialect_id + + @classmethod + def update_cached( + cls, root: Map, dialect_id: str, resolver: Resolver[Any] + ) -> None: + cls._cached[cls.compute_key(root, dialect_id)] = resolver._registry + + def __call__(self, root: Map, dialect_id: str, /) -> Registry[Any]: + """ + Constructs a `Registry`_, adding the `Resource`_ produced by ``rootschema``. + + Requires at least ``jsonschema`` `v4.18.0a1`_. + + .. _Registry: + https://referencing.readthedocs.io/en/stable/api/#referencing.Registry + .. _Resource: + https://referencing.readthedocs.io/en/stable/api/#referencing.Resource + .. _v4.18.0a1: + https://github.com/python-jsonschema/jsonschema/releases/tag/v4.18.0a1 + """ + cache_key = self.compute_key(root, dialect_id) + if (reg := self._cached.get(cache_key, None)) is not None: + return reg + resource = specification_with(dialect_id).create_resource(root) + reg = Registry().with_resource(_VEGA_LITE_ROOT_URI, resource).crawl() + type(self)._cached[cache_key] = reg + return reg + + registry: _Registry = _Registry() + + def _validator(schema: Map, rootschema: Map | None = None, /) -> Validator: + """ + Constructs a `Validator`_ for future validation. + + Parameters + ---------- + schema + Schema that a spec will be validated against. + rootschema + Context to evaluate within. + + We have **both** a current & a backwards-compatible version of this function. + + .. _Validator: + https://python-jsonschema.readthedocs.io/en/stable/validate/#the-validator-protocol + """ + # NOTE: This is the current version + uri = _get_schema_dialect_uri(rootschema or schema) + validator = _validator_for(uri) + return validator( + _prepare_references(schema), registry=registry(rootschema or schema, uri) ) - validator = validator_cls(schema, **validator_kwargs) - errors = list(validator.iter_errors(spec)) - return errors + def _resolve_references(schema: Map, rootschema: Map) -> Map: + """ + Resolve schema references until there is no ``"$ref"`` anymore in the top-level ``dict``. + + ``jsonschema`` deprecated ``RefResolver`` in favor of `referencing`_. + + We have **both** a current & a backwards-compatible version of this function. + + .. _referencing: + https://github.com/python-jsonschema/jsonschema/releases/tag/v4.18.0a1 + """ + # NOTE: This is the current version + root = rootschema or schema + if ("$ref" not in root) or ("$ref" not in schema): + return schema + uri = _get_schema_dialect_uri(rootschema) + resolver = registry(root, uri).resolver(_VEGA_LITE_ROOT_URI) + while "$ref" in schema: + resolved = resolver.lookup(schema["$ref"]) + schema = resolved.contents + registry.update_cached(root, uri, resolved.resolver) + return schema + + +else: + def _validator(schema: Map, rootschema: Map | None = None, /) -> Validator: + """ + Constructs a `Validator`_ for future validation. + + We have **both** a current & a backwards-compatible version of this function. -def _get_json_schema_draft_url(schema: dict[str, Any]) -> str: - return schema.get("$schema", _DEFAULT_JSON_SCHEMA_DRAFT_URL) + Parameters + ---------- + schema + Schema that a spec will be validated against. + rootschema + Context to evaluate within. + .. 
_Validator: + https://python-jsonschema.readthedocs.io/en/stable/validate/#the-validator-protocol + """ + # NOTE: This is the backwards-compatible version + validator = _validator_for(_get_schema_dialect_uri(rootschema or schema)) + resolver: Any = ( + jsonschema.RefResolver.from_schema(rootschema) if rootschema else rootschema + ) + return validator(schema, resolver=resolver) -def _use_referencing_library() -> bool: - """In version 4.18.0, the jsonschema package deprecated RefResolver in favor of the referencing library.""" - return Version(jsonschema_version_str) >= Version("4.18") + def _resolve_references(schema: Map, rootschema: Map) -> Map: + """ + Resolve schema references until there is no ``"$ref"`` anymore in the top-level ``dict``. + ``jsonschema`` deprecated ``RefResolver`` in favor of `referencing`_. -def _prepare_references_in_schema(schema: dict[str, Any]) -> dict[str, Any]: - # Create a copy so that $ref is not modified in the original schema in case - # that it would still reference a dictionary which might be attached to - # an Altair class _schema attribute - schema = copy.deepcopy(schema) + We have **both** a current & a backwards-compatible version of this function. - def _prepare_refs(d: dict[str, Any]) -> dict[str, Any]: + .. _referencing: + https://github.com/python-jsonschema/jsonschema/releases/tag/v4.18.0a1 """ - Add _VEGA_LITE_ROOT_URI in front of all $ref values. + # NOTE: This is the backwards-compatible version + resolver = jsonschema.RefResolver.from_schema(rootschema or schema) + while "$ref" in schema: + with resolver.resolving(schema["$ref"]) as resolved: + schema = resolved + return schema + - This function recursively iterates through the whole dictionary. +if Version(importlib_version("jsonschema")) >= Version("4.0.1"): + _json_path: Callable[[ValidationError], str] = operator.attrgetter("json_path") +else: - $ref values can only be nested in dictionaries or lists - as the passed in `d` dictionary comes from the Vega-Lite json schema - and in json we only have arrays (-> lists in Python) and objects - (-> dictionaries in Python) which we need to iterate through. + def _json_path(err: ValidationError, /) -> str: """ - for key, value in d.items(): - if key == "$ref": - d[key] = _VEGA_LITE_ROOT_URI + d[key] - elif isinstance(value, dict): - d[key] = _prepare_refs(value) - elif isinstance(value, list): - prepared_values = [] - for v in value: - if isinstance(v, dict): - v = _prepare_refs(v) - prepared_values.append(v) - d[key] = prepared_values - return d - - schema = _prepare_refs(schema) - return schema - - -# We do not annotate the return value here as the referencing library is not always -# available and this function is only executed in those cases. -def _get_referencing_registry( - rootschema: dict[str, Any], json_schema_draft_url: str | None = None -) -> Registry: - # Referencing is a dependency of newer jsonschema versions, starting with the - # version that is specified in _use_referencing_library and we therefore - # can expect that it is installed if the function returns True. - # We ignore 'import' mypy errors which happen when the referencing library - # is not installed. That's ok as in these cases this function is not called. - # We also have to ignore 'unused-ignore' errors as mypy raises those in case - # referencing is installed. 
- import referencing # type: ignore[import,unused-ignore] - import referencing.jsonschema # type: ignore[import,unused-ignore] - - if json_schema_draft_url is None: - json_schema_draft_url = _get_json_schema_draft_url(rootschema) - - specification = referencing.jsonschema.specification_with(json_schema_draft_url) - resource = specification.create_resource(rootschema) - return referencing.Registry().with_resource( - uri=_VEGA_LITE_ROOT_URI, resource=resource - ) + Vendored backport for ``jsonschema.ValidationError.json_path`` property. + + See https://github.com/vega/altair/issues/3038. + """ + path = "$" + for elem in err.absolute_path: + if isinstance(elem, int): + path += "[" + str(elem) + "]" + else: + path += "." + elem + return path -def _json_path(err: jsonschema.exceptions.ValidationError) -> str: +_FN_PATH = cast("Callable[[tuple[str, ValidationError]], str]", operator.itemgetter(0)) +"""Key function for ``(json_path, ValidationError)``.""" +_FN_VALIDATOR = cast("Callable[[ValidationError], _ValidatorKeyword]", operator.attrgetter("validator")) # fmt: off +"""Key function for ``ValidationError.validator``.""" + + +def _message_len(err: ValidationError, /) -> int: + """Return length of a ``ValidationError`` message.""" + return len(err.message) + + +def _rechain(element: T, others: Iterable[T], /) -> Iterator[T]: """ - Drop in replacement for the .json_path property of the jsonschema ValidationError class. + Continue an iterator at the last popped ``element``. - This is not available as property for ValidationError with jsonschema<4.0.1. + Equivalent to:: + + elements = 1, 2, 3, 4, 5 + it = iter(elements) + element = next(it) + it_continue = chain([element], it) - More info, see https://github.com/vega/altair/issues/3038. """ - path = "$" - for elem in err.absolute_path: - if isinstance(elem, int): - path += "[" + str(elem) + "]" - else: - path += "." + elem - return path + yield element + yield from others -def _group_errors_by_json_path( - errors: ValidationErrorList, -) -> GroupedValidationErrors: +def _regroup( + errors: _Errs, /, *, key: Callable[[ValidationError], str] = _json_path +) -> _ErrsLazyGroup: """ - Groups errors by the `json_path` attribute of the jsonschema ValidationError class. + Regroup error stream by a ``key`` function. - This attribute contains the path to the offending element within - a chart specification and can therefore be considered as an identifier of an - 'issue' in the chart that needs to be fixed. + Assumes ``errors`` are already sorted, which holds **only** at the end of ``validate_jsonschema``. """ - errors_by_json_path = defaultdict(list) - for err in errors: - err_key = getattr(err, "json_path", _json_path(err)) - errors_by_json_path[err_key].append(err) - return dict(errors_by_json_path) + for _, grouped_it in groupby(errors, key): + yield grouped_it -def _get_leaves_of_error_tree( - errors: ValidationErrorList, -) -> ValidationErrorList: +def _group_tree_leaves(errors: _Errs, /) -> _IntoLazyGroup: """ - For each error in `errors`, it traverses down the "error tree" that is generated by the jsonschema library to find and return all "leaf" errors. + Combines 3 previously distinct steps: + + 1. ``_get_leaves_of_error_tree`` These are errors which have no further errors that caused it and so they are the most specific errors with the most specific error messages. - """ - leaves: ValidationErrorList = [] + + 2. ``_group_errors_by_json_path`` (part of) + + Extracts the ``.json_path`` property for grouping. + + 3. 
Removes::
+
+        ValidationError: "'value' is a required property"
+
+    as these errors are unlikely to be the relevant ones for the user.
+    They come from validation against a schema definition where the output of `alt.value`
+    would be valid.
+    However, if a user uses `alt.value`, the `value` keyword is included automatically
+    from that function and so it's unlikely that this was what the user intended
+    if the keyword is not present in the first place.
+    """  # noqa: D400
+    REQUIRED = "required"
+    VALUE = ["value"]
     for err in errors:
-        if err.context:
-            # This means that the error `err` was caused by errors in subschemas.
-            # The list of errors from the subschemas are available in the property
-            # `context`.
-            leaves.extend(_get_leaves_of_error_tree(err.context))
+        if err_context := err.context:
+            yield from _group_tree_leaves(err_context)
+        elif err.validator == REQUIRED and err.validator_value == VALUE:
+            continue
         else:
-            leaves.append(err)
-    return leaves
+            yield _json_path(err), err


-def _subset_to_most_specific_json_paths(
-    errors_by_json_path: GroupedValidationErrors,
-) -> GroupedValidationErrors:
+def _prune_subset_paths(json_path_errors: _IntoLazyGroup, /) -> Iterator[_Errs]:
     """
     Removes key (json path), value (errors) pairs where the json path is fully contained in another json path.
@@ -348,66 +553,26 @@
     then the first one will be removed and only the second one is returned. This
     is done under the assumption that more specific json paths give more helpful
     error messages to the user.
-    """
-    errors_by_json_path_specific: GroupedValidationErrors = {}
-    for json_path, errors in errors_by_json_path.items():
-        if not _contained_at_start_of_one_of_other_values(
-            json_path, list(errors_by_json_path.keys())
-        ):
-            errors_by_json_path_specific[json_path] = errors
-    return errors_by_json_path_specific
-
-def _contained_at_start_of_one_of_other_values(x: str, values: Sequence[str]) -> bool:
-    # Does not count as "contained at start of other value" if the values are
-    # the same. These cases should be handled separately
-    return any(value.startswith(x) for value in values if x != value)
+    Currently using a `list`, but typing it more restrictively to see if it can be avoided.

-
-def _deduplicate_errors(
-    grouped_errors: GroupedValidationErrors,
-) -> GroupedValidationErrors:
-    """
-    Some errors have very similar error messages or are just in general not helpful for a user.
-
-    This function removes as many of these cases as possible and
-    can be extended over time to handle new cases that come up.
+    - Needs to be sorted to work with groupby
+    - Reversing allows prioritising more specific groups, since they are seen first
+    - Then re-reversed, to keep seen order
     """
-    grouped_errors_deduplicated: GroupedValidationErrors = {}
-    for json_path, element_errors in grouped_errors.items():
-        errors_by_validator = _group_errors_by_validator(element_errors)
-
-        deduplication_functions = {
-            "enum": _deduplicate_enum_errors,
-            "additionalProperties": _deduplicate_additional_properties_errors,
-        }
-        deduplicated_errors: ValidationErrorList = []
-        for validator, errors in errors_by_validator.items():
-            deduplication_func = deduplication_functions.get(validator)
-            if deduplication_func is not None:
-                errors = deduplication_func(errors)
-            deduplicated_errors.extend(_deduplicate_by_message(errors))
-
-        # Removes any ValidationError "'value' is a required property" as these
-        # errors are unlikely to be the relevant ones for the user.
They come from
-        # validation against a schema definition where the output of `alt.value`
-        # would be valid. However, if a user uses `alt.value`, the `value` keyword
-        # is included automatically from that function and so it's unlikely
-        # that this was what the user intended if the keyword is not present
-        # in the first place.
-        deduplicated_errors = [
-            err for err in deduplicated_errors if not _is_required_value_error(err)
-        ]
-
-        grouped_errors_deduplicated[json_path] = deduplicated_errors
-    return grouped_errors_deduplicated
-
-
-def _is_required_value_error(err: jsonschema.exceptions.ValidationError) -> bool:
-    return err.validator == "required" and err.validator_value == ["value"]
+    rev_sort = sorted(json_path_errors, key=_FN_PATH, reverse=True)
+    keeping: dict[str, _Errs] = {}
+    for unique_path, grouped_errors in groupby(rev_sort, key=_FN_PATH):
+        if any(seen.startswith(unique_path) for seen in keeping):
+            continue
+        else:
+            keeping[unique_path] = [err for _, err in grouped_errors]
+    yield from islice(reversed(keeping.values()), 3)


-def _group_errors_by_validator(errors: ValidationErrorList) -> GroupedValidationErrors:
+def _groupby_validator(
+    errors: _Errs, /
+) -> Iterator[tuple[_ValidatorKeyword, _ErrsLazy]]:
     """
     Groups the errors by the json schema "validator" that caused the error.
@@ -416,80 +581,68 @@
     was set although no additional properties are allowed then "validator" is
     `"additionalProperties"`, etc.
     """
-    errors_by_validator: defaultdict[str, ValidationErrorList] = defaultdict(list)
-    for err in errors:
-        # Ignore mypy error as err.validator as it wrongly sees err.validator
-        # as of type Optional[Validator] instead of str which it is according
-        # to the documentation and all tested cases
-        errors_by_validator[err.validator].append(err)  # type: ignore[index]
-    return dict(errors_by_validator)
+    yield from groupby(sorted(errors, key=_FN_VALIDATOR), key=_FN_VALIDATOR)


-def _deduplicate_enum_errors(errors: ValidationErrorList) -> ValidationErrorList:
+def _deduplicate_errors(grouped_errors: Iterator[_Errs], /) -> _ErrsLazy:
     """
-    Deduplicate enum errors by removing the errors where the allowed values are a subset of another error.
+    Some errors have very similar error messages or are just in general not helpful for a user.

-    For example, if `enum` contains two errors and one has `validator_value` (i.e. accepted values) ["A", "B"] and the
-    other one ["A", "B", "C"] then the first one is removed and the final
-    `enum` list only contains the error with ["A", "B", "C"].
+    This function removes as many of these cases as possible and
+    can be extended over time to handle new cases that come up.
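+
+    A sketch with hypothetical messages, illustrating both passes::
+
+        "'X' is not one of ['A', 'B']"       # dropped by ``_prune_subset_enum``
+        "'X' is not one of ['A', 'B', 'C']"  # kept
+        "'X' is not one of ['A', 'B', 'C']"  # dropped by ``_distinct_messages``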
""" - if len(errors) > 1: - # Values (and therefore `validator_value`) of an enum are always arrays, - # see https://json-schema.org/understanding-json-schema/reference/generic.html#enumerated-values - # which is why we can use join below - value_strings = [",".join(err.validator_value) for err in errors] # type: ignore - longest_enums: ValidationErrorList = [] - for value_str, err in zip(value_strings, errors): - if not _contained_at_start_of_one_of_other_values(value_str, value_strings): - longest_enums.append(err) - errors = longest_enums - return errors - - -def _deduplicate_additional_properties_errors( - errors: ValidationErrorList, -) -> ValidationErrorList: + for by_path in grouped_errors: + for validator, errors in _groupby_validator(by_path): + if fn := _FN_MAP_DEDUPLICATION.get(validator): + errors = fn(errors) + yield from _distinct_messages(errors) + + +def _distinct_messages(iterable: _Errs, /) -> _ErrsLazy: + seen = set() + for el in iterable: + if el.message not in seen: + seen.add(el.message) + yield el + + +def _shortest_any_of(iterable: _Errs, /) -> _ErrsLazy: """ If there are multiple additional property errors it usually means that the offending element was validated against multiple schemas and its parent is a common anyOf validator. The error messages produced from these cases are usually - very similar and we just take the shortest one. For example, - the following 3 errors are raised for the `unknown` channel option in - `alt.X("variety", unknown=2)`: - - "Additional properties are not allowed ('unknown' was unexpected)" - - "Additional properties are not allowed ('field', 'unknown' were unexpected)" - - "Additional properties are not allowed ('field', 'type', 'unknown' were unexpected)". + very similar and we just take the shortest one. + For example the following 3 errors are raised for:: + + alt.X("variety", unknown=2) + - "Additional properties are not allowed ('unknown' was unexpected)" + - "Additional properties are not allowed ('field', 'unknown' were unexpected)" + - "Additional properties are not allowed ('field', 'type', 'unknown' were unexpected)". """ - if len(errors) > 1: - # Test if all parent errors are the same anyOf error and only do - # the prioritization in these cases. Can't think of a chart spec where this - # would not be the case but still allow for it below to not break anything. - parent = errors[0].parent - if ( - parent is not None - and parent.validator == "anyOf" - # Use [1:] as don't have to check for first error as it was used - # above to define `parent` - and all(err.parent is parent for err in errors[1:]) - ): - errors = [min(errors, key=lambda x: len(x.message))] - return errors - - -def _deduplicate_by_message(errors: ValidationErrorList) -> ValidationErrorList: - """Deduplicate errors by message. 
This keeps the original order in case it was chosen intentionally."""
-    return list({e.message: e for e in errors}.values())
-
-
-def _subclasses(cls: type[Any]) -> Iterator[type[Any]]:
-    """Breadth-first sequence of all classes which inherit from cls."""
-    seen = set()
-    current_set = {cls}
-    while current_set:
-        seen |= current_set
-        current_set = set.union(*(set(cls.__subclasses__()) for cls in current_set))
-        for cls in current_set - seen:
-            yield cls
+    it = iter(iterable)
+    first = next(it)
+    if (
+        parent := cast("ValidationError", first.parent)
+    ) and parent.validator == "anyOf":
+        yield min(_rechain(first, it), key=_message_len)
+    else:
+        yield first
+
+
+def _prune_subset_enum(iterable: _Errs, /) -> _ErrsLazy:
+    """Skip any ``"enum"`` errors that are a subset of another error."""
+    enums: tuple[set[str], ...]
+    errors: tuple[ValidationError, ...]
+    enums, errors = zip(*((set(err.validator_value), err) for err in iterable))  # type: ignore[arg-type]
+    for cur_enum, err in zip(enums, errors):
+        if not any(cur_enum < e for e in enums if e != cur_enum):
+            yield err
+
+
+_FN_MAP_DEDUPLICATION: Mapping[_ValidatorKeyword, Callable[[_Errs], _ErrsLazy]] = {
+    "additionalProperties": _shortest_any_of,
+    "enum": _prune_subset_enum,
+}


 def _from_array_like(obj: Iterable[Any], /) -> list[Any]:
@@ -538,30 +691,8 @@ def _todict(obj: Any, context: dict[str, Any] | None, np_opt: Any, pd_opt: Any)
     return obj


-def _resolve_references(
-    schema: dict[str, Any], rootschema: dict[str, Any] | None = None
-) -> dict[str, Any]:
-    """Resolve schema references until there is no $ref anymore in the top-level of the dictionary."""
-    if _use_referencing_library():
-        registry = _get_referencing_registry(rootschema or schema)
-        # Using a different variable name to show that this is not the
-        # jsonschema.RefResolver but instead a Resolver from the referencing
-        # library
-        referencing_resolver = registry.resolver()
-        while "$ref" in schema:
-            schema = referencing_resolver.lookup(
-                _VEGA_LITE_ROOT_URI + schema["$ref"]
-            ).contents
-    else:
-        resolver = jsonschema.RefResolver.from_schema(rootschema or schema)
-        while "$ref" in schema:
-            with resolver.resolving(schema["$ref"]) as resolved:
-                schema = resolved
-    return schema
-
-
 class SchemaValidationError(jsonschema.ValidationError):
-    def __init__(self, obj: SchemaBase, err: jsonschema.ValidationError) -> None:
+    def __init__(self, obj: SchemaBase, err: ValidationError) -> None:
         """
         A wrapper for ``jsonschema.ValidationError`` with friendlier traceback.
@@ -582,9 +713,8 @@ def __init__(self, obj: SchemaBase, err: jsonschema.ValidationError) -> None: """ super().__init__(**err._contents()) self.obj = obj - self._errors: GroupedValidationErrors = getattr( - err, "_all_errors", {getattr(err, "json_path", _json_path(err)): [err]} - ) + err = cast("SchemaValidationError", err) + self._errors: _ErrsLazyGroup = err._errors # This is the message from err self._original_message = self.message self.message = self._get_message() @@ -592,50 +722,52 @@ def __init__(self, obj: SchemaBase, err: jsonschema.ValidationError) -> None: def __str__(self) -> str: return self.message + @staticmethod + def indent_from_second_line(msg: str, /, indent: int = 4) -> str: + return "\n".join( + " " * indent + s if idx > 0 and s else s + for idx, s in enumerate(msg.split("\n")) + ) + def _get_message(self) -> str: - def indent_second_line_onwards(message: str, indent: int = 4) -> str: - modified_lines: list[str] = [] - for idx, line in enumerate(message.split("\n")): - if idx > 0 and len(line) > 0: - line = " " * indent + line - modified_lines.append(line) - return "\n".join(modified_lines) - - error_messages: list[str] = [] - # Only show a maximum of 3 errors as else the final message returned by this - # method could get very long. - for errors in list(self._errors.values())[:3]: - error_messages.append(self._get_message_for_errors_group(errors)) - - message = "" - if len(error_messages) > 1: - error_messages = [ - indent_second_line_onwards(f"Error {error_id}: {m}") - for error_id, m in enumerate(error_messages, start=1) - ] - message += "Multiple errors were found.\n\n" - message += "\n\n".join(error_messages) - return message - - def _get_message_for_errors_group( - self, - errors: ValidationErrorList, - ) -> str: - if errors[0].validator == "additionalProperties": - # During development, we only found cases where an additionalProperties - # error was raised if that was the only error for the offending instance - # as identifiable by the json path. Therefore, we just check here the first - # error. However, other constellations might exist in which case - # this should be adapted so that other error messages are shown as well. - message = self._get_additional_properties_error_message(errors[0]) + it: _ErrsLazyGroup = self._errors + group_1 = list(next(it)) + if (group_2 := next(it, None)) is not None: + messages: Iterator[str] = ( + self._get_message_for_errors_group(g) + for g in (group_1, list(group_2), next(it, None)) + if g is not None + ) + msg = "\n\n".join( + self.indent_from_second_line(f"Error {error_id}: {m}") + for error_id, m in enumerate(messages, start=1) + ) + return f"Multiple errors were found.\n\n{msg}" else: - message = self._get_default_error_message(errors=errors) + return self._get_message_for_errors_group(group_1) - return message.strip() + def _get_message_for_errors_group(self, errors: _Errs) -> str: + """ + Note. + + During development, we only found cases where an additionalProperties + error was raised if that was the only error for the offending instance + as identifiable by the json path. + + Therefore, we just check here the first error. + However, other constellations might exist in which case this should be adapted + so that other error messages are shown as well. 
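+
+    For example (reusing the case from ``_shortest_any_of``), a spec like
+    ``alt.X("variety", unknown=2)`` should land in the ``additionalProperties``
+    branch and produce the parameter-table message.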
+ """ + if not isinstance(errors, Sequence): + errors = list(errors) + if errors[0].validator == "additionalProperties": + return self._get_additional_properties_error_message(errors[0]) + else: + return self._get_default_error_message(errors=errors) def _get_additional_properties_error_message( self, - error: jsonschema.exceptions.ValidationError, + error: ValidationError, ) -> str: """Output all existing parameters when an unknown parameter is specified.""" altair_cls = self._get_altair_class_for_error(error) @@ -646,22 +778,21 @@ def _get_additional_properties_error_message( # "Additional properties are not allowed ('unknown' was unexpected)" # Line below extracts "unknown" from this string parameter_name = error.message.split("('")[-1].split("'")[0] - message = f"""\ -`{altair_cls.__name__}` has no parameter named '{parameter_name}' - -Existing parameter names are: -{param_names_table} -See the help for `{altair_cls.__name__}` to read the full description of these parameters""" - return message + cls_name = altair_cls.__name__ + return ( + f"`{cls_name}` has no parameter named '{parameter_name}'\n\n" + f"Existing parameter names are:\n{param_names_table}\n" + f"See the help for `{cls_name}` to read the full description of these parameters" + ) - def _get_altair_class_for_error( - self, error: jsonschema.exceptions.ValidationError - ) -> type[SchemaBase]: + def _get_altair_class_for_error(self, error: ValidationError) -> type[SchemaBase]: """ Try to get the lowest class possible in the chart hierarchy so it can be displayed in the error message. This should lead to more informative error messages pointing the user closer to the source of the issue. """ + from altair import vegalite + for prop_name in reversed(error.absolute_path): # Check if str as e.g. first item can be a 0 if isinstance(prop_name, str): @@ -673,24 +804,17 @@ def _get_altair_class_for_error( # Did not find a suitable class based on traversing the path so we fall # back on the class of the top-level object which created # the SchemaValidationError - cls = self.obj.__class__ + cls = type(self.obj) return cls @staticmethod - def _format_params_as_table(param_dict_keys: Iterable[str]) -> str: + def _format_params_as_table(param_view: KeysView[str]) -> str: """Format param names into a table so that they are easier to read.""" - param_names: tuple[str, ...] - name_lengths: tuple[int, ...] 
- param_names, name_lengths = zip( - *[ - (name, len(name)) - for name in param_dict_keys - if name not in {"kwds", "self"} - ] - ) + param_names: list[str] = [nm for nm in param_view if nm not in {"kwds", "self"}] + # Worst case scenario with the same longest param name in the same # row for all columns - max_name_length = max(name_lengths) + max_name_length = len(max(param_view, key=len)) max_column_width = 80 # Output a square table if not too big (since it is easier to read) num_param_names = len(param_names) @@ -704,7 +828,7 @@ def split_into_equal_parts(n: int, p: int) -> list[int]: column_heights = split_into_equal_parts(num_param_names, columns) # Section the param names into columns and compute their widths - param_names_columns: list[tuple[str, ...]] = [] + param_names_columns: list[Sequence[str]] = [] column_max_widths: list[int] = [] last_end_idx: int = 0 for ch in column_heights: @@ -715,33 +839,32 @@ def split_into_equal_parts(n: int, p: int) -> list[int]: last_end_idx = ch + last_end_idx # Transpose the param name columns into rows to facilitate looping - param_names_rows: list[tuple[str, ...]] = [] - for li in zip_longest(*param_names_columns, fillvalue=""): - param_names_rows.append(li) # Build the table as a string by iterating over and formatting the rows param_names_table: str = "" - for param_names_row in param_names_rows: + column_pad = 3 + for param_names_row in zip_longest(*param_names_columns, fillvalue=""): + last_element = len(param_names_row) - 1 for num, param_name in enumerate(param_names_row): # Set column width based on the longest param in the column - max_name_length_column = column_max_widths[num] - column_pad = 3 - param_names_table += "{:<{}}".format( - param_name, max_name_length_column + column_pad - ) + width = column_pad + column_max_widths[num] + param_names_table += "{:<{}}".format(param_name, width) # Insert newlines and spacing after the last element in each row - if num == (len(param_names_row) - 1): + if num == last_element: param_names_table += "\n" return param_names_table def _get_default_error_message( self, - errors: ValidationErrorList, + errors: Sequence[ValidationError], ) -> str: bullet_points: list[str] = [] - errors_by_validator = _group_errors_by_validator(errors) + errors_by_validator: defaultdict[str, list[ValidationError]] = defaultdict(list) + for err in errors: + errors_by_validator[err.validator].append(err) # type: ignore[index] + if "enum" in errors_by_validator: for error in errors_by_validator["enum"]: - bullet_points.append(f"one of {error.validator_value}") + bullet_points.append(f"one of {error.validator_value}") # noqa: PERF401 if "type" in errors_by_validator: types = [f"'{err.validator_value}'" for err in errors_by_validator["type"]] @@ -786,7 +909,7 @@ def _get_default_error_message( if validator not in {"enum", "type"} ) message += "".join(it) - return message + return message.strip() class UndefinedType: @@ -896,7 +1019,7 @@ class SchemaBase: """ _schema: ClassVar[dict[str, Any] | Any] = None - _rootschema: ClassVar[dict[str, Any] | None] = None + _rootschema: ClassVar[dict[str, Any] | Any] = None _class_is_valid_at_instantiation: ClassVar[bool] = True def __init__(self, *args: Any, **kwds: Any) -> None: @@ -961,14 +1084,10 @@ def __getattr__(self, attr): # reminder: getattr is called after the normal lookups if attr == "_kwds": raise AttributeError() - if attr in self._kwds: + elif attr in self._kwds: return self._kwds[attr] else: - try: - _getattr = super().__getattr__ # pyright: 
ignore[reportAttributeAccessIssue]
-        except AttributeError:
-            _getattr = super().__getattribute__
-        return _getattr(attr)
+        return getattr(super(), "__getattr__", super().__getattribute__)(attr)

     def __setattr__(self, item, val) -> None:
         self._kwds[item] = val
@@ -1047,7 +1166,7 @@ def to_dict(
         # NOTE: Don't raise `from err`, see `SchemaValidationError` doc
         try:
             self.validate(result)
-        except jsonschema.ValidationError as err:
+        except ValidationError as err:
             raise SchemaValidationError(self, err) from None
         return result
@@ -1122,17 +1241,16 @@ def from_dict(
         """
         if validate:
             cls.validate(dct)
-        converter = _FromDict(cls._default_wrapper_classes())
+        converter: type[_FromDict] | _FromDict = (
+            _FromDict
+            if _FromDict.hash_tps
+            else _FromDict(cls._default_wrapper_classes())
+        )
         return converter.from_dict(dct, cls)

     @classmethod
     def from_json(
-        cls,
-        json_string: str,
-        validate: bool = True,
-        **kwargs: Any,
-        # Type hints for this method would get rather complicated
-        # if we want to provide a more specific return type
+        cls, json_string: str, validate: bool = True, **kwargs: Any
     ) -> ChartType:
         """
         Instantiate the object from a valid JSON string.
@@ -1159,22 +1277,25 @@ def validate(
         cls, instance: dict[str, Any], schema: dict[str, Any] | None = None
     ) -> None:
         """Validate the instance against the class schema in the context of the rootschema."""
-        if schema is None:
-            schema = cls._schema
-        # For the benefit of mypy
-        assert schema is not None
-        validate_jsonschema(instance, schema, rootschema=cls._rootschema or cls._schema)
+        validate_jsonschema(
+            instance, schema or cls._schema, cls._rootschema or cls._schema
+        )

     @classmethod
     def resolve_references(cls, schema: dict[str, Any] | None = None) -> dict[str, Any]:
         """Resolve references in the context of this object's schema or root schema."""
-        schema_to_pass = schema or cls._schema
-        # For the benefit of mypy
-        assert schema_to_pass is not None
-        return _resolve_references(
-            schema=schema_to_pass,
-            rootschema=(cls._rootschema or cls._schema or schema),
-        )
+        rootschema = cls._rootschema or cls._schema
+        if rootschema is None:
+            name = cls.__name__
+            msg = (
+                f"{name}.resolve_references() provided only `None` values for:\n"
+                f"{schema=}, {cls._schema=}, {cls._rootschema=}.\n\n"
+                f"This variant indicates the class definition {name!r} is invalid."
+            )
+            raise TypeError(msg)
+        else:
+            resolved = _resolve_references(schema or cls._schema, rootschema)
+            return cast("dict[str, Any]", resolved)

     @classmethod
     def validate_property(
@@ -1281,10 +1402,55 @@ def _is_iterable(
     return not isinstance(obj, exclude) and isinstance(obj, Iterable)


+def _is_valid(spec: _JsonParameter, tp: type[SchemaBase], /) -> bool:
+    """
+    Return True if ``tp`` can be constructed from ``spec``.
+
+    Notes
+    -----
+    Don't use this if you need to know *details* of the errors in ``spec``.
+    """
+    return next(_validator(tp._schema, tp._rootschema).iter_errors(spec), None) is None
+
+
 def _passthrough(*args: Any, **kwds: Any) -> Any | dict[str, Any]:
     return args[0] if args else kwds


+def _hash_schema(
+    schema: _JsonParameter,
+    /,
+    *,
+    exclude: Iterable[str] = frozenset(
+        ("definitions", "title", "description", "$schema", "id")
+    ),
+) -> int:
+    """
+    Return the hash value for a ``schema``.
+
+    Parameters
+    ----------
+    schema
+        ``SchemaBase._schema``.
+    exclude
+        ``schema`` keys which are not considered when identifying equivalence.
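+
+    A quick sketch of the equivalence this enables (toy schemas, not from Vega-Lite)::
+
+        a = {"type": "object", "title": "A"}
+        b = {"type": "object", "title": "B"}
+        assert _hash_schema(a) == _hash_schema(b)  # "title" is excluded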
+ """ + if isinstance(schema, Mapping): + schema = {k: v for k, v in schema.items() if k not in exclude} + return hash(_HASH_ENCODER.encode(schema)) + + +def _subclasses(cls: type[TSchemaBase]) -> Iterator[type[TSchemaBase]]: + """Breadth-first sequence of all classes which inherit from ``cls``.""" + seen = set() + current: set[type[TSchemaBase]] = {cls} + while current: + seen |= current + current = set(chain.from_iterable(cls.__subclasses__() for cls in current)) + for cls in current - seen: + yield cls + + class _FromDict: """ Class used to construct SchemaBase class hierarchies from a dict. @@ -1294,54 +1460,38 @@ class _FromDict: specified in the ``wrapper_classes`` positional-only argument to the constructor. """ - _hash_exclude_keys = ("definitions", "title", "description", "$schema", "id") + hash_tps: ClassVar[defaultdict[int, deque[type[SchemaBase]]]] = defaultdict(deque) + """ + Maps unique schemas to corresponding types. - def __init__(self, wrapper_classes: Iterable[type[SchemaBase]], /) -> None: - # Create a mapping of a schema hash to a list of matching classes - # This lets us quickly determine the correct class to construct - self.class_dict: dict[int, list[type[SchemaBase]]] = defaultdict(list) - for tp in wrapper_classes: - if tp._schema is not None: - self.class_dict[self.hash_schema(tp._schema)].append(tp) + The logic is that after removing a subset of keys, some schemas are identical. - @classmethod - def hash_schema(cls, schema: dict[str, Any], use_json: bool = True) -> int: - """ - Compute a python hash for a nested dictionary which properly handles dicts, lists, sets, and tuples. + If there are multiple matches, we use the first one in the ``deque``. - At the top level, the function excludes from the hashed schema all keys - listed in `exclude_keys`. + ``_subclasses`` yields the results of a `breadth-first search`_, + so the first matching class is the most general match. - This implements two methods: one based on conversion to JSON, and one based - on recursive conversions of unhashable to hashable types; the former seems - to be slightly faster in several benchmarks. - """ - if cls._hash_exclude_keys and isinstance(schema, dict): - schema = { - key: val - for key, val in schema.items() - if key not in cls._hash_exclude_keys - } - if use_json: - s = json.dumps(schema, sort_keys=True) - return hash(s) - else: + .. _breadth-first search: + https://en.wikipedia.org/wiki/Breadth-first_search + """ + + hash_resolved: ClassVar[dict[int, Map]] = {} + """ + Maps unique schemas to their reference-resolved equivalent. - def _freeze(val): - if isinstance(val, dict): - return frozenset((k, _freeze(v)) for k, v in val.items()) - elif isinstance(val, set): - return frozenset(map(_freeze, val)) - elif isinstance(val, (list, tuple)): - return tuple(map(_freeze, val)) - else: - return val + Ensures that ``_resolve_references`` is evaluated **at most once**, per hash. + """ - return hash(_freeze(schema)) + def __init__(self, wrapper_classes: Iterator[type[SchemaBase]], /) -> None: + cls = type(self) + for tp in wrapper_classes: + if tp._schema is not None: + cls.hash_tps[_hash_schema(tp._schema)].append(tp) @overload + @classmethod def from_dict( - self, + cls, dct: TSchemaBase, tp: None = ..., schema: None = ..., @@ -1349,8 +1499,9 @@ def from_dict( default_class: Any = ..., ) -> TSchemaBase: ... 
@overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any] | list[dict[str, Any]], tp: Any = ..., schema: Any = ..., @@ -1358,8 +1509,9 @@ def from_dict( default_class: type[TSchemaBase] = ..., # pyright: ignore[reportInvalidTypeVarUse] ) -> TSchemaBase: ... @overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any], tp: None = ..., schema: dict[str, Any] = ..., @@ -1367,8 +1519,9 @@ def from_dict( default_class: Any = ..., ) -> SchemaBase: ... @overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any], tp: type[TSchemaBase], schema: None = ..., @@ -1376,16 +1529,18 @@ def from_dict( default_class: Any = ..., ) -> TSchemaBase: ... @overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any] | list[dict[str, Any]], tp: type[TSchemaBase], schema: dict[str, Any], rootschema: dict[str, Any] | None = ..., default_class: Any = ..., ) -> Never: ... - def from_dict( - self, + @classmethod + def from_dict( # noqa: C901 + cls, dct: dict[str, Any] | list[dict[str, Any]] | TSchemaBase, tp: type[TSchemaBase] | None = None, schema: dict[str, Any] | None = None, @@ -1395,45 +1550,48 @@ def from_dict( """Construct an object from a dict representation.""" target_tp: Any current_schema: dict[str, Any] + hash_schema: int if isinstance(dct, SchemaBase): return dct elif tp is not None: current_schema = tp._schema + hash_schema = _hash_schema(current_schema) root_schema: dict[str, Any] = rootschema or tp._rootschema or current_schema target_tp = tp elif schema is not None: - # If there are multiple matches, we use the first one in the dict. - # Our class dict is constructed breadth-first from top to bottom, - # so the first class that matches is the most general match. current_schema = schema + hash_schema = _hash_schema(current_schema) root_schema = rootschema or current_schema - matches = self.class_dict[self.hash_schema(current_schema)] - target_tp = matches[0] if matches else default_class + matches = cls.hash_tps[hash_schema] + target_tp = next(iter(matches), default_class) else: msg = "Must provide either `tp` or `schema`, but not both." raise ValueError(msg) - from_dict = partial(self.from_dict, rootschema=root_schema) - # Can also return a list? - resolved = _resolve_references(current_schema, root_schema) - if "anyOf" in resolved or "oneOf" in resolved: - schemas = resolved.get("anyOf", []) + resolved.get("oneOf", []) - for possible in schemas: - try: - validate_jsonschema(dct, possible, rootschema=root_schema) - except jsonschema.ValidationError: - continue - else: + from_dict = partial(cls.from_dict, rootschema=root_schema) + if (resolved := cls.hash_resolved.get(hash_schema)) is None: + resolved = _resolve_references(current_schema, root_schema) + cls.hash_resolved[hash_schema] = resolved + if "anyOf" in resolved: + for possible in resolved["anyOf"]: + # NOTE: Instead of raise/except/continue + # Pre-"zero-cost" exceptions, this has a huge performance gain. 
+ # https://docs.python.org/3/whatsnew/3.11.html#misc + # https://github.com/python/cpython/blob/9b3749849eda4012261a112b22eb07f26fd345a9/InternalDocs/exception_handling.md + it_errs = _validator(possible, root_schema).iter_errors(dct) + if next(it_errs, None) is None: return from_dict(dct, schema=possible, default_class=target_tp) if _is_dict(dct): # TODO: handle schemas for additionalProperties/patternProperties - props: dict[str, Any] = resolved.get("properties", {}) - kwds = { - k: (from_dict(v, schema=props[k]) if k in props else v) - for k, v in dct.items() - } - return target_tp(**kwds) + if props := resolved.get("properties"): + kwds = { + k: (from_dict(v, schema=sch) if (sch := props.get(k)) else v) + for k, v in dct.items() + } + return target_tp(**kwds) + else: + return target_tp(**dct) elif _is_list(dct): item_schema: dict[str, Any] = resolved.get("items", {}) return target_tp([from_dict(k, schema=item_schema) for k in dct]) @@ -1448,6 +1606,8 @@ def __init__(self, prop: str, schema: dict[str, Any]) -> None: self.schema = schema def __get__(self, obj, cls): + from altair import vegalite + self.obj = obj self.cls = cls # The docs from the encoding class parameter (e.g. `bin` in X, Color, diff --git a/altair/vegalite/v5/api.py b/altair/vegalite/v5/api.py index b437c1072..7af274a34 100644 --- a/altair/vegalite/v5/api.py +++ b/altair/vegalite/v5/api.py @@ -22,7 +22,6 @@ ) from typing_extensions import TypeAlias -import jsonschema import narwhals.stable.v1 as nw from altair import utils @@ -34,6 +33,7 @@ from altair.utils._vegafusion_data import using_vegafusion as _using_vegafusion from altair.utils.data import DataType from altair.utils.data import is_data_type as _is_data_type +from altair.utils.schemapi import _is_valid from .compiler import vegalite_compilers from .data import data_transformers @@ -437,7 +437,7 @@ def __getattr__(self, field_name: str) -> GetAttrExpression | SelectionExpressio # fields or encodings list, then we want to return an expression. if check_fields_and_encodings(self, field_name): return SelectionExpression(_attrexpr) - return _expr_core.GetAttrExpression(self.name, field_name) + return _attrexpr # TODO: Are there any special cases to consider for __getitem__? # This was copied from v4. 
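The ``anyOf`` fast path above, like the ``_is_valid`` checks introduced elsewhere in this diff, leans on one idiom: pull the first item from the lazy ``iter_errors`` iterator instead of raising and catching ``ValidationError``. A minimal, self-contained sketch of that idiom, using only the public ``jsonschema`` API (the schema below is illustrative, not one of Vega-Lite's):

import jsonschema

schema = {"anyOf": [{"type": "string"}, {"type": "number"}]}
validator = jsonschema.Draft7Validator(schema)

def is_valid(instance) -> bool:
    # iter_errors is lazy: fetching a single error is enough to decide
    # validity, and no exception is raised or caught on the hot path.
    return next(validator.iter_errors(instance), None) is None

assert is_valid(1.5)
assert not is_valid([])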
@@ -489,13 +489,10 @@ def check_fields_and_encodings(parameter: Parameter, field_name: str) -> bool: param = parameter.param if utils.is_undefined(param) or isinstance(param, core.VariableParameter): return False - for prop in ["fields", "encodings"]: - try: - if field_name in getattr(param.select, prop): - return True - except (AttributeError, TypeError): - pass - + select = param.select + for prop in "fields", "encodings": + if not utils.is_undefined(p := select._get(prop)) and field_name in p: + return True return False @@ -3738,13 +3735,13 @@ def from_dict( jsonschema.ValidationError : If ``validate`` and ``dct`` does not conform to the schema """ + if not validate: + return super().from_dict(dct, validate=False) _tp: Any for tp in TopLevelMixin.__subclasses__(): _tp = super() if tp is Chart else tp - try: - return _tp.from_dict(dct, validate=validate) - except jsonschema.ValidationError: - pass + if _is_valid(dct, _tp): + return _tp.from_dict(dct, validate=False) # As a last resort, try using the Root vegalite object return t.cast(_TSchemaBase, core.Root.from_dict(dct, validate)) @@ -4860,17 +4857,13 @@ def _repeat_names( return params_named -def _remove_layer_props( # noqa: C901 +def _remove_layer_props( chart: LayerChart, subcharts: list[ChartType], layer_props: Iterable[str] ) -> tuple[dict[str, Any], list[ChartType]]: def remove_prop(subchart: ChartType, prop: str) -> ChartType: - # If subchart is a UnitSpec, then subchart["height"] raises a KeyError - try: - if subchart[prop] is not Undefined: - subchart = subchart.copy() - subchart[prop] = Undefined - except KeyError: - pass + if not utils.is_undefined(subchart._get(prop)): + subchart = subchart.copy() + subchart[prop] = Undefined return subchart output_dict: dict[str, Any] = {} @@ -4883,15 +4876,8 @@ def remove_prop(subchart: ChartType, prop: str) -> ChartType: if chart[prop] is Undefined: # Top level does not have this prop. # Check for consistent props within the subcharts. - values = [] - for c in subcharts: - # If c is a UnitSpec, then c["height"] raises a KeyError. - try: - val = c[prop] - if val is not Undefined: - values.append(val) - except KeyError: - pass + values = [v for c in subcharts if not utils.is_undefined(v := c._get(prop))] + if len(values) == 0: pass elif all(v == values[0] for v in values[1:]): diff --git a/altair/vegalite/v5/schema/core.py b/altair/vegalite/v5/schema/core.py index 0d6dd47e0..c552fcf47 100644 --- a/altair/vegalite/v5/schema/core.py +++ b/altair/vegalite/v5/schema/core.py @@ -486,6 +486,7 @@ def load_schema() -> dict: class VegaLiteSchema(SchemaBase): + _schema = load_schema() _rootschema = load_schema() @classmethod @@ -501,8 +502,6 @@ class Root(VegaLiteSchema): specifications. (The json schema is generated from this type.) 
""" - _schema = VegaLiteSchema._rootschema - def __init__(self, *args, **kwds): super().__init__(*args, **kwds) diff --git a/pyproject.toml b/pyproject.toml index 6901364a3..2d3d0bd2c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -258,8 +258,16 @@ extend-select=[ "PLR1736", # literal-membership "PLR6201", + # unnecessary-lambda + "PLW0108", # unspecified-encoding "PLW1514", + # exception handling # + # ------------------ # + # try-except-pass + "S110", + # try-except-continue + "S112", ] select = [ # flake8-bugbear @@ -325,6 +333,8 @@ select = [ "I001", # complex-structure "C901", + # Perflint + "PERF", ] ignore = [ # Whitespace before ':' diff --git a/sphinxext/schematable.py b/sphinxext/schematable.py index f27622fb8..e0705ee02 100644 --- a/sphinxext/schematable.py +++ b/sphinxext/schematable.py @@ -173,7 +173,7 @@ def select_items_from_schema( for prop in props: try: yield prop, properties[prop], prop in required - except KeyError as err: + except KeyError as err: # noqa: PERF203 msg = f"Can't find property: {prop}" raise Exception(msg) from err diff --git a/tests/utils/test_schemapi.py b/tests/utils/test_schemapi.py index 2f0b9faab..25f483753 100644 --- a/tests/utils/test_schemapi.py +++ b/tests/utils/test_schemapi.py @@ -21,14 +21,8 @@ import altair as alt from altair import load_schema -from altair.utils.schemapi import ( - _DEFAULT_JSON_SCHEMA_DRAFT_URL, - SchemaBase, - SchemaValidationError, - Undefined, - UndefinedType, - _FromDict, -) +from altair.utils import schemapi +from altair.utils.schemapi import SchemaBase, Undefined, UndefinedType from altair.vegalite.v5.schema.channels import X from altair.vegalite.v5.schema.core import FieldOneOfPredicate, Legend from vega_datasets import data @@ -42,9 +36,9 @@ def test_actual_json_schema_draft_is_same_as_hardcoded_default(): - # See comments next to definition of _DEFAULT_JSON_SCHEMA_DRAFT_URL + # See comments next to definition of `_DEFAULT_DIALECT_URI` # for details why we need this test - assert _DEFAULT_JSON_SCHEMA_DRAFT_URL == _JSON_SCHEMA_DRAFT_URL, ( + assert schemapi._DEFAULT_DIALECT_URI == _JSON_SCHEMA_DRAFT_URL, ( "The default json schema URL, which is hardcoded," + " is not the same as the one used in the Vega-Lite schema." + " You need to update the default value." @@ -54,7 +48,23 @@ def test_actual_json_schema_draft_is_same_as_hardcoded_default(): class _TestSchema(SchemaBase): @classmethod def _default_wrapper_classes(cls): - return _TestSchema.__subclasses__() + return schemapi._subclasses(_TestSchema) + + @classmethod + def from_dict( + cls: type[schemapi.TSchemaBase], dct: dict[str, Any], validate: bool = True + ) -> schemapi.TSchemaBase: + """ + Overrides ``SchemaBase``, which uses a cached ``FromDict.hash_tps``. 
+ + The cached version is based on an iterator over: + + schemapi._subclasses(VegaLiteSchema) + """ + if validate: + cls.validate(dct) + converter = schemapi._FromDict(cls._default_wrapper_classes()) + return converter.from_dict(dct, cls) class MySchema(_TestSchema): @@ -389,13 +399,10 @@ class BadSchema(SchemaBase): assert str(err.value).startswith("Cannot instantiate object") -@pytest.mark.parametrize("use_json", [True, False]) -def test_hash_schema(use_json): - classes = _TestSchema._default_wrapper_classes() - - for cls in classes: - hsh1 = _FromDict.hash_schema(cls._schema, use_json=use_json) - hsh2 = _FromDict.hash_schema(cls._schema, use_json=use_json) +def test_hash_schema(): + for cls in _TestSchema._default_wrapper_classes(): + hsh1 = schemapi._hash_schema(cls._schema) + hsh2 = schemapi._hash_schema(cls._schema) assert hsh1 == hsh2 assert hash(hsh1) == hash(hsh2) @@ -407,7 +414,7 @@ def test_schema_validation_error(): except jsonschema.ValidationError as err: the_err = err - assert isinstance(the_err, SchemaValidationError) + assert isinstance(the_err, schemapi.SchemaValidationError) message = str(the_err) assert the_err.message in message @@ -871,7 +878,7 @@ def test_chart_validation_errors(chart_func, expected_error_message): warnings.filterwarnings("ignore", category=UserWarning) chart = chart_func() expected_error_message = inspect.cleandoc(expected_error_message) - with pytest.raises(SchemaValidationError, match=expected_error_message): + with pytest.raises(schemapi.SchemaValidationError, match=expected_error_message): chart.to_dict() diff --git a/tests/vegalite/v5/test_api.py b/tests/vegalite/v5/test_api.py index ee5b73370..ad91dda32 100644 --- a/tests/vegalite/v5/test_api.py +++ b/tests/vegalite/v5/test_api.py @@ -22,6 +22,7 @@ from packaging.version import Version import altair as alt +from altair.utils import schemapi from altair.utils.schemapi import Optional, Undefined from tests import skip_requires_vl_convert, slow @@ -528,8 +529,6 @@ def test_when_labels_position_based_on_condition() -> None: import numpy as np import pandas as pd - from altair.utils.schemapi import SchemaValidationError - rand = np.random.RandomState(42) df = pd.DataFrame({"xval": range(100), "yval": rand.randn(100).cumsum()}) @@ -570,7 +569,9 @@ def test_when_labels_position_based_on_condition() -> None: fail_condition = alt.condition( param_width < 200, alt.value("red"), alt.value("black") ) - with pytest.raises(SchemaValidationError, match="invalid value for `expr`"): + with pytest.raises( + schemapi.SchemaValidationError, match="invalid value for `expr`" + ): alt.param(expr=fail_condition) # type: ignore @@ -1264,7 +1265,7 @@ def test_themes(): assert "config" not in chart.to_dict() -def test_chart_from_dict(): +def test_chart_from_dict() -> None: base = alt.Chart("data.csv").mark_point().encode(x="x:Q", y="y:Q") charts = [ diff --git a/tools/generate_schema_wrapper.py b/tools/generate_schema_wrapper.py index 7bac492d7..f92732d4e 100644 --- a/tools/generate_schema_wrapper.py +++ b/tools/generate_schema_wrapper.py @@ -54,6 +54,7 @@ BASE_SCHEMA: Final = """ class {basename}(SchemaBase): + _schema = load_schema() _rootschema = load_schema() @classmethod def _default_wrapper_classes(cls) -> Iterator[type[Any]]: @@ -301,6 +302,17 @@ def process_description(description: str) -> str: return description.strip() +class RootSchemaGenerator(SchemaGenerator): + schema_class_template = textwrap.dedent( + ''' + class {classname}({basename}): + """{docstring}""" + + {init_code} + ''' + ) + + class 
FieldSchemaGenerator(SchemaGenerator): schema_class_template = textwrap.dedent( ''' @@ -394,7 +406,7 @@ def _add_shorthand_property_to_field_encodings(schema: dict) -> dict: encoding = SchemaInfo(schema["definitions"][encoding_def], rootschema=schema) - for _, propschema in encoding.properties.items(): + for _, propschema in encoding.properties.items(): # noqa: PERF102 def_dict = get_field_datum_value_defs(propschema, schema) field_ref = def_dict.get("field") @@ -412,7 +424,7 @@ def _add_shorthand_property_to_field_encodings(schema: dict) -> dict: "description": "shorthand for field, aggregate, and type", } if "required" not in defschema: - defschema["required"] = ["shorthand"] + defschema["required"] = ["shorthand"] # type: ignore elif "shorthand" not in defschema["required"]: defschema["required"].append("shorthand") schema["definitions"][field_ref.split("/")[-1]] = defschema @@ -445,7 +457,7 @@ def recursive_dict_update(schema: dict, root: dict, def_dict: dict) -> None: if k in properties: def_dict[k] = definition else: - recursive_dict_update(next_schema, root, def_dict) + recursive_dict_update(next_schema, root, def_dict) # type: ignore elif "anyOf" in schema: for sub_schema in schema["anyOf"]: recursive_dict_update(sub_schema, root, def_dict) @@ -461,7 +473,7 @@ def get_field_datum_value_defs(propschema: SchemaInfo, root: dict) -> dict[str, msg = "Unexpected schema structure" raise ValueError(msg) else: - recursive_dict_update(schema, root, def_dict) + recursive_dict_update(schema, root, def_dict) # type: ignore return {i: j for i, j in def_dict.items() if j} @@ -557,16 +569,15 @@ def generate_vegalite_schema_wrapper(schema_file: Path) -> str: "\n" f"__all__ = {all_}\n", LOAD_SCHEMA.format(schemafile="vega-lite-schema.json"), BASE_SCHEMA.format(basename=basename), - schema_class( + RootSchemaGenerator( "Root", schema=rootschema, basename=basename, - schemarepr=CodeSnippet(f"{basename}._rootschema"), - ), + ).schema_class(), ] for name in toposort(graph): - contents.append(definitions[name].schema_class()) + contents.append(definitions[name].schema_class()) # noqa: PERF401 contents.append("") # end with newline return "\n".join(contents) diff --git a/tools/schemapi/schemapi.py b/tools/schemapi/schemapi.py index 5140073ad..68a19b44a 100644 --- a/tools/schemapi/schemapi.py +++ b/tools/schemapi/schemapi.py @@ -1,51 +1,44 @@ from __future__ import annotations import contextlib -import copy import inspect import json +import operator import sys import textwrap -from collections import defaultdict -from functools import partial +from collections import defaultdict, deque +from functools import lru_cache, partial from importlib.metadata import version as importlib_version -from itertools import chain, zip_longest +from itertools import chain, groupby, islice, zip_longest from math import ceil from typing import ( TYPE_CHECKING, Any, Dict, - Final, Iterable, - Iterator, List, - Literal, + Mapping, Sequence, TypeVar, Union, cast, overload, ) -from typing_extensions import TypeAlias import jsonschema -import jsonschema.exceptions import jsonschema.validators import narwhals.stable.v1 as nw +from jsonschema import ValidationError from packaging.version import Version -# This leads to circular imports with the vegalite module. 
Currently, this works -# but be aware that when you access it in this script, the vegalite module might -# not yet be fully instantiated in case your code is being executed during import time -from altair import vegalite - if TYPE_CHECKING: from types import ModuleType - from typing import ClassVar + from typing import Callable, ClassVar, Final, Iterator, KeysView, Literal - from referencing import Registry + from jsonschema.protocols import Validator, _JsonParameter from altair.typing import ChartType + from altair.vegalite.v5.schema._typing import Map if sys.version_info >= (3, 13): from typing import TypeIs @@ -53,37 +46,81 @@ from typing_extensions import TypeIs if sys.version_info >= (3, 11): - from typing import Never, Self + from typing import LiteralString, Never, Self else: - from typing_extensions import Never, Self + from typing_extensions import LiteralString, Never, Self + if sys.version_info >= (3, 10): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias _OptionalModule: TypeAlias = "ModuleType | None" + _Errs: TypeAlias = Iterable[ValidationError] + _ErrsLazy: TypeAlias = Iterator[ValidationError] + _ErrsLazyGroup: TypeAlias = Iterator[_ErrsLazy] + _IntoLazyGroup: TypeAlias = Iterator["tuple[str, ValidationError]"] + _ValidatorKeyword: TypeAlias = Literal[ + "additionalProperties", + "enum", + "type", + "required", + "properties", + "anyOf", + "allOf", + "oneOf", + "ref", + "const", + ] + """Non-exhaustive listing of possible literals in ``ValidationError.validator``""" + +__all__ = [ + "Optional", # altair.utils + "SchemaBase", # altair.vegalite.v5.schema.core + "Undefined", # altair.utils + "UndefinedType", # altair.vegalite.v5.schema.core -> (side-effect relied on to propagate to alt.__init__) + "_is_valid", # altair.vegalite.v5.api + "_resolve_references", # tools.schemapi.utils -> tools.generate_schema_wrapper + "_subclasses", # altair.vegalite.v5.schema.core + "is_undefined", # altair.typing + "validate_jsonschema", # altair.utils.display + "with_property_setters", # altair.vegalite.v5.schema.channels +] -ValidationErrorList: TypeAlias = List[jsonschema.exceptions.ValidationError] -GroupedValidationErrors: TypeAlias = Dict[str, ValidationErrorList] - -# This URI is arbitrary and could be anything else. It just cannot be an empty -# string as we need to reference the schema registered in -# the referencing.Registry. _VEGA_LITE_ROOT_URI: Final = "urn:vega-lite-schema" +""" +Prefix added to each ``"$ref"``. + +This URI is arbitrary and could be anything else. + +It just cannot be an empty string as we need to reference the schema registered in +the ``referencing.Registry``. +""" + +_DEFAULT_DIALECT_URI: LiteralString = "http://json-schema.org/draft-07/schema#" +""" +Ideally, this would be parsed from the current Vega-Lite schema, and not hardcoded here. + +However, due to circular imports between this module and ``alt.vegalite``, +this information is not yet available as the latter is only *partially* loaded. + +The `draft version`_ which is used is unlikely to change often so it's ok to keep this. + +.. _draft version: + https://json-schema.org/understanding-json-schema/reference/schema#declaring-a-dialect +""" +# RELATED: tests/utils/test/schemapi.py/test_actual_json_schema_draft_is_same_as_hardcoded_default -# Ideally, jsonschema specification would be parsed from the current Vega-Lite -# schema instead of being hardcoded here as a default value. 
-# However, due to circular imports between this module and the altair.vegalite -# modules, this information is not yet available at this point as altair.vegalite -# is only partially loaded. The draft version which is used is unlikely to -# change often so it's ok to keep this. There is also a test which validates -# that this value is always the same as in the Vega-Lite schema. -_DEFAULT_JSON_SCHEMA_DRAFT_URL: Final = "http://json-schema.org/draft-07/schema#" - - -# If DEBUG_MODE is True, then schema objects are converted to dict and -# validated at creation time. This slows things down, particularly for -# larger specs, but leads to much more useful tracebacks for the user. -# Individual schema classes can override this by setting the -# class-level _class_is_valid_at_instantiation attribute to False DEBUG_MODE: bool = True +""" +If ``DEBUG_MODE``, then ``SchemaBase`` are converted to ``dict`` and validated at creation time. + +This slows things down, particularly for larger specs, but leads to much more +useful tracebacks for the user. -jsonschema_version_str = importlib_version("jsonschema") +Individual schema classes can override with: + + class Derived(SchemaBase): + _class_is_valid_at_instantiation: ClassVar[bool] = False +""" def enable_debug_mode() -> None: @@ -107,238 +144,406 @@ def debug_mode(arg: bool) -> Iterator[None]: DEBUG_MODE = original -@overload def validate_jsonschema( - spec: Any, - schema: dict[str, Any], - rootschema: dict[str, Any] | None = ..., - *, - raise_error: Literal[True] = ..., -) -> Never: ... + spec: _JsonParameter, schema: Map, rootschema: Map | None = None +) -> None: + """ + Validates ``spec`` against ``schema`` in the context of ``rootschema``. + Any ``ValidationError``(s) are deduplicated and prioritized, with + the remaining errors deemed relevant to the user. -@overload -def validate_jsonschema( - spec: Any, - schema: dict[str, Any], - rootschema: dict[str, Any] | None = ..., - *, - raise_error: Literal[False], -) -> jsonschema.exceptions.ValidationError | None: ... + Notes + ----- + - The first error is monkeypatched with a grouped iterator of all remaining errors + - ``SchemaValidationError`` utilizes the patched attribute, to craft a more helpful error message. + - However this breaks typing + ``schema`` and ``rootschema`` are not validated but instead considered as valid. -def validate_jsonschema( - spec, - schema: dict[str, Any], - rootschema: dict[str, Any] | None = None, - *, - raise_error: bool = True, -) -> jsonschema.exceptions.ValidationError | None: - """ - Validates the passed in spec against the schema in the context of the rootschema. + We don't use ``jsonschema.validate`` as this would validate the ``schema`` itself. + Instead, we pass the ``schema`` directly to the validator class. + + This is done for two reasons: + + 1. The schema comes from Vega-Lite and is not based on the user + input, therefore there is no need to validate it in the first place. + 2. The "uri-reference" format checker fails for some of the + references as URIs in "$ref" are not encoded, e.g.: + + '#/definitions/ValueDefWithCondition' - If any errors are found, they are deduplicated and prioritized - and only the most relevant errors are kept. Errors are then either raised - or returned, depending on the value of `raise_error`. + would be a valid $ref in a Vega-Lite schema but it is not a valid + URI reference due to the characters such as '<'. 
""" - errors = _get_errors_from_spec(spec, schema, rootschema=rootschema) - if errors: - leaf_errors = _get_leaves_of_error_tree(errors) - grouped_errors = _group_errors_by_json_path(leaf_errors) - grouped_errors = _subset_to_most_specific_json_paths(grouped_errors) - grouped_errors = _deduplicate_errors(grouped_errors) - - # Nothing special about this first error but we need to choose one - # which can be raised - main_error: Any = next(iter(grouped_errors.values()))[0] - # All errors are then attached as a new attribute to ValidationError so that - # they can be used in SchemaValidationError to craft a more helpful - # error message. Setting a new attribute like this is not ideal as - # it then no longer matches the type ValidationError. It would be better - # to refactor this function to never raise but only return errors. - main_error._all_errors = grouped_errors - if raise_error: - raise main_error + it_errors = _validator(schema, rootschema).iter_errors(spec) + if first_error := next(it_errors, None): + groups = _group_tree_leaves(_rechain(first_error, it_errors)) + most_specific = _prune_subset_paths(groups) + deduplicated = _deduplicate_errors(most_specific) + dummy_error: Any + if dummy_error := next(deduplicated, None): + dummy_error._errors = _regroup(_rechain(dummy_error, deduplicated)) # type: ignore[attr-defined] + raise dummy_error else: - return main_error - else: - return None + msg = ( + f"Expected to find at least one error, but first error was `None`.\n\n" + f"spec: {spec!r}" + ) + raise NotImplementedError(msg) -def _get_errors_from_spec( - spec: dict[str, Any], - schema: dict[str, Any], - rootschema: dict[str, Any] | None = None, -) -> ValidationErrorList: +def _get_schema_dialect_uri(schema: Map, /) -> str: """ - Uses the relevant jsonschema validator to validate the passed in spec against the schema using the rootschema to resolve references. + Return value of `$schema`_. + + Defines which JSON Schema draft ``schema`` was written for. + + .. _$schema: + https://json-schema.org/understanding-json-schema/reference/schema#schema - The schema and rootschema themselves are not validated but instead considered as valid. """ - # We don't use jsonschema.validate as this would validate the schema itself. - # Instead, we pass the schema directly to the validator class. This is done for - # two reasons: The schema comes from Vega-Lite and is not based on the user - # input, therefore there is no need to validate it in the first place. Furthermore, - # the "uri-reference" format checker fails for some of the references as URIs in - # "$ref" are not encoded, - # e.g. '#/definitions/ValueDefWithCondition' would be a valid $ref in a Vega-Lite schema but - # it is not a valid URI reference due to the characters such as '<'. - - json_schema_draft_url = _get_json_schema_draft_url(rootschema or schema) - validator_cls = jsonschema.validators.validator_for( - {"$schema": json_schema_draft_url} - ) - validator_kwargs: dict[str, Any] = {} - if hasattr(validator_cls, "FORMAT_CHECKER"): - validator_kwargs["format_checker"] = validator_cls.FORMAT_CHECKER - - if _use_referencing_library(): - schema = _prepare_references_in_schema(schema) - validator_kwargs["registry"] = _get_referencing_registry( - rootschema or schema, json_schema_draft_url - ) + return schema.get("$schema", _DEFAULT_DIALECT_URI) + + +def _prepare_references(schema: Map, /) -> dict[str, Any]: + """ + Return a deep copy of ``schema`` w/ replaced uri(s). 
+ + All encountered ``dict | list``(s) will be reconstructed + w/ ``_VEGA_LITE_ROOT_URI`` in front of all nested ``$ref`` values. + + Notes + ----- + ``copy.deepcopy`` is not needed as the iterator yields new objects. + """ + # FIXME: The hottest function + it is recursive + # Should be done once per schema + return dict(_recurse_refs(schema)) + +def _recurse_refs(m: Map, /) -> Iterator[tuple[str, Any]]: + """ + Recurse through a schema, yielding fresh copies of mutable containers. + + Adds ``_VEGA_LITE_ROOT_URI`` in front of all nested ``$ref`` values. + """ + for k, v in m.items(): + if k == "$ref": + yield k, f"{_VEGA_LITE_ROOT_URI}{v}" + elif isinstance(v, dict): + yield k, dict(_recurse_refs(v)) + elif isinstance(v, list): + yield k, [dict(_recurse_refs(el)) if _is_dict(el) else el for el in v] + else: + yield k, v + + +@lru_cache(maxsize=None) +def _validator_for(uri: str, /) -> Callable[..., Validator]: + """ + Retrieve the constructor for a `Validator`_ class appropriate for validating the given schema. + + Parameters + ---------- + uri + Address pointing to the `$schema`_. + + .. _Validator: + https://python-jsonschema.readthedocs.io/en/stable/validate/#the-validator-protocol + .. _$schema: + https://json-schema.org/understanding-json-schema/reference/schema + """ + tp: Callable[..., Validator] = jsonschema.validators.validator_for({"$schema": uri}) + if hasattr(tp, "FORMAT_CHECKER"): + return partial(tp, format_checker=tp.FORMAT_CHECKER) else: - # No resolver is necessary if the schema is already the full schema - validator_kwargs["resolver"] = ( - jsonschema.RefResolver.from_schema(rootschema) - if rootschema is not None - else None + return tp + + +_HASH_ENCODER = json.JSONEncoder(sort_keys=True, separators=(",", ":")) + +if Version(importlib_version("jsonschema")) >= Version("4.18"): + from referencing import Registry + from referencing.jsonschema import specification_with as _specification_with + + if TYPE_CHECKING: + from referencing import Specification + from referencing._core import Resolver + + @lru_cache(maxsize=None) + def specification_with(dialect_id: str, /) -> Specification[Any]: + """ + Retrieve the `Specification`_ with the given dialect identifier. + + Wraps `specification_with`_, which returns one **immutable** object per + JSON Schema **dialect**. + + Raises + ------ + ``UnknownDialect`` + if the given ``dialect_id`` isn't known + + .. _Specification: + https://referencing.readthedocs.io/en/stable/api/#referencing.Specification + .. _specification_with: + https://referencing.readthedocs.io/en/stable/api/#referencing.jsonschema.specification_with + """ + return _specification_with(dialect_id) + + class _Registry: + """ + A cache of `Registry`_ (s). + + An instance named ``registry`` is used to wrap the `Registry`_ API, + with a managed cache. + + See Also + -------- + ``_Registry.__call__`` + + .. _Registry: + https://referencing.readthedocs.io/en/stable/api/#referencing.Registry + """ + + _cached: ClassVar[dict[tuple[str, str], Registry[Any]]] = {} + + @staticmethod + def compute_key(root: Map, dialect_id: str, /) -> tuple[str, str]: + """ + Generate a simple-minded hash to identify a registry. + + Notes + ----- + Why the strange hash? + - **All** generated schemas hit the ``"$ref"`` branch. + - ``api.Then`` hits the ``len(root) == 1`` branch w/ ``{"type": "object"}``.
+ - Final branch is only hit by mock schemas in: + - `tests/utils/test_core.py::test_infer_encoding_types` + - `tests/utils/test_schemapi.py` + """ + if "$ref" in root: + k1 = root["$ref"] + elif len(root) == 1: + k1 = "".join(f"{s!s}" for s in chain(*root.items())) + else: + k1 = _HASH_ENCODER.encode(root) + return k1, dialect_id + + @classmethod + def update_cached( + cls, root: Map, dialect_id: str, resolver: Resolver[Any] + ) -> None: + cls._cached[cls.compute_key(root, dialect_id)] = resolver._registry + + def __call__(self, root: Map, dialect_id: str, /) -> Registry[Any]: + """ + Constructs a `Registry`_, adding the `Resource`_ produced by ``rootschema``. + + Requires at least ``jsonschema`` `v4.18.0a1`_. + + .. _Registry: + https://referencing.readthedocs.io/en/stable/api/#referencing.Registry + .. _Resource: + https://referencing.readthedocs.io/en/stable/api/#referencing.Resource + .. _v4.18.0a1: + https://github.com/python-jsonschema/jsonschema/releases/tag/v4.18.0a1 + """ + cache_key = self.compute_key(root, dialect_id) + if (reg := self._cached.get(cache_key, None)) is not None: + return reg + resource = specification_with(dialect_id).create_resource(root) + reg = Registry().with_resource(_VEGA_LITE_ROOT_URI, resource).crawl() + type(self)._cached[cache_key] = reg + return reg + + registry: _Registry = _Registry() + + def _validator(schema: Map, rootschema: Map | None = None, /) -> Validator: + """ + Constructs a `Validator`_ for future validation. + + Parameters + ---------- + schema + Schema that a spec will be validated against. + rootschema + Context to evaluate within. + + We have **both** a current & a backwards-compatible version of this function. + + .. _Validator: + https://python-jsonschema.readthedocs.io/en/stable/validate/#the-validator-protocol + """ + # NOTE: This is the current version + uri = _get_schema_dialect_uri(rootschema or schema) + validator = _validator_for(uri) + return validator( + _prepare_references(schema), registry=registry(rootschema or schema, uri) ) - validator = validator_cls(schema, **validator_kwargs) - errors = list(validator.iter_errors(spec)) - return errors + def _resolve_references(schema: Map, rootschema: Map) -> Map: + """ + Resolve schema references until there is no ``"$ref"`` anymore in the top-level ``dict``. + + ``jsonschema`` deprecated ``RefResolver`` in favor of `referencing`_. + + We have **both** a current & a backwards-compatible version of this function. + + .. _referencing: + https://github.com/python-jsonschema/jsonschema/releases/tag/v4.18.0a1 + """ + # NOTE: This is the current version + root = rootschema or schema + if ("$ref" not in root) or ("$ref" not in schema): + return schema + uri = _get_schema_dialect_uri(rootschema) + resolver = registry(root, uri).resolver(_VEGA_LITE_ROOT_URI) + while "$ref" in schema: + resolved = resolver.lookup(schema["$ref"]) + schema = resolved.contents + registry.update_cached(root, uri, resolved.resolver) + return schema + + +else: + def _validator(schema: Map, rootschema: Map | None = None, /) -> Validator: + """ + Constructs a `Validator`_ for future validation. + + We have **both** a current & a backwards-compatible version of this function. -def _get_json_schema_draft_url(schema: dict[str, Any]) -> str: - return schema.get("$schema", _DEFAULT_JSON_SCHEMA_DRAFT_URL) + Parameters + ---------- + schema + Schema that a spec will be validated against. + rootschema + Context to evaluate within. + .. 
_Validator: + https://python-jsonschema.readthedocs.io/en/stable/validate/#the-validator-protocol + """ + # NOTE: This is the backwards-compatible version + validator = _validator_for(_get_schema_dialect_uri(rootschema or schema)) + resolver: Any = ( + jsonschema.RefResolver.from_schema(rootschema) if rootschema else rootschema + ) + return validator(schema, resolver=resolver) -def _use_referencing_library() -> bool: - """In version 4.18.0, the jsonschema package deprecated RefResolver in favor of the referencing library.""" - return Version(jsonschema_version_str) >= Version("4.18") + def _resolve_references(schema: Map, rootschema: Map) -> Map: + """ + Resolve schema references until there is no ``"$ref"`` anymore in the top-level ``dict``. + ``jsonschema`` deprecated ``RefResolver`` in favor of `referencing`_. -def _prepare_references_in_schema(schema: dict[str, Any]) -> dict[str, Any]: - # Create a copy so that $ref is not modified in the original schema in case - # that it would still reference a dictionary which might be attached to - # an Altair class _schema attribute - schema = copy.deepcopy(schema) + We have **both** a current & a backwards-compatible version of this function. - def _prepare_refs(d: dict[str, Any]) -> dict[str, Any]: + .. _referencing: + https://github.com/python-jsonschema/jsonschema/releases/tag/v4.18.0a1 """ - Add _VEGA_LITE_ROOT_URI in front of all $ref values. + # NOTE: This is the backwards-compatible version + resolver = jsonschema.RefResolver.from_schema(rootschema or schema) + while "$ref" in schema: + with resolver.resolving(schema["$ref"]) as resolved: + schema = resolved + return schema + - This function recursively iterates through the whole dictionary. +if Version(importlib_version("jsonschema")) >= Version("4.0.1"): + _json_path: Callable[[ValidationError], str] = operator.attrgetter("json_path") +else: - $ref values can only be nested in dictionaries or lists - as the passed in `d` dictionary comes from the Vega-Lite json schema - and in json we only have arrays (-> lists in Python) and objects - (-> dictionaries in Python) which we need to iterate through. + def _json_path(err: ValidationError, /) -> str: """ - for key, value in d.items(): - if key == "$ref": - d[key] = _VEGA_LITE_ROOT_URI + d[key] - elif isinstance(value, dict): - d[key] = _prepare_refs(value) - elif isinstance(value, list): - prepared_values = [] - for v in value: - if isinstance(v, dict): - v = _prepare_refs(v) - prepared_values.append(v) - d[key] = prepared_values - return d - - schema = _prepare_refs(schema) - return schema - - -# We do not annotate the return value here as the referencing library is not always -# available and this function is only executed in those cases. -def _get_referencing_registry( - rootschema: dict[str, Any], json_schema_draft_url: str | None = None -) -> Registry: - # Referencing is a dependency of newer jsonschema versions, starting with the - # version that is specified in _use_referencing_library and we therefore - # can expect that it is installed if the function returns True. - # We ignore 'import' mypy errors which happen when the referencing library - # is not installed. That's ok as in these cases this function is not called. - # We also have to ignore 'unused-ignore' errors as mypy raises those in case - # referencing is installed. 
- import referencing # type: ignore[import,unused-ignore] - import referencing.jsonschema # type: ignore[import,unused-ignore] - - if json_schema_draft_url is None: - json_schema_draft_url = _get_json_schema_draft_url(rootschema) - - specification = referencing.jsonschema.specification_with(json_schema_draft_url) - resource = specification.create_resource(rootschema) - return referencing.Registry().with_resource( - uri=_VEGA_LITE_ROOT_URI, resource=resource - ) + Vendored backport for ``jsonschema.ValidationError.json_path`` property. + + See https://github.com/vega/altair/issues/3038. + """ + path = "$" + for elem in err.absolute_path: + if isinstance(elem, int): + path += "[" + str(elem) + "]" + else: + path += "." + elem + return path -def _json_path(err: jsonschema.exceptions.ValidationError) -> str: +_FN_PATH = cast("Callable[[tuple[str, ValidationError]], str]", operator.itemgetter(0)) +"""Key function for ``(json_path, ValidationError)``.""" +_FN_VALIDATOR = cast("Callable[[ValidationError], _ValidatorKeyword]", operator.attrgetter("validator")) # fmt: off +"""Key function for ``ValidationError.validator``.""" + + +def _message_len(err: ValidationError, /) -> int: + """Return length of a ``ValidationError`` message.""" + return len(err.message) + + +def _rechain(element: T, others: Iterable[T], /) -> Iterator[T]: """ - Drop in replacement for the .json_path property of the jsonschema ValidationError class. + Continue an iterator at the last popped ``element``. - This is not available as property for ValidationError with jsonschema<4.0.1. + Equivalent to:: + + elements = 1, 2, 3, 4, 5 + it = iter(elements) + element = next(it) + it_continue = chain([element], it) - More info, see https://github.com/vega/altair/issues/3038. """ - path = "$" - for elem in err.absolute_path: - if isinstance(elem, int): - path += "[" + str(elem) + "]" - else: - path += "." + elem - return path + yield element + yield from others -def _group_errors_by_json_path( - errors: ValidationErrorList, -) -> GroupedValidationErrors: +def _regroup( + errors: _Errs, /, *, key: Callable[[ValidationError], str] = _json_path +) -> _ErrsLazyGroup: """ - Groups errors by the `json_path` attribute of the jsonschema ValidationError class. + Regroup error stream by a ``key`` function. - This attribute contains the path to the offending element within - a chart specification and can therefore be considered as an identifier of an - 'issue' in the chart that needs to be fixed. + Assumes ``errors`` are already sorted, which holds **only** at the end of ``validate_jsonschema``. """ - errors_by_json_path = defaultdict(list) - for err in errors: - err_key = getattr(err, "json_path", _json_path(err)) - errors_by_json_path[err_key].append(err) - return dict(errors_by_json_path) + for _, grouped_it in groupby(errors, key): + yield grouped_it -def _get_leaves_of_error_tree( - errors: ValidationErrorList, -) -> ValidationErrorList: +def _group_tree_leaves(errors: _Errs, /) -> _IntoLazyGroup: """ - For each error in `errors`, it traverses down the "error tree" that is generated by the jsonschema library to find and return all "leaf" errors. + Combines 3 previously distinct steps: + + 1. ``_get_leaves_of_error_tree`` These are errors which have no further errors that caused it and so they are the most specific errors with the most specific error messages. - """ - leaves: ValidationErrorList = [] + + 2. ``_group_errors_by_json_path`` (part of) + + Extracts the ``.json_path`` property for grouping. + + 3. 
Removes:: + + ValidationError: "'value' is a required property" + + as these errors are unlikely to be the relevant ones for the user. + They come from validation against a schema definition where the output of `alt.value` + would be valid. + However, if a user uses `alt.value`, the `value` keyword is included automatically + from that function and so it's unlikely that this was what the user intended + if the keyword is not present in the first place. + """ # noqa: D400 + REQUIRED = "required" + VALUE = ["value"] for err in errors: - if err.context: - # This means that the error `err` was caused by errors in subschemas. - # The list of errors from the subschemas are available in the property - # `context`. - leaves.extend(_get_leaves_of_error_tree(err.context)) + if err_context := err.context: + yield from _group_tree_leaves(err_context) + elif err.validator == REQUIRED and err.validator_value == VALUE: + continue else: - leaves.append(err) - return leaves + yield _json_path(err), err -def _subset_to_most_specific_json_paths( - errors_by_json_path: GroupedValidationErrors, -) -> GroupedValidationErrors: +def _prune_subset_paths(json_path_errors: _IntoLazyGroup, /) -> Iterator[_Errs]: """ Removes key (json path), value (errors) pairs where the json path is fully contained in another json path. @@ -346,66 +551,26 @@ def _subset_to_most_specific_json_paths( then the first one will be removed and only the second one is returned. This is done under the assumption that more specific json paths give more helpful error messages to the user. - """ - errors_by_json_path_specific: GroupedValidationErrors = {} - for json_path, errors in errors_by_json_path.items(): - if not _contained_at_start_of_one_of_other_values( - json_path, list(errors_by_json_path.keys()) - ): - errors_by_json_path_specific[json_path] = errors - return errors_by_json_path_specific - -def _contained_at_start_of_one_of_other_values(x: str, values: Sequence[str]) -> bool: - # Does not count as "contained at start of other value" if the values are - # the same. These cases should be handled separately - return any(value.startswith(x) for value in values if x != value) + Currently using a `list`, but typing it more restrictive to see if it can be avoided. - -def _deduplicate_errors( - grouped_errors: GroupedValidationErrors, -) -> GroupedValidationErrors: - """ - Some errors have very similar error messages or are just in general not helpful for a user. - - This function removes as many of these cases as possible and - can be extended over time to handle new cases that come up. + - Needs to be sorted to work with groupby + - Reversing allows prioritising more specific groups, since they are seen first + - Then re-reversed, to keep seen order """ - grouped_errors_deduplicated: GroupedValidationErrors = {} - for json_path, element_errors in grouped_errors.items(): - errors_by_validator = _group_errors_by_validator(element_errors) - - deduplication_functions = { - "enum": _deduplicate_enum_errors, - "additionalProperties": _deduplicate_additional_properties_errors, - } - deduplicated_errors: ValidationErrorList = [] - for validator, errors in errors_by_validator.items(): - deduplication_func = deduplication_functions.get(validator) - if deduplication_func is not None: - errors = deduplication_func(errors) - deduplicated_errors.extend(_deduplicate_by_message(errors)) - - # Removes any ValidationError "'value' is a required property" as these - # errors are unlikely to be the relevant ones for the user. 
They come from - # validation against a schema definition where the output of `alt.value` - # would be valid. However, if a user uses `alt.value`, the `value` keyword - # is included automatically from that function and so it's unlikely - # that this was what the user intended if the keyword is not present - # in the first place. - deduplicated_errors = [ - err for err in deduplicated_errors if not _is_required_value_error(err) - ] - - grouped_errors_deduplicated[json_path] = deduplicated_errors - - return grouped_errors_deduplicated - - -def _is_required_value_error(err: jsonschema.exceptions.ValidationError) -> bool: - return err.validator == "required" and err.validator_value == ["value"] + rev_sort = sorted(json_path_errors, key=_FN_PATH, reverse=True) + keeping: dict[str, _Errs] = {} + for unique_path, grouped_errors in groupby(rev_sort, key=_FN_PATH): + if any(seen.startswith(unique_path) for seen in keeping): + continue + else: + keeping[unique_path] = [err for _, err in grouped_errors] + yield from islice(reversed(keeping.values()), 3) -def _group_errors_by_validator(errors: ValidationErrorList) -> GroupedValidationErrors: +def _groupby_validator( + errors: _Errs, / +) -> Iterator[tuple[_ValidatorKeyword, _ErrsLazy]]: """ Groups the errors by the json schema "validator" that caused the error. @@ -414,80 +579,68 @@ was set although no additional properties are allowed then "validator" is `"additionalProperties"`, etc. """ - errors_by_validator: defaultdict[str, ValidationErrorList] = defaultdict(list) - for err in errors: - # Ignore mypy error as err.validator as it wrongly sees err.validator - # as of type Optional[Validator] instead of str which it is according - # to the documentation and all tested cases - errors_by_validator[err.validator].append(err) # type: ignore[index] - return dict(errors_by_validator) + yield from groupby(sorted(errors, key=_FN_VALIDATOR), key=_FN_VALIDATOR) -def _deduplicate_enum_errors(errors: ValidationErrorList) -> ValidationErrorList: +def _deduplicate_errors(grouped_errors: Iterator[_Errs], /) -> _ErrsLazy: """ - Deduplicate enum errors by removing the errors where the allowed values are a subset of another error. + Some errors have very similar error messages or are just in general not helpful for a user. - For example, if `enum` contains two errors and one has `validator_value` (i.e. accepted values) ["A", "B"] and the - other one ["A", "B", "C"] then the first one is removed and the final - `enum` list only contains the error with ["A", "B", "C"]. + This function removes as many of these cases as possible and + can be extended over time to handle new cases that come up.
""" - if len(errors) > 1: - # Values (and therefore `validator_value`) of an enum are always arrays, - # see https://json-schema.org/understanding-json-schema/reference/generic.html#enumerated-values - # which is why we can use join below - value_strings = [",".join(err.validator_value) for err in errors] # type: ignore - longest_enums: ValidationErrorList = [] - for value_str, err in zip(value_strings, errors): - if not _contained_at_start_of_one_of_other_values(value_str, value_strings): - longest_enums.append(err) - errors = longest_enums - return errors - - -def _deduplicate_additional_properties_errors( - errors: ValidationErrorList, -) -> ValidationErrorList: + for by_path in grouped_errors: + for validator, errors in _groupby_validator(by_path): + if fn := _FN_MAP_DEDUPLICATION.get(validator): + errors = fn(errors) + yield from _distinct_messages(errors) + + +def _distinct_messages(iterable: _Errs, /) -> _ErrsLazy: + seen = set() + for el in iterable: + if el.message not in seen: + seen.add(el.message) + yield el + + +def _shortest_any_of(iterable: _Errs, /) -> _ErrsLazy: """ If there are multiple additional property errors it usually means that the offending element was validated against multiple schemas and its parent is a common anyOf validator. The error messages produced from these cases are usually - very similar and we just take the shortest one. For example, - the following 3 errors are raised for the `unknown` channel option in - `alt.X("variety", unknown=2)`: - - "Additional properties are not allowed ('unknown' was unexpected)" - - "Additional properties are not allowed ('field', 'unknown' were unexpected)" - - "Additional properties are not allowed ('field', 'type', 'unknown' were unexpected)". + very similar and we just take the shortest one. + For example the following 3 errors are raised for:: + + alt.X("variety", unknown=2) + - "Additional properties are not allowed ('unknown' was unexpected)" + - "Additional properties are not allowed ('field', 'unknown' were unexpected)" + - "Additional properties are not allowed ('field', 'type', 'unknown' were unexpected)". """ - if len(errors) > 1: - # Test if all parent errors are the same anyOf error and only do - # the prioritization in these cases. Can't think of a chart spec where this - # would not be the case but still allow for it below to not break anything. - parent = errors[0].parent - if ( - parent is not None - and parent.validator == "anyOf" - # Use [1:] as don't have to check for first error as it was used - # above to define `parent` - and all(err.parent is parent for err in errors[1:]) - ): - errors = [min(errors, key=lambda x: len(x.message))] - return errors - - -def _deduplicate_by_message(errors: ValidationErrorList) -> ValidationErrorList: - """Deduplicate errors by message. 
This keeps the original order in case it was chosen intentionally.""" - return list({e.message: e for e in errors}.values()) - - -def _subclasses(cls: type[Any]) -> Iterator[type[Any]]: - """Breadth-first sequence of all classes which inherit from cls.""" - seen = set() - current_set = {cls} - while current_set: - seen |= current_set - current_set = set.union(*(set(cls.__subclasses__()) for cls in current_set)) - for cls in current_set - seen: - yield cls + it = iter(iterable) + first = next(it) + if ( + parent := cast("ValidationError", first.parent) + ) and parent.validator == "anyOf": + yield min(_rechain(first, it), key=_message_len) + else: + yield first + + +def _prune_subset_enum(iterable: _Errs, /) -> _ErrsLazy: + """Skip any ``"enum"`` errors that are a subset of another error.""" + enums: tuple[set[str], ...] + errors: tuple[ValidationError, ...] + enums, errors = zip(*((set(err.validator_value), err) for err in iterable)) # type: ignore[arg-type] + for cur_enum, err in zip(enums, errors): + if not any(cur_enum < e for e in enums if e != cur_enum): + yield err + + +_FN_MAP_DEDUPLICATION: Mapping[_ValidatorKeyword, Callable[[_Errs], _ErrsLazy]] = { + "additionalProperties": _shortest_any_of, + "enum": _prune_subset_enum, +} def _from_array_like(obj: Iterable[Any], /) -> list[Any]: @@ -536,30 +689,8 @@ return obj -def _resolve_references( - schema: dict[str, Any], rootschema: dict[str, Any] | None = None -) -> dict[str, Any]: - """Resolve schema references until there is no $ref anymore in the top-level of the dictionary.""" - if _use_referencing_library(): - registry = _get_referencing_registry(rootschema or schema) - # Using a different variable name to show that this is not the - # jsonschema.RefResolver but instead a Resolver from the referencing - # library - referencing_resolver = registry.resolver() - while "$ref" in schema: - schema = referencing_resolver.lookup( - _VEGA_LITE_ROOT_URI + schema["$ref"] - ).contents - else: - resolver = jsonschema.RefResolver.from_schema(rootschema or schema) - while "$ref" in schema: - with resolver.resolving(schema["$ref"]) as resolved: - schema = resolved - return schema - - class SchemaValidationError(jsonschema.ValidationError): - def __init__(self, obj: SchemaBase, err: jsonschema.ValidationError) -> None: + def __init__(self, obj: SchemaBase, err: ValidationError) -> None: """ A wrapper for ``jsonschema.ValidationError`` with friendlier traceback.
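To make the ``_prune_subset_enum`` strategy above concrete, here is a hedged, standalone sketch that operates on plain lists standing in for ``ValidationError.validator_value`` (the real code keeps the paired ``ValidationError`` objects alongside the sets): an ``enum`` error whose accepted values are a strict subset of another error's values adds no information, so only the maximal value sets survive.

def prune_subset_enums(enum_values: list[list[str]]) -> list[list[str]]:
    # set(vals) mirrors set(err.validator_value); `<` is a strict-subset
    # test, so two identical enums never prune each other.
    sets = [set(vals) for vals in enum_values]
    return [
        vals
        for vals, s in zip(enum_values, sets)
        if not any(s < other for other in sets if other is not s)
    ]

# ["A", "B"] is a strict subset of ["A", "B", "C"], so only the latter is kept.
assert prune_subset_enums([["A", "B"], ["A", "B", "C"]]) == [["A", "B", "C"]]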
@@ -580,9 +711,8 @@ def __init__(self, obj: SchemaBase, err: jsonschema.ValidationError) -> None: """ super().__init__(**err._contents()) self.obj = obj - self._errors: GroupedValidationErrors = getattr( - err, "_all_errors", {getattr(err, "json_path", _json_path(err)): [err]} - ) + err = cast("SchemaValidationError", err) + self._errors: _ErrsLazyGroup = err._errors # This is the message from err self._original_message = self.message self.message = self._get_message() @@ -590,50 +720,52 @@ def __init__(self, obj: SchemaBase, err: jsonschema.ValidationError) -> None: def __str__(self) -> str: return self.message + @staticmethod + def indent_from_second_line(msg: str, /, indent: int = 4) -> str: + return "\n".join( + " " * indent + s if idx > 0 and s else s + for idx, s in enumerate(msg.split("\n")) + ) + def _get_message(self) -> str: - def indent_second_line_onwards(message: str, indent: int = 4) -> str: - modified_lines: list[str] = [] - for idx, line in enumerate(message.split("\n")): - if idx > 0 and len(line) > 0: - line = " " * indent + line - modified_lines.append(line) - return "\n".join(modified_lines) - - error_messages: list[str] = [] - # Only show a maximum of 3 errors as else the final message returned by this - # method could get very long. - for errors in list(self._errors.values())[:3]: - error_messages.append(self._get_message_for_errors_group(errors)) - - message = "" - if len(error_messages) > 1: - error_messages = [ - indent_second_line_onwards(f"Error {error_id}: {m}") - for error_id, m in enumerate(error_messages, start=1) - ] - message += "Multiple errors were found.\n\n" - message += "\n\n".join(error_messages) - return message - - def _get_message_for_errors_group( - self, - errors: ValidationErrorList, - ) -> str: - if errors[0].validator == "additionalProperties": - # During development, we only found cases where an additionalProperties - # error was raised if that was the only error for the offending instance - # as identifiable by the json path. Therefore, we just check here the first - # error. However, other constellations might exist in which case - # this should be adapted so that other error messages are shown as well. - message = self._get_additional_properties_error_message(errors[0]) + it: _ErrsLazyGroup = self._errors + group_1 = list(next(it)) + if (group_2 := next(it, None)) is not None: + messages: Iterator[str] = ( + self._get_message_for_errors_group(g) + for g in (group_1, list(group_2), next(it, None)) + if g is not None + ) + msg = "\n\n".join( + self.indent_from_second_line(f"Error {error_id}: {m}") + for error_id, m in enumerate(messages, start=1) + ) + return f"Multiple errors were found.\n\n{msg}" else: - message = self._get_default_error_message(errors=errors) + return self._get_message_for_errors_group(group_1) - return message.strip() + def _get_message_for_errors_group(self, errors: _Errs) -> str: + """ + Note. + + During development, we only found cases where an additionalProperties + error was raised if that was the only error for the offending instance + as identifiable by the json path. + + Therefore, we just check here the first error. + However, other constellations might exist in which case this should be adapted + so that other error messages are shown as well. 
+ """ + if not isinstance(errors, Sequence): + errors = list(errors) + if errors[0].validator == "additionalProperties": + return self._get_additional_properties_error_message(errors[0]) + else: + return self._get_default_error_message(errors=errors) def _get_additional_properties_error_message( self, - error: jsonschema.exceptions.ValidationError, + error: ValidationError, ) -> str: """Output all existing parameters when an unknown parameter is specified.""" altair_cls = self._get_altair_class_for_error(error) @@ -644,22 +776,21 @@ def _get_additional_properties_error_message( # "Additional properties are not allowed ('unknown' was unexpected)" # Line below extracts "unknown" from this string parameter_name = error.message.split("('")[-1].split("'")[0] - message = f"""\ -`{altair_cls.__name__}` has no parameter named '{parameter_name}' - -Existing parameter names are: -{param_names_table} -See the help for `{altair_cls.__name__}` to read the full description of these parameters""" - return message + cls_name = altair_cls.__name__ + return ( + f"`{cls_name}` has no parameter named '{parameter_name}'\n\n" + f"Existing parameter names are:\n{param_names_table}\n" + f"See the help for `{cls_name}` to read the full description of these parameters" + ) - def _get_altair_class_for_error( - self, error: jsonschema.exceptions.ValidationError - ) -> type[SchemaBase]: + def _get_altair_class_for_error(self, error: ValidationError) -> type[SchemaBase]: """ Try to get the lowest class possible in the chart hierarchy so it can be displayed in the error message. This should lead to more informative error messages pointing the user closer to the source of the issue. """ + from altair import vegalite + for prop_name in reversed(error.absolute_path): # Check if str as e.g. first item can be a 0 if isinstance(prop_name, str): @@ -671,24 +802,17 @@ def _get_altair_class_for_error( # Did not find a suitable class based on traversing the path so we fall # back on the class of the top-level object which created # the SchemaValidationError - cls = self.obj.__class__ + cls = type(self.obj) return cls @staticmethod - def _format_params_as_table(param_dict_keys: Iterable[str]) -> str: + def _format_params_as_table(param_view: KeysView[str]) -> str: """Format param names into a table so that they are easier to read.""" - param_names: tuple[str, ...] - name_lengths: tuple[int, ...] 
-        param_names, name_lengths = zip(
-            *[
-                (name, len(name))
-                for name in param_dict_keys
-                if name not in {"kwds", "self"}
-            ]
-        )
+        param_names: list[str] = [nm for nm in param_view if nm not in {"kwds", "self"}]
+
         # Worst case scenario with the same longest param name in the same
         # row for all columns
-        max_name_length = max(name_lengths)
+        max_name_length = len(max(param_view, key=len))
         max_column_width = 80
         # Output a square table if not too big (since it is easier to read)
         num_param_names = len(param_names)
@@ -702,7 +826,7 @@ def split_into_equal_parts(n: int, p: int) -> list[int]:
         column_heights = split_into_equal_parts(num_param_names, columns)
 
         # Section the param names into columns and compute their widths
-        param_names_columns: list[tuple[str, ...]] = []
+        param_names_columns: list[Sequence[str]] = []
         column_max_widths: list[int] = []
         last_end_idx: int = 0
         for ch in column_heights:
@@ -713,33 +837,32 @@ def split_into_equal_parts(n: int, p: int) -> list[int]:
             last_end_idx = ch + last_end_idx
 
         # Transpose the param name columns into rows to facilitate looping
-        param_names_rows: list[tuple[str, ...]] = []
-        for li in zip_longest(*param_names_columns, fillvalue=""):
-            param_names_rows.append(li)
         # Build the table as a string by iterating over and formatting the rows
         param_names_table: str = ""
-        for param_names_row in param_names_rows:
+        column_pad = 3
+        for param_names_row in zip_longest(*param_names_columns, fillvalue=""):
+            last_element = len(param_names_row) - 1
             for num, param_name in enumerate(param_names_row):
                 # Set column width based on the longest param in the column
-                max_name_length_column = column_max_widths[num]
-                column_pad = 3
-                param_names_table += "{:<{}}".format(
-                    param_name, max_name_length_column + column_pad
-                )
+                width = column_pad + column_max_widths[num]
+                param_names_table += "{:<{}}".format(param_name, width)
                 # Insert newlines and spacing after the last element in each row
-                if num == (len(param_names_row) - 1):
+                if num == last_element:
                     param_names_table += "\n"
         return param_names_table
 
     def _get_default_error_message(
        self,
-        errors: ValidationErrorList,
+        errors: Sequence[ValidationError],
    ) -> str:
        bullet_points: list[str] = []
-        errors_by_validator = _group_errors_by_validator(errors)
+        errors_by_validator: defaultdict[str, list[ValidationError]] = defaultdict(list)
+        for err in errors:
+            errors_by_validator[err.validator].append(err)  # type: ignore[index]
+
        if "enum" in errors_by_validator:
            for error in errors_by_validator["enum"]:
-                bullet_points.append(f"one of {error.validator_value}")
+                bullet_points.append(f"one of {error.validator_value}")  # noqa: PERF401
 
        if "type" in errors_by_validator:
            types = [f"'{err.validator_value}'" for err in errors_by_validator["type"]]
@@ -784,7 +907,7 @@ def _get_default_error_message(
            if validator not in {"enum", "type"}
        )
        message += "".join(it)
-        return message
+        return message.strip()
 
 
 class UndefinedType:
@@ -894,7 +1017,7 @@ class SchemaBase:
     """
 
     _schema: ClassVar[dict[str, Any] | Any] = None
-    _rootschema: ClassVar[dict[str, Any] | None] = None
+    _rootschema: ClassVar[dict[str, Any] | Any] = None
     _class_is_valid_at_instantiation: ClassVar[bool] = True
 
     def __init__(self, *args: Any, **kwds: Any) -> None:
@@ -959,14 +1082,10 @@ def __getattr__(self, attr):
         # reminder: getattr is called after the normal lookups
         if attr == "_kwds":
             raise AttributeError()
-        if attr in self._kwds:
+        elif attr in self._kwds:
             return self._kwds[attr]
         else:
-            try:
-                _getattr = super().__getattr__  # pyright: ignore[reportAttributeAccessIssue]
-            except AttributeError:
-                _getattr = super().__getattribute__
-            return _getattr(attr)
+            return getattr(super(), "__getattr__", super().__getattribute__)(attr)
 
     def __setattr__(self, item, val) -> None:
         self._kwds[item] = val
@@ -1045,7 +1164,7 @@ def to_dict(
             # NOTE: Don't raise `from err`, see `SchemaValidationError` doc
             try:
                 self.validate(result)
-            except jsonschema.ValidationError as err:
+            except ValidationError as err:
                 raise SchemaValidationError(self, err) from None
         return result
@@ -1120,17 +1239,16 @@ def from_dict(
         """
         if validate:
             cls.validate(dct)
-        converter = _FromDict(cls._default_wrapper_classes())
+        converter: type[_FromDict] | _FromDict = (
+            _FromDict
+            if _FromDict.hash_tps
+            else _FromDict(cls._default_wrapper_classes())
+        )
         return converter.from_dict(dct, cls)
 
     @classmethod
     def from_json(
-        cls,
-        json_string: str,
-        validate: bool = True,
-        **kwargs: Any,
-        # Type hints for this method would get rather complicated
-        # if we want to provide a more specific return type
+        cls, json_string: str, validate: bool = True, **kwargs: Any
     ) -> ChartType:
         """
         Instantiate the object from a valid JSON string.
@@ -1157,22 +1275,25 @@ def validate(
         cls, instance: dict[str, Any], schema: dict[str, Any] | None = None
     ) -> None:
         """Validate the instance against the class schema in the context of the rootschema."""
-        if schema is None:
-            schema = cls._schema
-        # For the benefit of mypy
-        assert schema is not None
-        validate_jsonschema(instance, schema, rootschema=cls._rootschema or cls._schema)
+        validate_jsonschema(
+            instance, schema or cls._schema, cls._rootschema or cls._schema
+        )
 
     @classmethod
     def resolve_references(cls, schema: dict[str, Any] | None = None) -> dict[str, Any]:
         """Resolve references in the context of this object's schema or root schema."""
-        schema_to_pass = schema or cls._schema
-        # For the benefit of mypy
-        assert schema_to_pass is not None
-        return _resolve_references(
-            schema=schema_to_pass,
-            rootschema=(cls._rootschema or cls._schema or schema),
-        )
+        rootschema = cls._rootschema or cls._schema
+        if rootschema is None:
+            name = type(cls).__name__
+            msg = (
+                f"{name}.resolve_references() provided only `None` values for:\n"
+                f"{schema=}, {cls._schema=}, {cls._rootschema=}.\n\n"
+                f"This variant indicates the class definition {name!r} is invalid."
+            )
+            raise TypeError(msg)
+        else:
+            resolved = _resolve_references(schema or cls._schema, rootschema)
+            return cast("dict[str, Any]", resolved)
 
     @classmethod
     def validate_property(
@@ -1279,10 +1400,55 @@ def _is_iterable(
     return not isinstance(obj, exclude) and isinstance(obj, Iterable)
 
 
+def _is_valid(spec: _JsonParameter, tp: type[SchemaBase], /) -> bool:
+    """
+    Return True if ``tp`` can be constructed from ``spec``.
+
+    Notes
+    -----
+    Don't use this if you need to know *details* of the errors in ``spec``.
+    """
+    return next(_validator(tp._schema, tp._rootschema).iter_errors(spec), None) is None
+
+
 def _passthrough(*args: Any, **kwds: Any) -> Any | dict[str, Any]:
     return args[0] if args else kwds
 
 
+def _hash_schema(
+    schema: _JsonParameter,
+    /,
+    *,
+    exclude: Iterable[str] = frozenset(
+        ("definitions", "title", "description", "$schema", "id")
+    ),
+) -> int:
+    """
+    Return the hash value for a ``schema``.
+
+    Parameters
+    ----------
+    schema
+        ``SchemaBase._schema``.
+    exclude
+        ``schema`` keys which are not considered when identifying equivalence.
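+
+    Notes
+    -----
+    The hash is computed over the JSON encoding of the filtered ``schema``,
+    so two schemas that differ only in ``exclude``-d metadata keys hash
+    identically.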
+ """ + if isinstance(schema, Mapping): + schema = {k: v for k, v in schema.items() if k not in exclude} + return hash(_HASH_ENCODER.encode(schema)) + + +def _subclasses(cls: type[TSchemaBase]) -> Iterator[type[TSchemaBase]]: + """Breadth-first sequence of all classes which inherit from ``cls``.""" + seen = set() + current: set[type[TSchemaBase]] = {cls} + while current: + seen |= current + current = set(chain.from_iterable(cls.__subclasses__() for cls in current)) + for cls in current - seen: + yield cls + + class _FromDict: """ Class used to construct SchemaBase class hierarchies from a dict. @@ -1292,54 +1458,38 @@ class _FromDict: specified in the ``wrapper_classes`` positional-only argument to the constructor. """ - _hash_exclude_keys = ("definitions", "title", "description", "$schema", "id") + hash_tps: ClassVar[defaultdict[int, deque[type[SchemaBase]]]] = defaultdict(deque) + """ + Maps unique schemas to corresponding types. - def __init__(self, wrapper_classes: Iterable[type[SchemaBase]], /) -> None: - # Create a mapping of a schema hash to a list of matching classes - # This lets us quickly determine the correct class to construct - self.class_dict: dict[int, list[type[SchemaBase]]] = defaultdict(list) - for tp in wrapper_classes: - if tp._schema is not None: - self.class_dict[self.hash_schema(tp._schema)].append(tp) + The logic is that after removing a subset of keys, some schemas are identical. - @classmethod - def hash_schema(cls, schema: dict[str, Any], use_json: bool = True) -> int: - """ - Compute a python hash for a nested dictionary which properly handles dicts, lists, sets, and tuples. + If there are multiple matches, we use the first one in the ``deque``. - At the top level, the function excludes from the hashed schema all keys - listed in `exclude_keys`. + ``_subclasses`` yields the results of a `breadth-first search`_, + so the first matching class is the most general match. - This implements two methods: one based on conversion to JSON, and one based - on recursive conversions of unhashable to hashable types; the former seems - to be slightly faster in several benchmarks. - """ - if cls._hash_exclude_keys and isinstance(schema, dict): - schema = { - key: val - for key, val in schema.items() - if key not in cls._hash_exclude_keys - } - if use_json: - s = json.dumps(schema, sort_keys=True) - return hash(s) - else: + .. _breadth-first search: + https://en.wikipedia.org/wiki/Breadth-first_search + """ + + hash_resolved: ClassVar[dict[int, Map]] = {} + """ + Maps unique schemas to their reference-resolved equivalent. - def _freeze(val): - if isinstance(val, dict): - return frozenset((k, _freeze(v)) for k, v in val.items()) - elif isinstance(val, set): - return frozenset(map(_freeze, val)) - elif isinstance(val, (list, tuple)): - return tuple(map(_freeze, val)) - else: - return val + Ensures that ``_resolve_references`` is evaluated **at most once**, per hash. + """ - return hash(_freeze(schema)) + def __init__(self, wrapper_classes: Iterator[type[SchemaBase]], /) -> None: + cls = type(self) + for tp in wrapper_classes: + if tp._schema is not None: + cls.hash_tps[_hash_schema(tp._schema)].append(tp) @overload + @classmethod def from_dict( - self, + cls, dct: TSchemaBase, tp: None = ..., schema: None = ..., @@ -1347,8 +1497,9 @@ def from_dict( default_class: Any = ..., ) -> TSchemaBase: ... 
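Stepping back from the overloads for a moment: the ``hash_tps`` registry above is easiest to understand end-to-end. Below is a minimal, self-contained sketch of the lookup idea, with simplified stand-ins (plain ``json.dumps`` in place of the module's ``_HASH_ENCODER``, and toy classes in place of ``SchemaBase`` subclasses):

    import json
    from collections import defaultdict, deque

    def hash_schema(schema: dict, exclude=("title", "description")) -> int:
        # Hash the sorted JSON encoding after dropping metadata-only keys
        clean = {k: v for k, v in schema.items() if k not in exclude}
        return hash(json.dumps(clean, sort_keys=True))

    class Base:
        _schema = {"type": "object"}

    class Sub(Base):
        _schema = {"type": "object", "title": "Sub"}

    hash_tps: defaultdict[int, deque[type]] = defaultdict(deque)
    for tp in (Base, Sub):  # breadth-first: parents registered before children
        hash_tps[hash_schema(tp._schema)].append(tp)

    # Both schemas hash identically once "title" is stripped, and the first
    # (most general) registrant wins the lookup.
    matches = hash_tps[hash_schema({"type": "object"})]
    assert next(iter(matches)) is Base

Registering parents before children (the breadth-first order produced by ``_subclasses``) is what makes "first match wins" equivalent to "most general match wins".
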
@overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any] | list[dict[str, Any]], tp: Any = ..., schema: Any = ..., @@ -1356,8 +1507,9 @@ def from_dict( default_class: type[TSchemaBase] = ..., # pyright: ignore[reportInvalidTypeVarUse] ) -> TSchemaBase: ... @overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any], tp: None = ..., schema: dict[str, Any] = ..., @@ -1365,8 +1517,9 @@ def from_dict( default_class: Any = ..., ) -> SchemaBase: ... @overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any], tp: type[TSchemaBase], schema: None = ..., @@ -1374,16 +1527,18 @@ def from_dict( default_class: Any = ..., ) -> TSchemaBase: ... @overload + @classmethod def from_dict( - self, + cls, dct: dict[str, Any] | list[dict[str, Any]], tp: type[TSchemaBase], schema: dict[str, Any], rootschema: dict[str, Any] | None = ..., default_class: Any = ..., ) -> Never: ... - def from_dict( - self, + @classmethod + def from_dict( # noqa: C901 + cls, dct: dict[str, Any] | list[dict[str, Any]] | TSchemaBase, tp: type[TSchemaBase] | None = None, schema: dict[str, Any] | None = None, @@ -1393,45 +1548,48 @@ def from_dict( """Construct an object from a dict representation.""" target_tp: Any current_schema: dict[str, Any] + hash_schema: int if isinstance(dct, SchemaBase): return dct elif tp is not None: current_schema = tp._schema + hash_schema = _hash_schema(current_schema) root_schema: dict[str, Any] = rootschema or tp._rootschema or current_schema target_tp = tp elif schema is not None: - # If there are multiple matches, we use the first one in the dict. - # Our class dict is constructed breadth-first from top to bottom, - # so the first class that matches is the most general match. current_schema = schema + hash_schema = _hash_schema(current_schema) root_schema = rootschema or current_schema - matches = self.class_dict[self.hash_schema(current_schema)] - target_tp = matches[0] if matches else default_class + matches = cls.hash_tps[hash_schema] + target_tp = next(iter(matches), default_class) else: msg = "Must provide either `tp` or `schema`, but not both." raise ValueError(msg) - from_dict = partial(self.from_dict, rootschema=root_schema) - # Can also return a list? - resolved = _resolve_references(current_schema, root_schema) - if "anyOf" in resolved or "oneOf" in resolved: - schemas = resolved.get("anyOf", []) + resolved.get("oneOf", []) - for possible in schemas: - try: - validate_jsonschema(dct, possible, rootschema=root_schema) - except jsonschema.ValidationError: - continue - else: + from_dict = partial(cls.from_dict, rootschema=root_schema) + if (resolved := cls.hash_resolved.get(hash_schema)) is None: + resolved = _resolve_references(current_schema, root_schema) + cls.hash_resolved[hash_schema] = resolved + if "anyOf" in resolved: + for possible in resolved["anyOf"]: + # NOTE: Instead of raise/except/continue + # Pre-"zero-cost" exceptions, this has a huge performance gain. 
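+                # Pulling at most one error from the lazy iterator detects a
+                # failing candidate without ever raising, whereas the old
+                # approach paid for a raised-and-caught ValidationError on
+                # every non-matching branch.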
+ # https://docs.python.org/3/whatsnew/3.11.html#misc + # https://github.com/python/cpython/blob/9b3749849eda4012261a112b22eb07f26fd345a9/InternalDocs/exception_handling.md + it_errs = _validator(possible, root_schema).iter_errors(dct) + if next(it_errs, None) is None: return from_dict(dct, schema=possible, default_class=target_tp) if _is_dict(dct): # TODO: handle schemas for additionalProperties/patternProperties - props: dict[str, Any] = resolved.get("properties", {}) - kwds = { - k: (from_dict(v, schema=props[k]) if k in props else v) - for k, v in dct.items() - } - return target_tp(**kwds) + if props := resolved.get("properties"): + kwds = { + k: (from_dict(v, schema=sch) if (sch := props.get(k)) else v) + for k, v in dct.items() + } + return target_tp(**kwds) + else: + return target_tp(**dct) elif _is_list(dct): item_schema: dict[str, Any] = resolved.get("items", {}) return target_tp([from_dict(k, schema=item_schema) for k in dct]) @@ -1446,6 +1604,8 @@ def __init__(self, prop: str, schema: dict[str, Any]) -> None: self.schema = schema def __get__(self, obj, cls): + from altair import vegalite + self.obj = obj self.cls = cls # The docs from the encoding class parameter (e.g. `bin` in X, Color,