refactor: use pydantic (#16)
* refactor: use pydantic instead of dataclassy for parsing/validation

* refactor: do not use pydantic for TokenlistManager; fix tests

* test: handle validation failures

* feat: add version flag and test

* test: add default setter

* test: clean up iso timestamps so tests work

* fix: missing error message

* refactor: clarify tag verification algorithm a bit
fubuloubu authored Aug 11, 2021
1 parent e121e11 commit b136ce0
Showing 9 changed files with 190 additions and 94 deletions.
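The heart of the change is swapping dataclassy models for pydantic ones: parse_obj() replaces the old from_dict() classmethod and .dict() replaces to_dict(). A minimal sketch of the pattern under pydantic v1 (the field names follow the Uniswap tokenlist schema; the real model lives in tokenlists/typing.py, whose diff is not shown on this page):

from typing import List, Optional

from pydantic import BaseModel


class TokenInfo(BaseModel):
    # Field names mirror the tokenlist schema; validation is automatic
    chainId: int
    address: str
    name: str
    symbol: str
    decimals: int
    tags: Optional[List[str]] = None


# parse_obj() raises pydantic.ValidationError on bad input
info = TokenInfo.parse_obj(
    {
        "chainId": 1,
        "address": "0x0000000000000000000000000000000000000000",
        "name": "Test Token",
        "symbol": "TEST",
        "decimals": 18,
    }
)
assert info.dict()["symbol"] == "TEST"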
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -48,7 +48,7 @@ markers = "fuzzing: Run Hypothesis fuzz test suite"
line_length = 100
force_grid_wrap = 0
include_trailing_comma = true
known_third_party = ["click", "dataclassy", "github", "hypothesis", "hypothesis_jsonschema", "pytest", "requests", "semantic_version", "setuptools"]
known_third_party = ["click", "github", "hypothesis", "hypothesis_jsonschema", "pydantic", "pytest", "requests", "semantic_version", "setuptools"]
known_first_party = ["tokenlists"]
multi_line_output = 3
use_parentheses = true
2 changes: 1 addition & 1 deletion setup.py
@@ -63,7 +63,7 @@
install_requires=[
"importlib-metadata ; python_version<'3.8'",
"click>=8.0.0",
"dataclassy>=0.10.3,<1.0",
"pydantic>=1.8.2,<2.0.0",
"pyyaml>=5.4.1,<6",
"semantic-version>=2.8.5,<3",
],
17 changes: 16 additions & 1 deletion tests/functional/test_schema_fuzzing.py
@@ -2,14 +2,29 @@
import requests # type: ignore
from hypothesis import HealthCheck, given, settings
from hypothesis_jsonschema import from_schema
+ from pydantic import ValidationError

from tokenlists import TokenList

TOKENLISTS_SCHEMA = "https://uniswap.org/tokenlist.schema.json"


+ def clean_iso_timestamps(tl: dict) -> dict:
+     """
+     Timestamps can be in any format, and our processing handles it okay.
+     However, for testing purposes, we want the output format to line up,
+     and unfortunately there is some ambiguity in ISO timestamp formats.
+     """
+     tl["timestamp"] = tl["timestamp"].replace("Z", "+00:00")
+     return tl


@pytest.mark.fuzzing
@given(token_list=from_schema(requests.get(TOKENLISTS_SCHEMA).json()))
@settings(suppress_health_check=(HealthCheck.too_slow,))
def test_schema(token_list):
-     assert TokenList.from_dict(token_list).to_dict() == token_list
+     try:
+         assert TokenList.parse_obj(token_list).dict() == clean_iso_timestamps(token_list)
+     except (ValidationError, ValueError):
+         pass  # Expect these kinds of errors
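Both "Z" and "+00:00" denote UTC in ISO 8601, but pydantic serializes datetimes with the numeric offset, which is why the fuzzed input is normalized before comparison. A standalone illustration of the ambiguity (not part of the diff):

from datetime import datetime, timezone

# datetime.fromisoformat() (before Python 3.11) rejects the "Z" suffix,
# so the test normalizes it to the numeric UTC offset first
ts = "2021-08-11T00:00:00Z".replace("Z", "+00:00")
parsed = datetime.fromisoformat(ts)
assert parsed.tzinfo == timezone.utc

# Round-tripping keeps "+00:00", matching pydantic's serialized output
assert parsed.isoformat() == "2021-08-11T00:00:00+00:00"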
17 changes: 12 additions & 5 deletions tests/functional/test_uniswap_examples.py
@@ -3,6 +3,7 @@
import github
import pytest # type: ignore
import requests # type: ignore
+ from pydantic import ValidationError

from tokenlists import TokenList

@@ -13,9 +14,15 @@


@pytest.mark.parametrize(
"token_list_file",
UNISWAP_REPO.get_contents("test/schema"), # type: ignore
"token_list_name",
[f.name for f in UNISWAP_REPO.get_contents("test/schema")], # type: ignore
)
- def test_uniswap_tokenlists(token_list_file):
-     token_list = requests.get(UNISWAP_RAW_URL + token_list_file.name).json()
-     assert TokenList.from_dict(token_list).to_dict() == token_list
+ def test_uniswap_tokenlists(token_list_name):
+     token_list = requests.get(UNISWAP_RAW_URL + token_list_name).json()
+
+     if "invalid" not in token_list_name:
+         assert TokenList.parse_obj(token_list).dict() == token_list
+     else:
+         with pytest.raises((ValidationError, ValueError)):
+             TokenList.parse_obj(token_list).dict()
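pytest.raises accepts a tuple of exception types, which is how one context manager covers both pydantic's ValidationError and the plain ValueError raised by custom validators. A tiny standalone check of that behavior (the function and names are illustrative, not from the repo):

import pytest


def parse_decimals(value):
    # Mimics a validator: wrong type and out-of-range fail differently
    if not isinstance(value, int):
        raise TypeError("decimals must be an int")
    if value < 0:
        raise ValueError("decimals must be non-negative")
    return value


def test_bad_decimals():
    # Either exception type in the tuple satisfies the assertion
    with pytest.raises((TypeError, ValueError)):
        parse_decimals(-1)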
4 changes: 2 additions & 2 deletions tests/integration/conftest.py
@@ -3,14 +3,14 @@
import pytest # type: ignore
from click.testing import CliRunner

- from tokenlists import TokenListManager, _cli
+ from tokenlists import _cli, config


@pytest.fixture
def runner(monkeypatch):
runner = CliRunner()
with runner.isolated_filesystem() as temp_dir:
-         monkeypatch.setattr(_cli, "TokenListManager", lambda: TokenListManager(Path(temp_dir)))
+         monkeypatch.setattr(config, "DEFAULT_CACHE_PATH", Path(temp_dir))
yield runner


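This fixture works because TokenListManager now reads config.DEFAULT_CACHE_PATH inside __init__ (see manager.py below), so patching the module attribute redirects every manager the CLI constructs. A minimal sketch of why the call-time lookup matters (the names here are stand-ins, not the real modules):

import types

# Stand-in for the tokenlists.config module
config = types.SimpleNamespace(DEFAULT_CACHE_PATH="/home/user/.tokenlists")


class Manager:
    def __init__(self):
        # Looked up when the manager is built, not at import time
        self.cache_folder = config.DEFAULT_CACHE_PATH


config.DEFAULT_CACHE_PATH = "/tmp/sandbox"  # what monkeypatch.setattr does
assert Manager().cache_folder == "/tmp/sandbox"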
17 changes: 17 additions & 0 deletions tests/integration/test_cli.py
@@ -1,6 +1,14 @@
+ from tokenlists.version import version

TEST_URI = "tokens.1inch.eth"


+ def test_version(runner, cli):
+     result = runner.invoke(cli, ["--version"])
+     assert result.exit_code == 0
+     assert result.output.strip() == version


def test_empty_list(runner, cli):
result = runner.invoke(cli, ["list"])
assert result.exit_code == 0
@@ -35,3 +43,12 @@ def test_remove(runner, cli):
result = runner.invoke(cli, ["list"])
assert result.exit_code == 0
assert "No tokenlists exist" in result.output


+ def test_default(runner, cli):
+     result = runner.invoke(cli, ["install", TEST_URI])
+     assert result.exit_code == 0
+
+     result = runner.invoke(cli, ["set-default", "1inch"])
+     assert result.exit_code == 0
+     assert "1inch" in result.output
11 changes: 9 additions & 2 deletions tokenlists/_cli.py
@@ -16,6 +16,7 @@ def choices(self):


@click.group()
+ @click.version_option(message="%(version)s", package_name="tokenlists")
def cli():
"""
Utility for working with the `py-tokenlists` installed token lists
@@ -59,6 +60,8 @@ def set_default(name):

manager.set_default_tokenlist(name)

click.echo(f"Default tokenlist is now: '{manager.default_tokenlist}'")


@cli.command(short_help="Display the names and versions of all installed tokenlists")
@click.option("--search", default="")
@@ -76,7 +79,7 @@ def list_tokens(search, tokenlist_name, chain_id):
lambda t: pattern.match(t.symbol),
manager.get_tokens(tokenlist_name, chain_id),
):
click.echo("{address} ({symbol})".format(**token_info.to_dict()))
click.echo("{address} ({symbol})".format(**token_info.dict()))


@cli.command(short_help="Display the info for a particular token")
@@ -91,6 +94,10 @@ def token_info(symbol, tokenlist_name, chain_id, case_insensitive):
raise click.ClickException("No tokenlists available!")

token_info = manager.get_token_info(symbol, tokenlist_name, chain_id, case_insensitive)
+     token_info = token_info.dict()
+
+     if "tags" not in token_info:
+         token_info["tags"] = ""

click.echo(
"""
@@ -101,6 +108,6 @@ def token_info(symbol, tokenlist_name, chain_id, case_insensitive):
Decimals: {decimals}
Tags: {tags}
""".format(
-             tags=[], **token_info.to_dict()
+             **token_info
)
)
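The new @click.version_option line is what backs test_version above: package_name tells click 8 to resolve the version from installed package metadata, and message="%(version)s" drops the default "prog, version" prefix so the output is the bare version string. A self-contained sketch of the same wiring (assumes the tokenlists package is installed so the metadata lookup succeeds):

import click


@click.group()
@click.version_option(message="%(version)s", package_name="tokenlists")
def cli():
    """Mirror of the tokenlists CLI group, for illustration only."""


if __name__ == "__main__":
    cli()  # `--version` now prints just the version, e.g. `0.1.0`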
45 changes: 23 additions & 22 deletions tokenlists/manager.py
@@ -1,48 +1,48 @@
- import json
from pathlib import Path
- from typing import Dict, Iterator, List, Optional
+ from typing import Iterator, List, Optional

import requests # type: ignore
- from dataclassy import dataclass

from tokenlists import config
from tokenlists.typing import ChainId, TokenInfo, TokenList, TokenSymbol


- @dataclass
class TokenListManager:
-     cache_folder: Path = config.DEFAULT_CACHE_PATH
-     installed_tokenlists: Dict[str, TokenList] = {}
-     default_tokenlist: Optional[str] = config.DEFAULT_TOKENLIST

-     def __post_init__(self):
+     def __init__(self):
# NOTE: Folder should always exist, even if empty
+         self.cache_folder = config.DEFAULT_CACHE_PATH
self.cache_folder.mkdir(exist_ok=True)

# Load all the ones cached on disk
+         self.installed_tokenlists = {}
for path in self.cache_folder.glob("*.json"):
-             with path.open() as fp:
-                 tokenlist = TokenList.from_dict(json.load(fp))
-             self.installed_tokenlists[tokenlist.name] = tokenlist
+             tokenlist = TokenList.parse_file(path)
+             self.installed_tokenlists[tokenlist.name] = tokenlist

+         self.default_tokenlist = config.DEFAULT_TOKENLIST
-         if not self.default_tokenlist:
-             # Default might be cached on disk (does not override config)
-             default_tokenlist_cachefile = self.cache_folder.joinpath(".default")
-
-             if default_tokenlist_cachefile.exists():
-                 self.default_tokenlist = default_tokenlist_cachefile.read_text()
+
+         # Default might be cached on disk (does not override config)
+         default_tokenlist_cachefile = self.cache_folder.joinpath(".default")
+         if not self.default_tokenlist and default_tokenlist_cachefile.exists():
+             self.default_tokenlist = default_tokenlist_cachefile.read_text()
+         elif len(self.installed_tokenlists) > 0:
+             # Not cached on disk, use first installed list
+             self.default_tokenlist = next(iter(self.installed_tokenlists))

def install_tokenlist(self, uri: str):
# This supports ENS lists
if uri.endswith(".eth"):
uri = config.UNISWAP_ENS_TOKENLISTS_HOST.format(uri)

# Load and store the tokenlist
-         tokenlist = TokenList.from_dict(requests.get(uri).json())
+         tokenlist = TokenList.parse_obj(requests.get(uri).json())
self.installed_tokenlists[tokenlist.name] = tokenlist

# Cache it on disk for later instances
+         self.cache_folder.mkdir(exist_ok=True)
token_list_file = self.cache_folder.joinpath(f"{tokenlist.name}.json")
-         with token_list_file.open("w") as fp:
-             json.dump(tokenlist.to_dict(), fp)
+         token_list_file.write_text(tokenlist.json())

def remove_tokenlist(self, tokenlist_name: str) -> None:
tokenlist = self.installed_tokenlists[tokenlist_name]
@@ -62,6 +62,7 @@ def set_default_tokenlist(self, name: str) -> None:
self.default_tokenlist = name

# Cache it on disk too
+         self.cache_folder.mkdir(exist_ok=True)
self.cache_folder.joinpath(".default").write_text(name)

def available_tokenlists(self) -> List[str]:
@@ -85,7 +86,7 @@ def get_tokens(
chain_id: ChainId = 1,  # Ethereum Mainnet
) -> Iterator[TokenInfo]:
tokenlist = self.get_tokenlist(token_listname)
-         return filter(lambda t: t.chainId == chain_id, iter(tokenlist))
+         return filter(lambda t: t.chainId == chain_id, tokenlist.tokens)

def get_token_info(
self,
Expand All @@ -96,7 +97,7 @@ def get_token_info(
) -> TokenInfo:
tokenlist = self.get_tokenlist(token_listname)

-         token_iter = filter(lambda t: t.chainId == chain_id, iter(tokenlist))
+         token_iter = filter(lambda t: t.chainId == chain_id, tokenlist.tokens)
token_iter = (
filter(lambda t: t.symbol == symbol, token_iter)
if case_insensitive
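The manager now leans on pydantic's built-in persistence: TokenList.parse_file() collapses the old open / json.load / from_dict sequence into one validating call, and .json() replaces json.dump(tokenlist.to_dict(), fp). A minimal round-trip sketch of those two methods (TinyList is a stand-in, not the real tokenlists.typing.TokenList model):

from pathlib import Path
from typing import List

from pydantic import BaseModel


class TinyList(BaseModel):  # stand-in for tokenlists.typing.TokenList
    name: str
    tokens: List[str] = []


path = Path("example.json")  # written to the current directory
path.write_text(TinyList(name="demo", tokens=["TEST"]).json())

loaded = TinyList.parse_file(path)  # read + parse + validate in one call
assert loaded.name == "demo"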
(diff for 1 more changed file not shown)
