From 037f06299b5414f6f89c3128e67b0b73e808b952 Mon Sep 17 00:00:00 2001
From: Sheng Lundquist
Date: Fri, 26 Apr 2024 11:42:35 -0700
Subject: [PATCH] Adding ability to fork from existing chain to interactive hyperdrive (#1440)

---
 .../interactive_hyperdrive_forking_example.py | 80 +++++++++++++++++++
 .../hyperdrive/interactive/i_local_chain.py   | 13 ++-
 2 files changed, 92 insertions(+), 1 deletion(-)
 create mode 100644 examples/interactive_hyperdrive_forking_example.py

diff --git a/examples/interactive_hyperdrive_forking_example.py b/examples/interactive_hyperdrive_forking_example.py
new file mode 100644
index 0000000000..6fa4436b38
--- /dev/null
+++ b/examples/interactive_hyperdrive_forking_example.py
@@ -0,0 +1,80 @@
+"""Example script for using interactive hyperdrive to fork a remote chain.
+"""
+
+# %%
+# We expect this to be a script, hence no need for uppercase naming
+# pylint: disable=invalid-name
+
+from __future__ import annotations
+
+from fixedpointmath import FixedPoint
+
+from agent0 import IHyperdrive, ILocalChain, PolicyZoo
+from agent0.core.base.make_key import make_private_key
+
+# %%
+
+# The chain to fork, e.g., the Sepolia testnet.
+rpc_uri = "http://uri.to.sepolia.testnet"
+# The block number to fork at. If None, forks at the latest block.
+fork_block_number: int | None = None
+# The address of the registry on the chain, used to look up the deployed hyperdrive pool.
+registry_address = "0xba5156E697d39a03EDA824C19f375383F6b759EA"
+
+# %%
+# Launch a local anvil chain forked from the RPC URI.
+chain = ILocalChain(fork_uri=rpc_uri, fork_block_number=fork_block_number)
+
+hyperdrive_address = IHyperdrive.get_hyperdrive_addresses_from_registry(registry_address, chain)["sdai_14_day"]
+
+# Note that we use IHyperdrive here instead of ILocalHyperdrive,
+# as ILocalHyperdrive deploys a new pool, whereas we want to connect to an existing pool
+# on the forked local chain.
+# TODO this prevents us from using the data tools provided by ILocalHyperdrive. Ideally we could
+# load an ILocalHyperdrive from an IHyperdrive object that connects to an existing pool and populates
+# the database. This is blocked by needing an archive node; the fix here would be to
+# (1) use event data instead, and (2) build historical data from event data.
+hyperdrive_config = IHyperdrive.Config()
+hyperdrive_pool = IHyperdrive(chain, hyperdrive_address, hyperdrive_config)
+
+# %%
+
+# Launch a new agent
+private_key = make_private_key()
+
+# Init from private key and attach policy
+hyperdrive_agent0 = hyperdrive_pool.init_agent(
+    private_key=private_key,
+    policy=PolicyZoo.random,
+    # The configuration for the underlying policy
+    policy_config=PolicyZoo.random.Config(rng_seed=123),
+)
+
+# %%
+# We add funds to the agent.
+# TODO this will likely fail when we fork from mainnet, as we call `mint`
+# on the base token. This will work on testnet, as we allow minting on the testnet
+# base token.
+hyperdrive_agent0.add_funds(base=FixedPoint(1000), eth=FixedPoint(100))
+
+# Set max approval
+hyperdrive_agent0.set_max_approval()
+
+# %%
+
+# Make trades.
+# The return values here mirror the various events emitted by these contract calls.
+# These functions are blocking; async versions of the trades below would be
+# relatively easy to expose.
+open_long_event = hyperdrive_agent0.open_long(base=FixedPoint(111))
+close_long_event = hyperdrive_agent0.close_long(
+    maturity_time=open_long_event.maturity_time, bonds=open_long_event.bond_amount
+)
+
+
+# %%
+random_trade_events = []
+for i in range(10):
+    # NOTE Since a policy can execute multiple trades per action, the output is a list of events
+    trade_events: list = hyperdrive_agent0.execute_policy_action()
+    random_trade_events.extend(trade_events)
diff --git a/src/agent0/core/hyperdrive/interactive/i_local_chain.py b/src/agent0/core/hyperdrive/interactive/i_local_chain.py
index 9d4a342a9b..ad85ccde26 100644
--- a/src/agent0/core/hyperdrive/interactive/i_local_chain.py
+++ b/src/agent0/core/hyperdrive/interactive/i_local_chain.py
@@ -62,7 +62,7 @@ class Config:
         experimental_data_threading: bool = False
         """Flag for running the data pipeline in a separate thread. Defaults to False."""
 
-    def __init__(self, config: Config | None = None):
+    def __init__(self, config: Config | None = None, fork_uri: str | None = None, fork_block_number: int | None = None):
         """Initialize the Chain class that connects to an existing chain.
 
         Also launch a postgres docker container for gathering data.
@@ -88,6 +88,12 @@ def __init__(self, config: Config | None = None):
            ]
        if config.block_time is not None:
            anvil_launch_args.extend(("--block-time", str(config.block_time)))
+
+        if fork_uri is not None:
+            anvil_launch_args.extend(["--fork-url", fork_uri])
+        if fork_block_number is not None:
+            anvil_launch_args.extend(["--fork-block-number", str(fork_block_number)])
+
        # This process never stops, so we run this in the background and explicitly clean up later
        self.anvil_process = subprocess.Popen(  # pylint: disable=consider-using-with
            # Suppressing output of anvil
@@ -96,6 +102,11 @@ def __init__(self, config: Config | None = None):
            stderr=subprocess.STDOUT,
        )
 
+        # TODO HACK: wait for anvil to start; ideally we would watch anvil's stdout for a ready message.
+        # Forking takes a bit longer to spin up, so we only sleep when forking.
+        if fork_uri is not None:
+            time.sleep(2)
+
        super().__init__(f"http://127.0.0.1:{str(config.chain_port)}")
 
        # Remove protocol and replace . and : with dashes
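
Note on the fixed time.sleep(2) added above: as the TODO HACK comment says, a more robust approach would be to wait until the forked anvil node actually responds. The sketch below is a hypothetical helper, not part of this patch or the agent0 API; the helper name, timeout values, and use of the requests library are assumptions. It polls the local JSON-RPC endpoint with a plain eth_blockNumber request until it answers or a timeout expires.

# Hypothetical readiness check sketched as one possible replacement for the fixed sleep;
# `_wait_for_anvil` is an assumed name and is not provided by agent0.
import time

import requests


def _wait_for_anvil(rpc_uri: str, timeout: float = 30.0, poll_interval: float = 0.25) -> None:
    """Poll the anvil JSON-RPC endpoint until it responds or `timeout` seconds elapse."""
    payload = {"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            response = requests.post(rpc_uri, json=payload, timeout=1)
            if response.ok and "result" in response.json():
                # Anvil answered a basic RPC call, so the forked chain is ready.
                return
        except requests.exceptions.RequestException:
            # Anvil is not accepting connections yet; retry after a short pause.
            pass
        time.sleep(poll_interval)
    raise TimeoutError(f"anvil at {rpc_uri} did not become ready within {timeout} seconds")


# Example usage inside ILocalChain.__init__, before calling super().__init__:
# _wait_for_anvil(f"http://127.0.0.1:{str(config.chain_port)}")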