From 7c2cb0638fcaafb12d41a090c75b86eba698fcba Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sat, 18 May 2024 18:55:41 -0400 Subject: [PATCH 01/26] drafting stuff up --- env.yml | 137 ++++++++++++++++++++++++++++ qbraid_algorithms/qrc/krylov.py | 2 +- qbraid_algorithms/qrc/krylov_foo.py | 66 ++++++++++++++ qbraid_algorithms/qrc/test_rbh.py | 84 +++++++++++++++++ 4 files changed, 288 insertions(+), 1 deletion(-) create mode 100644 env.yml create mode 100644 qbraid_algorithms/qrc/krylov_foo.py create mode 100644 qbraid_algorithms/qrc/test_rbh.py diff --git a/env.yml b/env.yml new file mode 100644 index 0000000..ccd2ff0 --- /dev/null +++ b/env.yml @@ -0,0 +1,137 @@ +name: qbraidAlgEnv +channels: + - pytorch + - defaults +dependencies: + - blas=1.0=mkl + - brotli-python=1.0.9=py310hcec6c5f_8 + - bzip2=1.0.8=h6c40b1e_6 + - ca-certificates=2024.3.11=hecd8cb5_0 + - certifi=2024.2.2=py310hecd8cb5_0 + - charset-normalizer=2.0.4=pyhd3eb1b0_0 + - filelock=3.13.1=py310hecd8cb5_0 + - freetype=2.12.1=hd8bbffd_0 + - gmp=6.2.1=he9d5cce_3 + - gmpy2=2.1.2=py310hd5de756_0 + - idna=3.7=py310hecd8cb5_0 + - intel-openmp=2023.1.0=ha357a0b_43548 + - jinja2=3.1.3=py310hecd8cb5_0 + - jpeg=9e=h6c40b1e_1 + - lcms2=2.12=hf1fd2bf_0 + - lerc=3.0=he9d5cce_0 + - libcxx=14.0.6=h9765a3e_0 + - libdeflate=1.17=hb664fd8_1 + - libffi=3.4.4=hecd8cb5_1 + - libjpeg-turbo=2.0.0=hca72f7f_0 + - libpng=1.6.39=h6c40b1e_0 + - libtiff=4.5.1=hcec6c5f_0 + - libwebp-base=1.3.2=h6c40b1e_0 + - lz4-c=1.9.4=hcec6c5f_1 + - markupsafe=2.1.3=py310h6c40b1e_0 + - mkl=2023.1.0=h8e150cf_43560 + - mkl-service=2.4.0=py310h6c40b1e_1 + - mkl_fft=1.3.8=py310h6c40b1e_0 + - mkl_random=1.2.4=py310ha357a0b_0 + - mpc=1.1.0=h6ef4df4_1 + - mpfr=4.0.2=h9066e36_1 + - mpmath=1.3.0=py310hecd8cb5_0 + - ncurses=6.4=hcec6c5f_0 + - networkx=3.1=py310hecd8cb5_0 + - numpy=1.26.4=py310h827a554_0 + - numpy-base=1.26.4=py310ha186be2_0 + - openjpeg=2.4.0=h66ea3da_0 + - openssl=3.0.13=hca72f7f_1 + - pillow=10.3.0=py310h6c40b1e_0 + - pip=24.0=py310hecd8cb5_0 + - pysocks=1.7.1=py310hecd8cb5_0 + - python=3.10.14=h5ee71fb_1 + - pytorch=2.2.2=py3.10_0 + - pyyaml=6.0.1=py310h6c40b1e_0 + - readline=8.2=hca72f7f_0 + - requests=2.31.0=py310hecd8cb5_1 + - setuptools=69.5.1=py310hecd8cb5_0 + - sqlite=3.45.3=h6c40b1e_0 + - sympy=1.12=py310hecd8cb5_0 + - tbb=2021.8.0=ha357a0b_0 + - tk=8.6.14=h4d00af3_0 + - torchaudio=2.2.2=py310_cpu + - torchvision=0.17.2=py310_cpu + - typing_extensions=4.11.0=py310hecd8cb5_0 + - tzdata=2024a=h04d1e81_0 + - urllib3=2.2.1=py310hecd8cb5_0 + - wheel=0.43.0=py310hecd8cb5_0 + - xz=5.4.6=h6c40b1e_1 + - yaml=0.2.5=haf1e3a3_0 + - zlib=1.2.13=h4b97444_1 + - zstd=1.5.5=hc035e20_2 + - pip: + - alabaster==0.7.16 + - appnope==0.1.4 + - astroid==3.2.0 + - asttokens==2.4.1 + - babel==2.15.0 + - black==24.4.2 + - click==8.1.7 + - comm==0.2.2 + - coverage==7.5.1 + - debugpy==1.8.1 + - decorator==5.1.1 + - dill==0.3.8 + - docutils==0.20.1 + - exceptiongroup==1.2.1 + - executing==2.0.1 + - fsspec==2024.5.0 + - imagesize==1.4.1 + - iniconfig==2.0.0 + - ipykernel==6.29.4 + - ipython==8.24.0 + - isort==5.13.2 + - jedi==0.19.1 + - jupyter-client==8.6.1 + - jupyter-core==5.7.2 + - markdown-it-py==3.0.0 + - matplotlib-inline==0.1.7 + - mccabe==0.7.0 + - mdurl==0.1.2 + - mypy-extensions==1.0.0 + - nest-asyncio==1.6.0 + - packaging==24.0 + - parso==0.8.4 + - pathspec==0.12.1 + - pexpect==4.9.0 + - platformdirs==4.2.2 + - pluggy==1.5.0 + - prompt-toolkit==3.0.43 + - psutil==5.9.8 + - ptyprocess==0.7.0 + - pure-eval==0.2.2 + - 
pygments==2.18.0 + - pylint==3.2.0 + - pytest==8.2.0 + - pytest-cov==5.0.0 + - python-dateutil==2.9.0.post0 + - pyzmq==26.0.3 + - qbraid-algorithms==0.1.0 + - qbraid-cli==0.8.0 + - qbraid-core==0.1.6 + - rich==13.7.1 + - shellingham==1.5.4 + - six==1.16.0 + - snowballstemmer==2.2.0 + - sphinx==7.3.7 + - sphinx-autodoc-typehints==2.1.0 + - sphinx-rtd-theme==2.0.0 + - sphinxcontrib-applehelp==1.0.8 + - sphinxcontrib-devhelp==1.0.6 + - sphinxcontrib-htmlhelp==2.0.5 + - sphinxcontrib-jquery==4.1 + - sphinxcontrib-jsmath==1.0.1 + - sphinxcontrib-qthelp==1.0.7 + - sphinxcontrib-serializinghtml==1.1.10 + - stack-data==0.6.3 + - tomli==2.0.1 + - tomlkit==0.12.5 + - tornado==6.4 + - traitlets==5.14.3 + - typer==0.12.3 + - wcwidth==0.2.13 diff --git a/qbraid_algorithms/qrc/krylov.py b/qbraid_algorithms/qrc/krylov.py index 777e323..468415a 100644 --- a/qbraid_algorithms/qrc/krylov.py +++ b/qbraid_algorithms/qrc/krylov.py @@ -87,4 +87,4 @@ def normalize_register(self): TODO: Implement the normalization logic. """ - raise NotImplementedError + raise NotImplementedError \ No newline at end of file diff --git a/qbraid_algorithms/qrc/krylov_foo.py b/qbraid_algorithms/qrc/krylov_foo.py new file mode 100644 index 0000000..9b517cf --- /dev/null +++ b/qbraid_algorithms/qrc/krylov_foo.py @@ -0,0 +1,66 @@ +# krylov.py +from dataclasses import dataclass +from bloqade.emulate.ir.emulator import Register +from bloqade.emulate.ir.state_vector import RydbergHamiltonian +from bloqade.atom_arrangement import Square +from bloqade.rydberg import RydbergInteraction +import numpy as np + +class KrylovOptions: + """Class that describes options for a Krylov subspace method.""" + def __init__(self, progress=False, progress_name="emulating", normalize_step=1, normalize_finally=True, tol=1e-7): + self.progress = progress + self.progress_name = progress_name + self.normalize_step = normalize_step + self.normalize_finally = normalize_finally + self.tol = tol + +@dataclass +class KrylovEvolution: + """Class that describes a time evolution using Krylov subspace methods.""" + reg: Register + start_clock: float + durations: list[float] + hamiltonian: RydbergHamiltonian + options: KrylovOptions + + def generate_krylov_basis(self, H, psi0, m): + """Generates the first m Krylov basis vectors.""" + n = len(psi0) + K = np.zeros((n, m), dtype=complex) + K[:, 0] = psi0 / np.linalg.norm(psi0) + for j in range(1, m): + K[:, j] = H @ K[:, j-1] + for k in range(j): + K[:, j] -= np.dot(K[:, k], K[:, j]) * K[:, k] + K[:, j] /= np.linalg.norm(K[:, j]) + return K + + def gram_schmidt(self, V): + """Orthonormalizes the vectors using the Gram-Schmidt process.""" + Q, R = np.linalg.qr(V) + return Q + + def krylov_evolution(self, H, psi0, t, m): + """Projects H onto the Krylov subspace and computes the time evolution.""" + K = self.generate_krylov_basis(H, psi0, m) + H_m = K.T.conj() @ H @ K + exp_Hm = expm(-1j * H_m * t) + psi_t = K @ exp_Hm @ K.T.conj() @ psi0 + return psi_t + + def emulate_step(self, step, clock, duration): + """Simulate a single time step of quantum evolution using the Krylov subspace method.""" + try: + psi0 = self.reg.state_vector + evolved_state = self.krylov_evolution(self.hamiltonian.rydberg, psi0, duration, len(self.durations)) + self.reg.state_vector = evolved_state + except Exception as e: + raise NotImplementedError(f"Emulation step failed: {e}") + + def normalize_register(self): + """Normalize the quantum register if specified in options.""" + if self.options.normalize_finally: + norm = 
np.linalg.norm(self.reg.state_vector) + if norm > self.options.tol: + self.reg.state_vector /= norm diff --git a/qbraid_algorithms/qrc/test_rbh.py b/qbraid_algorithms/qrc/test_rbh.py new file mode 100644 index 0000000..14070be --- /dev/null +++ b/qbraid_algorithms/qrc/test_rbh.py @@ -0,0 +1,84 @@ +# krylov.py +from dataclasses import dataclass +from bloqade.emulate.ir.emulator import Register +from bloqade.emulate.ir.state_vector import RydbergHamiltonian +from bloqade.atom_arrangement import Square +from scipy.linalg import expm +import numpy as np + +class KrylovOptions: + """Class that describes options for a Krylov subspace method.""" + def __init__(self, progress=False, progress_name="emulating", normalize_step=1, normalize_finally=True, tol=1e-7): + self.progress = progress + self.progress_name = progress_name + self.normalize_step = normalize_step + self.normalize_finally = normalize_finally + self.tol = tol + +@dataclass +class KrylovEvolution: + """Class that describes a time evolution using Krylov subspace methods.""" + reg: Register + start_clock: float + durations: list[float] + hamiltonian: RydbergHamiltonian + options: KrylovOptions + + def generate_krylov_basis(self, H, psi0, m): + """Generates the first m Krylov basis vectors.""" + n = len(psi0) + K = np.zeros((n, m), dtype=complex) + K[:, 0] = psi0 / np.linalg.norm(psi0) + for j in range(1, m): + K[:, j] = H @ K[:, j-1] + for k in range(j): + K[:, j] -= np.dot(K[:, k], K[:, j]) * K[:, k] + K[:, j] /= np.linalg.norm(K[:, j]) + return K + + def gram_schmidt(self, V): + """Orthonormalizes the vectors using the Gram-Schmidt process.""" + Q, R = np.linalg.qr(V) + return Q + + def krylov_evolution(self, psi0, t, m): + """Projects the Hamiltonian onto the Krylov subspace and computes the time evolution.""" + H = self.hamiltonian.rydberg # Access the correct attribute + K = self.generate_krylov_basis(H, psi0, m) + H_m = K.T.conj() @ H @ K + exp_Hm = expm(-1j * H_m * t) + psi_t = K @ exp_Hm @ K.T.conj() @ psi0 + return psi_t + + def emulate_step(self, step, clock, duration): + """Simulate a single time step of quantum evolution using the Krylov subspace method.""" + try: + psi0 = self.reg.state_vector + evolved_state = self.krylov_evolution(psi0, duration, len(self.durations)) + self.reg.state_vector = evolved_state + except Exception as e: + raise NotImplementedError(f"Emulation step failed: {e}") + + def normalize_register(self): + """Normalize the quantum register if specified in options.""" + if self.options.normalize_finally: + norm = np.linalg.norm(self.reg.state_vector) + if norm > self.options.tol: + self.reg.state_vector /= norm + +# Usage Example +# Define the initial state +initial_state = np.array([1, 0, 0, 0], dtype=complex) + +# Create a KrylovEvolution instance +krylov_options = KrylovOptions() +krylov_evolution = KrylovEvolution( + reg=Register(initial_state), + start_clock=0.0, + durations=[0.1, 0.2, 0.3], + hamiltonian=None, # This will be initialized in __post_init__ + options=krylov_options +) + +# Simulate the evolution (example step) +krylov_evolution.emulate_step(step=0, clock=0.0, duration=0.1) From 9d618f88e13c406d5a2a3767ba7249e09de8dd1a Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sun, 19 May 2024 16:11:33 -0400 Subject: [PATCH 02/26] feat(qrc): Implement Magnus expansion for simulating quantum dynamics (#13) * Initial commit * figuring out time series * add back esn and reservoir into code * finished(?) 
with esn time series * finished time series stuff; added to qbraid-algorithms/datasets * time series esn updates * fixed time series * add ruff * add pr workflow * update dev version + warning * isort fix * update format workflow name * fix docs + ruff * add qBraid bot * module structure + placeholder classes * format * update QRC template / placeholders * reflog * reflog (#12) * feat(qrc): add MagnusExpansion and test file * added magnus_expansion.py file * feat(qrc): add MagnusExpansion --------- Co-authored-by: Ryan Hill Co-authored-by: Rohan Jain --- .gitignore | 3 ++ qbraid_algorithms/qrc/__init__.py | 4 +- qbraid_algorithms/qrc/magnus_expansion.py | 41 +++++++++++++++++++ tests/test_qbraid_algorithms/__init__.py | 1 + .../test_qrc/__init__.py | 0 .../test_qrc/test_magnus_expansion.py | 20 +++++++++ 6 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 qbraid_algorithms/qrc/magnus_expansion.py create mode 100644 tests/test_qbraid_algorithms/__init__.py create mode 100644 tests/test_qbraid_algorithms/test_qrc/__init__.py create mode 100644 tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py diff --git a/.gitignore b/.gitignore index adfc4ae..05227be 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,9 @@ __pycache__/ # C extensions *.so +# Desktop +.DS_Store + # Distribution / packaging .Python build/ diff --git a/qbraid_algorithms/qrc/__init__.py b/qbraid_algorithms/qrc/__init__.py index dba2d9a..b3efdba 100644 --- a/qbraid_algorithms/qrc/__init__.py +++ b/qbraid_algorithms/qrc/__init__.py @@ -18,9 +18,11 @@ :toctree: ../stubs/ QRCModel + MagnusExpansion """ from .model import QRCModel +from .magnus_expansion import MagnusExpansion -__all__ = ["QRCModel"] +__all__ = ["QRCModel", "MagnusExpansion"] diff --git a/qbraid_algorithms/qrc/magnus_expansion.py b/qbraid_algorithms/qrc/magnus_expansion.py new file mode 100644 index 0000000..b183ae1 --- /dev/null +++ b/qbraid_algorithms/qrc/magnus_expansion.py @@ -0,0 +1,41 @@ +from scipy.linalg import expm + +class MagnusExpansion: + def __init__(self, H): + self.H = H + + def commutator(self, A, B): + return A @ B - B @ A + + def compute_magnus_terms(self, t): + H_t = self.H * t + Ω1 = H_t + + # Second-order term + comm_H1_H2 = self.commutator(self.H, self.H) + Ω2 = 0.5 * (comm_H1_H2 * t**2) + + # Third-order term + comm_H1_comm_H2_H3 = self.commutator(self.H, self.commutator(self.H, self.H)) + comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.H, self.H), self.H) + Ω3 = (1/6) * (comm_H1_comm_H2_H3 + comm_H3_comm_H2_H1) * t**3 + + # Fourth-order term + comm_H1_comm_H2_comm_H3_H4 = self.commutator(self.H, self.commutator(self.H, self.commutator(self.H, self.H))) + comm_H4_comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.commutator(self.H, self.H), self.H), self.H) + Ω4 = (1/24) * (comm_H1_comm_H2_comm_H3_H4 + comm_H4_comm_H3_comm_H2_H1) * t**4 + + return Ω1 + Ω2 + Ω3 + Ω4 + + def time_evolution_operator(self, t): + Ω = self.compute_magnus_terms(t) + return expm(Ω) + + def simulate_dynamics(self, psi0, t_final, dt): + psi = psi0 + t = 0 + while t < t_final: + U = self.time_evolution_operator(dt) + psi = U @ psi + t += dt + return psi diff --git a/tests/test_qbraid_algorithms/__init__.py b/tests/test_qbraid_algorithms/__init__.py new file mode 100644 index 0000000..321033c --- /dev/null +++ b/tests/test_qbraid_algorithms/__init__.py @@ -0,0 +1 @@ +from .test_qrc import * diff --git a/tests/test_qbraid_algorithms/test_qrc/__init__.py b/tests/test_qbraid_algorithms/test_qrc/__init__.py new file mode 100644 
index 0000000..e69de29 diff --git a/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py b/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py new file mode 100644 index 0000000..441f119 --- /dev/null +++ b/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py @@ -0,0 +1,20 @@ +import numpy as np +from qbraid_algorithms import qrc + +def test_simulate_dynamics(): + # Define a simple Hamiltonian and initial state + H = np.array([[0, 1], [1, 0]], dtype=complex) # Simple Hamiltonian + psi0 = np.array([1, 0], dtype=complex) # Initial state + t_final = 1.0 + dt = 0.01 + + # Create an instance of MagnusExpansion + magnus = qrc.magnus_expansion.MagnusExpansion(H) + + # Simulate the dynamics + final_state = magnus.simulate_dynamics(psi0, t_final, dt) + + # Add assertions to check the final state + # For example: + expected_final_state = np.array([0.54030231+0.84147098j, 0.00000000+0.j]) + np.testing.assert_allclose(final_state, expected_final_state, rtol=1e-5) From 86f20b5beb99e1a5f7e06e0d63b8d23dbd0233ab Mon Sep 17 00:00:00 2001 From: Hirmay Sandesara <56473003+Hirmay@users.noreply.github.com> Date: Fri, 24 May 2024 16:55:10 +0530 Subject: [PATCH 03/26] Add files via upload --- qosf_python_conversion.ipynb | 281 +++++++++++++++++++++++++++++++++++ 1 file changed, 281 insertions(+) create mode 100644 qosf_python_conversion.ipynb diff --git a/qosf_python_conversion.ipynb b/qosf_python_conversion.ipynb new file mode 100644 index 0000000..0c8c52c --- /dev/null +++ b/qosf_python_conversion.ipynb @@ -0,0 +1,281 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "384e875b-f354-47e3-93be-a2833bd5d47d", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ec2-user/anaconda3/envs/Braket/lib/python3.10/site-packages/torch/cuda/__init__.py:619: UserWarning: Can't initialize NVML\n", + " warnings.warn(\"Can't initialize NVML\")\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from torchvision import datasets, transforms\n", + "from sklearn.decomposition import PCA\n", + "from sklearn.preprocessing import OneHotEncoder\n", + "import bloqade\n", + "# from bloqade import KrylovKit" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "b09e1581-ce05-499f-9d20-81b928d80be3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Define constants\n", + "dim_pca = 10\n", + "Δ_max = 6.0\n", + "num_examples = 1000\n", + "num_test_examples = 100" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "98344ae5-5055-45a5-acea-133898451f37", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Load MNIST dataset\n", + "transform = transforms.Compose([transforms.ToTensor()])\n", + "train_dataset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)\n", + "test_dataset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=False, transform=transform)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "76522421-817d-4ff1-b514-0861e1290da0", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Create data loaders\n", + "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True)\n", + "test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": 
"5d3d73cd-b187-46df-9e96-e66b832f9d49", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Perform PCA on training data\n", + "pca = PCA(n_components=dim_pca)\n", + "x_train_pca = pca.fit_transform(train_dataset.data.numpy().reshape(-1, 28*28))\n", + "x_test_pca = pca.transform(test_dataset.data.numpy().reshape(-1, 28*28))" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "02936fa6-32f5-4085-8e6f-d78d84c76540", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Scale PCA values to feasible range of local detuning\n", + "x_train_pca = x_train_pca / np.max(np.abs(x_train_pca)) * Δ_max\n", + "x_test_pca = x_test_pca / np.max(np.abs(x_test_pca)) * Δ_max" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "3d2e9596-a5ed-4176-a786-33de409f6d50", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 1.88088681, -0.1077645 , -0.78267303, ..., 1.18395488,\n", + " -1.00375497, -0.46647276],\n", + " [-2.40351538, -0.38411485, 1.00276769, ..., 0.58196908,\n", + " 0.37119781, -0.21784087],\n", + " [-1.08367033, 0.1664487 , 0.65421782, ..., -0.2765957 ,\n", + " 0.24811255, -0.3418475 ],\n", + " ...,\n", + " [ 1.50126648, 0.89318566, -1.04370463, ..., -0.88698179,\n", + " -0.06128021, -2.01555388],\n", + " [-0.27316387, 1.61689005, -0.63724006, ..., 0.41777847,\n", + " 0.67804019, -0.54465725],\n", + " [-0.22766553, 1.77605246, 2.12734287, ..., -1.06862189,\n", + " 0.43011495, 0.13571932]])" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "x_test_pca[:, 1:num_examples]" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "ce2224e6-2296-4ad0-bf79-89c62fe52e8b", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# One-hot encode labels\n", + "encoder = OneHotEncoder(sparse_output=False)\n", + "y_train = encoder.fit_transform(train_dataset.targets.numpy().reshape(-1, 1))\n", + "y_test = encoder.transform(test_dataset.targets.numpy().reshape(-1, 1))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "f3499830-b177-4cc5-87ac-77d1f47b8381", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0., 0., 0., ..., 1., 0., 0.],\n", + " [0., 1., 0., ..., 0., 0., 0.],\n", + " [1., 0., 0., ..., 0., 0., 0.],\n", + " ...,\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.]])" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y_test[:, 1:num_examples]" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "5530692e-88d3-4200-ba84-0f2d37251010", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Define quantum reservoir computing (QRC) layer\n", + "class DetuningLayer(nn.Module):\n", + " def __init__(self, atoms, readouts, Ω, t_start, t_end, step):\n", + " super(DetuningLayer, self).__init__()\n", + " self.atoms = atoms\n", + " self.readouts = readouts\n", + " self.Ω = Ω\n", + " self.t_start = t_start\n", + " self.t_end = t_end\n", + " self.step = step\n", + " def forward(self, x):\n", + " # Simulate quantum dynamics and compute readouts\n", + " # have to use bloqade quantum\n", + " # This part is not implemented in Python, as it requires a quantum simulator \n", + " # calculating steps\n", + " self.atoms @ np.exp(-1j * h * (self.t_end - self.t_start))" + ] + }, + { + "cell_type": "markdown", + 
"id": "5eef6e17-5d42-4f2f-a9f4-81506b792d71", + "metadata": {}, + "source": [ + "## Defining a NN model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dd603eb8-c7d3-47b5-9b93-11951e05d166", + "metadata": {}, + "outputs": [], + "source": [ + "# Define neural network model\n", + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super(Net, self).__init__()\n", + " self.fc1 = nn.Linear(dim_pca, 10)\n", + " def forward(self, x):\n", + " x = torch.relu(self.fc1(x))\n", + " return x\n", + " # Train classical model using PCA features\n", + " model_reg = Net()\n", + " criterion = nn.CrossEntropyLoss()\n", + " optimizer = optim.Adam(model_reg.parameters(), lr=0.01)\n", + " for epoch in range(1000):\n", + " for x, y in train_loader:\n", + " x = x.view(-1, 28*28)\n", + " x_pca = pca.transform(x.numpy())\n", + " x_pca = torch.tensor(x_pca, dtype=torch.float32)\n", + " y = torch.tensor(y, dtype=torch.long)\n", + " optimizer.zero_grad()\n", + " output = model_reg(x_pca)\n", + " loss = criterion(output, y)\n", + " loss.backward()\n", + " optimizer.step()\n", + " # Train QRC model using quantum reservoir computing\n", + " pre_layer = DetuningLayer(atoms, readouts, Ω, t_start, t_end, step)\n", + " model_qrc = Net()\n", + " for epoch in range(1000):\n", + " for x, y in train_loader:\n", + " x = x.view(-1, 28*28)\n", + " x_pca = pca.transform(x.numpy())\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "conda_braket", + "language": "python", + "name": "conda_braket" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 60ed8529bcc2e10b2cc6113bf1963e7ce404d06d Mon Sep 17 00:00:00 2001 From: ANJANA THIMMAIAH Date: Fri, 31 May 2024 11:14:08 -0700 Subject: [PATCH 04/26] Base version of the OneHotEncode Function --- OneHotEncode.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 OneHotEncode.py diff --git a/OneHotEncode.py b/OneHotEncode.py new file mode 100644 index 0000000..2835074 --- /dev/null +++ b/OneHotEncode.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +################################################################################## +# Function : OneHotEncodeData # +# Purpose : This is an independent function call to load the mnist image # +# datset that contains images of numbers from 0 to 9 primarily # +# used as a training and test dats sets for image processing # +# ML algorithms. 
# +# This is built as an independent function to decouple the data # +# set loads and make the other functions independent of the dataset # +# # +################################################################################### + +import pcalocaldetuning as pld +#import numpy + +from sklearn.preprocessing import OneHotEncoder + +def OneHotEncodeData(): + xs=pld.pcalocaldetuning() + encoder = OneHotEncoder(sparse_output=False) + + + + onehotencodeddata=encoder.fit_transform(xs) + + #print("onehotencodeddata shape ",onehotencodeddata.shape) + + #print("Onehot encoded data") + + #print(onehotencodeddata[1:50,1:20]) + + return onehotencodeddata + + + #numpy.savetxt("/Users/anjanathimmaiah/soorajprograms/datafiles/onehotencoded.csv", onehotencodeddata,delimiter =",") From b96fb944e00a7f07515fb02b436e06f3a20635ed Mon Sep 17 00:00:00 2001 From: Sooraj Bopanna Date: Fri, 31 May 2024 11:25:18 -0700 Subject: [PATCH 05/26] Base version of the functions to load mnist data, perform local detuning and pcareduction --- loadmnistdata.py | 18 +++++++++++++++++ pcalocaldetuning.py | 32 +++++++++++++++++++++++++++++ pcareduction.py | 49 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 99 insertions(+) create mode 100644 loadmnistdata.py create mode 100644 pcalocaldetuning.py create mode 100644 pcareduction.py diff --git a/loadmnistdata.py b/loadmnistdata.py new file mode 100644 index 0000000..bf60c7b --- /dev/null +++ b/loadmnistdata.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- + +################################################################################### +# Function : loadmnistdata # +# Purpose : This is an independent function call to load the mnist image # +# datset that contains images of numbers from 0 to 9 primarily # +# used as a training and test dats sets for image processing # +# ML algorithms. # +# This is built as an independent function to decouple the data # +# set loads and make the other functions independent of the dataset # +# # +################################################################################### + +from tensorflow.keras.datasets import mnist + +def loadmnistdata(): + (X_train, Y_train), (X_test, Y_test) = mnist.load_data() + return X_train,Y_train,X_test,Y_test \ No newline at end of file diff --git a/pcalocaldetuning.py b/pcalocaldetuning.py new file mode 100644 index 0000000..bc1476e --- /dev/null +++ b/pcalocaldetuning.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +################################################################################### +# Function : pcalocaldetuning # +# Purpose : This function is used to scale the range of the principal # +# components to a feasible range of local detuning. Later, for each # +# image, each of the 10 scaled principal components are encoded # +# into each single local detuning for 10 atoms. 
# +# # +################################################################################### + +import pcareduction as custpca + +def pcalocaldetuning(): + + print("Calling pcareduction") + xs =custpca.pcareduction() + + print("In pca local detuning") + + delta_max=6.0 + spectral=max(abs(xs.max()),abs(xs.min())) + print("xs.max()") + xs.max() + print("xs.min()") + xs.min() + print("spectral ",spectral) + + xs=(xs/spectral)*delta_max + print(xs) + + return(xs) \ No newline at end of file diff --git a/pcareduction.py b/pcareduction.py new file mode 100644 index 0000000..7804187 --- /dev/null +++ b/pcareduction.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +########################################################################################## +# Function : pcareduction # +# Purpose : This is a function call to perform linear dimensonality reduction using # +# principal component analysis(PCA) # +# The data is linearly transformed onto a new coordinate system such # +# that the directions (principal components) capturing the largest # +# variation in the data can be easily identified. # +# # +# The principal components of a collection of points in a real coordinate # +# space are a sequence of p unit vectors where the i-th vector is the # +# direction of a line that best fits the data while being orthogonal to # +# the first vectors. Here, a best-fitting line is defined as one # +# that minimizes the average squared perpendicular distance from the # +# points to the line. These directions (i.e., principal components) # +# constitute an orthonormal basis in which different individual dimensions # +# of the data are linearly uncorrelated. Many studies use the first two # +# principal components in order to plot the data in two dimensions and # +# to visually identify clusters of closely related data points. # +# # +########################################################################################## + +import loadmnistdata as lmd +#import csv +#import json + +#import numpy + +#from sklearn.preprocessing import StandardScaler +from sklearn.decomposition import PCA + +def pcareduction(): + + X_train,Y_train,X_test,Y_test = lmd.loadmnistdata() + + dim_pca=10 + + pca= PCA(n_components=dim_pca) + + model_pca=pca.fit_transform(X_train[:,:,1]) + + num_examples=1000 + + xs=model_pca[:,1:num_examples] + + #numpy.savetxt("/Users/anjanathimmaiah/soorajprograms/datafiles/pcaredoutput.csv", xs,delimiter =",") + + return xs \ No newline at end of file From 2b716f364682a2c49f8dd2798d451119c932d677 Mon Sep 17 00:00:00 2001 From: Hirmay Sandesara <56473003+Hirmay@users.noreply.github.com> Date: Wed, 5 Jun 2024 21:13:12 +0530 Subject: [PATCH 06/26] Update pca.py PCA but implemented a bit differently --- qbraid_algorithms/qrc/pca.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/qbraid_algorithms/qrc/pca.py b/qbraid_algorithms/qrc/pca.py index 38275e9..cb549f0 100644 --- a/qbraid_algorithms/qrc/pca.py +++ b/qbraid_algorithms/qrc/pca.py @@ -14,10 +14,10 @@ """ import torch - +from sklearn.decomposition import PCA def pca_reduction( - data: torch.Tensor, n_components: int = 2 + data: torch.Tensor, n_components: int = 2, data_dim: int = 10, delta_max: int = 10, ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: """ Perform PCA reduction on the provided data using PyTorch's pca_lowrank to @@ -26,6 +26,8 @@ def pca_reduction( Args: data (torch.Tensor): The input data tensor where each row represents a sample. 
n_components (int): The number of principal components to retain. + data_dim (int) : The dimension of the input data required for doing PCA. + delta_max (int) : The scaling factor for bring PCA values to the feasible range of local detuning. Returns: tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: A tuple containing the @@ -33,7 +35,15 @@ def pca_reduction( TODO: Implement the PCA reduction function using torch.pca_lowrank or another suitable method. """ - # Placeholder for actual implementation. - u, s, v = torch.pca_lowrank(data, q=n_components) - transformed_data = torch.mm(data, v[:, :n_components]) - return transformed_data, (u, s, v) + # Perform PCA on training data + pca = PCA(n_components=n_components) + data_pca = pca.fit_transform(data.data.numpy().reshape(data_dim)) + + # Scale PCA values to feasible range of local detuning + scaled_data_pca = data_pca / np.max(np.abs(data_pca)) * delta_max + + # u, s, v = torch.pca_lowrank(data, q=n_components) + # transformed_data = torch.mm(data, v[:, :n_components]) + # return transformed_data, (u, s, v) + + return scaled_data_pca From 27fbf2856536d74117a4c401e6a639baa4f5be3d Mon Sep 17 00:00:00 2001 From: Hirmay Sandesara <56473003+Hirmay@users.noreply.github.com> Date: Wed, 5 Jun 2024 21:16:02 +0530 Subject: [PATCH 07/26] Update encoding.py --- qbraid_algorithms/qrc/encoding.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/qbraid_algorithms/qrc/encoding.py b/qbraid_algorithms/qrc/encoding.py index d38a424..58fd339 100644 --- a/qbraid_algorithms/qrc/encoding.py +++ b/qbraid_algorithms/qrc/encoding.py @@ -14,7 +14,7 @@ """ import torch - +from sklearn.preprocessing import OneHotEncoder def one_hot_encoding(labels: torch.Tensor, num_classes: int) -> torch.Tensor: """ @@ -30,4 +30,7 @@ def one_hot_encoding(labels: torch.Tensor, num_classes: int) -> torch.Tensor: TODO: Implement the one-hot encoding function. """ # Placeholder for actual implementation. 
- return torch.nn.functional.one_hot(labels, num_classes=num_classes) + encoder = OneHotEncoder(sparse_output=False) + # I don't know if the reshape params will be universal or not + encoded_data = encoder.fit_transform(lables.targets.numpy().reshape(-1, 1)) + # return torch.nn.functional.one_hot(labels, num_classes=num_classes) From c6dad6ff013560de8dcfda76b0d24f1ce33703eb Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Mon, 10 Jun 2024 13:34:16 -0500 Subject: [PATCH 08/26] unify dev progress --- OneHotEncode.py | 37 --- env.yml | 137 --------- .../quantum_reservoir_computing_mnist.ipynb | 164 ++++++++++ loadmnistdata.py | 18 -- pcalocaldetuning.py | 32 -- pcareduction.py | 49 --- qbraid_algorithms/datasets/__init__.py | 4 +- qbraid_algorithms/datasets/mnist.py | 26 ++ qbraid_algorithms/datasets/sequences.py | 2 +- qbraid_algorithms/qrc/__init__.py | 2 +- qbraid_algorithms/qrc/dynamics.py | 58 +++- qbraid_algorithms/qrc/encoding.py | 19 +- qbraid_algorithms/qrc/krylov.py | 45 ++- qbraid_algorithms/qrc/krylov_foo.py | 66 ---- qbraid_algorithms/qrc/magnus_expansion.py | 37 ++- qbraid_algorithms/qrc/model.py | 33 ++ qbraid_algorithms/qrc/pca.py | 30 +- qbraid_algorithms/qrc/test_rbh.py | 84 ------ qosf_python_conversion.ipynb | 281 ------------------ tests/test_esn_reservoir.py | 10 +- tests/test_qbraid_algorithms/__init__.py | 1 - .../test_qrc/__init__.py | 0 .../test_qrc/test_magnus_expansion.py | 20 -- tests/test_qrc_dynamics.py | 62 ++++ 24 files changed, 450 insertions(+), 767 deletions(-) delete mode 100644 OneHotEncode.py delete mode 100644 env.yml create mode 100644 examples/quantum_reservoir_computing_mnist.ipynb delete mode 100644 loadmnistdata.py delete mode 100644 pcalocaldetuning.py delete mode 100644 pcareduction.py create mode 100644 qbraid_algorithms/datasets/mnist.py delete mode 100644 qbraid_algorithms/qrc/krylov_foo.py delete mode 100644 qbraid_algorithms/qrc/test_rbh.py delete mode 100644 qosf_python_conversion.ipynb delete mode 100644 tests/test_qbraid_algorithms/__init__.py delete mode 100644 tests/test_qbraid_algorithms/test_qrc/__init__.py delete mode 100644 tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py create mode 100644 tests/test_qrc_dynamics.py diff --git a/OneHotEncode.py b/OneHotEncode.py deleted file mode 100644 index 2835074..0000000 --- a/OneHotEncode.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -################################################################################## -# Function : OneHotEncodeData # -# Purpose : This is an independent function call to load the mnist image # -# datset that contains images of numbers from 0 to 9 primarily # -# used as a training and test dats sets for image processing # -# ML algorithms. 
# -# This is built as an independent function to decouple the data # -# set loads and make the other functions independent of the dataset # -# # -################################################################################### - -import pcalocaldetuning as pld -#import numpy - -from sklearn.preprocessing import OneHotEncoder - -def OneHotEncodeData(): - xs=pld.pcalocaldetuning() - encoder = OneHotEncoder(sparse_output=False) - - - - onehotencodeddata=encoder.fit_transform(xs) - - #print("onehotencodeddata shape ",onehotencodeddata.shape) - - #print("Onehot encoded data") - - #print(onehotencodeddata[1:50,1:20]) - - return onehotencodeddata - - - #numpy.savetxt("/Users/anjanathimmaiah/soorajprograms/datafiles/onehotencoded.csv", onehotencodeddata,delimiter =",") diff --git a/env.yml b/env.yml deleted file mode 100644 index ccd2ff0..0000000 --- a/env.yml +++ /dev/null @@ -1,137 +0,0 @@ -name: qbraidAlgEnv -channels: - - pytorch - - defaults -dependencies: - - blas=1.0=mkl - - brotli-python=1.0.9=py310hcec6c5f_8 - - bzip2=1.0.8=h6c40b1e_6 - - ca-certificates=2024.3.11=hecd8cb5_0 - - certifi=2024.2.2=py310hecd8cb5_0 - - charset-normalizer=2.0.4=pyhd3eb1b0_0 - - filelock=3.13.1=py310hecd8cb5_0 - - freetype=2.12.1=hd8bbffd_0 - - gmp=6.2.1=he9d5cce_3 - - gmpy2=2.1.2=py310hd5de756_0 - - idna=3.7=py310hecd8cb5_0 - - intel-openmp=2023.1.0=ha357a0b_43548 - - jinja2=3.1.3=py310hecd8cb5_0 - - jpeg=9e=h6c40b1e_1 - - lcms2=2.12=hf1fd2bf_0 - - lerc=3.0=he9d5cce_0 - - libcxx=14.0.6=h9765a3e_0 - - libdeflate=1.17=hb664fd8_1 - - libffi=3.4.4=hecd8cb5_1 - - libjpeg-turbo=2.0.0=hca72f7f_0 - - libpng=1.6.39=h6c40b1e_0 - - libtiff=4.5.1=hcec6c5f_0 - - libwebp-base=1.3.2=h6c40b1e_0 - - lz4-c=1.9.4=hcec6c5f_1 - - markupsafe=2.1.3=py310h6c40b1e_0 - - mkl=2023.1.0=h8e150cf_43560 - - mkl-service=2.4.0=py310h6c40b1e_1 - - mkl_fft=1.3.8=py310h6c40b1e_0 - - mkl_random=1.2.4=py310ha357a0b_0 - - mpc=1.1.0=h6ef4df4_1 - - mpfr=4.0.2=h9066e36_1 - - mpmath=1.3.0=py310hecd8cb5_0 - - ncurses=6.4=hcec6c5f_0 - - networkx=3.1=py310hecd8cb5_0 - - numpy=1.26.4=py310h827a554_0 - - numpy-base=1.26.4=py310ha186be2_0 - - openjpeg=2.4.0=h66ea3da_0 - - openssl=3.0.13=hca72f7f_1 - - pillow=10.3.0=py310h6c40b1e_0 - - pip=24.0=py310hecd8cb5_0 - - pysocks=1.7.1=py310hecd8cb5_0 - - python=3.10.14=h5ee71fb_1 - - pytorch=2.2.2=py3.10_0 - - pyyaml=6.0.1=py310h6c40b1e_0 - - readline=8.2=hca72f7f_0 - - requests=2.31.0=py310hecd8cb5_1 - - setuptools=69.5.1=py310hecd8cb5_0 - - sqlite=3.45.3=h6c40b1e_0 - - sympy=1.12=py310hecd8cb5_0 - - tbb=2021.8.0=ha357a0b_0 - - tk=8.6.14=h4d00af3_0 - - torchaudio=2.2.2=py310_cpu - - torchvision=0.17.2=py310_cpu - - typing_extensions=4.11.0=py310hecd8cb5_0 - - tzdata=2024a=h04d1e81_0 - - urllib3=2.2.1=py310hecd8cb5_0 - - wheel=0.43.0=py310hecd8cb5_0 - - xz=5.4.6=h6c40b1e_1 - - yaml=0.2.5=haf1e3a3_0 - - zlib=1.2.13=h4b97444_1 - - zstd=1.5.5=hc035e20_2 - - pip: - - alabaster==0.7.16 - - appnope==0.1.4 - - astroid==3.2.0 - - asttokens==2.4.1 - - babel==2.15.0 - - black==24.4.2 - - click==8.1.7 - - comm==0.2.2 - - coverage==7.5.1 - - debugpy==1.8.1 - - decorator==5.1.1 - - dill==0.3.8 - - docutils==0.20.1 - - exceptiongroup==1.2.1 - - executing==2.0.1 - - fsspec==2024.5.0 - - imagesize==1.4.1 - - iniconfig==2.0.0 - - ipykernel==6.29.4 - - ipython==8.24.0 - - isort==5.13.2 - - jedi==0.19.1 - - jupyter-client==8.6.1 - - jupyter-core==5.7.2 - - markdown-it-py==3.0.0 - - matplotlib-inline==0.1.7 - - mccabe==0.7.0 - - mdurl==0.1.2 - - mypy-extensions==1.0.0 - - nest-asyncio==1.6.0 - - packaging==24.0 - - parso==0.8.4 - - 
pathspec==0.12.1 - - pexpect==4.9.0 - - platformdirs==4.2.2 - - pluggy==1.5.0 - - prompt-toolkit==3.0.43 - - psutil==5.9.8 - - ptyprocess==0.7.0 - - pure-eval==0.2.2 - - pygments==2.18.0 - - pylint==3.2.0 - - pytest==8.2.0 - - pytest-cov==5.0.0 - - python-dateutil==2.9.0.post0 - - pyzmq==26.0.3 - - qbraid-algorithms==0.1.0 - - qbraid-cli==0.8.0 - - qbraid-core==0.1.6 - - rich==13.7.1 - - shellingham==1.5.4 - - six==1.16.0 - - snowballstemmer==2.2.0 - - sphinx==7.3.7 - - sphinx-autodoc-typehints==2.1.0 - - sphinx-rtd-theme==2.0.0 - - sphinxcontrib-applehelp==1.0.8 - - sphinxcontrib-devhelp==1.0.6 - - sphinxcontrib-htmlhelp==2.0.5 - - sphinxcontrib-jquery==4.1 - - sphinxcontrib-jsmath==1.0.1 - - sphinxcontrib-qthelp==1.0.7 - - sphinxcontrib-serializinghtml==1.1.10 - - stack-data==0.6.3 - - tomli==2.0.1 - - tomlkit==0.12.5 - - tornado==6.4 - - traitlets==5.14.3 - - typer==0.12.3 - - wcwidth==0.2.13 diff --git a/examples/quantum_reservoir_computing_mnist.ipynb b/examples/quantum_reservoir_computing_mnist.ipynb new file mode 100644 index 0000000..afe7b30 --- /dev/null +++ b/examples/quantum_reservoir_computing_mnist.ipynb @@ -0,0 +1,164 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "id": "384e875b-f354-47e3-93be-a2833bd5d47d", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from torchvision import datasets, transforms\n", + "\n", + "from qbraid_algorithms.qrc.encoding import one_hot_encoding\n", + "from qbraid_algorithms.qrc.pca import pca_reduction\n", + "from qbraid_algorithms.qrc.dynamics import DetuningLayer, generate_sites, apply_layer\n", + "from qbraid_algorithms.qrc.model import QRCModel" + ] + }, + { + "cell_type": "markdown", + "id": "92fe1fac", + "metadata": {}, + "source": [ + "# Quantum Reservoir Computing Demo\n", + "\n", + "In this notebook we will show you how to train a model to classify MNIST images using quantum reservoir computing (QRC)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "98344ae5-5055-45a5-acea-133898451f37", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Load MNIST dataset\n", + "transform = transforms.Compose([transforms.ToTensor()])\n", + "train_dataset = datasets.MNIST('./data/', download=True, train=True, transform=transform)\n", + "test_dataset = datasets.MNIST('./data/', download=True, train=False, transform=transform)\n", + "\n", + "# Create data loaders\n", + "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True)\n", + "test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False)" + ] + }, + { + "cell_type": "markdown", + "id": "21064646", + "metadata": {}, + "source": [ + "## PCA Reduction" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b891e72e", + "metadata": {}, + "outputs": [], + "source": [ + "# Define constants\n", + "dim_pca = 10\n", + "delta_max = 6.0\n", + "num_examples = 1000\n", + "num_test_examples = 100" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a23670ab", + "metadata": {}, + "outputs": [], + "source": [ + "x_train_pca = pca_reduction(train_dataset, n_components=10, data_dim=28*28, delta_max=delta_max, train=True)\n", + "x_test_pca = pca_reduction(train_dataset, n_components=10, data_dim=28*28, delta_max=delta_max, train=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "ce2224e6-2296-4ad0-bf79-89c62fe52e8b", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "y_train = one_hot_encoding(train_dataset.targets.numpy().reshape(-1, 1))\n", + "y_test = one_hot_encoding(test_dataset.targets.numpy().reshape(-1, 1))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f3499830-b177-4cc5-87ac-77d1f47b8381", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0., 0., 0., ..., 1., 0., 0.],\n", + " [0., 1., 0., ..., 0., 0., 0.],\n", + " [1., 0., 0., ..., 0., 0., 0.],\n", + " ...,\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.]])" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y_test[:, 1:num_examples]" + ] + }, + { + "cell_type": "markdown", + "id": "bd9942ce", + "metadata": {}, + "source": [ + "## Simulate Quantum Dynamics" + ] + }, + { + "cell_type": "markdown", + "id": "1fe1a860", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "qbraid-algos", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/loadmnistdata.py b/loadmnistdata.py deleted file mode 100644 index bf60c7b..0000000 --- a/loadmnistdata.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- - -################################################################################### -# Function : loadmnistdata # -# Purpose : This is an independent function call to load the mnist image # -# datset that contains images of numbers from 0 to 9 primarily # -# used as a training and test dats sets for image processing # -# ML algorithms. 
# -# This is built as an independent function to decouple the data # -# set loads and make the other functions independent of the dataset # -# # -################################################################################### - -from tensorflow.keras.datasets import mnist - -def loadmnistdata(): - (X_train, Y_train), (X_test, Y_test) = mnist.load_data() - return X_train,Y_train,X_test,Y_test \ No newline at end of file diff --git a/pcalocaldetuning.py b/pcalocaldetuning.py deleted file mode 100644 index bc1476e..0000000 --- a/pcalocaldetuning.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -################################################################################### -# Function : pcalocaldetuning # -# Purpose : This function is used to scale the range of the principal # -# components to a feasible range of local detuning. Later, for each # -# image, each of the 10 scaled principal components are encoded # -# into each single local detuning for 10 atoms. # -# # -################################################################################### - -import pcareduction as custpca - -def pcalocaldetuning(): - - print("Calling pcareduction") - xs =custpca.pcareduction() - - print("In pca local detuning") - - delta_max=6.0 - spectral=max(abs(xs.max()),abs(xs.min())) - print("xs.max()") - xs.max() - print("xs.min()") - xs.min() - print("spectral ",spectral) - - xs=(xs/spectral)*delta_max - print(xs) - - return(xs) \ No newline at end of file diff --git a/pcareduction.py b/pcareduction.py deleted file mode 100644 index 7804187..0000000 --- a/pcareduction.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -########################################################################################## -# Function : pcareduction # -# Purpose : This is a function call to perform linear dimensonality reduction using # -# principal component analysis(PCA) # -# The data is linearly transformed onto a new coordinate system such # -# that the directions (principal components) capturing the largest # -# variation in the data can be easily identified. # -# # -# The principal components of a collection of points in a real coordinate # -# space are a sequence of p unit vectors where the i-th vector is the # -# direction of a line that best fits the data while being orthogonal to # -# the first vectors. Here, a best-fitting line is defined as one # -# that minimizes the average squared perpendicular distance from the # -# points to the line. These directions (i.e., principal components) # -# constitute an orthonormal basis in which different individual dimensions # -# of the data are linearly uncorrelated. Many studies use the first two # -# principal components in order to plot the data in two dimensions and # -# to visually identify clusters of closely related data points. 
# -# # -########################################################################################## - -import loadmnistdata as lmd -#import csv -#import json - -#import numpy - -#from sklearn.preprocessing import StandardScaler -from sklearn.decomposition import PCA - -def pcareduction(): - - X_train,Y_train,X_test,Y_test = lmd.loadmnistdata() - - dim_pca=10 - - pca= PCA(n_components=dim_pca) - - model_pca=pca.fit_transform(X_train[:,:,1]) - - num_examples=1000 - - xs=model_pca[:,1:num_examples] - - #numpy.savetxt("/Users/anjanathimmaiah/soorajprograms/datafiles/pcaredoutput.csv", xs,delimiter =",") - - return xs \ No newline at end of file diff --git a/qbraid_algorithms/datasets/__init__.py b/qbraid_algorithms/datasets/__init__.py index a3d47b7..a4e7ccb 100644 --- a/qbraid_algorithms/datasets/__init__.py +++ b/qbraid_algorithms/datasets/__init__.py @@ -19,10 +19,12 @@ create_sequences create_time_series_data + load_mnist_data """ +from .mnist import load_mnist_data from .sequences import create_sequences, create_time_series_data -__all__ = ["create_sequences", "create_time_series_data"] +__all__ = ["create_sequences", "create_time_series_data", "load_mnist_data"] diff --git a/qbraid_algorithms/datasets/mnist.py b/qbraid_algorithms/datasets/mnist.py new file mode 100644 index 0000000..faa785d --- /dev/null +++ b/qbraid_algorithms/datasets/mnist.py @@ -0,0 +1,26 @@ +# Copyright (C) 2024 qBraid +# +# This file is part of the qBraid-SDK +# +# The qBraid-SDK is free software released under the GNU General Public License v3 +# or later. You can redistribute and/or modify it under the terms of the GPL v3. +# See the LICENSE file in the project root or . +# +# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. + +""" +Module defining MNIST dataset for reservoir computing tasks. + +""" +import numpy as np +import torchvision as tv + + +def load_mnist_data(download: bool = False, train: bool = True) -> np.ndarray: + """Load the MNIST dataset.""" + transform = tv.transforms.Compose([tv.transforms.ToTensor()]) + dataset = tv.datasets.MNIST( + "./MNIST_data/", download=download, train=train, transform=transform + ) + dataset_array: np.ndarray = dataset.data.numpy() + return dataset_array diff --git a/qbraid_algorithms/datasets/sequences.py b/qbraid_algorithms/datasets/sequences.py index b292bd6..042e597 100644 --- a/qbraid_algorithms/datasets/sequences.py +++ b/qbraid_algorithms/datasets/sequences.py @@ -9,7 +9,7 @@ # THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. """ -Module defining datasets for reservoir computing tasks. +Module defining time-series datasets for reservoir computing tasks. 
""" diff --git a/qbraid_algorithms/qrc/__init__.py b/qbraid_algorithms/qrc/__init__.py index b3efdba..d223648 100644 --- a/qbraid_algorithms/qrc/__init__.py +++ b/qbraid_algorithms/qrc/__init__.py @@ -22,7 +22,7 @@ """ -from .model import QRCModel from .magnus_expansion import MagnusExpansion +from .model import QRCModel __all__ = ["QRCModel", "MagnusExpansion"] diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index ae92daf..28ef630 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -13,12 +13,16 @@ """ +import math from dataclasses import dataclass, field from typing import Any import numpy as np from bloqade.emulate.ir.atom_type import AtomType from bloqade.emulate.ir.emulator import Register +from bloqade.emulate.ir.state_vector import RydbergHamiltonian + +from .krylov import KrylovEvolution @dataclass @@ -53,6 +57,30 @@ def generate_sites(lattice_type, dimension, scale): raise NotImplementedError +def rydberg_h(atoms: list[AtomType], delta: float, omega: float) -> RydbergHamiltonian: + """ + Generate the Hamiltonian for a Rydberg atom system. + + Args: + atoms (list[AtomType]): Atom positions. + omega (float): Rabi frequency. + + Returns: + RydbergHamiltonian: Hamiltonian matrix. + """ + raise NotImplementedError + + +def set_zero_state(reg: Register): + """ + Set the quantum state to the zero state. + + Args: + reg (Register): Quantum state storage. + """ + raise NotImplementedError + + def apply_layer(layer: DetuningLayer, x: np.ndarray) -> np.ndarray: """ Simulate quantum evolution and record output for a given set of PCA values (x). @@ -66,4 +94,32 @@ def apply_layer(layer: DetuningLayer, x: np.ndarray) -> np.ndarray: TODO: Implement the actual simulation using suitable quantum simulation libraries. """ - raise NotImplementedError + h = rydberg_h(layer.atoms, x, layer.omega) + + reg = layer.reg + reg = set_zero_state(reg) + + t_start = layer.t_start + t_end = layer.t_end + t_step = layer.step + start_clock = NotImplemented + + # initialize output vector + steps = math.floor((t_end - t_start) / t_step) + out = np.zeros(steps * len(layer.readouts)) + + # Numerically simulate the quantum evolution with Krylov methods and store the readouts + i = 1 + prob = KrylovEvolution(reg, start_clock=start_clock, hamiltonian=h) + for i in range(steps): + # ignore first time step, this is just the initial state + if i == 0: + continue + + # TODO: Implement the emulation step function. + # NOTE: The following lines are placeholders, are not necessarily correct, and should be replaced. + prob = prob.emulate_step(i, t_start + i * t_step, t_step) + for j, readout in enumerate(layer.readouts): + out[i * len(layer.readouts) + j] = readout(prob) + + return out diff --git a/qbraid_algorithms/qrc/encoding.py b/qbraid_algorithms/qrc/encoding.py index 58fd339..88cf1ec 100644 --- a/qbraid_algorithms/qrc/encoding.py +++ b/qbraid_algorithms/qrc/encoding.py @@ -12,25 +12,26 @@ Module for encoding of data. """ - +import numpy as np import torch from sklearn.preprocessing import OneHotEncoder -def one_hot_encoding(labels: torch.Tensor, num_classes: int) -> torch.Tensor: + +def one_hot_encoding(labels: np.ndarray, train: bool = True) -> torch.Tensor: """ Convert a tensor of numeric labels into a one-hot encoded matrix using PyTorch. Args: - labels (torch.Tensor): The tensor of labels to encode. - num_classes (int): The total number of classes. + labels (np.ndarray): The array of labels to encode. 
Returns: torch.Tensor: The one-hot encoded matrix where each row corresponds to a label. - TODO: Implement the one-hot encoding function. """ - # Placeholder for actual implementation. encoder = OneHotEncoder(sparse_output=False) - # I don't know if the reshape params will be universal or not - encoded_data = encoder.fit_transform(lables.targets.numpy().reshape(-1, 1)) - # return torch.nn.functional.one_hot(labels, num_classes=num_classes) + reshaped_data = labels.reshape(-1, 1) + if train: + encoded_data = encoder.fit_transform(reshaped_data) + else: + encoded_data = encoder.transform(reshaped_data) + return encoded_data diff --git a/qbraid_algorithms/qrc/krylov.py b/qbraid_algorithms/qrc/krylov.py index 468415a..bd46ef4 100644 --- a/qbraid_algorithms/qrc/krylov.py +++ b/qbraid_algorithms/qrc/krylov.py @@ -12,11 +12,13 @@ Module for quantum time evolution using Krylov subspace methods. """ - from dataclasses import dataclass +from dataclasses import dataclass +import numpy as np +from bloqade.atom_arrangement import Square from bloqade.emulate.ir.emulator import Register -from bloqade.emulate.ir.state_vector import RydbergHamiltonian +from bloqade.emulate.ir.state_vector import RydbergInteraction, RydbergHamiltonian +from scipy.linalg import expm # Placeholder for Krylov options with dummy attributes @@ -65,6 +67,31 @@ class KrylovEvolution: hamiltonian: RydbergHamiltonian options: KrylovOptions + def generate_krylov_basis(self, h, psi_0, m): + """Generates the first m Krylov basis vectors.""" + n = len(psi_0) + k = np.zeros((n, m), dtype=complex) + k[:, 0] = psi_0 / np.linalg.norm(psi_0) + for j in range(1, m): + k[:, j] = h @ k[:, j - 1] + for i in range(j): + k[:, j] -= np.dot(k[:, i], k[:, j]) * k[:, i] + k[:, j] /= np.linalg.norm(k[:, j]) + return k + + def gram_schmidt(self, v): + """Orthonormalizes the vectors using the Gram-Schmidt process.""" + q, _ = np.linalg.qr(v) + return q + + def krylov_evolution(self, h, psi_0, t, m): + """Projects H onto the Krylov subspace and computes the time evolution.""" + k = self.generate_krylov_basis(h, psi_0, m) + h_m = k.T.conj() @ h @ k + exp_h_m = expm(-1j * h_m * t) + psi_t = k @ exp_h_m @ k.T.conj() @ psi_0 + return psi_t + def emulate_step(self, step: int, clock: float, duration: float) -> "KrylovEvolution": """ Simulate a single time step of quantum evolution using the Krylov subspace method. @@ -79,7 +106,14 @@ def emulate_step(self, step: int, clock: float, duration: float) -> "KrylovEvolu TODO: Implement the emulation step function. """ - raise NotImplementedError + try: + psi_0 = self.reg.state_vector + evolved_state = self.krylov_evolution( + self.hamiltonian.rydberg, psi_0, duration, len(self.durations) + ) + self.reg.state_vector = evolved_state + except Exception as err: + raise NotImplementedError("Emulation step failed") from err def normalize_register(self): """ @@ -87,4 +121,7 @@ def normalize_register(self): TODO: Implement the normalization logic.
""" - raise NotImplementedError \ No newline at end of file + if self.options.normalize_finally: + norm = np.linalg.norm(self.reg.state_vector) + if norm > self.options.tol: + self.reg.state_vector /= norm diff --git a/qbraid_algorithms/qrc/krylov_foo.py b/qbraid_algorithms/qrc/krylov_foo.py deleted file mode 100644 index 9b517cf..0000000 --- a/qbraid_algorithms/qrc/krylov_foo.py +++ /dev/null @@ -1,66 +0,0 @@ -# krylov.py -from dataclasses import dataclass -from bloqade.emulate.ir.emulator import Register -from bloqade.emulate.ir.state_vector import RydbergHamiltonian -from bloqade.atom_arrangement import Square -from bloqade.rydberg import RydbergInteraction -import numpy as np - -class KrylovOptions: - """Class that describes options for a Krylov subspace method.""" - def __init__(self, progress=False, progress_name="emulating", normalize_step=1, normalize_finally=True, tol=1e-7): - self.progress = progress - self.progress_name = progress_name - self.normalize_step = normalize_step - self.normalize_finally = normalize_finally - self.tol = tol - -@dataclass -class KrylovEvolution: - """Class that describes a time evolution using Krylov subspace methods.""" - reg: Register - start_clock: float - durations: list[float] - hamiltonian: RydbergHamiltonian - options: KrylovOptions - - def generate_krylov_basis(self, H, psi0, m): - """Generates the first m Krylov basis vectors.""" - n = len(psi0) - K = np.zeros((n, m), dtype=complex) - K[:, 0] = psi0 / np.linalg.norm(psi0) - for j in range(1, m): - K[:, j] = H @ K[:, j-1] - for k in range(j): - K[:, j] -= np.dot(K[:, k], K[:, j]) * K[:, k] - K[:, j] /= np.linalg.norm(K[:, j]) - return K - - def gram_schmidt(self, V): - """Orthonormalizes the vectors using the Gram-Schmidt process.""" - Q, R = np.linalg.qr(V) - return Q - - def krylov_evolution(self, H, psi0, t, m): - """Projects H onto the Krylov subspace and computes the time evolution.""" - K = self.generate_krylov_basis(H, psi0, m) - H_m = K.T.conj() @ H @ K - exp_Hm = expm(-1j * H_m * t) - psi_t = K @ exp_Hm @ K.T.conj() @ psi0 - return psi_t - - def emulate_step(self, step, clock, duration): - """Simulate a single time step of quantum evolution using the Krylov subspace method.""" - try: - psi0 = self.reg.state_vector - evolved_state = self.krylov_evolution(self.hamiltonian.rydberg, psi0, duration, len(self.durations)) - self.reg.state_vector = evolved_state - except Exception as e: - raise NotImplementedError(f"Emulation step failed: {e}") - - def normalize_register(self): - """Normalize the quantum register if specified in options.""" - if self.options.normalize_finally: - norm = np.linalg.norm(self.reg.state_vector) - if norm > self.options.tol: - self.reg.state_vector /= norm diff --git a/qbraid_algorithms/qrc/magnus_expansion.py b/qbraid_algorithms/qrc/magnus_expansion.py index b183ae1..4ec2297 100644 --- a/qbraid_algorithms/qrc/magnus_expansion.py +++ b/qbraid_algorithms/qrc/magnus_expansion.py @@ -1,13 +1,36 @@ +# Copyright (C) 2024 qBraid +# +# This file is part of the qBraid-SDK +# +# The qBraid-SDK is free software released under the GNU General Public License v3 +# or later. You can redistribute and/or modify it under the terms of the GPL v3. +# See the LICENSE file in the project root or . +# +# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. + +""" +Module for quantum time evolution using Magnus expansion. + +""" + from scipy.linalg import expm + class MagnusExpansion: + """ + Class that describes a time evolution using Magnus expansion. 
+ + """ + def __init__(self, H): self.H = H def commutator(self, A, B): + """Compute the commutator of two matrices.""" return A @ B - B @ A def compute_magnus_terms(self, t): + """Compute the terms of the Magnus expansion.""" H_t = self.H * t Ω1 = H_t @@ -18,20 +41,26 @@ def compute_magnus_terms(self, t): # Third-order term comm_H1_comm_H2_H3 = self.commutator(self.H, self.commutator(self.H, self.H)) comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.H, self.H), self.H) - Ω3 = (1/6) * (comm_H1_comm_H2_H3 + comm_H3_comm_H2_H1) * t**3 + Ω3 = (1 / 6) * (comm_H1_comm_H2_H3 + comm_H3_comm_H2_H1) * t**3 # Fourth-order term - comm_H1_comm_H2_comm_H3_H4 = self.commutator(self.H, self.commutator(self.H, self.commutator(self.H, self.H))) - comm_H4_comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.commutator(self.H, self.H), self.H), self.H) - Ω4 = (1/24) * (comm_H1_comm_H2_comm_H3_H4 + comm_H4_comm_H3_comm_H2_H1) * t**4 + comm_H1_comm_H2_comm_H3_H4 = self.commutator( + self.H, self.commutator(self.H, self.commutator(self.H, self.H)) + ) + comm_H4_comm_H3_comm_H2_H1 = self.commutator( + self.commutator(self.commutator(self.H, self.H), self.H), self.H + ) + Ω4 = (1 / 24) * (comm_H1_comm_H2_comm_H3_H4 + comm_H4_comm_H3_comm_H2_H1) * t**4 return Ω1 + Ω2 + Ω3 + Ω4 def time_evolution_operator(self, t): + """Compute the time evolution operator using Magnus expansion.""" Ω = self.compute_magnus_terms(t) return expm(Ω) def simulate_dynamics(self, psi0, t_final, dt): + """Simulate the dynamics of the system.""" psi = psi0 t = 0 while t < t_final: diff --git a/qbraid_algorithms/qrc/model.py b/qbraid_algorithms/qrc/model.py index acc7f3f..200ed4a 100644 --- a/qbraid_algorithms/qrc/model.py +++ b/qbraid_algorithms/qrc/model.py @@ -17,6 +17,7 @@ from typing import Any, Callable import numpy as np +import torch.nn as nn from .dynamics import DetuningLayer @@ -44,3 +45,35 @@ def __call__(self, xs: np.ndarray) -> list[int]: TODO: Implement the transformation and prediction steps. """ raise NotImplementedError + + +# Define neural network model +# class Net(nn.Module): +# def __init__(self): +# super(Net, self).__init__() +# self.fc1 = nn.Linear(dim_pca, 10) +# def forward(self, x): +# x = torch.relu(self.fc1(x)) +# return x +# # Train classical model using PCA features +# model_reg = Net() +# criterion = nn.CrossEntropyLoss() +# optimizer = optim.Adam(model_reg.parameters(), lr=0.01) +# for epoch in range(1000): +# for x, y in train_loader: +# x = x.view(-1, 28*28) +# x_pca = pca.transform(x.numpy()) +# x_pca = torch.tensor(x_pca, dtype=torch.float32) +# y = torch.tensor(y, dtype=torch.long) +# optimizer.zero_grad() +# output = model_reg(x_pca) +# loss = criterion(output, y) +# loss.backward() +# optimizer.step() +# # Train QRC model using quantum reservoir computing +# pre_layer = DetuningLayer(atoms, readouts, Ω, t_start, t_end, step) +# model_qrc = Net() +# for epoch in range(1000): +# for x, y in train_loader: +# x = x.view(-1, 28*28) +# x_pca = pca.transform(x.numpy()) diff --git a/qbraid_algorithms/qrc/pca.py b/qbraid_algorithms/qrc/pca.py index cb549f0..fdddd0e 100644 --- a/qbraid_algorithms/qrc/pca.py +++ b/qbraid_algorithms/qrc/pca.py @@ -12,13 +12,18 @@ Module implemting Principal Component Analysis (PCA) for dimensionality reduction. 
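The fit-on-train / transform-on-test split that the changes below introduce, shown in isolation with scikit-learn; the shapes and the delta_max value are illustrative stand-ins, not values taken from the package.

import numpy as np
from sklearn.decomposition import PCA

delta_max = 6.0  # feasible local-detuning scale, illustrative value
x_train = np.random.rand(1000, 784)  # flattened 28x28 images, illustrative data
x_test = np.random.rand(200, 784)

pca = PCA(n_components=10)
x_train_pca = pca.fit_transform(x_train)  # fit the components on training data only
x_test_pca = pca.transform(x_test)        # reuse the fitted components for test data

# Scale the PCA features into the feasible range of local detuning
x_train_pca = x_train_pca / np.max(np.abs(x_train_pca)) * delta_max
x_test_pca = x_test_pca / np.max(np.abs(x_test_pca)) * delta_max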
""" - +import numpy as np import torch from sklearn.decomposition import PCA + def pca_reduction( - data: torch.Tensor, n_components: int = 2, data_dim: int = 10, delta_max: int = 10, -) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: + data: torch.Tensor, + n_components: int, + data_dim: int, + delta_max: int, + train: bool = True, +) -> torch.Tensor: """ Perform PCA reduction on the provided data using PyTorch's pca_lowrank to reduce its dimensionality. @@ -28,22 +33,21 @@ def pca_reduction( n_components (int): The number of principal components to retain. data_dim (int) : The dimension of the input data required for doing PCA. delta_max (int) : The scaling factor for bring PCA values to the feasible range of local detuning. + train (bool, optional): Whether the data is training data. Defaults to True. Returns: - tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: A tuple containing the - transformed data and the PCA components (u, s, v) used for the transformation. - - TODO: Implement the PCA reduction function using torch.pca_lowrank or another suitable method. + torch.Tensor: The transformed data """ # Perform PCA on training data pca = PCA(n_components=n_components) - data_pca = pca.fit_transform(data.data.numpy().reshape(data_dim)) - + data_array: np.ndarray = data.data.numpy() + data_reshaped = data_array.reshape(-1, data_dim) + if train: + data_pca = pca.fit_transform(data_reshaped) + else: + data_pca = pca.transform(data_reshaped) + # Scale PCA values to feasible range of local detuning scaled_data_pca = data_pca / np.max(np.abs(data_pca)) * delta_max - # u, s, v = torch.pca_lowrank(data, q=n_components) - # transformed_data = torch.mm(data, v[:, :n_components]) - # return transformed_data, (u, s, v) - return scaled_data_pca diff --git a/qbraid_algorithms/qrc/test_rbh.py b/qbraid_algorithms/qrc/test_rbh.py deleted file mode 100644 index 14070be..0000000 --- a/qbraid_algorithms/qrc/test_rbh.py +++ /dev/null @@ -1,84 +0,0 @@ -# krylov.py -from dataclasses import dataclass -from bloqade.emulate.ir.emulator import Register -from bloqade.emulate.ir.state_vector import RydbergHamiltonian -from bloqade.atom_arrangement import Square -from scipy.linalg import expm -import numpy as np - -class KrylovOptions: - """Class that describes options for a Krylov subspace method.""" - def __init__(self, progress=False, progress_name="emulating", normalize_step=1, normalize_finally=True, tol=1e-7): - self.progress = progress - self.progress_name = progress_name - self.normalize_step = normalize_step - self.normalize_finally = normalize_finally - self.tol = tol - -@dataclass -class KrylovEvolution: - """Class that describes a time evolution using Krylov subspace methods.""" - reg: Register - start_clock: float - durations: list[float] - hamiltonian: RydbergHamiltonian - options: KrylovOptions - - def generate_krylov_basis(self, H, psi0, m): - """Generates the first m Krylov basis vectors.""" - n = len(psi0) - K = np.zeros((n, m), dtype=complex) - K[:, 0] = psi0 / np.linalg.norm(psi0) - for j in range(1, m): - K[:, j] = H @ K[:, j-1] - for k in range(j): - K[:, j] -= np.dot(K[:, k], K[:, j]) * K[:, k] - K[:, j] /= np.linalg.norm(K[:, j]) - return K - - def gram_schmidt(self, V): - """Orthonormalizes the vectors using the Gram-Schmidt process.""" - Q, R = np.linalg.qr(V) - return Q - - def krylov_evolution(self, psi0, t, m): - """Projects the Hamiltonian onto the Krylov subspace and computes the time evolution.""" - H = self.hamiltonian.rydberg # Access the 
correct attribute - K = self.generate_krylov_basis(H, psi0, m) - H_m = K.T.conj() @ H @ K - exp_Hm = expm(-1j * H_m * t) - psi_t = K @ exp_Hm @ K.T.conj() @ psi0 - return psi_t - - def emulate_step(self, step, clock, duration): - """Simulate a single time step of quantum evolution using the Krylov subspace method.""" - try: - psi0 = self.reg.state_vector - evolved_state = self.krylov_evolution(psi0, duration, len(self.durations)) - self.reg.state_vector = evolved_state - except Exception as e: - raise NotImplementedError(f"Emulation step failed: {e}") - - def normalize_register(self): - """Normalize the quantum register if specified in options.""" - if self.options.normalize_finally: - norm = np.linalg.norm(self.reg.state_vector) - if norm > self.options.tol: - self.reg.state_vector /= norm - -# Usage Example -# Define the initial state -initial_state = np.array([1, 0, 0, 0], dtype=complex) - -# Create a KrylovEvolution instance -krylov_options = KrylovOptions() -krylov_evolution = KrylovEvolution( - reg=Register(initial_state), - start_clock=0.0, - durations=[0.1, 0.2, 0.3], - hamiltonian=None, # This will be initialized in __post_init__ - options=krylov_options -) - -# Simulate the evolution (example step) -krylov_evolution.emulate_step(step=0, clock=0.0, duration=0.1) diff --git a/qosf_python_conversion.ipynb b/qosf_python_conversion.ipynb deleted file mode 100644 index 0c8c52c..0000000 --- a/qosf_python_conversion.ipynb +++ /dev/null @@ -1,281 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "384e875b-f354-47e3-93be-a2833bd5d47d", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/ec2-user/anaconda3/envs/Braket/lib/python3.10/site-packages/torch/cuda/__init__.py:619: UserWarning: Can't initialize NVML\n", - " warnings.warn(\"Can't initialize NVML\")\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", - "from torchvision import datasets, transforms\n", - "from sklearn.decomposition import PCA\n", - "from sklearn.preprocessing import OneHotEncoder\n", - "import bloqade\n", - "# from bloqade import KrylovKit" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "b09e1581-ce05-499f-9d20-81b928d80be3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Define constants\n", - "dim_pca = 10\n", - "Δ_max = 6.0\n", - "num_examples = 1000\n", - "num_test_examples = 100" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "98344ae5-5055-45a5-acea-133898451f37", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Load MNIST dataset\n", - "transform = transforms.Compose([transforms.ToTensor()])\n", - "train_dataset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)\n", - "test_dataset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=False, transform=transform)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "76522421-817d-4ff1-b514-0861e1290da0", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Create data loaders\n", - "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True)\n", - "test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "5d3d73cd-b187-46df-9e96-e66b832f9d49", - "metadata": { - "tags": [] - }, - 
"outputs": [], - "source": [ - "# Perform PCA on training data\n", - "pca = PCA(n_components=dim_pca)\n", - "x_train_pca = pca.fit_transform(train_dataset.data.numpy().reshape(-1, 28*28))\n", - "x_test_pca = pca.transform(test_dataset.data.numpy().reshape(-1, 28*28))" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "02936fa6-32f5-4085-8e6f-d78d84c76540", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Scale PCA values to feasible range of local detuning\n", - "x_train_pca = x_train_pca / np.max(np.abs(x_train_pca)) * Δ_max\n", - "x_test_pca = x_test_pca / np.max(np.abs(x_test_pca)) * Δ_max" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "3d2e9596-a5ed-4176-a786-33de409f6d50", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[ 1.88088681, -0.1077645 , -0.78267303, ..., 1.18395488,\n", - " -1.00375497, -0.46647276],\n", - " [-2.40351538, -0.38411485, 1.00276769, ..., 0.58196908,\n", - " 0.37119781, -0.21784087],\n", - " [-1.08367033, 0.1664487 , 0.65421782, ..., -0.2765957 ,\n", - " 0.24811255, -0.3418475 ],\n", - " ...,\n", - " [ 1.50126648, 0.89318566, -1.04370463, ..., -0.88698179,\n", - " -0.06128021, -2.01555388],\n", - " [-0.27316387, 1.61689005, -0.63724006, ..., 0.41777847,\n", - " 0.67804019, -0.54465725],\n", - " [-0.22766553, 1.77605246, 2.12734287, ..., -1.06862189,\n", - " 0.43011495, 0.13571932]])" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "x_test_pca[:, 1:num_examples]" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "ce2224e6-2296-4ad0-bf79-89c62fe52e8b", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# One-hot encode labels\n", - "encoder = OneHotEncoder(sparse_output=False)\n", - "y_train = encoder.fit_transform(train_dataset.targets.numpy().reshape(-1, 1))\n", - "y_test = encoder.transform(test_dataset.targets.numpy().reshape(-1, 1))" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "f3499830-b177-4cc5-87ac-77d1f47b8381", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[0., 0., 0., ..., 1., 0., 0.],\n", - " [0., 1., 0., ..., 0., 0., 0.],\n", - " [1., 0., 0., ..., 0., 0., 0.],\n", - " ...,\n", - " [0., 0., 0., ..., 0., 0., 0.],\n", - " [0., 0., 0., ..., 0., 0., 0.],\n", - " [0., 0., 0., ..., 0., 0., 0.]])" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "y_test[:, 1:num_examples]" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "5530692e-88d3-4200-ba84-0f2d37251010", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Define quantum reservoir computing (QRC) layer\n", - "class DetuningLayer(nn.Module):\n", - " def __init__(self, atoms, readouts, Ω, t_start, t_end, step):\n", - " super(DetuningLayer, self).__init__()\n", - " self.atoms = atoms\n", - " self.readouts = readouts\n", - " self.Ω = Ω\n", - " self.t_start = t_start\n", - " self.t_end = t_end\n", - " self.step = step\n", - " def forward(self, x):\n", - " # Simulate quantum dynamics and compute readouts\n", - " # have to use bloqade quantum\n", - " # This part is not implemented in Python, as it requires a quantum simulator \n", - " # calculating steps\n", - " self.atoms @ np.exp(-1j * h * (self.t_end - self.t_start))" - ] - }, - { - "cell_type": "markdown", - "id": "5eef6e17-5d42-4f2f-a9f4-81506b792d71", - "metadata": {}, - "source": [ 
- "## Defining a NN model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dd603eb8-c7d3-47b5-9b93-11951e05d166", - "metadata": {}, - "outputs": [], - "source": [ - "# Define neural network model\n", - "class Net(nn.Module):\n", - " def __init__(self):\n", - " super(Net, self).__init__()\n", - " self.fc1 = nn.Linear(dim_pca, 10)\n", - " def forward(self, x):\n", - " x = torch.relu(self.fc1(x))\n", - " return x\n", - " # Train classical model using PCA features\n", - " model_reg = Net()\n", - " criterion = nn.CrossEntropyLoss()\n", - " optimizer = optim.Adam(model_reg.parameters(), lr=0.01)\n", - " for epoch in range(1000):\n", - " for x, y in train_loader:\n", - " x = x.view(-1, 28*28)\n", - " x_pca = pca.transform(x.numpy())\n", - " x_pca = torch.tensor(x_pca, dtype=torch.float32)\n", - " y = torch.tensor(y, dtype=torch.long)\n", - " optimizer.zero_grad()\n", - " output = model_reg(x_pca)\n", - " loss = criterion(output, y)\n", - " loss.backward()\n", - " optimizer.step()\n", - " # Train QRC model using quantum reservoir computing\n", - " pre_layer = DetuningLayer(atoms, readouts, Ω, t_start, t_end, step)\n", - " model_qrc = Net()\n", - " for epoch in range(1000):\n", - " for x, y in train_loader:\n", - " x = x.view(-1, 28*28)\n", - " x_pca = pca.transform(x.numpy())\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "conda_braket", - "language": "python", - "name": "conda_braket" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.14" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tests/test_esn_reservoir.py b/tests/test_esn_reservoir.py index d1beb13..2e3cfe7 100644 --- a/tests/test_esn_reservoir.py +++ b/tests/test_esn_reservoir.py @@ -40,9 +40,7 @@ def test_reservoir_matrix_meets_target_sparsity(): """Test whether the internal weight matrix 'w' of a Reservoir instance meets the target sparsity within an acceptable margin of error.""" target_sparsity = 0.8 - reservoir = EchoStateReservoir( - input_size=10, hidden_size=100, sparsity=target_sparsity - ) + reservoir = EchoStateReservoir(input_size=10, hidden_size=100, sparsity=target_sparsity) total_elements = reservoir.w.numel() non_zero_elements = reservoir.w.nonzero().size(0) zero_elements = total_elements - non_zero_elements @@ -52,11 +50,7 @@ def test_reservoir_matrix_meets_target_sparsity(): expected_sparsity_lower_bound = target_sparsity - (target_sparsity * tolerance) expected_sparsity_upper_bound = target_sparsity + (target_sparsity * tolerance) - assert ( - expected_sparsity_lower_bound - <= actual_sparsity - <= expected_sparsity_upper_bound - ), ( + assert expected_sparsity_lower_bound <= actual_sparsity <= expected_sparsity_upper_bound, ( f"Actual sparsity {actual_sparsity} outside of expected range " f"[{expected_sparsity_lower_bound}, {expected_sparsity_upper_bound}]" ) diff --git a/tests/test_qbraid_algorithms/__init__.py b/tests/test_qbraid_algorithms/__init__.py deleted file mode 100644 index 321033c..0000000 --- a/tests/test_qbraid_algorithms/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .test_qrc import * diff --git a/tests/test_qbraid_algorithms/test_qrc/__init__.py b/tests/test_qbraid_algorithms/test_qrc/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py 
b/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py deleted file mode 100644 index 441f119..0000000 --- a/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py +++ /dev/null @@ -1,20 +0,0 @@ -import numpy as np -from qbraid_algorithms import qrc - -def test_simulate_dynamics(): - # Define a simple Hamiltonian and initial state - H = np.array([[0, 1], [1, 0]], dtype=complex) # Simple Hamiltonian - psi0 = np.array([1, 0], dtype=complex) # Initial state - t_final = 1.0 - dt = 0.01 - - # Create an instance of MagnusExpansion - magnus = qrc.magnus_expansion.MagnusExpansion(H) - - # Simulate the dynamics - final_state = magnus.simulate_dynamics(psi0, t_final, dt) - - # Add assertions to check the final state - # For example: - expected_final_state = np.array([0.54030231+0.84147098j, 0.00000000+0.j]) - np.testing.assert_allclose(final_state, expected_final_state, rtol=1e-5) diff --git a/tests/test_qrc_dynamics.py b/tests/test_qrc_dynamics.py new file mode 100644 index 0000000..3e77940 --- /dev/null +++ b/tests/test_qrc_dynamics.py @@ -0,0 +1,62 @@ +# Copyright (C) 2024 qBraid +# +# This file is part of the qBraid-SDK +# +# The qBraid-SDK is free software released under the GNU General Public License v3 +# or later. You can redistribute and/or modify it under the terms of the GPL v3. +# See the LICENSE file in the project root or . +# +# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. + +""" +Unit tests for Quantum Reservoir Computing (QRC) dynamics modules. + +""" + +import numpy as np +from bloqade.emulate.ir.emulator import Register + +from qbraid_algorithms.qrc.krylov import KrylovEvolution, KrylovOptions +from qbraid_algorithms.qrc.magnus_expansion import MagnusExpansion + + +def test_rbh(): + """Test the Rydberg Blockade Hamiltonian (RBH.""" + initial_state = np.array([1, 0, 0, 0], dtype=complex) + + # Create a KrylovEvolution instance + krylov_options = KrylovOptions() + krylov_evolution = KrylovEvolution( + reg=Register(initial_state), + start_clock=0.0, + durations=[0.1, 0.2, 0.3], + hamiltonian=None, # This will be initialized in __post_init__ + options=krylov_options, + ) + + # Simulate the evolution (example step) + krylov_evolution.emulate_step(step=0, clock=0.0, duration=0.1) + + passed = True # TODO + + assert passed + + +def test_simulate_dynamics(): + """Test the simulation of quantum dynamics using Magnus expansion.""" + # Define a simple Hamiltonian and initial state + h = np.array([[0, 1], [1, 0]], dtype=complex) # Simple Hamiltonian + psi0 = np.array([1, 0], dtype=complex) # Initial state + t_final = 1.0 + dt = 0.01 + + # Create an instance of MagnusExpansion + magnus = MagnusExpansion(h) + + # Simulate the dynamics + final_state = magnus.simulate_dynamics(psi0, t_final, dt) + + # Add assertions to check the final state + # For example: + expected_final_state = np.array([0.54030231 + 0.84147098j, 0.00000000 + 0.0j]) + np.testing.assert_allclose(final_state, expected_final_state, rtol=1e-5) From 34d72b61371644bf4b4a96991f071c2741ba8f55 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Tue, 11 Jun 2024 17:54:06 -0500 Subject: [PATCH 09/26] ci/cd + packaging updates --- .github/dependabot.yml | 11 ++++ .github/workflows/docs.yml | 4 +- .github/workflows/format.yml | 10 ++-- .github/workflows/main.yml | 22 ++++--- .github/workflows/pre-release.yml | 4 +- .github/workflows/publish.yml | 4 +- .../quantum_reservoir_computing_mnist.ipynb | 24 +++++--- pyproject.toml | 16 ++++- qbraid_algorithms/esn/reservoir.py | 2 +- 
qbraid_algorithms/qrc/__init__.py | 2 +- qbraid_algorithms/qrc/dynamics.py | 8 ++- qbraid_algorithms/qrc/krylov.py | 29 +++++---- .../qrc/{magnus_expansion.py => magnus.py} | 42 ++++++------- qbraid_algorithms/qrc/model.py | 3 +- qbraid_algorithms/qrc/pca.py | 4 +- tests/test_esn_reservoir.py | 2 +- tests/test_qrc_dynamics.py | 58 ++++++++++++++++-- tools/stamp_pre_release.py | 59 ++----------------- 18 files changed, 175 insertions(+), 129 deletions(-) create mode 100644 .github/dependabot.yml rename qbraid_algorithms/qrc/{magnus_expansion.py => magnus.py} (52%) diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..91abb11 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "pip" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 48ff2c1..a14153b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -12,9 +12,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' cache: pip diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 4ab3f81..212026c 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -10,17 +10,19 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' - name: Install dependencies run: | python3 -m pip install --upgrade pip - python3 -m pip install isort ruff qbraid-cli + python3 -m pip install isort 'black[jupyter]' pylint ruff pylint qbraid-cli - name: Check isort, ruff, headers run: | ruff check qbraid_algorithms examples tests tools isort --check-only qbraid_algorithms tests tools - qbraid admin headers qbraid_algorithms tests tools \ No newline at end of file + black --check qbraid_algorithms tests tools examples --line-length 100 + pylint qbraid_algorithms tests tools examples --disable=W0108,W0511,W0401,R0902 --ignore=qbraid_algorithms/_version.py + qbraid admin headers qbraid_algorithms tests tools --type gpl --skip qbraid_algorithms/_version.py \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index c3fa048..c8d15f1 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -15,9 +15,9 @@ jobs: python-version: ['3.11'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: pip @@ -29,7 +29,7 @@ jobs: run: | python -m build - name: Upload built package - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: built-package path: dist/*.whl @@ -43,14 +43,14 @@ jobs: python-version: ['3.9', '3.10', '3.11', '3.12'] steps: - - uses: actions/checkout@v3 + - 
uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: pip - name: Download built package - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: built-package path: dist @@ -67,4 +67,12 @@ jobs: pip install pytest - name: Run tests with pytest run: | - pytest tests \ No newline at end of file + pytest tests --cov=qbraid_algorithms --cov-config=pyproject.toml --cov-report=term --cov-report=xml + - name: Upload coverage reports to Codecov + if: matrix.python-version == '3.11' && matrix.os == 'ubuntu-latest' + uses: codecov/codecov-action@v4.0.1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: false + files: ./build/coverage/coverage.xml + verbose: true \ No newline at end of file diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 10f789b..46a5ab3 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -9,12 +9,12 @@ jobs: runs-on: ubuntu-latest environment: publish steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 1 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 5812b6c..48a85ad 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -9,12 +9,12 @@ jobs: runs-on: ubuntu-latest environment: publish steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 1 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' diff --git a/examples/quantum_reservoir_computing_mnist.ipynb b/examples/quantum_reservoir_computing_mnist.ipynb index afe7b30..fb440bb 100644 --- a/examples/quantum_reservoir_computing_mnist.ipynb +++ b/examples/quantum_reservoir_computing_mnist.ipynb @@ -9,16 +9,18 @@ }, "outputs": [], "source": [ - "import numpy as np\n", + "# import numpy as np\n", "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", + "\n", + "# import torch.nn as nn\n", + "# import torch.optim as optim\n", "from torchvision import datasets, transforms\n", "\n", "from qbraid_algorithms.qrc.encoding import one_hot_encoding\n", "from qbraid_algorithms.qrc.pca import pca_reduction\n", - "from qbraid_algorithms.qrc.dynamics import DetuningLayer, generate_sites, apply_layer\n", - "from qbraid_algorithms.qrc.model import QRCModel" + "\n", + "# from qbraid_algorithms.qrc.dynamics import DetuningLayer, generate_sites, apply_layer\n", + "# from qbraid_algorithms.qrc.model import QRCModel" ] }, { @@ -42,8 +44,8 @@ "source": [ "# Load MNIST dataset\n", "transform = transforms.Compose([transforms.ToTensor()])\n", - "train_dataset = datasets.MNIST('./data/', download=True, train=True, transform=transform)\n", - "test_dataset = datasets.MNIST('./data/', download=True, train=False, transform=transform)\n", + "train_dataset = datasets.MNIST(\"./data/\", download=True, train=True, transform=transform)\n", + "test_dataset = datasets.MNIST(\"./data/\", download=True, train=False, transform=transform)\n", "\n", "# Create data loaders\n", "train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True)\n", @@ -79,8 +81,12 @@ "metadata": {}, "outputs": [], "source": [ - "x_train_pca = pca_reduction(train_dataset, n_components=10, 
data_dim=28*28, delta_max=delta_max, train=True)\n", - "x_test_pca = pca_reduction(train_dataset, n_components=10, data_dim=28*28, delta_max=delta_max, train=True)" + "x_train_pca = pca_reduction(\n", + " train_dataset, n_components=10, data_dim=28 * 28, delta_max=delta_max, train=True\n", + ")\n", + "x_test_pca = pca_reduction(\n", + " train_dataset, n_components=10, data_dim=28 * 28, delta_max=delta_max, train=True\n", + ")" ] }, { diff --git a/pyproject.toml b/pyproject.toml index 7cb8220..5004c72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "qbraid-algorithms" -version = "0.1.0a0" +version = "0.1.0-alpha" authors = [{name = "qBraid Development Team"}, {email = "contact@qbraid.com"}] description = "Python package for building, simulating, and benchmarking hybrid quantum-classical algorithms." readme = "README.md" @@ -25,7 +25,7 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Physics" ] -dependencies = ["torch>=2.3.0,<3.0", "numpy>=1.17,<1.27", "bloqade>=0.15.12,<0.16"] +dependencies = ["torch>=2.3.0,<3.0", "numpy>=1.17,<1.27", "scipy~=1.13.1", "bloqade>=0.15.12,<0.16"] [project.urls] Homepage = "https://github.com/qBraid/qbraid-algorithms" @@ -65,6 +65,18 @@ exclude = ''' )/ ''' +[tool.pylint.'MESSAGES CONTROL'] +max-line-length = 100 +disable = "W0108, W0511, W0401, R0902" + +[tool.pylint.MASTER] +ignore-paths = [ + "^.*\\_version.py$", +] + [tool.pytest.ini_options] addopts = "-ra" testpaths = ["tests"] +filterwarnings = [ + "ignore::DeprecationWarning", +] diff --git a/qbraid_algorithms/esn/reservoir.py b/qbraid_algorithms/esn/reservoir.py index 83b5c72..030d20a 100644 --- a/qbraid_algorithms/esn/reservoir.py +++ b/qbraid_algorithms/esn/reservoir.py @@ -51,7 +51,7 @@ def _generate_w(self, mean: float = 0.0, max_retries: int = 3) -> torch.Tensor: mean=mean, std=self.spectral_radius ) w = torch.where(torch.rand_like(w) > self.sparsity, w, torch.zeros_like(w)) - eigenvalues = torch.linalg.eigvals(w) + eigenvalues = torch.linalg.eigvals(w) # pylint: disable=not-callable max_abs_eigenvalue = torch.max(torch.abs(eigenvalues)).item() if max_abs_eigenvalue != 0: w *= self.spectral_radius / max_abs_eigenvalue diff --git a/qbraid_algorithms/qrc/__init__.py b/qbraid_algorithms/qrc/__init__.py index d223648..cf61dc8 100644 --- a/qbraid_algorithms/qrc/__init__.py +++ b/qbraid_algorithms/qrc/__init__.py @@ -22,7 +22,7 @@ """ -from .magnus_expansion import MagnusExpansion +from .magnus import MagnusExpansion from .model import QRCModel __all__ = ["QRCModel", "MagnusExpansion"] diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index 28ef630..9511cf1 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -110,15 +110,17 @@ def apply_layer(layer: DetuningLayer, x: np.ndarray) -> np.ndarray: # Numerically simulate the quantum evolution with Krylov methods and store the readouts i = 1 - prob = KrylovEvolution(reg, start_clock=start_clock, hamiltonian=h) + prob = KrylovEvolution( + reg, start_clock=start_clock, durations=[t_step] * steps, hamiltonian=h, options=None + ) for i in range(steps): # ignore first time step, this is just the initial state if i == 0: continue # TODO: Implement the emulation step function. - # NOTE: The following lines are placeholders, are not necessarily correct, and should be replaced. - prob = prob.emulate_step(i, t_start + i * t_step, t_step) + # NOTE: The following lines are placehoders. 
They are not correct, and should be replaced. + prob.emulate_step(i, t_start + i * t_step, t_step) for j, readout in enumerate(layer.readouts): out[i * len(layer.readouts) + j] = readout(prob) diff --git a/qbraid_algorithms/qrc/krylov.py b/qbraid_algorithms/qrc/krylov.py index bd46ef4..7b1f9c4 100644 --- a/qbraid_algorithms/qrc/krylov.py +++ b/qbraid_algorithms/qrc/krylov.py @@ -15,14 +15,15 @@ from dataclasses import dataclass import numpy as np -from bloqade.atom_arrangement import Square -from bloqade.emulate.ir.emulator import Register -from bloqade.emulate.ir.state_vector import RybdbergInteraction, RydbergHamiltonian + +# from bloqade.atom_arrangement import Square +# from bloqade.emulate.ir.emulator import Register +from bloqade.emulate.ir.state_vector import RydbergHamiltonian, StateVector from scipy.linalg import expm # Placeholder for Krylov options with dummy attributes -class KrylovOptions: +class KrylovOptions: # pylint: disable=too-few-public-methods """Class that describes options for a Krylov subspace method. Args: @@ -34,6 +35,7 @@ class KrylovOptions: """ + # pylint: disable-next=too-many-arguments def __init__( self, progress: bool = False, @@ -61,7 +63,7 @@ class KrylovEvolution: options (KrylovOptions): Options for the evolution process. """ - reg: Register + reg: StateVector # Register? start_clock: float durations: list[float] hamiltonian: RydbergHamiltonian @@ -88,10 +90,11 @@ def krylov_evolution(self, h, psi_0, t, m): """Projects H onto the Krylov subspace and computes the time evolution.""" k = self.generate_krylov_basis(h, psi_0, m) h_m = k.T.conj() @ h @ k - exp_h_m = np.expm(-1j * h_m * t) + exp_h_m = expm(-1j * h_m * t) psi_t = k @ exp_h_m @ k.T.conj() @ psi_0 return psi_t + # pylint: disable-next=unused-argument def emulate_step(self, step: int, clock: float, duration: float) -> "KrylovEvolution": """ Simulate a single time step of quantum evolution using the Krylov subspace method. @@ -107,21 +110,25 @@ def emulate_step(self, step: int, clock: float, duration: float) -> "KrylovEvolu TODO: Implement the emulation step function. """ try: - psi_0 = self.reg.state_vector + psi_0 = self.reg evolved_state = self.krylov_evolution( self.hamiltonian.rydberg, psi_0, duration, len(self.durations) ) - self.reg.state_vector = evolved_state + self.reg = evolved_state except Exception as err: raise NotImplementedError("Emulation step failed") from err - def normalize_register(self): + def normalize_register(self) -> None: """ Normalize the quantum register if specified in options. TODO: Implement the normalization logic. 
""" + # https://github.com/QuEraComputing/bloqade-python/blob/17585b21ad8f099ac8ffe126257e8ffb6c7f4588/src/bloqade/emulate/ir/state_vector.py#L208-L215 + # data = self.reg.data + # data /= np.linalg.norm(data) + # self.reg.data = data if self.options.normalize_finally: - norm = np.linalg.norm(self.reg.state_vector) + norm = np.linalg.norm(self.reg) if norm > self.options.tol: - self.reg.state_vector /= norm + self.reg /= norm diff --git a/qbraid_algorithms/qrc/magnus_expansion.py b/qbraid_algorithms/qrc/magnus.py similarity index 52% rename from qbraid_algorithms/qrc/magnus_expansion.py rename to qbraid_algorithms/qrc/magnus.py index 4ec2297..fefb908 100644 --- a/qbraid_algorithms/qrc/magnus_expansion.py +++ b/qbraid_algorithms/qrc/magnus.py @@ -22,49 +22,49 @@ class MagnusExpansion: """ - def __init__(self, H): - self.H = H + def __init__(self, h): + self.h = h - def commutator(self, A, B): + def commutator(self, a, b): """Compute the commutator of two matrices.""" - return A @ B - B @ A + return a @ b - b @ a def compute_magnus_terms(self, t): """Compute the terms of the Magnus expansion.""" - H_t = self.H * t - Ω1 = H_t + h_t = self.h * t + omega_1 = h_t # Second-order term - comm_H1_H2 = self.commutator(self.H, self.H) - Ω2 = 0.5 * (comm_H1_H2 * t**2) + comm_h1_h2 = self.commutator(self.h, self.h) + omega_2 = 0.5 * (comm_h1_h2 * t**2) # Third-order term - comm_H1_comm_H2_H3 = self.commutator(self.H, self.commutator(self.H, self.H)) - comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.H, self.H), self.H) - Ω3 = (1 / 6) * (comm_H1_comm_H2_H3 + comm_H3_comm_H2_H1) * t**3 + comm_h1_comm_h2_h3 = self.commutator(self.h, self.commutator(self.h, self.h)) + comm_h3_comm_h2_h1 = self.commutator(self.commutator(self.h, self.h), self.h) + omega_3 = (1 / 6) * (comm_h1_comm_h2_h3 + comm_h3_comm_h2_h1) * t**3 # Fourth-order term - comm_H1_comm_H2_comm_H3_H4 = self.commutator( - self.H, self.commutator(self.H, self.commutator(self.H, self.H)) + comm_h1_comm_h2_comm_h3_h4 = self.commutator( + self.h, self.commutator(self.h, self.commutator(self.h, self.h)) ) - comm_H4_comm_H3_comm_H2_H1 = self.commutator( - self.commutator(self.commutator(self.H, self.H), self.H), self.H + comm_h4_comm_h3_comm_h2_h1 = self.commutator( + self.commutator(self.commutator(self.h, self.h), self.h), self.h ) - Ω4 = (1 / 24) * (comm_H1_comm_H2_comm_H3_H4 + comm_H4_comm_H3_comm_H2_H1) * t**4 + omega_4 = (1 / 24) * (comm_h1_comm_h2_comm_h3_h4 + comm_h4_comm_h3_comm_h2_h1) * t**4 - return Ω1 + Ω2 + Ω3 + Ω4 + return omega_1 + omega_2 + omega_3 + omega_4 def time_evolution_operator(self, t): """Compute the time evolution operator using Magnus expansion.""" - Ω = self.compute_magnus_terms(t) - return expm(Ω) + omega = self.compute_magnus_terms(t) + return expm(omega) def simulate_dynamics(self, psi0, t_final, dt): """Simulate the dynamics of the system.""" psi = psi0 t = 0 while t < t_final: - U = self.time_evolution_operator(dt) - psi = U @ psi + u = self.time_evolution_operator(dt) + psi = u @ psi t += dt return psi diff --git a/qbraid_algorithms/qrc/model.py b/qbraid_algorithms/qrc/model.py index 200ed4a..8b6c318 100644 --- a/qbraid_algorithms/qrc/model.py +++ b/qbraid_algorithms/qrc/model.py @@ -17,10 +17,11 @@ from typing import Any, Callable import numpy as np -import torch.nn as nn from .dynamics import DetuningLayer +# from torch import nn + @dataclass class QRCModel: diff --git a/qbraid_algorithms/qrc/pca.py b/qbraid_algorithms/qrc/pca.py index fdddd0e..a7139d0 100644 --- a/qbraid_algorithms/qrc/pca.py +++ 
b/qbraid_algorithms/qrc/pca.py @@ -31,8 +31,8 @@ def pca_reduction( Args: data (torch.Tensor): The input data tensor where each row represents a sample. n_components (int): The number of principal components to retain. - data_dim (int) : The dimension of the input data required for doing PCA. - delta_max (int) : The scaling factor for bring PCA values to the feasible range of local detuning. + data_dim (int): The dimension of the input data required for doing PCA. + delta_max (int): Scaling factor to bring PCA vals into a feasible range for local detuning. train (bool, optional): Whether the data is training data. Defaults to True. Returns: diff --git a/tests/test_esn_reservoir.py b/tests/test_esn_reservoir.py index 2e3cfe7..f2bdb25 100644 --- a/tests/test_esn_reservoir.py +++ b/tests/test_esn_reservoir.py @@ -68,7 +68,7 @@ def test_spectral_radius(): reservoir = EchoStateReservoir( input_size=10, hidden_size=100, sparsity=0.5, spectral_radius=0.95 ) - eigenvalues = torch.linalg.eigvals(reservoir.w) + eigenvalues = torch.linalg.eigvals(reservoir.w) # pylint: disable=not-callable max_eigenvalue = torch.max(torch.abs(eigenvalues)).item() assert pytest.approx(max_eigenvalue, 0.01) == 0.95 diff --git a/tests/test_qrc_dynamics.py b/tests/test_qrc_dynamics.py index 3e77940..77515af 100644 --- a/tests/test_qrc_dynamics.py +++ b/tests/test_qrc_dynamics.py @@ -8,26 +8,70 @@ # # THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. +# pylint: disable=redefined-outer-name + """ Unit tests for Quantum Reservoir Computing (QRC) dynamics modules. """ +from unittest.mock import Mock import numpy as np +import pytest +from bloqade.emulate.ir.atom_type import AtomType from bloqade.emulate.ir.emulator import Register +from bloqade.emulate.ir.space import Space, SpaceType +from bloqade.emulate.ir.state_vector import StateVector +from numpy.typing import NDArray from qbraid_algorithms.qrc.krylov import KrylovEvolution, KrylovOptions -from qbraid_algorithms.qrc.magnus_expansion import MagnusExpansion +from qbraid_algorithms.qrc.magnus import MagnusExpansion + + +@pytest.fixture +def program_register() -> Register: + """Create a program register.""" + return Mock() + + +@pytest.fixture +def atom_type() -> AtomType: + """Create an atom type.""" + return Mock() + + +@pytest.fixture +def configurations() -> NDArray: + """Create configurations.""" + return Mock() + + +@pytest.fixture +def space_type() -> SpaceType: + """Create a space type.""" + return Mock() + + +@pytest.fixture +def space(program_register, atom_type, configurations, space_type) -> Space: + """Create a space object.""" + return Space( + space_type=space_type, + atom_type=atom_type, + program_register=program_register, + configurations=configurations, + ) -def test_rbh(): - """Test the Rydberg Blockade Hamiltonian (RBH.""" +@pytest.mark.skip(reason="Not implemented yet") +def test_rbh(space): + """Test the Rydberg Blockade Hamiltonian (RBH)""" initial_state = np.array([1, 0, 0, 0], dtype=complex) # Create a KrylovEvolution instance krylov_options = KrylovOptions() krylov_evolution = KrylovEvolution( - reg=Register(initial_state), + reg=StateVector(data=initial_state, space=space), start_clock=0.0, durations=[0.1, 0.2, 0.3], hamiltonian=None, # This will be initialized in __post_init__ @@ -37,11 +81,13 @@ def test_rbh(): # Simulate the evolution (example step) krylov_evolution.emulate_step(step=0, clock=0.0, duration=0.1) - passed = True # TODO + final_state = np.array([], dtype=complex) # dummy value + expected_value = 
StateVector(data=final_state, space=space) - assert passed + assert krylov_evolution.reg == expected_value +@pytest.mark.skip(reason="Not completed yet") def test_simulate_dynamics(): """Test the simulation of quantum dynamics using Magnus expansion.""" # Define a simple Hamiltonian and initial state diff --git a/tools/stamp_pre_release.py b/tools/stamp_pre_release.py index 65b40b7..367c97d 100755 --- a/tools/stamp_pre_release.py +++ b/tools/stamp_pre_release.py @@ -15,60 +15,11 @@ import pathlib -from packaging.version import Version, parse -from qbraid_core.system import extract_version, get_latest_package_version - -PROJECT_ROOT = pathlib.Path(__file__).parent.parent.resolve() -PYPROJECT_TOML_PATH = PROJECT_ROOT / "pyproject.toml" - -PACKAGE_NAME = "qbraid_algorithms" - - -class PreReleaseVersionError(Exception): - """Class for exceptions raised while stamping pre-release version.""" - - -def get_bumped_version(latest: str, local: str) -> str: - """Compare latest and local versions and return the bumped version.""" - latest_version = parse(latest) - local_version = parse(local) - - def bump_prerelease(version: Version) -> str: - if version.pre: - pre_type, pre_num = version.pre[0], version.pre[1] - return f"{version.base_version}-{pre_type}.{pre_num + 1}" - return f"{version.base_version}-a.0" - - if local_version.base_version > latest_version.base_version: - return f"{local_version.base_version}-a.0" - if local_version.base_version == latest_version.base_version: - if latest_version.is_prerelease: - if local_version.is_prerelease: - if local_version.pre[0] == latest_version.pre[0]: - if local_version.pre[1] > latest_version.pre[1]: - raise PreReleaseVersionError( - "Local version prerelease is newer than latest version." - ) - return bump_prerelease(latest_version) - if local_version.pre[0] < latest_version.pre[0]: - return bump_prerelease(latest_version) - return f"{local_version.base_version}-{local_version.pre[0]}.0" - raise PreReleaseVersionError("Latest version is prerelease but local version is not.") - if local_version.is_prerelease: - return f"{local_version.base_version}-{local_version.pre[0]}.0" - if local_version == latest_version: - return f"{local_version.base_version}-a.0" - raise PreReleaseVersionError( - "Local version base is equal to latest, but no clear upgrade path found." 
- ) - raise PreReleaseVersionError("Latest version base is greater than local, cannot bump.") - +from qbraid_core.system.versions import get_prelease_version if __name__ == "__main__": - if not PYPROJECT_TOML_PATH.exists(): - raise FileNotFoundError("pyproject.toml not found") - v_local = extract_version(PYPROJECT_TOML_PATH, shorten_prerelease=True) - v_latest = get_latest_package_version(PACKAGE_NAME, prerelease=True) - v_prerelease = get_bumped_version(v_latest, v_local) - print(v_prerelease) + PACKAGE = "qbraid_algorithms" + root = pathlib.Path(__file__).parent.parent.resolve() + version = get_prelease_version(root, PACKAGE) + print(version) From 7e23172bba9a398b945f82023179bf9647f8d486 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Tue, 11 Jun 2024 18:04:23 -0500 Subject: [PATCH 10/26] fix workflows --- .github/workflows/format.yml | 2 +- .github/workflows/main.yml | 2 +- docs/conf.py | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 212026c..7277b55 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -24,5 +24,5 @@ jobs: ruff check qbraid_algorithms examples tests tools isort --check-only qbraid_algorithms tests tools black --check qbraid_algorithms tests tools examples --line-length 100 - pylint qbraid_algorithms tests tools examples --disable=W0108,W0511,W0401,R0902 --ignore=qbraid_algorithms/_version.py + pylint qbraid_algorithms tests tools examples --disable=W0108,W0511,W0401,R0902,E0401 --ignore=qbraid_algorithms/_version.py qbraid admin headers qbraid_algorithms tests tools --type gpl --skip qbraid_algorithms/_version.py \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index c8d15f1..abb68e9 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -64,7 +64,7 @@ jobs: shell: pwsh - name: Install testing dependencies run: | - pip install pytest + pip install pytest pytest-cov - name: Run tests with pytest run: | pytest tests --cov=qbraid_algorithms --cov-config=pyproject.toml --cov-report=term --cov-report=xml diff --git a/docs/conf.py b/docs/conf.py index 79ec3d5..7609c67 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -27,6 +27,8 @@ "sphinx.ext.autosummary", ] +autodoc_mock_imports = ["torchvision"] + # The master toctree document. 
master_doc = "index" From 665a941934e318ce907319392b53b7b8be355854 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Tue, 11 Jun 2024 18:10:04 -0500 Subject: [PATCH 11/26] lazy load torchvision --- .github/workflows/docs.yml | 2 +- .github/workflows/format.yml | 4 ++-- qbraid_algorithms/datasets/mnist.py | 7 ++++--- qbraid_algorithms/esn/model.py | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a14153b..bbdf55e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -1,4 +1,4 @@ -name: Build Docs +name: Docs on: pull_request: diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 7277b55..3201174 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,4 +1,4 @@ -name: Formatting check +name: Format on: pull_request: @@ -6,7 +6,7 @@ on: workflow_dispatch: jobs: - main: + check: runs-on: ubuntu-latest steps: diff --git a/qbraid_algorithms/datasets/mnist.py b/qbraid_algorithms/datasets/mnist.py index faa785d..9101c20 100644 --- a/qbraid_algorithms/datasets/mnist.py +++ b/qbraid_algorithms/datasets/mnist.py @@ -13,13 +13,14 @@ """ import numpy as np -import torchvision as tv def load_mnist_data(download: bool = False, train: bool = True) -> np.ndarray: """Load the MNIST dataset.""" - transform = tv.transforms.Compose([tv.transforms.ToTensor()]) - dataset = tv.datasets.MNIST( + import torchvision # pylint: disable=import-outside-toplevel + + transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) + dataset = torchvision.datasets.MNIST( "./MNIST_data/", download=download, train=train, transform=transform ) dataset_array: np.ndarray = dataset.data.numpy() diff --git a/qbraid_algorithms/esn/model.py b/qbraid_algorithms/esn/model.py index dc4dddd..1245658 100644 --- a/qbraid_algorithms/esn/model.py +++ b/qbraid_algorithms/esn/model.py @@ -18,7 +18,7 @@ from .reservoir import EchoStateReservoir -class EchoStateNetwork(torch.nn.Module): +class EchoStateNetwork(torch.nn.Module): # pylint: disable=too-few-public-methods """ An Echo State Network module that combines a Reservoir with a fully connected output layer. 
From b18138f7e01d57922f0509b2b4b716f08a3f3d07 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Tue, 11 Jun 2024 18:12:33 -0500 Subject: [PATCH 12/26] fix headers skip --- .github/workflows/format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 3201174..a73073a 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -25,4 +25,4 @@ jobs: isort --check-only qbraid_algorithms tests tools black --check qbraid_algorithms tests tools examples --line-length 100 pylint qbraid_algorithms tests tools examples --disable=W0108,W0511,W0401,R0902,E0401 --ignore=qbraid_algorithms/_version.py - qbraid admin headers qbraid_algorithms tests tools --type gpl --skip qbraid_algorithms/_version.py \ No newline at end of file + qbraid admin headers qbraid_algorithms tests tools --type gpl \ No newline at end of file From 5f3ea71fb4ff0c979fc8073da1a8dea3c638fb26 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Tue, 11 Jun 2024 18:19:23 -0500 Subject: [PATCH 13/26] add code coverage badge --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 010d71b..5c35435 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,9 @@ CI + + + PyPI version From ac8f2fe188989ddf805e61062054defc8b5c2b26 Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sun, 19 May 2024 16:11:33 -0400 Subject: [PATCH 14/26] feat(qrc): Implement Magnus expansion for simulating quantum dynamics (#13) * Initial commit * figuring out time series * add back esn and reservoir into code * finished(?) with esn time series * finished time series stuff; added to qbraid-algorithms/datasets * time series esn updates * fixed time series * add ruff * add pr workflow * update dev version + warning * isort fix * update format workflow name * fix docs + ruff * add qBraid bot * module structure + placeholder classes * format * update QRC template / placeholders * reflog * reflog (#12) * feat(qrc): add MagnusExpansion and test file * added magnus_expansion.py file * feat(qrc): add MagnusExpansion --------- Co-authored-by: Ryan Hill Co-authored-by: Rohan Jain --- qbraid_algorithms/qrc/__init__.py | 1 + qbraid_algorithms/qrc/magnus_expansion.py | 41 +++++++++++++++++++ tests/test_qbraid_algorithms/__init__.py | 1 + .../test_qrc/__init__.py | 0 .../test_qrc/test_magnus_expansion.py | 20 +++++++++ 5 files changed, 63 insertions(+) create mode 100644 qbraid_algorithms/qrc/magnus_expansion.py create mode 100644 tests/test_qbraid_algorithms/__init__.py create mode 100644 tests/test_qbraid_algorithms/test_qrc/__init__.py create mode 100644 tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py diff --git a/qbraid_algorithms/qrc/__init__.py b/qbraid_algorithms/qrc/__init__.py index cf61dc8..af80ecb 100644 --- a/qbraid_algorithms/qrc/__init__.py +++ b/qbraid_algorithms/qrc/__init__.py @@ -24,5 +24,6 @@ from .magnus import MagnusExpansion from .model import QRCModel +from .magnus_expansion import MagnusExpansion __all__ = ["QRCModel", "MagnusExpansion"] diff --git a/qbraid_algorithms/qrc/magnus_expansion.py b/qbraid_algorithms/qrc/magnus_expansion.py new file mode 100644 index 0000000..b183ae1 --- /dev/null +++ b/qbraid_algorithms/qrc/magnus_expansion.py @@ -0,0 +1,41 @@ +from scipy.linalg import expm + +class MagnusExpansion: + def __init__(self, H): + self.H = H + + def commutator(self, A, B): + return A @ B - B @ A + + def compute_magnus_terms(self, t): + H_t = self.H * t + Ω1 = H_t + 
+ # Second-order term + comm_H1_H2 = self.commutator(self.H, self.H) + Ω2 = 0.5 * (comm_H1_H2 * t**2) + + # Third-order term + comm_H1_comm_H2_H3 = self.commutator(self.H, self.commutator(self.H, self.H)) + comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.H, self.H), self.H) + Ω3 = (1/6) * (comm_H1_comm_H2_H3 + comm_H3_comm_H2_H1) * t**3 + + # Fourth-order term + comm_H1_comm_H2_comm_H3_H4 = self.commutator(self.H, self.commutator(self.H, self.commutator(self.H, self.H))) + comm_H4_comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.commutator(self.H, self.H), self.H), self.H) + Ω4 = (1/24) * (comm_H1_comm_H2_comm_H3_H4 + comm_H4_comm_H3_comm_H2_H1) * t**4 + + return Ω1 + Ω2 + Ω3 + Ω4 + + def time_evolution_operator(self, t): + Ω = self.compute_magnus_terms(t) + return expm(Ω) + + def simulate_dynamics(self, psi0, t_final, dt): + psi = psi0 + t = 0 + while t < t_final: + U = self.time_evolution_operator(dt) + psi = U @ psi + t += dt + return psi diff --git a/tests/test_qbraid_algorithms/__init__.py b/tests/test_qbraid_algorithms/__init__.py new file mode 100644 index 0000000..321033c --- /dev/null +++ b/tests/test_qbraid_algorithms/__init__.py @@ -0,0 +1 @@ +from .test_qrc import * diff --git a/tests/test_qbraid_algorithms/test_qrc/__init__.py b/tests/test_qbraid_algorithms/test_qrc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py b/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py new file mode 100644 index 0000000..441f119 --- /dev/null +++ b/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py @@ -0,0 +1,20 @@ +import numpy as np +from qbraid_algorithms import qrc + +def test_simulate_dynamics(): + # Define a simple Hamiltonian and initial state + H = np.array([[0, 1], [1, 0]], dtype=complex) # Simple Hamiltonian + psi0 = np.array([1, 0], dtype=complex) # Initial state + t_final = 1.0 + dt = 0.01 + + # Create an instance of MagnusExpansion + magnus = qrc.magnus_expansion.MagnusExpansion(H) + + # Simulate the dynamics + final_state = magnus.simulate_dynamics(psi0, t_final, dt) + + # Add assertions to check the final state + # For example: + expected_final_state = np.array([0.54030231+0.84147098j, 0.00000000+0.j]) + np.testing.assert_allclose(final_state, expected_final_state, rtol=1e-5) From a94ec4fc545f9b9fc5e88df4ab1663ab9518395d Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Wed, 26 Jun 2024 21:39:34 -0400 Subject: [PATCH 15/26] Refactor towards bloqade's built in evolution --- qbraid_algorithms/qrc/dynamics.py | 4 +- qbraid_algorithms/qrc/krylov.py | 134 ------------------------ qbraid_algorithms/qrc/time_evolution.py | 87 +++++++++++++++ tests/test_qrc_dynamics.py | 6 +- 4 files changed, 92 insertions(+), 139 deletions(-) delete mode 100644 qbraid_algorithms/qrc/krylov.py create mode 100644 qbraid_algorithms/qrc/time_evolution.py diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index 9511cf1..6c77678 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -22,7 +22,7 @@ from bloqade.emulate.ir.emulator import Register from bloqade.emulate.ir.state_vector import RydbergHamiltonian -from .krylov import KrylovEvolution +from .time_evolution import AnalogEvolution @dataclass @@ -110,7 +110,7 @@ def apply_layer(layer: DetuningLayer, x: np.ndarray) -> np.ndarray: # Numerically simulate the quantum evolution with Krylov methods and store the readouts i = 1 
- prob = KrylovEvolution( + prob = AnalogEvolution( reg, start_clock=start_clock, durations=[t_step] * steps, hamiltonian=h, options=None ) for i in range(steps): diff --git a/qbraid_algorithms/qrc/krylov.py b/qbraid_algorithms/qrc/krylov.py deleted file mode 100644 index 7b1f9c4..0000000 --- a/qbraid_algorithms/qrc/krylov.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (C) 2024 qBraid -# -# This file is part of the qBraid-SDK -# -# The qBraid-SDK is free software released under the GNU General Public License v3 -# or later. You can redistribute and/or modify it under the terms of the GPL v3. -# See the LICENSE file in the project root or . -# -# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. - -""" -Module for quantum time evolution using Krylov subspace methods. - -""" -from dataclasses import dataclass - -import numpy as np - -# from bloqade.atom_arrangement import Square -# from bloqade.emulate.ir.emulator import Register -from bloqade.emulate.ir.state_vector import RydbergHamiltonian, StateVector -from scipy.linalg import expm - - -# Placeholder for Krylov options with dummy attributes -class KrylovOptions: # pylint: disable=too-few-public-methods - """Class that describes options for a Krylov subspace method. - - Args: - progress (bool): Whether to show progress during the evolution. - progress_name (str): Name for the progress indicator. - normalize_step (int): Frequency of normalization steps. - normalize_finally (bool): Whether to normalize the quantum state at the end. - tol (float): Tolerance for numerical operations. - - """ - - # pylint: disable-next=too-many-arguments - def __init__( - self, - progress: bool = False, - progress_name: str = "emulating", - normalize_step: int = 1, - normalize_finally: bool = True, - tol: float = 1e-7, - ): - self.progress = progress - self.progress_name = progress_name - self.normalize_step = normalize_step - self.normalize_finally = normalize_finally - self.tol = tol - - -@dataclass -class KrylovEvolution: - """Class that describes a time evolution using Krylov subspace methods. - - Args: - reg (Register): Quantum register for the evolution. - start_clock (float): Start time of the evolution. - durations (list[float]): Durations of each time step. - hamiltonian (RydbergHamiltonian): Hamiltonian for the evolution. - options (KrylovOptions): Options for the evolution process. - """ - - reg: StateVector # Register? - start_clock: float - durations: list[float] - hamiltonian: RydbergHamiltonian - options: KrylovOptions - - def generate_krylov_basis(self, h, psi_0, m): - """Generates the first m Krylov basis vectors.""" - n = len(psi_0) - k = np.zeros((n, m), dtype=complex) - k[:, 0] = psi_0 / np.linalg.norm(psi_0) - for j in range(1, m): - k[:, j] = h @ k[:, j - 1] - for i in range(j): - k[:, j] -= np.dot(k[:, i], k[:, j]) * k[:, i] - k[:, j] /= np.linalg.norm(k[:, j]) - return k - - def gram_schmidt(self, v): - """Orthonormalizes the vectors using the Gram-Schmidt process.""" - q, _ = np.linalg.qr(v) - return q - - def krylov_evolution(self, h, psi_0, t, m): - """Projects H onto the Krylov subspace and computes the time evolution.""" - k = self.generate_krylov_basis(h, psi_0, m) - h_m = k.T.conj() @ h @ k - exp_h_m = expm(-1j * h_m * t) - psi_t = k @ exp_h_m @ k.T.conj() @ psi_0 - return psi_t - - # pylint: disable-next=unused-argument - def emulate_step(self, step: int, clock: float, duration: float) -> "KrylovEvolution": - """ - Simulate a single time step of quantum evolution using the Krylov subspace method. 
- - Args: - step: Current step index. - clock: Current time. - duration: Duration of the current time step. - - Returns: - Self with the quantum state updated. - - TODO: Implement the emulation step function. - """ - try: - psi_0 = self.reg - evolved_state = self.krylov_evolution( - self.hamiltonian.rydberg, psi_0, duration, len(self.durations) - ) - self.reg = evolved_state - except Exception as err: - raise NotImplementedError("Emulation step failed") from err - - def normalize_register(self) -> None: - """ - Normalize the quantum register if specified in options. - - TODO: Implement the normalization logic. - """ - # https://github.com/QuEraComputing/bloqade-python/blob/17585b21ad8f099ac8ffe126257e8ffb6c7f4588/src/bloqade/emulate/ir/state_vector.py#L208-L215 - # data = self.reg.data - # data /= np.linalg.norm(data) - # self.reg.data = data - if self.options.normalize_finally: - norm = np.linalg.norm(self.reg) - if norm > self.options.tol: - self.reg /= norm diff --git a/qbraid_algorithms/qrc/time_evolution.py b/qbraid_algorithms/qrc/time_evolution.py new file mode 100644 index 0000000..864aed9 --- /dev/null +++ b/qbraid_algorithms/qrc/time_evolution.py @@ -0,0 +1,87 @@ +# Copyright (C) 2024 qBraid +# +# This file is part of the qBraid-SDK +# +# The qBraid-SDK is free software released under the GNU General Public License v3 +# or later. You can redistribute and/or modify it under the terms of the GPL v3. +# See the LICENSE file in the project root or . +# +# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. + +""" +Module for quantum time evolution using Krylov subspace methods. + +""" +from dataclasses import dataclass + +import numpy as np + +from bloqade.atom_arrangement import Square, Chain, Rectangular, Honeycomb, Triangular, Lieb, Kagome, AtomArrangement + +class GeometryOptions: # pylint: disable=too-few-public-methods + + + # pylint: disable-next=too-many-arguments + def __init__( + self, + atom_arrangement_shape: str, + lattice_spacing: float, + + ): + atom_arrangement_shape_dictionary = { + "Square": Square, + "Chain": Chain, + "Rectangular": Rectangular, + "Honeycomb": Honeycomb, + "Triangular": Triangular, + "Lieb": Lieb, + "Kagome": Kagome, + } + + self.atom_arrangement: AtomArrangement = atom_arrangement_shape_dictionary.get(atom_arrangement_shape) + self.lattice_spacing: float = lattice_spacing + +@dataclass +class AnalogEvolution: + + def __init__( + self, + rabi_amplitudes: list[float], + durations: list[float], + geometric_configuration: GeometryOptions + ): + + + self.amplitudes = rabi_amplitudes + self.durations = durations + + + def get_prob(counts, num_atoms): + """Helper function for calculating the Rydberg probability averaged over all the shots""" + prob = np.zeros(num_atoms) + + total_shots = 0 # Total number of shots in the counts + for key, val in counts[0].items(): + prob += np.array([float(bit) for bit in [*key]]) * val + total_shots += val + + prob /= total_shots + return prob + + def time_evolution(self, durations: list[float], rabi_detuning_values: list[float], amp_waveform: PiecewiseLinear, num_atoms: int): + """ + evolves the program over discrete list of time steps + """ + program = ( + amp_waveform + .detuning.uniform.piecewise_linear(durations, rabi_detuning_values) + ) + + # TODO: Create some sort of config for defining whether to run an emulation or hardware run + hardware_run_bitstrings = program.braket.aquila.run_async(100).report().counts() + + expected_statevector = self.get_prob(hardware_run_bitstrings, num_atoms) + 
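        # Illustrative example of the averaging done by `get_prob` above: with two atoms
        # and bitstring counts {"10": 60, "01": 30, "11": 10} over 100 shots, the
        # per-atom Rydberg probabilities come out as
        #   atom 0: (60 + 10) / 100 = 0.7
        #   atom 1: (30 + 10) / 100 = 0.4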
return expected_statevector + + + diff --git a/tests/test_qrc_dynamics.py b/tests/test_qrc_dynamics.py index 77515af..1d7117a 100644 --- a/tests/test_qrc_dynamics.py +++ b/tests/test_qrc_dynamics.py @@ -24,7 +24,7 @@ from bloqade.emulate.ir.state_vector import StateVector from numpy.typing import NDArray -from qbraid_algorithms.qrc.krylov import KrylovEvolution, KrylovOptions +from qbraid_algorithms.qrc.time_evolution import AnalogEvolution, EvolveOptions from qbraid_algorithms.qrc.magnus import MagnusExpansion @@ -69,8 +69,8 @@ def test_rbh(space): initial_state = np.array([1, 0, 0, 0], dtype=complex) # Create a KrylovEvolution instance - krylov_options = KrylovOptions() - krylov_evolution = KrylovEvolution( + krylov_options = EvolveOptions() + krylov_evolution = AnalogEvolution( reg=StateVector(data=initial_state, space=space), start_clock=0.0, durations=[0.1, 0.2, 0.3], From b07c1c869796bcb721662c5d9d26bc6aed63301a Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sat, 29 Jun 2024 17:22:33 -0400 Subject: [PATCH 16/26] feat: update dependencies and add time evolution tests --- pyproject.toml | 2 +- tests/test_time_evolution.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 tests/test_time_evolution.py diff --git a/pyproject.toml b/pyproject.toml index 5004c72..e918c22 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Physics" ] -dependencies = ["torch>=2.3.0,<3.0", "numpy>=1.17,<1.27", "scipy~=1.13.1", "bloqade>=0.15.12,<0.16"] +dependencies = ["torch>=2.1.0,<3.0", "numpy>=1.17,<1.27", "scipy~=1.13.1", "bloqade>=0.15.12,<0.16"] [project.urls] Homepage = "https://github.com/qBraid/qbraid-algorithms" diff --git a/tests/test_time_evolution.py b/tests/test_time_evolution.py new file mode 100644 index 0000000..193d8fa --- /dev/null +++ b/tests/test_time_evolution.py @@ -0,0 +1,35 @@ +import pytest +from qbraid_algorithms import qrc + +@pytest.fixture +def time_evolution_instance(): + geometric_configuration = qrc.GeometryOptions("Square", 1.0) + analog_evolution = qrc.AnalogEvolution( + rabi_amplitudes=[1.0], + durations=[1.0], + simulation=True, + geometric_configuration=geometric_configuration + ) + return analog_evolution + +def test_time_evolution_returns_expected_result(time_evolution_instance): + num_atoms = 10 + result = time_evolution_instance.time_evolution(num_atoms) + assert result is not None + # Add more specific assertions based on the expected behavior + +def test_time_evolution_with_zero_atoms(time_evolution_instance): + num_atoms = 0 + with pytest.raises(ValueError): + time_evolution_instance.time_evolution(num_atoms) + +def test_time_evolution_with_negative_atoms(time_evolution_instance): + num_atoms = -5 + with pytest.raises(ValueError): + time_evolution_instance.time_evolution(num_atoms) + +def test_time_evolution_with_large_number_of_atoms(time_evolution_instance): + num_atoms = 1000000 + result = time_evolution_instance.time_evolution(num_atoms) + assert result is not None + # Add more specific assertions based on the expected behavior From bd6da5f406a1d482ef18c84a046e75080e72a1be Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sun, 30 Jun 2024 14:05:09 -0400 Subject: [PATCH 17/26] adding conditional logic for emulation vs hardware backend --- qbraid_algorithms/qrc/time_evolution.py | 37 +++++++++++++++++++------ 1 
file changed, 29 insertions(+), 8 deletions(-) diff --git a/qbraid_algorithms/qrc/time_evolution.py b/qbraid_algorithms/qrc/time_evolution.py index 864aed9..8dee78f 100644 --- a/qbraid_algorithms/qrc/time_evolution.py +++ b/qbraid_algorithms/qrc/time_evolution.py @@ -17,6 +17,7 @@ import numpy as np from bloqade.atom_arrangement import Square, Chain, Rectangular, Honeycomb, Triangular, Lieb, Kagome, AtomArrangement +from bloqade.builder.waveform import PiecewiseLinear class GeometryOptions: # pylint: disable=too-few-public-methods @@ -48,10 +49,12 @@ def __init__( self, rabi_amplitudes: list[float], durations: list[float], + simulation: bool, geometric_configuration: GeometryOptions ): + self.simulation = simulation self.amplitudes = rabi_amplitudes self.durations = durations @@ -68,20 +71,38 @@ def get_prob(counts, num_atoms): prob /= total_shots return prob - def time_evolution(self, durations: list[float], rabi_detuning_values: list[float], amp_waveform: PiecewiseLinear, num_atoms: int): + def _time_evolution(self, amp_waveform, num_atoms: int): """ evolves the program over discrete list of time steps """ + program = ( - amp_waveform - .detuning.uniform.piecewise_linear(durations, rabi_detuning_values) - ) + amp_waveform + .detuning.uniform.piecewise_linear(self.durations, self.amplitudes) + ) + + if self.simulation: + [emulation] = (program.bloqade.python().hamiltonian()) + emulation.evolve(times=self.durations) + + return emulation.hamiltonian.tocsr(time=self.durations[-1]).toarray() + + - # TODO: Create some sort of config for defining whether to run an emulation or hardware run - hardware_run_bitstrings = program.braket.aquila.run_async(100).report().counts() + else: + hardware_run_bitstrings = program.braket.aquila.run_async(100).report().counts() - expected_statevector = self.get_prob(hardware_run_bitstrings, num_atoms) - return expected_statevector + expected_statevector = self.get_prob(hardware_run_bitstrings, num_atoms) + return expected_statevector + + def time_evolution(self, num_atoms: int): + """ + evolves the program over discrete list of time steps + """ + program_setup = self.geometric_configuration.atom_arrangement( + lattice_spacing=self.geometric_configuration.lattice_spacing + ).rydberg.rabi.amplitude.uniform.constant(15.0, 4.0) + return self._time_evolution(program_setup, num_atoms) From d8a00ce3ad4343c02337a7ce73cd5e4aa04e4198 Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sat, 6 Jul 2024 10:08:06 -0400 Subject: [PATCH 18/26] figuring out how to integrate dynamics and time_evolution --- qbraid_algorithms/qrc/dynamics.py | 131 ++++++------------------------ 1 file changed, 26 insertions(+), 105 deletions(-) diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index a87f092..f7db823 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -13,104 +13,57 @@ """ -import math -import math from dataclasses import dataclass, field from typing import Any import numpy as np from bloqade.emulate.ir.atom_type import AtomType from bloqade.emulate.ir.emulator import Register -from bloqade.emulate.ir.state_vector import RydbergHamiltonian - - -from .time_evolution import AnalogEvolution - +from bloqade.atom_arrangement import Chain, Square, Rectangular, Honeycomb, Triangular, Lieb, Kagome, AtomArrangement +from bloqade.ir import Waveform +from bloqade.builder import Uniform, Scale, Location +from bloqade import rydberg_h @dataclass class DetuningLayer: """Class 
representing a detuning layer in a quantum reservoir.""" - atoms: list[AtomType] # Atom positions - readouts: list[Any] # Readout observables - omega: float # Rabi frequency - t_start: float # Evolution starting time - t_end: float # Evolution ending time - step: float # Readout time step - reg: Register = field( - default_factory=lambda *args, **kwargs: Register(*args, **kwargs) - ) # Quantum state storage + def __init__( + self, + program: AtomArrangement, + spatial_modulation: str + ): + if spatial_modulation == "uniform": + self.detuning = program.detuning.uniform + elif spatial_modulation == "scale": + self.detuning = program.detuning.scale + elif spatial_modulation == "location": + self.detuning = program.detuning.location + else: + raise ValueError("Invalid spatial modulation type.") -def generate_sites(lattice_type, dimension, scale): - """ - Generate positions for atoms on a specified lattice type with a given scale. - Args: - lattice_type (Any): Type of the lattice. - dimension (int): Number of principal components. - scale (float): Scale factor for lattice spacing. - - Returns: - Any: Positions of atoms. - TODO: Implement actual site generation based on lattice type. +def generate_sites(self): """ - raise NotImplementedError - - -def rydberg_h(atoms: list[AtomType], delta: float, omega: float) -> RydbergHamiltonian: - """ - Generate the Hamiltonian for a Rydberg atom system. - - Args: - atoms (list[AtomType]): Atom positions. - omega (float): Rabi frequency. - - Returns: - RydbergHamiltonian: Hamiltonian matrix. - """ - raise NotImplementedError - - -def set_zero_state(reg: Register): - """ - Set the quantum state to the zero state. - - Args: - reg (Register): Quantum state storage. - """ - raise NotImplementedError - + Generate positions for atoms on a specified lattice type with a given scale. -def rydberg_h(atoms: list[AtomType], delta: float, omega: float) -> RydbergHamiltonian: - """ - Generate the Hamiltonian for a Rydberg atom system. - Args: - atoms (list[AtomType]): Atom positions. - omega (float): Rabi frequency. Returns: - RydbergHamiltonian: Hamiltonian matrix. - """ - raise NotImplementedError - - -def set_zero_state(reg: Register): - """ - Set the quantum state to the zero state. + AtomArrangement: Positions of atoms. - Args: - reg (Register): Quantum state storage. """ - raise NotImplementedError + pass - -def apply_layer(layer: DetuningLayer, x: np.ndarray) -> np.ndarray: +def apply_layer(layer: DetuningLayer, detuning_waveform: Waveform) -> np.ndarray: """ Simulate quantum evolution and record output for a given set of PCA values (x). + Note: Frequency omega is not a input for rydberg_h python function, instead amplitude is there. + For detuning we use x. + Args: layer (DetuningLayer): Configuration and quantum state of the layer. x (np.ndarray): Vector or matrix of real numbers representing PCA values for each image. @@ -118,37 +71,5 @@ def apply_layer(layer: DetuningLayer, x: np.ndarray) -> np.ndarray: Returns: np.ndarray: Output values from the simulation. - TODO: Implement the actual simulation using suitable quantum simulation libraries. 
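    Roughly speaking, the three spatial modulations handled by `DetuningLayer` above differ in
    how the detuning waveform is distributed over the lattice: `uniform` drives every site with
    the same waveform, `scale` multiplies a shared waveform by per-site weights, and `location`
    addresses an explicit subset of sites.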
""" - h = rydberg_h(layer.atoms, x, layer.omega) - - reg = layer.reg - reg = set_zero_state(reg) - - t_start = layer.t_start - t_end = layer.t_end - t_step = layer.step - start_clock = NotImplemented - - # initialize output vector - steps = math.floor((t_end - t_start) / t_step) - out = np.zeros(steps * len(layer.readouts)) - - # Numerically simulate the quantum evolution with Krylov methods and store the readouts - i = 1 - - prob = AnalogEvolution( - reg, start_clock=start_clock, durations=[t_step] * steps, hamiltonian=h, options=None - ) - for i in range(steps): - # ignore first time step, this is just the initial state - if i == 0: - continue - - # TODO: Implement the emulation step function. - # NOTE: The following lines are placehoders. They are not correct, and should be replaced. - prob.emulate_step(i, t_start + i * t_step, t_step) - for j, readout in enumerate(layer.readouts): - out[i * len(layer.readouts) + j] = readout(prob) - - return out + pass From bee21d6c53bf3d821bb02962dddbf669ccfd164b Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sat, 6 Jul 2024 12:30:30 -0400 Subject: [PATCH 19/26] updating refs to uniform, scale, location functions-not classes --- qbraid_algorithms/qrc/dynamics.py | 40 +++++-------------------------- 1 file changed, 6 insertions(+), 34 deletions(-) diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index f7db823..37f4333 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -21,7 +21,7 @@ from bloqade.emulate.ir.emulator import Register from bloqade.atom_arrangement import Chain, Square, Rectangular, Honeycomb, Triangular, Lieb, Kagome, AtomArrangement from bloqade.ir import Waveform -from bloqade.builder import Uniform, Scale, Location +from bloqade.builder.field import uniform, scale, location from bloqade import rydberg_h @dataclass @@ -34,42 +34,14 @@ def __init__( spatial_modulation: str ): if spatial_modulation == "uniform": - self.detuning = program.detuning.uniform + self.detuning: uniform = program.detuning.uniform elif spatial_modulation == "scale": - self.detuning = program.detuning.scale + self.detuning: scale = program.detuning.scale elif spatial_modulation == "location": - self.detuning = program.detuning.location + self.detuning: location = program.detuning.location else: raise ValueError("Invalid spatial modulation type.") - - -def generate_sites(self): - """ - Generate positions for atoms on a specified lattice type with a given scale. - - - - Returns: - AtomArrangement: Positions of atoms. - - """ - pass - -def apply_layer(layer: DetuningLayer, detuning_waveform: Waveform) -> np.ndarray: - """ - Simulate quantum evolution and record output for a given set of PCA values (x). - - Note: Frequency omega is not a input for rydberg_h python function, instead amplitude is there. - For detuning we use x. - - Args: - layer (DetuningLayer): Configuration and quantum state of the layer. - x (np.ndarray): Vector or matrix of real numbers representing PCA values for each image. - - Returns: - np.ndarray: Output values from the simulation. 
- - """ - pass +def apply_layer(self): + return self.detuning From 661aec12b82fd17c39fc3328588fd2255e538b91 Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Sat, 6 Jul 2024 13:29:44 -0400 Subject: [PATCH 20/26] updating dynamics, still wip migrating functionality properly --- qbraid_algorithms/qrc/dynamics.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index 37f4333..7f14c33 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -16,13 +16,9 @@ from dataclasses import dataclass, field from typing import Any -import numpy as np -from bloqade.emulate.ir.atom_type import AtomType -from bloqade.emulate.ir.emulator import Register -from bloqade.atom_arrangement import Chain, Square, Rectangular, Honeycomb, Triangular, Lieb, Kagome, AtomArrangement +from bloqade.atom_arrangement import AtomArrangement from bloqade.ir import Waveform -from bloqade.builder.field import uniform, scale, location -from bloqade import rydberg_h +from bloqade.builder import field @dataclass class DetuningLayer: @@ -33,15 +29,22 @@ def __init__( program: AtomArrangement, spatial_modulation: str ): + if spatial_modulation == "uniform": - self.detuning: uniform = program.detuning.uniform + self.detuning: field.uniform = program.detuning.uniform elif spatial_modulation == "scale": - self.detuning: scale = program.detuning.scale + raise NotImplementedError( + f"{self.__class__.__name__}.spatial_modulation == 'scale' not implemented\n" + ) + # self.detuning: scale = program.detuning.scale elif spatial_modulation == "location": - self.detuning: location = program.detuning.location + raise NotImplementedError( + f"{self.__class__.__name__}.spatial_modulation == 'location' not implemented\n" + ) + # self.detuning: location = program.detuning.location else: raise ValueError("Invalid spatial modulation type.") -def apply_layer(self): - return self.detuning + def apply_layer(self, program): + return self.detuning.piecewise_linear(program.durations, program.amplitudes) From d615204c4a30ad35e1ab14ac756b3ae5c7108e17 Mon Sep 17 00:00:00 2001 From: babcockt18 <39506616+babcockt18@users.noreply.github.com> Date: Wed, 10 Jul 2024 12:10:47 -0400 Subject: [PATCH 21/26] updating dynamics.py --- qbraid_algorithms/qrc/dynamics.py | 1 + 1 file changed, 1 insertion(+) diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index 7f14c33..fc0ca89 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -47,4 +47,5 @@ def __init__( def apply_layer(self, program): + print(f"{program}\n") return self.detuning.piecewise_linear(program.durations, program.amplitudes) From b04d9c90accc0aedd0ccd829e5f439a66db93270 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Wed, 10 Jul 2024 11:51:52 -0500 Subject: [PATCH 22/26] check-in time evolution progress --- pyproject.toml | 2 +- qbraid_algorithms/qrc/__init__.py | 3 +- qbraid_algorithms/qrc/dynamics.py | 22 +--- qbraid_algorithms/qrc/magnus.py | 70 ------------ qbraid_algorithms/qrc/magnus_expansion.py | 67 +++++++---- qbraid_algorithms/qrc/time_evolution.py | 130 +++++++++++++--------- tests/test_qrc_dynamics.py | 28 +---- 7 files changed, 134 insertions(+), 188 deletions(-) delete mode 100644 qbraid_algorithms/qrc/magnus.py diff --git a/pyproject.toml b/pyproject.toml index 341d24f..478d32e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,7 
+67,7 @@ exclude = ''' [tool.pylint.'MESSAGES CONTROL'] max-line-length = 100 -disable = "W0108, W0511, W0401, R0902" +disable = "W0108,W0511,W0401,R0902,R0903" [tool.pylint.MASTER] ignore-paths = [ diff --git a/qbraid_algorithms/qrc/__init__.py b/qbraid_algorithms/qrc/__init__.py index af80ecb..d223648 100644 --- a/qbraid_algorithms/qrc/__init__.py +++ b/qbraid_algorithms/qrc/__init__.py @@ -22,8 +22,7 @@ """ -from .magnus import MagnusExpansion -from .model import QRCModel from .magnus_expansion import MagnusExpansion +from .model import QRCModel __all__ = ["QRCModel", "MagnusExpansion"] diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py index 6c77678..cfb2ada 100644 --- a/qbraid_algorithms/qrc/dynamics.py +++ b/qbraid_algorithms/qrc/dynamics.py @@ -22,7 +22,7 @@ from bloqade.emulate.ir.emulator import Register from bloqade.emulate.ir.state_vector import RydbergHamiltonian -from .time_evolution import AnalogEvolution +from .time_evolution import AnalogProgramEvolver @dataclass @@ -108,20 +108,6 @@ def apply_layer(layer: DetuningLayer, x: np.ndarray) -> np.ndarray: steps = math.floor((t_end - t_start) / t_step) out = np.zeros(steps * len(layer.readouts)) - # Numerically simulate the quantum evolution with Krylov methods and store the readouts - i = 1 - prob = AnalogEvolution( - reg, start_clock=start_clock, durations=[t_step] * steps, hamiltonian=h, options=None - ) - for i in range(steps): - # ignore first time step, this is just the initial state - if i == 0: - continue - - # TODO: Implement the emulation step function. - # NOTE: The following lines are placehoders. They are not correct, and should be replaced. - prob.emulate_step(i, t_start + i * t_step, t_step) - for j, readout in enumerate(layer.readouts): - out[i * len(layer.readouts) + j] = readout(prob) - - return out + evolver = AnalogProgramEvolver(num_atoms=len(layer.atoms), rabi_amplitudes=[layer.omega], durations=[t_step], geometric_configuration=layer.atoms) + + return evolver.evolve(backend="local_simulator") diff --git a/qbraid_algorithms/qrc/magnus.py b/qbraid_algorithms/qrc/magnus.py deleted file mode 100644 index fefb908..0000000 --- a/qbraid_algorithms/qrc/magnus.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2024 qBraid -# -# This file is part of the qBraid-SDK -# -# The qBraid-SDK is free software released under the GNU General Public License v3 -# or later. You can redistribute and/or modify it under the terms of the GPL v3. -# See the LICENSE file in the project root or . -# -# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. - -""" -Module for quantum time evolution using Magnus expansion. - -""" - -from scipy.linalg import expm - - -class MagnusExpansion: - """ - Class that describes a time evolution using Magnus expansion. 
- - """ - - def __init__(self, h): - self.h = h - - def commutator(self, a, b): - """Compute the commutator of two matrices.""" - return a @ b - b @ a - - def compute_magnus_terms(self, t): - """Compute the terms of the Magnus expansion.""" - h_t = self.h * t - omega_1 = h_t - - # Second-order term - comm_h1_h2 = self.commutator(self.h, self.h) - omega_2 = 0.5 * (comm_h1_h2 * t**2) - - # Third-order term - comm_h1_comm_h2_h3 = self.commutator(self.h, self.commutator(self.h, self.h)) - comm_h3_comm_h2_h1 = self.commutator(self.commutator(self.h, self.h), self.h) - omega_3 = (1 / 6) * (comm_h1_comm_h2_h3 + comm_h3_comm_h2_h1) * t**3 - - # Fourth-order term - comm_h1_comm_h2_comm_h3_h4 = self.commutator( - self.h, self.commutator(self.h, self.commutator(self.h, self.h)) - ) - comm_h4_comm_h3_comm_h2_h1 = self.commutator( - self.commutator(self.commutator(self.h, self.h), self.h), self.h - ) - omega_4 = (1 / 24) * (comm_h1_comm_h2_comm_h3_h4 + comm_h4_comm_h3_comm_h2_h1) * t**4 - - return omega_1 + omega_2 + omega_3 + omega_4 - - def time_evolution_operator(self, t): - """Compute the time evolution operator using Magnus expansion.""" - omega = self.compute_magnus_terms(t) - return expm(omega) - - def simulate_dynamics(self, psi0, t_final, dt): - """Simulate the dynamics of the system.""" - psi = psi0 - t = 0 - while t < t_final: - u = self.time_evolution_operator(dt) - psi = u @ psi - t += dt - return psi diff --git a/qbraid_algorithms/qrc/magnus_expansion.py b/qbraid_algorithms/qrc/magnus_expansion.py index b183ae1..fefb908 100644 --- a/qbraid_algorithms/qrc/magnus_expansion.py +++ b/qbraid_algorithms/qrc/magnus_expansion.py @@ -1,41 +1,70 @@ +# Copyright (C) 2024 qBraid +# +# This file is part of the qBraid-SDK +# +# The qBraid-SDK is free software released under the GNU General Public License v3 +# or later. You can redistribute and/or modify it under the terms of the GPL v3. +# See the LICENSE file in the project root or . +# +# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. + +""" +Module for quantum time evolution using Magnus expansion. + +""" + from scipy.linalg import expm + class MagnusExpansion: - def __init__(self, H): - self.H = H + """ + Class that describes a time evolution using Magnus expansion. 
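    For reference, the standard Magnus series writes the propagator as U(t) = exp(Omega(t))
    with Omega = Omega_1 + Omega_2 + ..., where Omega_1(t) = -i * integral_0^t H(t1) dt1 and
    Omega_2(t) = -(1/2) * integral_0^t dt1 integral_0^t1 dt2 [H(t1), H(t2)], followed by
    higher-order nested commutators; for a Hamiltonian that is constant over the step, every
    commutator term vanishes and the series reduces exactly to Omega(t) = -i * H * t.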
+ + """ + + def __init__(self, h): + self.h = h - def commutator(self, A, B): - return A @ B - B @ A + def commutator(self, a, b): + """Compute the commutator of two matrices.""" + return a @ b - b @ a def compute_magnus_terms(self, t): - H_t = self.H * t - Ω1 = H_t + """Compute the terms of the Magnus expansion.""" + h_t = self.h * t + omega_1 = h_t # Second-order term - comm_H1_H2 = self.commutator(self.H, self.H) - Ω2 = 0.5 * (comm_H1_H2 * t**2) + comm_h1_h2 = self.commutator(self.h, self.h) + omega_2 = 0.5 * (comm_h1_h2 * t**2) # Third-order term - comm_H1_comm_H2_H3 = self.commutator(self.H, self.commutator(self.H, self.H)) - comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.H, self.H), self.H) - Ω3 = (1/6) * (comm_H1_comm_H2_H3 + comm_H3_comm_H2_H1) * t**3 + comm_h1_comm_h2_h3 = self.commutator(self.h, self.commutator(self.h, self.h)) + comm_h3_comm_h2_h1 = self.commutator(self.commutator(self.h, self.h), self.h) + omega_3 = (1 / 6) * (comm_h1_comm_h2_h3 + comm_h3_comm_h2_h1) * t**3 # Fourth-order term - comm_H1_comm_H2_comm_H3_H4 = self.commutator(self.H, self.commutator(self.H, self.commutator(self.H, self.H))) - comm_H4_comm_H3_comm_H2_H1 = self.commutator(self.commutator(self.commutator(self.H, self.H), self.H), self.H) - Ω4 = (1/24) * (comm_H1_comm_H2_comm_H3_H4 + comm_H4_comm_H3_comm_H2_H1) * t**4 + comm_h1_comm_h2_comm_h3_h4 = self.commutator( + self.h, self.commutator(self.h, self.commutator(self.h, self.h)) + ) + comm_h4_comm_h3_comm_h2_h1 = self.commutator( + self.commutator(self.commutator(self.h, self.h), self.h), self.h + ) + omega_4 = (1 / 24) * (comm_h1_comm_h2_comm_h3_h4 + comm_h4_comm_h3_comm_h2_h1) * t**4 - return Ω1 + Ω2 + Ω3 + Ω4 + return omega_1 + omega_2 + omega_3 + omega_4 def time_evolution_operator(self, t): - Ω = self.compute_magnus_terms(t) - return expm(Ω) + """Compute the time evolution operator using Magnus expansion.""" + omega = self.compute_magnus_terms(t) + return expm(omega) def simulate_dynamics(self, psi0, t_final, dt): + """Simulate the dynamics of the system.""" psi = psi0 t = 0 while t < t_final: - U = self.time_evolution_operator(dt) - psi = U @ psi + u = self.time_evolution_operator(dt) + psi = u @ psi t += dt return psi diff --git a/qbraid_algorithms/qrc/time_evolution.py b/qbraid_algorithms/qrc/time_evolution.py index 8dee78f..307b793 100644 --- a/qbraid_algorithms/qrc/time_evolution.py +++ b/qbraid_algorithms/qrc/time_evolution.py @@ -12,22 +12,30 @@ Module for quantum time evolution using Krylov subspace methods. 
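As a quick, purely illustrative sanity check of the `commutator` helper in the Magnus module above (this snippet is not part of the package): the Pauli matrices satisfy [sigma_x, sigma_y] = 2i * sigma_z, which a few lines of NumPy confirm.

import numpy as np

sigma_x = np.array([[0, 1], [1, 0]], dtype=complex)
sigma_y = np.array([[0, -1j], [1j, 0]], dtype=complex)
sigma_z = np.array([[1, 0], [0, -1]], dtype=complex)

# same definition as MagnusExpansion.commutator: a @ b - b @ a
comm = sigma_x @ sigma_y - sigma_y @ sigma_x
assert np.allclose(comm, 2j * sigma_z)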
""" -from dataclasses import dataclass +from collections import OrderedDict import numpy as np +from bloqade.atom_arrangement import ( + AtomArrangement, + Chain, + Honeycomb, + Kagome, + Lieb, + Rectangular, + Square, + Triangular, +) -from bloqade.atom_arrangement import Square, Chain, Rectangular, Honeycomb, Triangular, Lieb, Kagome, AtomArrangement -from bloqade.builder.waveform import PiecewiseLinear +# from bloqade.builder.waveform import PiecewiseLinear -class GeometryOptions: # pylint: disable=too-few-public-methods +class GeometryOptions: + """Class for defining the geometric configuration of the atoms in the lattice.""" - # pylint: disable-next=too-many-arguments def __init__( self, atom_arrangement_shape: str, lattice_spacing: float, - ): atom_arrangement_shape_dictionary = { "Square": Square, @@ -39,70 +47,90 @@ def __init__( "Kagome": Kagome, } - self.atom_arrangement: AtomArrangement = atom_arrangement_shape_dictionary.get(atom_arrangement_shape) + self.atom_arrangement: AtomArrangement = atom_arrangement_shape_dictionary.get( + atom_arrangement_shape + ) self.lattice_spacing: float = lattice_spacing -@dataclass -class AnalogEvolution: + +class AnalogProgramEvolver: + """Class for evolving program over discrete list of time steps. + + Attributes: + num_atoms (int): Number of atoms in the system. + amplitudes (List[float]): List of Rabi oscillation amplitudes. + durations (List[float]): List of pulse durations. + geometric_configuration (GeometryOptions): Configuration options for the geometric setup. + """ + + SUPPORTED_BACKENDS = ["local_simulator", "aquila"] def __init__( - self, - rabi_amplitudes: list[float], - durations: list[float], - simulation: bool, - geometric_configuration: GeometryOptions - ): + self, + num_atoms: int, + rabi_amplitudes: list[float], + durations: list[float], + geometric_configuration: GeometryOptions, + ): + """Initializes the AnalogEvolution with provided parameters. + Args: + num_atoms (int): Number of atoms in the system. + rabi_amplitudes (list[float]): Rabi amplitudes for each pulse. + durations (list[float]): Duration of each pulse. + geometric_configuration (GeometryOptions): Geometric settings for the evolution. - self.simulation = simulation + """ + self.num_atoms = num_atoms self.amplitudes = rabi_amplitudes self.durations = durations + self.geometric_configuration = geometric_configuration + def compute_rydberg_probs(self, counts: OrderedDict) -> np.ndarray: + """Calculate the average probability distribution of Rydberg states over all shots. - def get_prob(counts, num_atoms): - """Helper function for calculating the Rydberg probability averaged over all the shots""" - prob = np.zeros(num_atoms) + Args: + counts (OrderedDict): An OrderedDict where keys are bitstring state representations + and values are the counts of each state observed. - total_shots = 0 # Total number of shots in the counts - for key, val in counts[0].items(): + Returns: + np.ndarray: The probability of each state, averaged over all shots. 
+ """ + prob = np.zeros(self.num_atoms) + + total_shots = 0 # Total number of shots in the counts + for key, val in counts.items(): prob += np.array([float(bit) for bit in [*key]]) * val total_shots += val prob /= total_shots return prob - def _time_evolution(self, amp_waveform, num_atoms: int): - """ - evolves the program over discrete list of time steps - """ - - program = ( - amp_waveform - .detuning.uniform.piecewise_linear(self.durations, self.amplitudes) - ) + def evolve(self, backend: str) -> np.ndarray: + """Evolves program over discrete list of time steps""" + amp_waveform = self.geometric_configuration.atom_arrangement( + lattice_spacing=self.geometric_configuration.lattice_spacing + ).rydberg.rabi.amplitude.uniform.constant(15.0, 4.0) + program = amp_waveform.detuning.uniform.piecewise_linear(self.durations, self.amplitudes) - if self.simulation: - [emulation] = (program.bloqade.python().hamiltonian()) + if backend == "local_simulator": + [emulation] = program.bloqade.python().hamiltonian() emulation.evolve(times=self.durations) return emulation.hamiltonian.tocsr(time=self.durations[-1]).toarray() - - - else: - hardware_run_bitstrings = program.braket.aquila.run_async(100).report().counts() - - expected_statevector = self.get_prob(hardware_run_bitstrings, num_atoms) - return expected_statevector - - def time_evolution(self, num_atoms: int): - """ - evolves the program over discrete list of time steps - """ - program_setup = self.geometric_configuration.atom_arrangement( - lattice_spacing=self.geometric_configuration.lattice_spacing - ).rydberg.rabi.amplitude.uniform.constant(15.0, 4.0) - return self._time_evolution(program_setup, num_atoms) - - - + if backend == "aquila": + # TODO: Revise for async task handling to avoid blocking while waiting for results. + bitstring_counts_batch: list[OrderedDict] = ( + program.braket.aquila.run_async(100).report().counts() + ) + if ( + len(bitstring_counts_batch) != 1 + ): # TODO: Double-check that counts list will always be length 1 here. + raise ValueError("Expected a single batch of counts.") + bitstring_counts = bitstring_counts_batch[0] + return self.compute_rydberg_probs(bitstring_counts) + + raise ValueError( + f"Backend {backend} is not supported. 
Supported backends are: {self.SUPPORTED_BACKENDS}" + ) diff --git a/tests/test_qrc_dynamics.py b/tests/test_qrc_dynamics.py index 1d7117a..22be228 100644 --- a/tests/test_qrc_dynamics.py +++ b/tests/test_qrc_dynamics.py @@ -21,11 +21,9 @@ from bloqade.emulate.ir.atom_type import AtomType from bloqade.emulate.ir.emulator import Register from bloqade.emulate.ir.space import Space, SpaceType -from bloqade.emulate.ir.state_vector import StateVector from numpy.typing import NDArray -from qbraid_algorithms.qrc.time_evolution import AnalogEvolution, EvolveOptions -from qbraid_algorithms.qrc.magnus import MagnusExpansion +from qbraid_algorithms.qrc.magnus_expansion import MagnusExpansion @pytest.fixture @@ -63,30 +61,6 @@ def space(program_register, atom_type, configurations, space_type) -> Space: ) -@pytest.mark.skip(reason="Not implemented yet") -def test_rbh(space): - """Test the Rydberg Blockade Hamiltonian (RBH)""" - initial_state = np.array([1, 0, 0, 0], dtype=complex) - - # Create a KrylovEvolution instance - krylov_options = EvolveOptions() - krylov_evolution = AnalogEvolution( - reg=StateVector(data=initial_state, space=space), - start_clock=0.0, - durations=[0.1, 0.2, 0.3], - hamiltonian=None, # This will be initialized in __post_init__ - options=krylov_options, - ) - - # Simulate the evolution (example step) - krylov_evolution.emulate_step(step=0, clock=0.0, duration=0.1) - - final_state = np.array([], dtype=complex) # dummy value - expected_value = StateVector(data=final_state, space=space) - - assert krylov_evolution.reg == expected_value - - @pytest.mark.skip(reason="Not completed yet") def test_simulate_dynamics(): """Test the simulation of quantum dynamics using Magnus expansion.""" From 34292b856378a46a4662600f8d881dc43c70cc08 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Wed, 10 Jul 2024 18:52:19 -0500 Subject: [PATCH 23/26] detuning layer dev --- examples/sandbox/README.md | 3 + pyproject.toml | 13 +-- qbraid_algorithms/qrc/__init__.py | 25 +++- qbraid_algorithms/qrc/dynamics.py | 51 --------- qbraid_algorithms/qrc/encoding.py | 39 ++++++- qbraid_algorithms/qrc/linear.py | 31 ----- qbraid_algorithms/qrc/model.py | 80 ------------- qbraid_algorithms/qrc/pca.py | 53 --------- qbraid_algorithms/qrc/qrc_model.py | 107 ++++++++++++++++++ qbraid_algorithms/qrc/time_evolution.py | 90 +++++---------- tests/test_qbraid_algorithms/__init__.py | 1 - .../test_qrc/__init__.py | 0 .../test_qrc/test_magnus_expansion.py | 20 ---- tests/test_qrc_dynamics.py | 82 -------------- tests/test_qrc_model.py | 30 +++++ tests/test_time_evolution.py | 35 ------ 16 files changed, 233 insertions(+), 427 deletions(-) create mode 100644 examples/sandbox/README.md delete mode 100644 qbraid_algorithms/qrc/dynamics.py delete mode 100644 qbraid_algorithms/qrc/linear.py delete mode 100644 qbraid_algorithms/qrc/model.py delete mode 100644 qbraid_algorithms/qrc/pca.py create mode 100644 qbraid_algorithms/qrc/qrc_model.py delete mode 100644 tests/test_qbraid_algorithms/__init__.py delete mode 100644 tests/test_qbraid_algorithms/test_qrc/__init__.py delete mode 100644 tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py delete mode 100644 tests/test_qrc_dynamics.py create mode 100644 tests/test_qrc_model.py delete mode 100644 tests/test_time_evolution.py diff --git a/examples/sandbox/README.md b/examples/sandbox/README.md new file mode 100644 index 0000000..bda4cdb --- /dev/null +++ b/examples/sandbox/README.md @@ -0,0 +1,3 @@ +# qBraid-Algorithms Developer Sandbox + +This sandbox directory is provided as a 
collaborative and temporary space where contributors can freely share experimental notebooks and early-stage code. Its main purpose is to increase transparency about ongoing work, thus enhancing collaboration. Feel free to push your notebooks and code frequently, regardless of their completeness or correctness. This directory will be removed with the release of version 0.1 of the `qbraid-algorithms` project. diff --git a/pyproject.toml b/pyproject.toml index da8dc8f..388e055 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,7 +29,7 @@ dependencies = ["torch>=2.3.0,<3.0", "numpy>=1.17,<2.1", "scipy~=1.13.1", "bloqa [project.urls] Homepage = "https://github.com/qBraid/qbraid-algorithms" -Documentation = "https://docs.qbraid.com/en/stable/" +Documentation = "https://docs.qbraid.com" "Bug Tracker" = "https://github.com/qBraid/qbraid-algorithms/issues" Discord = "https://discord.gg/TPBU2sa8Et" "Launch on Lab" = "https://account.qbraid.com/?gitHubUrl=https://github.com/qBraid/qbraid-algorithms.git" @@ -67,16 +67,7 @@ exclude = ''' [tool.pylint.'MESSAGES CONTROL'] max-line-length = 100 -disable = "W0108,W0511,W0401,R0902,R0903" - -[tool.pylint.MASTER] -ignore-paths = [ - "^.*\\_version.py$", -] - -[tool.pylint.'MESSAGES CONTROL'] -max-line-length = 100 -disable = "W0108, W0511, W0401, R0902" +disable = "W0108,W0511,W0401,R0902,R0903,R0913" [tool.pylint.MASTER] ignore-paths = [ diff --git a/qbraid_algorithms/qrc/__init__.py b/qbraid_algorithms/qrc/__init__.py index d223648..cc88a45 100644 --- a/qbraid_algorithms/qrc/__init__.py +++ b/qbraid_algorithms/qrc/__init__.py @@ -19,10 +19,31 @@ QRCModel MagnusExpansion + DetuningLayer + MagnusExpansion + AnalogProgramEvolver + +Functions +---------- + +.. autosummary:: + :toctree: ../stubs/ + + one_hot_encoding + pca_reduction """ +from .encoding import one_hot_encoding, pca_reduction from .magnus_expansion import MagnusExpansion -from .model import QRCModel +from .qrc_model import DetuningLayer, QRCModel +from .time_evolution import AnalogProgramEvolver -__all__ = ["QRCModel", "MagnusExpansion"] +__all__ = [ + "QRCModel", + "MagnusExpansion", + "DetuningLayer", + "AnalogProgramEvolver", + "one_hot_encoding", + "pca_reduction", +] diff --git a/qbraid_algorithms/qrc/dynamics.py b/qbraid_algorithms/qrc/dynamics.py deleted file mode 100644 index fc0ca89..0000000 --- a/qbraid_algorithms/qrc/dynamics.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2024 qBraid -# -# This file is part of the qBraid-SDK -# -# The qBraid-SDK is free software released under the GNU General Public License v3 -# or later. You can redistribute and/or modify it under the terms of the GPL v3. -# See the LICENSE file in the project root or . -# -# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. - -""" -Module for simulating the dynamics of a quantum reservoir. 
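A small, hypothetical illustration of the `one_hot_encoding` helper re-exported above (the label values are made up for the example):

import numpy as np
from qbraid_algorithms.qrc import one_hot_encoding

labels = np.array([0, 2, 1])
encoded = one_hot_encoding(labels, train=True)
# one row per sample, one column per distinct class; e.g. label 2 maps to [0, 0, 1]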
- -""" - -from dataclasses import dataclass, field -from typing import Any - -from bloqade.atom_arrangement import AtomArrangement -from bloqade.ir import Waveform -from bloqade.builder import field - -@dataclass -class DetuningLayer: - """Class representing a detuning layer in a quantum reservoir.""" - - def __init__( - self, - program: AtomArrangement, - spatial_modulation: str - ): - - if spatial_modulation == "uniform": - self.detuning: field.uniform = program.detuning.uniform - elif spatial_modulation == "scale": - raise NotImplementedError( - f"{self.__class__.__name__}.spatial_modulation == 'scale' not implemented\n" - ) - # self.detuning: scale = program.detuning.scale - elif spatial_modulation == "location": - raise NotImplementedError( - f"{self.__class__.__name__}.spatial_modulation == 'location' not implemented\n" - ) - # self.detuning: location = program.detuning.location - else: - raise ValueError("Invalid spatial modulation type.") - - - def apply_layer(self, program): - print(f"{program}\n") - return self.detuning.piecewise_linear(program.durations, program.amplitudes) diff --git a/qbraid_algorithms/qrc/encoding.py b/qbraid_algorithms/qrc/encoding.py index 88cf1ec..5e28a1d 100644 --- a/qbraid_algorithms/qrc/encoding.py +++ b/qbraid_algorithms/qrc/encoding.py @@ -9,11 +9,12 @@ # THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. """ -Module for encoding of data. +Module for encoding of classical data. """ import numpy as np import torch +from sklearn.decomposition import PCA from sklearn.preprocessing import OneHotEncoder @@ -35,3 +36,39 @@ def one_hot_encoding(labels: np.ndarray, train: bool = True) -> torch.Tensor: else: encoded_data = encoder.transform(reshaped_data) return encoded_data + + +def pca_reduction( + data: torch.Tensor, + n_components: int, + data_dim: int, + delta_max: int, + train: bool = True, +) -> torch.Tensor: + """ + Perform PCA reduction on the provided data using PyTorch's pca_lowrank to + reduce its dimensionality. + + Args: + data (torch.Tensor): The input data tensor where each row represents a sample. + n_components (int): The number of principal components to retain. + data_dim (int): The dimension of the input data required for doing PCA. + delta_max (int): Scaling factor to bring PCA vals into a feasible range for local detuning. + train (bool, optional): Whether the data is training data. Defaults to True. + + Returns: + torch.Tensor: The transformed data + """ + # Perform PCA on training data + pca = PCA(n_components=n_components) + data_array: np.ndarray = data.data.numpy() + data_reshaped = data_array.reshape(-1, data_dim) + if train: + data_pca = pca.fit_transform(data_reshaped) + else: + data_pca = pca.transform(data_reshaped) + + # Scale PCA values to feasible range of local detuning + scaled_data_pca = data_pca / np.max(np.abs(data_pca)) * delta_max + + return scaled_data_pca diff --git a/qbraid_algorithms/qrc/linear.py b/qbraid_algorithms/qrc/linear.py deleted file mode 100644 index d44fa26..0000000 --- a/qbraid_algorithms/qrc/linear.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2024 qBraid -# -# This file is part of the qBraid-SDK -# -# The qBraid-SDK is free software released under the GNU General Public License v3 -# or later. You can redistribute and/or modify it under the terms of the GPL v3. -# See the LICENSE file in the project root or . -# -# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. - -""" -Module for performing linear regression on output data. 
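A small usage sketch of the `pca_reduction` helper added above; the tensor shape, component count, and delta_max value are illustrative assumptions, not values prescribed by the package:

import torch
from qbraid_algorithms.qrc import pca_reduction

images = torch.rand(100, 28, 28)  # 100 samples, each flattened to 784 features below
features = pca_reduction(images, n_components=8, data_dim=28 * 28, delta_max=6, train=True)
# `features` has shape (100, 8), rescaled so the largest magnitude equals delta_max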
- -""" - -import torch - - -def linear_regression(embeddings: torch.Tensor): - """ - Perform linear regression on the input data using PyTorch's Linear module. - - Args: - embeddings (torch.Tensor): The input data tensor. - - Returns: - torch.Tensor: The predicted output tensor. - - TODO: Implement the linear regression model, possibly using torch.nn.Linear. - """ - raise NotImplementedError diff --git a/qbraid_algorithms/qrc/model.py b/qbraid_algorithms/qrc/model.py deleted file mode 100644 index 8b6c318..0000000 --- a/qbraid_algorithms/qrc/model.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (C) 2024 qBraid -# -# This file is part of the qBraid-SDK -# -# The qBraid-SDK is free software released under the GNU General Public License v3 -# or later. You can redistribute and/or modify it under the terms of the GPL v3. -# See the LICENSE file in the project root or . -# -# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. - -""" -Module for assembling QRC model components and computing prediction. - -""" - -from dataclasses import dataclass -from typing import Any, Callable - -import numpy as np - -from .dynamics import DetuningLayer - -# from torch import nn - - -@dataclass -class QRCModel: - """Quantum Reservoir Computing (QRC) model.""" - - model_pca: Any # PCA model component - spectral: float # Spectral radius or related parameter - delta_max: float # Maximum delta parameter - detuning_layer: DetuningLayer # Detuning layer - linear_regression: Callable # Linear regression model - - def __call__(self, xs: np.ndarray) -> list[int]: - """ - Compute predictions for input images or data using quantum reservoir computing. - - Args: - xs (np.ndarray): Input data, either a batch of images or a single image. - - Returns: - list[int]: Predicted classes or values. - - TODO: Implement the transformation and prediction steps. - """ - raise NotImplementedError - - -# Define neural network model -# class Net(nn.Module): -# def __init__(self): -# super(Net, self).__init__() -# self.fc1 = nn.Linear(dim_pca, 10) -# def forward(self, x): -# x = torch.relu(self.fc1(x)) -# return x -# # Train classical model using PCA features -# model_reg = Net() -# criterion = nn.CrossEntropyLoss() -# optimizer = optim.Adam(model_reg.parameters(), lr=0.01) -# for epoch in range(1000): -# for x, y in train_loader: -# x = x.view(-1, 28*28) -# x_pca = pca.transform(x.numpy()) -# x_pca = torch.tensor(x_pca, dtype=torch.float32) -# y = torch.tensor(y, dtype=torch.long) -# optimizer.zero_grad() -# output = model_reg(x_pca) -# loss = criterion(output, y) -# loss.backward() -# optimizer.step() -# # Train QRC model using quantum reservoir computing -# pre_layer = DetuningLayer(atoms, readouts, Ω, t_start, t_end, step) -# model_qrc = Net() -# for epoch in range(1000): -# for x, y in train_loader: -# x = x.view(-1, 28*28) -# x_pca = pca.transform(x.numpy()) diff --git a/qbraid_algorithms/qrc/pca.py b/qbraid_algorithms/qrc/pca.py deleted file mode 100644 index a7139d0..0000000 --- a/qbraid_algorithms/qrc/pca.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2024 qBraid -# -# This file is part of the qBraid-SDK -# -# The qBraid-SDK is free software released under the GNU General Public License v3 -# or later. You can redistribute and/or modify it under the terms of the GPL v3. -# See the LICENSE file in the project root or . -# -# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. - -""" -Module implemting Principal Component Analysis (PCA) for dimensionality reduction. 
- -""" -import numpy as np -import torch -from sklearn.decomposition import PCA - - -def pca_reduction( - data: torch.Tensor, - n_components: int, - data_dim: int, - delta_max: int, - train: bool = True, -) -> torch.Tensor: - """ - Perform PCA reduction on the provided data using PyTorch's pca_lowrank to - reduce its dimensionality. - - Args: - data (torch.Tensor): The input data tensor where each row represents a sample. - n_components (int): The number of principal components to retain. - data_dim (int): The dimension of the input data required for doing PCA. - delta_max (int): Scaling factor to bring PCA vals into a feasible range for local detuning. - train (bool, optional): Whether the data is training data. Defaults to True. - - Returns: - torch.Tensor: The transformed data - """ - # Perform PCA on training data - pca = PCA(n_components=n_components) - data_array: np.ndarray = data.data.numpy() - data_reshaped = data_array.reshape(-1, data_dim) - if train: - data_pca = pca.fit_transform(data_reshaped) - else: - data_pca = pca.transform(data_reshaped) - - # Scale PCA values to feasible range of local detuning - scaled_data_pca = data_pca / np.max(np.abs(data_pca)) * delta_max - - return scaled_data_pca diff --git a/qbraid_algorithms/qrc/qrc_model.py b/qbraid_algorithms/qrc/qrc_model.py new file mode 100644 index 0000000..1d84a51 --- /dev/null +++ b/qbraid_algorithms/qrc/qrc_model.py @@ -0,0 +1,107 @@ +# Copyright (C) 2024 qBraid +# +# This file is part of the qBraid-SDK +# +# The qBraid-SDK is free software released under the GNU General Public License v3 +# or later. You can redistribute and/or modify it under the terms of the GPL v3. +# See the LICENSE file in the project root or . +# +# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. + +""" +Module for assembling QRC model components and computing prediction. + +""" + +from dataclasses import dataclass +from decimal import Decimal +from typing import Any + +import numpy as np +from bloqade.ir.location import Chain + +from .time_evolution import AnalogProgramEvolver + + +@dataclass +class DetuningLayer: + """Class representing a detuning layer in a quantum reservoir.""" + + num_sites: int # Number of sites in the chain lattice + lattice_spacing: float # Lattice spacing () + omega: float # Rabi frequency + step_size: float # Time evolution duration + num_steps: int # Number of time steps + + +class QRCModel: + """Quantum Reservoir Computing (QRC) model.""" + + def __init__(self, model_pca: Any, delta_max: float, detuning_layer: DetuningLayer): + """ + Initialize the Quantum Reservoir Computing model with necessary components. + + Args: + model_pca (Any): PCA model component. + delta_max (float): Maximum delta parameter. + detuning_layer (DetuningLayer): Detuning layer for the model. + """ + self.model_pca = model_pca + self.delta_max = delta_max + self.detuning_layer = detuning_layer + + def apply_detuning(self, x: np.ndarray) -> np.ndarray: + """ + Simulate quantum evolution and record output for a given set of values (x). + + Args: + x (np.ndarray): Vector or matrix of real numbers representing PCA values for each image. + + Returns: + np.ndarray: Output values from the simulation. + """ + layer = self.detuning_layer + + # using 0th order. 
Will need to modify to consider slew rate based on hardware + amplitude_omegas = [layer.omega] * (layer.num_steps - 2) + amplitudes = list(np.pad(amplitude_omegas, (1, 1), mode="constant")) + + durations = [Decimal(layer.step_size)] * (layer.num_steps - 1) + + atoms = Chain(layer.num_sites, lattice_spacing=layer.lattice_spacing) + + evolver = AnalogProgramEvolver(atoms=atoms, rabi_amplitudes=amplitudes, durations=durations) + probabilities = evolver.evolve(backend="emulator") + + # TODO: added dot as placeholder, will need to revisit + output_vector = np.dot(probabilities, x) + + return output_vector + + def linear_regression(self, embeddings): + """ + Perform linear regression on given data + + Args: + embeddings: The input data tensor. + + Returns: + Any: The predicted output tensor. + + TODO: Implement the linear regression model, possibly using torch.nn.Linear. + """ + raise NotImplementedError + + def predict(self, xs: np.ndarray) -> list[int]: + """ + Compute predictions for input images or data using quantum reservoir computing. + + Args: + xs (np.ndarray): Input data, either a batch of images or a single image. + + Returns: + list[int]: Predicted classes or values. + + TODO: Implement the transformation and prediction steps. + """ + raise NotImplementedError diff --git a/qbraid_algorithms/qrc/time_evolution.py b/qbraid_algorithms/qrc/time_evolution.py index 307b793..435b640 100644 --- a/qbraid_algorithms/qrc/time_evolution.py +++ b/qbraid_algorithms/qrc/time_evolution.py @@ -9,94 +9,66 @@ # THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. """ -Module for quantum time evolution using Krylov subspace methods. +Module for quantum time evolution using emulator or QPU. """ from collections import OrderedDict +from decimal import Decimal import numpy as np -from bloqade.atom_arrangement import ( - AtomArrangement, - Chain, - Honeycomb, - Kagome, - Lieb, - Rectangular, - Square, - Triangular, -) - -# from bloqade.builder.waveform import PiecewiseLinear - - -class GeometryOptions: - """Class for defining the geometric configuration of the atoms in the lattice.""" - - def __init__( - self, - atom_arrangement_shape: str, - lattice_spacing: float, - ): - atom_arrangement_shape_dictionary = { - "Square": Square, - "Chain": Chain, - "Rectangular": Rectangular, - "Honeycomb": Honeycomb, - "Triangular": Triangular, - "Lieb": Lieb, - "Kagome": Kagome, - } - - self.atom_arrangement: AtomArrangement = atom_arrangement_shape_dictionary.get( - atom_arrangement_shape - ) - self.lattice_spacing: float = lattice_spacing +from bloqade.atom_arrangement import Chain +from bloqade.builder.field import Detuning class AnalogProgramEvolver: """Class for evolving program over discrete list of time steps. Attributes: - num_atoms (int): Number of atoms in the system. + atoms (Chain): Chain lattice. amplitudes (List[float]): List of Rabi oscillation amplitudes. - durations (List[float]): List of pulse durations. - geometric_configuration (GeometryOptions): Configuration options for the geometric setup. + durations (List[Decimal]): List of pulse durations. + time_steps (list[float]): The times to evaluate the state vector. """ - SUPPORTED_BACKENDS = ["local_simulator", "aquila"] + SUPPORTED_BACKENDS = ["emulator", "qpu"] def __init__( self, - num_atoms: int, + atoms: Chain, rabi_amplitudes: list[float], - durations: list[float], - geometric_configuration: GeometryOptions, + durations: list[Decimal], ): """Initializes the AnalogEvolution with provided parameters. 
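A minimal sketch of how the pieces in `qrc_model.py` above are meant to compose, mirroring the shapes used in the accompanying test; the hyperparameter values are illustrative, not prescribed by the module:

import numpy as np
from qbraid_algorithms.qrc import DetuningLayer, QRCModel, pca_reduction

dim_pca = 3
layer = DetuningLayer(
    num_sites=dim_pca, lattice_spacing=4, omega=2 * np.pi, step_size=0.5, num_steps=20
)
model = QRCModel(model_pca=pca_reduction, delta_max=0.6, detuning_layer=layer)

# apply_detuning expects a feature vector of length 2**num_sites and, via the
# placeholder dot product, returns a vector of the same shape
features = model.apply_detuning(np.random.rand(2**dim_pca))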
Args: - num_atoms (int): Number of atoms in the system. + atoms (Chain): Chain lattice. rabi_amplitudes (list[float]): Rabi amplitudes for each pulse. - durations (list[float]): Duration of each pulse. - geometric_configuration (GeometryOptions): Geometric settings for the evolution. + durations (list[Decimal]): Duration of each pulse. """ - self.num_atoms = num_atoms + self.atoms = atoms self.amplitudes = rabi_amplitudes self.durations = durations - self.geometric_configuration = geometric_configuration + self.time_steps = self._get_time_steps(durations) - def compute_rydberg_probs(self, counts: OrderedDict) -> np.ndarray: + @staticmethod + def _get_time_steps(durations: list[Decimal]) -> list[float]: + """Generate time steps from list of pulse durations.""" + return list(np.cumsum([0.0] + [float(d) for d in durations])) + + @staticmethod + def compute_rydberg_probs(num_sites: int, counts: OrderedDict) -> np.ndarray: """Calculate the average probability distribution of Rydberg states over all shots. Args: + num_sites (int): Number of sites in the chain lattice counts (OrderedDict): An OrderedDict where keys are bitstring state representations and values are the counts of each state observed. Returns: np.ndarray: The probability of each state, averaged over all shots. """ - prob = np.zeros(self.num_atoms) + prob = np.zeros(num_sites) total_shots = 0 # Total number of shots in the counts for key, val in counts.items(): @@ -108,18 +80,16 @@ def compute_rydberg_probs(self, counts: OrderedDict) -> np.ndarray: def evolve(self, backend: str) -> np.ndarray: """Evolves program over discrete list of time steps""" - amp_waveform = self.geometric_configuration.atom_arrangement( - lattice_spacing=self.geometric_configuration.lattice_spacing - ).rydberg.rabi.amplitude.uniform.constant(15.0, 4.0) + detuning: Detuning = self.atoms.rydberg.rabi.amplitude + amp_waveform = detuning.uniform.constant(max(self.amplitudes), sum(self.durations)) program = amp_waveform.detuning.uniform.piecewise_linear(self.durations, self.amplitudes) - if backend == "local_simulator": + if backend == "emulator": [emulation] = program.bloqade.python().hamiltonian() - emulation.evolve(times=self.durations) - - return emulation.hamiltonian.tocsr(time=self.durations[-1]).toarray() + emulation.evolve(times=self.time_steps) + return emulation.hamiltonian.tocsr(time=self.time_steps[-1]).toarray() - if backend == "aquila": + if backend == "qpu": # TODO: Revise for async task handling to avoid blocking while waiting for results. bitstring_counts_batch: list[OrderedDict] = ( program.braket.aquila.run_async(100).report().counts() @@ -129,7 +99,7 @@ def evolve(self, backend: str) -> np.ndarray: ): # TODO: Double-check that counts list will always be length 1 here. raise ValueError("Expected a single batch of counts.") bitstring_counts = bitstring_counts_batch[0] - return self.compute_rydberg_probs(bitstring_counts) + return self.compute_rydberg_probs(self.atoms.L, bitstring_counts) raise ValueError( f"Backend {backend} is not supported. 
Supported backends are: {self.SUPPORTED_BACKENDS}" diff --git a/tests/test_qbraid_algorithms/__init__.py b/tests/test_qbraid_algorithms/__init__.py deleted file mode 100644 index 321033c..0000000 --- a/tests/test_qbraid_algorithms/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .test_qrc import * diff --git a/tests/test_qbraid_algorithms/test_qrc/__init__.py b/tests/test_qbraid_algorithms/test_qrc/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py b/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py deleted file mode 100644 index 441f119..0000000 --- a/tests/test_qbraid_algorithms/test_qrc/test_magnus_expansion.py +++ /dev/null @@ -1,20 +0,0 @@ -import numpy as np -from qbraid_algorithms import qrc - -def test_simulate_dynamics(): - # Define a simple Hamiltonian and initial state - H = np.array([[0, 1], [1, 0]], dtype=complex) # Simple Hamiltonian - psi0 = np.array([1, 0], dtype=complex) # Initial state - t_final = 1.0 - dt = 0.01 - - # Create an instance of MagnusExpansion - magnus = qrc.magnus_expansion.MagnusExpansion(H) - - # Simulate the dynamics - final_state = magnus.simulate_dynamics(psi0, t_final, dt) - - # Add assertions to check the final state - # For example: - expected_final_state = np.array([0.54030231+0.84147098j, 0.00000000+0.j]) - np.testing.assert_allclose(final_state, expected_final_state, rtol=1e-5) diff --git a/tests/test_qrc_dynamics.py b/tests/test_qrc_dynamics.py deleted file mode 100644 index 22be228..0000000 --- a/tests/test_qrc_dynamics.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2024 qBraid -# -# This file is part of the qBraid-SDK -# -# The qBraid-SDK is free software released under the GNU General Public License v3 -# or later. You can redistribute and/or modify it under the terms of the GPL v3. -# See the LICENSE file in the project root or . -# -# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. - -# pylint: disable=redefined-outer-name - -""" -Unit tests for Quantum Reservoir Computing (QRC) dynamics modules. 
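A rough usage sketch of the finished `AnalogProgramEvolver`; the lattice size, amplitudes, and durations below are illustrative placeholders (note there is one more amplitude than durations, matching the piecewise-linear pattern used in `apply_detuning`):

from decimal import Decimal

import numpy as np
from bloqade.atom_arrangement import Chain

from qbraid_algorithms.qrc import AnalogProgramEvolver

atoms = Chain(3, lattice_spacing=4.0)
amplitudes = [0.0, 2 * np.pi, 0.0]
durations = [Decimal("0.5"), Decimal("0.5")]

evolver = AnalogProgramEvolver(atoms=atoms, rabi_amplitudes=amplitudes, durations=durations)
result = evolver.evolve(backend="emulator")  # dense array returned by the emulator branch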
- -""" -from unittest.mock import Mock - -import numpy as np -import pytest -from bloqade.emulate.ir.atom_type import AtomType -from bloqade.emulate.ir.emulator import Register -from bloqade.emulate.ir.space import Space, SpaceType -from numpy.typing import NDArray - -from qbraid_algorithms.qrc.magnus_expansion import MagnusExpansion - - -@pytest.fixture -def program_register() -> Register: - """Create a program register.""" - return Mock() - - -@pytest.fixture -def atom_type() -> AtomType: - """Create an atom type.""" - return Mock() - - -@pytest.fixture -def configurations() -> NDArray: - """Create configurations.""" - return Mock() - - -@pytest.fixture -def space_type() -> SpaceType: - """Create a space type.""" - return Mock() - - -@pytest.fixture -def space(program_register, atom_type, configurations, space_type) -> Space: - """Create a space object.""" - return Space( - space_type=space_type, - atom_type=atom_type, - program_register=program_register, - configurations=configurations, - ) - - -@pytest.mark.skip(reason="Not completed yet") -def test_simulate_dynamics(): - """Test the simulation of quantum dynamics using Magnus expansion.""" - # Define a simple Hamiltonian and initial state - h = np.array([[0, 1], [1, 0]], dtype=complex) # Simple Hamiltonian - psi0 = np.array([1, 0], dtype=complex) # Initial state - t_final = 1.0 - dt = 0.01 - - # Create an instance of MagnusExpansion - magnus = MagnusExpansion(h) - - # Simulate the dynamics - final_state = magnus.simulate_dynamics(psi0, t_final, dt) - - # Add assertions to check the final state - # For example: - expected_final_state = np.array([0.54030231 + 0.84147098j, 0.00000000 + 0.0j]) - np.testing.assert_allclose(final_state, expected_final_state, rtol=1e-5) diff --git a/tests/test_qrc_model.py b/tests/test_qrc_model.py new file mode 100644 index 0000000..fdb06d7 --- /dev/null +++ b/tests/test_qrc_model.py @@ -0,0 +1,30 @@ +# Copyright (C) 2024 qBraid +# +# This file is part of the qBraid-SDK +# +# The qBraid-SDK is free software released under the GNU General Public License v3 +# or later. You can redistribute and/or modify it under the terms of the GPL v3. +# See the LICENSE file in the project root or . +# +# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3. + +""" +Unit tests for the QRC (Quantum Reservoir Computing) model. 
+ +""" +import numpy as np +import pytest + +from qbraid_algorithms.qrc import DetuningLayer, QRCModel, pca_reduction + + +@pytest.mark.parametrize("dim_pca", [3, 10]) +def test_detuning_layer(dim_pca): + """Test applying detuning layer to single feature vector.""" + hyperparams = {"lattice_spacing": 4, "omega": 2 * np.pi, "step_size": 0.5, "num_steps": 20} + detuning_layer = DetuningLayer(num_sites=dim_pca, **hyperparams) + model = QRCModel(model_pca=pca_reduction, delta_max=0.6, detuning_layer=detuning_layer) + + input_vector = np.random.rand(2**dim_pca) + output_vector = model.apply_detuning(input_vector) + assert np.shape(input_vector) == np.shape(output_vector) diff --git a/tests/test_time_evolution.py b/tests/test_time_evolution.py deleted file mode 100644 index 193d8fa..0000000 --- a/tests/test_time_evolution.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest -from qbraid_algorithms import qrc - -@pytest.fixture -def time_evolution_instance(): - geometric_configuration = qrc.GeometryOptions("Square", 1.0) - analog_evolution = qrc.AnalogEvolution( - rabi_amplitudes=[1.0], - durations=[1.0], - simulation=True, - geometric_configuration=geometric_configuration - ) - return analog_evolution - -def test_time_evolution_returns_expected_result(time_evolution_instance): - num_atoms = 10 - result = time_evolution_instance.time_evolution(num_atoms) - assert result is not None - # Add more specific assertions based on the expected behavior - -def test_time_evolution_with_zero_atoms(time_evolution_instance): - num_atoms = 0 - with pytest.raises(ValueError): - time_evolution_instance.time_evolution(num_atoms) - -def test_time_evolution_with_negative_atoms(time_evolution_instance): - num_atoms = -5 - with pytest.raises(ValueError): - time_evolution_instance.time_evolution(num_atoms) - -def test_time_evolution_with_large_number_of_atoms(time_evolution_instance): - num_atoms = 1000000 - result = time_evolution_instance.time_evolution(num_atoms) - assert result is not None - # Add more specific assertions based on the expected behavior From aa50917ae07d67cc493a52041a700b31e54bdfed Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Wed, 10 Jul 2024 19:11:58 -0500 Subject: [PATCH 24/26] update docs + hide sklearn --- docs/_static/cards/jupyter.png | Bin 6497 -> 0 bytes docs/_static/cards/python.png | Bin 9955 -> 0 bytes docs/_static/cards/terminal.png | Bin 26055 -> 0 bytes docs/conf.py | 3 +- docs/index.rst | 77 ++++++++++++++++-------------- pyproject.toml | 2 +- qbraid_algorithms/qrc/encoding.py | 8 +++- 7 files changed, 51 insertions(+), 39 deletions(-) delete mode 100644 docs/_static/cards/jupyter.png delete mode 100644 docs/_static/cards/python.png delete mode 100644 docs/_static/cards/terminal.png diff --git a/docs/_static/cards/jupyter.png b/docs/_static/cards/jupyter.png deleted file mode 100644 index eda35a28deb8c25f125547c407b31ed1610c0520..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6497 zcmb`Mc{E$yyT?-$HME9OL;F%S&s9oCvqnrcBobp|s77dtn#U?Ff|}J_Do6yWd5ZLn z8f&N_v=lAHiyC7l-1Odet^4Qi{&D{}Yn|_N&i?LapS{m>K6{pxxQUY_^er&W5_>MxZ$(iZXSTx%O#kYr`9kj8#ivkEt^UmFm4TIact*=#@Vp_(zt$_{vyT_!0hxvo)4 zbvrf>^_pt&M=`?BeEitti!2mks#X-6|C?roTwPnPlP^|?I-A6`yc1+XsW`vX)!^?x z51PQt3rGSCfsmb!tG%)%cW)ug-0PxM&andqM9U{{Xus2p|7RR?8<9)^bOLh&BDJgy z_T@hF^3whil(-7Wjn~m$U_4D^vQWZ)# zPxrfD!QnIB^%fk*%gd_<(gEy?*VzzxG9Nn=5At#zKmi+uY-h8soR)r>t*~vt(MOgD ziC%WS&q8JZqN`^9dWG6<(v?|Sbasr3^J7ghdB8psQ7@)w($|D; z{h+#`q2c;k^_O77sq2p}7qfLOz$&cLBGYt5GWuad;?iiFaMNT<87yK5ov*pS4#sS6 
[GIT binary patch data for the deleted images docs/_static/cards/jupyter.png, docs/_static/cards/python.png, and docs/_static/cards/terminal.png omitted (binary image data, not human-readable)]
zg-S~qjTmtXx#DhE_AVVjgVgZ&ptqt%TmIVdsY2j+nx_8>=gE9B@&wOl6>O6U_z;-! zW>i8OAHteWs^G|hr-+-pa@XLthC7M?0zXs`@_J_q-~;K#Wy87?JM@kZi9wpOt3ZnrV( z9f@H-{<}B;BCp21b3;20`LXNR0~lRWoLeCyT3b!m=}AaXu*59P|eyAdfgJu)DZ`6!u;g0@A$SYj>m I%-v%C4*_+(;Q#;t diff --git a/docs/conf.py b/docs/conf.py index 7609c67..8d8d0a5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -8,7 +8,7 @@ import qbraid_algorithms -project = "qbraid-algorithms" +project = "qBraid" copyright = "2024, qBraid Development Team" author = "qBraid Development Team" @@ -25,6 +25,7 @@ "sphinx.ext.autodoc", "sphinx_autodoc_typehints", "sphinx.ext.autosummary", + "sphinx-copybutton" ] autodoc_mock_imports = ["torchvision"] diff --git a/docs/index.rst b/docs/index.rst index afac2f1..fed0120 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,6 +1,3 @@ -Documentation -============== - .. raw:: html @@ -49,47 +46,57 @@ Documentation

- qbraid logo + qbraid logo qBraid - + | algorithms

- A cloud-based platform for quantum computing. + Build hybrid quantum-classical algorithms with qBraid.

- - | +:Release: |release| + +Overview +--------- + +Python package for building, simulating, and benchmarking hybrid quantum-classical algorithms. + + +Installation +------------- + +qbraid-algorithms requires Python 3.9 or greater, and can be installed with pip as follows: + +.. code-block:: bash + + pip install qbraid-algorithms + + +Install from Source +^^^^^^^^^^^^^^^^^^^^ + +You can also install from source by cloning this repository and running a pip install command in the root directory of the repository: + +.. code-block:: bash + + git clone https://github.com/qBraid/qbraid-algorithms.git + cd qbraid-algorithms + pip3 install . + + +Resources +---------- + +- `User Guide `_ +- `Example Notebooks `_ +- `API Reference `_ + +| + .. toctree:: :maxdepth: 1 :caption: API Reference diff --git a/pyproject.toml b/pyproject.toml index 388e055..15f8efd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,7 +38,7 @@ Discord = "https://discord.gg/TPBU2sa8Et" visualization = ["matplotlib"] test = ["pytest", "pytest-cov"] lint = ["isort", "ruff", "qbraid-cli"] -docs = ["sphinx~=7.3.7", "sphinx-autodoc-typehints>=1.24,<2.3", "sphinx-rtd-theme~=2.0.0", "docutils<0.22"] +docs = ["sphinx~=7.3.7", "sphinx-autodoc-typehints>=1.24,<2.3", "sphinx-rtd-theme~=2.0.0", "docutils<0.22", ""sphinx-copybutton"] [tool.setuptools_scm] write_to = "qbraid_algorithms/_version.py" diff --git a/qbraid_algorithms/qrc/encoding.py b/qbraid_algorithms/qrc/encoding.py index 5e28a1d..9bed34c 100644 --- a/qbraid_algorithms/qrc/encoding.py +++ b/qbraid_algorithms/qrc/encoding.py @@ -14,8 +14,6 @@ """ import numpy as np import torch -from sklearn.decomposition import PCA -from sklearn.preprocessing import OneHotEncoder def one_hot_encoding(labels: np.ndarray, train: bool = True) -> torch.Tensor: @@ -29,6 +27,9 @@ def one_hot_encoding(labels: np.ndarray, train: bool = True) -> torch.Tensor: torch.Tensor: The one-hot encoded matrix where each row corresponds to a label. 
""" + # pylint: disable-next=import-outside-toplevel + from sklearn.preprocessing import OneHotEncoder + encoder = OneHotEncoder(sparse_output=False) reshaped_data = labels.reshape(-1, 1) if train: @@ -59,6 +60,9 @@ def pca_reduction( Returns: torch.Tensor: The transformed data """ + # pylint: disable-next=import-outside-toplevel + from sklearn.decomposition import PCA + # Perform PCA on training data pca = PCA(n_components=n_components) data_array: np.ndarray = data.data.numpy() From 34774ed5ac02d717651cc72092fc78715461cd2a Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Wed, 10 Jul 2024 19:13:15 -0500 Subject: [PATCH 25/26] fix pyproject --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 15f8efd..0f668d5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,7 +38,7 @@ Discord = "https://discord.gg/TPBU2sa8Et" visualization = ["matplotlib"] test = ["pytest", "pytest-cov"] lint = ["isort", "ruff", "qbraid-cli"] -docs = ["sphinx~=7.3.7", "sphinx-autodoc-typehints>=1.24,<2.3", "sphinx-rtd-theme~=2.0.0", "docutils<0.22", ""sphinx-copybutton"] +docs = ["sphinx~=7.3.7", "sphinx-autodoc-typehints>=1.24,<2.3", "sphinx-rtd-theme~=2.0.0", "docutils<0.22", "sphinx-copybutton"] [tool.setuptools_scm] write_to = "qbraid_algorithms/_version.py" From 09c95afcfd8f1639d02b86e2f4f92c87bd356188 Mon Sep 17 00:00:00 2001 From: Ryan Hill Date: Wed, 10 Jul 2024 19:41:42 -0500 Subject: [PATCH 26/26] fix docs --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 8d8d0a5..dfe050b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -25,7 +25,7 @@ "sphinx.ext.autodoc", "sphinx_autodoc_typehints", "sphinx.ext.autosummary", - "sphinx-copybutton" + "sphinx_copybutton" ] autodoc_mock_imports = ["torchvision"]